def extract_one_file(self, dataset, image_path, output_file=None):
        """Runs the feature extraction algorithms on specific image.

        Parameters
        ----------
        dataset : string
            Path to dataset.
        image_path : string
            Path to image.
        output_file : string, optional, default = None
            Name of the output file containing the features. If not provided, the dataset name is used.

        Returns
        -------
        out : tuple
            Returns a tuple containing the name of output file and time spent in milliseconds.

        Raises
        ------
        IException 'Please select at least one extractor'
            Empty list of extractors.
        IException 'Image %s is possibly corrupt'
            Error opening image.
        """
        if len(self.extractors) == 0:
            raise IException("Please select at least one extractor")

        if output_file is None:
            output_file = File.get_filename(dataset)
        output_file = File.make_path(dataset, output_file + '.arff')

        classes = sorted(File.list_dirs(dataset))

        start_time = TimeUtils.get_time()

        try:
            image = File.open_image(image_path, rgb=False)
        except:
            raise IException("Image %s is possibly corrupt" % image_path)

        labels, types, values = [
            list(itertools.chain.from_iterable(ret)) for ret in zip(
                *([extractor().run(image) for extractor in self.extractors]))
        ]

        self._save_output(File.get_filename(dataset), classes, labels, types,
                          [values + [classes[0]]], output_file)

        end_time = TimeUtils.get_time()

        return output_file, (end_time - start_time)
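
The final list comprehension in extract_one_file is dense. Below is a minimal standalone sketch of the same zip/itertools.chain pattern; the toy extractor classes are assumptions, not project code.

import itertools

# Toy stand-in extractors (assumptions): each run() returns a
# (labels, types, values) tuple for its own features.
class ColorStats(object):
    def run(self, image):
        return (["mean_r", "mean_g"], ["numeric", "numeric"], [0.4, 0.6])

class EdgeStats(object):
    def run(self, image):
        return (["edge_density"], ["numeric"], [0.12])

results = [extractor().run(None) for extractor in [ColorStats, EdgeStats]]

# zip(*results) regroups the tuples column-wise (all labels, all types, all
# values); chain.from_iterable flattens each group into a single list.
labels, types, values = [
    list(itertools.chain.from_iterable(group)) for group in zip(*results)
]
print(labels)  # ['mean_r', 'mean_g', 'edge_density']
print(values)  # [0.4, 0.6, 0.12]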
Example #2
 def get_classifier(name):        
     """Return full name of classifier.
     
     Parameters
     ----------
     name : string
         Alias of the classifier (case-insensitive).
         
     Returns
     -------
     classifier : string
         Full name of classifier.
     
     Raises
     ------
     IException 'Invalid classifier'
         The given name does not match any known classifier.
     """
     classifiers = [_syntactic_alias[alias] for alias in _syntactic_alias]
     
     if name in classifiers:
         return name
     
     alias = name.upper().strip()
     aliases = [a.upper() for a in _syntactic_alias]
     
     if alias in aliases:
         return list(_syntactic_alias.values())[aliases.index(alias)]
     
     raise IException('Invalid classifier')
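
A minimal standalone sketch of the alias-resolution pattern above, written to run on Python 3; the small alias table is an assumption (the real _syntactic_alias mapping is defined elsewhere in the project).

# Resolve either a full classifier name or a case-insensitive alias.
_syntactic_alias = {"cnn": "CNNKeras", "weka": "WekaClassifiers"}

def resolve(name):
    full_names = list(_syntactic_alias.values())
    if name in full_names:
        return name                      # already a full classifier name
    key = name.upper().strip()
    aliases = [a.upper() for a in _syntactic_alias]
    if key in aliases:
        return full_names[aliases.index(key)]
    raise ValueError("Invalid classifier: %s" % name)

print(resolve("WEKA"))  # WekaClassifiers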
Example #3
    def configure_classifier(self):
        """Set the configuration of current classifier.
        
        Raises
        ------
        IException 'You must install python-weka-wrapper'
            The user must install the required dependencies to classifiers.
        """
        if self.classifier is None:
            raise IException("Classifier not found! Select from the menu the option Training>Choose Classifier!")
        
        title = "Configuring %s" % self.classifier.get_name()
        self.tk.write_log(title)

        current_config = self.classifier.get_config()
        
        def process_config():
            new_config = self.tk.get_config_and_destroy()

            self.classifier.set_config(new_config)
            self.tk.append_log("\nConfig updated:\n%s", str(self.classifier.get_summary_config()))
            
            if self.classifier: self.classifier.reset()

        self.tk.dialogue_config(title, current_config, process_config)
Example #4
    def select_classifier(self):
        """Open a dialog to select the classifier.
        
        Raises
        ------
        IException 'You must install python-weka-wrapper'
            The user must install the required dependencies to classifiers.
        """
        if self.classifier is None:
            raise IException("Classifier not found! Select from the menu the option Training>Choose Classifier!")
        
        title = "Choosing a classifier"
        self.tk.write_log(title)

        current_config = classification.get_classifier_config()

        
        def process_config():
            """Update the current classifier."""
            new_config = self.tk.get_config_and_destroy()
            
            self.classifier = [new_config[classifier].meta for classifier in new_config
                                if new_config[classifier].value == True ][0]()

            self.tk.append_log("\nClassifier: %s\n%s", str(self.classifier.get_name()), str(self.classifier.get_summary_config()))
            classification.set_classifier_config(new_config)

        self.tk.dialogue_choose_one(title, current_config, process_config)
Example #5
    def add_class(self, dialog = True, name = None, color = None):
        """Add a new class.

        Parameters
        ----------
        dialog : boolean, optional, default = True
            If true open a config dialog to add the class.
        name : string, optional, default = None
            Name of the class. If not provided, the name 'Class_nn' is used.
        color : string, optional, default = None
            Name of a color in X11Color format, representing the class. It will be used to paint the segments of the class.
            If not provided, a random color is chosen.
            
        Raises
        ------
        IException 'You have reached the limit of %d classes'
            If you have already created self.tk.MAX_CLASSES classes.
        """
        n_classes = len(self.classes)
        if n_classes >= self.tk.MAX_CLASSES:
            raise IException("You have reached the limit of %d classes" % self.tk.MAX_CLASSES)
                
        def edit_class(index):
            """Calls method that edit the class."""
            self.edit_class(index)
            
        def update_current_class(index):
            """Calls method that update the class."""
            self.update_current_class(index)
        
        def process_config():
            """Add the class and refresh the panel of classes."""
            new_class = self.tk.get_config_and_destroy()
            new_class["name"].value = '_'.join(new_class["name"].value.split())

            self.classes.append( new_class )
            self.tk.write_log("New class: %s", new_class["name"].value)
            self.tk.refresh_panel_classes(self.classes, self._current_class)
            
        if name is None:
            name = "Class_%02d" % (n_classes+1)
        if color is None:
            color = util.X11Colors.random_color()
            
        class_config = OrderedDict()
        class_config["name"] = Config(label="Name", value=name, c_type=str)
        class_config["color"] = Config(label="Color (X11 Colors)", value=color, c_type='color')
        class_config["callback"] = Config(label=None, value=update_current_class, c_type=None, hidden=True)
        class_config["callback_color"] = Config(label=None, value=edit_class, c_type=None, hidden=True)
        class_config["args"] = Config(label=None, value=n_classes, c_type=int, hidden=True)
        
        if dialog == False:
            self.classes.append( class_config )
            return 

        title = "Add a new class"
        self.tk.dialogue_config(title, class_config, process_config)        
Example #6
        def process_config():
            """Update the collection of extractors."""
            new_config = self.tk.get_config_and_destroy()

            self.extractors = [new_config[extractor].meta for extractor in new_config
                                if new_config[extractor].value == True ]
            
            if len(self.extractors) == 0:
                raise IException("Please select an extractor from the menu under Features Extraction> Select extractors! ")
            
            self.tk.append_log("\nConfig updated:\n%s", 
                                '\n'.join(["%s: %s" % (new_config[extractor].label, "on" if new_config[extractor].value==True else "off")
                                            for extractor in new_config]))
            extraction.set_extractor_config(new_config)
Example #7
 def close_image(self):
     """Close the image.
     
     Raises
     ------
     IException 'Image not found'
         If there's no image opened.
     """
     if self._const_image is None:
         raise IException("Image not found!  Open an image to test, select in the menu the option File>Open Image!")
     
     if self.tk.close_image():
         self.tk.write_log("Closing image...")
         self._const_image = None
         self._image = None
         self._image_path = None
Example #8
    def classify(self, dataset, test_dir, test_data, image):
        try:
            #self.model.load_weights(
            #"../models_checkpoints/" + self.file_name + ".h5")
            K.clear_session()
            if self.weight_path is not None:
                self.model = load_model(self.weight_path)
        except Exception as e:
            raise IException("Can't load the model in " + self.weight_path +
                             ": " + str(e))

        w = image.shape[0]
        h = image.shape[1]
        img = Image.fromarray(image)
        img = img.resize((IMG_HEIGHT, IMG_WIDTH))
        input_img = np.asarray(img)
        input_img = np.expand_dims(input_img, axis=0)

        if self.architecture.value == "ResNet50":
            output = self.model.predict(input_img, verbose=0)
        else:
            output = self.model.predict_proba(input_img, verbose=0)

        output = output.reshape(
            (output.shape[0], IMG_HEIGHT, IMG_WIDTH, self.num_classes.value))

        labeled = np.argmax(output[0], axis=-1)
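        # Debug output: the class-1 probability map and the argmax mask are
        # saved below to hard-coded paths under /home/diogo/ (developer leftovers).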
        rgb = np.zeros((IMG_HEIGHT, IMG_WIDTH, 3))
        rgb[:, :, 0] = output[0, :, :, 1] * 255
        im = Image.fromarray(np.uint8(rgb))
        im.save('/home/diogo/to.png')

        rgb = np.zeros((IMG_HEIGHT, IMG_WIDTH, 3))
        rgb[:, :, 0] = labeled * 255
        im = Image.fromarray(np.uint8(rgb))
        im.save('/home/diogo/to2.png')

        img_labeled = Image.fromarray(labeled.astype('uint8'))
        img_labeled = img_labeled.resize((h, w))
        labeled = np.asarray(img_labeled)

        return labeled
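
A standalone sketch of the post-processing step above: per-pixel argmax over the softmax output, then resizing the label mask back to the original image size. Shapes are toy values, and nearest-neighbour resampling is an explicit choice here so class ids are not blended.

import numpy as np
from PIL import Image

H, W, num_classes = 4, 4, 3
output = np.random.rand(1, H, W, num_classes)   # (1, H, W, C) softmax output

labeled = np.argmax(output[0], axis=-1).astype("uint8")  # per-pixel class id

orig_rows, orig_cols = 6, 8   # original image height and width
mask = Image.fromarray(labeled).resize((orig_cols, orig_rows),
                                       resample=Image.NEAREST)
print(np.asarray(mask).shape)  # (6, 8)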
Example #9
    def run_segmenter(self, refresh_image=True):
        """Segment the image using the current segmenter.
        
        Raises
        ------
        IException 'Image not found'
            If there's no image opened.
        """
        if self._const_image is None:
            raise IException("Image not found!  Open an image to test, select in the menu the option File>Open Image!")
        
        self.tk.write_log("Running %s...", self.segmenter.get_name())

        self.tk.append_log("\nConfig: %s", str(self.segmenter.get_summary_config()))
        self._image, run_time = self.segmenter.run(self._const_image)
        self.tk.append_log("Time elapsed: %0.3f seconds", run_time)
        
        self._gt_segments = [None]*(max(self.segmenter.get_list_segments())+1)

        if refresh_image:
            self.tk.refresh_image(self._image)
Example #10
    def sub_job_extractor(self, item, dataset, cl, classes):
        try:
            filepath = File.make_path(dataset, cl, item)
            image = cv2.imread(filepath)
            #image = self.equalize_size_image(image)

        except:
            raise IException("Image %s is possibly corrupt" % filepath)

        if len(self.data) > 0:
            if sys.version_info >= (3, 0):
                # Mirror the Python 2 branch below: keep only the values group
                # (index 2) and flatten it.
                values = list(
                    itertools.chain.from_iterable(
                        list(zip(*([
                            extractor().run(image)
                            for extractor in self.extractors
                        ])))[2]))
            else:
                values = list(
                    itertools.chain.from_iterable(
                        zip(*([
                            extractor().run(image)
                            for extractor in self.extractors
                        ]))[2]))

            self.data.append(values + [cl if cl in classes else classes[0]])

        else:
            labs, tys, values = [
                list(itertools.chain.from_iterable(ret))
                for ret in zip(*(extractor().run(image)
                                 for extractor in self.extractors))
            ]
            self.labels.append(labs)
            self.types.append(tys)
            self.data.append(values + [cl if cl in classes else classes[0]])
        image = None
        filepath = None
Example #11
    def experimenter_all(self):
        """Run a test with all available classifiers and show the results.
        
        Raises
        ------
        IException 'You must install python-weka-wrapper'
            The user must install the required dependencies to classifiers.
        """
        if self.classifier is None:
            raise IException("Classifier not found! Select from the menu the option Training>Choose Classifier!")
        
        if self.tk.ask_ok_cancel("Experimenter All", "This may take several minutes to complete. Are you sure?"):
            if self.classifier.must_train():
                self.tk.write_log("Creating training data...")

                fextractor = FeatureExtractor(self.extractors)
                output_file, run_time = fextractor.extract_all(self.dataset, "training", overwrite = False)
                self.classifier.train(self.dataset, "training")
                
            self.tk.write_log("Running Experimenter All on %s...", self.classifier.get_name())
            
            popup_info = self.classifier.experimenter()
            self.tk.append_log("\nExperimenter All finished")
            self.tk.popup(popup_info)
Example #12
 def cross_validation(self):
     """Run cross-validation on all generated segments in the image dataset.
     
     Raises
     ------
     IException 'You must install python-weka-wrapper'
         The user must install the required dependencies to classifiers.
     """
     if self.classifier is None:
         raise IException("Classifier not found! Select from the menu the option Training>Choose Classifier!")
     
     if self.classifier.must_train():
         self.tk.write_log("Creating training data...")
         
         fextractor = FeatureExtractor(self.extractors)
         output_file, run_time = fextractor.extract_all(self.dataset, "training", overwrite = False)
         self.classifier.train(self.dataset, "training")
     
     self.tk.write_log("Running Cross Validation on %s...", self.classifier.get_name())
     self.tk.append_log("\n%s", str(self.classifier.get_summary_config()))
     
     popup_info = self.classifier.cross_validate()
     self.tk.append_log("Cross Validation finished")
     self.tk.popup(popup_info)
Example #13
    def run_classifier_folder(self, foldername=None):

        if self.classifier is None:
            raise IException("Classifier not found! Select from the menu the option Training>Choose Classifier!")

        if foldername is None:
            foldername = self.tk.utils.ask_directory()

        valid_images_extension = ['.jpg', '.png', '.gif', '.jpeg', '.tif']

        fileimages = [name for name in os.listdir(foldername)
                    if os.path.splitext(name)[-1].lower() in valid_images_extension]

        fileimages.sort()

        all_accuracy = []
        all_IoU = []
        all_frequency_weighted_IU = []

        for file in fileimages:
            path_file = os.path.join(foldername, file)
            self.open_image(path_file)
            self.run_classifier()
            label_image = os.path.join(foldername, (os.path.splitext(file)[-2] + '_json'), 'label.png')
            self._image_gt = File.open_image_lut(label_image)
            self._image_gt_name = File.get_filename(label_image)

            tam_gt = self._image_gt.shape
            tam_im = self._mask_image.shape
            if len(tam_gt) > 2:
                self.tk.write_log("Color image is not supported. You must open a gray-scale image")
                return

            if tam_gt[0] != tam_im[0] or tam_gt[1] != tam_im[1]:
                self.tk.write_log("Images with different sizes")
                return

            
            confusion_matrix = MetricUtils.confusion_matrix(self._mask_image, self._image_gt)
            [mean_accuracy, accuracy] = MetricUtils.mean_accuracy(self._mask_image, self._image_gt)
            [mean_IoU, IoU] = MetricUtils.mean_IU(self._mask_image, self._image_gt)
            frequency_weighted_IU = MetricUtils.frequency_weighted_IU(self._mask_image, self._image_gt)

            print('Confusion Matrix')
            print(confusion_matrix)

            print('Mean Pixel Accuracy')
            print(mean_accuracy)

            print('Pixel accuracy per class')
            print(accuracy)

            print('Mean Intersection over Union')
            print(mean_IoU)

            print('Intersection over Union per class')
            print(IoU)

            print('Frequency Weighted IU')
            print(frequency_weighted_IU)

            all_accuracy.append(accuracy)
            all_IoU.append(IoU)
            all_frequency_weighted_IU.append(frequency_weighted_IU)

            if not os.path.exists("../models_results/"):
                os.makedirs("../models_results/")
            
            path = File.make_path("../models_results/" + file + ".txt")
            path_img = File.make_path("../models_results/" + file + "_seg1.tif")
            path_img2 = File.make_path("../models_results/" + file + "_seg2.tif")

            img = Image.fromarray(self._image)
            img.save(path_img)
            img = Image.fromarray(self.class_color)
            img.save(path_img2)
            
            f=open(path,'ab')
            np.savetxt(f, ['Confusion matrix'], fmt='%s')
            np.savetxt(f, confusion_matrix, fmt='%.5f')
            np.savetxt(f, ['\nAccuracy'], fmt='%s')
            np.savetxt(f, accuracy, fmt='%.5f')
            np.savetxt(f, ['\nIntersection over union'], fmt='%s')
            np.savetxt(f, IoU, fmt='%.5f')
            np.savetxt(f, ['\nFrequency weighted intersection over union'], fmt='%s')
            np.savetxt(f, [frequency_weighted_IU], fmt='%.5f')
            f.close()


        path = File.make_path("../models_results/all_metrics.txt")
        f=open(path,'ab')
        np.savetxt(f, ['All Accuracy'], fmt='%s')
        np.savetxt(f, all_accuracy, fmt='%.5f')
        np.savetxt(f, ['\nAll IoU'], fmt='%s')
        np.savetxt(f, all_IoU, fmt='%.5f')
        np.savetxt(f, ['\nAll Frequency Weighted IU'], fmt='%s')
        np.savetxt(f, all_frequency_weighted_IU, fmt='%.5f')
        f.close()
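
The internals of MetricUtils are not shown in this snippet; for reference, here is a standalone sketch of the standard pixel-accuracy and per-class IoU definitions it presumably implements.

import numpy as np

def pixel_accuracy(pred, gt):
    # Fraction of pixels whose predicted label matches the ground truth.
    return float(np.sum(pred == gt)) / gt.size

def per_class_iou(pred, gt, num_classes):
    # Intersection over union computed independently for each class id.
    ious = []
    for c in range(num_classes):
        inter = np.sum((pred == c) & (gt == c))
        union = np.sum((pred == c) | (gt == c))
        ious.append(inter / float(union) if union else float("nan"))
    return ious

gt = np.array([[0, 0, 1], [1, 1, 2]])
pred = np.array([[0, 1, 1], [1, 1, 2]])
print(pixel_accuracy(pred, gt))     # 0.8333...
print(per_class_iou(pred, gt, 3))   # [0.5, 0.75, 1.0]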
Example #14
    def classify(self, dataset, test_dir, test_data, image):
        """Perform the classification.

        Parameters
        ----------
        dataset : string
            Path to image dataset.
        test_dir : string
            Directory inside the dataset containing the images to classify.
        test_data : string
            Name of test data file.
        image : numpy.ndarray
            Not used by this classifier.

        Returns
        -------
        summary : list of string
            List of predicted classes for each instance in test data in ordered way.
        """

        predict_directory = File.make_path(dataset, test_dir)

        # Create a Keras class
        if not os.path.exists(File.make_path(predict_directory, "png")):
            os.makedirs(File.make_path(predict_directory, "png"))

        for file in os.listdir(predict_directory):
            print(File.make_path(predict_directory, file))
            if os.path.splitext(file)[-1] == ".tif":
                try:
                    img = Image.open(File.make_path(predict_directory, file))
                    # img.thumbnail(img.size)
                    new_file = os.path.splitext(file)[0] + ".png"
                    img.save(File.make_path(predict_directory,
                                            'png', new_file), "PNG", quality=100)
                except Exception as e:
                    print(e)
            else:
                print(File.make_path(predict_directory, file))
                os.symlink(File.make_path(predict_directory, file),
                           File.make_path(predict_directory, 'png', file))

        classify_datagen = ImageDataGenerator()

        classify_generator = classify_datagen.flow_from_directory(
            File.make_path(predict_directory, 'png'),
            target_size=(IMG_HEIGHT, IMG_WIDTH),
            batch_size=1,
            shuffle=False,
            class_mode=None)

        try:
            # self.model.load_weights(
            #"../models_checkpoints/" + self.file_name + ".h5")
            K.clear_session()
            if self.weight_path is not None:
                self.model = load_model(self.weight_path)
                path_classes = self.weight_path.replace(
                    "_model.h5", "_classes.npy")
                print("Load Model H5:" + self.weight_path)
                CLASS_NAMES = list(np.load(path_classes).item().keys())
        except Exception as e:
            raise IException("Can't load the model in " +
                             self.weight_path + ": " + str(e))

        output_classification = self.model.predict_generator(
            classify_generator, classify_generator.samples, verbose=2)

        one_hot_output = np.argmax(output_classification, axis=1)

        one_hot_output = one_hot_output.tolist()

        for index in range(0, len(one_hot_output)):
            one_hot_output[index] = CLASS_NAMES[one_hot_output[index]]

        return one_hot_output
Example #15
 def experimenter(self):
     """Perform a test using all classifiers available. 
     """
     raise IException("Method not available for this classifier")
Example #16
    def run_grafic_confusion_matrix(self):
        '''
        Generate a graphical confusion matrix in which images are classified and, according to the classification, placed into the right or wrong folder.
        Only available for WekaClassifiers and CNNKeras.
        '''
        from classification import WekaClassifiers, CNNKeras
        
        is_weka = isinstance(self.classifier, WekaClassifiers)
        is_keras = isinstance(self.classifier, CNNKeras)
        if not (is_weka or is_keras):
            message='Only available to Weka and CNN Keras classifiers.'
            raise IException(message)

        
        if not self.has_trained:
            message='Dataset Must Be Trained.'
            raise IException(message)
        
        from os.path import abspath, isdir

        folder = self.tk.utils.ask_directory()
        if not folder:
            message = 'No directory selected.'
            raise IException(message)
            
        folder = abspath(folder)
        dataset = abspath(self.dataset)
        if folder == dataset:
            title = 'Same Dataset'
            message = 'The dataset selected is the same of the trained. Are you sure that is right?'
            option=self.tk.ask_ok_cancel(title, message)
            if not option:
                return
                
        from os import listdir, mkdir
        listdirs=listdir(folder)
        size_dirs = reduce(lambda a,b: a+b, [0]+[len(listdir(folder+'/'+d)) for d in listdirs if isdir(folder+'/'+d)])
        if not size_dirs:
            message = 'Dataset has no content or the subfolder has no content.'
            raise IException(message)
            
        from shutil import rmtree
        from os import symlink
        
        def create_folder_struct(matrix_path, class_names, human, computer):
            try:
                rmtree(matrix_path)
            except Exception as e:
                pass

            mkdir(matrix_path,0o777)
            for class_ in class_names:
                real=matrix_path+human+class_+'/'
                mkdir(real, 0o777)
                for _class in class_names:
                    mkdir(real+computer+_class,0o777)




        header_output = 'Starting Graphical Confusion Matrix\n\n'
        index=folder[-2::-1].index('/')
        matrix_path=folder[:-(index+1)]+'folder_confusion_matrix'
        class_names, classes=listdir(folder), {}
        
        for i in range(len(class_names)-1,-1,-1):
            if isdir(dataset+'/'+class_names[i]):
                if class_names[i][0] != '.':
                    continue
            del class_names[i]
        for i, name in enumerate(class_names):
            classes[name], classes[i]=i, name
        images=[]
        
        for classe in class_names:
            image_names=listdir(folder+'/'+classe)
            for i in range(len(image_names)):
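                # Store a (folder + '/', class, '/' + filename) tuple; the pieces
                # are reused below to build the source and destination paths.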
                image_names[i]=folder+'/',classe ,'/'+image_names[i]
            images.extend(image_names)
        
        human, computer = '/human_', '/computer_'
        create_folder_struct(matrix_path, class_names, human, computer)
        
        header_output_middle = header_output + 'Dataset selected: ' + folder + '\n\n'
        self.tk.write_log(header_output_middle + 'Initializing...')
        
        total = str(len(images))
        # Classification runs inside this method with threads; it cannot be
        # switched to multiprocessing Process because of problems with JVM instances.
        print("Waiting for classification to finish!")
        for i, image_path in enumerate(images):
            original_name = reduce(lambda a, b: a + b, image_path)
            real_class_path = matrix_path + human + image_path[1]
            predicted = self.classifier.single_classify(original_name, folder, self.extractors, classes)
            message = header_output_middle + str(i + 1) + ' of ' + total + ' images classified.'
            self.tk.write_log(message)
            predicted_class_path = real_class_path + computer + predicted
            predicted_name = predicted_class_path + image_path[2]
            symlink(original_name, predicted_name)
                


        message = header_output + 'Saved in ' + matrix_path
        self.tk.write_log(message)
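
A standalone sketch of the folder layout this method builds (human_<true class>/computer_<predicted class> directories holding links to the classified images); class names and the sample file below are toy assumptions.

import os
import tempfile

classes = ["crop", "weed"]
matrix_path = tempfile.mkdtemp(suffix="_folder_confusion_matrix")

# One cell per (true class, predicted class) pair.
for true_class in classes:
    for predicted_class in classes:
        os.makedirs(os.path.join(matrix_path,
                                 "human_" + true_class,
                                 "computer_" + predicted_class))

# After classification, each image is linked into the cell it falls in.
src = os.path.join(matrix_path, "example.jpg")
open(src, "w").close()
dst = os.path.join(matrix_path, "human_weed", "computer_crop", "example.jpg")
os.symlink(src, dst)
print(os.path.islink(dst))  # True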
Example #17
    def extract_all(self,
                    dataset,
                    output_file=None,
                    dirs=None,
                    overwrite=True,
                    processor_amd=False):
        """Runs the feature extraction algorithms on all images of the dataset.

        Parameters
        ----------
        dataset : string
            Path to dataset.
        output_file : string, optional, default = None
            Name of the output file containing the features. If not provided, the dataset name is used.
        dirs : list of string, optional, default = None
            List of directories to be searched. If not provided, search all directories with images inside the dataset.
        overwrite : boolean, optional, default = True
            If False, check whether a file containing the features already exists.
        processor_amd : boolean, optional, default = False
            If True, use multiprocessing-safe (Manager) lists to collect the results.

        Returns
        -------
        out : tuple
            Returns a tuple containing the name of output file and time spent in milliseconds.

        Raises
        ------
        IException 'Please select at least one extractor'
            Empty list of extractors.
        IException 'Image %s is possibly corrupt'
            Error opening some image inside dataset.
        IException 'There are no images in dataset: %s'
            Dataset does not contain any image.
        """
        self.processor_amd = processor_amd
        self.threads = []
        if self.processor_amd == True:
            # Manager lists are required here: plain lists are not shared
            # correctly when the work runs in multiprocessing Processes.
            self.data = Manager().list()
            self.labels = Manager().list()
            self.types = Manager().list()
        else:
            self.data = []
            self.labels = []
            self.types = []
        if len(self.extractors) == 0:
            raise IException("Please select at least one extractor")

        if output_file is None:
            output_file = File.get_filename(dataset)
        output_file = File.make_path(dataset, output_file + '.arff')

        # If an output file already exists and overwrite is disabled, return the current file.
        if overwrite == False and os.path.isfile(output_file):
            return output_file, 0

        start_time = TimeUtils.get_time()

        classes = sorted(File.list_dirs(dataset))
        dirs = classes if dirs is None else dirs

        # Runs the feature extraction for all classes inside the dataset
        for cl in dirs:
            # start job for each extractor
            self.job_extractor(dataset, cl, classes)

        # RAM stats are reported in KB; convert to MB for readability.
        RAM_stats = self.getRAMinfo()
        RAM_total = round(int(RAM_stats[0]) / 1000, 1)
        RAM_used = round(int(RAM_stats[1]) / 1000, 1)
        print("RAM Total : " + str(RAM_total))
        print("RAM Used : " + str(RAM_used))
        self.print_console(
            "Please wait: the threads are processing " +
            str(len(self.threads)) +
            " images. This may take a while depending on the size and number of the images!"
        )
        with tqdm(total=len(self.threads)) as pbar:
            for t in self.threads:
                t.start()
                if ((RAM_total) < 10000):  # if total RAM is less than 10 GB
                    RAM_stats = self.getRAMinfo()
                    RAM_used = round(int(RAM_stats[1]) / 1000, 1)
                    if ((RAM_total - RAM_used) < 2000):
                        t.join()
                pbar.update(1)
            pbar.close()

        self.print_console(
            "Waiting for workers to finish extracting attributes from images!")
        with tqdm(total=len(self.threads)) as ppbar:
            for t in self.threads:
                t.join()

                ppbar.update(1)
            ppbar.close()
        self.print_console("The process was completed with " +
                           str(len(self.threads)) + " images!")
        if len(self.data) == 0:
            raise IException("There are no images in dataset: %s" % dataset)
        del self.threads
        gc.collect()
        # Save the output file in ARFF format
        # self._save_output(File.get_filename(dataset), classes, self.labels, self.types, self.data, output_file)
        self._save_output(File.get_filename(dataset), classes, self.labels[0],
                          self.types[0], self.data, output_file)
        end_time = TimeUtils.get_time()

        return output_file, (end_time - start_time)
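
A standalone sketch of the throttled thread fan-out used above: workers are started one by one, and the loop joins early when resources run low so the number of live workers stays bounded. A simple cap on live threads stands in for the RAM check here; it is an assumption for illustration only.

import threading

MAX_LIVE = 4

def work(i):
    sum(range(100000))          # placeholder for per-image feature extraction

threads = [threading.Thread(target=work, args=(i,)) for i in range(20)]
for t in threads:
    t.start()
    if threading.active_count() > MAX_LIVE:
        t.join()                # throttle: wait for this worker to finish

for t in threads:
    t.join()                    # wait for all remaining workers
print("done")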
Example #18
    def run_classifier(self):
        """Run the classifier on the current image.
        As a result, the image is painted with the color corresponding to the predicted class of each segment.

        Raises
        ------
        IException 'You must install python-weka-wrapper'
            The user must install the required dependencies to classifiers.
        IException 'Image not found'
            If there's no image opened.
        """
        if self.classifier is None:
            raise IException("Classifier not found! Select from the menu the option Training>Choose Classifier!")

        if self._const_image is None:
            raise IException("Image not found!  Open an image to test, select in the menu the option File>Open Image!")

        self.tk.write_log("Running %s...", self.classifier.get_name())
        self.tk.append_log("\n%s", str(self.classifier.get_summary_config()))

        #self.classifier.set

        start_time = TimeUtils.get_time()

        # Perform a segmentation, if needed.
        list_segments = self.segmenter.get_list_segments()
        if len(list_segments) == 0:
            self.tk.append_log("Running %s... (%0.3f seconds)", self.segmenter.get_name(), (TimeUtils.get_time() - start_time))

            self._image, _ = self.segmenter.run(self._const_image)
            self.tk.refresh_image(self._image)
            list_segments = self.segmenter.get_list_segments()
            self._gt_segments = [None]*(max(list_segments)+1)

        #  New and optimized classification
        tmp = ".tmp"
        File.remove_dir(File.make_path(self.dataset, tmp))

        self.tk.append_log("Generating test images... (%0.3f seconds)", (TimeUtils.get_time() - start_time))

        len_segments = {}

        print("Wait to complete processes all images!")
        with tqdm(total=len(list_segments)) as pppbar:
            for idx_segment in list_segments:
                segment, size_segment, idx_segment = self.segmenter.get_segment(self, idx_segment=idx_segment)[:-1]
                # Problem here! Dataset removed.
                filepath = File.save_only_class_image(segment, self.dataset, tmp, self._image_name, idx_segment)
                len_segments[idx_segment] = size_segment
                pppbar.update(1)
            pppbar.close()


        # Perform the feature extraction of all segments in image ( not applied to ConvNets ).
        if self.classifier.must_extract_features():
            self.tk.append_log("Running extractors on test images... (%0.3f seconds)", (TimeUtils.get_time() - start_time))
            fextractor = FeatureExtractor(self.extractors)
            output_file, _ = fextractor.extract_all(self.dataset, "test", dirs=[tmp])

        self.tk.append_log("Running classifier on test data... (%0.3f seconds)", (TimeUtils.get_time() - start_time))

        # Get the label corresponding to predict class for each segment of image.
        labels = self.classifier.classify(self.dataset, test_dir=tmp, test_data="test.arff", image=self._const_image)
        File.remove_dir(File.make_path(self.dataset, tmp))

        # Result is the class for each superpixel
        if isinstance(labels, list):
            self.tk.append_log("Painting segments... (%0.3f seconds)", (TimeUtils.get_time() - start_time))

            # If ground truth mode, show alternative results
            if self._ground_truth == True:
                return self._show_ground_truth(list_segments, len_segments, labels, start_time)

            # Create a popup with results of classification.
            popup_info = "%s\n" % str(self.classifier.get_summary_config())

            len_total = sum([len_segments[idx] for idx in len_segments])
            popup_info += "%-16s%-16s%0.2f%%\n" % ("Total", str(len_total), (len_total*100.0)/len_total)

            # Paint the image.
            self._mask_image = np.zeros(self._const_image.shape[:-1], dtype="uint8")
            height, width, channels = self._image.shape
            self.class_color = np.zeros((height,width,3), np.uint8)
            for (c, cl) in enumerate(self.classes):
                idx_segment = [ list_segments[idx] for idx in range(0, len(labels)) if cl["name"].value == labels[idx] or c == labels[idx]]
                if len(idx_segment) > 0:
                    self._image, _ = self.segmenter.paint_segment(self._image, cl["color"].value, idx_segment=idx_segment, border=False)
                    for idx in idx_segment:
                        self._mask_image[self.segmenter._segments == idx] = c
                        self.class_color[self.segmenter._segments == idx] = X11Colors.get_color(cl["color"].value)

                len_classes = sum([len_segments[idx] for idx in idx_segment])
                popup_info += "%-16s%-16s%0.2f%%\n" % (cl["name"].value, str(len_classes), (len_classes*100.0)/len_total)


            self.tk.refresh_image(self._image)
            self.tk.popup(popup_info)
        else:
            # Result is an image
            self._mask_image = labels
            height, width, channels = self._image.shape
            self.class_color = np.zeros((height,width,3), np.uint8)

            for (c, cl) in enumerate(self.classes):
                self.class_color[labels == c] = X11Colors.get_color(cl["color"].value)

            self._image = cv2.addWeighted(self._const_image, 0.7, self.class_color, 0.3, 0)
            self.tk.refresh_image(self._image)


        end_time = TimeUtils.get_time()

        self.tk.append_log("\nClassification finished")
        self.tk.append_log("Time elapsed: %0.3f seconds", (end_time - start_time))
        gc.collect()
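
A standalone sketch of the segment-painting step above (masking a colour image by superpixel id); segment ids, predictions, and the palette are toy values.

import numpy as np

segments = np.array([[0, 0, 1],
                     [2, 2, 1]])                    # superpixel id per pixel
predicted = {0: "crop", 1: "weed", 2: "crop"}       # class per segment
palette = {"crop": (0, 255, 0), "weed": (255, 0, 0)}

class_color = np.zeros(segments.shape + (3,), dtype=np.uint8)
for idx, label in predicted.items():
    class_color[segments == idx] = palette[label]   # paint all pixels of segment idx

print(class_color[0, 2])  # [255   0   0] -> pixel of segment 1 painted as weed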
Example #19
 def cross_validate(self, detail=True):
     """Perform cross validation using trained data.
     """
     raise IException("Method not available for this classifier")
Example #20
    def classify(self, dataset, test_dir, test_data, image):
        """Perform the classification.

        Parameters
        ----------
        dataset : string
            Path to image dataset.
        test_dir : string
            Directory inside the dataset containing the images to classify.
        test_data : string
            Name of test data file.
        image : numpy.ndarray
            Not used by this classifier.

        Returns
        -------
        summary : list of string
            List of predicted classes for each instance in test data in ordered way.
        """

        predict_directory = File.make_path(dataset, test_dir)

        # Create a Keras class
        if not os.path.exists(File.make_path(predict_directory, "png")):
            os.makedirs(File.make_path(predict_directory, "png"))

        for file in os.listdir(predict_directory):
            print(File.make_path(predict_directory, file))
            if os.path.splitext(file)[-1] == ".tif":
                try:
                    img = Image.open(File.make_path(predict_directory, file))
                    new_file = os.path.splitext(file)[0] + ".png"
                    img.save(File.make_path(predict_directory, 'png',
                                            new_file),
                             "PNG",
                             quality=100)
                except Exception as e:
                    print(e)
            else:
                print(File.make_path(predict_directory, file))
                os.symlink(File.make_path(predict_directory, file),
                           File.make_path(predict_directory, 'png', file))

        classify_datagen = ImageDataGenerator()

        classify_generator = classify_datagen.flow_from_directory(
            File.make_path(predict_directory, 'png'),
            target_size=(IMG_HEIGHT, IMG_WIDTH),
            batch_size=1,
            shuffle=False,
            class_mode=None)

        try:
            K.clear_session()
            if self.pseudo_label.weight_path is not None:
                self.create_model()
                self.model.load_weights(self.pseudo_label.weight_path)
        except Exception as e:
            raise IException("Can't load the model in " +
                             self.pseudo_label.weight_path + ": " + str(e))

        output_classification = self.model.predict_generator(
            classify_generator, classify_generator.samples, verbose=2)

        one_hot_output = np.argmax(output_classification, axis=1)

        one_hot_output = one_hot_output.tolist()

        return one_hot_output