Code example #1
File: pynovisao.py, Project: kleyton67/pyn-modified
    def _init_classes(self, classes = None, colors = None):
        """Initialize the classes of dataset.

        Parameters
        ----------
        classes : list of string, optional, default = None
            List of classes. If not informed, the method uses all classes found in the dataset.
            If there are no classes in the dataset, two default classes are added.
        colors : list of string, optional, default = None
            List of colors, one per class, in the same order. If not informed, a color is chosen at random.
        """
        self.classes = []

        dataset_description_path = File.make_path(self.dataset, '.dataset_description.txt')

        if os.path.exists(dataset_description_path):
            colors = []
            classes = []
            with open(dataset_description_path, "r") as file:
                for line in file:
                    class_info = line.replace("\n", "").split(",")
                    classes.append(class_info[0])
                    colors.append(class_info[1])
        else:
            classes = sorted(File.list_dirs(self.dataset)) if classes is None else classes.split()
            colors = [] if colors is None else colors.split()

        if len(classes) > 0:
            for i in range(len(classes)):
                self.add_class(dialog=False, name=classes[i], color=colors[i] if i < len(colors) else None)
        else:
            self.add_class(dialog = False, color='Green')
            self.add_class(dialog = False, color='Yellow')
                
        self._current_class = 0
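
A note on the file parsed above: .dataset_description.txt is plain text with one "class,color" pair per line, so a minimal example of its expected contents (the class and color names below are illustrative assumptions, not taken from the project) looks like:

    soil,Orange
    soybean,Green
    weed,Red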
Code example #2
File: pynovisao.py, Project: kleyton67/pyn-modified
    def _init_dataset(self, directory):
        """Initialize the directory of image dataset.

        Parameters
        ----------
        directory : string
            Path to directory.
        """
        if directory.endswith('/'):
            directory = directory[:-1]
            
        self.dataset = directory
        File.create_dir(self.dataset)
Code example #3
    def extract_one_file(self, dataset, image_path, output_file=None):
        """Runs the feature extraction algorithms on specific image.

        Parameters
        ----------
        dataset : string
            Path to dataset.
        image_path : string
            Path to image.
        output_file : string, optional, default = None
            Name of the output file containing the features. If not informed, the dataset name is used.

        Returns
        -------
        out : tuple
            Returns a tuple containing the name of the output file and the time spent in milliseconds.

        Raises
        ------
        IException 'Please select at least one extractor'
            Empty list of extractors.
        IException 'Image %s is possibly corrupt'
            Error opening image.
        """
        if len(self.extractors) == 0:
            raise IException("Please select at least one extractor")

        if output_file is None:
            output_file = File.get_filename(dataset)
        output_file = File.make_path(dataset, output_file + '.arff')

        classes = sorted(File.list_dirs(dataset))

        start_time = TimeUtils.get_time()

        try:
            image = File.open_image(image_path, rgb=False)
        except Exception:
            raise IException("Image %s is possibly corrupt" % image_path)

        labels, types, values = [
            list(itertools.chain.from_iterable(ret)) for ret in zip(
                *([extractor().run(image) for extractor in self.extractors]))
        ]

        self._save_output(File.get_filename(dataset), classes, labels, types,
                          [values + [classes[0]]], output_file)

        end_time = TimeUtils.get_time()

        return output_file, (end_time - start_time)
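
A hedged usage sketch of extract_one_file, consistent with how FeatureExtractor is instantiated elsewhere in this listing; the dataset path, image path, and extractor list are assumptions for illustration:

    # Hypothetical usage; the paths and the extractor list are illustrative only.
    fextractor = FeatureExtractor(extractors)
    arff_file, elapsed = fextractor.extract_one_file("data/demo_dataset",
                                                     "data/demo_dataset/weed/img_001.tif")
    print("Features written to %s (%0.3f)" % (arff_file, elapsed))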
Code example #4
 def train(self, dataset, training_data, force = False):
     """Perform the training of classifier.
     
     Parameters
     ----------
     dataset : string
         Path to image dataset.
     training_data : string
         Name of ARFF training file.
     force : boolean, optional, default = False
         If False, do not retrain when trained data already exists.
     """
     if self.data is not None and not force:
         return 
     
     if self.data is not None:
         self.reset()
     
     loader = WLoader(classname="weka.core.converters.ArffLoader")
     
     training_file = File.make_path(dataset, training_data + ".arff")
     self.data = loader.load_file(training_file)
     self.data.class_is_last()
     
     options = None if self.options.value == 'default' else self.options.value.split()
     self.classifier = WClassifier(classname=self.classname.value, options=options)
     self.classifier.build_classifier(self.data)
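
WLoader and WClassifier above appear to be aliases for python-weka-wrapper's Loader and Classifier, which only work with a running JVM. A minimal sketch of the surrounding setup (the alias imports and the J48 options are assumptions, not code from this project):

    import weka.core.jvm as jvm
    from weka.core.converters import Loader as WLoader
    from weka.classifiers import Classifier as WClassifier

    jvm.start()  # the JVM must be running before any Weka object is created
    # e.g. a J48 decision tree with a custom confidence factor (illustrative options)
    classifier = WClassifier(classname="weka.classifiers.trees.J48", options=["-C", "0.25"])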
Code example #5
    def job_extractor(self, dataset, cl, classes):

        items = sorted(os.listdir(File.make_path(dataset, cl)))
        self.print_console("Processing class %s - %d itens" % (cl, len(items)))

        for item in items:
            if item.startswith('.'):
                continue

            if self.processor_amd:
                th = multiprocessing.Process(target=self.sub_job_extractor,
                                             args=(item, dataset, cl, classes))
            else:
                th = threading.Thread(target=self.sub_job_extractor,
                                      args=(item, dataset, cl, classes))

            self.threads.append(th)
Code example #6
 def classify(self, dataset, test_dir, test_data, image):
     """Perform the classification. 
     
     Parameters
     ----------
     dataset : string
         Path to image dataset.
     test_dir : string
         Not used.
     test_data : string
         Name of test data file.
         
     Returns
     -------
      summary : list of string
          List of predicted classes for each instance in the test data, in order.
     """
     
     
     loader = WLoader(classname="weka.core.converters.ArffLoader")
     
     test_file = File.make_path(dataset, test_data)
     predict_data = loader.load_file(test_file)
     predict_data.class_is_last()
     
      values = [str(predict_data.class_attribute.value(i)) for i in range(predict_data.class_attribute.num_values)]
     
     classes = []
     
      for inst in predict_data:
          # distribution_for_instance returns the class probability vector;
          # argmax selects the predicted class label
          prediction = self.classifier.distribution_for_instance(inst)
          cl = values[prediction.argmax()]
          classes.append(cl)
     return classes
Code example #7
    def sub_job_extractor(self, item, dataset, cl, classes):
        filepath = File.make_path(dataset, cl, item)
        image = cv2.imread(filepath)
        # cv2.imread does not raise on unreadable files; it returns None instead
        if image is None:
            raise IException("Image %s is possibly corrupt" % filepath)

        if len(self.data) > 0:
            if sys.version_info >= (3, 0):
                values = list(
                    zip(*([
                        extractor().run(image) for extractor in self.extractors
                    ])))
            else:
                values = list(
                    itertools.chain.from_iterable(
                        zip(*([
                            extractor().run(image)
                            for extractor in self.extractors
                        ]))[2]))

            self.data.append(values + [cl if cl in classes else classes[0]])

        else:
            labs, tys, values = [
                list(itertools.chain.from_iterable(ret))
                for ret in zip(*(extractor().run(image)
                                 for extractor in self.extractors))
            ]
            self.labels.append(labs)
            self.types.append(tys)
            self.data.append(values + [cl if cl in classes else classes[0]])
        image = None
        filepath = None
Code example #8
    def make_dataset(self, dataset):
        KERAS_DATASET_DIR_NAME = ".keras_dataset"
        #KERAS_DATASET_DIR_NAME = File.make_path("..", os.path.split(dataset)[-1] + "_keras_dataset")
        KERAS_DIR_TRAIN_NAME = "train"
        KERAS_DIR_TRAIN_MASK_NAME = "train_mask"
        KERAS_DIR_VALIDATION_NAME = "validation"
        KERAS_DIR_VALIDATION_MASK_NAME = "validation_mask"
        KERAS_DIR_TEST_NAME = "test"
        KERAS_DIR_TEST_MASK_NAME = "test_mask"
        PERC_TRAIN = self.perc_train.value
        PERC_VALIDATION = self.perc_validation.value

        # create keras dir dataset
        if not os.path.exists(File.make_path(
                dataset,
                KERAS_DATASET_DIR_NAME)) or self.recreate_dataset.value:
            if os.path.exists(File.make_path(dataset, KERAS_DATASET_DIR_NAME)):
                shutil.rmtree(File.make_path(dataset, KERAS_DATASET_DIR_NAME))

            os.makedirs(File.make_path(dataset, KERAS_DATASET_DIR_NAME))

            # create keras dir train
            os.makedirs(
                File.make_path(dataset, KERAS_DATASET_DIR_NAME,
                               KERAS_DIR_TRAIN_NAME, 'images'))
            os.makedirs(
                File.make_path(dataset, KERAS_DATASET_DIR_NAME,
                               KERAS_DIR_TRAIN_MASK_NAME, 'images'))
            os.makedirs(
                File.make_path(dataset, KERAS_DATASET_DIR_NAME,
                               KERAS_DIR_VALIDATION_NAME, 'images'))
            os.makedirs(
                File.make_path(dataset, KERAS_DATASET_DIR_NAME,
                               KERAS_DIR_VALIDATION_MASK_NAME, 'images'))
            os.makedirs(
                File.make_path(dataset, KERAS_DATASET_DIR_NAME,
                               KERAS_DIR_TEST_NAME, 'images'))
            os.makedirs(
                File.make_path(dataset, KERAS_DATASET_DIR_NAME,
                               KERAS_DIR_TEST_MASK_NAME, 'images'))

            valid_images_extension = ['.jpg', '.png', '.gif', '.jpeg', '.tif']
            fileimages = [
                name for name in os.listdir(dataset)
                if os.path.splitext(name)[-1].lower() in valid_images_extension
            ]

            random.shuffle(fileimages)
            quant_files = len(fileimages)
            quant_train = int(round((quant_files / 100.0) * PERC_TRAIN))
            quant_validation = int(
                round((quant_files / 100.0) * PERC_VALIDATION))

            files_train = fileimages[0:quant_train]
            files_validation = fileimages[quant_train:quant_train +
                                          quant_validation]
            files_test = fileimages[quant_train + quant_validation:quant_files]

            for file in files_train:
                if os.path.splitext(file)[-1] == ".tif":
                    img = Image.open(File.make_path(dataset, file))
                    new_file = os.path.splitext(file)[0] + ".png"
                    img.save(File.make_path(dataset, KERAS_DATASET_DIR_NAME,
                                            KERAS_DIR_TRAIN_NAME, 'images',
                                            new_file),
                             "PNG",
                             quality=100)
                    img = Image.open(
                        File.make_path(dataset,
                                       os.path.splitext(file)[0] + "_json",
                                       "label.png"))
                    img.save(File.make_path(dataset, KERAS_DATASET_DIR_NAME,
                                            KERAS_DIR_TRAIN_MASK_NAME,
                                            'images', new_file),
                             "PNG",
                             quality=100)
                else:
                    os.symlink(
                        File.make_path(dataset, file),
                        File.make_path(dataset, KERAS_DATASET_DIR_NAME,
                                       KERAS_DIR_TRAIN_NAME, 'images', file))
                    os.symlink(
                        File.make_path(dataset,
                                       os.path.splitext(file)[0] + "_json",
                                       "label.png"),
                        File.make_path(dataset, KERAS_DATASET_DIR_NAME,
                                       KERAS_DIR_TRAIN_MASK_NAME, 'images',
                                       file))

            for file in files_validation:
                if os.path.splitext(file)[-1] == ".tif":
                    img = Image.open(File.make_path(dataset, file))
                    new_file = os.path.splitext(file)[0] + ".png"
                    img.save(File.make_path(dataset, KERAS_DATASET_DIR_NAME,
                                            KERAS_DIR_VALIDATION_NAME,
                                            'images', new_file),
                             "PNG",
                             quality=100)
                    img = Image.open(
                        File.make_path(dataset,
                                       os.path.splitext(file)[0] + "_json",
                                       "label.png"))
                    img.save(File.make_path(dataset, KERAS_DATASET_DIR_NAME,
                                            KERAS_DIR_VALIDATION_MASK_NAME,
                                            'images', new_file),
                             "PNG",
                             quality=100)
                else:
                    os.symlink(
                        File.make_path(dataset, file),
                        File.make_path(dataset, KERAS_DATASET_DIR_NAME,
                                       KERAS_DIR_VALIDATION_NAME, 'images',
                                       file))
                    os.symlink(
                        File.make_path(dataset,
                                       os.path.splitext(file)[0] + "_json",
                                       "label.png"),
                        File.make_path(dataset, KERAS_DATASET_DIR_NAME,
                                       KERAS_DIR_VALIDATION_MASK_NAME,
                                       'images', file))

            for file in files_test:
                if os.path.splitext(file)[-1] == ".tif":
                    img = Image.open(File.make_path(dataset, file))
                    #img.thumbnail(img.size)
                    new_file = os.path.splitext(file)[0] + ".png"
                    img.save(File.make_path(dataset, KERAS_DATASET_DIR_NAME,
                                            KERAS_DIR_TEST_NAME, 'images',
                                            new_file),
                             "PNG",
                             quality=100)
                    img = Image.open(
                        File.make_path(dataset,
                                       os.path.splitext(file)[0] + "_json",
                                       "label.png"))
                    #img.thumbnail(img.size)
                    img.save(File.make_path(dataset, KERAS_DATASET_DIR_NAME,
                                            KERAS_DIR_TEST_MASK_NAME, 'images',
                                            new_file),
                             "PNG",
                             quality=100)
                else:
                    os.symlink(
                        File.make_path(dataset, file),
                        File.make_path(dataset, KERAS_DATASET_DIR_NAME,
                                       KERAS_DIR_TEST_NAME, 'images', file))
                    os.symlink(
                        File.make_path(dataset,
                                       os.path.splitext(file)[0] + "_json",
                                       "label.png"),
                        File.make_path(dataset, KERAS_DATASET_DIR_NAME,
                                       KERAS_DIR_TEST_MASK_NAME, 'images',
                                       file))

        train_data = self.get_images(
            File.make_path(dataset, KERAS_DATASET_DIR_NAME,
                           KERAS_DIR_TRAIN_NAME, 'images'))
        train_mask = self.get_mask_images(
            File.make_path(dataset, KERAS_DATASET_DIR_NAME,
                           KERAS_DIR_TRAIN_MASK_NAME, 'images'))

        validation_data = self.get_images(
            File.make_path(dataset, KERAS_DATASET_DIR_NAME,
                           KERAS_DIR_VALIDATION_NAME, 'images'))
        validation_mask = self.get_mask_images(
            File.make_path(dataset, KERAS_DATASET_DIR_NAME,
                           KERAS_DIR_VALIDATION_MASK_NAME, 'images'))

        test_data = self.get_images(
            File.make_path(dataset, KERAS_DATASET_DIR_NAME,
                           KERAS_DIR_TEST_NAME, 'images'))
        test_mask = self.get_mask_images(
            File.make_path(dataset, KERAS_DATASET_DIR_NAME,
                           KERAS_DIR_TEST_MASK_NAME, 'images'))

        #rgb = np.zeros((IMG_HEIGHT, IMG_WIDTH, 3))
        #rgb[:, :, 0] = train_mask[0,:,:,1]*255
        #im = Image.fromarray(np.uint8(rgb))
        #im.save('/home/diogo/to.png')

        self.train_samples = train_data.shape[0]
        self.validation_samples = validation_data.shape[0]
        self.test_samples = test_data.shape[0]

        return train_data, train_mask, validation_data, validation_mask, test_data, test_mask
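
The train/validation/test split above is driven purely by percentages of the shuffled file list. A self-contained sketch of the same arithmetic (file names and percentages are example values):

    files = ["img_%02d.png" % i for i in range(10)]            # 10 example files
    perc_train, perc_validation = 60, 20
    n_train = int(round(len(files) / 100.0 * perc_train))      # 6
    n_val = int(round(len(files) / 100.0 * perc_validation))   # 2
    train = files[:n_train]
    validation = files[n_train:n_train + n_val]
    test = files[n_train + n_val:]                             # the remaining 2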
Code example #9
File: pynovisao.py, Project: kleyton67/pyn-modified
    def run_classifier_folder(self, foldername=None):

        if self.classifier is None:
            raise IException("Classifier not found! Select from the menu the option Training>Choose Classifier!")

        if foldername is None:
            foldername = self.tk.utils.ask_directory()

        valid_images_extension = ['.jpg', '.png', '.gif', '.jpeg', '.tif']

        fileimages = [name for name in os.listdir(foldername)
                    if os.path.splitext(name)[-1].lower() in valid_images_extension]

        fileimages.sort()

        all_accuracy = []
        all_IoU = []
        all_frequency_weighted_IU = []

        for file in fileimages:
            path_file = os.path.join(foldername, file)
            self.open_image(path_file)
            self.run_classifier()
            label_image = os.path.join(foldername, (os.path.splitext(file)[-2] + '_json'), 'label.png')
            self._image_gt = File.open_image_lut(label_image)
            self._image_gt_name = File.get_filename(label_image)

            tam_gt = self._image_gt.shape
            tam_im = self._mask_image.shape
            if len(tam_gt) > 2:
                self.tk.write_log("Color image is not supported. You must open a gray-scale image")
                return

            if tam_gt[0] != tam_im[0] or tam_gt[1] != tam_im[1]:
                self.tk.write_log("Images with different sizes")
                return

            
            confusion_matrix = MetricUtils.confusion_matrix(self._mask_image, self._image_gt)
            [mean_accuracy, accuracy] = MetricUtils.mean_accuracy(self._mask_image, self._image_gt)
            [mean_IoU, IoU] = MetricUtils.mean_IU(self._mask_image, self._image_gt)
            frequency_weighted_IU = MetricUtils.frequency_weighted_IU(self._mask_image, self._image_gt)

            print('Confusion matrix')
            print(confusion_matrix)

            print('Mean pixel accuracy')
            print(mean_accuracy)

            print('Pixel accuracy per class')
            print(accuracy)

            print('Mean intersection over union')
            print(mean_IoU)

            print('Intersection over union per class')
            print(IoU)

            print('Frequency weighted IU')
            print(frequency_weighted_IU)

            all_accuracy.append(accuracy)
            all_IoU.append(IoU)
            all_frequency_weighted_IU.append(frequency_weighted_IU)

            if not os.path.exists("../models_results/"):
                os.makedirs("../models_results/")
            
            path = File.make_path("../models_results/" + file + ".txt")
            path_img = File.make_path("../models_results/" + file + "_seg1.tif")
            path_img2 = File.make_path("../models_results/" + file + "_seg2.tif")

            img = Image.fromarray(self._image)
            img.save(path_img)
            img = Image.fromarray(self.class_color)
            img.save(path_img2)
            
            with open(path, 'ab') as f:
                np.savetxt(f, ['Confusion matrix'], fmt='%s')
                np.savetxt(f, confusion_matrix, fmt='%.5f')
                np.savetxt(f, ['\nAccuracy'], fmt='%s')
                np.savetxt(f, accuracy, fmt='%.5f')
                np.savetxt(f, ['\nIntersection over union'], fmt='%s')
                np.savetxt(f, IoU, fmt='%.5f')
                np.savetxt(f, ['\nFrequency weighted intersection over union'], fmt='%s')
                np.savetxt(f, [frequency_weighted_IU], fmt='%.5f')


        path = File.make_path("../models_results/all_metrics.txt")
        f=open(path,'ab')
        np.savetxt(f, ['All Acuracia'], fmt='%s')
        np.savetxt(f, all_accuracy, fmt='%.5f')
        np.savetxt(f, ['\nAll IoU'], fmt='%s')
        np.savetxt(f, all_IoU, fmt='%.5f')
        np.savetxt(f, ['\nAll Frequency Weighted IU'], fmt='%s')
        np.savetxt(f, all_frequency_weighted_IU, fmt='%.5f')
        f.close()
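
MetricUtils itself is not shown in this listing. A minimal numpy sketch of per-class intersection over union, consistent with how the predicted and ground-truth masks are compared above (an assumption about what mean_IU computes, not the project's actual code):

    import numpy as np

    def iou_per_class(pred_mask, gt_mask, num_classes):
        # IoU(c) = |pred == c AND gt == c| / |pred == c OR gt == c|
        ious = []
        for c in range(num_classes):
            inter = np.logical_and(pred_mask == c, gt_mask == c).sum()
            union = np.logical_or(pred_mask == c, gt_mask == c).sum()
            ious.append(inter / float(union) if union else float('nan'))
        return ious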
Code example #10
    def classify(self, dataset, test_dir, test_data, image):
        """Perform the classification. 
        
        Parameters
        ----------
        dataset : string
            Path to image dataset.
        test_dir : string
            Name of test data directory.
        test_data : string
            Not used.
            
        Returns
        -------
        summary : list of string
            List of predicted classes for each instance in the test data, in order.
        """
        # if CNNCaffe.CREATE_LMDB is True, use the alternative LMDB-based approach.
        if CNNCaffe.CREATE_LMDB:
            return self._classify_lmdb(dataset, test_dir, test_data)

        test_dir = File.make_path(dataset, test_dir)

        classes = []
        labels = np.loadtxt(self.labels_file.value, str)

        images = sorted(os.listdir(File.make_path(test_dir)))

        # convert mean.binaryproto to mean.npy
        blob = caffe.proto.caffe_pb2.BlobProto()
        with open(self.mean_image.value, 'rb') as f:
            blob.ParseFromString(f.read())
        np.save(File.make_path(test_dir, 'mean.npy'),
                np.array(caffe.io.blobproto_to_array(blob))[0])

        # load the mean image for subtraction
        mu = np.load(File.make_path(test_dir, 'mean.npy'))
        mu = mu.mean(1).mean(
            1)  # average over pixels to obtain the mean (BGR) pixel values

        self.transformer.set_mean(
            'data', mu)  # subtract the dataset-mean value in each channel

        self.net.blobs['data'].reshape(
            1,  # batch size
            3,  # 3-channel (BGR) images
            227,
            227)  # image size is 227x227

        for im in images:
            filepath = File.make_path(test_dir, im)
            image = cv2.imread(filepath)

            # resize the segment
            resized_image = np.zeros((512, 512, image.shape[2]), dtype="uint8")
            resized_image[0:image.shape[0], 0:image.shape[1]] = image[:, :]
            resized_image = resized_image[0:256, 0:256]
            cv2.imwrite(filepath.replace('.tif', '.jpeg'), resized_image)

            # load the image
            input_image = caffe.io.load_image(filepath)
            transformed_image = self.transformer.preprocess(
                'data', input_image)

            # copy the image data into the memory allocated for the net
            self.net.blobs['data'].data[...] = [transformed_image]

            # perform classification
            output = self.net.forward()

            # the output probability vector for each image in the batch
            prediction = output['prob'][0]
            print(["%0.4f" % pr for pr in prediction])

            # append the class with max probability.
            classes.append(labels[prediction.argmax()])

        return classes
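
self.net and self.transformer are created elsewhere; a plausible setup following the standard Caffe classification example (an assumption, since that code is not part of this listing):

    net = caffe.Net(model_def_path, model_weights_path, caffe.TEST)  # paths are assumptions
    transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
    transformer.set_transpose('data', (2, 0, 1))     # HWC -> CHW
    transformer.set_raw_scale('data', 255)           # [0, 1] -> [0, 255]
    transformer.set_channel_swap('data', (2, 1, 0))  # RGB -> BGR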
Code example #11
File: pynovisao.py, Project: kleyton67/pyn-modified
    def run_classifier(self):
        """Run the classifier on the current image.
        As a result, paints each segment of the image with the color of its predicted class.

        Raises
        ------
        IException 'You must install python-weka-wrapper'
            The user must install the required dependencies to classifiers.
        IException 'Image not found'
            If there's no image opened.
        """
        if self.classifier is None:
            raise IException("Classifier not found! Select from the menu the option Training>Choose Classifier!")

        if self._const_image is None:
            raise IException("Image not found!  Open an image to test, select in the menu the option File>Open Image!")

        self.tk.write_log("Running %s...", self.classifier.get_name())
        self.tk.append_log("\n%s", str(self.classifier.get_summary_config()))

        #self.classifier.set

        start_time = TimeUtils.get_time()

        # Perform a segmentation, if needed.
        list_segments = self.segmenter.get_list_segments()
        if len(list_segments) == 0:
            self.tk.append_log("Running %s... (%0.3f seconds)", self.segmenter.get_name(), (TimeUtils.get_time() - start_time))

            self._image, _ = self.segmenter.run(self._const_image)
            self.tk.refresh_image(self._image)
            list_segments = self.segmenter.get_list_segments()
            self._gt_segments = [None]*(max(list_segments)+1)

        #  New and optimized classification
        tmp = ".tmp"
        File.remove_dir(File.make_path(self.dataset, tmp))

        self.tk.append_log("Generating test images... (%0.3f seconds)", (TimeUtils.get_time() - start_time))

        len_segments = {}

        print("Wait to complete processes all images!")
        with tqdm(total=len(list_segments)) as pppbar:
            for idx_segment in list_segments:
                segment, size_segment, idx_segment = self.segmenter.get_segment(self, idx_segment=idx_segment)[:-1]
                # Problem here! Dataset removed.
                filepath = File.save_only_class_image(segment, self.dataset, tmp, self._image_name, idx_segment)
                len_segments[idx_segment] = size_segment
                pppbar.update(1)
            pppbar.close()


        # Perform the feature extraction of all segments in image ( not applied to ConvNets ).
        if self.classifier.must_extract_features():
            self.tk.append_log("Running extractors on test images... (%0.3f seconds)", (TimeUtils.get_time() - start_time))
            fextractor = FeatureExtractor(self.extractors)
            output_file, _ = fextractor.extract_all(self.dataset, "test", dirs=[tmp])

        self.tk.append_log("Running classifier on test data... (%0.3f seconds)", (TimeUtils.get_time() - start_time))

        # Get the label corresponding to predict class for each segment of image.
        labels = self.classifier.classify(self.dataset, test_dir=tmp, test_data="test.arff", image=self._const_image)
        File.remove_dir(File.make_path(self.dataset, tmp))

        # Result is the class for each superpixel
        if isinstance(labels, list):
            self.tk.append_log("Painting segments... (%0.3f seconds)", (TimeUtils.get_time() - start_time))

            # If ground truth mode, show alternative results
            if self._ground_truth:
                return self._show_ground_truth(list_segments, len_segments, labels, start_time)

            # Create a popup with results of classification.
            popup_info = "%s\n" % str(self.classifier.get_summary_config())

            len_total = sum([len_segments[idx] for idx in len_segments])
            popup_info += "%-16s%-16s%0.2f%%\n" % ("Total", str(len_total), (len_total*100.0)/len_total)

            # Paint the image.
            self._mask_image = np.zeros(self._const_image.shape[:-1], dtype="uint8")
            height, width, channels = self._image.shape
            self.class_color = np.zeros((height,width,3), np.uint8)
            for (c, cl) in enumerate(self.classes):
                idx_segment = [ list_segments[idx] for idx in range(0, len(labels)) if cl["name"].value == labels[idx] or c == labels[idx]]
                if len(idx_segment) > 0:
                    self._image, _ = self.segmenter.paint_segment(self._image, cl["color"].value, idx_segment=idx_segment, border=False)
                    for idx in idx_segment:
                        self._mask_image[self.segmenter._segments == idx] = c
                        self.class_color[self.segmenter._segments == idx] = X11Colors.get_color(cl["color"].value)

                len_classes = sum([len_segments[idx] for idx in idx_segment])
                popup_info += "%-16s%-16s%0.2f%%\n" % (cl["name"].value, str(len_classes), (len_classes*100.0)/len_total)


            self.tk.refresh_image(self._image)
            self.tk.popup(popup_info)
        else:
            # Result is an image
            self._mask_image = labels
            height, width, channels = self._image.shape
            self.class_color = np.zeros((height,width,3), np.uint8)

            for (c, cl) in enumerate(self.classes):
                self.class_color[labels == c] = X11Colors.get_color(cl["color"].value)

            self._image = cv2.addWeighted(self._const_image, 0.7, self.class_color, 0.3, 0)
            self.tk.refresh_image(self._image)


        end_time = TimeUtils.get_time()

        self.tk.append_log("\nClassification finished")
        self.tk.append_log("Time elapsed: %0.3f seconds", (end_time - start_time))
        gc.collect()
Code example #12
File: pynovisao.py, Project: kleyton67/pyn-modified
    def open_image(self, imagename = None):
        """Open a new image.

        Parameters
        ----------
        imagename : string, optional, default = None
            Filepath of image. If not informed, opens a dialog to choose.
        """
        
        def onclick(event):
            """Binds dataset generator event to click on image."""
            print(event)
            if event.xdata is not None and event.ydata is not None and int(event.ydata) != 0 and self._dataset_generator:
                x = int(event.xdata)
                y = int(event.ydata)
                self.tk.write_log("Coordinates: x = %d y = %d", x, y)
                
                segment, size_segment, idx_segment, run_time = self.segmenter.get_segment(x, y,  path_to_mask=self.path_to_mask_txt)
                
                if size_segment > 0:
                    self.tk.append_log("\nSegment = %d: %0.3f seconds", idx_segment, run_time)
                    
                    self._image, run_time = self.segmenter.paint_segment(self._image, self.classes[self._current_class]["color"].value, x, y,)
                    self.tk.append_log("Painting segment: %0.3f seconds", run_time)
                    self.tk.refresh_image(self._image)
                    
                    if self._ground_truth:
                        self._gt_segments[idx_segment] = self.classes[self._current_class]["name"].value

                    # elif self._dataset_generator == True:
                    #     filepath = File.save_class_image(segment, self.dataset, self.classes[self._current_class]["name"].value, self._image_name, idx_segment)
                    #     if filepath:
                    #         self.tk.append_log("\nSegment saved in %s", filepath)
        
        if imagename is None:
            imagename = self.tk.utils.ask_image_name()

        if imagename:
            self._image = File.open_image(imagename)
            self._image_name = File.get_filename(imagename)

            self.tk.write_log("Opening %s...", self._image_name)
            self.tk.add_image(self._image, self._image_name, onclick)
            self._const_image = self._image
            
            self.segmenter.reset()
            self._gt_segments = None

            self.path_to_mask_txt = os.path.join(os.path.dirname(imagename), 'mask.txt')
            content = np.zeros((self._image.shape[0], self._image.shape[1]), dtype=int)
            print(content.shape)

            if os.path.exists(self.path_to_mask_txt):
                print("Mask already exists in folder: ", os.path.dirname(imagename))
                np.savetxt(self.path_to_mask_txt, content, fmt='%d')
                if os.path.isfile(self.path_to_mask_txt):
                    print("Mask successfully reset")
                else:
                    print("Could not reset the mask, please try again")
            else:
                print("Mask not found in path: ", os.path.dirname(imagename))
                print("Creating a new mask...")

                np.savetxt(self.path_to_mask_txt, content, fmt='%d')
                if os.path.isfile(self.path_to_mask_txt):
                    print("Mask successfully created")
                else:
                    print("Could not create the mask, please try again")
Code example #13
    def make_dataset(self, dataset):

        # create symbolic links to the dataset
        KERAS_DATASET_DIR_NAME = ".keras_dataset"
        #KERAS_DATASET_DIR_NAME = File.make_path("..", os.path.split(dataset)[-1] + "_keras_dataset")
        KERAS_DIR_TRAIN_NAME = "train"
        KERAS_DIR_VALIDATION_NAME = "validation"
        KERAS_DIR_TEST_NAME = "test"
        PERC_TRAIN = self.perc_train.value
        PERC_VALIDATION = self.perc_validation.value

        # create keras dir dataset
        if not os.path.exists(File.make_path(dataset, KERAS_DATASET_DIR_NAME)) or self.recreate_dataset.value:
            if os.path.exists(File.make_path(dataset, KERAS_DATASET_DIR_NAME)):
                shutil.rmtree(File.make_path(dataset, KERAS_DATASET_DIR_NAME))

            os.makedirs(File.make_path(dataset, KERAS_DATASET_DIR_NAME))

            # create keras dir train
            if not os.path.exists(File.make_path(dataset, KERAS_DATASET_DIR_NAME, KERAS_DIR_TRAIN_NAME)):
                os.makedirs(File.make_path(
                    dataset, KERAS_DATASET_DIR_NAME, KERAS_DIR_TRAIN_NAME))

            # create keras dir validation
            if not os.path.exists(File.make_path(dataset, KERAS_DATASET_DIR_NAME, KERAS_DIR_VALIDATION_NAME)):
                os.makedirs(File.make_path(
                    dataset, KERAS_DATASET_DIR_NAME, KERAS_DIR_VALIDATION_NAME))

            # create keras dir test
            if not os.path.exists(File.make_path(dataset, KERAS_DATASET_DIR_NAME, KERAS_DIR_TEST_NAME)):
                os.makedirs(File.make_path(
                    dataset, KERAS_DATASET_DIR_NAME, KERAS_DIR_TEST_NAME))

            dir_classes = sorted(File.list_dirs(dataset))

            if KERAS_DATASET_DIR_NAME in dir_classes:
                dir_classes.remove(KERAS_DATASET_DIR_NAME)

            for dir_class in dir_classes:
                root = File.make_path(dataset, dir_class)
                files = os.listdir(root)
                random.shuffle(files)
                quant_files = len(files)
                quant_train = int((quant_files / 100.0) * PERC_TRAIN)
                quant_validation = int((quant_files / 100.0) * PERC_VALIDATION)

                files_train = files[0:quant_train]
                files_validation = files[quant_train:quant_train +
                                         quant_validation]
                files_test = files[quant_train + quant_validation:quant_files]
                print("Processing class %s - %d itens - %d train items - %d validation items" %
                      (dir_class, quant_files, quant_train, quant_validation))

                for file in files_train:
                    dir_class_train = File.make_path(
                        dataset, KERAS_DATASET_DIR_NAME, KERAS_DIR_TRAIN_NAME, dir_class)
                    if not os.path.exists(dir_class_train):
                        os.makedirs(dir_class_train)

                    if os.path.splitext(file)[-1] == ".tif":
                        img = Image.open(File.make_path(root, file))
                        # img.thumbnail(img.size)
                        new_file = os.path.splitext(file)[0] + ".png"
                        img.save(File.make_path(dir_class_train,
                                                new_file), "PNG", quality=100)
                    else:
                        os.symlink(File.make_path(root, file),
                                   File.make_path(dir_class_train, file))

                for file in files_validation:
                    dir_class_validation = File.make_path(
                        dataset, KERAS_DATASET_DIR_NAME, KERAS_DIR_VALIDATION_NAME, dir_class)
                    if not os.path.exists(dir_class_validation):
                        os.makedirs(dir_class_validation)

                    if os.path.splitext(file)[-1] == ".tif":
                        img = Image.open(File.make_path(root, file))
                        # img.thumbnail(img.size)
                        new_file = os.path.splitext(file)[0] + ".png"
                        img.save(File.make_path(dir_class_validation,
                                                new_file), "PNG", quality=100)
                    else:
                        os.symlink(File.make_path(root, file),
                                   File.make_path(dir_class_validation, file))

                for file in files_test:
                    dir_class_test = File.make_path(
                        dataset, KERAS_DATASET_DIR_NAME, KERAS_DIR_TEST_NAME, dir_class)
                    if not os.path.exists(dir_class_test):
                        os.makedirs(dir_class_test)

                    if os.path.splitext(file)[-1] == ".tif":
                        img = Image.open(File.make_path(root, file))
                        # img.thumbnail(img.size)
                        new_file = os.path.splitext(file)[0] + ".png"
                        img.save(File.make_path(dir_class_test,
                                                new_file), "PNG", quality=100)
                    else:
                        os.symlink(File.make_path(root, file),
                                   File.make_path(dir_class_test, file))

        train_datagen = ImageDataGenerator()

        train_generator = train_datagen.flow_from_directory(
            File.make_path(dataset, KERAS_DATASET_DIR_NAME,
                           KERAS_DIR_TRAIN_NAME),
            target_size=(IMG_HEIGHT, IMG_WIDTH),
            batch_size=self.batch_size.value,
            shuffle=True,
            class_mode="categorical")

        validation_datagen = ImageDataGenerator()

        validation_generator = validation_datagen.flow_from_directory(
            File.make_path(dataset, KERAS_DATASET_DIR_NAME,
                           KERAS_DIR_VALIDATION_NAME),
            target_size=(IMG_HEIGHT, IMG_WIDTH),
            batch_size=self.batch_size.value,
            shuffle=True,
            class_mode="categorical")

        test_datagen = ImageDataGenerator()

        test_generator = test_datagen.flow_from_directory(
            File.make_path(dataset, KERAS_DATASET_DIR_NAME,
                           KERAS_DIR_TEST_NAME),
            target_size=(IMG_HEIGHT, IMG_WIDTH),
            batch_size=self.batch_size.value,
            shuffle=True,
            class_mode="categorical")

        return train_generator, validation_generator, test_generator
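
Note that the three generators above apply no preprocessing. If the chosen architecture expects normalized input, a rescaling variant would look like the following (an alternative sketch, not what this method does):

    train_datagen = ImageDataGenerator(rescale=1.0 / 255)  # scale pixel values to [0, 1]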
Code example #14
    def train(self, dataset, training_data, force=False):
        """Perform the training of classifier.

        Parameters
        ----------
        dataset : string
            Path to image dataset.
        training_data : string
            Name of ARFF training file.
        force : boolean, optional, default = False
            If False, do not retrain when trained data already exists.
        """

        # select .h5 filename
        if self.fine_tuning_rate.value == 100:
            self.file_name = str(self.architecture.value) + \
                '_learning_rate' + str(self.learning_rate.value) + \
                '_transfer_learning'
        elif self.fine_tuning_rate.value == -1:
            self.file_name = str(self.architecture.value) + \
                '_learning_rate' + str(self.learning_rate.value) + \
                '_without_transfer_learning'
        else:
            self.file_name = str(self.architecture.value) + \
                '_learning_rate' + str(self.learning_rate.value) + \
                '_fine_tunning_' + str(self.fine_tuning_rate.value)

        File.remove_dir(File.make_path(dataset, ".tmp"))

        train_generator, validation_generator, test_generator = self.make_dataset(
            dataset)

        # Collect callbacks; only add the checkpoint when weights are to be saved,
        # so fit_generator never receives a None callback.
        callbacks = []
        if self.save_weights:
            if not os.path.exists("../models_checkpoints/"):
                os.makedirs("../models_checkpoints/")

            checkpoint = ModelCheckpoint("../models_checkpoints/" + self.file_name + ".h5", monitor='val_acc',
                                         verbose=1, save_best_only=True, save_weights_only=False,
                                         mode='auto', period=1)
            callbacks.append(checkpoint)

        self.model = self.select_model_params(train_generator.num_classes)

        tensorboard = TensorBoard(
            log_dir="../models_checkpoints/logs_" + self.file_name, write_images=False)
        callbacks.append(tensorboard)
        # compile the model
        self.model.compile(loss="categorical_crossentropy",
                           optimizer=optimizers.SGD(
                               lr=self.learning_rate.value, momentum=self.momentum.value),
                           metrics=["accuracy"])

        # Train the model
        self.model.fit_generator(
            train_generator,
            steps_per_epoch=train_generator.samples // self.batch_size.value,
            epochs=self.epochs.value,
            callbacks=callbacks,
            validation_data=validation_generator,
            validation_steps=validation_generator.samples // self.batch_size.value)

        if self.save_weights:
            # self.model.save_weights(
            #    "../models_checkpoints/" + self.file_name + ".h5")
            self.model.save(
                "../models_checkpoints/" + self.file_name + "_model.h5")
            self.weight_path = "../models_checkpoints/" + self.file_name + "_model.h5"

            dict_classes = validation_generator.class_indices
            np.save("../models_checkpoints/" + self.file_name +
                    "_classes.npy", dict_classes)
Code example #15
    def classify(self, dataset, test_dir, test_data, image):
        """"Perform the classification.

        Parameters
        ----------
        dataset : string
            Path to image dataset.
        test_dir : string
            Name of the test data directory.
        test_data : string
            Not used.

        Returns
        -------
        summary : list of string
            List of predicted classes for each instance in the test data, in order.
        """

        predict_directory = File.make_path(dataset, test_dir)

        # Create a Keras class
        if not os.path.exists(File.make_path(predict_directory, "png")):
            os.makedirs(File.make_path(predict_directory, "png"))

        for file in os.listdir(predict_directory):
            print(File.make_path(predict_directory, file))
            if os.path.splitext(file)[-1] == ".tif":
                try:
                    img = Image.open(File.make_path(predict_directory, file))
                    # img.thumbnail(img.size)
                    new_file = os.path.splitext(file)[0] + ".png"
                    img.save(File.make_path(predict_directory,
                                            'png', new_file), "PNG", quality=100)
                except Exception as e:
                    print(e)
            else:
                print(File.make_path(predict_directory, file))
                os.symlink(File.make_path(predict_directory, file),
                           File.make_path(predict_directory, 'png', file))

        classify_datagen = ImageDataGenerator()

        classify_generator = classify_datagen.flow_from_directory(
            File.make_path(predict_directory, 'png'),
            target_size=(IMG_HEIGHT, IMG_WIDTH),
            batch_size=1,
            shuffle=False,
            class_mode=None)

        try:
            K.clear_session()
            if self.weight_path is not None:
                self.model = load_model(self.weight_path)
                path_classes = self.weight_path.replace(
                    "_model.h5", "_classes.npy")
                print("Load Model H5:" + self.weight_path)
                # class_indices maps name -> index; invert it so a prediction index can be
                # looked up directly (allow_pickle is needed by newer numpy to load a dict)
                class_indices = np.load(path_classes, allow_pickle=True).item()
                CLASS_NAMES = [name for name, idx in sorted(class_indices.items(), key=lambda kv: kv[1])]
        except Exception as e:
            raise IException("Can't load the model in " +
                             self.weight_path + str(e))

        output_classification = self.model.predict_generator(
            classify_generator, classify_generator.samples, verbose=2)

        one_hot_output = np.argmax(output_classification, axis=1)

        one_hot_output = one_hot_output.tolist()

        for index in range(0, len(one_hot_output)):
            one_hot_output[index] = CLASS_NAMES[one_hot_output[index]]

        return one_hot_output
Code example #16
    def extract_all(self,
                    dataset,
                    output_file=None,
                    dirs=None,
                    overwrite=True,
                    processor_amd=False):
        """Runs the feature extraction algorithms on all images of the dataset.

        Parameters
        ----------
        dataset : string
            Path to dataset.
        output_file : string, optional, default = None
            Name of the output file containing the features. If not informed, the dataset name is used.
        dirs : list of string, optional, default = None
            List of directories to be searched. If not informed, searches all directories with images inside the dataset.
        overwrite : boolean, optional, default = True
            If False, check whether a file containing the features already exists.

        Returns
        -------
        out : tuple
            Returns a tuple containing the name of the output file and the time spent in milliseconds.

        Raises
        ------
        IException 'Please select at least one extractor'
            Empty list of extractors.
        IException 'Image %s is possibly corrupt'
            Error opening some image inside the dataset.
        IException 'There are no images in dataset: %s'
            Dataset does not contain any image.
        """
        self.processor_amd = processor_amd
        self.threads = []
        # Manager lists are required because plain lists are not shared across Processes
        if self.processor_amd:
            self.data = Manager().list()
            self.labels = Manager().list()
            self.types = Manager().list()
        else:
            self.data = []
            self.labels = []
            self.types = []
        if len(self.extractors) == 0:
            raise IException("Please select at least one extractor")

        if output_file is None:
            output_file = File.get_filename(dataset)
        output_file = File.make_path(dataset, output_file + '.arff')

        # if an output file already exists and must not be overwritten, return the current file
        if not overwrite and os.path.isfile(output_file):
            return output_file, 0

        start_time = TimeUtils.get_time()

        classes = sorted(File.list_dirs(dataset))
        dirs = classes if dirs is None else dirs

        # Runs the feature extraction for all classes inside the dataset
        for cl in dirs:
            # start job for each extractor
            self.job_extractor(dataset, cl, classes)

        # getRAMinfo reports values in kB; convert to MB for readability
        RAM_stats = self.getRAMinfo()
        RAM_total = round(int(RAM_stats[0]) / 1000, 1)
        RAM_used = round(int(RAM_stats[1]) / 1000, 1)
        print("RAM Total : " + str(RAM_total))
        print("RAM Used : " + str(RAM_used))
        self.print_console(
            "Please wait: " + str(len(self.threads)) +
            " images are being processed; this may take a while depending on the size and number of the images.")
        with tqdm(total=len(self.threads)) as pbar:
            for t in self.threads:
                t.start()
                if RAM_total < 10000:  # if total RAM is less than 10 GB
                    RAM_stats = self.getRAMinfo()
                    RAM_used = round(int(RAM_stats[1]) / 1000, 1)
                    if (RAM_total - RAM_used) < 2000:  # throttle when less than 2 GB is free
                        t.join()
                        t.join()
                pbar.update(1)
            pbar.close()

        self.print_console(
            "Waiting for workers to finish extracting attributes from images!")
        with tqdm(total=len(self.threads)) as ppbar:
            for t in self.threads:
                t.join()

                ppbar.update(1)
            ppbar.close()
        self.print_console("The process was completed with " +
                           str(len(self.threads)) + " images!")
        if len(self.data) == 0:
            raise IException("There are no images in dataset: %s" % dataset)
        del self.threads
        gc.collect()
        # Save the output file in ARFF format
        self._save_output(File.get_filename(dataset), classes, self.labels[0],
                          self.types[0], self.data, output_file)
        end_time = TimeUtils.get_time()

        return output_file, (end_time - start_time)
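
getRAMinfo is referenced above but not shown in this listing. A plausible Linux-only sketch that matches how extract_all indexes its result (total, used, and free memory in kB; the /proc/meminfo parsing is an assumption):

    def getRAMinfo(self):
        # Parse /proc/meminfo (Linux only); returns [total_kb, used_kb, free_kb],
        # matching the indexing used in extract_all.
        with open('/proc/meminfo') as f:
            meminfo = {line.split(':')[0]: int(line.split()[1]) for line in f}
        total = meminfo['MemTotal']
        free = meminfo.get('MemAvailable', meminfo['MemFree'])
        return [str(total), str(total - free), str(free)]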
Code example #17
File: pynovisao.py, Project: kleyton67/pyn-modified
    def assign_using_labeled_image(self, imagename = None, refresh_image=True):
        """Open a new image.

        Parameters
        ----------
        imagename : string, optional, default = None
            Filepath of image. If not informed open a dialog to choose.
        """

        if len(self.segmenter.get_list_segments()) == 0:
            self.tk.write_log("Error: Image not segmented")
            return

        if self._image is None:
            self.tk.write_log("Error: Open the image to be targeted")
            return

        if imagename is None:
            imagename = self.tk.utils.ask_image_name()

        if imagename:
            self._image_gt = File.open_image_lut(imagename)
            self._image_gt_name = File.get_filename(imagename)

            self.tk.write_log("Opening %s...", self._image_gt_name)

            qtd_classes = len(self.classes)
            qtd_superpixel = len(self.segmenter.get_list_segments())

        tam_gt = self._image_gt.shape
        tam_im = self._image.shape
        if len(tam_gt) > 2:
            self.tk.write_log("Color image is not supported. You must open a gray-scale image")
            return

        if tam_gt[0] != tam_im[0] or tam_gt[1] != tam_im[1]:
            self.tk.write_log("Images with different sizes")
            return
            
                
        qtd_bad_superpixels = 0
        
        for idx_segment in range(0, qtd_superpixel):
            hist_classes_superpixels = np.histogram(self._image_gt[self.segmenter._segments == idx_segment], bins=range(0,len(self.classes)+1))[0]

            idx_class = np.argmax(hist_classes_superpixels)
            sum_vector = np.sum(hist_classes_superpixels)
            if refresh_image:
                self._image, run_time = self.segmenter.paint_segment(self._image, self.classes[idx_class]["color"].value, idx_segment = [idx_segment])
            #self.tk.append_log("posicao maior = %x  --  soma vetor %d", x, sum_vector)
            if hist_classes_superpixels[idx_class]/sum_vector < 0.5:
                qtd_bad_superpixels = qtd_bad_superpixels + 1

            if self._ground_truth:
                self._gt_segments[idx_segment] = self.classes[self._current_class]["name"].value

            elif self._dataset_generator:
                if idx_segment % 10 == 0:
                    self.tk.write_log("Saving %d of %d", (idx_segment+1), qtd_superpixel)

                segment, size_segment, idx_segment, run_time = self.segmenter.get_segment(idx_segment = idx_segment)
                filepath = File.save_class_image(segment, self.dataset, self.classes[idx_class]["name"].value, self._image_name, idx_segment)
                if filepath:
                    self.tk.append_log("\nSegment saved in %s", filepath)

        self.tk.refresh_image(self._image)
        self.tk.write_log("%d bad annotated superpixels of %d superpixel (%0.2f)", qtd_bad_superpixels, qtd_superpixel, (float(qtd_bad_superpixels)/qtd_superpixel)*100)
Code example #18
    def _classify_lmdb(self, dataset, test_dir, test_data):
        """Perform the alternative classification creating LMDB backend. 
        
        Parameters
        ----------
        dataset : string
            Path to image dataset.
        test_dir : string
            Name of test data directory.
        test_data : string
            Not used.
            
        Returns
        -------
        summary : list of string
            List of predicted classes for each instance in the test data, in order.
        """
        test_dir = File.make_path(dataset, test_dir)

        classes = []
        labels = np.loadtxt(self.labels_file.value, str)

        images = sorted(os.listdir(File.make_path(test_dir)))

        # create LMDB listfile
        listfile = open(File.make_path(test_dir, 'listfile.txt'), 'w')

        for im in images:
            filepath = File.make_path(test_dir, im)
            image = cv2.imread(filepath)

            # resize the segment and save in jpeg format
            resized_image = np.zeros((512, 512, image.shape[2]), dtype="uint8")
            resized_image[0:image.shape[0], 0:image.shape[1]] = image[:, :]
            resized_image = resized_image[0:256, 0:256]
            cv2.imwrite(filepath.replace('.tif', '.jpeg'), resized_image)

            # append imagename in listfile
            listfile.write("%s %d\n" % (im.replace('.tif', '.jpeg'), 0))

        listfile.close()

        # create LMDB backend to be used as source of data
        from subprocess import call

        call([
            caffe_root + 'build/tools/convert_imageset',
            File.make_path(test_dir, ''),
            File.make_path(test_dir, 'listfile.txt'),
            File.make_path(test_dir, 'lmdb')
        ])

        # read model_def
        with open(self.model_def.value, 'r') as model_def:
            prototxt = model_def.read()

        # change structure of layer data
        layers = prototxt.split('layer')
        layers[1] = (' {\n'
                     '    name: "data"\n'
                     '    type: "Data"\n'
                     '    top: "data"\n'
                     '    top: "label"\n'
                     '    transform_param {\n'
                     '        mirror: false\n'
                     '        crop_size: 227\n'
                     '        mean_file: "' + self.mean_image.value + '"\n'
                     '    }\n'
                     '    data_param {\n'
                     '        source: "' + File.make_path(test_dir, 'lmdb') +
                     '"\n'
                     '        batch_size: 1\n'
                     '        backend: LMDB\n'
                     '    }\n'
                     '}\n')
        prototxt = 'layer'.join(layers)

        # create new model_def
        new_model_def_path = File.make_path(test_dir, 'deploy.prototxt')
        with open(new_model_def_path, 'w') as new_model_def:
            new_model_def.write(prototxt)

        net = caffe.Net(
            new_model_def_path,  # defines the structure of the model
            self.model_weights.value,  # contains the trained weights
            caffe.TEST)  # use test mode (e.g., don't perform dropout)

        for im in images:
            # perform classification; each forward() consumes the next LMDB record (batch_size is 1)
            output = net.forward()

            # the output probability vector for the first image in the batch
            prediction = output['prob'][0]
            print(["%0.4f" % pr for pr in prediction])

            classes.append(labels[prediction.argmax()])

        return classes
Code example #19
    def classify(self, dataset, test_dir, test_data, image):
        """"Perform the classification.

        Parameters
        ----------
        dataset : string
            Path to image dataset.
        test_dir : string
            Name of the test data directory.
        test_data : string
            Not used.

        Returns
        -------
        summary : list of int
            List of predicted class indices for each instance in the test data, in order.
        """

        predict_directory = File.make_path(dataset, test_dir)

        # Create a Keras class
        if not os.path.exists(File.make_path(predict_directory, "png")):
            os.makedirs(File.make_path(predict_directory, "png"))

        for file in os.listdir(predict_directory):
            print(File.make_path(predict_directory, file))
            if os.path.splitext(file)[-1] == ".tif":
                try:
                    img = Image.open(File.make_path(predict_directory, file))
                    new_file = os.path.splitext(file)[0] + ".png"
                    img.save(File.make_path(predict_directory, 'png',
                                            new_file),
                             "PNG",
                             quality=100)
                except Exception as e:
                    print(e)
            else:
                print(File.make_path(predict_directory, file))
                os.symlink(File.make_path(predict_directory, file),
                           File.make_path(predict_directory, 'png', file))

        classify_datagen = ImageDataGenerator()

        classify_generator = classify_datagen.flow_from_directory(
            File.make_path(predict_directory, 'png'),
            target_size=(IMG_HEIGHT, IMG_WIDTH),
            batch_size=1,
            shuffle=False,
            class_mode=None)

        try:
            K.clear_session()
            if self.pseudo_label.weight_path is not None:
                self.create_model()
                self.model.load_weights(self.pseudo_label.weight_path)
        except Exception as e:
            raise IException("Can't load the model in " +
                             self.pseudo_label.weight_path + str(e))

        output_classification = self.model.predict_generator(
            classify_generator, classify_generator.samples, verbose=2)

        one_hot_output = np.argmax(output_classification, axis=1)

        one_hot_output = one_hot_output.tolist()

        return one_hot_output
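
Unlike code example #15, this variant returns raw class indices rather than names. Mapping them back would need the class_indices dictionary of a generator built on the training directory (a sketch under that assumption):

    # train_generator is an assumption: any flow_from_directory generator over the
    # training data exposes class_indices (a name -> index mapping)
    index_to_name = {idx: name for name, idx in train_generator.class_indices.items()}
    class_names = [index_to_name[i] for i in one_hot_output]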