Example 1
class HEDTester():

    def __init__(self, config_file):

        self.io = IO()
        self.init = True

        try:
            pfile = open(config_file)
            self.cfgs = yaml.safe_load(pfile)
            pfile.close()

        except Exception as err:

            self.io.print_error('Error reading config file {}, {}'.format(config_file, err))

    def setup(self, session):

        try:

            self.model = Vgg16(self.cfgs, run='testing')

            meta_model_file = os.path.join(self.cfgs['save_dir'], 'models/hed-model-{}'.format(self.cfgs['test_snapshot']))

            saver = tf.train.Saver()
            saver.restore(session, meta_model_file)

            self.io.print_info('Done restoring VGG-16 model from {}'.format(meta_model_file))

        except Exception as err:

            self.io.print_error('Error setting up VGG-16 model, {}'.format(err))
            self.init = False

    def run(self, session):

        if not self.init:
            return

        self.model.setup_testing(session)

        filepath = os.path.join(self.cfgs['download_path'], self.cfgs['testing']['list'])
        test_list = self.io.read_file_list(filepath)

        self.io.print_info('Writing PNGs at {}'.format(self.cfgs['test_output']))

        for idx, img in enumerate(test_list):

            test_filename = os.path.join(self.cfgs['download_path'], self.cfgs['testing']['dir'], img)
            im = self.fetch_image(test_filename)

            edgemap = session.run(self.model.predictions, feed_dict={self.model.images: [im]})
            # self.save_egdemaps(edgemap, idx)

            self.io.print_info('Done testing {}, {}'.format(test_filename, im.shape))

    def save_egdemaps(self, em_maps, index):

        # Take the edge map from the network from side layers and fuse layer
        em_maps = [e[0] for e in em_maps]
        em_maps = em_maps + [np.mean(np.array(em_maps), axis=0)]

        for idx, em in enumerate(em_maps):

            em[em < self.cfgs['testing_threshold']] = 0.0

            em = 255.0 * (1.0 - em)
            em = np.tile(em, [1, 1, 3])

            em = Image.fromarray(np.uint8(em))
            em.save(os.path.join(self.cfgs['test_output'], 'testing-{}-{:03}.png'.format(index, idx)))

            im = 255 - cv2.imread(os.path.join(self.cfgs['test_output'], 'testing-{}-{:03}.png'.format(index, idx)), cv2.IMREAD_GRAYSCALE)
            _, bw2 = cv2.threshold(im, 0, 255, cv2.THRESH_OTSU+cv2.THRESH_BINARY)
            bw2 = cv2.erode(bw2, np.ones((3,3), np.uint8), iterations=1)
            bw2 = thinning(bw2)
            cv2.imwrite(os.path.join(self.cfgs['test_output'], 'testing-{}-{:03}-th.png'.format(index, idx)), 255-bw2)

    def fetch_image(self, test_image):

        # is url
        image = None

        if not urlparse.urlparse(test_image).scheme == "":

            url_response = urllib.urlopen(test_image)

            if url_response.code == 404:
                self.io.print_error('[Testing] URL error code: {1} for {0}'.format(test_image, url_response.code))
                return None

            try:

                image_buffer = io.BytesIO(url_response.read())
                image = self.capture_pixels(image_buffer)

            except Exception as err:

                self.io.print_error('[Testing] Error with URL {0}: {1}'.format(test_image, err))
                return None

        # read from disk
        elif os.path.exists(test_image):

            try:

                fid = open(test_image, 'rb')
                stream = fid.read()
                fid.close()

                image_buffer = io.BytesIO(stream)
                image = self.capture_pixels(image_buffer)

            except Exception as err:

                self.io.print_error('[Testing] Error with image file {0}: {1}'.format(test_image, err))
                return None

        return image

    def capture_pixels(self, image_buffer):

        image = Image.open(image_buffer)
        image = image.resize((self.cfgs['testing']['image_width'], self.cfgs['testing']['image_height']))
        image = np.array(image, np.float32)
        image = self.colorize(image)

        image = image[:, :, self.cfgs['channel_swap']]
        image -= self.cfgs['mean_pixel_value']

        return image

    def colorize(self, image):

        # Grayscale to 3-channel image
        if image.ndim == 2:
            image = image[:, :, np.newaxis]
            image = np.tile(image, (1, 1, 3))
        # Drop the alpha channel from RGBA images
        elif image.shape[2] == 4:
            image = image[:, :, :3]

        return image
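
A minimal driver sketch for this tester, assuming a TensorFlow 1.x session and a config file path ('hed_config.yaml' is a placeholder; the real entry point is not part of this example):

import tensorflow as tf

# Hypothetical entry point; the config file name and session handling below
# are assumptions, not part of the example above.
if __name__ == '__main__':
    tester = HEDTester('hed_config.yaml')

    with tf.Session() as session:
        tester.setup(session)   # restores the VGG-16 snapshot named in the config
        tester.run(session)     # predicts edge maps for the testing list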
Example 2
class DataParser():
    def __init__(self, cfgs):

        self.io = IO()
        self.cfgs = cfgs
        self.train_file = os.path.join(cfgs['download_path'],
                                       cfgs['training']['list'])
        self.train_data_dir = os.path.join(cfgs['download_path'],
                                           cfgs['training']['dir'])
        self.training_pairs = self.io.read_file_list(self.train_file)

        self.samples = self.io.split_pair_names(self.training_pairs,
                                                self.train_data_dir)
        self.io.print_info('Training data set-up from {}'.format(
            self.train_file))
        self.n_samples = len(self.training_pairs)

        self.all_ids = list(range(self.n_samples))
        np.random.shuffle(self.all_ids)

        self.training_ids = self.all_ids[:int(self.cfgs['train_split'] *
                                              len(self.training_pairs))]
        self.validation_ids = self.all_ids[int(self.cfgs['train_split'] *
                                               len(self.training_pairs)):]

        self.io.print_info('Training samples {}'.format(len(
            self.training_ids)))
        self.io.print_info('Validation samples {}'.format(
            len(self.validation_ids)))

    def get_training_batch(self):

        batch_ids = np.random.choice(self.training_ids,
                                     self.cfgs['batch_size_train'])

        return self.get_batch(batch_ids)

    def get_validation_batch(self):

        batch_ids = np.random.choice(self.validation_ids,
                                     self.cfgs['batch_size_val'])

        return self.get_batch(batch_ids)

    def get_batch(self, batch):

        tstart = time.time()

        filenames = []
        images = []
        edgemaps = []

        for idx, b in enumerate(batch):

            im = Image.open(self.samples[b][0])
            em = Image.open(self.samples[b][1])

            im = im.resize((self.cfgs['training']['image_width'],
                            self.cfgs['training']['image_height']))
            em = em.resize((self.cfgs['training']['image_width'],
                            self.cfgs['training']['image_height']))

            im = np.array(im, dtype=np.float32)
            im = im[:, :, self.cfgs['channel_swap']]
            im -= self.cfgs['mean_pixel_value']

            em = np.array(em.convert('L'), dtype=np.float32)

            if self.cfgs['target_regression']:
                bin_em = em / 255.0
            else:
                bin_em = np.zeros_like(em)
                bin_em[np.where(em)] = 1

            # Some edge maps have 3 channels, some don't
            bin_em = bin_em if bin_em.ndim == 2 else bin_em[:, :, 0]
            # To fit [batch_size, H, W, 1] output of the network
            bin_em = np.expand_dims(bin_em, 2)

            images.append(im)
            edgemaps.append(bin_em)
            filenames.append(self.samples[b])

        return images, edgemaps, filenames
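
A sketch of how these batches might feed a training step, assuming a cfgs dict loaded elsewhere and a model exposing images/edgemaps placeholders and a train_op (all of these names are assumptions, not defined in the example above):

import numpy as np

# Hypothetical training loop; `cfgs`, `model`, and `session` are assumed to
# exist and are not defined in the example above.
parser = DataParser(cfgs)

for step in range(cfgs['max_iterations']):            # assumed config key
    images, edgemaps, _ = parser.get_training_batch()

    # Lists of HxWx3 / HxWx1 arrays -> batched numpy tensors for feed_dict
    feed = {model.images: np.asarray(images),
            model.edgemaps: np.asarray(edgemaps)}     # assumed placeholder names
    session.run(model.train_op, feed_dict=feed)       # assumed training op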
Example 3
class DataParser():
    def __init__(self, cfgs, dataDir=None, initmodelfile=None):

        self.io = IO()
        self.cfgs = cfgs
        download_path = dataDir
        self.train_file = os.path.join(download_path, cfgs['training']['list'])
        self.train_data_dir = os.path.join(download_path,
                                           cfgs['training']['dir'])
        self.training_pairs = self.io.read_file_list(self.train_file)

        self.samples = self.io.split_pair_names(self.training_pairs,
                                                self.train_data_dir)
        self.io.print_info('Training data set-up from {}'.format(
            self.train_file))

        self.n_samples = len(self.training_pairs)
        self.all_ids = list(range(self.n_samples))
        np.random.shuffle(self.all_ids)

        self.training_ids = self.all_ids[:int(self.cfgs['train_split'] *
                                              len(self.training_pairs))]
        self.validation_ids = self.all_ids[int(self.cfgs['train_split'] *
                                               len(self.training_pairs)):]

        self.io.print_info('Training samples {}'.format(len(
            self.training_ids)))
        self.io.print_info('Validation samples {}'.format(
            len(self.validation_ids)))

    def get_training_batch(self):

        batch_ids = np.random.choice(self.training_ids,
                                     self.cfgs['batch_size_train'])

        return self.get_batch(batch_ids)

    def get_validation_batch(self):

        batch_ids = np.random.choice(self.validation_ids,
                                     self.cfgs['batch_size_val'])

        return self.get_batch(batch_ids)

    def get_batch(self, batch):

        tstart = time.time()

        filenames = []
        images = []
        edgemaps = []

        for idx, b in enumerate(batch):

            im = Image.open(self.samples[b][0])
            em = Image.open(self.samples[b][1])

            im = im.resize((self.cfgs['training']['image_width'],
                            self.cfgs['training']['image_height']))
            em = em.resize((self.cfgs['training']['image_width'],
                            self.cfgs['training']['image_height']))

            im = np.array(im, dtype=np.float32)
            im = im[:, :, self.cfgs['channel_swap']]
            im -= self.cfgs['mean_pixel_value']

            # Labels need to be 1 or 0 (edge pixel or not)
            # or can use regression targets as done by the author
            # https://github.com/s9xie/hed/blob/9e74dd710773d8d8a469ad905c76f4a7fa08f945/src/caffe/layers/image_labelmap_data_layer.cpp#L213

            em = np.array(em.convert('L'), dtype=np.float32)

            if self.cfgs['target_regression']:
                bin_em = em / 255.0
            else:
                bin_em = np.zeros_like(em)
                bin_em[np.where(em)] = 1

            # Some edge maps have 3 channels, some don't
            bin_em = bin_em if bin_em.ndim == 2 else bin_em[:, :, 0]
            # To fit [batch_size, H, W, 1] output of the network
            bin_em = np.expand_dims(bin_em, 2)

            images.append(im)
            edgemaps.append(bin_em)
            filenames.append(self.samples[b])

        return images, edgemaps, filenames
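
Unlike Example 2, this variant takes the data directory as an explicit dataDir argument instead of reading download_path from the config. A minimal instantiation sketch, with the config path and dataset location assumed:

import yaml

# Hypothetical paths; both values below are placeholders for illustration.
with open('hed_config.yaml') as f:
    cfgs = yaml.safe_load(f)

parser = DataParser(cfgs, dataDir='/data/HED-BSDS')
images, edgemaps, filenames = parser.get_validation_batch()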
Example 4
class HEDTester():
    def __init__(self, config_file):

        self.io = IO()
        self.init = True

        try:
            pfile = open(config_file)
            self.cfgs = yaml.safe_load(pfile)
            pfile.close()

        except Exception as err:

            self.io.print_error(
                'Error reading config file {}, {}'.format(config_file, err))

    def setup(self, session):

        try:

            self.model = Vgg16(self.cfgs, run='testing')

            meta_model_file = os.path.join(
                self.cfgs['save_dir'],
                'models/hed-model-{}'.format(self.cfgs['test_snapshot']))

            saver = tf.train.Saver()
            saver.restore(session, meta_model_file)

            self.io.print_info(
                'Done restoring VGG-16 model from {}'.format(meta_model_file))

        except Exception as err:

            self.io.print_error(
                'Error setting up VGG-16 model, {}'.format(err))
            self.init = False

    def run(self, session):

        if not self.init:
            return

        self.model.setup_testing(session)

        filepath = os.path.join(self.cfgs['download_path'],
                                self.cfgs['testing']['list'])
        test_list = self.io.read_file_list(filepath)

        self.io.print_info('Writing PNGs at {}'.format(
            self.cfgs['test_output']))

        for idx, img in enumerate(test_list):

            test_filename = os.path.join(self.cfgs['download_path'],
                                         self.cfgs['testing']['dir'], img)
            im = sio.loadmat(test_filename)['noisy']
            #im=cv2.resize(im,(self.cfgs['testing']['image_height'], self.cfgs['testing']['image_width']))

            new_im = im.astype(np.float32)
            new_im -= self.cfgs['mean_pixel_value']
            im = new_im[:, :, 1:5]

            #im=np.expand_dims(im,axis=2)

            edgemap = session.run(self.model.predictions,
                                  feed_dict={self.model.images: [im]})
            self.save_egdemaps(edgemap, idx)

            self.io.print_info('Done testing {}, {}'.format(
                test_filename, im.shape))

    def save_egdemaps(self, em_maps, index):

        # Take the edge map from the network from side layers and fuse layer
        em_maps = [e[0] for e in em_maps]
        em_maps = em_maps + [np.mean(np.array(em_maps), axis=0)]

        for idx, em in enumerate(em_maps):

            em[em < self.cfgs['testing_threshold']] = 0.0
            save_mat_path = os.path.join(self.cfgs['test_output'],
                                         'testing-{}-{:03}'.format(index, idx))
            sio.savemat(save_mat_path, {'magnitude_field': em})

            em = 255.0 * (1.0 - em)

            em = np.tile(em, [1, 1, 3])

            em = Image.fromarray(np.uint8(em))
            em.save(
                os.path.join(self.cfgs['test_output'],
                             'testing-{}-{:03}.png'.format(index, idx)))