Example #1
    def __init__(self, cfgs, run='training'):

        self.cfgs = cfgs
        self.io = IO()

        base_path = os.path.abspath(os.path.dirname(__file__))
        weights_file = os.path.join(base_path, self.cfgs['model_weights_path'])

        self.data_dict = np.load(weights_file,
                                 encoding='latin1',
                                 allow_pickle=True).item()
        self.io.print_info("Model weights loaded from {}".format(
            self.cfgs['model_weights_path']))

        self.images = tf.placeholder(tf.float32, [
            None, self.cfgs[run]['image_height'],
            self.cfgs[run]['image_width'], self.cfgs[run]['n_channels']
        ],
                                     name="input")
        self.edgemaps = tf.placeholder(tf.float32, [
            None, self.cfgs[run]['image_height'],
            self.cfgs[run]['image_width'], 1
        ])

        self.define_model()
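
For reference, the .npy file loaded above is a pickled dict mapping VGG-16 layer names to [kernel, bias] pairs, which is exactly how get_conv_filter and get_bias index into data_dict in the later examples. A minimal inspection sketch (the file name is illustrative):

import numpy as np

data_dict = np.load('vgg16.npy', encoding='latin1', allow_pickle=True).item()
for name in sorted(data_dict):
    kernel, bias = data_dict[name][0], data_dict[name][1]
    print(name, kernel.shape, bias.shape)  # e.g. conv1_1 (3, 3, 3, 64) (64,)
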
Example #2
def main(args):

    if not (args.run_train or args.run_test or args.run_reshape
            or args.download_data):
        print(
            'Set at least one of the options --train | --test | --reshape | --download-data'
        )
        parser.print_help()
        return

    if args.run_test or args.run_train or args.run_reshape:
        session = get_session(args.gpu_limit)

    if args.run_train:
        trainer = HEDTrainer(args.config_file)
        trainer.setup()
        trainer.run(session)

    if args.run_test:
        tester = HEDTester(args.config_file)
        tester.setup(session)
        tester.run(session)

    if args.run_reshape:
        reshaper = HEDReshaper(args.config_file)
        reshaper.setup(session)
        reshaper.run(session)

    if args.download_data:

        io = IO()
        cfgs = io.read_yaml_file(args.config_file)
        io.download_data(cfgs['rar_file'], cfgs['download_path'])
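
get_session is not defined in this snippet; a plausible sketch, assuming gpu_limit is a per-process GPU memory fraction (TF 1.x API):

import tensorflow as tf

def get_session(gpu_limit):
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_limit,
                                allow_growth=True)
    return tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
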
Example #3
    def __init__(self, cfgs, dataDir=None, initmodelfile=None):

        self.io = IO()
        self.cfgs = cfgs
        download_path = dataDir
        self.train_file = os.path.join(download_path, cfgs['training']['list'])
        self.train_data_dir = os.path.join(download_path,
                                           cfgs['training']['dir'])
        self.training_pairs = self.io.read_file_list(self.train_file)

        self.samples = self.io.split_pair_names(self.training_pairs,
                                                self.train_data_dir)
        self.io.print_info('Training data set-up from {}'.format(
            self.train_file))

        self.n_samples = len(self.training_pairs)
        self.all_ids = list(range(self.n_samples))
        np.random.shuffle(self.all_ids)

        self.training_ids = self.all_ids[:int(self.cfgs['train_split'] *
                                              len(self.training_pairs))]
        self.validation_ids = self.all_ids[int(self.cfgs['train_split'] *
                                               len(self.training_pairs)):]

        self.io.print_info('Training samples {}'.format(len(
            self.training_ids)))
        self.io.print_info('Validation samples {}'.format(
            len(self.validation_ids)))
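
The IO helper used throughout these examples is not shown; a rough sketch consistent with the calls above (the real implementation lives in the holy-edge repo, so treat this as an approximation):

import os

class IO:
    def print_info(self, msg):
        print('[INFO] {}'.format(msg))

    def print_warning(self, msg):
        print('[WARNING] {}'.format(msg))

    def print_error(self, msg):
        print('[ERROR] {}'.format(msg))

    def read_file_list(self, filename):
        # one "image-path edgemap-path" pair per line
        with open(filename) as f:
            return [line.strip() for line in f if line.strip()]

    def split_pair_names(self, pairs, base_dir):
        # 'a.jpg b.png' -> ['<base_dir>/a.jpg', '<base_dir>/b.png']
        return [[os.path.join(base_dir, p) for p in pair.split()] for pair in pairs]
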
Example #4
    def __init__(self, cfgs, run='training'):

        self.cfgs = cfgs
        self.io = IO()

        base_path = os.path.abspath(os.path.dirname(__file__))
        
        self.images = tf.placeholder(tf.float32, [None, None, None, 4])
        self.edgemaps = tf.placeholder(tf.float32, [
            None, self.cfgs[run]['image_height'],
            self.cfgs[run]['image_width'], 1
        ])

        self.define_model()
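
Because every spatial dimension of self.images is None in this variant, inputs of any height and width (with 4 channels) can be fed at run time; a hypothetical feed:

import numpy as np

batch = np.zeros((1, 480, 320, 4), dtype=np.float32)  # shape is illustrative
# edgemap = session.run(model.predictions, feed_dict={model.images: batch})
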
Example #5
    def __init__(self, config_file):
        self.io = IO()
        self.init = True

        try:
            pfile = open(config_file)
            self.cfgs = yaml.safe_load(pfile)
            pfile.close()
        except Exception as err:
            print(('Error reading config file {}, {}'.format(config_file,
                                                             err)))
            raise err
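
Judging from the keys read across this section, the YAML config behind config_file looks roughly like the sketch below; only the key names come from the code, every value is illustrative:

import yaml

cfg_text = """
model_weights_path: models/vgg16.npy
save_dir: hed-output
download_path: data
rar_file: http://example.com/HED-BSDS.rar
train_split: 0.8
batch_size_train: 8
batch_size_val: 8
max_iterations: 10000
save_interval: 500
val_interval: 100
test_snapshot: 5000
test_output: hed-output/edgemaps
testing_threshold: 0.0
deep_supervision: true
target_regression: true
loss_weights: 1.0
channel_swap: [2, 1, 0]
mean_pixel_value: 114.799
optimizer_params:
  learning_rate: 0.0001
training:
  list: train_pair.lst
  dir: HED-BSDS
  image_width: 480
  image_height: 320
  n_channels: 3
testing:
  list: test.lst
  dir: HED-BSDS/test
  image_width: 480
  image_height: 320
  n_channels: 3
"""
cfgs = yaml.safe_load(cfg_text)
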
Example #6
    def start(self, args):
        io = IO()
        self.cfgs = io.read_yaml_file(args.config_file)
        self.model = Vgg16(self.cfgs, run='testing')
        meta_model_file = "/Users/csrproject/edge/holy-edge/hed/models/hed-model-5001"
        saver = tf.train.Saver()
        session = self.get_session(args.gpu_limit)
        saver.restore(session, meta_model_file)
        self.model.setup_testing(session)
        im = self.fetch_img()
        edgemap = session.run(self.model.predictions,
                              feed_dict={self.model.images: [im]})
        self.save_egdemaps(edgemap)
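
fetch_img is not shown here either; given the preprocessing capture_pixels does in Example #14 below (resize, channel swap, mean subtraction), a plausible sketch with an assumed hard-coded path, in the spirit of this snippet:

import numpy as np
from PIL import Image

def fetch_img(self):
    im = Image.open('test.jpg')  # hypothetical path
    im = im.resize((self.cfgs['testing']['image_width'],
                    self.cfgs['testing']['image_height']))
    im = np.array(im, dtype=np.float32)
    im = im[:, :, self.cfgs['channel_swap']]
    im -= self.cfgs['mean_pixel_value']
    return im
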
Example #7
    def __init__(self, dataDir=None, saveDir=None, configfile=None):

        self.io = IO()
        self.init = True
        self.dataDir = dataDir
        self.saveDir = saveDir
        try:
            pfile = open(configfile)
            self.cfgs = yaml.safe_load(pfile)
            pfile.close()

        except Exception as err:

            self.io.print_error('Error reading config file {}, {}'.format(configfile, err))
Example #8
    def __init__(self, cfgs, saveDir=None, initmodelfile=None, run='training'):

        self.cfgs = cfgs
        self.saveDir = saveDir
        self.io = IO()
        weights_file = initmodelfile
        self.data_dict = np.load(weights_file, encoding='latin1',
                                 allow_pickle=True).item()
        self.io.print_info("Model weights loaded from {}".format(weights_file))

        self.images = tf.placeholder(tf.float32, [
            None, self.cfgs[run]['image_height'],
            self.cfgs[run]['image_width'], self.cfgs[run]['n_channels']
        ],
                                     name="inputlayer")
        self.edgemaps = tf.placeholder(tf.float32, [
            None, self.cfgs[run]['image_height'],
            self.cfgs[run]['image_width'], 1
        ])
        self.define_model()
Example #9
    def __init__(self, cfgs, run='training'):

        self.cfgs = cfgs
        self.io = IO()

        base_path = 'holy-edge/hed/models/'
        weights_file = os.path.join(base_path, self.cfgs['model_weights_path'])

        self.data_dict = np.load(weights_file, encoding='latin1',
                                 allow_pickle=True).item()
        self.io.print_info("Model weights loaded from {}".format(
            self.cfgs['model_weights_path']))

        self.images = tf.placeholder(tf.float32, [
            None, self.cfgs[run]['image_height'],
            self.cfgs[run]['image_width'], self.cfgs[run]['n_channels']
        ])
        self.edgemaps = tf.placeholder(tf.float32, [
            None, self.cfgs[run]['image_height'],
            self.cfgs[run]['image_width'], 1
        ])

        self.define_model()
Example #10
    def __init__(self, config_file, number_iterations, path_test_image):

        self.io = IO()
        self.init = True

        try:
            pfile = open(config_file)
            self.cfgs = yaml.safe_load(pfile)
            pfile.close()

        except Exception as err:

            self.io.print_error(
                'Error reading config file {}, {}'.format(config_file, err))

        if number_iterations is None:
            self.io.print_error('please choose an existing pretrained model')
        self.test_snapshot = number_iterations

        if path_test_image is None or not os.path.exists(path_test_image):
            self.io.print_error('the chosen test image does not exist')
        self.test_image = path_test_image
Example #11
class HEDReshaper():
    def __init__(self, config_file):

        self.io = IO()
        self.init = True

        try:
            pfile = open(config_file)
            self.cfgs = yaml.safe_load(pfile)
            pfile.close()

        except Exception as err:

            self.io.print_error(
                'Error reading config file {}, {}'.format(config_file, err))

    def setup(self, session):

        try:

            self.model = Vgg16(self.cfgs, run='reshaping')

            meta_model_file = os.path.join(
                self.cfgs['save_dir'],
                'models/hed-model-{}'.format(self.cfgs['test_snapshot']))

            saver = tf.train.Saver()
            saver.restore(session, meta_model_file)

            self.io.print_info(
                'Done restoring VGG-16 model from {}'.format(meta_model_file))

        except Exception as err:

            self.io.print_error(
                'Error setting up VGG-16 model, {}'.format(err))
            self.init = False

    def run(self, session):

        if not self.init:
            return

        self.model.setup_reshaping(session)

        idx = 0
        saver = tf.train.Saver()
        saver.save(session,
                   os.path.join(self.cfgs['save_dir'],
                                'reshape_models/hed-model'),
                   global_step=idx)
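
Typical wiring for the reshaper mirrors main() in Example #2: restore a trained snapshot into a graph built with the 'reshaping' input dimensions, then re-save it under reshape_models. A usage sketch (the config path and GPU fraction are illustrative):

session = get_session(0.5)
reshaper = HEDReshaper('hed/configs/hed.yaml')
reshaper.setup(session)
reshaper.run(session)
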
Example #12
class Vgg16():
    def __init__(self, cfgs, saveDir=None, initmodelfile=None, run='training'):

        self.cfgs = cfgs
        self.saveDir = saveDir
        self.io = IO()
        weights_file = initmodelfile
        self.data_dict = np.load(weights_file, encoding='latin1',
                                 allow_pickle=True).item()
        self.io.print_info("Model weights loaded from {}".format(weights_file))

        self.images = tf.placeholder(tf.float32, [
            None, self.cfgs[run]['image_height'],
            self.cfgs[run]['image_width'], self.cfgs[run]['n_channels']
        ],
                                     name="inputlayer")
        self.edgemaps = tf.placeholder(tf.float32, [
            None, self.cfgs[run]['image_height'],
            self.cfgs[run]['image_width'], 1
        ])
        self.define_model()

    def define_model(self):
        """
        Load VGG params from disk without FC layers
        Add branch layers (with deconv) after each CONV block
        """

        start_time = time.time()

        self.conv1_1 = self.conv_layer_vgg(self.images, "conv1_1")
        self.conv1_2 = self.conv_layer_vgg(self.conv1_1, "conv1_2")
        self.side_1 = self.side_layer(self.conv1_2, "side_1", 1)
        self.pool1 = self.max_pool(self.conv1_2, 'pool1')

        self.io.print_info('Added CONV-BLOCK-1+SIDE-1')

        self.conv2_1 = self.conv_layer_vgg(self.pool1, "conv2_1")
        self.conv2_2 = self.conv_layer_vgg(self.conv2_1, "conv2_2")
        self.side_2 = self.side_layer(self.conv2_2, "side_2", 2)
        self.pool2 = self.max_pool(self.conv2_2, 'pool2')

        self.io.print_info('Added CONV-BLOCK-2+SIDE-2')

        self.conv3_1 = self.conv_layer_vgg(self.pool2, "conv3_1")
        self.conv3_2 = self.conv_layer_vgg(self.conv3_1, "conv3_2")
        self.conv3_3 = self.conv_layer_vgg(self.conv3_2, "conv3_3")
        self.side_3 = self.side_layer(self.conv3_3, "side_3", 4)
        self.pool3 = self.max_pool(self.conv3_3, 'pool3')

        self.io.print_info('Added CONV-BLOCK-3+SIDE-3')

        self.conv4_1 = self.conv_layer_vgg(self.pool3, "conv4_1")
        self.conv4_2 = self.conv_layer_vgg(self.conv4_1, "conv4_2")
        self.conv4_3 = self.conv_layer_vgg(self.conv4_2, "conv4_3")
        self.side_4 = self.side_layer(self.conv4_3, "side_4", 8)
        self.pool4 = self.max_pool(self.conv4_3, 'pool4')

        self.io.print_info('Added CONV-BLOCK-4+SIDE-4')

        self.conv5_1 = self.conv_layer_vgg(self.pool4, "conv5_1")
        self.conv5_2 = self.conv_layer_vgg(self.conv5_1, "conv5_2")
        self.conv5_3 = self.conv_layer_vgg(self.conv5_2, "conv5_3")
        self.side_5 = self.side_layer(self.conv5_3, "side_5", 16)

        self.io.print_info('Added CONV-BLOCK-5+SIDE-5')

        self.side_outputs = [
            self.side_1, self.side_2, self.side_3, self.side_4, self.side_5
        ]

        w_shape = [1, 1, len(self.side_outputs), 1]
        self.fuse = self.conv_layer(tf.concat(self.side_outputs, axis=3),
                                    w_shape,
                                    name='fuse_1',
                                    use_bias=False,
                                    w_init=tf.constant_initializer(0.2))

        self.io.print_info('Added FUSE layer')

        # complete output maps from side layer and fuse layers
        self.outputs = self.side_outputs + [self.fuse]

        self.data_dict = None
        self.io.print_info("Build model finished: {:.4f}s".format(time.time() -
                                                                  start_time))

    def max_pool(self, bottom, name):
        return tf.nn.max_pool(bottom,
                              ksize=[1, 2, 2, 1],
                              strides=[1, 2, 2, 1],
                              padding='SAME',
                              name=name)

    def conv_layer_vgg(self, bottom, name):
        """
            Adding a conv layer + weight parameters from a dict
        """
        with tf.variable_scope(name):
            filt = self.get_conv_filter(name)

            conv = tf.nn.conv2d(bottom, filt, [1, 1, 1, 1], padding='SAME')

            conv_biases = self.get_bias(name)
            bias = tf.nn.bias_add(conv, conv_biases)

            relu = tf.nn.relu(bias)
            return relu

    def conv_layer(self,
                   x,
                   W_shape,
                   b_shape=None,
                   name=None,
                   padding='SAME',
                   use_bias=True,
                   w_init=None,
                   b_init=None):

        W = self.weight_variable(W_shape, w_init)
        tf.summary.histogram('weights_{}'.format(name), W)

        if use_bias:
            b = self.bias_variable([b_shape], b_init)
            tf.summary.histogram('biases_{}'.format(name), b)

        conv = tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding=padding)

        return conv + b if use_bias else conv

    def deconv_layer(self, x, upscale, name, padding='SAME', w_init=None):

        x_shape = tf.shape(x)
        in_shape = x.shape.as_list()

        w_shape = [upscale * 2, upscale * 2, in_shape[-1], 1]
        strides = [1, upscale, upscale, 1]

        W = self.weight_variable(w_shape, w_init)
        tf.summary.histogram('weights_{}'.format(name), W)

        out_shape = tf.stack([x_shape[0], x_shape[1], x_shape[2], w_shape[2]
                              ]) * tf.constant(strides, tf.int32)
        deconv = tf.nn.conv2d_transpose(x,
                                        W,
                                        out_shape,
                                        strides=strides,
                                        padding=padding)

        return deconv

    def side_layer(self, inputs, name, upscale):
        """
            https://github.com/s9xie/hed/blob/9e74dd710773d8d8a469ad905c76f4a7fa08f945/examples/hed/train_val.prototxt#L122
            1x1 conv followed by a deconvolution layer to upscale back to the input image size, sans color
        """
        with tf.variable_scope(name):

            in_shape = inputs.shape.as_list()
            w_shape = [1, 1, in_shape[-1], 1]

            classifier = self.conv_layer(inputs,
                                         w_shape,
                                         b_shape=1,
                                         w_init=tf.constant_initializer(),
                                         b_init=tf.constant_initializer(),
                                         name=name + '_reduction')

            classifier = self.deconv_layer(
                classifier,
                upscale=upscale,
                name='{}_deconv_{}'.format(name, upscale),
                w_init=tf.truncated_normal_initializer(stddev=0.1))

            return classifier

    def get_conv_filter(self, name):
        return tf.constant(self.data_dict[name][0], name="filter")

    def get_bias(self, name):
        return tf.constant(self.data_dict[name][1], name="biases")

    def weight_variable(self, shape, initial):

        init = initial(shape)
        return tf.Variable(init)

    def bias_variable(self, shape, initial):

        init = initial(shape)
        return tf.Variable(init)

    def setup_testing(self, session):
        """
            Apply sigmoid non-linearity to side layer outputs + fuse layer outputs for predictions
        """

        self.predictions = []

        for idx, b in enumerate(self.outputs):
            output = tf.nn.sigmoid(b, name='output_{}'.format(idx))
            self.predictions.append(output)

    def setup_training(self, session):
        """
            Apply sigmoid non-linearity to side layer outputs + fuse layer outputs
            Compute total loss := side_layer_loss + fuse_layer_loss
            Compute predicted edge maps from fuse layer as pseudo performance metric to track
        """

        self.predictions = []
        self.loss = 0

        self.io.print_warning('Deep supervision application set to {}'.format(
            self.cfgs['deep_supervision']))

        for idx, b in enumerate(self.side_outputs):
            output = tf.nn.sigmoid(b, name='output_{}'.format(idx))
            cost = sigmoid_cross_entropy_balanced(
                b, self.edgemaps, name='cross_entropy{}'.format(idx))

            self.predictions.append(output)
            if self.cfgs['deep_supervision']:
                self.loss += (self.cfgs['loss_weights'] * cost)

        fuse_output = tf.nn.sigmoid(self.fuse, name='fuse')
        tf.summary.image('fuseSigmoid', fuse_output)
        fuse_cost = sigmoid_cross_entropy_balanced(self.fuse,
                                                   self.edgemaps,
                                                   name='cross_entropy_fuse')
        tf.summary.image('cvSourceEdge', self.edgemaps)
        self.predictions.append(fuse_output)
        self.loss += (self.cfgs['loss_weights'] * fuse_cost)

        pred = tf.cast(tf.greater(fuse_output, 0.5),
                       tf.int32,
                       name='predictions')
        error = tf.cast(tf.not_equal(pred, tf.cast(self.edgemaps, tf.int32)),
                        tf.float32)
        self.error = tf.reduce_mean(error, name='pixel_error')
        tf.summary.scalar('loss', self.loss)
        tf.summary.scalar('error', self.error)

        self.merged_summary = tf.summary.merge_all()
        save_dir = self.saveDir
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        self.train_writer = tf.summary.FileWriter(self.saveDir + '/train',
                                                  session.graph)
        self.val_writer = tf.summary.FileWriter(self.saveDir + '/val')
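
sigmoid_cross_entropy_balanced comes from the repo's losses module and is not shown in this section; a sketch of the class-balanced cross-entropy from the HED paper (Equation 2 in https://arxiv.org/abs/1504.06375), which reweights edge and non-edge pixels by their per-batch frequencies:

import tensorflow as tf

def sigmoid_cross_entropy_balanced(logits, label, name='cross_entropy_loss'):
    y = tf.cast(label, tf.float32)
    count_neg = tf.reduce_sum(1. - y)  # non-edge pixels
    count_pos = tf.reduce_sum(y)       # edge pixels
    beta = count_neg / (count_neg + count_pos)
    pos_weight = beta / (1 - beta)
    cost = tf.nn.weighted_cross_entropy_with_logits(logits=logits,
                                                    targets=y,
                                                    pos_weight=pos_weight)
    # scale by (1 - beta) and guard against batches with no edge pixels
    cost = tf.reduce_mean(cost * (1 - beta))
    return tf.where(tf.equal(count_pos, 0.0), 0.0, cost, name=name)
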
Example #13
class DataParser():
    def __init__(self, cfgs, dataDir=None, initmodelfile=None):

        self.io = IO()
        self.cfgs = cfgs
        download_path = dataDir
        self.train_file = os.path.join(download_path, cfgs['training']['list'])
        self.train_data_dir = os.path.join(download_path,
                                           cfgs['training']['dir'])
        self.training_pairs = self.io.read_file_list(self.train_file)

        self.samples = self.io.split_pair_names(self.training_pairs,
                                                self.train_data_dir)
        self.io.print_info('Training data set-up from {}'.format(
            self.train_file))

        self.n_samples = len(self.training_pairs)
        self.all_ids = list(range(self.n_samples))
        np.random.shuffle(self.all_ids)

        self.training_ids = self.all_ids[:int(self.cfgs['train_split'] *
                                              len(self.training_pairs))]
        self.validation_ids = self.all_ids[int(self.cfgs['train_split'] *
                                               len(self.training_pairs)):]

        self.io.print_info('Training samples {}'.format(len(
            self.training_ids)))
        self.io.print_info('Validation samples {}'.format(
            len(self.validation_ids)))

    def get_training_batch(self):

        batch_ids = np.random.choice(self.training_ids,
                                     self.cfgs['batch_size_train'])

        return self.get_batch(batch_ids)

    def get_validation_batch(self):

        batch_ids = np.random.choice(self.validation_ids,
                                     self.cfgs['batch_size_val'])

        return self.get_batch(batch_ids)

    def get_batch(self, batch):

        tstart = time.time()

        filenames = []
        images = []
        edgemaps = []

        for idx, b in enumerate(batch):

            im = Image.open(self.samples[b][0])
            em = Image.open(self.samples[b][1])

            im = im.resize((self.cfgs['training']['image_width'],
                            self.cfgs['training']['image_height']))
            em = em.resize((self.cfgs['training']['image_width'],
                            self.cfgs['training']['image_height']))

            im = np.array(im, dtype=np.float32)
            im = im[:, :, self.cfgs['channel_swap']]
            im -= self.cfgs['mean_pixel_value']

            # Labels need to be 1 or 0 (edge pixel or not)
            # or can use regression targets as done by the author
            # https://github.com/s9xie/hed/blob/9e74dd710773d8d8a469ad905c76f4a7fa08f945/src/caffe/layers/image_labelmap_data_layer.cpp#L213

            em = np.array(em.convert('L'), dtype=np.float32)

            if self.cfgs['target_regression']:
                bin_em = em / 255.0
            else:
                bin_em = np.zeros_like(em)
                bin_em[np.where(em)] = 1

            # Some edge maps have 3 channels, some don't
            bin_em = bin_em if bin_em.ndim == 2 else bin_em[:, :, 0]
            # To fit [batch_size, H, W, 1] output of the network
            bin_em = np.expand_dims(bin_em, 2)

            images.append(im)
            edgemaps.append(bin_em)
            filenames.append(self.samples[b])

        return images, edgemaps, filenames
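
A usage sketch of the parser (mirrors HEDTrainer.run in Example #15; the dataDir value is illustrative). Note that np.random.choice samples with replacement, so a batch may contain duplicate ids:

parser = DataParser(cfgs, dataDir='data')
im, em, fnames = parser.get_training_batch()
# feed_dict = {model.images: im, model.edgemaps: em}
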
Example #14
class HEDTester():

    def __init__(self, config_file):

        self.io = IO()
        self.init = True

        try:
            pfile = open(config_file)
            self.cfgs = yaml.safe_load(pfile)
            pfile.close()

        except Exception as err:

            self.io.print_error('Error reading config file {}, {}'.format(config_file, err))

    def setup(self, session):

        try:

            self.model = Vgg16(self.cfgs, run='testing')

            meta_model_file = os.path.join(self.cfgs['save_dir'], 'models/hed-model-{}'.format(self.cfgs['test_snapshot']))

            saver = tf.train.Saver()
            saver.restore(session, meta_model_file)

            self.io.print_info('Done restoring VGG-16 model from {}'.format(meta_model_file))

        except Exception as err:

            self.io.print_error('Error setting up VGG-16 model, {}'.format(err))
            self.init = False

    def run(self, session, dataset):

        if not self.init:
            return

        self.model.setup_testing(session)

        filepath = os.path.join(self.cfgs['download_path'], self.cfgs['testing']['list'])
        
        train_list = os.listdir(dataset)

        self.io.print_info('Writing PNGs at {}'.format(self.cfgs['test_output']))
        
        for idx, img in enumerate(train_list, 1):
            test_filename = os.path.join(dataset, img)
            im = self.fetch_image(test_filename)

            edgemap = session.run(self.model.predictions, feed_dict={self.model.images: [im]})
            self.save_egdemaps(edgemap, idx, img)
            print 'processing img', test_filename

    def save_egdemaps(self, em_maps, index, name):

        # Take the edge map from the network from side layers and fuse layer
        em_maps = [e[0] for e in em_maps]
        em_maps = em_maps + [np.mean(np.array(em_maps), axis=0)]

        for idx, em in enumerate(em_maps):
            # idx 5 is the fused output; also save it as a .mat file
            if idx == 5:
                sio.savemat('holy-edge/edgemaps/' + name[:-4] + '.mat', {'predict': em})
            em[em < self.cfgs['testing_threshold']] = 0.0

            em = 255.0 * (1.0 - em)
            em = np.tile(em, [1, 1, 3])

            em = Image.fromarray(np.uint8(em))
            # only the fused output is written out as a PNG
            if idx == 5:
                em.save('holy-edge/edgemaps/' + name)

    def fetch_image(self, test_image):

        # is url
        image = None

        if urlparse.urlparse(test_image).scheme != "":

            url_response = urllib.urlopen(test_image)

            if url_response.code == 404:
                self.io.print_error('[Testing] URL error code : {1} for {0}'.format(test_image, url_response.code))
                return None

            try:

                image_buffer = cStringIO.StringIO(url_response.read())
                image = self.capture_pixels(image_buffer)

            except Exception as err:

                self.io.print_error('[Testing] Error with URL {0} {1}'.format(test_image, err))
                return None

        # read from disk
        elif os.path.exists(test_image):

            try:

                fid = open(test_image, 'rb')
                stream = fid.read()
                fid.close()

                image_buffer = cStringIO.StringIO(stream)
                image = self.capture_pixels(image_buffer)

            except Exception as err:

                self.io.print_error('[Testing] Error with image file {0} {1}'.format(test_image, err))
                return None

        return image

    def capture_pixels(self, image_buffer):

        image = Image.open(image_buffer)
        image = image.resize((self.cfgs['testing']['image_width'], self.cfgs['testing']['image_height']))
        image = np.array(image, np.float32)
        image = self.colorize(image)

        image = image[:, :, self.cfgs['channel_swap']]
        image -= self.cfgs['mean_pixel_value']

        return image

    def colorize(self, image):

        # BW to 3 channel RGB image
        if image.ndim == 2:
            image = image[:, :, np.newaxis]
            image = np.tile(image, (1, 1, 3))
        elif image.shape[2] == 4:
            image = image[:, :, :3]

        return image
Example #15
class HEDTrainer():
    def __init__(self, config_file):
        self.io = IO()
        self.init = True

        try:
            pfile = open(config_file)
            self.cfgs = yaml.safe_load(pfile)
            pfile.close()
        except Exception as err:
            print(('Error reading config file {}, {}'.format(config_file,
                                                             err)))
            raise err

    def setup(self):
        try:
            self.model = Vgg16(self.cfgs)
            self.io.print_info('Done initializing VGG-16 model')

            dirs = ['train', 'val', 'test', 'models']
            dirs = [os.path.join(self.cfgs['save_dir'], d) for d in dirs]
            for d in dirs:
                if not os.path.exists(d):
                    os.makedirs(d)
        except Exception as err:
            self.io.print_error(
                'Error setting up VGG-16 model, {}'.format(err))
            self.init = False
            raise err

    def run(self, session):
        if not self.init:
            return

        train_data = DataParser(self.cfgs)

        self.model.setup_training(session)

        opt = tf.train.AdamOptimizer(
            self.cfgs['optimizer_params']['learning_rate'])
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            train = opt.minimize(self.model.loss)

        session.run(tf.global_variables_initializer())

        for idx in range(self.cfgs['max_iterations']):
            im, em, _ = train_data.get_training_batch()

            run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
            run_metadata = tf.RunMetadata()

            _, summary, loss = session.run(
                [train, self.model.merged_summary, self.model.loss],
                feed_dict={
                    self.model.images: im,
                    self.model.edgemaps: em,
                    self.model.training: True
                },
                options=run_options,
                run_metadata=run_metadata)

            self.model.train_writer.add_run_metadata(run_metadata,
                                                     'step{:06}'.format(idx))
            self.model.train_writer.add_summary(summary, idx)

            self.io.print_info('[{}/{}] TRAINING loss : {}'.format(
                idx, self.cfgs['max_iterations'], loss))

            if idx % self.cfgs['save_interval'] == 0:
                saver = tf.train.Saver()
                saver.save(session,
                           os.path.join(self.cfgs['save_dir'],
                                        'models/hed-model'),
                           global_step=idx)

            if idx % self.cfgs['val_interval'] == 0:
                im, em, _ = train_data.get_validation_batch()

                val_im_summary, summary, error = session.run(
                    [
                        self.model.val_im_summary, self.model.merged_summary,
                        self.model.error
                    ],
                    feed_dict={
                        self.model.images: im,
                        self.model.edgemaps: em,
                        self.model.training: True
                    })

                self.model.val_writer.add_summary(summary, idx)
                self.model.val_writer.add_summary(val_im_summary, idx)
                self.io.print_info('[{}/{}] VALIDATION error : {}'.format(
                    idx, self.cfgs['max_iterations'], error))

        self.model.train_writer.close()
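
Note that this trainer fetches self.model.val_im_summary and feeds self.model.training, neither of which appears in the Vgg16 variants shown in this section; presumably its model also registers a training flag and an image summary, along these hypothetical lines:

self.training = tf.placeholder(tf.bool, name='training')
self.val_im_summary = tf.summary.image('validation_edges', fuse_output)
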
Example #16
class HEDTrainer():

    def __init__(self, dataDir=None, saveDir=None, initmodelfile=None, configfile=None):

        self.io = IO()
        self.init = True
        self.dataDir = dataDir
        self.saveDir = saveDir
        self.initmodelfile = initmodelfile
        try:
            pfile = open(configfile)
            self.cfgs = yaml.safe_load(pfile)
            pfile.close()

        except Exception as err:

            print('Error reading config file {}, {}'.format(configfile, err))

    def setup(self):

        try:

            self.model = Vgg16(self.cfgs, self.saveDir, self.initmodelfile)
            # self.model = HedNet(self.cfgs,self.saveDir,self.initmodelfile)
            self.io.print_info('Done initializing VGG-16 model')
            dirs = ['train', 'val', 'test', 'models']
            save_dir = self.saveDir
            if not os.path.exists(save_dir):
                os.makedirs(save_dir)

            dirs = [os.path.join(save_dir, d) for d in dirs]
            for d in dirs:
                if not os.path.exists(d):
                    os.makedirs(d)

        except Exception as err:

            self.io.print_error('Error setting up VGG-16 model, {}'.format(err))
            self.init = False

    def run(self, session):

        if not self.init:
            return

        train_data = DataParser(self.cfgs, self.dataDir)

        self.model.setup_training(session)

        opt = tf.train.AdamOptimizer(self.cfgs['optimizer_params']['learning_rate'])
        train = opt.minimize(self.model.loss)

        session.run(tf.global_variables_initializer())

        for idx in range(self.cfgs['max_iterations']):

            im, em, _ = train_data.get_training_batch()

            run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
            run_metadata = tf.RunMetadata()

            _, summary, loss, error = session.run(
                [train, self.model.merged_summary, self.model.loss, self.model.error],
                feed_dict={self.model.images: im, self.model.edgemaps: em},
                options=run_options,
                run_metadata=run_metadata)

            self.model.train_writer.add_run_metadata(run_metadata, 'step{:06}'.format(idx))
            self.io.print_info('[{}/{}] TRAINING loss : {}'.format(idx, self.cfgs['max_iterations'], loss))
            self.io.print_info('[{}/{}] TRAINING error : {}'.format(idx, self.cfgs['max_iterations'], error))

            if idx % 10 == 0:
                self.model.train_writer.add_summary(summary, idx)
            '''    
            if idx % self.cfgs['save_interval'] == 0:

                saver = tf.train.Saver()
                saver.save(session, os.path.join(self.saveDir, 'models/hed-model'), global_step=idx)

            if idx % self.cfgs['val_interval'] == 0:

                im, em, _ = train_data.get_validation_batch()

                summary, error = session.run([self.model.merged_summary, self.model.error], feed_dict={self.model.images: im, self.model.edgemaps: em})

                self.model.val_writer.add_summary(summary, idx)
                self.io.print_info('[{}/{}] VALIDATION error : {}'.format(idx, self.cfgs['max_iterations'], error))
            '''
            if idx == self.cfgs['max_iterations'] - 1:
                save_dir = self.saveDir
                if not os.path.exists(save_dir):
                    os.makedirs(save_dir)
                graph = tf.graph_util.convert_variables_to_constants(session, session.graph_def, ["fuse"])
                tf.train.write_graph(graph, os.path.join(save_dir, 'models'), 'testgraph.pb', as_text=False)

        self.model.train_writer.close()
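
This trainer freezes the graph with "fuse" as the output node; a minimal sketch of loading the resulting testgraph.pb afterwards (TF 1.x; the tensor names follow the "inputlayer" placeholder and the sigmoid op named 'fuse' in setup_training):

import tensorflow as tf

with tf.gfile.GFile('models/testgraph.pb', 'rb') as f:  # path is illustrative
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())

with tf.Graph().as_default() as graph:
    tf.import_graph_def(graph_def, name='')
    images = graph.get_tensor_by_name('inputlayer:0')
    fuse = graph.get_tensor_by_name('fuse:0')
    # with tf.Session(graph=graph) as sess:
    #     edges = sess.run(fuse, feed_dict={images: batch})
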
Example #17
class HEDTester():
    def __init__(self, config_file):

        self.io = IO()
        self.init = True

        try:
            pfile = open(config_file)
            self.cfgs = yaml.safe_load(pfile)
            pfile.close()

        except Exception as err:

            self.io.print_error(
                'Error reading config file {}, {}'.format(config_file, err))

    def setup(self, session):

        try:

            self.model = Vgg16(self.cfgs, run='testing')

            meta_model_file = os.path.join(
                self.cfgs['save_dir'],
                'models/hed-model-{}'.format(self.cfgs['test_snapshot']))

            saver = tf.train.Saver()
            saver.restore(session, meta_model_file)

            self.io.print_info(
                'Done restoring VGG-16 model from {}'.format(meta_model_file))

        except Exception as err:

            self.io.print_error(
                'Error setting up VGG-16 model, {}'.format(err))
            self.init = False

    def run(self, session):

        if not self.init:
            return

        self.model.setup_testing(session)

        filepath = os.path.join(self.cfgs['download_path'],
                                self.cfgs['testing']['list'])
        train_list = self.io.read_file_list(filepath)

        self.io.print_info('Writing PNGs at {}'.format(
            self.cfgs['test_output']))

        for idx, img in enumerate(train_list):

            test_filename = os.path.join(self.cfgs['download_path'],
                                         self.cfgs['testing']['dir'], img)
            im = sio.loadmat(test_filename)['noisy']
            #im=cv2.resize(im,(self.cfgs['testing']['image_height'], self.cfgs['testing']['image_width']))

            new_im = im.astype(np.float32)
            new_im -= self.cfgs['mean_pixel_value']
            im = new_im[:, :, 1:5]

            #im=np.expand_dims(im,axis=2)

            edgemap = session.run(self.model.predictions,
                                  feed_dict={self.model.images: [im]})
            self.save_egdemaps(edgemap, idx)

            self.io.print_info('Done testing {}, {}'.format(
                test_filename, im.shape))

    def save_egdemaps(self, em_maps, index):

        # Take the edge map from the network from side layers and fuse layer
        em_maps = [e[0] for e in em_maps]
        em_maps = em_maps + [np.mean(np.array(em_maps), axis=0)]

        for idx, em in enumerate(em_maps):

            em[em < self.cfgs['testing_threshold']] = 0.0
            save_mat_path = os.path.join(self.cfgs['test_output'],
                                         'testing-{}-{:03}'.format(index, idx))
            sio.savemat(save_mat_path, {'magnitude_field': em})

            em = 255.0 * (1.0 - em)

            em = np.tile(em, [1, 1, 3])

            em = Image.fromarray(np.uint8(em))
            em.save(
                os.path.join(self.cfgs['test_output'],
                             'testing-{}-{:03}.png'.format(index, idx)))
Example #18
class Vgg16():
    def __init__(self, cfgs, saveDir=None, initmodelfile=None, run='training'):

        self.cfgs = cfgs
        self.saveDir = saveDir
        self.io = IO()
        weights_file = initmodelfile
        self.data_dict = np.load(weights_file, encoding='latin1',
                                 allow_pickle=True).item()
        self.io.print_info("Model weights loaded from {}".format(weights_file))

        self.images = tf.placeholder(tf.float32, [
            None, self.cfgs[run]['image_height'],
            self.cfgs[run]['image_width'], self.cfgs[run]['n_channels']
        ],
                                     name="inputlayer")
        self.edgemaps = tf.placeholder(tf.float32, [
            None, self.cfgs[run]['image_height'],
            self.cfgs[run]['image_width'], 1
        ])

        self.weights_regularizer = None
        self.filter_initializer = tf.contrib.layers.xavier_initializer()

        self.define_model()

    def define_model(self):
        """
        Load VGG params from disk without FC layers
        Add branch layers (with deconv) after each CONV block
        """

        start_time = time.time()

        self.conv1_1 = self.conv_layer_vgg(self.images, "conv1_1")
        self.conv1_2 = self.conv_layer_vgg(self.conv1_1, "conv1_2")
        with tf.variable_scope('dsn1'):
            #self.side_1 = self.side_layer(self.conv1_2, "side_1", 1)
            self.side_1 = self.dsn_1x1_conv2d(self.conv1_2, 1)

        self.pool1 = self.max_pool(self.conv1_2, 'pool1')
        self.io.print_info('Added CONV-BLOCK-1+SIDE-1')

        self.conv2_1 = self.conv_layer_vgg(self.pool1, "conv2_1")
        self.conv2_2 = self.conv_layer_vgg(self.conv2_1, "conv2_2")
        with tf.variable_scope('dsn2'):
            #self.side_2 = self.side_layer(self.conv2_2, "side_2", 2)
            self.side_2 = self.dsn_1x1_conv2d(self.conv2_2, 1)
            self.side_2 = self.dsn_deconv2d_with_upsample_factor(
                self.side_2, 1, upsample_factor=2)

        self.pool2 = self.max_pool(self.conv2_2, 'pool2')
        self.io.print_info('Added CONV-BLOCK-2+SIDE-2')

        self.conv3_1 = self.conv_layer_vgg(self.pool2, "conv3_1")
        self.conv3_2 = self.conv_layer_vgg(self.conv3_1, "conv3_2")
        self.conv3_3 = self.conv_layer_vgg(self.conv3_2, "conv3_3")
        with tf.variable_scope('dsn3'):
            #self.side_3 = self.side_layer(self.conv3_3, "side_3", 4)
            self.side_3 = self.dsn_1x1_conv2d(self.conv3_3, 1)
            self.side_3 = self.dsn_deconv2d_with_upsample_factor(
                self.side_3, 1, upsample_factor=4)

        self.pool3 = self.max_pool(self.conv3_3, 'pool3')

        self.io.print_info('Added CONV-BLOCK-3+SIDE-3')

        self.conv4_1 = self.conv_layer_vgg(self.pool3, "conv4_1")
        self.conv4_2 = self.conv_layer_vgg(self.conv4_1, "conv4_2")
        self.conv4_3 = self.conv_layer_vgg(self.conv4_2, "conv4_3")
        with tf.variable_scope('dsn4'):
            #self.side_4 = self.side_layer(self.conv4_3, "side_4", 8)
            self.side_4 = self.dsn_1x1_conv2d(self.conv4_3, 1)
            self.side_4 = self.dsn_deconv2d_with_upsample_factor(
                self.side_4, 1, upsample_factor=8)

        self.pool4 = self.max_pool(self.conv4_3, 'pool4')
        self.io.print_info('Added CONV-BLOCK-4+SIDE-4')

        self.conv5_1 = self.conv_layer_vgg(self.pool4, "conv5_1")
        self.conv5_2 = self.conv_layer_vgg(self.conv5_1, "conv5_2")
        self.conv5_3 = self.conv_layer_vgg(self.conv5_2, "conv5_3")
        with tf.variable_scope('dsn5'):
            #self.side_5 = self.side_layer(self.conv5_3, "side_5", 16)
            self.side_5 = self.dsn_1x1_conv2d(self.conv5_3, 1)
            self.side_5 = self.dsn_deconv2d_with_upsample_factor(
                self.side_5, 1, upsample_factor=16)

        self.io.print_info('Added CONV-BLOCK-5+SIDE-5')
        self.side_outputs = [
            self.side_1, self.side_2, self.side_3, self.side_4, self.side_5
        ]

        w_shape = [1, 1, len(self.side_outputs), 1]

        with tf.variable_scope('fuse'):
            #self.fuse = self.conv_layer(tf.concat(self.side_outputs, axis=3),
            #                            w_shape, name='fuse_1', use_bias=False,
            #                            w_init=tf.constant_initializer(0.2))
            self.fuse = tf.concat([
                self.side_1, self.side_2, self.side_3, self.side_4, self.side_5
            ], 3)
            self.fuse = self.output_1x1_conv2d(self.fuse, 1)

        # complete output maps from side layer and fuse layers
        self.outputs = self.side_outputs + [self.fuse]

        self.data_dict = None
        self.io.print_info("Build model finished: {:.4f}s".format(time.time() -
                                                                  start_time))

    def dsn_1x1_conv2d(self, inputs, filters):
        use_bias = True
        #if const.use_batch_norm:
        #    use_bias = False

        kernel_size = [1, 1]
        outputs = tf.layers.conv2d(
            inputs,
            filters,
            kernel_size,
            padding='same',
            activation=None,  ## no activation
            use_bias=use_bias,
            kernel_initializer=self.filter_initializer,
            kernel_regularizer=self.weights_regularizer)

        #if const.use_batch_norm:
        #    outputs = tf.layers.batch_normalization(outputs, training=is_training)
        ## no activation
        return outputs

    def output_1x1_conv2d(self, inputs, filters):
        kernel_size = [1, 1]
        outputs = tf.layers.conv2d(
            inputs,
            filters,
            kernel_size,
            padding='same',
            activation=None,  ## no activation
            use_bias=True,  ## use bias
            kernel_initializer=self.filter_initializer,
            kernel_regularizer=self.weights_regularizer)

        ## no batch normalization
        ## no activation

        return outputs

    def dsn_deconv2d_with_upsample_factor(self, inputs, filters,
                                          upsample_factor):
        ## https://github.com/s9xie/hed/blob/master/examples/hed/train_val.prototxt
        ## following the original implementation, kernel_size is computed as 2 * upsample_factor:
        kernel_size = [2 * upsample_factor, 2 * upsample_factor]
        outputs = tf.layers.conv2d_transpose(
            inputs,
            filters,
            kernel_size,
            strides=(upsample_factor, upsample_factor),
            padding='same',
            activation=None,  ## no activation
            use_bias=True,  ## use bias
            kernel_initializer=self.filter_initializer,
            kernel_regularizer=self.weights_regularizer)

        ## conceptually this deconv2d is already the final output layer; the only remaining step
        ## is a 1x1 conv2d that fuses the five deconv2d outputs together,
        ## so no batch normalization is needed here
        return outputs

    def max_pool(self, bottom, name):
        return tf.nn.max_pool(bottom,
                              ksize=[1, 2, 2, 1],
                              strides=[1, 2, 2, 1],
                              padding='SAME',
                              name=name)

    def conv_layer_vgg(self, bottom, name):
        """
            Adding a conv layer + weight parameters from a dict
        """
        with tf.variable_scope(name):
            filt = self.get_conv_filter(name)
            conv = tf.nn.conv2d(bottom, filt, [1, 1, 1, 1], padding='SAME')
            conv_biases = self.get_bias(name)
            bias = tf.nn.bias_add(conv, conv_biases)
            relu = tf.nn.relu(bias)
            return relu

    def conv_layer(self,
                   x,
                   W_shape,
                   b_shape=None,
                   name=None,
                   padding='SAME',
                   use_bias=True,
                   w_init=None,
                   b_init=None):

        W = self.weight_variable(W_shape, w_init)
        tf.summary.histogram('weights_{}'.format(name), W)
        if use_bias:
            b = self.bias_variable([b_shape], b_init)
            tf.summary.histogram('biases_{}'.format(name), b)
        conv = tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding=padding)
        return conv + b if use_bias else conv

    def deconv_layer(self, x, upscale, name, padding='SAME', w_init=None):

        x_shape = tf.shape(x)
        in_shape = x.shape.as_list()
        w_shape = [upscale * 2, upscale * 2, in_shape[-1], 1]
        strides = [1, upscale, upscale, 1]
        W = self.weight_variable(w_shape, w_init)
        tf.summary.histogram('weights_{}'.format(name), W)
        out_shape = tf.stack([x_shape[0], x_shape[1], x_shape[2], w_shape[2]
                              ]) * tf.constant(strides, tf.int32)
        deconv = tf.nn.conv2d_transpose(x,
                                        W,
                                        out_shape,
                                        strides=strides,
                                        padding=padding)
        return deconv

    def side_layer(self, inputs, name, upscale):
        """
            https://github.com/s9xie/hed/blob/9e74dd710773d8d8a469ad905c76f4a7fa08f945/examples/hed/train_val.prototxt#L122
            1x1 conv followed by a deconvolution layer to upscale back to the input image size, sans color
        """
        with tf.variable_scope(name):
            in_shape = inputs.shape.as_list()
            w_shape = [1, 1, in_shape[-1], 1]
            classifier = self.conv_layer(inputs,
                                         w_shape,
                                         b_shape=1,
                                         w_init=tf.constant_initializer(),
                                         b_init=tf.constant_initializer(),
                                         name=name + '_reduction')

            classifier = self.deconv_layer(
                classifier,
                upscale=upscale,
                name='{}_deconv_{}'.format(name, upscale),
                w_init=tf.truncated_normal_initializer(stddev=0.1))
            return classifier

    def get_conv_filter(self, name):
        return tf.constant(self.data_dict[name][0], name="filter")

    def get_bias(self, name):
        return tf.constant(self.data_dict[name][1], name="biases")

    def weight_variable(self, shape, initial):
        init = initial(shape)
        return tf.Variable(init)

    def bias_variable(self, shape, initial):
        init = initial(shape)
        return tf.Variable(init)

    def setup_testing(self, session):
        """
            Apply sigmoid non-linearity to side layer outputs + fuse layer outputs for predictions
        """
        self.predictions = []
        for idx, b in enumerate(self.outputs):
            output = tf.nn.sigmoid(b, name='output_{}'.format(idx))
            self.predictions.append(output)

    def setup_training(self, session):
        """
            Apply sigmoid non-linearity to side layer outputs + fuse layer outputs
            Compute total loss := side_layer_loss + fuse_layer_loss
            Compute predicted edge maps from fuse layer as pseudo performance metric to track
        """

        self.predictions = []
        self.loss = 0
        self.io.print_warning('Deep supervision application set to {}'.format(
            self.cfgs['deep_supervision']))
        for idx, b in enumerate(self.side_outputs):
            #tf.summary.image('output_{}'.format(idx), b)
            output = tf.nn.sigmoid(b, name='output_{}'.format(idx))
            cost = sigmoid_cross_entropy_balanced(
                b, self.edgemaps, name='cross_entropy{}'.format(idx))

            self.predictions.append(output)
            if self.cfgs['deep_supervision']:
                self.loss += (self.cfgs['loss_weights'] * cost)

        #tf.summary.image('fuse', self.fuse)
        fuse_output = tf.nn.sigmoid(self.fuse, name='fuse')
        tf.summary.image('fuseSigmoid', fuse_output)
        fuse_cost = sigmoid_cross_entropy_balanced(self.fuse,
                                                   self.edgemaps,
                                                   name='cross_entropy_fuse')
        #tf.summary.image('cvSource', self.images)
        tf.summary.image('cvSourceEdge', self.edgemaps)
        self.predictions.append(fuse_output)
        self.loss += (self.cfgs['loss_weights'] * fuse_cost)

        pred = tf.cast(tf.greater(fuse_output, 0.5),
                       tf.int32,
                       name='predictions')
        error = tf.cast(tf.not_equal(pred, tf.cast(self.edgemaps, tf.int32)),
                        tf.float32)
        self.error = tf.reduce_mean(error, name='pixel_error')

        tf.summary.scalar('loss', self.loss)
        tf.summary.scalar('error', self.error)

        self.merged_summary = tf.summary.merge_all()

        save_dir = self.saveDir
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)

        self.train_writer = tf.summary.FileWriter(save_dir + '/train',
                                                  session.graph)
        self.val_writer = tf.summary.FileWriter(save_dir + '/val')

    '''
    batch-norm variant, for testing
    '''

    def build(self, input_image, is_training):
        with tf.name_scope('processing'):
            #bgr cv2
            b, g, r = tf.split(input_image, 3, axis=3)
            image = tf.concat([b * 0.00390625, g * 0.00390625, r * 0.00390625],
                              axis=3)
        # vgg16
        # block 1
        self.conv1_1 = self.conv_bn_f(image,
                                      is_training=is_training,
                                      name='conv1_1')
        self.conv1_2 = self.conv_bn_f(self.conv1_1,
                                      is_training=is_training,
                                      name='conv1_2')
        self.pool1 = self.max_pool(self.conv1_2, name='pool1')
        # block 2
        self.conv2_1 = self.conv_bn_f(self.pool1,
                                      is_training=is_training,
                                      name='conv2_1')
        self.conv2_2 = self.conv_bn_f(self.conv2_1,
                                      is_training=is_training,
                                      name='conv2_2')
        self.pool2 = self.max_pool(self.conv2_2, name='pool2')
        # block 3
        self.conv3_1 = self.conv_bn_f(self.pool2,
                                      is_training=is_training,
                                      name='conv3_1')
        self.conv3_2 = self.conv_bn_f(self.conv3_1,
                                      is_training=is_training,
                                      name='conv3_2')
        self.conv3_3 = self.conv_bn_f(self.conv3_2,
                                      is_training=is_training,
                                      name='conv3_3')
        self.pool3 = self.max_pool(self.conv3_3, name='pool3')
        # block 4
        self.conv4_1 = self.conv_bn_f(self.pool3,
                                      is_training=is_training,
                                      name='conv4_1')
        self.conv4_2 = self.conv_bn_f(self.conv4_1,
                                      is_training=is_training,
                                      name='conv4_2')
        self.conv4_3 = self.conv_bn_f(self.conv4_2,
                                      is_training=is_training,
                                      name='conv4_3')
        self.pool4 = self.max_pool(self.conv4_3, name='pool4')
        # block 5
        self.conv5_1 = self.conv_bn_f(self.pool4,
                                      is_training=is_training,
                                      name='conv5_1')
        self.conv5_2 = self.conv_bn_f(self.conv5_1,
                                      is_training=is_training,
                                      name='conv5_2')
        self.conv5_3 = self.conv_bn_f(self.conv5_2,
                                      is_training=is_training,
                                      name='conv5_3')

        self.upscore_dsn1_1 = self.conv_bn(self.conv1_1,
                                           ksize=[1, 1, 64, 1],
                                           is_training=is_training,
                                           name='upscore_dsn1_1')
        self.upscore_dsn1_2 = self.conv_bn(self.conv1_2,
                                           ksize=[1, 1, 64, 1],
                                           is_training=is_training,
                                           name='upscore_dsn1_2')

        self.score_dsn2_1 = self.conv_bn(self.conv2_1,
                                         ksize=[1, 1, 128, 1],
                                         is_training=is_training,
                                         name='score_dsn2_1')
        self.upscore_dsn2_1 = self.upsampling(self.score_dsn2_1,
                                              tf.shape(image)[1:3])

        self.score_dsn2_2 = self.conv_bn(self.conv2_2,
                                         ksize=[1, 1, 128, 1],
                                         is_training=is_training,
                                         name='score_dsn2_2')
        self.upscore_dsn2_2 = self.upsampling(self.score_dsn2_2,
                                              tf.shape(image)[1:3])

        self.score_dsn3_1 = self.conv_bn(self.conv3_1,
                                         ksize=[1, 1, 256, 1],
                                         is_training=is_training,
                                         name='score_dsn3_1')
        self.upscore_dsn3_1 = self.upsampling(self.score_dsn3_1,
                                              tf.shape(image)[1:3])

        self.score_dsn3_2 = self.conv_bn(self.conv3_2,
                                         ksize=[1, 1, 256, 1],
                                         is_training=is_training,
                                         name='score_dsn3_2')
        self.upscore_dsn3_2 = self.upsampling(self.score_dsn3_2,
                                              tf.shape(image)[1:3])

        self.score_dsn3_3 = self.conv_bn(self.conv3_3,
                                         ksize=[1, 1, 256, 1],
                                         is_training=is_training,
                                         name='score_dsn3_3')
        self.upscore_dsn3_3 = self.upsampling(self.score_dsn3_3,
                                              tf.shape(image)[1:3])

        self.score_dsn4_1 = self.conv_bn(self.conv4_1,
                                         ksize=[1, 1, 512, 1],
                                         is_training=is_training,
                                         name='score_dsn4_1')
        self.upscore_dsn4_1 = self.upsampling(self.score_dsn4_1,
                                              tf.shape(image)[1:3])

        self.score_dsn4_2 = self.conv_bn(self.conv4_2,
                                         ksize=[1, 1, 512, 1],
                                         is_training=is_training,
                                         name='score_dsn4_2')
        self.upscore_dsn4_2 = self.upsampling(self.score_dsn4_2,
                                              tf.shape(image)[1:3])

        self.score_dsn4_3 = self.conv_bn(self.conv4_3,
                                         ksize=[1, 1, 512, 1],
                                         is_training=is_training,
                                         name='score_dsn4_3')
        self.upscore_dsn4_3 = self.upsampling(self.score_dsn4_3,
                                              tf.shape(image)[1:3])

        self.score_dsn5_1 = self.conv_bn(self.conv5_1,
                                         ksize=[1, 1, 512, 1],
                                         is_training=is_training,
                                         name='score_dsn5_1')
        self.upscore_dsn5_1 = self.upsampling(self.score_dsn5_1,
                                              tf.shape(image)[1:3])

        self.score_dsn5_2 = self.conv_bn(self.conv5_2,
                                         ksize=[1, 1, 512, 1],
                                         is_training=is_training,
                                         name='score_dsn5_2')
        self.upscore_dsn5_2 = self.upsampling(self.score_dsn5_2,
                                              tf.shape(image)[1:3])

        self.score_dsn5_3 = self.conv_bn(self.conv5_3,
                                         ksize=[1, 1, 512, 1],
                                         is_training=is_training,
                                         name='score_dsn5_3')
        self.upscore_dsn5_3 = self.upsampling(self.score_dsn5_3,
                                              tf.shape(image)[1:3])

        self.concat = tf.concat([
            self.upscore_dsn1_1, self.upscore_dsn1_2, self.upscore_dsn2_1,
            self.upscore_dsn2_2, self.upscore_dsn3_1, self.upscore_dsn3_2,
            self.upscore_dsn3_3, self.upscore_dsn4_1, self.upscore_dsn4_2,
            self.upscore_dsn4_3, self.upscore_dsn5_1, self.upscore_dsn5_2,
            self.upscore_dsn5_3
        ],
                                axis=3)

        self.score = self.conv_bn(self.concat,
                                  ksize=[1, 1, 13, self.class_number],
                                  is_training=is_training,
                                  name='score')
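        # Note: adding one scalar constant to every logit leaves the softmax
        # output unchanged; the 1e-4 term below is presumably meant as a
        # numerical-stability epsilon but has no effect on the result.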
        self.softmax = tf.nn.softmax(self.score + tf.constant(1e-4))

        self.pred = tf.argmax(self.softmax, axis=-1)

    '''
    tf.contrib.layers.batch_norm(inputs, decay=0.999, center=True, scale=False, epsilon=0.001, activation_fn=None,
    param_initializers=None, param_regularizers=None, updates_collections=tf.GraphKeys.UPDATE_OPS, is_training=True,
    reuse=None, variables_collections=None, outputs_collections=None, trainable=True, batch_weights=None, fused=None,
    data_format=DATA_FORMAT_NHWC, zero_debias_moving_mean=False, scope=None, renorm=False, renorm_clipping=None, renorm_decay=0.99, adjustment=None)

    tf.contrib.layers.batch_norm parameters:
    1 inputs: the input tensor.
    2 decay: decay rate for the moving averages. Good values are close to 1.0, typically with several nines: 0.999 (the default), 0.99, 0.9. If the model does well on the training set but poorly on validation/test, pick a smaller decay (0.9 is recommended). For extra stability, set zero_debias_moving_mean=True.
    3 center: if True, add a beta offset; if False, no beta offset.
    4 scale: if True, multiply by gamma; if False, gamma is not used. When the next layer is linear (e.g. nn.relu), the scaling can be done by that layer, so it can be disabled here.
    5 epsilon: small constant added to the variance to avoid division by zero.
    6 activation_fn: activation function to apply; defaults to a linear activation.
    7 param_initializers: optional initializers for beta, gamma, moving mean and moving variance.
    8 param_regularizers: optional regularizers for beta and gamma.
    9 updates_collections: collections in which to place the computed update ops; those ops must be run together with train_op. If None, control dependencies are added so the updates happen in place.
    10 is_training: whether the layer is in training mode. In training mode it accumulates the statistics of the incoming batches into moving_mean and moving_variance using the given exponential moving-average decay; outside training mode it uses the stored moving_mean and moving_variance values.
    11 scope: optional variable_scope.
    Note: during training, moving_mean and moving_variance need to be updated. By default the update ops are placed in tf.GraphKeys.UPDATE_OPS, so they have to be added as dependencies of train_op, for example:
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            train_op = optimizer.minimize(loss)
    Setting updates_collections=None forces the updates in place, but that can cost performance, especially in distributed settings.
    '''
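
    '''
    A minimal training-step sketch (an assumption for illustration, not part
    of the original listing) showing the UPDATE_OPS dependency above wired in
    for this model; `labels` and `learning_rate` are hypothetical names:

        loss = tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(
                labels=labels, logits=model.score))
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            train_op = tf.train.AdamOptimizer(learning_rate).minimize(loss)
    '''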

    def conv_bn_f(self, bottom, is_training, name):
        # Fine-tune with batch norm: the filter shapes come from the
        # pretrained VGG-16 parameters, so no ksize argument is needed.
        with tf.variable_scope(name):
            weights = self.get_conv_filter_v2(name)
            out = tf.nn.conv2d(bottom,
                               filter=weights,
                               strides=[1, 1, 1, 1],
                               padding='SAME')
            biases = self.get_bias_v2(name)
            out = tf.nn.bias_add(out, biases)
            # Batch norm before ReLU; is_training is True for training, False for testing.
            out = tf.contrib.layers.batch_norm(
                out, center=True, scale=True,
                is_training=is_training)  # scale=True: gamma is learned here even though a ReLU follows
            out = tf.nn.relu(out)
        return out

    def conv_bn(self, bottom, ksize, is_training, name):
        # Freshly initialized conv layer + batch norm; stride is fixed at [1, 1, 1, 1].
        with tf.variable_scope(name):
            weights = tf.get_variable('weights',
                                      ksize,
                                      tf.float32,
                                      initializer=xavier_initializer())
            biases = tf.get_variable('biases', [ksize[-1]],
                                     tf.float32,
                                     initializer=tf.constant_initializer(0.0))
            out = tf.nn.conv2d(bottom,
                               filter=weights,
                               strides=[1, 1, 1, 1],
                               padding='SAME')
            out = tf.nn.bias_add(out, biases)
            # Batch norm before ReLU, as in conv_bn_f.
            out = tf.contrib.layers.batch_norm(out,
                                               center=True,
                                               scale=True,
                                               is_training=is_training)
            out = tf.nn.relu(out)
        return out

    def get_conv_filter_v2(self, name):
        init = tf.constant_initializer(self.vgg16_params[name]['weights'])
        shape = self.vgg16_params[name]['weights'].shape
        var = tf.get_variable('weights',
                              shape=shape,
                              dtype=tf.float32,
                              initializer=init)
        return var

    def get_bias_v2(self, name):
        init = tf.constant_initializer(self.vgg16_params[name]['biases'])
        shape = self.vgg16_params[name]['biases'].shape  # tuple
        bias = tf.get_variable('biases',
                               shape=shape,
                               dtype=tf.float32,
                               initializer=init)
        return bias

    def upsampling(self, bottom, feature_map_size):
        # feature_map_size: a [height, width] int32 tensor; may be dynamic (e.g. tf.shape(image)[1:3])
        return tf.image.resize_bilinear(bottom, size=feature_map_size)
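
    # tf.image.resize_bilinear accepts a dynamic [height, width] tensor as its
    # size argument, which is why tf.shape(image)[1:3] (unknown at graph-build
    # time) works as the resize target for every side output above.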
Example #19
0
class DataParser():
    def __init__(self, cfgs):

        self.io = IO()
        self.cfgs = cfgs
        self.train_file = os.path.join(cfgs['download_path'],
                                       cfgs['training']['list'])
        self.train_data_dir = os.path.join(cfgs['download_path'],
                                           cfgs['training']['dir'])
        self.training_pairs = self.io.read_file_list(self.train_file)

        self.samples = self.io.split_pair_names(self.training_pairs,
                                                self.train_data_dir)
        self.io.print_info('Training data set-up from {}'.format(
            self.train_file))
        self.n_samples = len(self.training_pairs)

        self.all_ids = list(range(self.n_samples))  # list() so np.random.shuffle can work in place on Python 3
        np.random.shuffle(self.all_ids)

        self.training_ids = self.all_ids[:int(self.cfgs['train_split'] *
                                              len(self.training_pairs))]
        self.validation_ids = self.all_ids[int(self.cfgs['train_split'] *
                                               len(self.training_pairs)):]

        self.io.print_info('Training samples {}'.format(len(
            self.training_ids)))
        self.io.print_info('Validation samples {}'.format(
            len(self.validation_ids)))

    def get_training_batch(self):

        batch_ids = np.random.choice(self.training_ids,
                                     self.cfgs['batch_size_train'])

        return self.get_batch(batch_ids)

    def get_validation_batch(self):

        batch_ids = np.random.choice(self.validation_ids,
                                     self.cfgs['batch_size_val'])

        return self.get_batch(batch_ids)

    def get_batch(self, batch):

        tstart = time.time()

        filenames = []
        images = []
        edgemaps = []

        for idx, b in enumerate(batch):

            im = Image.open(self.samples[b][0])
            em = Image.open(self.samples[b][1])

            im = im.resize((self.cfgs['training']['image_width'],
                            self.cfgs['training']['image_height']))
            em = em.resize((self.cfgs['training']['image_width'],
                            self.cfgs['training']['image_height']))

            im = np.array(im, dtype=np.float32)
            im = im[:, :, self.cfgs['channel_swap']]
            im -= self.cfgs['mean_pixel_value']

            em = np.array(em.convert('L'), dtype=np.float32)

            if self.cfgs['target_regression']:
                bin_em = em / 255.0
            else:
                bin_em = np.zeros_like(em)
                bin_em[np.where(em)] = 1

            # Some edge maps have 3 channels, some don't
            bin_em = bin_em if bin_em.ndim == 2 else bin_em[:, :, 0]
            # To fit [batch_size, H, W, 1] output of the network
            bin_em = np.expand_dims(bin_em, 2)

            images.append(im)
            edgemaps.append(bin_em)
            filenames.append(self.samples[b])

        return images, edgemaps, filenames
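
    # A hedged usage sketch (names assumed, not part of the original listing):
    #
    #   parser = DataParser(cfgs)
    #   images, edgemaps, filenames = parser.get_training_batch()
    #   # images: list of HxWxC float32 arrays, mean-subtracted and channel-swapped
    #   # edgemaps: list of HxWx1 float32 arrays with values in [0, 1]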
Example #20
0
class HEDTester():

    def __init__(self, config_file):

        self.io = IO()
        self.init = True

        try:
            pfile = open(config_file)
            self.cfgs = yaml.safe_load(pfile)  # safe_load: plain yaml.load without a Loader is deprecated
            pfile.close()

        except Exception as err:

            self.io.print_error('Error reading config file {}, {}'.format(config_file, err))

    def setup(self, session):

        try:

            self.model = Vgg16(self.cfgs, run='testing')

            meta_model_file = os.path.join(self.cfgs['save_dir'], 'models/hed-model-{}'.format(self.cfgs['test_snapshot']))

            saver = tf.train.Saver()
            saver.restore(session, meta_model_file)

            self.io.print_info('Done restoring VGG-16 model from {}'.format(meta_model_file))

        except Exception as err:

            self.io.print_error('Error setting up VGG-16 model, {}'.format(err))
            self.init = False

    def run(self, session):

        if not self.init:
            return

        self.model.setup_testing(session)

        filepath = os.path.join(self.cfgs['download_path'], self.cfgs['testing']['list'])
        test_list = self.io.read_file_list(filepath)  # list of testing filenames, not training ones

        self.io.print_info('Writing PNGs at {}'.format(self.cfgs['test_output']))

        for idx, img in enumerate(test_list):

            test_filename = os.path.join(self.cfgs['download_path'], self.cfgs['testing']['dir'], img)
            im = self.fetch_image(test_filename)

            edgemap = session.run(self.model.predictions, feed_dict={self.model.images: [im]})
            # self.save_egdemaps(edgemap, idx)

            self.io.print_info('Done testing {}, {}'.format(test_filename, im.shape))

    def save_egdemaps(self, em_maps, index):

        # Take the edge maps produced by the side layers, then append their
        # mean as a simple fused map.
        em_maps = [e[0] for e in em_maps]
        em_maps = em_maps + [np.mean(np.array(em_maps), axis=0)]

        for idx, em in enumerate(em_maps):

            em[em < self.cfgs['testing_threshold']] = 0.0

            em = 255.0 * (1.0 - em)
            em = np.tile(em, [1, 1, 3])

            em = Image.fromarray(np.uint8(em))
            em.save(os.path.join(self.cfgs['test_output'], 'testing-{}-{:03}.png'.format(index, idx)))

            # Re-read the saved map as grayscale, invert it, binarize with
            # Otsu's threshold, then erode and thin to single-pixel-wide edges.
            # (cv2.CV_LOAD_IMAGE_GRAYSCALE was removed in OpenCV 3; use cv2.IMREAD_GRAYSCALE.)
            im = 255 - cv2.imread(os.path.join(self.cfgs['test_output'], 'testing-{}-{:03}.png'.format(index, idx)), cv2.IMREAD_GRAYSCALE)
            _, bw2 = cv2.threshold(im, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)
            bw2 = cv2.erode(bw2, np.ones((3, 3), np.uint8), iterations=1)
            bw2 = thinning(bw2)
            cv2.imwrite(os.path.join(self.cfgs['test_output'], 'testing-{}-{:03}-th.png'.format(index, idx)), 255 - bw2)

    def fetch_image(self, test_image):

        # If the argument parses as a URL, fetch it over the network;
        # otherwise fall through and read it from disk.
        # (Python 3: requires `import urllib.request` and `import urllib.parse`.)
        image = None

        if urllib.parse.urlparse(test_image).scheme != "":

            url_response = urllib.request.urlopen(test_image)

            if url_response.code == 404:  # note: in Python 3, urlopen usually raises HTTPError on 404
                self.io.print_error('[Testing] URL error code : {1} for {0}'.format(test_image, url_response.code))
                return None

            try:

                image_buffer = io.BytesIO(url_response.read())
                image = self.capture_pixels(image_buffer)

            except Exception as err:

                self.io.print_error('[Testing] Error with URL {0} {1}'.format(test_image, err))
                return None

        # read from disk
        elif os.path.exists(test_image):

            try:

                fid = open(test_image, 'rb')  # binary mode: the raw bytes feed io.BytesIO
                stream = fid.read()
                fid.close()

                image_buffer = io.BytesIO(stream)
                image = self.capture_pixels(image_buffer)

            except Exception as err:

                self.io.print_error('[Testing] Error with image file {0} {1}'.format(test_image, err))
                return None

        return image

    def capture_pixels(self, image_buffer):

        image = Image.open(image_buffer)
        image = image.resize((self.cfgs['testing']['image_width'], self.cfgs['testing']['image_height']))
        image = np.array(image, np.float32)
        image = self.colorize(image)

        image = image[:, :, self.cfgs['channel_swap']]
        image -= self.cfgs['mean_pixel_value']

        return image

    def colorize(self, image):

        # BW to 3 channel RGB image
        if image.ndim == 2:
            image = image[:, :, np.newaxis]
            image = np.tile(image, (1, 1, 3))
        elif image.shape[2] == 4:
            image = image[:, :, :3]

        return image
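
    # A hedged usage sketch (assumed wiring; the session setup and config
    # filename are assumptions, not from the original listing):
    #
    #   session = tf.Session()
    #   tester = HEDTester('hed_config.yaml')
    #   tester.setup(session)
    #   tester.run(session)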
Example #21
0
class HEDTester():
    def __init__(self, config_file, number_iterations, path_test_image):

        self.io = IO()
        self.init = True

        try:
            pfile = open(config_file)
            self.cfgs = yaml.safe_load(pfile)  # safe_load: plain yaml.load without a Loader is deprecated
            pfile.close()

        except Exception as err:

            self.io.print_error(
                'Error reading config file {}, {}'.format(config_file, err))

        # Plain assignments cannot raise, so the try/except blocks that
        # originally wrapped these were dead code; whether the snapshot and
        # the test image actually exist is only checked later, in setup()
        # and run().
        self.test_snapshot = number_iterations
        self.test_image = path_test_image

    def setup(self, session):

        try:

            self.model = Vgg16(self.cfgs, run='testing')

            meta_model_file = os.path.join(
                self.cfgs['save_dir'],
                'models/hed-model-{}'.format(self.test_snapshot))

            saver = tf.train.Saver()
            saver.restore(session, meta_model_file)

            self.io.print_info(
                'Done restoring VGG-16 model from {}'.format(meta_model_file))

        except Exception as err:

            self.io.print_error(
                'Error setting up VGG-16 model, {}'.format(err))
            self.init = False

    def run(self, session):

        if not self.init:
            return

        self.model.setup_testing(session)
        test_filename = self.test_image
        im = sio.loadmat(test_filename)['noisy']
        new_im = im.astype(np.float32)
        MM, NN = np.shape(new_im)
        GR_n = np.zeros((MM, NN, 6), dtype=np.float32)
        GR_p = np.zeros((MM, NN, 6), dtype=np.float32)
        for alpha in range(1, 7):
            GR_n[:, :, alpha - 1], GR_p[:, :, alpha - 1] = compute_GR(new_im, alpha)

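        # Mean-subtract, then keep channels 1..4 of the six alpha responses
        # to form the 4-channel input the network expects.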
        GR_n -= self.cfgs['mean_pixel_value']
        im = GR_n[:, :, 1:5]
        edgemap = session.run(self.model.predictions,
                              feed_dict={self.model.images: [im]})

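        # e[0] takes the single batch element of each side output; their mean,
        # appended last, serves as a simple fused prediction.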
        em_maps = [e[0] for e in edgemap]
        em_maps = em_maps + [np.mean(np.array(em_maps), axis=0)]
        plt.figure()
        for idx, em in enumerate(em_maps):
            em = np.squeeze(em)
            save_mat_dir = 'temp_field'
            if not isdir(save_mat_dir):
                os.makedirs(save_mat_dir)

            save_mat_path = os.path.join(save_mat_dir,
                                         'testing-example-{}'.format(idx))
            sio.savemat(save_mat_path, {'magnitude_field': em})