Example #1
import os
import time

import tensorflow as tf  # TF 1.x API

import model  # project-local style-transfer network
import utils  # project-local preprocessing helpers


def main(args):
    g = tf.Graph()      # A new graph
    with g.as_default():
        with tf.Session() as sess:
            # Building graph.
            image_data = tf.placeholder(tf.int32, name='input_image')
            height = tf.placeholder(tf.int32, name='height')
            width = tf.placeholder(tf.int32, name='width')

            # Reshape data
            image = tf.reshape(image_data, [height, width, 3])

            processed_image = utils.mean_image_subtraction(
                image, [123.68, 116.779, 103.939])                    # Preprocessing image
            batched_image = tf.expand_dims(processed_image, 0)        # Add batch dimension
            generated_image = model.net(batched_image, training=False)
            casted_image = tf.cast(generated_image, tf.int32)
            # Remove batch dimension
            squeezed_image = tf.squeeze(casted_image, [0])
            cropped_image = tf.slice(squeezed_image, [0, 0, 0], [height, width, 3])
            # stylized_image = tf.image.encode_jpeg(squeezed_image, name='output_image')
            stylized_image_data = tf.reshape(cropped_image, [-1], name='output_image')

            # Restore model variables.
            saver = tf.train.Saver(tf.global_variables(), write_version=tf.train.SaverDef.V1)
            sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
            # Use absolute path.
            model_file = os.path.abspath(args.model_file)
            saver.restore(sess, model_file)

            if args.is_debug:
                content_file = '/Users/Lex/Desktop/t.jpg'
                generated_file = '/Users/Lex/Desktop/xwz-stylized.jpg'

                with open(generated_file, 'wb') as img:
                    image_bytes = tf.read_file(content_file)
                    input_array, decoded_image = sess.run([
                        tf.reshape(tf.image.decode_jpeg(image_bytes, channels=3), [-1]),
                        tf.image.decode_jpeg(image_bytes, channels=3)])

                    start_time = time.time()
                    img.write(sess.run(tf.image.encode_jpeg(tf.cast(cropped_image, tf.uint8)), feed_dict={
                              image_data: input_array,
                              height: decoded_image.shape[0],
                              width: decoded_image.shape[1]}))
                    end_time = time.time()

                    tf.logging.info('Elapsed time: %fs' % (end_time - start_time))
            else:
                output_graph_def = tf.graph_util.convert_variables_to_constants(
                    sess, sess.graph_def, output_node_names=['output_image'])

                with tf.gfile.FastGFile('/Users/Lex/Desktop/' + args.model_name + '.pb', mode='wb') as f:
                    f.write(output_graph_def.SerializeToString())
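A minimal sketch of how the frozen graph written by the `else` branch could be loaded back and run. The node names (input_image, height, width, output_image) come from the graph above; the .pb path and the random test image are hypothetical:

import numpy as np
import tensorflow as tf

graph_def = tf.GraphDef()
with tf.gfile.GFile('/Users/Lex/Desktop/style.pb', 'rb') as f:  # hypothetical path
    graph_def.ParseFromString(f.read())

with tf.Graph().as_default() as graph:
    tf.import_graph_def(graph_def, name='')

with tf.Session(graph=graph) as sess:
    # Stand-in image; the graph expects a flattened int32 pixel buffer.
    test_image = np.random.randint(0, 256, size=(256, 256, 3)).astype(np.int32)
    stylized = sess.run('output_image:0', feed_dict={
        'input_image:0': test_image.reshape(-1),
        'height:0': 256,
        'width:0': 256})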
Example #2
    # Quantization-aware forward pass (PyTorch). Assumes `torch`, `math`, and the
    # project-local `utils` are imported, and that the quant/dequant stubs are
    # created in __init__ (see the sketch after this example).
    def forward(self, images):
        images = self.quant(images)
        images = utils.mean_image_subtraction(images)

        f0 = self.s1(images)
        f1 = self.s2(f0)
        f2 = self.s3(f1)
        f3 = self.s4(f2)

        # _, f = self.mobilenet(images)
        h = f3  # bs 2048 w/32 h/32
        g = (self.unpool1(h))  # bs 2048 w/16 h/16
        c = self.conv1(self.skip_add.cat((g, f2), 1))
        c = self.bn1(c)
        c = self.relu1(c)

        h = self.conv2(c)  # bs 128 w/16 h/16
        h = self.bn2(h)
        h = self.relu2(h)
        g = self.unpool2(h)  # bs 128 w/8 h/8
        c = self.conv3(self.skip_add.cat((g, f1), 1))
        c = self.bn3(c)
        c = self.relu3(c)

        h = self.conv4(c)  # bs 64 w/8 h/8
        h = self.bn4(h)
        h = self.relu4(h)
        g = self.unpool3(h)  # bs 64 w/4 h/4
        c = self.conv5(self.skip_add.cat((g, f0), 1))
        c = self.bn5(c)
        c = self.relu5(c)

        h = self.conv6(c)  # bs 32 w/4 h/4
        h = self.bn6(h)
        h = self.relu6(h)
        g = self.conv7(h)  # bs 32 w/4 h/4
        g = self.bn7(g)
        g = self.relu7(g)

        F_score = self.conv8(g)  # bs 1 w/4 h/4
        F_score = self.sigmoid1(F_score)
        geo_map = self.conv9(g)
        geo_map = self.sigmoid2(geo_map) * 512
        angle_map = self.conv10(g)
        angle_map = self.sigmoid3(angle_map)
        angle_map = (angle_map - 0.5) * math.pi / 2

        # DeQuantStub returns the converted tensor, so the result must be reassigned.
        F_score = self.dequant(F_score)
        geo_map = self.dequant(geo_map)
        angle_map = self.dequant(angle_map)

        F_geometry = torch.cat((geo_map, angle_map), 1)  # bs 5 w/4 h/4

        return F_score, F_geometry
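This forward pass only works if the quantization plumbing is created in the constructor. A minimal, self-contained sketch of the assumed pattern (QuantStub/DeQuantStub act as identities until the model is actually quantized; FloatFunctional provides a quantization-safe cat):

import torch
from torch import nn

class QuantSkeleton(nn.Module):
    """Hypothetical stand-in showing the stubs the forward() above relies on."""

    def __init__(self):
        super().__init__()
        self.quant = torch.quantization.QuantStub()      # float -> quantized boundary
        self.dequant = torch.quantization.DeQuantStub()  # quantized -> float boundary
        self.skip_add = nn.quantized.FloatFunctional()   # quantization-aware cat/add

    def forward(self, x, skip):
        x = self.quant(x)
        x = self.skip_add.cat((x, skip), 1)  # same skip-connection pattern as above
        return self.dequant(x)               # the return value must be kept

out = QuantSkeleton()(torch.randn(1, 3, 8, 8), torch.randn(1, 3, 8, 8))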
Example #3
# Assumes TF 1.x with `slim = tf.contrib.slim`, the project-local `utils` module,
# and module-level constants _R_MEAN, _G_MEAN, _B_MEAN, upsample_factor, and
# number_of_classes.
def seg(seg_img):
    # An alternative input pipeline (reading and decoding a JPEG/PNG from disk)
    # is omitted here; the function takes an in-memory RGB array instead.
    image = seg_img
    image_tensor = tf.stack(values=image)
    image_float = tf.to_float(image_tensor)
    mean_image = utils.mean_image_subtraction(image_float,
                                              [_R_MEAN, _G_MEAN, _B_MEAN])
    processed_image = tf.expand_dims(input=mean_image, axis=0)

    upsample_filter_8s_np = utils.bilinear_upsample_weights(
        upsample_factor, number_of_classes)
    upsample_filter_2s_np = utils.bilinear_upsample_weights(
        2, number_of_classes)

    upsample_filter_8_tensor = tf.constant(upsample_filter_8s_np)
    upsample_filter_2_tensor = tf.constant(upsample_filter_2s_np)

    with tf.variable_scope("fcn_8s") as fcn_8s_scope:
        with slim.arg_scope(utils.vgg_arg_scope()):
            last_layer_logits, end_points = utils.vgg_16(
                processed_image,
                num_classes=2,
                is_training=False,
                spatial_squeeze=False,
                fc_conv_padding='SAME')

        last_layer_logits_shape = tf.shape(last_layer_logits)
        # last downsample layer

        last_layer_upsample_logits_shape = tf.stack([
            last_layer_logits_shape[0], last_layer_logits_shape[1] * 2,
            last_layer_logits_shape[2] * 2, last_layer_logits_shape[3]
        ])

        last_layer_upsample_logits = tf.nn.conv2d_transpose(
            value=last_layer_logits,
            filter=upsample_filter_2_tensor,
            output_shape=last_layer_upsample_logits_shape,
            strides=[1, 2, 2, 1])

        pool4_features = end_points['fcn_8s/vgg_16/pool4']
        pool4_logits = slim.conv2d(pool4_features,
                                   number_of_classes, [1, 1],
                                   activation_fn=None,
                                   normalizer_fn=None,
                                   weights_initializer=tf.zeros_initializer(),
                                   scope='pool4_fc')
        fused_last_layer_and_pool4_logits = pool4_logits + last_layer_upsample_logits
        fused_last_layer_and_pool4_logits_shape = tf.shape(
            fused_last_layer_and_pool4_logits)

        fused_last_layer_and_pool4_upsampled_by_factor_2_logits_shape = tf.stack(
            [
                fused_last_layer_and_pool4_logits_shape[0],
                fused_last_layer_and_pool4_logits_shape[1] * 2,
                fused_last_layer_and_pool4_logits_shape[2] * 2,
                fused_last_layer_and_pool4_logits_shape[3]
            ])
        fused_last_layer_and_pool4_upsampled_by_factor_2_logits = tf.nn.conv2d_transpose(
            value=fused_last_layer_and_pool4_logits,
            filter=upsample_filter_2_tensor,
            output_shape=
            fused_last_layer_and_pool4_upsampled_by_factor_2_logits_shape,
            strides=[1, 2, 2, 1])

        pool3_features = end_points['fcn_8s/vgg_16/pool3']

        pool3_logits = slim.conv2d(pool3_features,
                                   number_of_classes, [1, 1],
                                   activation_fn=None,
                                   normalizer_fn=None,
                                   weights_initializer=tf.zeros_initializer(),
                                   scope='pool3_fc')

        fused_last_layer_and_pool4_and_pool3_logits = pool3_logits + fused_last_layer_and_pool4_upsampled_by_factor_2_logits

        fused_last_layer_and_pool4_and_pool3_logits_shape = tf.shape(
            fused_last_layer_and_pool4_and_pool3_logits)

        fused_last_layer_pool4_pool3_upsampled_by_8_logits_shape = tf.stack([
            fused_last_layer_and_pool4_and_pool3_logits_shape[0],
            fused_last_layer_and_pool4_and_pool3_logits_shape[1] *
            upsample_factor,
            fused_last_layer_and_pool4_and_pool3_logits_shape[2] *
            upsample_factor,
            fused_last_layer_and_pool4_and_pool3_logits_shape[3]
        ])
        fused_last_layer_pool4_pool3_logits = tf.nn.conv2d_transpose(
            value=fused_last_layer_and_pool4_and_pool3_logits,
            filter=upsample_filter_8_tensor,
            output_shape=
            fused_last_layer_pool4_pool3_upsampled_by_8_logits_shape,
            strides=[1, upsample_factor, upsample_factor, 1])

    pred = tf.argmax(fused_last_layer_pool4_pool3_logits, axis=3)
    probabilities = tf.nn.softmax(fused_last_layer_pool4_pool3_logits)

    initializer = tf.local_variables_initializer()
    saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(initializer)

        # saver.restore(sess, r"E:\python program\checkpoint_ing\fcn32s_10e.ckpt")
        saver.restore(sess,
                      r"E:\python program\checkpoint_8s_6k\fcn8s_10e.ckpt")

        image_np, pred_np, final_probabilities = sess.run(
            [image_tensor, pred, probabilities])

        # plt.imshow(pred_np.squeeze())
        # plt.show()
        return pred_np.squeeze()
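A minimal usage sketch, assuming the module-level constants (_R_MEAN, _G_MEAN, _B_MEAN, upsample_factor, number_of_classes) and the checkpoint path are valid; the input array is a hypothetical stand-in:

import numpy as np

tile = np.random.randint(0, 256, size=(224, 224, 3)).astype(np.uint8)  # stand-in RGB tile
mask = seg(tile)  # 2-D array of per-pixel class indices

Note that seg() rebuilds the whole graph and restores the checkpoint on every call; Example #5 below refactors the same graph into a method that returns a reusable session.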
Example #4
# Assumes `from tensorflow.saved_model import signature_constants, tag_constants`,
# plus module-level FLAGS, _INPUT_NAME, and _OUTPUT_NAME, and the project-local
# `utils` and `model` modules.
def main(unused_argv):
    tf.logging.set_verbosity(tf.logging.INFO)

    with tf.Graph().as_default(), tf.Session() as sess:
        # Building graph.
        inputTensor = None
        if FLAGS.saved_model_version is not None:
            inputTensor = tf.placeholder(shape=[],
                                         dtype=tf.string,
                                         name=_INPUT_NAME)
            decoded_bytes = tf.io.decode_base64(inputTensor)
            image_data = tf.image.decode_image(decoded_bytes, channels=3)
        else:
            image_data = tf.placeholder(tf.uint8,
                                        shape=(None, None, 3),
                                        name=_INPUT_NAME)
            inputTensor = image_data

        image_shape = tf.shape(image_data)

        # Preprocessing image
        processed_image = utils.mean_image_subtraction(
            image_data, [123.68, 116.779, 103.939])

        # Add batch dimension
        batched_image = tf.expand_dims(processed_image, 0)
        generated_image = model.net(batched_image, training=False)
        casted_image = tf.cast(generated_image, tf.uint8)

        # Remove batch dimension
        squeezed_image = tf.squeeze(casted_image, [0])

        cropped_image = tf.slice(squeezed_image, [0, 0, 0], image_shape)

        # Restore model variables.
        saver = tf.train.Saver()
        saver.restore(sess, FLAGS.checkpoint_path)

        if FLAGS.saved_model_version is not None:
            builder = tf.saved_model.builder.SavedModelBuilder(
                "style/Servo/" + FLAGS.saved_model_version)

            # Creates the TensorInfo protobuf objects that encapsulates the input/output tensors
            tensor_info_input = tf.saved_model.utils.build_tensor_info(
                inputTensor)

            # output tensor info
            styled_image = tf.image.encode_png(cropped_image,
                                               name=_OUTPUT_NAME)
            tensor_info_output = tf.saved_model.utils.build_tensor_info(
                styled_image)

            sigs = {}
            sigs[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] = \
                tf.saved_model.signature_def_utils.build_signature_def(
                    inputs = {"input" : tensor_info_input},
                    outputs = {"output_bytes" : tensor_info_output},
                    method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME
                )

            builder.add_meta_graph_and_variables(sess, [tag_constants.SERVING],
                                                 signature_def_map=sigs,
                                                 clear_devices=True)

            builder.save()

        else:
            styled_image = tf.identity(cropped_image, name=_OUTPUT_NAME)

            output_graph_def = tf.graph_util.convert_variables_to_constants(
                sess, sess.graph_def, output_node_names=[_OUTPUT_NAME])

            with tf.gfile.FastGFile(FLAGS.export_path, mode='wb') as f:
                f.write(output_graph_def.SerializeToString())
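A minimal smoke test for the exported SavedModel, assuming version '1' was passed as the saved_model_version flag; the input file and the _INPUT_NAME/_OUTPUT_NAME values below are hypothetical placeholders that must match the exporting script. tf.io.decode_base64 expects web-safe base64, hence urlsafe_b64encode:

import base64
import tensorflow as tf

_INPUT_NAME, _OUTPUT_NAME = 'input_b64', 'output_image'  # must match the exporter

with tf.Session(graph=tf.Graph()) as sess:
    tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING],
                               'style/Servo/1')
    with open('content.jpg', 'rb') as f:  # hypothetical input image
        payload = base64.urlsafe_b64encode(f.read()).decode('ascii')
    png_bytes = sess.run(_OUTPUT_NAME + ':0',
                         feed_dict={_INPUT_NAME + ':0': payload})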
Example #5
    # Same FCN-8s graph as Example #3, but wrapped in a class method that builds
    # the graph once and returns a live session for repeated inference.
    def seg(self):
        image = self.image
        #image_tensor = tf.stack(values=image)
        image_float = tf.to_float(image)
        mean_image = utils.mean_image_subtraction(image_float,
                                                  [_R_MEAN, _G_MEAN, _B_MEAN])
        processed_image = tf.expand_dims(input=mean_image, axis=0)

        upsample_filter_8s_np = utils.bilinear_upsample_weights(
            upsample_factor, number_of_classes)
        upsample_filter_2s_np = utils.bilinear_upsample_weights(
            2, number_of_classes)

        upsample_filter_8_tensor = tf.constant(upsample_filter_8s_np)
        upsample_filter_2_tensor = tf.constant(upsample_filter_2s_np)

        with tf.variable_scope("fcn_8s") as fcn_8s_scope:
            with slim.arg_scope(utils.vgg_arg_scope()):
                last_layer_logits, end_points = utils.vgg_16(
                    processed_image,
                    num_classes=2,
                    is_training=False,
                    spatial_squeeze=False,
                    fc_conv_padding='SAME')

            last_layer_logits_shape = tf.shape(last_layer_logits)
            # last downsample layer

            last_layer_upsample_logits_shape = tf.stack([
                last_layer_logits_shape[0], last_layer_logits_shape[1] * 2,
                last_layer_logits_shape[2] * 2, last_layer_logits_shape[3]
            ])

            last_layer_upsample_logits = tf.nn.conv2d_transpose(
                value=last_layer_logits,
                filter=upsample_filter_2_tensor,
                output_shape=last_layer_upsample_logits_shape,
                strides=[1, 2, 2, 1])

            pool4_features = end_points['fcn_8s/vgg_16/pool4']
            pool4_logits = slim.conv2d(
                pool4_features,
                number_of_classes, [1, 1],
                activation_fn=None,
                normalizer_fn=None,
                weights_initializer=tf.zeros_initializer(),
                scope='pool4_fc')
            fused_last_layer_and_pool4_logits = pool4_logits + last_layer_upsample_logits
            fused_last_layer_and_pool4_logits_shape = tf.shape(
                fused_last_layer_and_pool4_logits)

            fused_last_layer_and_pool4_upsampled_by_factor_2_logits_shape = tf.stack(
                [
                    fused_last_layer_and_pool4_logits_shape[0],
                    fused_last_layer_and_pool4_logits_shape[1] * 2,
                    fused_last_layer_and_pool4_logits_shape[2] * 2,
                    fused_last_layer_and_pool4_logits_shape[3]
                ])
            fused_last_layer_and_pool4_upsampled_by_factor_2_logits = tf.nn.conv2d_transpose(
                value=fused_last_layer_and_pool4_logits,
                filter=upsample_filter_2_tensor,
                output_shape=
                fused_last_layer_and_pool4_upsampled_by_factor_2_logits_shape,
                strides=[1, 2, 2, 1])

            pool3_features = end_points['fcn_8s/vgg_16/pool3']

            pool3_logits = slim.conv2d(
                pool3_features,
                number_of_classes, [1, 1],
                activation_fn=None,
                normalizer_fn=None,
                weights_initializer=tf.zeros_initializer(),
                scope='pool3_fc')

            fused_last_layer_and_pool4_and_pool3_logits = pool3_logits + fused_last_layer_and_pool4_upsampled_by_factor_2_logits

            fused_last_layer_and_pool4_and_pool3_logits_shape = tf.shape(
                fused_last_layer_and_pool4_and_pool3_logits)

            fused_last_layer_pool4_pool3_upsampled_by_8_logits_shape = tf.stack(
                [
                    fused_last_layer_and_pool4_and_pool3_logits_shape[0],
                    fused_last_layer_and_pool4_and_pool3_logits_shape[1] *
                    upsample_factor,
                    fused_last_layer_and_pool4_and_pool3_logits_shape[2] *
                    upsample_factor,
                    fused_last_layer_and_pool4_and_pool3_logits_shape[3]
                ])
            fused_last_layer_pool4_pool3_logits = tf.nn.conv2d_transpose(
                value=fused_last_layer_and_pool4_and_pool3_logits,
                filter=upsample_filter_8_tensor,
                output_shape=
                fused_last_layer_pool4_pool3_upsampled_by_8_logits_shape,
                strides=[1, upsample_factor, upsample_factor, 1])

        pred = tf.argmax(fused_last_layer_pool4_pool3_logits, axis=3)
        probabilities = tf.nn.softmax(fused_last_layer_pool4_pool3_logits)

        initializer = tf.local_variables_initializer()
        saver = tf.train.Saver()
        _gpu_options = tf.GPUOptions(allow_growth=False,
                                     per_process_gpu_memory_fraction=0.2,
                                     visible_device_list='0')

        if not os.environ.get('OMP_NUM_THREADS'):
            config = tf.ConfigProto(allow_soft_placement=True,
                                    gpu_options=_gpu_options)
        else:
            num_thread = int(os.environ.get('OMP_NUM_THREADS'))
            config = tf.ConfigProto(intra_op_parallelism_threads=num_thread,
                                    allow_soft_placement=True,
                                    gpu_options=_gpu_options)
        sess = tf.Session(config=config)
        sess.run(initializer)
        # Model path.
        saver.restore(sess, r"./seg_model/checkpoint_8s_6k/fcn8s_10e.ckpt")
        return sess, pred, probabilities
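Because the session and output tensors are returned, the graph is built and the checkpoint restored only once. A usage sketch (hypothetical: the wrapper class and how self.image is bound are not shown in the snippet):

segmenter = Segmenter(image)  # hypothetical wrapper; `image` is the input tensor
sess, pred, probabilities = segmenter.seg()
pred_np, prob_np = sess.run([pred, probabilities])  # repeatable without rebuilding
sess.close()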
Example #6
# Assumes TF 1.x plus `matplotlib.pyplot as plt`, `os`, `time`, and the
# project-local `reader`, `preprocessing_factory`, `utils`, `model`, and
# `save_graph_to_file` helpers.
def main(_):

    tf.logging.set_verbosity(tf.logging.INFO)
    # Get image's height and width.
    height = 0
    width = 0
    with tf.gfile.GFile(FLAGS.image_file, 'rb') as f:
        with tf.Session().as_default() as sess:
            if FLAGS.image_file.lower().endswith('png'):
                image = sess.run(tf.image.decode_png(f.read()))
            else:
                image = sess.run(tf.image.decode_jpeg(f.read()))
            height = image.shape[0]
            width = image.shape[1]
    tf.logging.info('Image size: %dx%d' % (width, height))

    with tf.Graph().as_default():
        with tf.Session().as_default() as sess:

            # Read image data.
            image_preprocessing_fn, _ = preprocessing_factory.get_preprocessing(
                FLAGS.loss_model,
                is_training=False)
            image = reader.get_image(FLAGS.image_file, height, width, image_preprocessing_fn)
            print(image)
            plt.subplot(121)
            np_image = sess.run(image)
            plt.imshow(np_image)
            
            input_shape = (None, None, 3)
            input_tensor = tf.placeholder(dtype=tf.uint8, shape=input_shape, name='image_tensor')
            print(input_tensor)
            with tf.variable_scope("input_process"):   
                processed_image = utils.mean_image_subtraction(
                    input_tensor, [123.68, 116.779, 103.939])                    # Preprocessing image
                batched_image = tf.expand_dims(processed_image, 0)               # Add batch dimension

            generated = model.net(batched_image, training=False)
            
            generated = tf.cast(generated, tf.uint8)
            # Remove batch dimension
            generated = tf.squeeze(generated, [0], name='output_image')
            

            # Restore model variables.
            saver = tf.train.Saver(tf.global_variables(), write_version=tf.train.SaverDef.V1)
            sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
            # Use absolute path
            FLAGS.model_file = os.path.abspath(FLAGS.model_file)
            saver.restore(sess, FLAGS.model_file)
            
            summary_writer = tf.summary.FileWriter("logs", sess.graph)
            save_graph_to_file(sess, sess.graph_def, "models/new_freeze_graph.pb")
            
             
            # Make sure 'generated' directory exists.
            generated_file = 'generated/res.jpg'
            if not os.path.exists('generated'):
                os.makedirs('generated')

            # Generate and write image data to file.
            with tf.gfile.GFile(generated_file, 'wb') as f:
                feed_dict = {input_tensor: np_image}
                plt.subplot(122)
                plt.imshow(sess.run(generated, feed_dict))
                plt.show()
                start_time = time.time()
                f.write(sess.run(tf.image.encode_jpeg(generated), feed_dict))
                end_time = time.time()
                tf.logging.info('Elapsed time: %fs' % (end_time - start_time))
                tf.logging.info('Done. Please check %s.' % generated_file)
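save_graph_to_file is a project-local helper that is not shown here. A plausible minimal implementation, following the same freeze-and-serialize pattern as Examples #1 and #4 (an assumption, for illustration):

def save_graph_to_file(sess, graph_def, graph_file_name):
    # Fold the restored variables into constants so the graph is self-contained.
    output_graph_def = tf.graph_util.convert_variables_to_constants(
        sess, graph_def, output_node_names=['output_image'])
    with tf.gfile.GFile(graph_file_name, 'wb') as f:
        f.write(output_graph_def.SerializeToString())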