Example #1
    def reshape_decode(self, data, shape):
        if self.float_data:  # @TODO(tzaman): this is LMDB specific - Make generic!
            data = tf.reshape(data, shape)
            data = digits.chw_to_hwc(data)
        else:
            # Decode images of any type that might come in; see: https://github.com/tensorflow/tensorflow/issues/4009
            # Distinguish between mime types
            if self.data_encoded:
                if self.data_mime == 'image/png':
                    data = tf.image.decode_png(data, dtype=self.image_dtype, name='image_decoder')
                elif self.data_mime == 'image/jpeg':
                    data = tf.image.decode_jpeg(data, name='image_decoder')
                else:
                    logging.error('Unsupported mime type (%s); cannot be decoded' % (self.data_mime))
                    exit(-1)
            else:
                if self.backend == 'lmdb':
                    data = tf.decode_raw(data, self.image_dtype, name='raw_decoder')

                # if data is in CHW, set the shape and convert to HWC
                if self.unencoded_data_format == 'chw':
                    data = tf.reshape(data, [shape[0], shape[1], shape[2]])
                    data = digits.chw_to_hwc(data)
                else:  # 'hwc'
                    data = tf.reshape(data, shape)

                if (self.channels == 3) and self.unencoded_channel_scheme == 'bgr':
                    data = digits.bgr_to_rgb(data)

            # Convert to float
            data = tf.to_float(data)
            # data = tf.image.convert_image_dtype(data, tf.float32) # normalize to [0:1) range
        return data
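
The digits.chw_to_hwc and digits.bgr_to_rgb helpers used above are external to this snippet; a minimal sketch of what they presumably do, assuming plain TensorFlow ops (only the names come from the snippet, the bodies are an assumption):

import tensorflow as tf

def chw_to_hwc(data):
    # Assumed implementation: move the channel axis from position 0 to position 2,
    # i.e. (C, H, W) -> (H, W, C), via a transpose.
    return tf.transpose(data, [1, 2, 0])

def bgr_to_rgb(data):
    # Assumed implementation: reverse the channel order of an HWC tensor (BGR -> RGB).
    return tf.reverse(data, axis=[2])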
Example #2
# Assumed imports to make this snippet self-contained: cv is OpenCV, fc is the
# face_recognition package, and utils provides the bgr_to_rgb helper.
import cv2 as cv
import face_recognition as fc

import utils


def embedding(image_path, detection_method='hog'):
    '''
    Take a path to an image of a SINGLE person and return that person's embedding.
    :param image_path: path to a SINGLE-person image
    :param detection_method: face detection model to use: either 'hog' or 'cnn'
    '''
    # read the image
    image = cv.imread(image_path)
    if image is None:
        print("Cannot load current image")
        return None

    # convert images to RGB format
    rgb_frame = utils.bgr_to_rgb(image)

    # face_locations is a list of (top, right, bottom, left) bounding box tuples, one per face found in the image
    face_locations = fc.face_locations(rgb_frame, model=detection_method)

    # face_encodings returns a list of 128-dimensional face encodings.
    # NOTE: Each embedding in the list is a <class 'numpy.ndarray'> with shape (128,) storing <class 'numpy.float64'> values
    encodings = fc.face_encodings(rgb_frame, face_locations)
    if not encodings:
        print("No face found in image: {}".format(image_path))
        return None
    return encodings[0]
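
A minimal usage sketch for the function above, comparing two embeddings by Euclidean distance (the image paths are hypothetical; 0.6 is the usual face_recognition match threshold):

import numpy as np

emb_a = embedding('person_a.jpg')  # hypothetical paths
emb_b = embedding('person_b.jpg', detection_method='hog')

if emb_a is not None and emb_b is not None:
    distance = np.linalg.norm(emb_a - emb_b)
    print('Distance:', distance)
    print('Same person:', distance < 0.6)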
Example #3
import cv2  # needed below for VideoCapture and resize
import numpy as np

from utils import bgr_to_rgb, rgb_to_bgr
from utils import get_model
from utils import get_style_gram_matrices
from utils import ShowCombination
from keras.preprocessing import image
from keras.applications import vgg19
from keras.optimizers import Adam

IMAGE_SIZE_REDUCE_FACTOR = .5
CAMERA_NUMBER = 0

CAMERA = cv2.VideoCapture(CAMERA_NUMBER)
frame = bgr_to_rgb(CAMERA.read()[1])

IMG_HEIGHT, IMG_WIDTH, N_CHANNELS = frame.shape
IMG_HEIGHT = int(IMG_HEIGHT * IMAGE_SIZE_REDUCE_FACTOR)
IMG_WIDTH = int(IMG_WIDTH * IMAGE_SIZE_REDUCE_FACTOR)
print((IMG_HEIGHT, IMG_WIDTH, N_CHANNELS))

content_img = cv2.resize(frame, (IMG_WIDTH, IMG_HEIGHT))
style_img = rgb_to_bgr(
    np.array(
        image.load_img('the_scream.jpg',
                       target_size=(IMG_HEIGHT, IMG_WIDTH, N_CHANNELS))))
style_gram_matrices = get_style_gram_matrices(style_img)

LEARNING_RATE = 30
DECAY_RATE = 0.01
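
The bgr_to_rgb and rgb_to_bgr helpers imported from utils above are not shown; a minimal sketch of plausible implementations, assuming NumPy/OpenCV-style HWC arrays (only the names come from the snippet, the bodies are an assumption):

def bgr_to_rgb(img):
    # Assumed implementation: reverse the last (channel) axis of an HWC array.
    return img[..., ::-1]

def rgb_to_bgr(img):
    # The same channel reversal converts in either direction.
    return img[..., ::-1]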