Example #1
def preprocess(img):
    print(img.shape)
    img = cv2.resize(img, (224, 224))  # cv2.resize returns a new array; assign it back
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    return x
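A minimal usage sketch, assuming `cv2`, `np`, `image`, and `preprocess_input` come from OpenCV, NumPy, and tf.keras's VGG19 module; the weights and image path below are placeholders:

import cv2
import numpy as np
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.vgg19 import VGG19, preprocess_input, decode_predictions

model = VGG19(weights="imagenet")
img = cv2.cvtColor(cv2.imread("example.jpg"), cv2.COLOR_BGR2RGB)  # cv2 loads BGR
x = preprocess(img)                        # function from the example above
preds = model.predict(x)
print(decode_predictions(preds, top=3)[0])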
Example #2
    def load_image(self, filepath: str) -> None:
        """
        Given path to image file, load image and preprocess

        Parameters:
            filepath (str): path to image, including file extension

        Returns:
            None
        """

        # Load image and perform preprocessing
        img = load_img(filepath)

        # If image is large, resize
        target_size = 500
        if max(img.size) > target_size:
            size_mult = target_size / max(img.size)
            new_size = tuple([int(size_mult * x) for x in img.size])
            img = img.resize(new_size, PIL.Image.LANCZOS)  # ANTIALIAS is the pre-Pillow-10 name for LANCZOS
        img = img_to_array(img)
        self._unprocessed = img.astype(int)

        img = preprocess_input(img)
        # Convert to 1 x n_pxl x n_pxl x n_channels
        img = np.expand_dims(img, axis=0)

        # Set image
        self._image = img
Example #3
def preprocess_image(image_path):
    img = load_img(image_path, target_size=(img_nrows, img_ncols))
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = vgg19.preprocess_input(img)

    return img
Example #4
def predict(model, img):
    x = img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    preds = model.predict(x)
    prediction_index = np.argmax(preds[0])
    return CATEGORIES[prediction_index]
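`img_nrows`, `img_ncols`, and `CATEGORIES` are module-level globals in the original projects. A hypothetical wiring for `predict()`, assuming a saved Keras classifier and a placeholder label list:

from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.image import load_img

CATEGORIES = ["cat", "dog"]              # placeholder labels; must match the model's training order
model = load_model("classifier.h5")      # placeholder model file

img = load_img("test.jpg", target_size=(224, 224))
print(predict(model, img))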
Example #5
def load_img(path):
    """
    this method is used to read image data from previous path for vgg19
    :param path: image file
    :return: np.array object with shape(1,224,224,3) , ordered by RGB->BGR ,centering zero
    """
    img = image.load_img(path, target_size=(224, 224))
    arr = np.expand_dims(image.img_to_array(img), axis=0)
    arr = preprocess_input(arr)
    return arr
Example #6
def load_and_process_image(path):
    """
    Loads an image from path and processes it as to use later in the VGG19
    model
    """
    img = load_img(path)
    img = img_to_array(img)
    img = preprocess_input(img)
    img = np.expand_dims(img, axis=0)

    return img
Example #7
def preprocess(img):
    print(img)
    # Resize the image tensor to the 224x224 input expected by VGG19.
    # (tf.image.resize_images is the TF 1.x name; TF 2 renamed it to tf.image.resize.)
    resized_images = tf.image.resize_images(img, (224, 224))
    print(resized_images)
    x = preprocess_input(resized_images)
    return x
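Unlike the other examples, this `preprocess()` operates on tensors inside the graph. A sketch of the same resize-plus-`preprocess_input` idea wired into a model with `Lambda` layers, assuming TF 2.x (where `resize_images` is spelled `tf.image.resize`):

import tensorflow as tf
from tensorflow.keras.applications.vgg19 import VGG19, preprocess_input

inputs = tf.keras.Input(shape=(None, None, 3))    # raw RGB images of any size
x = tf.keras.layers.Lambda(lambda t: tf.image.resize(t, (224, 224)))(inputs)
x = tf.keras.layers.Lambda(preprocess_input)(x)   # preprocess_input also accepts tensors
outputs = VGG19(weights="imagenet")(x)
model = tf.keras.Model(inputs, outputs)
model.summary()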
Example #8
    def _vectorize(self, model):
        img = image.load_img(self.path or self.file, target_size=(224, 224))
        print(f'Open img file!: {self.get_timing()}')
        x = image.img_to_array(img)
        print(f'Img to array img file!: {self.get_timing()}')
        x = np.expand_dims(x, axis=0)
        print(f'expand_dims!: {self.get_timing()}')
        x = preprocess_input(x)  # w_tm: 15.850202
        print(f'preprocess_input!: {self.get_timing()}')
        pred = model.predict(x)  # w_tm: 20.432673
        print(f'predict!: {self.get_timing()}')
        return pred.ravel()
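`pred.ravel()` returns a flat prediction vector; one common use of such vectors is similarity search. A sketch under that assumption (the owning class name and image paths are hypothetical):

import numpy as np

def cosine_similarity(a, b):
    # Cosine similarity between two flattened prediction/feature vectors
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))

# vec_a = ImageItem(path="a.jpg")._vectorize(model)   # hypothetical class owning _vectorize
# vec_b = ImageItem(path="b.jpg")._vectorize(model)
# print(cosine_similarity(vec_a, vec_b))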
Example #9
def create_many_feature_map_files(directory_path,
                                  output_layer,
                                  image_prefix,
                                  image_features_size,
                                  input_shape,
                                  batch_size=32,
                                  split_name=None,
                                  expected_num_images=None,
                                  dryrun=False):
    """
        Given a TFRecord file, creates multiple numpy files: one for each pair of image id and feature map.
    """
    tfrecord_path = "/".join(
        [directory_path, get_tfrecord_filename(split_name)])
    dataset = make_dataset(tfrecord_path)
    dataset = dataset.batch(batch_size)
    dataset = dataset.make_one_shot_iterator()
    input_images, input_ids = dataset.get_next()

    processed_count = 0

    with tensorflow.Session() as sess:
        model = image_features_model(output_layer,
                                     image_features_size,
                                     input_shape,
                                     image_top_layer=False,
                                     image_top_layer_dropout_rate=0.0)
        try:
            while True:
                batch_images, batch_ids = sess.run([input_images, input_ids])
                batch_images = preprocess_input(batch_images, mode="caffe")
                batch_images = model.predict_on_batch(batch_images)
                __store_feature_maps_to_many(batch_ids, batch_images,
                                             directory_path, image_prefix,
                                             split_name)

                processed_count += len(batch_images)
                if expected_num_images:
                    print(">> Processing images {:d}/{:d} ({:3.0f}%)".format(
                        processed_count, expected_num_images,
                        processed_count / expected_num_images * 100),
                          end="\r")
                else:
                    print(">> Processing images {:d}".format(processed_count),
                          end="\r")

                if dryrun and processed_count > 100:
                    raise Exception("Dryrun finished")
        except Exception:  # end of the dataset (OutOfRangeError) or the dryrun stop
            print("Processed all images: {}".format(processed_count))
Example #10
def load_and_process_image(image_path):
    '''
    Load and preprocess an image.

    Arguments:
        image_path: Path to the image.

    Return:
        img: Processed image array.
    '''

    img = load_img(image_path)
    img = resize_img(img)
    img = img_to_array(img)
    img = preprocess_input(img)
    img = np.expand_dims(img, axis = 0)
    return img
Example #11
def load_and_process_image(image_path):
    img = load_img(image_path)
    img = img_to_array(img)
    img = preprocess_input(img)
    img = np.expand_dims(img,axis=0)
    return img
Example #12
    def load_and_process_image(self, image_path):
        img = load_img(image_path, target_size=(224, 224))
        img = img_to_array(img)
        img = preprocess_input(img)
        img = expand_dims(img, axis=0)
        return img
Example #13
import cv2
import numpy as np
from tensorflow.python.keras.models import load_model
from tensorflow.python.keras.applications.vgg19 import preprocess_input
from tensorflow.python.keras.preprocessing.image import load_img, img_to_array

model = load_model('Arrow2242.h5')   # load your model; make sure its path is correct
cap = cv2.VideoCapture(0)            # change this according to the port the USB camera is connected to

# img = load_img("D:/Arrow Detection/train/left/download.jpg", target_size=(224, 224))

while True:

    _, frame = cap.read()
    img = frame.copy()
    frame = cv2.resize(frame, (224, 224))

    image = img_to_array(frame)
    image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))

    image = preprocess_input(image)
    yhat = model.predict(image)

    print(np.argmax(yhat) + 1)       # class indices: 1 = left, 2 = none, 3 = right
    cv2.imshow("Frame", img)
    if cv2.waitKey(1) == 13:         # Enter key exits the loop
        break

cap.release()
cv2.destroyAllWindows()
Example #14
def calc_layer_loss(model, x):
    output = model.predict(x)
    print(output.shape)


base_model: keras.Model = VGG19(weights='imagenet', input_shape=(66, 66, 3), include_top=False, pooling="avg")
resnet_model: keras.Model = ResNet50(weights="imagenet", input_shape=(66, 66, 3), include_top=False, pooling="avg")

for layer in resnet_model.layers:
    print(layer.name)

loss_layers_name = ["block1_conv2", "block2_conv2", "block3_conv4", "block4_conv4", "block5_conv4"]
models = []

for layer in base_model.layers:
    layer: keras.layers.Layer = layer
    if layer.name in loss_layers_name:
        model = Model(inputs=base_model.input, outputs=base_model.get_layer(layer.name).output)
        models.append(model)

img_path = '../cq_data/cq/face/ar_1_1.png'
img = image.load_img(img_path)
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)

for model in models:
    calc_layer_loss(model, x)

calc_layer_loss(base_model, x)
calc_layer_loss(resnet_model, x)
Example #15
def preprocess(image_path):
    image = load_img(image_path)
    image = img_to_array(image)
    image = np.expand_dims(image, axis=0)
    return preprocess_input(image)
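A short usage sketch, assuming the same tf.keras imports as the other examples and a placeholder image path, that feeds the preprocessed batch to a VGG19 feature extractor instead of the full classifier:

from tensorflow.keras.applications.vgg19 import VGG19

feature_extractor = VGG19(weights="imagenet", include_top=False, pooling="avg")
features = feature_extractor.predict(preprocess("photo.jpg"))  # placeholder path
print(features.shape)  # (1, 512)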