def predict(model, img):
    x = img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    preds = model.predict(x)
    prediction_index = np.argmax(preds[0])
    return CATEGORIES[prediction_index]
def handle(req):
    """Handle a request to the function.

    Args:
        req (str): request body
    """
    global model

    if model is None:
        # load the trained model
        # https://github.com/jkjung-avt/keras-cats-dogs-tutorial
        model = load_model('function/model/model-resnet50-final.h5',
                           compile=False)

    # Deserialize req and retrieve 'image' value from JSON
    image_data = json.loads(req)['image']

    # Load a base64 encoded image
    img = (PIL.Image.open(BytesIO(base64.b64decode(image_data))).resize(
        (224, 224)))

    # Converts a PIL Image instance to a Numpy array.
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)

    with graph.as_default():
        # Generates output predictions for the input data 'x'
        preds = model.predict(x)[0]

        ret = {"cats": preds[0].item(), "dogs": preds[1].item()}

        return jsonify(ret)
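A minimal client-side sketch for the handler above, assuming a hypothetical local OpenFaaS gateway URL and a placeholder image file; it builds the {"image": <base64 string>} JSON body the function expects:

import base64
import json

import requests

# Encode the image and wrap it in the JSON body expected by handle(req)
with open('cat.jpg', 'rb') as f:
    payload = json.dumps({'image': base64.b64encode(f.read()).decode('utf-8')})

# Hypothetical gateway URL and function name
resp = requests.post('http://127.0.0.1:8080/function/cats-vs-dogs', data=payload)
print(resp.json())  # e.g. {"cats": 0.98, "dogs": 0.02}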
Example #3
def read_and_prep_images(img_paths, img_height=img_size, img_width=img_size):
    imgs = [
        load_img(img_path, target_size=(img_height, img_width))
        for img_path in img_paths
    ]
    img_array = np.array([img_to_array(img) for img in imgs])
    return preprocess_input(img_array)
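# A hedged usage sketch for read_and_prep_images above, assuming img_size = 224,
# a tensorflow.keras ImageNet ResNet50, and placeholder file names.
from tensorflow.keras.applications.resnet50 import ResNet50, decode_predictions

model = ResNet50(weights='imagenet')
batch = read_and_prep_images(['cat.jpg', 'dog.jpg'])  # shape (2, 224, 224, 3)
preds = model.predict(batch)
print(decode_predictions(preds, top=1))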
def preprocess_img(image_bytes, image_size=image_size):
    ''' preprocess an image from bytes array to input form for resnet model'''
    img = img_to_array(
        load_bytes_img(image_bytes, target_size=(image_size, image_size)))
    img_array = np.array(img)
    output = preprocess_input(img_array)
    return output
Example #5
def Example():
    # image_path = args.data_path + '\\20190301074833_Wave1_2019Y03M01D_07h48m33s_3sec_nonhighpass.png'
    image_path = 'D:\\Onepredict_MK\\LG CNS\\20190417_LG_CNS_test_mode\\puppy.jpg'

    resnet = ResNet50(input_shape=(224, 224, 3), weights='imagenet', include_top=True)
    print(resnet.summary())

    activation_layer = resnet.get_layer('activation_48')
    model = Model(inputs=resnet.input, outputs=activation_layer.output)
    final_dense = resnet.get_layer('fc1000')
    weight = final_dense.get_weights()[0]

    img = image.load_img(image_path, target_size=(224, 224))
    img_input = preprocess_input(np.expand_dims(img, 0))
    fmaps = model.predict(img_input)[0]                     # fmaps.shape = (7, 7, 2048)

    probs = resnet.predict(img_input)                       # probs.shape = (1, 1000)
    class_names = decode_predictions(probs)
    class_name = class_names[0][0]
    pred = np.argmax(probs[0])                              # pred = 207

    w = weight[:, pred]                                     # w.shape = (2048,)
    cam = fmaps.dot(w)                                      # cam.shape = (7, 7)
    camp = ndimage.zoom(cam, (32, 32), order=1)

    plt.subplot(1, 2, 1)
    plt.imshow(img, alpha=0.8)
    plt.imshow(camp, cmap='jet', alpha=0.5)

    plt.subplot(1, 2, 2)
    plt.imshow(img)
    plt.title(class_name)
    plt.show()
Example #6
def get_local_images(image_path, test_train_split, img_fmt, target_size):
    classes = [
        dir for dir in os.listdir(image_path)
        if os.path.isdir(os.path.join(image_path, dir))
    ]
    print("found {} classes: {}".format(len(classes), classes))
    input_arr = []
    target_labels = []
    for class_idx in range(len(classes)):
        #if not os.path.isdir(os.path.join(image_path, classes[class_idx])): continue
        paths = glob.glob(
            os.path.join(image_path, classes[class_idx]) +
            "/*.{}".format(img_fmt))
        print("found {} images with a .{} extension in {}".format(
            len(paths), img_fmt, os.path.join(image_path, classes[class_idx])))
        for img_path in tqdm(paths,
                             desc=f'Processing label {classes[class_idx]}: '):
            img = image.load_img(img_path, target_size=target_size)
            x = image.img_to_array(img)
            #             x = np.expand_dims(x, axis=0)
            x = preprocess_input(x)
            target_labels.append(class_idx)
            input_arr.append(x)
    X_train, X_test, y_train, y_test = train_test_split(
        input_arr, target_labels, test_size=test_train_split)
    X_train = np.array(X_train)
    X_test = np.array(X_test)
    y_train = np.array(y_train)
    y_test = np.array(y_test)
    return X_train, X_test, y_train, y_test, classes
Example #7
    def read_and_prep_images(self, img_paths, img_height=IMAGE_SIZE,
                             img_width=IMAGE_SIZE):
        imgs = [load_img(img_path, target_size=(img_height, img_width)) for
                img_path in self.image_paths]
        print(imgs)
        img_array = np.array([img_to_array(img) for img in imgs])
        return preprocess_input(img_array)
def get_features(img_path, classifier):
    if classifier == 'VGG19':
        img = image.load_img(img_path, target_size=(224, 224))
    elif classifier == 'ResNet50':
        img = image.load_img(img_path, target_size=(224, 224))
    elif classifier == 'Xception':
        img = image.load_img(img_path, target_size=(299, 299))
    else:
        return False

    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    flatten = model.predict(x)
    if chunked:
        pred_dir = 'predictions_chunked'
        if normalized:
            pred_dir = pred_dir + '_normalized'
    else:
        pred_dir = 'predictions'
    feat_folder = os.path.join('dataset', pred_dir, classifier)
    os.makedirs(feat_folder, exist_ok=True)
    features_name = os.path.join(feat_folder, os.path.basename(img_path))
    features_name = features_name.split('.')[0]
    features_name = features_name.replace('_16bit', '')
    np.save(features_name, flatten)
    return list(flatten[0])
Example #9
    def predict(self, input_image, verbose=0):
        res = process_image(input_image)
        res = np.expand_dims(res, axis=0)
        res = preprocess_input(res)
        features = self.pretrained.predict(res)
        return features
Example #10
def data_generator(data, batch_size):
    # Get total number of samples in the data
    n = len(data)
    nb_batches = int(np.ceil(n/batch_size))

    # Get a numpy array of all the indices of the input data
    indices = np.arange(n)
    
    # Define two numpy arrays for containing batch data and labels
    batch_data = np.zeros((batch_size, img_rows, img_cols, img_channels), dtype=np.float32)
    batch_labels = np.zeros((batch_size,), dtype=np.float32)
    
    while True:
        # shuffle indices for the training data
        np.random.shuffle(indices)
            
        for i in range(nb_batches):
            # get the next batch 
            next_batch_indices = indices[i*batch_size:(i+1)*batch_size]
            
            # process the next batch
            for j, idx in enumerate(next_batch_indices):
                img = cv2.imread(data.iloc[idx]["image"])
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                img = seq.augment_image(img)
                img = cv2.resize(img, (img_rows, img_cols)).astype(np.float32)
                label = data.iloc[idx]["label"]
                
                batch_data[j] = img
                batch_labels[j] = label
            
            batch_data = preprocess_input(batch_data)
            yield batch_data, batch_labels
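A hypothetical way to drive training with this generator, assuming a compiled Keras model, a DataFrame train_df with "image" and "label" columns, and the img_rows/img_cols/img_channels and augmentation pipeline seq referenced in the snippet:

train_gen = data_generator(train_df, batch_size=32)
steps_per_epoch = int(np.ceil(len(train_df) / 32))
model.fit(train_gen, steps_per_epoch=steps_per_epoch, epochs=5)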
Example #11
    def mlThread(self):
        print("Loading ML model")
        self.model = self.loadKerasModel(self.model_path)
        img_height, img_width = 240, 320
        current_frame = None

        while self.driving:
            time.sleep(0.01)
            if self.data_in is not None:
                frame = self.data_in.getFrame()
                if frame is not current_frame:
                    current_frame = frame
                    # cv2.resize expects dsize as (width, height)
                    frame = cv2.resize(frame,
                                       dsize=(img_width, img_height),
                                       interpolation=cv2.INTER_CUBIC)
                    img_array = np.array([frame])
                    frame = preprocess_input(img_array)
                    # Leave the frame ready for the model
                    prediction = self.model.predict(frame)
                    predictions = [
                        'backward', 'forward', 'left', 'right', 'stop'
                    ]
                    self.prediction = predictions[prediction.argmax(
                        axis=-1)[0]]
                    print("Prediction:", self.prediction)
def predict(img_local_path):
    model = SqueezeNet(weights='imagenet')
    img = image.load_img(img_local_path, target_size=(227, 227))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    preds = model.predict(x)
    result = decode_predictions(preds)
    return result
Example #13
def read_and_prep_images(img_paths):
    '''read and prepare images'''
    image_size = 224
    imgs = [
        load_img(img_path, target_size=(image_size, image_size))
        for img_path in img_paths
    ]
    img_array = np.array([img_to_array(img) for img in imgs])
    return preprocess_input(img_array)
Example #14
def example():
    image_path = 'D:\\Onepredict_MK\\LG CNS\\cat.jpg'

    img = image.load_img(image_path, target_size=(224, 224))
    img_input = preprocess_input(np.expand_dims(img, 0))

    resnet = ResNet50(input_shape=(224, 224, 3),
                      weights='imagenet',
                      include_top=True)

    probs = resnet.predict(img_input)
    pred = np.argmax(probs[0])

    activation_layer = resnet.layers[-3].name
    inp = resnet.input
    # for idx in range(1000):
    y_c = resnet.output.op.inputs[0][:, pred]
    A_k = resnet.get_layer(activation_layer).output

    grads = K.gradients(y_c, A_k)[0]
    # Model(inputs=[inp], outputs=[A_k, grads, resnet.output])
    get_output = K.function(inputs=[inp], outputs=[A_k, grads, resnet.output])
    [conv_output, grad_val, model_output] = get_output([img_input])

    conv_output = conv_output[0]
    grad_val = grad_val[0]

    weights = np.mean(grad_val, axis=(0, 1))
    grad_cam = np.zeros(dtype=np.float32, shape=conv_output.shape[0:2])
    for k, w in enumerate(weights):
        grad_cam += w * conv_output[:, :, k]
    # ReLU applied after the weighted sum, as in Grad-CAM
    grad_cam = np.maximum(grad_cam, 0)

    grad_cam = cv2.resize(grad_cam, (224, 224))

    # Guided grad-CAM
    register_gradient()
    guided_model, activation_layer = modify_backprop(resnet, 'GuidedBackProp',
                                                     args.checkpoint_path,
                                                     args.main_name)
    saliency_fn = compile_saliency_function(guided_model, activation_layer)
    saliency = saliency_fn([img_input, 0])
    gradcam = saliency[0] * grad_cam[..., np.newaxis]
    gradcam = deprocess_image(gradcam)

    # grad_cam = ndimage.zoom(grad_cam, (32, 32), order=1)
    plt.subplot(1, 2, 1)
    plt.imshow(img, alpha=0.8)
    plt.imshow(grad_cam, cmap='jet', alpha=0.5)
    plt.axis('off')

    plt.subplot(1, 2, 2)
    plt.imshow(gradcam, cmap='jet', alpha=0.5)
    plt.axis('off')
    plt.show()
Example #15
def read_and_prep_images(train1_file,
                         img_height=image_size,
                         img_width=image_size):
    imgs = [
        load_img(img_path, target_size=(img_height, img_width))
        for img_path in train1_file
    ]
    img_array = np.array([img_to_array(img) for img in imgs])
    output = preprocess_input(img_array)
    return (output)
Example #16
def validate_load_image():
    import DataSources
    from detect_face import DetectFace

    from keras.preprocessing import image
    # from keras.applications.resnet50 import preprocess_input

    data = DataSources.load_validation_dataset2()
    data: [Data] = DetectFace.get_face_bboxes(data[:1])

    image_array = load_image(data[0])
    image_array = preprocess_input(image_array, mode='tf')

    img = image.load_img(data[0].image, target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x, mode='tf')

    print('done')
Example #17
def train(img_path):
    K.clear_session()
    my_model = ResNet50(
        weights='./resnet50_weights_tf_dim_ordering_tf_kernels.h5')
    image_size = 224
    img = load_img(img_path, target_size=(image_size, image_size))
    img_array = np.array([img_to_array(img)])
    inpu = preprocess_input(img_array)
    preds = my_model.predict(inpu)
    deco = decode_predictions(preds)
    return deco
Example #18
def preprocess_image(im_path, im_size, model_name):
    im = image.load_img(im_path, target_size=(im_size[0], im_size[1]))
    im = image.img_to_array(im)
    im = np.expand_dims(im, axis=0)
    if model_name == 'inception_v3':
        im = inception_v3.preprocess_input(im)
    elif model_name == 'resnet50':
        im = resnet50.preprocess_input(im)
    elif model_name == 'vgg16':
        im = vgg16.preprocess_input(im)
    return im
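# Hypothetical calls to preprocess_image above with placeholder paths; note the
# 299x299 input Inception V3 expects versus 224x224 for ResNet50/VGG16.
x_resnet = preprocess_image('cat.jpg', (224, 224), 'resnet50')
x_inception = preprocess_image('cat.jpg', (299, 299), 'inception_v3')
print(x_resnet.shape, x_inception.shape)  # (1, 224, 224, 3) (1, 299, 299, 3)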
def get_image(img_path):
    img = image.load_img(img_path, target_size=(224, 224))
    #     if img is None:
    #         continue
    x = image.img_to_array(img)
    x = preprocess_input(x)
    x = np.expand_dims(x, axis=0)
    # plt.imshow(x[0])
    # plt.show()
    pred = net.predict(x)[0]
    return x[0], pred
Example #20
def read_and_prep_images(img_paths,
                         img_height=image_size,
                         img_width=image_size):
    """
    Function to Read and Prep Images for Modeling
    """

    # imgs = [load_img(img_path, target_size=(img_height, img_width)) for img_path in img_paths]
    imgs = [load_img(img_path, target_size=None) for img_path in img_paths]

    img_array = np.array([img_to_array(img) for img in imgs])
    return preprocess_input(img_array)
Example #21
def pretrained_path_to_tensor(img_path):
    # loads RGB image as PIL.Image.Image type
    img = image.load_img(img_path, target_size=(224, 224))
	
    # convert PIL.Image.Image type to 3D tensor with shape (224, 224, 3)
    x = image.img_to_array(img)
	
    # convert 3D tensor to 4D tensor with shape (1, 224, 224, 3) and return 4D tensor
    x = np.expand_dims(x, axis=0)
	
    # convert RGB -> BGR, subtract mean ImageNet pixel, and return 4D tensor
    return preprocess_input(x)
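# A hypothetical batching pattern on top of pretrained_path_to_tensor: stack the
# per-image 4D tensors and run a stock ResNet50 feature extractor (the import
# and file paths are assumptions).
from tensorflow.keras.applications.resnet50 import ResNet50

paths = ['dog1.jpg', 'dog2.jpg']
batch = np.vstack([pretrained_path_to_tensor(p) for p in paths])
extractor = ResNet50(weights='imagenet', include_top=False, pooling='avg')
features = extractor.predict(batch)
print(features.shape)  # (2, 2048)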
def read_and_prep_images(img_paths, img_height=image_size, img_width=image_size):
    r = ""
    logf = open("/tmp/image.log", "w")
    try:
        imgs = [load_img(img_path, target_size=(img_height, img_width))
                for img_path in img_paths]
        img_array = np.array([img_to_array(img) for img in imgs])
        r = preprocess_input(img_array)
    except Exception as e:  # most generic exception you can catch
        logf.write("Exception: {0}\n".format(str(e)))
    finally:
        logf.close()

    return r
Example #23
def read_images(images, label):
    data = []
    for img in images:
        img = cv2.imread(str(img))
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = cv2.resize(img, (img_rows, img_cols)).astype(np.float32)
        data.append(img)
    
    labels = [label]*len(data)
    data = np.array(data).astype(np.float32)
    data = preprocess_input(data)
    return data, labels
Example #24
    def SayHello(self, request, context):
        #res = decode_predictions(preds) # requires access to the Internet
        if request.name == "record":
            msg = 'Hello, %s!' % responses[0]
            x = image.img_to_array(img)
            x = np.expand_dims(x, axis=0)
            x = preprocess_input(x)
            preds = model.predict(x)
        elif request.name == "replay":
            msg = 'Hello, %s!' % responses[1]
            x2 = image.img_to_array(img2)
            x2 = np.expand_dims(x2, axis=0)
            x2 = preprocess_input(x2)
            preds2 = model.predict(x2)
        else:
            msg = 'Hello, %s!' % request.name
            x = image.img_to_array(img)
            x = np.expand_dims(x, axis=0)
            x = preprocess_input(x)
            preds = model.predict(x)

        #joblib.dump(model, '/var/local/dir/lr_model.pk')
        return helloworld_pb2.HelloReply(message=msg)
Example #25
def load_image(data: Data):
    image_array = cv2.imread(data.image)  # BGR
    image_array = cv2.cvtColor(image_array, cv2.COLOR_BGR2RGB)
    if data.bbox is not None:
        try:
            image_array = Utils.pre_process_image2(image_array, data.bbox)
        except:
            log.exception('failed to pre process image %s with bbox %s' %
                          (data.image, data.bbox))
            raise Exception('failed to pre process image %s with bbox %s' %
                            (data.image, data.bbox))
    else:
        raise Exception('no bbox data for image %s' % data.image)
    image_array = cv2.resize(image_array, (RESNET_SIZE, RESNET_SIZE),
                             interpolation=cv2.INTER_CUBIC)
    image_array = np.asarray(image_array, dtype=np.float32)
    image_array = preprocess_input(image_array, mode='caffe')
    return image_array
Example #26
def train(img_local_path, label_path, model_object_key):
    model = SqueezeNet(weights='imagenet')
    img = image.load_img(img_local_path, target_size=(227, 227))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)

    label_file = open(label_path)
    y = np.array([label_file.read()])
    label_file.close()

    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    history = model.fit(x, y)
    model.summary()
    model.save_weights(tmp_path + model_object_key)
    return history.history
Example #27
def hello():
    files = get_files('1.jpg')
    cls_list = ['Cat', 'Dog']
    net = load_model('model-resnet50-final.h5')
    for f in files:
        img = image.load_img(f, target_size=(224, 224))
        if img is None:
            continue
        x = image.img_to_array(img)
        x = preprocess_input(x)
        x = np.expand_dims(x, axis=0)
        pred = net.predict(x)[0]
        top_inds = pred.argsort()[::-1][:5]
        for i in top_inds:
            if pred[i] > 0.75:
                print(cls_list[i])
                html = "<html><body><h1>"
                html += cls_list[i] + "</h1></body></html>"
                return html
            else:
                return "I am Not sure"
Example #28
    def forward_pass(self):
        """
        Generate new features matrix (5063, 2048)
        """
        tar = self.tar_set
        # self.features_matrix = []
        features_matrix = [[0] * 2048 for i in range(len(tar.getmembers()))]
        i = 0
        # print("=========================Extracting features_matrix=========================")
        # t_bar = tqdm(range(len(tar.getmembers())), total=5063, ascii=True)
        for tar_info in tqdm(tar.getmembers(), ascii=True, desc="Extracting features_matrix"):
            f = tar.extractfile(tar_info)
            f.read()
            res = process_image(f)
            res = np.expand_dims(res, axis=0)
            res = preprocess_input(res)
            features = self.pretrained.predict(res)
            # features_reduce = features.squeeze()
            # print("feature shape: ", np.shape(features))
            features_matrix[i] = features
            i += 1
        return features_matrix
Example #29
def do_predict(model, pred_data):
    # obtain predictions
    #print("getting prediction for {}".format(pred_data))
    #st = time.time()
    try:
        image_path = pred_data['image_path']
        im = Image.open(image_path)

        # adam decodes base64, we dont
        # img = decode_img(im).resize((224,224)).convert('RGB')
        # we dont
        img = im.resize((224, 224)).convert('RGB')
        img = image.img_to_array(img)
        x = preprocess_input(img)
        pred = model.predict(resnet50.predict(np.array([x])))[0]
        pred = [str(f) for f in pred]

        prediction = list(zip(pred, META['classes']))
        prediction = pred
        #print(prediction)
        return prediction
        '''
        predictions = predictor(
            {
                "image_bytes": [base64.decodestring(bytes(pred_data['image'], 'utf-8'))],
                "batch_size": 1
            }
        )
        '''
    except Exception as e:
        print("!!!!!!!!!! PREDICTION FAILED !!!!!!!!!!")
        print(e)
        return False

    #print("I made a prediction in {:.2f}s".format(time.time()-st))
    return predictions
Example #30
data_generator = ImageDataGenerator(
    height_shift_range=0.4,
    zoom_range=0.3,
    rotation_range=20,
)

model = tensorflow.keras.models.load_model('mymodel.h5')
model.summary()
batch_size = 20
train_generator = data_generator.flow_from_directory(
    'C:/Users/xzha/Desktop/resnet/Retroflexion/',
    target_size=(431, 401),
    batch_size=batch_size,
    class_mode='categorical')

num_classes = len(train_generator.class_indices)
feature = []
correct_num = 0

imgpath = gb.glob('C:/Users/xzha/Desktop/resnet/Retroflexion/tube/*.png')
for path in imgpath:
    imgs = load_img(path, target_size=(431, 401))
    img_array = np.array(img_to_array(imgs))
    img_array = np.expand_dims(img_array, axis=0)
    imginput = preprocess_input(img_array)
    prediction = model.predict_classes(imginput)
    classes = dict((v, k) for k, v in train_generator.class_indices.items())
    feature.append(classes[prediction[0]])
    print(classes[prediction[0]], 'tube')
    if classes[prediction[0]] == 'tube':
        correct_num += 1