Example #1
def read_img(img_path):
    try:
        img = image.load_img(img_path, target_size=(224, 224))
    except Exception as e:
        print(e)
        raise  # re-raise: otherwise `img` would be undefined below
    img = image.img_to_array(img)
    img = np.expand_dims(img, axis=0)
    #    print("loadimg……")
    return img / 255
Example #2
def convert_to_HDF5(dir, data):
    for i in np.arange(0, len(dir)):
        image = load_img(ROOT_DIR + dir[i], target_size=(224, 224))
        image = img_to_array(image)
        image = image/255
        image = np.expand_dims(image, axis=0)
        print(i)
        feature, _, __ = model.predict_on_batch([image, image, image])
        data[i] = feature
Example #3
def read_and_prep_images(img_paths,
                         img_height=image_size,
                         img_width=image_size):
    imgs = [
        load_img(img_path, target_size=(img_height, img_width))
        for img_path in img_paths
    ]
    img_array = np.array([img_to_array(img) for img in imgs])
    return preprocess_input(img_array)
Example #4
def get_lab_from_data_list(data_list):
    x_lab = []
    for f in data_list:
        rgb = img_to_array(load_img(f,
                                    target_size=(img_size,
                                                 img_size))).astype(np.uint8)
        lab = rgb2lab(rgb)
        x_lab.append(lab)
    return np.stack(x_lab)
Example #5
def predict(path, filename):
    x = load_img(path, target_size=(width, height))
    x = img_to_array(x)
    x = np.expand_dims(x, axis=0)

    array = cnn.predict(x)
    result = array[0]
    answer = np.argmax(result)
    return answer
Example #6
    def categorize(self, lmodel, imgName):
        #BGR
        img = image.load_img(imgName, target_size=(299, 299)) 
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        y = lmodel.predict(x)

        return y #np.argmax(y)
Example #7
def read_and_prep_images(img_paths):
    '''read and prepare images'''
    image_size = 224
    imgs = [
        load_img(img_path, target_size=(image_size, image_size))
        for img_path in img_paths
    ]
    img_array = np.array([img_to_array(img) for img in imgs])
    return preprocess_input(img_array)
Example #8
 def create_test_data(self):
     sar_filename, lab_filename = self.load_sample(
         sar_dir='/emwusr/xianzhengshi/2020_test/20200404/test_256/sar',
         lab_dir='/emwusr/xianzhengshi/2020_test/20200404/test_256/lab')
     test_data = np.ndarray(
         (len(sar_filename), self.img_cols, self.img_rows, 3))
     test_lab = np.ndarray(
         (len(lab_filename), self.img_cols, self.img_rows, self.num_class))
     for num, name in enumerate(sar_filename):
         img = load_img(name)
         img = img_to_array(img)
         test_data[num] = img
         # test_data[num] = (img[:, :, 0]).reshape((self.img_rows, self.img_cols, 1))
     for num1, name1 in enumerate(lab_filename):
         lab = load_img(name1)
         lab = img_to_array(lab)
         test_lab[num1] = self.one_hot_lab(lab)
     return test_data, test_lab
Example #9
 def get_batch_x(self, idx):
     # Fetch a batch of training data
     batch_imgs = self.image_list[idx * BATCH_SIZE:(idx + 1) * BATCH_SIZE]
     X = np.array([
         img_to_array(load_img(im, target_size=(H, W))) / IMG_NORMALIZER
         for im in batch_imgs
     ])
     if PREPROCESS: X = F_PREPROC(X)
     return X
Example #10
def predict(img_local_path):
    model = SqueezeNet(weights='imagenet')
    img = image.load_img(img_local_path, target_size=(227, 227))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    preds = model.predict(x)
    result = decode_predictions(preds)
    return result
Example #11
    def run(self, path_image, model='VGG', k=5, load_model=True, load_features=True,
            fit_model=True, data_augmentation=False, algorithm='brute', metric='cosine',
            nb_imgs=100, remove_not_white=False):
        
        self.path_to_img = path_image
        
        #load the model
        if load_model:
            self._load_model(model=model)
            
        #load the features
        if load_features:
            self._load_features(model=model, 
                                data_augmentation=data_augmentation,
                                remove_not_white=remove_not_white)
            
        #fit the kNN model
        if fit_model:
            self._fit_kNN(algorithm=algorithm, metric=metric)
                  
        #calculate the features of the images
        if model=='Inception_Resnet':
            img = image.load_img(path_image, target_size=(299, 299)) 
            img = image.img_to_array(img)  # convert to array
#            if remove_background:
#                img = utils.remove_background(img.astype(np.uint8))
            img = np.expand_dims(img, axis=0)
            img = ppIR(img)
            self.img_features = [self.IR_model.predict(img).flatten()] 
        else:
            img = image.load_img(path_image, target_size=(224, 224)) 
            img = image.img_to_array(img)  # convert to array
#            if remove_background:
#                img = utils.remove_background(img.astype(np.uint8))
            img = np.expand_dims(img, axis=0)
            img = ppVGG19(img)
            self.img_features = [self.VGG_model.predict(img).flatten()] 
                     
        #find most similar images in the dataset
        _, self.NN = self.kNN.kneighbors(self.img_features)
        print(self.kNN.kneighbors(self.img_features))
        #identify most similar items
        self.similar_items = [self.items[i] for i in self.NN[0]][:nb_imgs]
        self.similar_images = [self.images[i] for i in self.NN[0]][:nb_imgs]
Example #12
    def learn(self, style_name):
        img_paths = glob.glob("transfer/ns_model/train_img/*")
        batch_size = 2
        epochs = 5
        input_shape = (224, 224, 3)
        input_size = input_shape[:2]
        style = glob.glob("media/style/*")[0].split("\\")[-1]

        img_sty = load_img(
            'media/style/'+style,
            target_size=input_size
        )
        img_arr_sty = np.expand_dims(img_to_array(img_sty), axis=0)
        self.y_true_sty = self.model_sty.predict(img_arr_sty)
        shutil.rmtree("./media/style")
        os.mkdir("./media/style")

        self.gen = self.train_generator(
            img_paths,
            batch_size,
            self.model_con,
            self.y_true_sty,
            epochs=epochs
        )

        gen_output_layer = self.model_gen.layers[-1]
        tv_loss = self.TVRegularizer(gen_output_layer.output)
        gen_output_layer.add_loss(tv_loss)

        self.model.compile(
            optimizer = Adadelta(),
            loss = [
                self.style_loss,
                self.style_loss,
                self.style_loss,
                self.style_loss,
                self.feature_loss
            ],
            loss_weights = [1.0, 1.0, 1.0, 1.0, 3.0]
        )

        now_epoch = 0
        min_loss = np.inf
        steps_per_epoch = math.ceil(len(img_paths)/batch_size)

        # Training loop
        for i, (X_train, y_train) in enumerate(self.gen):
            if i % steps_per_epoch == 0:
                now_epoch += 1

            loss = self.model.train_on_batch(X_train, y_train)
            if loss[0]<min_loss:
                min_loss = loss[0]
                self.model.save("transfer/ns_model/pretrained_model/" + style_name + ".h5")

            print("epoch: {}, iters: {}, loss: {:.3f}".format(now_epoch, i, loss[0]))
Example #13
def example():
    image_path = 'D:\\Onepredict_MK\\LG CNS\\cat.jpg'

    img = image.load_img(image_path, target_size=(224, 224))
    img_input = preprocess_input(np.expand_dims(img, 0))

    resnet = ResNet50(input_shape=(224, 224, 3),
                      weights='imagenet',
                      include_top=True)

    probs = resnet.predict(img_input)
    pred = np.argmax(probs[0])

    activation_layer = resnet.layers[-3].name
    inp = resnet.input
    # for idx in range(1000):
    y_c = resnet.output.op.inputs[0][:, pred]
    A_k = resnet.get_layer(activation_layer).output

    grads = K.gradients(y_c, A_k)[0]
    # Model(inputs=[inp], outputs=[A_k, grads, resnet.output])
    get_output = K.function(inputs=[inp], outputs=[A_k, grads, resnet.output])
    [conv_output, grad_val, model_output] = get_output([img_input])

    conv_output = conv_output[0]
    grad_val = grad_val[0]

    weights = np.mean(grad_val, axis=(0, 1))
    grad_cam = np.zeros(dtype=np.float32, shape=conv_output.shape[0:2])
    for k, w in enumerate(weights):
        grad_cam += w * conv_output[:, :, k]
    # ReLU (applied once, after the weighted sum)
    grad_cam = np.maximum(grad_cam, 0)

    grad_cam = cv2.resize(grad_cam, (224, 224))

    # Guided grad-CAM
    register_gradient()
    guided_model, activation_layer = modify_backprop(resnet, 'GuidedBackProp',
                                                     args.checkpoint_path,
                                                     args.main_name)
    saliency_fn = compile_saliency_function(guided_model, activation_layer)
    saliency = saliency_fn([img_input, 0])
    gradcam = saliency[0] * grad_cam[..., np.newaxis]
    gradcam = deprocess_image(gradcam)

    # grad_cam = ndimage.zoom(grad_cam, (32, 32), order=1)
    plt.subplot(1, 2, 1)
    plt.imshow(img, alpha=0.8)
    plt.imshow(grad_cam, cmap='jet', alpha=0.5)
    plt.axis('off')

    plt.subplot(1, 2, 2)
    plt.imshow(gradcam, cmap='jet', alpha=0.5)
    plt.axis('off')
    plt.show()
Example #14
File: app.py Project: y9698/root7
def sample2():
    name2 = "here is index2"
    pic = request.files['img_file']
    #pic = urllib.request.urlopen(url).read()
    #print("pic",pic)
    # img_bin = io.BytesIO(pic)
    # img =Image.open(img_bin)
    test = img_to_array(
        load_img(pic, target_size=(28, 28), color_mode="grayscale"))
    # for test in tests:
    #     x.append(test)

    #test = x.reshape(1,2352)
    x = np.asarray(test)
    # print("shape===", x.shape)

    #x= np.squeeze(x)
    #x=np.asarray(x)
    x = x.astype('float32')
    #x=x.reshape(2352,1)
    x = x / 255.0
    x = (np.expand_dims(x, 0))
    #print("x;",x)
    # print('x:',x)
    # x_test = io.BytesIO(x_test)
    # # Open with Pillow and save the image
    # x_test = Image.open(x_test).convert('L')

    # img_rows, img_cols = 28, 28

    # x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    # x_test = x_test.astype('float32')
    # x_test /= 255
    #x_test = 1- np.array(x_test)
    #X_test = x_test.reshape(1,784)

    # test = x_test
    # test = (np.expand_dims(test,0))

    data = {'images': x.tolist()}

    API_URL = 'https://root67.herokuapp.com/'
    res = requests.post(API_URL, json=data)

    result = []
    for v in res:
        try:
            js = json.loads(v)
            result.append(js['data'])
        except Exception:
            pass

    result = int(result[0]['prediction'][0])
    #print("もしかしたら:",result,"?かも")

    return render_template("index2.html", result=result, name2=name2)
Example #15
def get_images_array_from_path_list(img_path_list, image_size=(224, 224)):
    # img_to_array : (H, W, C)
    # expand_dims  : (H, W, C) => (1, H, W, C)
    # concatenate  : N x (1, H, W, C) => (N, H, W, C)
    img_list = [
        np.expand_dims(img_to_array(load_img(img_path,
                                             target_size=image_size)),
                       axis=0) for img_path in img_path_list
    ]
    return np.concatenate(img_list, axis=0)
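The expand_dims/concatenate pattern above builds the batch axis by hand; np.stack can do the same in one call. A minimal alternative sketch (the _v2 name is ours, and it assumes the same load_img/img_to_array/numpy imports as the snippet above):

def get_images_array_from_path_list_v2(img_path_list, image_size=(224, 224)):
    # np.stack inserts the new batch axis itself: N x (H, W, C) => (N, H, W, C)
    return np.stack([img_to_array(load_img(p, target_size=image_size))
                     for p in img_path_list], axis=0)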
Example #16
def image_generate(image, name, directory):
    # Convert the image to a Keras-style array
    img = load_img(image)
    x = img_to_array(img)
    x = x.reshape((1, ) + x.shape)
    g = datagen.flow(x, batch_size=1, save_to_dir=directory, save_format='jpg')

    # Augment the image (generate 5 augmented copies into `directory`)
    for i in range(5):
        batch = next(g)
Example #17
 def load_imgs(self, img_paths, target_size=(224, 224)):
     """Return a batch of arrays from a list of image file paths."""
     _load_img = lambda x: img_to_array(
         load_img(x, target_size=target_size)
     )
     img_list = [
         np.expand_dims(_load_img(img_path), axis=0)
         for img_path in img_paths
     ]
     return np.concatenate(img_list, axis=0)
Example #18
 def process(self, path_label_tuple):
     """
   Load image from gcs
   Yields: Image, categorical label
 """
     path, label = path_label_tuple
     with self.file_system.open(path) as img_file:
         img = np.array(
             load_img(img_file, target_size=(self.img_size, self.img_size)))
     yield img, label
Example #19
def load_img(path):
    """
    Read image data from the given path and preprocess it for VGG19.
    :param path: path to the image file
    :return: np.array of shape (1, 224, 224, 3), channels reordered RGB->BGR and zero-centered
    """
    img = image.load_img(path, target_size=(224, 224))
    arr = np.expand_dims(image.img_to_array(img), axis=0)
    arr = preprocess_input(arr)
    return arr
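A minimal usage sketch for the wrapper above, assuming it is in scope together with its image/preprocess_input imports; the VGG19/decode_predictions imports and the file name elephant.jpg are our additions for illustration:

from tensorflow.keras.applications.vgg19 import VGG19, decode_predictions

model = VGG19(weights='imagenet')
arr = load_img('elephant.jpg')        # (1, 224, 224, 3), BGR, zero-centered
preds = model.predict(arr)
print(decode_predictions(preds, top=3)[0])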
Example #20
def create_img_tensor(test_dir):
    img_path = os.path.join(test_dir, r'cats\cat.{}.jpg'.format(random.randint(1501, 1999)))
    plt.imshow(plt.imread(img_path))

    img = image.load_img(img_path, target_size=(150, 150))
    img_tensor = image.img_to_array(img)
    img_tensor = np.expand_dims(img_tensor, axis=0)
    img_tensor /= 255.
    #plt.imshow(img_tensor[0])
    return img_tensor
Example #21
def readImages(folder_path):
    images = []
    for i in range(len(folder_path)):
        img = load_img(folder_path[i], target_size=(224, 224, 3))
        # img = Image.open(folder_path[i])
        img = np.array(img, dtype=np.float32)
        images.append(img)

    images = np.array(images)
    return images
Example #22
def retrieve_similar_images(given_image, cosine_similarity_df):
    print('original image:')
    original = load_img(given_image, target_size=(imgs_width, imgs_height))
    plt.imshow(original)
    plt.show()

    print('most similar products........')

    closest_imgs = cosine_similarity_df[given_image].sort_values(
        ascending=False)[1:nb_closest_images + 1].index
    closest_imgs_scores = cosine_similarity_df[given_image].sort_values(
        ascending=False)[1:nb_closest_images + 1]

    for i in range(0, len(closest_imgs)):
        original = load_img(closest_imgs[i],
                            target_size=(imgs_width, imgs_height))
        plt.imshow(original)
        plt.show()
        print('similarity score:', closest_imgs_scores[i])
Example #23
def predict(file):
    x = load_img(file, target_size=(longitud, altura))
    x = img_to_array(x)
    x = np.expand_dims(x, axis=0)
    print("Prediction..")
    array = cnn.predict(x)  ## [1,0,0]
    result = array[0]
    answer = np.argmax(result)
    print(predicciones[answer])
    return answer
Example #24
def read_and_prep_images(train1_file,
                         img_height=image_size,
                         img_width=image_size):
    imgs = [
        load_img(img_path, target_size=(img_height, img_width))
        for img_path in train1_file
    ]
    img_array = np.array([img_to_array(img) for img in imgs])
    output = preprocess_input(img_array)
    return (output)
Example #25
def read_and_prep_image(image_path, model_name, img_width=IMAGE_WIDTH, img_height=IMAGE_HEIGHT):
    image_raw = load_img(image_path, target_size=(img_height, img_width))
    image_array = np.array([img_to_array(image_raw)])
    if model_name == "ResNet50":
        image = preprocess_input_ResNet50(image_array)
    elif model_name == "VGG16":
        image = preprocess_input_VGG16(image_array)
    elif model_name == "VGG19":
        image = preprocess_input_VGG19(image_array)
    else:
        raise ValueError("Unsupported model_name: {}".format(model_name))
    return image
Example #26
def run():

    my_model = load_model(filepath=model_path)

    testing_car_df = csv_to_dataFrame(testing_df_path=testing_df)

    img_name_list = list()

    # instantiate a dictionary and fill up keys
    for row_index, row in testing_car_df.iterrows():
        image_name = row['ImageFilename']
        # img_dict[image_name] = list()
        img_name_list.append(image_name)

    column_list = ['ImageFilename', classifier]

    result_df = pd.DataFrame(columns=column_list)
    df_list = list()

    for img_name in img_name_list:
        test_img_path = join(img_folder_dir, img_name)

        # img_path = 'glass_model_test_sample/no_damage_0.jpg'
        img = image.load_img(path=test_img_path,
                             grayscale=False,
                             target_size=(224, 224))
        x = image.img_to_array(img)
        # print('image_to_array: ', x)
        # print(x.shape)
        x = np.expand_dims(x, axis=0)
        # print('np_expand_dims', x)
        # print(x.shape) # current shape -- (1, 224, 224, 3)
        ## the image is now prepared -- for CNN, input must be a 4-D tensor [batch_size, width, height, channel]
        ## channel -- 1: gray scale, 3: RGB(red, green glue)

        prediction = my_model.predict(x)
        print(test_img_path)
        print('NoGlassDmg', '  ', 'YesGlassDmg')
        print(prediction)
        print(prediction[0][0], prediction[0][1])
        no_val = prediction[0][0]
        yes_val = prediction[0][1]

        if no_val >= yes_val:
            label = 'no'
        else:
            label = 'yes'

        one_row_data = [img_name, label]

        one_row_df = pd.DataFrame(data=[one_row_data], columns=column_list)

        result_df = result_df.append(one_row_df, ignore_index=True)

    csv_output(df=result_df, csv_output_name='bumper_damage_result.csv')
Example #27
 def create_test_data(self):
     sar_filename = self.load_sample(
         sar_dir=
         '/emwusr/xianzhengshi/2020_test/20200404/sar_seg/sar_256_test')
     test_data = np.ndarray(
         (len(sar_filename), self.img_cols, self.img_rows, 3))
     for num, name in enumerate(sar_filename):
         img = load_img(name)
         img = img_to_array(img)
         test_data[num] = img
     return test_data
Example #28
def preprocess_image(im_path, im_size, model_name):
    im = image.load_img(im_path, target_size=(im_size[0], im_size[1]))
    im = image.img_to_array(im)
    im = np.expand_dims(im, axis=0)
    if model_name == 'inception_v3':
        im = inception_v3.preprocess_input(im)
    elif model_name == 'resnet50':
        im = resnet50.preprocess_input(im)
    elif model_name == 'vgg16':
        im = vgg16.preprocess_input(im)
    return im
Example #29
 def densenet_extract_feat(self, img_path):
     img = image.load_img(img_path,
                          target_size=(self.input_shape[0],
                                       self.input_shape[1]))
     img = image.img_to_array(img)
     img = np.expand_dims(img, axis=0)
     img = preprocess_input_densenet(img)
     feat = self.model_densenet.predict(img)
     # print(feat.shape)
     norm_feat = feat[0] / LA.norm(feat[0])
     return norm_feat
Example #30
def _get_elephant(target_size):
    # For models that don't include a Flatten step,
    # the default is to accept variable-size inputs
    # even when loading ImageNet weights (since it is possible).
    # In this case, default to 299x299.
    if target_size[0] is None:
        target_size = (299, 299)
    test_image = data_utils.get_file('elephant.jpg', TEST_IMAGE_PATH)
    img = image.load_img(test_image, target_size=tuple(target_size))
    x = image.img_to_array(img)
    return np.expand_dims(x, axis=0)
Example #31
def get_image(size):
  """Returns an image loaded into an np.ndarray with dims [1, size, size, 3].

  Args:
    size: Size of image.

  Returns:
    np.ndarray.
  """
  img_filename = _resource_loader.get_path_to_datafile(
      "testdata/grace_hopper.jpg")
  img = image.load_img(img_filename, target_size=(size, size))
  img_array = image.img_to_array(img)
  img_array = np.expand_dims(img_array, axis=0)
  return img_array
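A short usage check for get_image, confirming the batch shape described in the docstring (the size 224 is an arbitrary choice for illustration):

batch = get_image(224)
assert batch.shape == (1, 224, 224, 3)   # single-image batch, channels last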
Example #32
def test():
    import os
    import numpy as np
    from PIL import Image
    from tensorflow.python.keras.preprocessing.image import load_img

    from models import Darknet19Encoder, Darknet19Decoder

    inputShape = (256, 256, 3)
    batchSize = 10
    latentSize = 100

    img = load_img(os.path.join('..','images', 'img.jpg'), target_size=inputShape[:-1])
    img.show()

    img = np.array(img, dtype=np.float32) / 255 - 0.5
    img = np.array([img]*batchSize) # make fake batches to improve GPU utilization

    # This is how you build the autoencoder
    encoder = Darknet19Encoder(inputShape, batchSize, latentSize, 'bvae', beta=69, capacity=15, randomSample=True)
    decoder = Darknet19Decoder(inputShape, batchSize, latentSize)
    bvae = AutoEncoder(encoder, decoder)

    bvae.ae.compile(optimizer='adam', loss='mean_absolute_error')
    while True:
        bvae.ae.fit(img, img,
                    epochs=100,
                    batch_size=batchSize)
        
        # example retrieving the latent vector
        latentVec = bvae.encoder.predict(img)[0]
        print(latentVec)

        pred = bvae.ae.predict(img) # get the reconstructed image
        pred[pred > 0.5] = 0.5 # clean it up a bit
        pred[pred < -0.5] = -0.5
        pred = np.uint8((pred + 0.5)* 255) # convert to regular image values

        pred = Image.fromarray(pred[0])
        pred.show() # display popup
Example #33
def load_imgs(img_paths, target_size):
    list_imgs = [img_to_array(load_img(path, target_size=target_size))
                 for path in img_paths]
    return np.array(list_imgs)
Example #34
    def predictMultipleImages(self, sent_images_array, result_count_per_image=2, input_type="file"):
        """
                The 'predictMultipleImages()' function predicts more than one image at a time and receives the following arguments:
                    * input_type, the type of inputs contained in the passed array. Acceptable values are "file", "array" and "stream"
                    * sent_images_array, an array of image file paths, image numpy arrays or image file streams
                    * result_count_per_image (optional), the number of predictions to be returned per image, which must be a whole number between
                        1 and 1000. The default is 2.

                This function returns an array of dictionaries, with each dictionary containing 2 arrays, namely 'prediction_results' and 'prediction_probabilities'. The 'prediction_results'
                array contains the possible object classes arranged in descending order of their percentage probabilities. The 'prediction_probabilities'
                array contains the percentage probability of each object class. The position of each object class in the 'prediction_results'
                array corresponds with the position of its percentage probability in the 'prediction_probabilities' array.


                :param input_type:
                :param sent_images_array:
                :param result_count_per_image:
                :return output_array:
                """

        output_array = []

        for image_input in sent_images_array:

            prediction_results = []
            prediction_probabilities = []
            if (self.__modelLoaded == False):
                raise ValueError("You must call the loadModel() function before making predictions.")

            else:

                if (self.__modelType == "squeezenet"):

                    from .imagenet_utils import preprocess_input, decode_predictions
                    if (input_type == "file"):
                        try:
                            image_to_predict = image.load_img(image_input, target_size=(self.__input_image_size, self.__input_image_size))
                            image_to_predict = image.img_to_array(image_to_predict, data_format="channels_last")
                            image_to_predict = np.expand_dims(image_to_predict, axis=0)

                            image_to_predict = preprocess_input(image_to_predict)
                        except:
                            raise ValueError("You have set a path to an invalid image file.")
                    elif (input_type == "array"):
                        try:
                            image_input = Image.fromarray(np.uint8(image_input))
                            image_input = image_input.resize((self.__input_image_size, self.__input_image_size))
                            image_input = np.expand_dims(image_input, axis=0)
                            image_to_predict = image_input.copy()
                            image_to_predict = np.asarray(image_to_predict, dtype=np.float64)
                            image_to_predict = preprocess_input(image_to_predict)
                        except:
                            raise ValueError("You have parsed in a wrong numpy array for the image")
                    elif (input_type == "stream"):
                        try:
                            image_input = Image.open(image_input)
                            image_input = image_input.resize((self.__input_image_size, self.__input_image_size))
                            image_input = np.expand_dims(image_input, axis=0)
                            image_to_predict = image_input.copy()
                            image_to_predict = np.asarray(image_to_predict, dtype=np.float64)
                            image_to_predict = preprocess_input(image_to_predict)
                        except:
                            raise ValueError("You have parsed in a wrong stream for the image")

                    model = self.__model_collection[0]

                    prediction = model.predict(image_to_predict, steps=1)

                    try:
                        predictiondata = decode_predictions(prediction, top=int(result_count_per_image))

                        for results in predictiondata:
                            countdown = 0
                            for result in results:
                                countdown += 1
                                prediction_results.append(str(result[1]))
                                prediction_probabilities.append(str(result[2] * 100))
                    except:
                        raise ValueError("An error occured! Try again.")

                    each_image_details = {}
                    each_image_details["predictions"] = prediction_results
                    each_image_details["percentage_probabilities"] = prediction_probabilities
                    output_array.append(each_image_details)

                elif (self.__modelType == "resnet"):

                    model = self.__model_collection[0]

                    from .imagenet_utils import preprocess_input, decode_predictions
                    if (input_type == "file"):
                        try:
                            image_to_predict = image.load_img(image_input, target_size=(self.__input_image_size, self.__input_image_size))
                            image_to_predict = image.img_to_array(image_to_predict, data_format="channels_last")
                            image_to_predict = np.expand_dims(image_to_predict, axis=0)

                            image_to_predict = preprocess_input(image_to_predict)
                        except:
                            raise ValueError("You have set a path to an invalid image file.")
                    elif (input_type == "array"):
                        try:
                            image_input = Image.fromarray(np.uint8(image_input))
                            image_input = image_input.resize((self.__input_image_size, self.__input_image_size))
                            image_input = np.expand_dims(image_input, axis=0)
                            image_to_predict = image_input.copy()
                            image_to_predict = np.asarray(image_to_predict, dtype=np.float64)
                            image_to_predict = preprocess_input(image_to_predict)
                        except:
                            raise ValueError("You have parsed in a wrong numpy array for the image")
                    elif (input_type == "stream"):
                        try:
                            image_input = Image.open(image_input)
                            image_input = image_input.resize((self.__input_image_size, self.__input_image_size))
                            image_input = np.expand_dims(image_input, axis=0)
                            image_to_predict = image_input.copy()
                            image_to_predict = np.asarray(image_to_predict, dtype=np.float64)
                            image_to_predict = preprocess_input(image_to_predict)
                        except:
                            raise ValueError("You have parsed in a wrong stream for the image")

                    prediction = model.predict(x=image_to_predict, steps=1)

                    try:
                        predictiondata = decode_predictions(prediction, top=int(result_count_per_image))

                        for results in predictiondata:
                            countdown = 0
                            for result in results:
                                countdown += 1
                                prediction_results.append(str(result[1]))
                                prediction_probabilities.append(str(result[2] * 100))
                    except:
                        raise ValueError("An error occured! Try again.")

                    each_image_details = {}
                    each_image_details["predictions"] = prediction_results
                    each_image_details["percentage_probabilities"] = prediction_probabilities
                    output_array.append(each_image_details)

                elif (self.__modelType == "densenet"):

                    model = self.__model_collection[0]

                    from .DenseNet.densenet import preprocess_input, decode_predictions
                    from .DenseNet.densenet import DenseNetImageNet121
                    if (input_type == "file"):
                        try:
                            image_to_predict = image.load_img(image_input, target_size=(self.__input_image_size, self.__input_image_size))
                            image_to_predict = image.img_to_array(image_to_predict, data_format="channels_last")
                            image_to_predict = np.expand_dims(image_to_predict, axis=0)

                            image_to_predict = preprocess_input(image_to_predict)
                        except:
                            raise ValueError("You have set a path to an invalid image file.")
                    elif (input_type == "array"):
                        try:
                            image_input = Image.fromarray(np.uint8(image_input))
                            image_input = image_input.resize((self.__input_image_size, self.__input_image_size))
                            image_input = np.expand_dims(image_input, axis=0)
                            image_to_predict = image_input.copy()
                            image_to_predict = np.asarray(image_to_predict, dtype=np.float64)
                            image_to_predict = preprocess_input(image_to_predict)
                        except:
                            raise ValueError("You have parsed in a wrong numpy array for the image")
                    elif (input_type == "stream"):
                        try:
                            image_input = Image.open(image_input)
                            image_input = image_input.resize((self.__input_image_size, self.__input_image_size))
                            image_input = np.expand_dims(image_input, axis=0)
                            image_to_predict = image_input.copy()
                            image_to_predict = np.asarray(image_to_predict, dtype=np.float64)
                            image_to_predict = preprocess_input(image_to_predict)
                        except:
                            raise ValueError("You have parsed in a wrong stream for the image")

                    prediction = model.predict(x=image_to_predict, steps=1)

                    try:
                        predictiondata = decode_predictions(prediction, top=int(result_count_per_image))

                        for results in predictiondata:
                            countdown = 0
                            for result in results:
                                countdown += 1
                                prediction_results.append(str(result[1]))
                                prediction_probabilities.append(str(result[2] * 100))
                    except:
                        raise ValueError("An error occured! Try again.")

                    each_image_details = {}
                    each_image_details["predictions"] = prediction_results
                    each_image_details["percentage_probabilities"] = prediction_probabilities
                    output_array.append(each_image_details)

                elif (self.__modelType == "inceptionv3"):

                    model = self.__model_collection[0]

                    from imageai.Prediction.InceptionV3.inceptionv3 import InceptionV3, preprocess_input, \
                        decode_predictions

                    if (input_type == "file"):
                        try:
                            image_to_predict = image.load_img(image_input, target_size=(self.__input_image_size, self.__input_image_size))
                            image_to_predict = image.img_to_array(image_to_predict, data_format="channels_last")
                            image_to_predict = np.expand_dims(image_to_predict, axis=0)

                            image_to_predict = preprocess_input(image_to_predict)
                        except:
                            raise ValueError("You have set a path to an invalid image file.")
                    elif (input_type == "array"):
                        try:
                            image_input = Image.fromarray(np.uint8(image_input))
                            image_input = image_input.resize((self.__input_image_size, self.__input_image_size))
                            image_input = np.expand_dims(image_input, axis=0)
                            image_to_predict = image_input.copy()
                            image_to_predict = np.asarray(image_to_predict, dtype=np.float64)
                            image_to_predict = preprocess_input(image_to_predict)
                        except:
                            raise ValueError("You have parsed in a wrong numpy array for the image")
                    elif (input_type == "stream"):
                        try:
                            image_input = Image.open(image_input)
                            image_input = image_input.resize((self.__input_image_size, self.__input_image_size))
                            image_input = np.expand_dims(image_input, axis=0)
                            image_to_predict = image_input.copy()
                            image_to_predict = np.asarray(image_to_predict, dtype=np.float64)
                            image_to_predict = preprocess_input(image_to_predict)
                        except:
                            raise ValueError("You have parsed in a wrong stream for the image")

                    prediction = model.predict(x=image_to_predict, steps=1)

                    try:
                        predictiondata = decode_predictions(prediction, top=int(result_count_per_image))

                        for results in predictiondata:
                            countdown = 0
                            for result in results:
                                countdown += 1
                                prediction_results.append(str(result[1]))
                                prediction_probabilities.append(str(result[2] * 100))
                    except:
                        raise ValueError("An error occured! Try again.")

                    each_image_details = {}
                    each_image_details["predictions"] = prediction_results
                    each_image_details["percentage_probabilities"] = prediction_probabilities
                    output_array.append(each_image_details)


        return output_array
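A hedged usage sketch for predictMultipleImages(). Only loadModel() and the two result keys are confirmed by the snippet itself; the ImagePrediction class name, the file names, and any model-path setup are assumptions for illustration:

predictor = ImagePrediction()                      # hypothetical enclosing class
predictor.loadModel()                              # required before predicting (see the ValueError above)
results = predictor.predictMultipleImages(
    ["car_1.jpg", "car_2.jpg"],                    # hypothetical file paths, input_type="file"
    result_count_per_image=3)
for item in results:
    for label, prob in zip(item["predictions"], item["percentage_probabilities"]):
        print(label, prob)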