def load_image(img_path, show=True):
    img_original = image.load_img(img_path)
    img = image.load_img(img_path, target_size=(64, 64))
    img_tensor = image.img_to_array(img)
    img_tensor = np.expand_dims(img_tensor, axis=0)
    img_tensor /= 255.
    if show:
        plt.imshow(img_original)
        plt.axis('off')
        plt.show()
    return img_tensor
def load_image(img_path, show=True):
    img_original = image.load_img(img_path)
    img = image.load_img(img_path, target_size=(64, 64))
    img_tensor = image.img_to_array(img)  # (height, width, channels)
    # Add a batch dimension: (height, width, channels) -> (1, height, width, channels),
    # because the model expects input of shape (batch_size, height, width, channels).
    img_tensor = np.expand_dims(img_tensor, axis=0)
    img_tensor /= 255.  # scale to [0, 1]; imshow expects float values in this range
    if show:
        plt.imshow(img_original)
        plt.axis('off')
        plt.show()
    return img_tensor
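# A minimal usage sketch for load_image, assuming a trained Keras binary classifier is
# already available as `model`; neither `model` nor the file name 'cat.jpg' comes from
# the snippet above, they are hypothetical.
img_tensor = load_image('cat.jpg', show=False)   # shape (1, 64, 64, 3), values in [0, 1]
probability = model.predict(img_tensor)[0][0]    # single sigmoid output
print('predicted probability:', probability)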
def predict(self, video_name) -> float:
    # The frame may live in either the real or the spoof test directory; try real first.
    try:
        img = image.load_img(self.base_path + '/Test_Data_Raw/real/' + video_name + '.jpg',
                             target_size=(480, 270))
    except Exception:
        img = image.load_img(self.base_path + '/Test_Data_Raw/spoof/' + video_name + '.jpg',
                             target_size=(480, 270))
    img = image.img_to_array(img)
    img = np.expand_dims(img, axis=0)
    return self.model.predict(img)[0][0]
def readData(self, path):
    dirs = os.listdir(path)
    dataImgPath = []
    metaData = []
    metaDataLabel = []
    temp = []
    for folder in dirs:
        if not folder.startswith("."):
            subFolder = path + folder
            subFolders = os.listdir(subFolder)
            if folder not in temp:
                temp.append(folder)
            for img in subFolders:
                if not img.startswith("."):
                    imgPath = subFolder + '/' + img
                    dataImgPath.append(imgPath)
    # The load order would otherwise change the label order, so sort the class folders numerically.
    temp = sorted(temp, key=int)
    print(temp)
    random.shuffle(dataImgPath)
    for path in dataImgPath:
        img = image.load_img(path)
        img = image.img_to_array(img)
        metaData.append(img)
        label = path.split("/")[2]
        for index in range(0, len(temp)):
            if label == temp[index]:
                metaDataLabel.append(index)
    return metaData, metaDataLabel
def visualize_predicttion(classifier, n_cases):
    # Loop through images
    for i in range(0, n_cases):
        # Set path for test images
        path = random.choice([test_LR_dir, test_RL_dir])
        # path = 'data/RandomLRR.jpg'
        # Get picture
        random_img = random.choice(os.listdir(path))
        img_path = os.path.join(path, random_img)
        img = image.load_img(img_path, target_size=(img_width, img_height))
        img_tensor = image.img_to_array(img)  # image data encoded as integers in the 0-255 range
        img_tensor /= 255.  # normalize to [0, 1] for matplotlib
        # Extract features with the convolutional base
        features = conv_base.predict(img_tensor.reshape(1, img_width, img_height, img_channel))
        # Make prediction
        try:
            prediction = classifier.predict(features)
        except Exception:
            prediction = classifier.predict(features.reshape(1, 7 * 7 * 512))
        # Show image with prediction
        plt.title(random_img)
        plt.imshow(img_tensor)
        plt.show()
        # Write prediction
        if prediction < 0.5:
            print('LR')
        else:
            print('RL')
def predict_one1(self, img_path):
    # Assumes the model is already loaded.
    # Epoch 18/100
    #  - 46s - loss: 0.4270 - acc: 0.8118 - val_loss: 0.4171 - val_acc: 0.8092
    # Epoch 00018: val_loss improved from 0.64311 to 0.41714, saving model to
    # ../../stage/models/default/builds/default-1544411418/default-improvement-18-0.417.hdf5
    # load ../../stage/models/default/builds/default-1544411418/default-improvement-18-0.417.hdf5
    # --> binary
    test_image = image.load_img(img_path, target_size=self.image_shape)
    test_image = image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    test_image = test_image * 1. / 255
    result = self.model.predict(test_image)
    # TODO: last-minute hard-coded labels for ImageNet tests
    class_indices = {'drunkard-n10037385': 0, 'not-drunk-n00007846': 1}
    # -- test code
    # print(result)
    # prediction = 'idk'
    # if result[0][0] >= 0.5:
    #     print('not')
    #     prediction = 'Not drunk'
    # else:
    #     print('drunk')
    #     prediction = 'Drunk'
    # print(prediction)
    # ---
    return 'likelihood of being sober: ' + str(result[0][0])
def get(self, resource_id):
    arr = ['taj.jpg', 'state.jpg', 'yos.jpg', 'waipio.jpg', 'owens.jpg', 'sonoma.jpg']
    model = tf.keras.applications.inception_v3.InceptionV3(include_top=True,
                                                           weights='imagenet',
                                                           input_tensor=None,
                                                           input_shape=None,
                                                           pooling=None,
                                                           classes=1000)
    # model = tf.keras.models.load_model('./inception.h5')
    for xi in arr:
        path = os.path.abspath('files/' + xi)
        img = image.load_img(path, target_size=(299, 299))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        preds = model.predict(x)
        # print(preds)
        print('Predicted:', decode_predictions(preds, top=5)[0])
    return {'images': 'data'}
def upload_file():
    if request.method == 'POST':
        if 'file' not in request.files:
            return redirect(request.url)
        file = request.files['file']
        if file.filename == '':
            return redirect(request.url)
        if file and allowed_file(file.filename):
            base_path = path.abspath(path.dirname(__file__))
            upload_path = path.join(base_path, 'static/planttest/test/')
            filename = upload_path + secure_filename(file.filename)
            file.save(filename)
            try:
                test_datagen = ImageDataGenerator(rescale=1. / 255)
                # Load from the saved path; the upload stream has already been consumed by file.save().
                img = image.load_img(filename, target_size=(224, 224))
                array_img = image.img_to_array(img, dtype='float32')
                test_generator = test_datagen.flow(array_img.reshape(1, 224, 224, 3), batch_size=1)
                pred = modelPlant.predict_generator(test_generator)
                result = decode_predictions_ill(pred)
                return json.dumps(result)
            except Exception:
                result = {'error': 'predict error...'}
                return json.dumps(result)
    return render_template('index.html')
def predict(self, mode, sample_image):
    test_image = image.load_img(sample_image, target_size=(154, 154))
    test_image = image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = self.FruitModel.predict(test_image)
    result = np.argmax(result)
    # k.clear_session()
    return result
def get_vector(img_path):
    img = image.load_img(img_path, target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    result = model.predict(x)
    result[result < 1e-4] = 0  # zero out near-zero activations to sparsify the feature vector
    return result.flatten()
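# A minimal sketch of using get_vector for image similarity, assuming `model` is a Keras
# feature extractor (e.g. a pretrained CNN without its top classifier) defined elsewhere;
# the file names below are hypothetical.
v1 = get_vector('query.jpg')
v2 = get_vector('candidate.jpg')
cosine = np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2) + 1e-10)
print('cosine similarity:', cosine)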
def show_pictures(path):
    random_img = random.choice(os.listdir(path))
    img_path = os.path.join(path, random_img)
    img = image.load_img(img_path, target_size=(img_width, img_height))
    img_tensor = image.img_to_array(img)  # image data encoded as integers in the 0-255 range
    img_tensor /= 255.  # normalize to [0, 1] for plt.imshow
    plt.imshow(img_tensor)
    plt.show()
def newImage():
    name = input("Name of file: ")
    random_pic = image.load_img(name, target_size=(64, 64))
    random_pic = image.img_to_array(random_pic)
    random_pic = np.expand_dims(random_pic, axis=0)
    result = model.predict(random_pic)
    if result[0][0] >= 0.5:
        print("dog")
    else:
        print("cat")
def transform(self, filename):
    img01 = image.load_img(self.dir_input + filename)
    size = img01.size
    if self.resize_max_pixel is not None:
        ratio = size[0] * size[1] / self.resize_max_pixel
        new_size = (int(size[0] / ratio), int(size[1] / ratio))
        img01 = img01.resize(new_size)
        size = img01.size
    my_image = np.array(img01).reshape(1, size[1], size[0], 3)
    return image.array_to_img(next(self.ImageDataGenerator.flow(my_image))[0])
def inference_label(self, model, img_path, IMSIZE, label):
    img = image.load_img(img_path, target_size=(IMSIZE, IMSIZE))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = self.normalization(x)
    predict_result = model.predict(x)
    print(predict_result[0])
    index = np.where(predict_result[0] == np.max(predict_result[0]))
    row_number = index[0][0]
    print(label[row_number])
    print(row_number)
    return row_number
def preprocessing(path, X, y):
    img_list = os.listdir(path)
    for img_name in img_list:
        img_path = path + '/' + img_name
        img = image.load_img(img_path, color_mode="grayscale", target_size=(150, 150))
        img_tensor = image.img_to_array(img)
        X.append(img_tensor)
        if path.split("/")[-1] == "NORMAL":
            y.append([0])
        else:
            y.append([1])
def predict_image(path, model):
    # Load the image.
    img1 = image.load_img(path, target_size=(224, 224))
    # Convert the image to an array and add a batch dimension.
    img = image.img_to_array(img1)
    img = np.expand_dims(img, axis=0)
    # Preprocess the image.
    img = preprocess_input(img)
    pred = model.predict(img)
    print(pred)
    return pred
def predict(filepath):
    # read image
    # img = cv2.imread(filepath)
    # resize image
    # img_resize = cv2.resize(img, (224, 224))
    img = image.load_img(filepath, target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    preds = loaded_model.predict(x)
    preds = decode_predictions(preds, top=3)[0]
    os.remove(filepath)
    return preds
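# A minimal usage sketch for predict(filepath), assuming `loaded_model` is an
# ImageNet-pretrained Keras model defined elsewhere; 'upload.jpg' is a hypothetical
# temporary file that the function deletes after predicting. decode_predictions
# returns (class_id, label, score) tuples.
top3 = predict('upload.jpg')
for class_id, label, score in top3:
    print(f'{label}: {score:.3f}')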
def swat_dcnn(input_image, swatdcnn_stage_1, swatdcnn_stage_2, swatdcnn_stage_3):
    # Load the image.
    img1 = image.load_img(input_image)
    plt.imshow(img1)
    img = image.load_img(input_image, target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = efn.preprocess_input(x)

    # CONDITIONS
    #################################################################################################
    # SWAT-DCNN (Stage-1)
    swatdcnn_stage_1_result = swatdcnn_stage1(x, swatdcnn_stage_1)

    #################################################################################################
    # SWAT-DCNN (Stage-2)
    # Run iff Stage-1 result is unhealthy.
    if swatdcnn_stage_1_result["is_unhealthy"]:
        swatdcnn_stage_2_result = swatdcnn_stage2(x, swatdcnn_stage_2)
    else:
        swatdcnn_stage_2_result = None

    #################################################################################################
    # SWAT-DCNN (Stage-3)
    # Run iff Stage-2 result is UNHEALTHY_BSL.
    if swatdcnn_stage_2_result and swatdcnn_stage_2_result["disease"] == UNHEALTHY_BSL:
        swatdcnn_stage_3_result = swatdcnn_stage3(x, swatdcnn_stage_3)
    else:
        swatdcnn_stage_3_result = None

    return {
        "swatdcnn_stage1": swatdcnn_stage_1_result,
        "swatdcnn_stage2": swatdcnn_stage_2_result,
        "swatdcnn_stage3": swatdcnn_stage_3_result
    }
def upload(request):
    latest = UploadImage.objects.last()
    # print("Latest Image", latest.predict_image.url)
    # filename = latest.predict_image.url
    # File path
    filename = os.path.join(BASE_DIR + "/" + latest.predict_image.url)
    file_path = os.path.join(BASE_DIR, 'models/modelMultipleClass2.h5')
    # CNN prediction
    model = load_model(file_path)
    img = image.load_img(filename, target_size=(150, 150))
    img = image.img_to_array(img)
    img = np.expand_dims(img, axis=0)
    result = model.predict(img)
    predicted_class_indices = np.argmax(result, axis=1)
    probabilities = model.predict_proba(img)
    if predicted_class_indices[0] == 0:
        class_name = "Apple"
        prediction = "Frogeye_Spot"
    elif predicted_class_indices[0] == 1:
        class_name = "Apple"
        prediction = "Healthy"
    elif predicted_class_indices[0] == 2:
        class_name = "Tomato"
        prediction = "Leaf_Mold"
    elif predicted_class_indices[0] == 3:
        class_name = "Tomato"
        prediction = "Healthy"
    form = AddPred(request.POST, request.FILES, instance=latest)
    if form.is_valid():
        updatePred = form.save(commit=False)
        updatePred.prediction = prediction
        updatePred.save()
        form = AddPred(instance=latest)
        # form.save()
    context_dict = {
        'imagePath': latest,
        'class': class_name,
        'prediction': prediction,
        'form': form,
    }
    return render(request, 'result.html', context_dict)
def find_image(test_model, img_path='Test_data/01.jpg'):
    try:
        img = image.load_img(img_path, target_size=(224, 224))
        img = np.asarray(img)
        img = np.expand_dims(img, axis=0)
        plt.imshow(img[0])
        output = list(test_model.predict(img)[0])
        print(output)
        maxindex = output.index(max(output))
        # The reply text is Chinese for "Did you mean to search for: <product>".
        return {'text': '您是否是要查詢:' + str(product_list[maxindex])}
    except Exception as e:
        return {'text': str(e).split(':')[0]}
def pred_model(imgsrc):
    model = tf.keras.applications.inception_v3.InceptionV3(include_top=True,
                                                           weights='imagenet',
                                                           input_tensor=None,
                                                           input_shape=None,
                                                           pooling=None,
                                                           classes=1000)
    img = image.load_img(imgsrc, target_size=(299, 299))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    preds = model.predict(x)
    # print(preds)
    print('Predicted:', decode_predictions(preds, top=5)[0])
def test_image(path, output):
    test_image = image.load_img(path, target_size=(224, 224))
    test_image = image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = loaded_model.predict(test_image)
    confidence = ["{:.10f}".format(r) for r in result[0]]
    if output:
        print("Confidence: ", result)
    if result[0][0] == result[0][1]:
        if output:
            print("Thyroid Type: Unknown. Unable to classify this image.")
        return "Unknown", confidence
    elif np.argmax(result, axis=1) == 0:
        if output:
            print("Thyroid Type: Benign")
        return "Benign", confidence
    else:
        if output:
            print("Thyroid Type: Malignant")
        return "Malignant", confidence
def __getitem__(self, index):
    """Generate one batch of data."""
    # Select the indices of the data for the next batch.
    indexes = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]
    # Select data and load images.
    labels = np.array([self.labels[k] for k in indexes])
    images = [
        image.img_to_array(image.load_img(self.images_paths[k], target_size=(224, 224)))
        for k in indexes
    ]
    # Preprocess and augment the data.
    if self.augment:
        images = self.augmentor(images)
    images = np.array([preprocess_input(img) for img in images])
    return images, labels
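# A minimal sketch of the keras.utils.Sequence generator that the __getitem__ above
# would belong to. The attribute names mirror those used in the snippet, while the
# class name ImageSequence and the augment/augmentor defaults are assumptions.
class ImageSequence(tf.keras.utils.Sequence):
    def __init__(self, images_paths, labels, batch_size=32, augment=False, augmentor=None):
        self.images_paths = images_paths
        self.labels = labels
        self.batch_size = batch_size
        self.augment = augment
        self.augmentor = augmentor
        self.indexes = np.arange(len(self.images_paths))

    def __len__(self):
        # Number of batches per epoch.
        return len(self.images_paths) // self.batch_size

    def on_epoch_end(self):
        # Reshuffle the sample order after every epoch.
        np.random.shuffle(self.indexes)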
def getPredictions(self, model, test_dir):
    # Obtains predictions for every image in a test directory.
    count = 0
    imageFormat = '.png'
    fileList = [
        os.path.join(test_dir, f) for f in os.listdir(test_dir)
        if f.endswith(imageFormat)
    ]
    for imagename in fileList:
        img = image.load_img(imagename,
                             target_size=(img_width, img_height),
                             color_mode="grayscale")
        img = image.img_to_array(img)
        img = img / 255
        img = np.expand_dims(img, axis=0)
        classes = model.predict_classes(img)
        count = count + 1
        print(classes)
def get_ops(self):
    images = []
    ops = []
    for i, j in zip(self.path, self.path_list):
        for k in j:
            img = image.load_img(os.path.join(i, k), target_size=(299, 299))
            x = image.img_to_array(img)
            # x = np.expand_dims(x, axis=0)
            x = preprocess_input(x)
            images.append(x)
    print('get_images', np.array(images).shape)
    features = self.model.predict(np.array(images))
    ops.append(np.squeeze(np.array(decode_predictions(features, top=1))))
    return ops
def classify(img_path, fig=None, rows=1, cols=1, i=1):
    img = image.load_img(img_path,
                         target_size=(img_height, img_width),
                         color_mode="grayscale")
    imgArray = np.expand_dims(image.img_to_array(img), 0)
    datagen.standardize(imgArray)
    predict = model.predict(imgArray)
    predict_classes = model.predict_classes(imgArray)
    result = [classes[i] for i in predict_classes]
    print(f'{img_path} --> predict: {predict} predict_classes: {predict_classes} class: {result}')
    if fig is not None:
        fig.add_subplot(rows, cols, i, title=result)
        plt.imshow(img, cmap='gray', vmin=0, vmax=255)
def loadImages(start, stop, csvFile):
    """
    Loads the images named in the CSV file and returns the dataset containing them.

    :param start: starting index in the CSV
    :param stop: ending index in the CSV
    :param csvFile: CSV file containing the labels
    :return: dataset containing stop-start images
    """
    dataset = []
    for i in tqdm(range(start, stop)):
        # print(DATASET_PATH + "/" + csvLabels.loc[i]["image_id"])
        # print(csvFile.loc[i]["image_id"])
        img = image.load_img(DATASET_PATH + "/" + csvFile.loc[i]["image_id"],
                             target_size=IMAGE_DIMS)
        img = image.img_to_array(img)
        img = img / 255
        dataset.append(img)
    return dataset
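# A minimal usage sketch for loadImages, assuming the labels CSV is read with pandas and
# that DATASET_PATH and IMAGE_DIMS are module-level constants as the function implies;
# the file name 'labels.csv' is hypothetical.
import pandas as pd

csvLabels = pd.read_csv('labels.csv')          # must contain an "image_id" column
train_images = loadImages(0, 1000, csvLabels)  # first 1000 images, scaled to [0, 1]
print(len(train_images), train_images[0].shape)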
def predict_one3(self, img_path):
    # --- categorical
    # ../../stage/models/insta-cog/builds/insta-cog-1544592705/insta-cog
    # ../../stage/models/insta-cog/split_data/train
    #  - 49s - loss: 0.8492 - acc: 0.6170 - val_loss: 0.8661 - val_acc: 0.6020
    # Epoch 00193: val_loss improved from 0.88596 to 0.86613, saving model to
    # ../../stage/models/insta-cog/builds/insta-cog-1544592705/insta-cog-improvement-193-0.866.hdf5
    # ---
    class_list = ['drunkselfie', 'soberselfie', 'stonedselfie']
    test_image = image.load_img(img_path, target_size=self.image_shape)
    test_image = image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    test_image = test_image * 1. / 255
    predicted_vector = self.model.predict(test_image)  # vector with the probability of each class
    # print(predicted_vector)
    # print(class_list[np.argmax(predicted_vector)])  # prints the element with the highest probability
    return class_list[0] + ': ' + str(predicted_vector[0][0]) + ', ' \
        + class_list[1] + ': ' + str(predicted_vector[0][1]) + ', ' \
        + class_list[2] + ': ' + str(predicted_vector[0][2])
def predict(image_upload):
    # img_width, img_height = 150, 150
    img_width, img_height = 224, 224
    # Load the image you want to make a prediction for.
    img = image.load_img(image_upload, target_size=(img_width, img_height))
    img_tensor = image.img_to_array(img)
    img_tensor = np.expand_dims(img_tensor, axis=0)
    img_tensor /= 255
    pred = model.predict(img_tensor)
    index_predict = np.argmax(pred[0])
    dict_labels = {
        0: 'Contemporary',
        1: 'Midcentury',
        2: 'Traditional',
        3: 'Transitional'
    }
    return dict_labels[index_predict]
def predict(request):
    ans = ['daisy', 'sunflower', 'rose', 'sunflower', 'tulip']
    need = Flower.objects.last()
    predict_path = "C:\\Users\\DELL\\Desktop\\florista\\media\\" + str(need.flower_image)
    test_image = image.load_img(predict_path, target_size=(128, 128))
    test_image = image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    ans2 = 0
    result = result[0]
    result = result.tolist()
    i = 0
    # Map the one-hot prediction back to a flower name.
    for x in result:
        if x == 1:
            ans2 = ans[i]
        i = i + 1
    need.flower_name = ans2
    need.save()
    context = {
        'ans2': ans2,
        'need': need,
    }
    return context