def load_img_scaled(input_path, target_shape, grayscale=False):
    return np.expand_dims(
        image.img_to_array(
            image.load_img(
                input_path, target_size=target_shape, grayscale=grayscale)) / 255.0,
        axis=0)
def load_img(input_path, target_shape, grayscale=False, mean=None, std=None):
    img = image.load_img(
        input_path, target_size=target_shape, grayscale=grayscale)
    img_arr = np.expand_dims(image.img_to_array(img), axis=0)
    if not grayscale:
        img_arr = preprocess_input(img_arr, mean=mean, std=std)
    return img_arr
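# Usage sketch for the two loaders above (hypothetical file path; `model` is
# assumed to be an already-compiled Keras model, not part of the original code):
batch_scaled = load_img_scaled('samples/cat.jpg', (192, 192))  # (1, 192, 192, 3), values in [0, 1]
batch_norm = load_img('samples/cat.jpg', (192, 192))           # batch normalized via preprocess_input
print(model.predict(batch_norm))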
def DataSet():
    train_path_0 = 'class0_train/'
    train_path_1 = 'class1_train/'
    test_path_0 = 'class0_test/'
    test_path_1 = 'class1_test/'

    imglist_train_0 = os.listdir(train_path_0)
    imglist_train_1 = os.listdir(train_path_1)
    imglist_test_0 = os.listdir(test_path_0)
    imglist_test_1 = os.listdir(test_path_1)

    X_train = np.empty(
        (len(imglist_train_0) + len(imglist_train_1), 192, 192, 3))
    Y_train = np.empty((len(imglist_train_0) + len(imglist_train_1), 2))

    count = 0
    for img_name in imglist_train_0:
        img_path = train_path_0 + img_name
        img = image.load_img(img_path, target_size=(192, 192))
        img = image.img_to_array(img) / 255.0
        X_train[count] = img
        Y_train[count] = np.array((1, 0))
        count += 1
    for img_name in imglist_train_1:
        img_path = train_path_1 + img_name
        img = image.load_img(img_path, target_size=(192, 192))
        img = image.img_to_array(img) / 255.0
        X_train[count] = img
        Y_train[count] = np.array((0, 1))
        count += 1

    X_test = np.empty((len(imglist_test_0) + len(imglist_test_1), 192, 192, 3))
    Y_test = np.empty((len(imglist_test_0) + len(imglist_test_1), 2))

    count = 0
    for img_name in imglist_test_0:
        img_path = test_path_0 + img_name
        img = image.load_img(img_path, target_size=(192, 192))
        img = image.img_to_array(img) / 255.0
        X_test[count] = img
        Y_test[count] = np.array((1, 0))
        count += 1
    for img_name in imglist_test_1:
        img_path = test_path_1 + img_name
        img = image.load_img(img_path, target_size=(192, 192))
        img = image.img_to_array(img) / 255.0
        X_test[count] = img
        Y_test[count] = np.array((0, 1))
        count += 1

    # Shuffle the order of the training pictures (repeated three times)
    # 1st time
    index = [i for i in range(len(X_train))]
    np.random.shuffle(index)
    X_train = X_train[index]
    Y_train = Y_train[index]
    # 2nd time
    index = [i for i in range(len(X_train))]
    np.random.shuffle(index)
    X_train = X_train[index]
    Y_train = Y_train[index]
    # 3rd time
    index = [i for i in range(len(X_train))]
    np.random.shuffle(index)
    X_train = X_train[index]
    Y_train = Y_train[index]

    # Shuffle the order of the testing pictures (repeated three times)
    # 1st time
    index = [i for i in range(len(X_test))]
    np.random.shuffle(index)
    X_test = X_test[index]
    Y_test = Y_test[index]
    # 2nd time
    index = [i for i in range(len(X_test))]
    np.random.shuffle(index)
    X_test = X_test[index]
    Y_test = Y_test[index]
    # 3rd time
    index = [i for i in range(len(X_test))]
    np.random.shuffle(index)
    X_test = X_test[index]
    Y_test = Y_test[index]

    return X_train, Y_train, X_test, Y_test
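# Note on the triple shuffle above: a single permutation is already uniformly
# random, so one pass suffices. A minimal self-contained equivalent
# (hypothetical helper, not part of the original code):
def shuffle_together(x, y):
    perm = np.random.permutation(len(x))
    return x[perm], y[perm]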
def visualize_features_output(model, array_files):
    all_list = []
    # -----------------------------------------------------
    # Get the outputs of the layers of the model:
    # redefine the model to output right after the first hidden layer
    # -----------------------------------------------------
    successive_outputs = [layer.output for layer in model.layers[1:]]
    # -----------------------------------------------------
    # Build a new model with the functional API
    # -----------------------------------------------------
    visualization_model = Model(inputs=model.input, outputs=successive_outputs)
    # -----------------------------------------------------
    # Randomly choose an image from the cat and dog training sets
    # -----------------------------------------------------
    for array in array_files:
        all_list.extend(array)
    img_path = random.choice(all_list)
    img = load_img(img_path, target_size=(150, 150))
    # -----------------------------------------------------
    # Preprocessing
    # -----------------------------------------------------
    img_array = img_to_array(img)
    img_array = img_array.reshape((1, ) + img_array.shape)
    img_array /= 255.0
    # -----------------------------------------------------
    # Prediction
    # -----------------------------------------------------
    successive_feature_maps = visualization_model.predict(img_array)
    # -----------------------------------------------------
    # Get the layers' names
    # -----------------------------------------------------
    layer_names = [layer.name for layer in model.layers[1:]]
    for layer_name, feature_map in zip(layer_names, successive_feature_maps):
        if len(feature_map.shape) == 4:
            n_features = feature_map.shape[-1]
            size = feature_map.shape[1]
            display_grid = np.zeros((size, size * n_features))
            for i in range(n_features):
                x = feature_map[0, :, :, i]
                x = normalize(x, None, alpha=0, beta=255,
                              norm_type=NORM_MINMAX, dtype=CV_32F)
                x = x.astype(np.uint8)
                display_grid[:, i * size:(i + 1) * size] = x
            scale = 40. / n_features
            plt.figure(figsize=(scale * n_features, scale))
            plt.title(layer_name)
            plt.grid(False)
            plt.imshow(display_grid, aspect='auto', cmap='viridis')
def preprocess_image(file_path):
    img = image.load_img(file_path, target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    return x
        face_array = np.asarray(image)
        x1, y1 = abs(x1) / 160.0, abs(y1) / 160.0
        x2, y2 = x1 + width / 160.0, y1 + height / 160.0
        faces_array_coords.append([(x1, y1), (x2, y2)])
        faces_array.append(face_array)
    if return_coords:
        return faces_array, faces_array_coords
    return faces_array


def img_to_encoding(image_path):
    """This face recognition system is built on the Siamese network
    architecture; this function converts an image into its representation."""
    loaded = load_img(image_path)
    resized = loaded.resize([160, 160])
    resized = np.array(resized)
    resized = tf.expand_dims(resized, 0)
    encoding = tf.squeeze(model(resized))
    return encoding


database = {'shahid': img_to_encoding(base_path + "assets/shahid.jpg")}  # In this DB each person has only one picture.
list_style_db = {'shahid': [img_to_encoding(base_path + 'assets/' + file)
                            for file in assets
                            if re.search(r"shahid_\d*", file)]}  # In this DB each person can have multiple faces to compare to.
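# A minimal verification sketch under the Siamese setup above: compare a new
# encoding against the stored one by L2 distance. The threshold value is a
# hypothetical choice, not taken from the original code.
def verify(image_path, identity, threshold=0.7):
    dist = float(tf.norm(img_to_encoding(image_path) - database[identity]))
    return dist < threshold, dist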
import os
import random
from shutil import copyfile

import numpy as np  # needed below for np.asarray / np.save
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.preprocessing.image import img_to_array

folder = 'data/original_data/train/'
images = []
categories = []
for file in os.listdir(folder):
    y = 0
    if file.startswith('cat'):
        y = 1
    image = load_img(folder + file, target_size=(150, 150))
    image = img_to_array(image)
    images.append(image)
    categories.append(y)

images = np.asarray(images)
categories = np.asarray(categories)
print(f"Shape of image array: {images.shape}")
print(f"Shape of category array: {categories.shape}")

np.save(os.path.join('data/', "cats_and_dogs_all_images.npy"), images)
np.save(os.path.join('data/', "cats_and_dogs_all_categories.npy"), categories)

# Make folders for sorted data
data_folder = 'data/sorted_data/'
import numpy as np
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications import resnet50

# Load the ResNet50 model pre-trained by Keras
model = resnet50.ResNet50()

# Load the image, resized to 224x224 pixels
img = image.load_img("dataset/bay.jpg", target_size=(224, 224))

# Convert the image to a numpy array
x = image.img_to_array(img)

# Keras predicts on a batch of images, so add one more dimension
x = np.expand_dims(x, axis=0)

# Normalize the data for the resnet50 model
x = resnet50.preprocess_input(x)

# Run the prediction
predictions = model.predict(x)

# Look up the image's labels
predicted_classes = resnet50.decode_predictions(predictions, top=9)
print("This is an image of:")
for imagenet_id, name, likelihood in predicted_classes[0]:
    print(" - {}: {:.2f} likelihood".format(name, likelihood))
from __future__ import absolute_import, division, print_function, unicode_literals  # must come first in the file

print(_target_ys.shape)

model_kwargs = {}
wrapper_kwargs = {}
w_file = 'densenet121_resisc45_v1.h5'
model = get_art_model(model_kwargs, wrapper_kwargs, w_file)
print(model)

images_list, target_image = list(), None
labels = []
dir = 'resisc45/'
NUM = 0
for image_path in tqdm(sorted(os.listdir('resisc45/'))):
    label = int(image_path.split('.')[0])
    labels.append(label)
    im = image.load_img(dir + image_path, target_size=(224, 224))
    im = image.img_to_array(im)
    im = np.expand_dims(im, axis=0)
    im = preprocessing_fn(im)
    images_list.append(im)

mean, std = mean_std()
UP = art.attacks.evasion.UniversalPerturbation(
    classifier=model, attacker='pgd', attacker_params=None,
    delta=0.2, max_iter=20, eps=10.0, norm=math.inf)
print(UP)

batch = 45
X = images_list[:batch]
X = np.concatenate(X, axis=0)
timestamp = datetime.fromtimestamp(time()).strftime('%Y%m%d-%H%M%S')
output_directory = "{}{}/".format(OUTPUT_DIRECTORY, timestamp)
if not os.path.exists(output_directory):
    os.makedirs(output_directory)

preprocessing_times = []
inference_times = []
all_pred = []  # adding this to store the predictions from inference
full_pred = []

pics_names = os.listdir("/home/zhangguofeng/temp/plant1")
for i in range(len(pics_names)):
    pics_dir = "/home/zhangguofeng/temp/plant1/" + pics_names[i]
    # print(pics_dir)
    start_time = time()
    img = image.load_img(pics_dir, target_size=(384, 224))
    # Map to batch
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    # Scale from int to float
    preprocessing_time = time() - start_time

    start_time = time()
    # Predict label
    feed_dict = {input_tensor_name: x}
    predictions = tf_sess.run(output_tensor, feed_dict)
    inference_time = time() - start_time

    for prediction in predictions:
        print(prediction)
        new_list = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        negative_class_flag = True
        for index in range(len(prediction)):
file = open('models/classes_all_filtered.pkl', 'rb')
classes = pickle.load(file)

# Load model
model = TransferModel('ResNet', INPUT_SHAPE, classes)
model.load('models/resnet_unfreeze_all_filtered.tf')

# Get random file
sample_path = sample(file_paths, 1)[0]

# Get label
parts = sample_path.split('/')[-1]
label = parts.split('_')[0]

# Load and prepare (normalize) image
img = image.load_img(sample_path, target_size=(224, 224))
img = image.img_to_array(img)
img /= 255.0

# Show image
plt.figure()
plt.imshow(img)
plt.axis('off')
plt.show()

# Reshape image to (batch_size, height, width, channel)
img = img.reshape(-1, *img.shape)

# Get label index
label_idx = np.where(np.array(classes) == label)[0][0]
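# A plausible next step (not from the original snippet): run the model on the
# reshaped batch and compare the top prediction against the true label. This
# assumes the custom TransferModel class exposes a Keras-style predict().
pred = model.predict(img)
pred_idx = int(np.argmax(pred, axis=1)[0])
print(f"Predicted: {classes[pred_idx]}, actual: {label}")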
'''
Loads a trained model and classifies an image.

argv[1]: path to hdf5 model to load
argv[2]: path to image to classify
'''
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.image import load_img, img_to_array, array_to_img
import sys
import time
import numpy as np

model = load_model(sys.argv[1])
img = load_img(sys.argv[2], target_size=(72, 72))
x = img_to_array(img)
x = np.expand_dims(x, axis=0)

start_time = time.time()
prediction = model.predict(x, verbose=1)[0][0]
end_time = time.time()

labels = ['LC', 'NLC']
rounded = int(round(prediction))
result = labels[rounded]
old_range = 0.5
new_range = 100.0
if rounded:
vgg_face = Model(inputs=model.layers[0].input, outputs=model.layers[-2].output)

# Data to train the model
x_train = []
y_train = []
persons = dict()  # creating the persons dictionary

# The following directory has the training dataset; the name of each folder is the person's name
person_folders = os.listdir(
    'C:\\Users\\Admin\\Desktop\\Face recognition\\AttendanceSystem\\Training_images'
)
for i, person in enumerate(person_folders):
    persons[i] = person
    images = os.listdir('Training_images/' + person + '/')
    for image in images:
        img = load_img('Training_images/' + person + '/' + image,
                       target_size=(224, 224))
        img = img_to_array(img)
        img = np.expand_dims(img, axis=0)
        img = preprocess_input(img)
        img_encode = vgg_face(img)
        x_train.append(np.squeeze(K.eval(img_encode)).tolist())
        y_train.append(i)
# print(persons)

# Prepare test data
x_test = []
y_test = []
persons = dict()
person_folders = os.listdir(
    'C:\\Users\\Admin\\Desktop\\Face recognition\\AttendanceSystem\\Testing_Images'
)
for i, person in enumerate(person_folders):
def index():
    if request.method == "POST":
        type_ = request.form.get("type", None)
        data = None
        final_json = []
        if 'img' in request.files:
            file_ = request.files['img']
            name = os.path.join(tempfile.gettempdir(), str(uuid.uuid4().hex[:10]))
            file_.save(name)
            print("[DEBUG: %s]" % dt.now(), name)
            if (type_ == 'tom' or type_ == 'grape' or type_ == 'corn'
                    or type_ == 'potato'):
                test_image = image.load_img(name, target_size=(256, 256))
                test_image = image.img_to_array(test_image)
                test_image = test_image / 255
                test_image = np.expand_dims(test_image, axis=0)
                data = test_image
                # model = get_model(type_)[0]
                # model = load_model("static/weights/tomato.h5")
                # type_ = "tom"
            if (type_ == 'tom'):
                model = load_model("static/weights/tomato.h5")
                pred_val = translate_tomato(model.predict(data))
                final_json.append({
                    "empty": False,
                    "type": type_,
                    "pred_val": pred_val
                })
                # final_json.append({"empty": False, "type": type_,
                #                    "pred_val": warn})
            elif (type_ == 'grape'):
                model = load_model("static/weights/grape.h5")
                pred_val = translate_grape(model.predict(data))
                final_json.append({
                    "empty": False,
                    "type": type_,
                    "pred_val": pred_val
                })
            elif (type_ == 'corn'):
                model = load_model("static/weights/corn.h5")
                pred_val = translate_corn(model.predict(data))
                final_json.append({
                    "empty": False,
                    "type": type_,
                    "pred_val": pred_val
                })
            elif (type_ == 'potato'):
                model = load_model("static/weights/potato.h5")
                pred_val = translate_potato(model.predict(data))
                final_json.append({
                    "empty": False,
                    "type": type_,
                    "pred_val": pred_val
                })
            else:
                warn = "Feeding blank image won't work. Please enter an input image to continue."
                pred_val = " "
                final_json.append({
                    "pred_val": warn,
                    "Tomato___Bacterial_spot": " ",
                    "Tomato___Early_blight": " ",
                    "Tomato___Late_blight": " ",
                    "Tomato___Leaf_Mold": " ",
                    "Tomato___Septoria_leaf_spot": " ",
                    "Tomato___Spider_mites Two-spotted_spider_mite": " ",
                    "Tomato___Target_Spot": " ",
                    "Tomato___Tomato_Yellow_Leaf_Curl_Virus": " ",
                    "Tomato___Tomato_mosaic_virus": " ",
                    "Tomato___healthy": " ",
                })
        K.clear_session()
        return jsonify(final_json)
    return jsonify({"empty": True})
ngpus = 4
batch_size = int(sys.argv[1])
epochs = int(sys.argv[2])

image_list = list(iglob('../dataset/*/*'))
image_list.sort()
print(f'=> Found {len(image_list)} images <=')

w = []
h = []
# max_w = 1928
# max_h = 64
for img in tqdm(image_list):
    shape = img_to_array(load_img(img)).shape  # load each image once instead of twice
    w.append(shape[1])
    h.append(shape[0])

max_w = max(w) if max(w) % 2 == 0 else max(w) + 1
assert len(set(h)) == 1
max_h = h[0]
print(f'=> Max width of images {max_w} <=')

labels = ' '.join([x.split('/')[-1].split('_')[0] for x in image_list])
vocab = sorted(list(set(labels)))
vocab_size = len(vocab)
print(f'=> Vocab size of dataset {vocab_size} <=')

letter_idx = {x: idx for idx, x in enumerate(vocab)}
idx_letter = {v: k for k, v in letter_idx.items()}

string_lens = [
from tensorflow.keras.preprocessing.image import ImageDataGenerator

train_data_gen = ImageDataGenerator(rescale=1. / 255)
train_generator = train_data_gen.flow_from_directory(
    '/Users/daisy/Downloads/horse-or-human',
    target_size=(300, 300),
    batch_size=28,
    class_mode='binary')

callback = Callback()
history = model.fit(train_generator,
                    epochs=56,
                    steps_per_epoch=8,
                    callbacks=[callback])

validation = image.load_img('/Users/daisy/Desktop/validation/hm.jpeg',
                            target_size=(300, 300))
img = image.img_to_array(validation)
img /= 255
img = np.resize(img, (1, 300, 300, 3))
print(model.predict(img))
print(train_generator.class_indices)
plt.imshow(img[0, :])

act_layers = model.layers
layer_output = [i.output for i in act_layers]
activation = tf.keras.models.Model(inputs=model.input, outputs=layer_output)
fig, ax = plt.subplots(8, 10, figsize=(26, 26))
demo = activation.predict(img)
for i in range(len(act_layers)):
from tensorflow.keras.preprocessing.image import img_to_array, load_img

# Let's define a new Model that will take an image as input, and will output
# intermediate representations for all layers in the previous model after
# the first.
successive_outputs = [layer.output for layer in model.layers[1:]]
# visualization_model = Model(img_input, successive_outputs)
visualization_model = tf.keras.models.Model(inputs=model.input,
                                            outputs=successive_outputs)

# Let's prepare a random input image of a cat or dog from the training set.
cat_img_files = [os.path.join(train_cats_dir, f) for f in train_cat_fnames]
dog_img_files = [os.path.join(train_dogs_dir, f) for f in train_dog_fnames]
img_path = random.choice(cat_img_files + dog_img_files)

img = load_img(img_path, target_size=(150, 150))  # this is a PIL image
x = img_to_array(img)  # Numpy array with shape (150, 150, 3)
x = x.reshape((1,) + x.shape)  # Numpy array with shape (1, 150, 150, 3)

# Rescale by 1/255
x /= 255.0

# Let's run our image through our network, thus obtaining all
# intermediate representations for this image.
successive_feature_maps = visualization_model.predict(x)

# These are the names of the layers, so we can have them as part of our plot.
# Skip the first layer to stay aligned with successive_outputs above.
layer_names = [layer.name for layer in model.layers[1:]]

# -----------------------------------------------------------------------
def get_image(self, path, img_size):
    return load_img(path, target_size=[img_size[0], img_size[1]])
def preprocess_img(image):
    image = tf.cast(image, tf.float32)
    image = tf.image.resize(image, (224, 224))
    image = preprocess_input(image)
    image = image[None, ...]
    return image


def get_label(logits):
    label = decode_predictions(logits, top=1)[0][0]
    return label


img = load_img('../assets/dog1.jpg', color_mode="rgb")
img = img_to_array(img)
img = preprocess_img(img)

preds = model.predict(img)
_, image_class, class_confidence = get_label(preds)
print(image_class, class_confidence)


def fgsm(x, y_adv, epsilon):
    loss_func = tf.keras.losses.CategoricalCrossentropy()
    with tf.GradientTape() as gt:
        gt.watch(x)
        label = model(x)
        loss = loss_func(y_adv, label)
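    # (The snippet is truncated here. A standard FGSM step would finish
    # roughly like this -- an assumed completion, not the original code.)
    gradient = gt.gradient(loss, x)          # gradient of the loss w.r.t. the input
    x_adv = x + epsilon * tf.sign(gradient)  # perturb in the gradient's sign direction
    return x_adv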
label_names = [line.rstrip('\n') for line in open(args.labels)]
print(label_names)

if not args.savedmodel:
    model = keras.models.load_model(args.model)
else:
    model = tf.contrib.saved_model.load_keras_model(args.savedmodel)

imgs = []
uris = [args.input]
if os.path.isdir(args.input):
    uris = glob.glob(args.input + '/*.jpg')
for uri in uris:
    print(uri)
    img = image.load_img(uri, target_size=(args.size, args.size))
    img = image.img_to_array(img)
    imgs.append(img)

x = preprocess_input(np.array(imgs))
preds = model.predict(x, batch_size=2, verbose=1)

tops = []
tops_labels = []
tops_confidents = []
for pred in preds:
    top = pred.argsort()[-5:][::-1]
    tops.append(top)
    top_labels = list(map(lambda x: label_names[x], top))
    tops_labels.append(top_labels)
    top_confidents = list(map(lambda x: pred[x], top))
args = vars(ap.parse_args())

# initial learning rate, number of epochs to train, batch size
INIT_LR = 1e-4
EPOCHS = 20
BS = 32

# initialize the list of data and classes
imagePaths = list(paths.list_images(args["dataset"]))
data = []
labels = []

for imagePath in imagePaths:
    # load image and preprocess
    label = imagePath.split(os.path.sep)[-2]
    image = load_img(imagePath, target_size=(224, 224))
    image = img_to_array(image)
    image = preprocess_input(image)
    # update data and labels
    data.append(image)
    labels.append(label)

# convert the data and labels to numpy arrays
data = np.array(data, dtype="float32")
labels = np.array(labels)

# perform one-hot encoding on the labels
lb = LabelBinarizer()
labels = lb.fit_transform(labels)
labels = to_categorical(labels)
def prepare_image(img_path):
    img = image.load_img(img_path, target_size=(224, 224))
    img_array = image.img_to_array(img)
    img_array_expanded_dims = np.expand_dims(img_array, axis=0)
    return keras.applications.mobilenet.preprocess_input(img_array_expanded_dims)
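# Usage sketch for prepare_image (hypothetical file path; uses the stock
# MobileNet and decode_predictions from keras.applications):
from tensorflow import keras
from tensorflow.keras.applications.mobilenet import decode_predictions

mobilenet = keras.applications.mobilenet.MobileNet(weights='imagenet')
batch = prepare_image('samples/dog.jpg')
print(decode_predictions(mobilenet.predict(batch), top=3)[0])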
def part5():
    # !wget --no-check-certificate \
    #     https://storage.googleapis.com/laurencemoroney-blog.appspot.com/horse-or-human.zip \
    #     -O /tmp/horse-or-human.zip
    import os

    # for unzip
    # import zipfile
    # local_zip = './tmp/horse-or-human.zip'
    # zip_ref = zipfile.ZipFile(local_zip, 'r')
    # zip_ref.extractall('./tmp/horse-or-human')
    # zip_ref.close()

    # Let's define each of these directories:
    train_horse_dir = os.path.join('./tmp/horse-or-human/horses')
    train_human_dir = os.path.join('./tmp/horse-or-human/humans')
    train_horse_names = os.listdir(train_horse_dir)
    train_human_names = os.listdir(train_human_dir)

    # Let's see what the filenames look like
    # print(train_horse_names[:10])
    # print(train_human_names[:10])

    # Let's find out the total number of horse and human images
    print('total training horse images:', len(os.listdir(train_horse_dir)))
    print('total training human images:', len(os.listdir(train_human_dir)))

    show = False
    if show:
        # Parameters for our graph; we'll output images in a 4x4 configuration
        nrows = 4
        ncols = 4
        # Index for iterating over images
        pic_index = 0
        # Set up the matplotlib fig, and size it to fit 4x4 pics
        fig = plt.gcf()
        fig.set_size_inches(ncols * 4, nrows * 4)
        pic_index += 8
        next_horse_pix = [
            os.path.join(train_horse_dir, fname)
            for fname in train_horse_names[pic_index - 8:pic_index]
        ]
        next_human_pix = [
            os.path.join(train_human_dir, fname)
            for fname in train_human_names[pic_index - 8:pic_index]
        ]
        for i, img_path in enumerate(next_horse_pix + next_human_pix):
            # Set up subplot; subplot indices start at 1
            sp = plt.subplot(nrows, ncols, i + 1)
            sp.axis('Off')  # Don't show axes (or gridlines)
            img = mpimg.imread(img_path)
            plt.imshow(img)
        plt.show()

    test_img = mpimg.imread(
        os.path.join(train_horse_dir, train_horse_names[0]))
    print("1 image : ", test_img.shape)

    # Building a small model from scratch
    model = keras.models.Sequential([
        # Note the input shape is the desired size of the image: 300x300 with 3 bytes of color
        # This is the first convolution
        tf.keras.layers.Conv2D(16, (3, 3), activation='relu',
                               input_shape=(300, 300, 3)),
        tf.keras.layers.MaxPooling2D(2, 2),
        # The second convolution
        tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D(2, 2),
        # The third convolution
        tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D(2, 2),
        # The fourth convolution
        tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D(2, 2),
        # The fifth convolution
        tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D(2, 2),
        tf.keras.layers.Flatten(),
        # 512-neuron hidden layer
        tf.keras.layers.Dense(512, activation='relu'),
        # Output layer: only 1 output neuron, holding a value from 0-1,
        # where 0 is one class ('horses') and 1 is the other ('humans').
        # Since this is a binary classification problem, we end the network
        # with a sigmoid activation.
        tf.keras.layers.Dense(1, activation='sigmoid')
    ])
    model.summary()
    '''
    NOTE: In this case, using the RMSprop optimization algorithm is preferable
    to stochastic gradient descent (SGD), because RMSprop automates
    learning-rate tuning for us. (Other optimizers, such as Adam and Adagrad,
    also automatically adapt the learning rate during training, and would work
    equally well here.)
    '''

    from tensorflow.keras.optimizers import RMSprop
    model.compile(
        optimizer=RMSprop(lr=0.001),
        loss='binary_crossentropy',
        metrics=['accuracy'],
    )

    # Data preprocessing
    '''
    It is uncommon to feed raw pixels into a convnet; first convert them to
    float32 tensors. In our case, we will preprocess our images by normalizing
    the pixel values to be in the [0, 1] range (originally all values are in
    the [0, 255] range).
    '''
    from tensorflow.keras.preprocessing.image import ImageDataGenerator

    # All images will be rescaled by 1./255
    train_datagen = ImageDataGenerator(rescale=1 / 255)

    # Flow training images in batches of 128 using the train_datagen generator
    train_generator = train_datagen.flow_from_directory(
        './tmp/horse-or-human/',  # This is the source directory for training images
        target_size=(300, 300),  # All images will be resized to 300x300
        batch_size=128,
        # Since we use binary_crossentropy loss, we need binary labels
        class_mode='binary')

    history = model.fit(train_generator, steps_per_epoch=8, epochs=15, verbose=1)

    import random
    from tensorflow.keras.preprocessing.image import img_to_array, load_img

    # Let's define a new Model that will take an image as input, and will output
    # intermediate representations for all layers in the previous model after
    # the first.
    successive_outputs = [layer.output for layer in model.layers[1:]]
    # visualization_model = Model(img_input, successive_outputs)
    visualization_model = tf.keras.models.Model(inputs=model.input,
                                                outputs=successive_outputs)

    # Let's prepare a random input image from the training set.
    horse_img_files = [
        os.path.join(train_horse_dir, f) for f in train_horse_names
    ]
    human_img_files = [
        os.path.join(train_human_dir, f) for f in train_human_names
    ]
    img_path = random.choice(horse_img_files + human_img_files)

    img = load_img(img_path, target_size=(300, 300))  # this is a PIL image
    x = img_to_array(img)  # Numpy array with shape (300, 300, 3)
    x = x.reshape((1, ) + x.shape)  # Numpy array with shape (1, 300, 300, 3)

    # Rescale by 1/255
    x /= 255

    # Let's run our image through our network, thus obtaining all
    # intermediate representations for this image.
    successive_feature_maps = visualization_model.predict(x)

    # These are the names of the layers, so we can have them as part of our plot
    layer_names = [layer.name for layer in model.layers[1:]]

    # Now let's display our representations
    for layer_name, feature_map in zip(layer_names, successive_feature_maps):
        if len(feature_map.shape) == 4:
            # Just do this for the conv / maxpool layers, not the fully-connected layers
            n_features = feature_map.shape[-1]  # number of features in the feature map
            # The feature map has shape (1, size, size, n_features)
            size = feature_map.shape[1]
            # We will tile our images in this matrix
            display_grid = np.zeros((size, size * n_features))
            for i in range(n_features):
                # Postprocess the feature to make it visually palatable
                x = feature_map[0, :, :, i]
                x -= x.mean()
                x /= x.std()
                x *= 64
                x += 128
                x = np.clip(x, 0, 255).astype('uint8')
                # We'll tile each filter into this big horizontal grid
                display_grid[:, i * size:(i + 1) * size] = x
            # Display the grid
            scale = 20. / n_features
            plt.figure(figsize=(scale * n_features, scale))
            plt.title(layer_name)
            plt.grid(False)
            plt.imshow(display_grid, aspect='auto', cmap='viridis')
import numpy as np
import random
from tensorflow.keras.preprocessing.image import img_to_array, load_img

# Let's define a new Model that will take an image as input, and will output
# intermediate representations for all layers in the previous model after
# the first.
successive_outputs = [layer.output for layer in model.layers[1:]]
# visualization_model = Model(img_input, successive_outputs)
visualization_model = tf.keras.models.Model(inputs=model.input,
                                            outputs=successive_outputs)

# Let's prepare a random input image from the training set.
horse_img_files = [os.path.join(train_horse_dir, f) for f in train_horse_names]
human_img_files = [os.path.join(train_human_dir, f) for f in train_human_names]
img_path = random.choice(horse_img_files + human_img_files)

img = load_img(img_path, target_size=(300, 300))  # this is a PIL image
x = img_to_array(img)  # Numpy array with shape (300, 300, 3)
x = x.reshape((1,) + x.shape)  # Numpy array with shape (1, 300, 300, 3)

# Rescale by 1/255
x /= 255

# Let's run our image through our network, thus obtaining all
# intermediate representations for this image.
successive_feature_maps = visualization_model.predict(x)

# These are the names of the layers, so we can have them as part of our plot.
# Skip the first layer to stay aligned with successive_outputs above.
layer_names = [layer.name for layer in model.layers[1:]]

# Now let's display our representations
for layer_name, feature_map in zip(layer_names, successive_feature_maps):
import tensorflow as tf
import numpy as np
from IPython.display import Image as IPythonImage  # aliased to avoid clashing with PIL.Image below
from tensorflow.keras.preprocessing import image
import matplotlib.pyplot as plt
import cv2
from PIL import Image

file_path = 'G:/rauf/STEPBYSTEP/Data/dogcat/1.jpeg'

# display image: first method, with IPython
my_image_ipython = IPythonImage(file_path, width=224, height=224)
print(my_image_ipython)

# display image: second method, with keras
my_image_keras = image.load_img(file_path, target_size=(224, 224))
plt.imshow(my_image_keras)

# display image: third method, with cv2 (note: cv2 loads in BGR channel order)
my_image_cv2 = cv2.imread(file_path)
plt.imshow(my_image_cv2)

# display image: fourth method, with pillow
my_image_pillow = Image.open(file_path)
plt.imshow(my_image_pillow)
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions
import numpy as np

model = ResNet50(weights='imagenet')

img_path = 'elephant.jpg'
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)

preds = model.predict(x)
print('Predicted:', decode_predictions(preds, top=3)[0])
# load the network weights from disk (NOTE: if this is the
# first time you are running this script for a given network, the
# weights will need to be downloaded first -- depending on which
# network you are using, the weights can be 90-575MB, so be
# patient; the weights will be cached and subsequent runs of this
# script will be *much* faster)
print("[INFO] loading {}...".format(args["model"]))
Network = MODELS[args["model"]]
model = Network(weights="imagenet")

# load the input image using the Keras helper utility while ensuring
# the image is resized to 'inputShape', the required input dimensions
# for the ImageNet pre-trained network
print("[INFO] loading and pre-processing image...")
image = load_img(args["image"], target_size=inputShape)
image = img_to_array(image)

# our input image is now represented as a NumPy array of shape
# (inputShape[0], inputShape[1], 3); however, we need to expand the
# dimensions by making the shape (1, inputShape[0], inputShape[1], 3)
# so we can pass it through the network
image = np.expand_dims(image, axis=0)

# pre-process the image using the appropriate function based on the
# model that has been loaded (i.e., mean subtraction, scaling, etc.)
image = preprocess(image)

# classify the image
print("[INFO] classifying image with '{}'...".format(args["model"]))
preds = model.predict(image)
def handle_image_message(event):
    message_content = line_bot_api.get_message_content(event.message.id)
    # Save the received image file
    with open("data/" + event.message.id + ".jpg", "wb") as f:
        # get_img_text = "The AI is classifying your image.\nPlease wait a moment."
        # line_bot_api.reply_message(event.reply_token, TextSendMessage(text=get_img_text))
        f.write(message_content.content)
    test_url = "./data/" + event.message.id + ".jpg"

    # img = image.load_img(test_url, target_size=(224, 224))  # read image as PIL data
    img = image.load_img(test_url, target_size=(160, 160))  # read image as PIL data
    x = image.img_to_array(img)  # convert PIL data to Numpy Array
    x = np.expand_dims(x, axis=0)
    x = x / 255.0

    # Run the loaded model
    try:
        predict = model.predict(x).flatten()
        """
        suzaki_score = predict[0]*100
        gamou_score = predict[1]*100
        nagata_score = predict[2]*100
        hinode_score = predict[3]*100
        tamura_score = predict[4]*100
        setobare_score = predict[5]*100
        hayuka_score = predict[6]*100
        ippuku_score = predict[7]*100
        tanigawa_score = predict[8]*100
        mugizou_score = predict[9]*100
        miyoshi_score = predict[10]*100
        ookura_score = predict[11]*100
        yamagoe_score = predict[12]*100
        okasen_score = predict[13]*100
        nakamura_score = predict[14]*100
        yoshiya_score = predict[15]*100
        kamakiri_score = predict[16]*100
        joto_score = predict[17]*100
        nekko_score = predict[18]*100
        yamadaya_score = predict[19]*100
        """
        """
        classnames = ["000_suzaki-shokuryohinten_mitoyo", "001_gamou_sakaide",
                      "002_nagata-in-kanoka_zentsuji", "003_hinode-seimenjo_sakaide",
                      "004_tamura_ayagawa", "005_setobare_takamatsu",
                      "006_hayuka_ayagawa", "007_ippuku_takamatsu", "008_tanigawa-beikokuten_mannou",
                      "009_mugizou_takamatsu", "010_miyoshi-udon_mitoyo", "011_ookura_takamatsu",
                      "012_yamagoe_ayagawa", "013_okasen_utazu",
                      "014_nakamura-udon_marugame", "015_yoshiya_marugame",
                      "016_kamakiri_kanonji", "017_joto_kanonji",
                      "018_nekko_tadotsu", "019_yamadaya_takamatsu"]
        """
        # Udon shop names (proper nouns, kept in Japanese)
        classnames = [
            "須崎食料品店", "讃岐うどん がもう", "釜あげうどん 長田 in 香の香", "日の出製麺所",
            "手打うどん たむら", "おうどん 瀬戸晴れ", "本格手打うどん はゆか", "うどん 一福",
            "谷川米穀店", "手打うどん 麦蔵", "三好うどん", "手打ちうどん 大蔵",
            "山越うどん", "本格手打うどん おか泉", "中村うどん", "純手打うどん よしや",
            "カマ喜ri ", "西端手打 上戸", "根ッ子うどん", "うどん本陣 山田家"
        ]
        index = np.argmax(predict)
        udonya_score = predict[index] * 100
        label = classnames[index]
        # Reply text: "This is udon from <label>.\nConfidence: <score>%."
        text = f"これは「{label}」のうどんです。\n自信は{udonya_score:.1f}%です。"
        line_bot_api.reply_message(event.reply_token, TextSendMessage(text=text))
    except:
        line_bot_api.reply_message(event.reply_token, TextSendMessage(text="failed"))
# Evaluating the Model
##############################################
pred = model.predict_generator(test_image_gen)
predictions = pred > 0.5
predictions

from sklearn.metrics import classification_report, confusion_matrix

print(classification_report(test_image_gen.classes, predictions))
confusion_matrix(test_image_gen.classes, predictions)

from tensorflow.keras.preprocessing import image

my_image = image.load_img(para_cell, target_size=image_shape)
my_image

my_image_arr = image.img_to_array(my_image)
my_image_arr.shape
my_image_arr = np.expand_dims(my_image_arr, axis=0)
my_image_arr.shape

model.predict(my_image_arr)
model.compile(loss=tf.keras.losses.categorical_crossentropy,
              optimizer='adam',
              metrics=['accuracy'])
complete_model = model
layer_outputs = outputs = [layer.output for layer in model.layers][1:]

img_path = random.choice(
    os.listdir("/home/nitish/Desktop/ninad/kras/data/data5/test/kras"))
test_image = os.path.join(
    "/home/nitish/Desktop/ninad/kras/data/data5/test/kras", img_path)
print(f"Image: {test_image}")

img = image.load_img(test_image, target_size=(512, 512, 3))
img_tensor = image.img_to_array(img)
img_tensor = np.expand_dims(img_tensor, axis=0)
img_tensor /= 255.

activation_model = Model(inputs=complete_model.input, outputs=layer_outputs)
activations = activation_model.predict(img_tensor)

layer_names = [
    'conv2d_1', 'activation_1', 'conv2d_4', 'activation_4', 'conv2d_9',
    'activation_9'
]
activ_list = [
    activations[1], activations[3], activations[11], activations[13],
    activations[18], activations[20]
]
def __init__(self):
    super(ESPCNCallback, self).__init__()
    self.test_img = get_lowres_image(load_img(test_img_paths[0]),
                                     upscale_factor)
def load_image(img):
    return img_to_array(load_img(img, color_mode='grayscale')) / 255.
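# Usage sketch for load_image (hypothetical file path): yields a float array
# of shape (height, width, 1) with values scaled to [0, 1].
arr = load_image('samples/digit.png')
print(arr.shape, arr.min(), arr.max())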