Example #1
import os

import numpy as np
from tensorflow.keras.applications.mobilenet import (MobileNet,
                                                     decode_predictions,
                                                     preprocess_input)
from tensorflow.keras.preprocessing import image


def classify(arg):
    # arg is a relative path with two components, e.g. 'data/people'
    data, sample = arg.split('/')
    path = os.path.join(data, sample)
    print(path)
    dirs = os.listdir(path)
    num_test = 4
    classified_list = {}
    model = MobileNet(weights='imagenet')
    if os.path.exists('./cloth_label.txt'):
        with open('./cloth_label.txt', 'r') as f:
            categories = f.read().split()
    else:
        return False
    print(categories)
    for category in categories:
        classified_list[category] = 0

    for dir in dirs:
        dir_path = os.path.join(data, sample, dir)
        files = os.listdir(dir_path)
        for file in files:
            name, extension = os.path.splitext(os.path.join(dir_path, file))
            if extension in ('.jpg', '.png', '.jpeg'):
                src = os.path.join(dir_path, file)
                print("src : " + src)
                img = image.load_img(src, target_size=(224, 224))
                x = image.img_to_array(img)
                x = np.expand_dims(x, axis=0)
                x = preprocess_input(x)
                preds = model.predict(x)
                labels = decode_predictions(preds, top=num_test)[0]
                flag = False

                for label in labels:
                    label = list(label)
                    # count the image if any of the top-k predicted labels
                    # matches a known category
                    if classified_list.get(label[1]) is not None:
                        classified_list[label[1]] += 1
                        flag = True
                        break
                if not flag:
                    # no category matched: prefix the file name with 'etc_'
                    new_filename = 'etc_' + file
                    print(new_filename)
                    os.rename(os.path.join(dir_path, file),
                              os.path.join(dir_path, new_filename))
    return classified_list
Example #2
from io import BytesIO

import numpy as np
from PIL import Image
from tensorflow.keras.applications.mobilenet import (MobileNet,
                                                     decode_predictions,
                                                     preprocess_input)
from tensorflow.keras.preprocessing.image import img_to_array


class ImageClassifier:
    def __init__(self):
        self.model = MobileNet()

    def classify(self, img_storage):
        # buffer the uploaded file so PIL can seek freely
        byte_storage = BytesIO(img_storage.read())
        byte_storage.seek(0)
        img = Image.open(byte_storage).convert('RGB')  # force 3 channels
        img = img.resize((224, 224))
        x = img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        predictions = self.model.predict(x)
        return decode_predictions(predictions)
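
A minimal usage sketch; img_storage can be any binary file-like object, e.g. an uploaded file in a web handler (the image file name is a placeholder):

with open('elephant.jpg', 'rb') as fh:  # hypothetical input image
    print(ImageClassifier().classify(fh))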
Example #3
import numpy as np
from tensorflow.keras.applications.mobilenet import MobileNet
# Note: each architecture has its own preprocess_input; ResNet50's variant is
# used here to match the default model (MobileNet would need its own).
from tensorflow.keras.applications.resnet50 import (ResNet50,
                                                    decode_predictions,
                                                    preprocess_input)
from tensorflow.keras.preprocessing import image


class ImgNet(object):
    def __init__(self, name="resnet"):

        # Load a pretrained ImageNet model; the architecture and its
        # trained weights are loaded together.
        if name == "mobilenet":
            self.model = MobileNet(weights='imagenet')
        else:  # default to ResNet50
            self.model = ResNet50(weights='imagenet')
        self.model.summary()

    def array2predict(self, x):

        # Convert the 3-D tensor (rows, cols, channels) into the 4-D tensor
        # (samples, rows, cols, channels); a single input image means samples=1.
        x = np.expand_dims(x, axis=0)

        # Predict the top-5 classes; decode_predictions() maps the 1000
        # ImageNet class indices to human-readable labels.
        preds = self.model.predict(preprocess_input(x))
        results = decode_predictions(preds, top=5)[0]

        return results

    def file2predict(self, filename):

        # Read the image file given as an argument, resized to the
        # 224x224 input size both models expect.
        img = image.load_img(filename, target_size=(224, 224))
        # Convert the loaded PIL image to a NumPy array.
        x = image.img_to_array(img)

        results = self.array2predict(x)

        return results
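
A brief usage sketch (the image file name is a placeholder):

net = ImgNet(name="mobilenet")
for _, label, score in net.file2predict("elephant.jpg"):  # hypothetical file
    print(label, score)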
Example #4
import cv2
import numpy as np
from tensorflow.keras.applications.mobilenet import MobileNet
from tensorflow.keras.applications.mobilenet import preprocess_input, decode_predictions
from tensorflow.keras.preprocessing import image

# include_top=True loads the complete model;
# include_top=False drops the final fully connected layers, the variant
# published specifically for fine-tuning.
model = MobileNet(weights='imagenet')
model.summary()

img_path = "elephant.jpg"
img = image.load_img(img_path, target_size=(224, 224))
# scale pixel values into the range MobileNet expects
img = image.img_to_array(img)
img = preprocess_input(img)
# add a batch dimension at axis 0: MobileNet's input shape is (None, 224, 224, 3)
img = np.expand_dims(img, axis=0)
print(img.shape)

predictions = model.predict(img)
print('Predicted:', decode_predictions(predictions, top=3)[0])
print(predictions)

description = decode_predictions(predictions, top=3)[0][0][1]

src = cv2.imread(img_path)
cv2.putText(src, description, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.8,
            (255, 0, 0), 2)
cv2.imshow("Predicted", src)
cv2.waitKey()
Example #5
import json
import os
import pickle

import cv2
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
from tensorflow.keras.applications.mobilenet import MobileNet, preprocess_input


class Predict():
    def __init__(self):
        # headless MobileNet used as a feature extractor (global average pooling)
        self.Model = MobileNet(input_shape=(224, 224, 3),
                               include_top=False,
                               pooling='avg')

        # load the parameters saved at training time: cluster count,
        # index files, data paths, etc.
        with open('Models/parms.json', 'r') as f:
            self.parms_data = json.load(f)

        with open(self.parms_data["cluster_model"], 'rb') as f:
            self.cluster_model = pickle.load(f)

        self.training_imgs_dir = self.parms_data['training_data_path']

        self.knn_trees = self.load_knn_trees()
        self.knn_index_dicts = self.load_knn_index_dicts()

    def load_knn_trees(self):
        knn_trees = []
        for i in range(self.parms_data["n_clusters"]):
            with open(self.parms_data["knn_model_" + str(i)], 'rb') as f:
                tree_model = pickle.load(f)
                knn_trees.append(tree_model)
        return knn_trees

    def load_knn_index_dicts(self):
        knn_index_dicts = []
        for i in range(self.parms_data["n_clusters"]):
            with open(self.parms_data["cluster_index_file_" + str(i)],
                      'rb') as f:
                index_dict = pickle.load(f)
                knn_index_dicts.append(index_dict)
        return knn_index_dicts

    def plot_images(self, no_of_images_to_display, query_image,
                    cluster_predicted, indices, path_to_input_image):

        N = no_of_images_to_display
        cols = 3
        rows = int(np.ceil((N + 3) / 3))
        axes = []
        fig = plt.figure()

        index_dict = self.knn_index_dicts[cluster_predicted]

        imgs_names = [index_dict[idx] for idx in indices[0][:N]]
        imgs_dirs_to_display = [
            os.path.join(self.training_imgs_dir, pth) for pth in imgs_names
        ]

        axes.append(fig.add_subplot(rows, cols, 1))
        query_image = query_image[:, :, ::-1]  # BGR -> RGB
        subplot_title = ('query image')
        axes[-1].set_title(subplot_title)
        axes[-1].axis('off')
        plt.imshow(query_image)

        for i, img_dir in enumerate(imgs_dirs_to_display):  # full paths, not bare names
            image = cv2.imread(img_dir)[:, :, ::-1]  # BGR -> RGB
            axes.append(fig.add_subplot(rows, cols, i + 4))
            axes[-1].axis('off')
            # subplot_title=('query image')
            # axes[-1].set_title(subplot_title)
            plt.imshow(image)

        if not os.path.isdir('output'):
            os.mkdir('output')

        plt.axis('off')
        plt.savefig(os.path.join('output',
                                 path_to_input_image.split('/')[-1]),
                    dpi=300,
                    bbox_inches='tight')
        plt.show()

    def display_similar_images(self, path_to_input_image,
                               no_of_images_to_display):
        image = cv2.imread(path_to_input_image)
        # compute the query image's embedding
        embedding = self.encode_image(image_array=image)[0]
        embedding = np.expand_dims(embedding, axis=0)
        # predict which cluster it belongs to
        cluster_predicted = self.cluster_model.predict(embedding)[0]
        # find the indices of the training images nearest to the embedding
        distances, indices = self.knn_trees[cluster_predicted].kneighbors(
            embedding)
        self.plot_images(no_of_images_to_display, image, cluster_predicted,
                         indices, path_to_input_image)

    def preprocess_image(self, image_array_nd, target_size=(224, 224)):
        image_pil = Image.fromarray(image_array_nd)
        image_pil = image_pil.resize(target_size, Image.LANCZOS)  # ANTIALIAS was removed in Pillow 10
        image_array = np.array(image_pil).astype('uint8')
        image_pp = preprocess_input(image_array)
        image_pp = np.array(image_pp)[np.newaxis, :]
        return image_pp

    def encode_image(self, image_array):
        image_pp = self.preprocess_image(image_array_nd=image_array)
        return self.Model.predict(image_pp)
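
A minimal usage sketch (assumes the Models/ artifacts produced by the matching training code already exist; the query image name is a placeholder):

predictor = Predict()
predictor.display_similar_images('query.jpg', no_of_images_to_display=6)  # hypothetical image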
Example #6
from tensorflow.keras.applications.mobilenet import MobileNet
from alibi.explainers import AnchorImage
import dill
import alibi

# Be careful when running this from a Jupyter kernel;
# a Docker container environment is more reproducible.
print(alibi.__version__)
print(dill.__version__)

model = MobileNet(weights='imagenet')

predict_fn = lambda x: model.predict(x)

segmentation_fn = 'slic'
kwargs = {'n_segments': 15, 'compactness': 20, 'sigma': .5}
image_shape = (224, 224, 3)
explainer = AnchorImage(predict_fn,
                        image_shape,
                        segmentation_fn=segmentation_fn,
                        segmentation_kwargs=kwargs,
                        images_background=None)

explainer.predict_fn = None  # clear predict_fn: it's a lambda, and will be reset when the explainer is loaded
with open("explainer.dill", 'wb') as f:
    dill.dump(explainer, f)
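
A matching load-side sketch (an assumption mirroring the comment above: the consumer must reattach a predict function after loading):

with open("explainer.dill", 'rb') as f:
    loaded = dill.load(f)
loaded.predict_fn = lambda x: model.predict(x)  # reattach before calling explain()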
Example #7
import json
import logging
import os
import pickle
from shutil import copyfile

import cv2
import numpy as np
from PIL import Image
from sklearn.cluster import KMeans
from sklearn.neighbors import NearestNeighbors
from tensorflow.keras.applications.mobilenet import MobileNet, preprocess_input
from tqdm import tqdm


class Train():

    def __init__(self):
        # headless MobileNet used as a feature extractor
        self.Model = MobileNet(input_shape=(224, 224, 3), include_top=False, pooling='avg')
        self.n_clusters = 7
        self.cluster_model_file_name = "kmean-7.pkl"
        self.no_of_neighbours = 50


    def create_required_folders(self):
        for folder in ('Models', 'Models/embeddings', 'clustered_data',
                       'Models/embeddings_index'):
            os.makedirs(folder, exist_ok=True)

    def kmean_clustering(self, X):
        Kmean = KMeans(n_clusters=self.n_clusters)
        return Kmean.fit(X)

    def knn(self, X):
        return NearestNeighbors(n_neighbors=self.no_of_neighbours, algorithm='ball_tree').fit(X)


    def train(self, input_data_dir, save_images_in_cluster_dir=True):

        assert len(os.listdir(input_data_dir)) >= 7, "insufficient data"

        self.create_required_folders()
        parms_file_log = {}  # tracks the parameters and input/output paths that will be used at inference time
        parms_file_log['training_data_path'] = input_data_dir
        parms_file_log['n_clusters'] = self.n_clusters
        parms_file_log['no_of_neighbours'] = self.no_of_neighbours

        X, index_dict = self.prepare_data(input_data_dir)  # X holds one embedding per image

        # train the clustering model
        Kmean = self.kmean_clustering(X)
        cluster_pkl_filename = os.path.join("Models",self.cluster_model_file_name)
        with open(cluster_pkl_filename, 'wb') as file:                   # save the cluster model for inferencing
            pickle.dump(Kmean, file)

        parms_file_log['cluster_model'] = cluster_pkl_filename
        logging.info('Training Clustering Model Done')

        clu = []
        for _ in range(self.n_clusters):
            clu.append([])

        for i, label in enumerate(Kmean.labels_):
            clu[label].append(index_dict[i])

        for i in range(self.n_clusters):
            op = 'no of images in cluster {} --- {}'.format(i, len(clu[i]))
            logging.info(op)

        if save_images_in_cluster_dir:  # save each image into its cluster's folder
            for i in range(self.n_clusters):
                cluster_dir = os.path.join('clustered_data', 'cluster_' + str(i))
                if not os.path.isdir(cluster_dir):
                    os.mkdir(cluster_dir)
                remove_files(cluster_dir)  # project helper assumed to empty the directory
                for path in clu[i]:
                    src = path
                    dst = os.path.join(cluster_dir, src.split('/')[-1])
                    copyfile(src, dst)
            logging.info('Images are saved in respective clusters in the clustered_data folder')

        # train a nearest neighbour model for each cluster and save the model
        for i in range(self.n_clusters):
            X_C, cluster_index = self.partition_cluster_data(X, index_dict, clu[i])
            knn_model = self.knn(X_C)
            knn_model_name_dir = os.path.join('Models', 'cluster_tree_'+str(i)+'.pkl')

            with open(knn_model_name_dir, 'wb') as file:
                pickle.dump(knn_model, file)
                parms_file_log['knn_model_'+str(i)] = knn_model_name_dir

            index_name = os.path.join('Models/embeddings_index', 'cluster_tree_'+str(i)+'.pkl')
            with open(index_name, 'wb') as file:
                pickle.dump(cluster_index, file)
                parms_file_log['cluster_index_file_'+str(i)] = index_name

        with open('Models/parms.json', 'w') as f:
            json.dump(parms_file_log, f)

    def partition_cluster_data(self, X, index_dict, clu):
        '''
        Separate out this cluster's data for the nearest-neighbour model
        while preserving the index mapping.
        '''
        reversed_index_dict = {value: key for key, value in index_dict.items()}
        X_C = []
        cluster_index = {}
        for i, f_name in enumerate(clu):
            X_C.append(X[reversed_index_dict[f_name]])
            cluster_index[i] = f_name
        X_C = np.array(X_C)
        return X_C, cluster_index

    def preprocess_image(self, image_array_nd, target_size=(224, 224)):
        '''
        Preprocess an image array into the format MobileNet requires.
        '''
        image_pil = Image.fromarray(image_array_nd)
        image_pil = image_pil.resize(target_size, Image.LANCZOS)  # ANTIALIAS was removed in Pillow 10
        image_array = np.array(image_pil).astype('uint8')
        image_pp = preprocess_input(image_array)
        image_pp = np.array(image_pp)[np.newaxis, :]
        return image_pp

    def encode_image(self, image_array):
        '''
        Produce an embedding for an image.
        '''
        image_pp = self.preprocess_image(image_array_nd = image_array)
        return self.Model.predict(image_pp)

    def make_dirs(self, img_paths_dir):
        '''
        Helper that returns the full paths of all images in a directory.
        '''
        img_paths = [os.path.join(img_paths_dir, path) for path in os.listdir(img_paths_dir)]
        return img_paths

    def prepare_data(self, input_data_dir, save_embeddings=True):
        X = []
        index_dict = {}
        logging.info('creation of embeddings started')
        # training images are expected to be named 0.jpg, 1.jpg, ...
        for i in tqdm(range(len(os.listdir(input_data_dir)))):
            path = os.path.join(input_data_dir, str(i) + '.jpg')
            image = cv2.imread(path)
            embd = self.encode_image(image_array=image)[0]
            X.append(embd)
            index_dict[i] = path

        X = np.array(X)
        logging.info('creation of embeddings done')

        if save_embeddings:
            with open('Models/embeddings/data.npy', 'wb') as f:
                np.save(f, X)

            filename = 'Models/embeddings_index/index_dict.pkl'
            with open(filename, 'wb') as f:
                pickle.dump(index_dict, f)

            logging.info('embeddings and index files are saved in the Models directory')
        return X, index_dict
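
A minimal end-to-end sketch (the directory name is a placeholder; prepare_data expects the images to be named 0.jpg, 1.jpg, ...):

trainer = Train()
trainer.train('training_images', save_images_in_cluster_dir=True)  # hypothetical directory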
Example #8
import time

import cv2
import matplotlib.pyplot as plt
import numpy as np
from tensorflow.keras.applications.mobilenet import (MobileNet,
                                                     decode_predictions,
                                                     preprocess_input)

mobile = MobileNet()
# mobile.summary()

img = cv2.imread('./img/bird.jpg')
img = cv2.resize(img, (224, 224))

start = time.time()
# cv2 loads BGR: flip to RGB and apply MobileNet's preprocessing
x = preprocess_input(img[:, :, ::-1].astype(np.float32).reshape(-1, 224, 224, 3))
yhat = mobile.predict(x)
elapsed = time.time() - start  # don't shadow the time module
# label_key = np.argmax(yhat)
label = decode_predictions(yhat)
label = label[0][0]

print("Inference time: {} s".format(elapsed))
print('%s (%.2f%%)' % (label[1], label[2] * 100))
img = img[:, :, ::-1]  # BGR -> RGB for display
plt.figure(figsize=(11, 11))
plt.imshow(img)
plt.axis("off")
plt.show()
Example #9
import os
import shutil

import cv2
import h5py
import IPython.display
import matplotlib.pyplot as plt
import numpy as np
import telegram_send
import tensorflow as tf
import tqdm
from tensorflow.keras import layers
from tensorflow.keras.applications.mobilenet import MobileNet
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# GradCAM, PlotLosses and asl_detection.save are assumed to be
# project-local helpers importable from the surrounding package.


class Classifier:
    def __init__(self):
        self.categories = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ')
        self.category_map = {i: v for i, v in enumerate(self.categories)}
        self.num_classes = len(self.categories)
        self.img_size = 224
        self.batch_size = 64

        self.train_path, self.test_path = None, None
        self.train_generator, self.test_generator = None, None
        self.step_size_train, self.step_size_valid = None, None
        self.images, self.labels = None, None
        self.classifier = None
        self.history = None
        self.grad_cam_names = None
        self.save_folder = None

        self.feature_extractor = None
        self.latent_vectors = None
        self.latent_path = None
        self.latent_test = None
        self.latent_train = None


    def generate_data(self, train_path, test_path, batch_size, figsize=(10,10), fontsize=16):
        self.train_path = train_path
        self.test_path = test_path
        self.batch_size = batch_size

        test_datagen = ImageDataGenerator(
                rescale=1/255.)
        train_datagen = ImageDataGenerator(
                rescale=1/255.,
                brightness_range=[.9, 1.],
                rotation_range=5,
                zoom_range=.1,
                width_shift_range=.1,
                height_shift_range=.1)
        self.train_generator = train_datagen.flow_from_directory(
                self.train_path,
                shuffle=True,
                target_size=(self.img_size, self.img_size),
                color_mode='rgb',
                batch_size=batch_size,
                seed=0,
                class_mode="categorical")
        self.test_generator = test_datagen.flow_from_directory(
                self.test_path,
                shuffle=True,
                target_size=(self.img_size, self.img_size),
                color_mode='rgb',
                batch_size=batch_size,
                seed=0,
                class_mode="categorical")

        _, axes = plt.subplots(6, 6, figsize=figsize)
        for i, category in enumerate(self.categories[:6]):
            path = self.train_path + '/' + category
            images = os.listdir(path)
            for j in range(6):
                image = cv2.imread(path + '/' + images[j])
                image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                axes[i, j].imshow(image)
                axes[i, j].set(xticks=[], yticks=[])
                axes[i, j].set_title(category, color = 'tomato').set_size(fontsize)
        plt.suptitle('Vanilla Data').set_size(2*fontsize)
        plt.tight_layout()

        images, labels = self.train_generator.next()
        _, axes = plt.subplots(6, 6, figsize=figsize)
        for i in range(6):
            for j in range(6):
                image = images[i * 6 + j]  # walk the batch without repeating images
                label = self.category_map[np.argmax(labels[i * 6 + j])]
                axes[i, j].imshow(image)
                axes[i, j].set(xticks=[], yticks=[])
                axes[i, j].set_title(label, color = 'tomato').set_size(fontsize)
        plt.suptitle('Augmented Data').set_size(2*fontsize)
        plt.tight_layout()

        self.step_size_train = self.train_generator.n // self.train_generator.batch_size
        self.step_size_valid = self.test_generator.n // self.test_generator.batch_size

    def notify(self, fig):
        fig.savefig('tmp.jpg')
        with open('tmp.jpg', 'rb') as f:
            telegram_send.send(images=[f])
        os.remove('tmp.jpg')

    def plot_accuracy(self, history):
        f, axes = plt.subplots(1, 2, figsize=(12, 4))
        accuracy = history.history['accuracy']
        loss = history.history['loss']
        val_accuracy = history.history['val_accuracy']
        val_loss = history.history['val_loss']
        print('Training accuracy: {:.3f}'.format(np.max(accuracy)))
        print('Training loss: {:.3f}'.format(np.min(loss)))
        print('Validation accuracy: {:.3f}'.format(np.max(val_accuracy)))
        print('Validation loss: {:.3f}'.format(np.min(val_loss)))
        axes[0].plot(history.history['accuracy'])
        axes[0].plot(history.history['val_accuracy'])
        axes[0].set_title('Model accuracy')
        axes[0].set(ylabel = 'accuracy', xlabel = 'Epoch')
        axes[0].legend(['Train', 'Test'], loc='upper left')
        axes[1].plot(history.history['loss'])
        axes[1].plot(history.history['val_loss'])
        axes[1].set_title('Model loss')
        axes[1].set(ylabel = 'Loss', xlabel = 'Epoch')
        axes[1].legend(['Train', 'Test'], loc='upper left')
        return f

    def set_feature_extractor(self, name = 'mobilenet', summary = False):
        if name == 'mobilenet':
            self.feature_extractor = MobileNet(input_shape=(self.img_size, self.img_size, 3),
                                               include_top=True, weights='imagenet')
            # cut the network at the global-average-pooling output (1024-d),
            # dropping the classification head
            output = self.feature_extractor.layers[-6].output
            self.feature_extractor = tf.keras.Model(self.feature_extractor.inputs, output)
        if summary:
            self.feature_extractor.summary()

    def extract_and_save(self, latent_path, latent_vectors, save=True):
        '''Runs the feature extractor over both generators and saves the
           encoded features batch-by-batch as HDF5 files.
        '''
        self.latent_vectors = latent_vectors
        self.latent_path = latent_path
        if not save:
            return
        template = 'batch_{}.h5'
        for folder, generator, steps in zip(
                ['train', 'test'],
                [self.train_generator, self.test_generator],
                [self.step_size_train, self.step_size_valid]):
            save_path = os.path.join(latent_path, folder)
            if os.path.exists(save_path) and os.path.isdir(save_path):
                shutil.rmtree(save_path)
            os.makedirs(save_path, exist_ok=True)
            batch = 0
            # each folder is fed only by its own generator, for one
            # epoch's worth of batches
            for x_batch, y_batch in tqdm.tqdm(generator):
                IPython.display.clear_output(wait=True)
                features = self.feature_extractor.predict(x_batch)
                file_path = os.path.join(save_path, template.format(batch))
                with h5py.File(file_path, 'w') as file:
                    # encoded features and hard labels
                    file.create_dataset('features', data=features)
                    file.create_dataset('labels', data=y_batch)
                batch += 1
                if batch >= steps:
                    break

    def load_data(self, folder):
        '''yields (x_batch, y_batch) for model.fit()
        '''
        root_path = os.path.join(self.latent_path, folder)
        while True:
            for file_path in os.listdir(root_path):
                with h5py.File(os.path.join(root_path, file_path), 'r') as file:
                    yield (np.array(file['features']), np.array(file['labels']))

    def clear_session(self):
        tf.keras.backend.clear_session()

    def train(self,
              lr=None,
              optimizer=None,
              epochs=None,
              decay_lr=False,
              save_folder=None,
              notification = False):

        self.save_folder = save_folder

        # shape of the MobileNet-encoded feature vectors
        inputs = layers.Input(shape=self.latent_vectors)
        x = layers.Dense(self.num_classes, activation='softmax')(inputs)
        self.classifier = tf.keras.Model(inputs, x)

        self.classifier.compile(optimizer=optimizer(lr=lr),
                                loss='categorical_crossentropy',
                                metrics=['accuracy'])

        def lr_decay(epoch):
            alpha, decay = 1, 1
            return lr / (alpha + decay * epoch)
        callback_learning_rate = tf.keras.callbacks.LearningRateScheduler(lr_decay, verbose=True)
        plot_losses = PlotLosses()
        callback_is_nan = tf.keras.callbacks.TerminateOnNaN()
        callback_early = tf.keras.callbacks.EarlyStopping(monitor='loss', min_delta = .001, patience = 10)

        callbacks = [plot_losses, callback_is_nan, callback_early]
        callbacks += [callback_learning_rate] if decay_lr else []
        self.latent_train, self.latent_test = 'train', 'test'

        self.history = self.classifier.fit(
                  x = self.load_data(self.latent_train),
                  epochs=epochs,
                  workers=15,
                  steps_per_epoch=self.step_size_train,
                  validation_steps=self.step_size_valid,
                  validation_data=self.load_data(self.latent_test),
                  callbacks=callbacks)

        fig = self.plot_accuracy(self.history)
        if save_folder:
            asl_detection.save.save(save_folder, 'acc_loss', fig=fig)
            asl_detection.save.save(save_folder, 'model', self.classifier)
        if notification:
            self.notify(fig)
        self.images, self.labels = self.test_generator.next()

    def _visualize_feature_maps(self, image, _layers, scale):
        model_layers = self.feature_extractor.layers
        # Extracts the outputs
        layer_outputs = [layer.output for layer in self.feature_extractor.layers]
        # Creates a model that will return these outputs, given the model input
        activation_model = tf.keras.Model(inputs=self.feature_extractor.inputs, outputs=layer_outputs)
        # get activations
        activations = activation_model.predict(image)
        images_per_row = 4
        count = -1
        # Displays the feature maps
        for layer, layer_activation in zip(model_layers, activations):
            if not isinstance(layer, layers.Conv2D):
                continue
            count += 1
            # only display the conv layer selected by _layers
            if count != _layers:
                continue
            n_features = layer_activation.shape[-1] # Number of features in the feature map
            size = layer_activation.shape[1] #The feature map has shape (1, size, size, n_features).
            n_cols = n_features // images_per_row # Tiles the activation channels in this matrix
            display_grid = np.zeros((size * n_cols, images_per_row * size))
            for col in range(n_cols): # Tiles each filter into a big horizontal grid
                for row in range(images_per_row):
                    channel_image = layer_activation[0, :, :, col * images_per_row + row]
                    # Post-processes the feature to make it visually palatable
                    channel_image -= channel_image.mean()
                    channel_image /= channel_image.std() + 1e-8
                    channel_image *= 64
                    channel_image += 128
                    channel_image = np.clip(channel_image, 0, 255).astype('uint8')
                    display_grid[col * size : (col + 1) * size, # Displays the grid
                                 row * size : (row + 1) * size] = channel_image
            fig_scale = scale / size
            fig = plt.figure(figsize=(fig_scale * display_grid.shape[1],
                                fig_scale * display_grid.shape[0]))
            plt.title(layer.name)
            plt.grid(False)
            plt.imshow(display_grid, aspect='auto', cmap='gray')
        if self.save_folder:
            asl_detection.save.save(self.save_folder, 'feature_maps', fig=fig)

    def visualize_feature_maps(self, index, _layers=1, scale=2):
        image = self.images[index:index+1]
        self._visualize_feature_maps(image, _layers, scale)

    def generate_heat_map(self, _input):
        self.grad_cam_names = [layer.name for layer in self.feature_extractor.layers if isinstance(layer, layers.Conv2D)]
        image = self.images[_input:_input+1] if isinstance(_input, int) else _input
        preds = self.classifier(self.feature_extractor(image))
        idx = np.argmax(preds[0])
        # initialize our gradient class activation map and build the heatmap
        cam = GradCAM(self.feature_extractor, idx, self.grad_cam_names[-1])
        heatmap = cam.compute_heatmap(image)
        (heatmap, overlay) = cam.overlay_heatmap(heatmap, image, self.img_size, alpha=0.4)
        label = self.category_map[idx]

        if isinstance(_input, int):
            description = 'image\ntrue: {} pred: {}\nconfidence: {:.3f}'.format\
            (self.category_map[np.argmax(self.labels[_input])], self.category_map[idx], preds[0][idx])
        else:
            description = 'pred: {}\nconfidence: {:.3f}'.format(self.category_map[idx], preds[0][idx])

        results = {'image': image, 'heatmap': heatmap, 'overlay': overlay, 'description': description, 'label': label}
        return results

    def visualize_heat_maps(self, index, rows=3, figsize=(8, 8)):
        f, axes = plt.subplots(rows, 3, figsize=figsize)
        for i in range(rows):
            results = self.generate_heat_map(index+i)

            axes[i, 0].imshow(results['image'].reshape(self.img_size, self.img_size, 3))
            axes[i, 1].imshow(results['heatmap'].reshape(self.img_size, self.img_size, 3))
            axes[i, 2].imshow(results['overlay'].reshape(self.img_size, self.img_size, 3))
            axes[i, 0].set_title(results['description']).set_size(12)
            axes[i, 1].set_title('heatmap')
            axes[i, 2].set_title('overlay')
            axes[i, 0].axis('off')
            axes[i, 1].axis('off')
            axes[i, 2].axis('off')
        plt.tight_layout(w_pad=0.1)
        if self.save_folder:
            asl_detection.save.save(self.save_folder, 'heat_maps', fig=f)
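
A condensed sketch of the workflow this class implements (paths, shapes and hyper-parameters are placeholders):

clf = Classifier()
clf.generate_data('data/train', 'data/test', batch_size=64)  # hypothetical paths
clf.set_feature_extractor('mobilenet')
clf.extract_and_save(latent_path='latent', latent_vectors=(1024,))  # 1024 matches the pooled MobileNet features
clf.train(lr=1e-3, optimizer=tf.keras.optimizers.Adam, epochs=20, save_folder=None)
clf.visualize_heat_maps(index=0)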
Example #10
# Each assignment overrides the previous one: only the last image_path is used.
image_path = IMAGES_DIR / 'lena.jpg'
image_path = IMAGES_DIR / 'military-raptor.jpg'  # warplane
image_path = IMAGES_DIR / 'baboon.png'  # baboon
image_path = IMAGES_DIR / 'fighter_jet.jpg'  # warplane
image_path = IMAGES_DIR / 'watch.png'  # stopwatch
image_path = IMAGES_DIR / 'bear.tif'  # brown_bear
image_path = IMAGES_DIR / 'wild_flowers.tif'  # greenhouse
image_path = IMAGES_DIR / 'elephant.jpg'  # tusker

print(image_path)
# read image in RGB format
image = imageio.imread(str(image_path))
print(image.shape)
# vision.resize, expand_to_batch and model_mobilenet are project-local
# helpers; model_mobilenet builds the author's own MobileNet ("OUR"),
# compared below against the stock Keras model ("KERAS").
image = vision.resize(image, 224, 224)
batch = expand_to_batch(image)
batch = preprocess_input(batch)

b_model = model_mobilenet(weights="imagenet")
b_y = b_model.predict(batch)
b_label = decode_predictions(b_y)
b_label = b_label[0][0]
# print the classification
print('OUR: %s (%.2f%%)' % (b_label[1], b_label[2] * 100))

a_model = MobileNet(weights="imagenet")
a_y = a_model.predict(batch)
a_label = decode_predictions(a_y)
a_label = a_label[0][0]
# print the classification
print('KERAS: %s (%.2f%%)' % (a_label[1], a_label[2] * 100))
Example #11
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.applications.mobilenet import MobileNet
from tensorflow.keras.applications.mobilenet import preprocess_input
from tensorflow.keras.applications.mobilenet import decode_predictions

# Build the MobileNet model
model = MobileNet(weights="imagenet", include_top=True)
# Load the test image
img = load_img("koala.png", target_size=(224, 224))
x = img_to_array(img)  # convert to a NumPy array
print("x.shape: ", x.shape)
# Reshape to (1, 224, 224, 3)
img = x.reshape((1, x.shape[0], x.shape[1], x.shape[2]))
# Preprocess the input
img = preprocess_input(img)
print("img.shape: ", img.shape)
# Run the prediction
Y_pred = model.predict(img)
# Decode the prediction
label = decode_predictions(Y_pred)
result = label[0][0]  # the most likely class
print("%s (%.2f%%)" % (result[1], result[2] * 100))
Example #12
              metrics=['accuracy', top5_acc])

t = time.time() - t
#model.summary()

print('model build time: %f s' % t)

#%%

img_path = '../test_images/wine.jpg'
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)

preds = model.predict(x)
print('Predicted:', decode_predictions(preds, top=5)[0])

#%%
# evaluate model

print('preparing dataset...')

#evaluation_datagen = ImageDataGenerator(rescale=1. / 255)
evaluation_datagen = ImageDataGenerator(
    preprocessing_function=preprocess_input)
evaluation_generator = evaluation_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_width, img_height),
    class_mode='categorical',
    shuffle=False)