# Dropout and data augmentation to reduce overfitting.

from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Flatten, Dense, Dropout
from keras import backend as K
#import collections
import os
import pandas as pd
import utils
from keras.callbacks import ModelCheckpoint, EarlyStopping

# Arrange the original Kaggle dumps into train/validation/test folders and
# capture the per-split sample counts.
# NOTE(review): the helper name "preapare" is misspelled in `utils`; kept
# as-is because renaming it here would break the import.
(train_dir, validation_dir, test_dir,
 nb_train_samples, nb_validation_samples,
 nb_test_samples) = utils.preapare_small_dataset_for_flow(
     train_dir_original='C:\\Users\\data\\train',
     test_dir_original='C:\\Users\\data\\test',
     target_base_dir='C:\\Users\\Thimma Reddy\\data1')

# Input geometry and training hyper-parameters.
img_width, img_height = 150, 150
epochs = 100
batch_size = 20

# Keras backends disagree on where the channel axis lives; pick the tuple
# matching the active backend's convention.
channels_first = K.image_data_format() == 'channels_first'
input_shape = ((3, img_width, img_height) if channels_first
               else (img_width, img_height, 3))

# Small convolutional stack. (This snippet appears truncated here — the
# dense head and compile step are presumably defined further down; confirm.)
# Passing the layer list to Sequential is equivalent to successive add() calls.
model = Sequential([
    Conv2D(32, (3, 3), activation='relu', input_shape=input_shape),
    MaxPooling2D((2, 2)),
    Conv2D(64, (3, 3), activation='relu'),
])
# ---- Beispiel #2 (Example 2: feature extraction with a pre-trained VGG16) ----
import os

# chdir into the project folder FIRST so the local `utils` module below is
# importable from that directory.
os.chdir(
    "E:\\Data Science\\deeplearning\\Python scripts\\kaggle-cats vs dogs\\")
import utils
import pandas as pd

# Image geometry and training hyper-parameters for this run.
img_width, img_height = 150, 150
epochs = 20
batch_size = 20
# Weights file for the fine-tuned classifier head.
top_model_weights_path = 'E:\\Data Science\\deeplearning\\python scripts\\model.h5'


# Split the Cats-vs-Dogs data into flow_from_directory-ready folders and
# record how many samples each split holds.
(train_dir, validation_dir, test_dir,
 nb_train_samples, nb_validation_samples,
 nb_test_samples) = utils.preapare_small_dataset_for_flow(
     train_dir_original='E:\\Data Science\\Data\\CatsVsDogs\\train',
     test_dir_original='E:\\Data Science\\Data\\CatsVsDogs\\test',
     target_base_dir='E:\\Data Science\\Data\\CatsVsDogs\\target base dir')
# Feature extraction with a pre-trained convolutional base (ImageNet weights,
# classifier head stripped).
# NOTE(review): `applications` is not imported in this snippet — presumably
# `from keras import applications` exists elsewhere in the file; confirm.
base_model = applications.VGG16(weights='imagenet',
                                include_top=False,
                                input_shape=(img_width, img_height, 3))

# Fully connected classifier head stacked on the VGG16 feature maps.
# (Snippet is cut off before the final output layer.)
top_model = Sequential([
    Flatten(input_shape=base_model.output_shape[1:]),
    Dense(512, activation='relu'),
    Dropout(0.5),
    Dense(256, activation='relu'),
    Dropout(0.2),
    Dense(128, activation='relu'),
    Dropout(0.2),
])
# ---- Beispiel #3 (Example 3: training on cached VGG16 bottleneck features) ----
                batch_size=batch_size,
                class_mode=None,
                shuffle=False)
        bottleneck_features_test = model.predict_generator(
                test_generator, nb_test_samples // batch_size, , verbose=1)
        np.save(open('bottleneck_features_test.npy', 'wb'), bottleneck_features_test)
        
        print("Test features extracted")

    else:
        print('bottleneck directory already exists')
    
    
# Prepare the train/validation/test split and record sample counts.
(train_dir, validation_dir, test_dir,
 nb_train_samples, nb_validation_samples,
 nb_test_samples) = utils.preapare_small_dataset_for_flow(
     train_dir_original='C:\\Users\\prana\\Downloads\\Deep_Learning_Data\\train\\train',
     test_dir_original='C:\\Users\\prana\\Downloads\\Deep_Learning_Data\\test1\\test1',
     target_base_dir='C:\\Users\\prana\\Downloads\\Deep_Learning_Data\\data1')

# Headless VGG16 used purely as a fixed feature extractor.
model = applications.VGG16(include_top=False, weights='imagenet')

# Cache bottleneck features to disk once (the misspelled helper name comes
# from this file's own definition), then work out of that directory.
bottleneck_dir = 'C:\\Users\\prana\\Downloads\\Deep_Learning_Data\\bottleneck_features'
save_bottlebeck_features(model, train_dir, validation_dir, test_dir, bottleneck_dir)
os.chdir(bottleneck_dir)

# Load the cached VGG16 bottleneck features for the training split.
# Fix: pass the path straight to np.load instead of an open() handle that
# was never closed (file-descriptor leak in the original).
X_train = np.load('bottleneck_features_train.npy')
# First half of the samples labelled 0, second half 1 — assumes the
# generator emitted a balanced, unshuffled cats-then-dogs ordering; confirm
# against save_bottlebeck_features.
train_labels = np.array(
        [0] * (nb_train_samples // 2) + [1] * (nb_train_samples // 2))
y_train = np_utils.to_categorical(train_labels)