예제 #1
0
# Preprocessing: augmented pipeline for training, plain rescaling for validation.
entrenamiento_datagen = ImageDataGenerator(rescale=1. / 255,
                                           shear_range=0.3,
                                           zoom_range=0.3,
                                           horizontal_flip=True)

# Validation images are only normalized — no augmentation.
validacion_datagen = ImageDataGenerator(rescale=1. / 255)

# Directory iterators: images resized to (altura, longitud), one-hot labels.
imagen_entrenamiento = entrenamiento_datagen.flow_from_directory(
    data_entrenamiento,
    target_size=(altura, longitud),
    batch_size=batch_size,
    class_mode='categorical')

imagen_validacion = validacion_datagen.flow_from_directory(
    data_validacion,
    target_size=(altura, longitud),
    batch_size=batch_size,
    class_mode='categorical')

# Build the CNN: one conv + max-pool stage, then inspect and compile it.
cnn = Sequential()
cnn.add(Convolution2D(filtrosConv1, tamano_filtro1, padding='same',
                      input_shape=(altura, longitud, 3), activation='relu'))
cnn.add(MaxPooling2D(pool_size=tamano_pool))
# Show a summary of the model. Check the number of trainable parameters.
# BUG FIX: the model object is named `cnn`; the original called
# `model.summary()` / `model.compile(...)` on an undefined name (NameError).
cnn.summary()

cnn.compile(optimizer='sgd',
            loss='categorical_crossentropy',
            metrics=['accuracy'])

from tensorflow.python.keras.preprocessing.image import ImageDataGenerator

# One shared generator: model-specific preprocessing plus random flips.
# NOTE(review): the same augmenting generator feeds *both* train and
# validation, so validation images get random horizontal flips too — confirm.
data_generator = ImageDataGenerator(preprocessing_function=preprocess_input,
                                    horizontal_flip=True)

train_generator = data_generator.flow_from_directory(
    'C:/Users/w10007346/Dropbox/CNN/FewerClasses/train',
    target_size=(image_size, image_size),
    batch_size=12,
    class_mode='categorical')

validation_generator = data_generator.flow_from_directory(
    'C:/Users/w10007346/Dropbox/CNN/FewerClasses/valid',
    target_size=(image_size, image_size),
    batch_size=12,
    class_mode='categorical')

# set class weights given the unbalanced data set (currently disabled)
#class_weights = class_weight.compute_class_weight(
#           'balanced',
#            np.unique(train_generator.classes),
#            train_generator.classes)
예제 #3
0
                            alpha=1,
                            include_top=False,
                            weights='imagenet',
                            input_tensor=None,
                            pooling=max)

# TRAINING AND VALIDATION DATASET
# generates batches of normalized/augmented image training data in tensor form
# (90/10 train/validation split driven by validation_split + subset=...).
data_gen = ImageDataGenerator(rescale=1.0 / 255,
                              data_format='channels_last',
                              validation_split=0.1)

# shuffle=False on both: batch order follows the directory listing.
train_generator = data_gen.flow_from_directory(dataset_path,
                                               target_size=(IMAGE_SIZE,
                                                            IMAGE_SIZE),
                                               batch_size=BATCH_SIZE,
                                               class_mode='categorical',
                                               subset='training',
                                               shuffle=False)

validate_generator = data_gen.flow_from_directory(dataset_path,
                                                  target_size=(IMAGE_SIZE,
                                                               IMAGE_SIZE),
                                                  batch_size=BATCH_SIZE,
                                                  class_mode='categorical',
                                                  subset='validation',
                                                  shuffle=False)

print(" Extract bottleneck features...")
# Get bottle neck features in numpay arrays from convolution part of model
bottleneck_train_features = mobileNet_model.predict_generator(
예제 #4
0
print(tf.__version__)

# Old local Windows paths kept for reference:
##train = r'D:\Study\COMPX591\Data\singleimages3\train'
#test = r'D:\Study\COMPX591\Data\singleimages\test'

train = '/Scratch/dans/kauri/data/singleimages/train'
test = '/Scratch/dans/kauri/data/singleimages/test'

# create a data generator
datagen = ImageDataGenerator(
    brightness_range=[0.4, 1.0], horizontal_flip=True
)  #brightness_range=[0.4,1.0] #horizontal_flip=True doesnt help accuracy

# load and iterate training dataset
train_generator = datagen.flow_from_directory(train,
                                              class_mode='categorical',
                                              batch_size=50,
                                              shuffle=True)
# load and iterate validation dataset
##val_it = datagen.flow_from_directory(validate, class_mode='binary', batch_size=64)
# load and iterate test dataset
##test_generator = datagen.flow_from_directory(test, class_mode='categorical', batch_size=100, shuffle=False)

# NOTE(review): the *test* generator reuses the augmenting datagen, so test
# images get random brightness changes and flips — confirm this is intended.
test_generator = datagen.flow_from_directory(test,
                                             class_mode='categorical',
                                             batch_size=50,
                                             shuffle=False)

##train_generator.
##print(train_generator.labels)

# confirm the iterator works
EPOCHS = 3
"""
마지막에 Sigmoid 를 쓰므로, 

크게 맞은 결과 : 모델이 예측한 값이 0.8초과 0.2 미만 이고, 맞춘 경우
크게 틀린 결과 : 모델이 예측한 값이 0.8초과 0.2 미만 이고, 틀린 경우
근소하게 틀린 결과 : 모델이 예측한 값이 0.4 에서 0.6 사이이고, 틀린 경우
"""

# 시각화 데이터 생성

testing_data_generator = ImageDataGenerator(rescale=1. / 255)

test_set = testing_data_generator.flow_from_directory(
    'Dataset/PetImages/Test/',
    target_size=(INPUT_SIZE, INPUT_SIZE),
    batch_size=1,
    class_mode='binary')  # batchsize = 1;;

strongly_wrong_idx = []
strongly_right_idx = []
weakly_wrong_idx = []

model = load_model('my_model_vgg16')

for i in range(test_set.__len__()):
    img = test_set.__getitem__(i)[0]
    pred_prob = model.predict(img)[0][0]  # sigmoid 통과전값

    pred_label = int(pred_prob > 0.5)  # 예측값
    actual_label = int(test_set.__getitem__(i)[1][0])  # 실제 값
예제 #6
0
# CNN hyperparameters.
tamano_pool = (2, 2)  # max-pool window size
clases = 4  # number of output classes
lr = 0.0005  # learning rate

## Prepare the image pipelines (augmented training, rescale-only validation).

entrenamiento_datagen = ImageDataGenerator(rescale=1. / 255,
                                           shear_range=0.2,
                                           zoom_range=0.2,
                                           horizontal_flip=True)

test_datagen = ImageDataGenerator(rescale=1. / 255)

entrenamiento_generador = entrenamiento_datagen.flow_from_directory(
    data_entrenamiento,
    target_size=(altura, longitud),
    batch_size=batch_size,
    class_mode='categorical')

validacion_generador = test_datagen.flow_from_directory(
    data_validacion,
    target_size=(altura, longitud),
    batch_size=batch_size,
    class_mode='categorical')

## Build the neural network (the Convolution2D call below is cut off by
## the snippet boundary).

cnn = Sequential()
cnn.add(
    Convolution2D(filtrosConv1,
                  tamano_filtro1,
예제 #7
0
    # (fragment) tail of a plotting helper whose start was cut off above.
    plt.subplots_adjust(top=0.93)
    plt.show()


# LEGO brick dataset: one augmenting generator declared with a 20% split.
train_dir = 'C:/Users/Win10/Desktop/LEGO brick images/train'
val_dir = 'C:/Users/Win10/Desktop/LEGO brick images/valid'

augs_gen = ImageDataGenerator(rescale=1. / 255,
                              shear_range=0.2,
                              zoom_range=0.2,
                              horizontal_flip=True,
                              validation_split=0.2)

# NOTE(review): neither flow call passes subset=..., so validation_split has
# no effect here, and the validation directory is read through the same
# augmenting datagen (validation images get augmented) — confirm intent.
train_gen = augs_gen.flow_from_directory(train_dir,
                                         target_size=(150, 150),
                                         batch_size=16,
                                         class_mode='categorical',
                                         shuffle=True)
test_gen = augs_gen.flow_from_directory(val_dir,
                                        target_size=(150, 150),
                                        batch_size=16,
                                        class_mode='categorical',
                                        shuffle=False)


def ConvBlock(model, layers, filters):
    """Append `layers` conv stages to `model` in place.

    Each stage is Conv2D -> SeparableConv2D (both 3x3, selu) ->
    BatchNormalization -> 2x2 max-pool with stride 2.
    """
    for _ in range(layers):
        for stage_layer in (Conv2D(filters, (3, 3), activation='selu'),
                            SeparableConv2D(filters, (3, 3), activation='selu'),
                            BatchNormalization(),
                            MaxPooling2D((2, 2), strides=(2, 2))):
            model.add(stage_layer)
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
import numpy as np

# Augmentation pipeline used to synthesize a small in-memory dataset.
datagen = ImageDataGenerator(rotation_range=40,
                             width_shift_range=0.2,
                             height_shift_range=0.2,
                             rescale=1. / 255,
                             shear_range=0.2,
                             zoom_range=0.2,
                             horizontal_flip=True,
                             fill_mode='nearest')

# Stream augmented 32x32 images one at a time from the 'images' folder.
generator = datagen.flow_from_directory('images',
                                        target_size=(32, 32),
                                        class_mode='categorical',
                                        batch_size=1)
X = list()
Y = list()
count = 0
# The directory iterator loops forever, so break manually; note that
# `count > 70` stops after collecting 71 samples, not 70.
for inputs, outputs in generator:
    X.append(inputs[0])
    Y.append(outputs[0])
    count += 1
    if count > 70:
        break

x = np.array(X)
y = np.array(Y)

samples_1 = list()
samples_2 = list()
class TransferModel(object):
    """VGG16 transfer-learning helper: data loading, head surgery, training."""

    def __init__(self):
        # Rescale-only pipelines for the training and test images.
        self.train_generator = ImageDataGenerator(rescale=1.0/255.0)
        self.test_generator = ImageDataGenerator(rescale=1.0/255.0)

        # Directories holding the training and test images.
        self.train_dir = "./data/train"
        self.test_dir = "./data/test"

        # Image / batching parameters.
        self.image_size = (224, 224)
        self.batch_size = 32

        # Base model for transfer learning: VGG16 without its 3 fully
        # connected layers, with pretrained (ImageNet) weights loaded.
        self.base_model = VGG16(weights='imagenet', include_top=False)

    def get_local_data(self):
        '''
        Read the local image data and class labels.
        :return: training and test data iterators
        '''
        train_gen = self.train_generator.flow_from_directory(self.train_dir,
                                                             target_size=self.image_size,
                                                             batch_size=self.batch_size,
                                                             class_mode="binary",
                                                             shuffle=True)  # shuffle for training

        # NOTE(review): class_mode="binary" yields scalar labels, but
        # refine_base_model ends in a 5-way softmax — confirm the dataset.
        test_gen = self.test_generator.flow_from_directory(self.test_dir,
                                                           target_size=self.image_size,
                                                           batch_size=self.batch_size,
                                                           class_mode="binary",
                                                           shuffle=True)
        return train_gen, test_gen


    def refine_base_model(self):
        '''
        Fine-tune the VGG structure: after the 5 blocks add global average
        pooling (fewer transfer parameters) plus two dense layers.
        :return: the new transfer model
        '''
        # 1 - output of the original no-top model
        x = self.base_model.outputs[0]

        # 2 - append our own head after that output
        x = keras.layers.GlobalAveragePooling2D()(x)
        x = keras.layers.Dense(1024, activation=tf.nn.relu)(x)
        # BUG FIX: the Dense layer must be *applied* to x. The original
        # assigned the bare layer object, so the head was never connected
        # and Model(...) below would fail.
        y_predict = keras.layers.Dense(5, activation=tf.nn.softmax)(x)

        # 3 - new model: VGG input, y_predict output
        transfer_model = keras.models.Model(inputs=self.base_model.inputs, outputs=y_predict)

        return transfer_model


    def freeze_model(self):
        '''
        Freeze the VGG base (5 blocks).
        How much of VGG to freeze depends on your amount of data.
        :return:
        '''
        # self.base_model.layers returns the list of all layers.
        for layer in self.base_model.layers:
            layer.trainable = False

    def compile(self, model):
        # Compile the model.
        # BUG FIX: was `modle.compile(...)` — a NameError typo.
        model.compile(optimizer=keras.optimizers.Adam(),
                      loss=keras.losses.sparse_categorical_crossentropy,
                      metrics=["accuracy"])
        return None

    def fit_generator(self, model, train_gen, test_gen):
        """Train `model`, checkpointing the best weights each epoch."""
        modelckpt = keras.callbacks.ModelCheckpoint("./cpkt/transfer_{epoch:02d}-{val_acc:.2f}.h5",
                                                    monitor="val_acc",
                                                    save_weights_only=True,
                                                    save_best_only=True,
                                                    mode="auto",
                                                    period=1)

        # BUG FIX: was `modle.transfer_model.fit_generator(...)` (NameError),
        # and the checkpoint callback above was created but never passed.
        model.fit_generator(train_gen, epochs=3, validation_data=test_gen,
                            callbacks=[modelckpt])

        return None
def train(model_file,
          train_path,
          validation_path,
          target_size=(256, 256),
          num_classes=5,
          steps=32,
          num_epochs=28):
    """Train (or resume training of) a classifier, then fine-tune its top.

    Loads an existing model from `model_file` if present, otherwise builds a
    fresh one. Trains once with rmsprop, then freezes the first 249 layers,
    unfreezes the rest, and retrains with a very low-LR SGD.
    """
    if os.path.exists(model_file):
        print('\n*** existing model found at {}. Loading. ***\n\n'.format(
            model_file))
        model = load_existing(model_file)
    else:
        print("\n*** Creating new model ***\n\n")
        model = create_model(num_classes=num_classes)

    # Checkpoint every epoch so interrupted runs can resume from model_file.
    check_point = ModelCheckpoint(model_file, period=1)
    model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
    # Augmented training pipeline; validation only rescales.
    train_datagen = ImageDataGenerator(
        rescale=1. / 255,
        shear_range=0.3,
        zoom_range=0.3,
        # horizontal_flip=True,
        rotation_range=20,
        width_shift_range=0.2,
        height_shift_range=0.2,
        brightness_range=(0.8, 1.2))
    test_datagen = ImageDataGenerator(rescale=1. / 255)
    train_generator = train_datagen.flow_from_directory(
        train_path,
        target_size=target_size,
        batch_size=32,
        class_mode='categorical')
    validation_generator = test_datagen.flow_from_directory(
        validation_path,
        target_size=target_size,
        batch_size=32,
        class_mode='categorical')
    # First pass: train the whole network.
    model.fit_generator(
        train_generator,
        steps_per_epoch=steps,
        epochs=num_epochs,
        callbacks=[
            check_point,
        ],
        validation_data=validation_generator,
        validation_steps=50,
        shuffle=True,
    )
    # Fine-tuning: freeze the bottom 249 layers, train only the top ones.
    for layer in model.layers[:249]:
        layer.trainable = False

    for layer in model.layers[249:]:
        layer.trainable = True

    # Recompile (needed for the trainable flags to take effect) with a very
    # low learning rate so fine-tuning does not destroy learned features.
    model.compile(optimizer=SGD(lr=0.00001, momentum=0.9),
                  loss='categorical_crossentropy')
    model.fit_generator(train_generator,
                        steps_per_epoch=steps,
                        epochs=num_epochs,
                        callbacks=[check_point],
                        validation_data=validation_generator,
                        validation_steps=50)
예제 #11
0
# Image preprocessing.
# Prepare our images: build a generator for the training data.
entrenamiento_datagen = ImageDataGenerator(
    rescale=1. / 255,  # normalize pixel values from 0-255 down to 0-1
    shear_range=0.2,  # allows shearing the image
    zoom_range=0.2,  # images may be zoomed, whole or by sections
    horizontal_flip=True  # takes the image and mirrors it
)

# For validation only the rescaling is applied.
test_datagen = ImageDataGenerator(rescale=1. / 255)

entrenamiento_generator = entrenamiento_datagen.flow_from_directory(
    data_entrenamiento,  # our training set
    target_size=(altura, longitud),  # resolution in pixels
    batch_size=batchsize,
    class_mode='categorical'  # categorization mode
)

print(entrenamiento_generator.class_indices)

validacion_generator = test_datagen.flow_from_directory(
    data_validacion,  # our validation data
    target_size=(altura, longitud),
    batch_size=batchsize,
    class_mode='categorical'  # categorization mode
)

cnn = Sequential()  # the convolutional network is sequential
# Add convolution layer 1 (2D), 150x150, relu activation
# (the call below is cut off by the snippet boundary)
cnn.add(
예제 #12
0
        # (fragment) keyword arguments of an ImageDataGenerator call whose
        # opening line was cut off above.
        rotation_range=
        30,  # randomly rotate images in the range (degrees, 0 to 180)
        width_shift_range=
        0.2,  # randomly shift images horizontally (fraction of total width)
        height_shift_range=
        0.2,  # randomly shift images vertically (fraction of total height)
        horizontal_flip=True,  # randomly flip images
        vertical_flip=False,  # randomly flip images
    )

    # Put all images into CNN to train the model
    train_dir = config.train_dir
    print(os.listdir(train_dir))
    train_generator = datagen.flow_from_directory(
        train_dir,
        target_size=(image_size, image_size),
        batch_size=batch_size,
        shuffle=True,
        seed=666,  # fixed seed so the shuffling is reproducible
        # classes=dirs,
        class_mode='categorical',
    )
    # train model
    model.fit_generator(train_generator,
                        steps_per_epoch=train_generator.samples // batch_size,
                        epochs=epochs)

    # save model to specified path with specified format (SavedModel)
    MODEL_PATH = config.save_model_pb
    tf.keras.experimental.export_saved_model(model, MODEL_PATH + '/SavedModel')
seed(args.seed)  # seed the RNG for reproducibility
# NOTE: everything between the triple quotes below is a bare string literal,
# i.e. the whole configuration / generator-setup section is disabled code.
'''
DATASET_PATH = config.DATASET_PATH  # dataset path
IMAGE_SIZE = config.IMAGE_SIZE  # wrap the input image size
NUM_CLASSES = config.NUM_CLASSES
BATCH_SIZE = config.BATCH_SIZE
FREEZE_LAYERS = config.FREEZE_LAYERS
NUM_EPOCHS = config.NUM_EPOCHS
WEIGHTS_FINAL = 'model-resnet50-final.h5'
earlyStopping = EarlyStopping(monitor='val_loss', patience=100)

train_datagen_ref = ImageDataGenerator()
train_batches_ref = train_datagen_ref.flow_from_directory(
    DATASET_PATH + '/train',
    target_size=IMAGE_SIZE,
    interpolation='bicubic',
    class_mode='categorical',
    shuffle=True,
    batch_size=BATCH_SIZE)

#valid_datagen = ImageDataGenerator(rescale=1./255)
valid_datagen = ImageDataGenerator()
valid_batches = valid_datagen.flow_from_directory(DATASET_PATH + '/val',
                                                  target_size=IMAGE_SIZE,
                                                  interpolation='bicubic',
                                                  class_mode='categorical',
                                                  shuffle=False,
                                                  batch_size=BATCH_SIZE)

# test dataset
'''
예제 #14
0
# Transfer-learning head: pretrained VGG19 base (avg-pooled) + softmax output.
my_new_model.add(VGG19(include_top=False, pooling='avg', weights='imagenet'))
my_new_model.add(Dense(num_classes, activation='softmax'))

# Say not to train the first layer (the VGG19 base). It is already trained.
my_new_model.layers[0].trainable = False

my_new_model.compile(optimizer='sgd',
                     loss='categorical_crossentropy',
                     metrics=['accuracy'])

# Plain generators: no augmentation, images fed as-is.
data_generator_train = ImageDataGenerator()
data_generator_test = ImageDataGenerator()

train_generator = data_generator_train.flow_from_directory(
    '../TFM/Dataset_Resize_Split_Train',
    target_size=(image_size, image_size),
    batch_size=24,
    class_mode='categorical')

validation_generator = data_generator_test.flow_from_directory(
    '../TFM/Dataset_Resize_Split_Dev',
    target_size=(image_size, image_size),
    batch_size=24,
    class_mode='categorical')

# TensorBoard logging, plus early stopping when validation loss stalls.
tbCallBack = TensorBoard(log_dir='/Tensorboard/Graph_VGG19_Final',
                         histogram_freq=0,
                         write_graph=True,
                         write_images=True)
early_stopping = EarlyStopping(monitor='val_loss', patience=2)
from tensorflow.python.keras.applications.vgg16 import VGG16
from tensorflow.python.keras.models import Model
from tensorflow.python.keras.layers import GlobalAveragePooling2D
from tensorflow.python.keras.layers import Dense
from tensorflow.python.keras import optimizers
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator

train_data_dir = "../images/cats_dogs/"
img_width, img_height = 224, 224
epochs = 1

# Data Augmentation (training images only).
train_datagen = ImageDataGenerator(rescale=1. / 255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)

train_generator = train_datagen.flow_from_directory(
                        directory=train_data_dir,
                        target_size=[img_width, img_height],
                        class_mode='categorical')

# Step 2-1: Replace the softmax layer
base_model = VGG16(include_top=False)
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(1024, activation='relu')(x)
prediction = Dense(2, activation='softmax')(x)

model = Model(inputs=base_model.input, outputs=prediction)
# BUG FIX: freeze only the pretrained VGG16 base. The original iterated
# `model.layers`, which also froze the new Dense head, leaving nothing
# trainable — fit_generator would update no weights at all.
for layer in base_model.layers:
    layer.trainable = False
# NOTE(review): binary_crossentropy with a 2-unit softmax and one-hot
# ('categorical') labels trains, but categorical_crossentropy is the
# conventional pairing — confirm before changing.
model.compile(loss='binary_crossentropy',optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),metrics=['accuracy'])
model.fit_generator(train_generator,epochs=epochs)
예제 #16
0
import logging
# Silence TensorFlow's informational logging; only errors get through.
logger = tf.get_logger()
logger.setLevel(logging.ERROR)


# In[ ]:


#load dataset


# In[89]:


datagen= ImageDataGenerator()
train_it= datagen.flow_from_directory('data/')


# In[36]:


CLASS_NAMES = ['Blue', 'Green','Red']


# In[37]:


# Count the images available on disk for each class.
num_red = len(os.listdir('data/train/red'))
print(num_red)
num_blue = len(os.listdir('data/train/blue'))
print(num_blue)
])

# Binary cats-vs-dogs setup: RMSprop + binary cross-entropy.
model.compile(optimizer=RMSprop(lr=0.001), loss='binary_crossentropy', metrics=['acc'])


TRAINING_DIR = "/tmp/cats-v-dogs/training/"
# Heavy augmentation for the training split only.
train_datagen = ImageDataGenerator(rescale=1./255,
      rotation_range=40,
      width_shift_range=0.2,
      height_shift_range=0.2,
      shear_range=0.2,
      zoom_range=0.2,
      horizontal_flip=True,
      fill_mode='nearest')
train_generator = train_datagen.flow_from_directory(TRAINING_DIR,
                                                    batch_size=100,
                                                    class_mode='binary',
                                                    target_size=(150, 150))

VALIDATION_DIR = "/tmp/cats-v-dogs/testing/"
# BUG FIX: validation data must not be augmented. The original applied the
# same random rotations/shifts/flips to the validation set, which makes the
# validation metrics noisy and unrepresentative. Only rescale here.
validation_datagen = ImageDataGenerator(rescale=1./255)
validation_generator = validation_datagen.flow_from_directory(VALIDATION_DIR,
                                                              batch_size=100,
                                                              class_mode='binary',
                                                              target_size=(150, 150))
예제 #18
0
import numpy as np

# Just disables the warning, doesn't enable AVX/FMA (no GPU)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

print('YTD')

# Low-resolution YTD test-set configuration.
path_dataset_lr = './ytd'
img_width_lr, img_height_lr = 24, 24
batch_size = 8

datagen = ImageDataGenerator(rescale=1. / 255)

# shuffle=False so the prediction order matches the directory listing.
testing_generator = datagen.flow_from_directory(str(path_dataset_lr + '/test'),
                                                target_size=(img_width_lr,
                                                             img_height_lr),
                                                batch_size=batch_size,
                                                class_mode='categorical',
                                                shuffle=False)

# BUG FIX: np.math was a deprecated alias of the stdlib math module and was
# removed in NumPy 2.0. Use np.ceil and cast, so steps is a plain int.
test_steps_per_epoch = int(np.ceil(testing_generator.samples /
                                   testing_generator.batch_size))

model = load_model('tbe_cnn_ytd_sgd.h5')
model.summary()

print('predictions: ')

predictions = model.predict_generator(testing_generator,
                                      steps=test_steps_per_epoch)

# Class index with the highest probability for each sample.
predicted_classes = np.argmax(predictions, axis=1)
예제 #19
0
def train_model(model, way='fast'):
    """Train `model` on the fire-detection image folders.

    way='fast' uses the reduced FAST_* image size, batch sizes and epoch
    count; any other value uses the full-size settings.  Returns
    (model, train_generator, validation_generator).
    """
    #ata_generator = ImageDataGenerator(preprocessing_function=preprocess_input)
    if way == 'fast':
        # Light augmentation on top of the model's preprocess_input.
        data_generator_with_aug = ImageDataGenerator(
            preprocessing_function=preprocess_input,
            width_shift_range=0.1,
            height_shift_range=0.1,
            #sear_range=0.01,
            zoom_range=[0.9, 1.25],
            horizontal_flip=True,
            vertical_flip=False,
            data_format='channels_last',
            brightness_range=[0.5, 1.5])
        train_generator = data_generator_with_aug.flow_from_directory(
            'input/fire-detection-from-cctv/data/data/img_data/train',
            target_size=(FAST_IMG_SIZE, FAST_IMG_SIZE),
            batch_size=FAST_TRAIN_BATCH_SIZE,
            class_mode='categorical')
        validation_generator = data_generator_with_aug.flow_from_directory(
            'input/fire-detection-from-cctv/data/data/img_data/test',
            target_size=(FAST_IMG_SIZE, FAST_IMG_SIZE),
            batch_size=FAST_TEST_BATCH_SIZE,
            shuffle=False,
            class_mode='categorical')
        # BUG FIX: this branch batches by FAST_TRAIN_BATCH_SIZE, but
        # steps_per_epoch was divided by TRAIN_BATCH_SIZE, so each epoch
        # covered the wrong number of batches.
        H = model.fit_generator(train_generator,
                                steps_per_epoch=train_generator.n /
                                FAST_TRAIN_BATCH_SIZE,
                                epochs=FAST_NUM_EPOCHS,
                                validation_data=validation_generator,
                                validation_steps=1)
        return model, train_generator, validation_generator

    else:
        # Same augmentation, full-size images and batch sizes.
        data_generator_with_aug = ImageDataGenerator(
            preprocessing_function=preprocess_input,
            width_shift_range=0.1,
            height_shift_range=0.1,
            #sear_range=0.01,
            zoom_range=[0.9, 1.25],
            horizontal_flip=True,
            vertical_flip=False,
            data_format='channels_last',
            brightness_range=[0.5, 1.5])
        train_generator = data_generator_with_aug.flow_from_directory(
            'input/fire-detection-from-cctv/data/data/img_data/train',
            target_size=(IMG_SIZE, IMG_SIZE),
            batch_size=TRAIN_BATCH_SIZE,
            class_mode='categorical')

        validation_generator = data_generator_with_aug.flow_from_directory(
            'input/fire-detection-from-cctv/data/data/img_data/test',
            target_size=(IMG_SIZE, IMG_SIZE),
            batch_size=TEST_BATCH_SIZE,
            shuffle=False,
            class_mode='categorical')

        #y_train = get_labels(train_generator)
        #weights = class_weight.compute_class_weight('balanced',np.unique(y_train), y_train)
        #dict_weights = { i: weights[i] for i in range(len(weights)) }

        H = model.fit_generator(
            train_generator,
            steps_per_epoch=train_generator.n / TRAIN_BATCH_SIZE,
            epochs=NUM_EPOCHS,
            validation_data=validation_generator,
            validation_steps=1  #,
            #class_weight=dict_weights
        )

        #plot_history( H, NUM_EPOCHS )
        return model, train_generator, validation_generator
    # (fragment) trailing keyword arguments of a train datagen call whose
    # opening line was cut off above.
    width_shift_range=0.3,
    height_shift_range=0.3,
    rotation_range=30
)
# NOTE(review): this *test/validation* datagen applies random flips, zooms,
# shifts and rotations — validation data is normally rescale-only; confirm
# this augmentation is intended.
test_datagen = ImageDataGenerator(
    rescale=1./255,
    horizontal_flip=True,
    fill_mode='nearest',
    zoom_range=0.3,
    width_shift_range=0.3,
    height_shift_range=0.3,
    rotation_range=30
)
train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode='categorical'
)
# print(train_generator)
validation_generator = test_datagen.flow_from_directory(
    validation_dir,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode='categorical'
)
# Save best weights by validation accuracy; stop after 10 stale epochs.
checkpoint = ModelCheckpoint('./log/vgg16_1.h5', monitor='val_acc', verbose=1,
                             save_best_only=True, save_weights_only=True, mode='auto', period=1)
early = EarlyStopping(monitor='val_acc', min_delta=0,
                      patience=10, verbose=1, mode='auto')
model_final.fit_generator(train_generator, steps_per_epoch=nb_train_samples, epochs=epochs,
                          validation_data=validation_generator, validation_steps=nb_validation_samples, callbacks=[checkpoint, early])
예제 #21
0
    # (fragment) tail of a VAE loss function cut off above: batch-mean KL
    # divergence combined with the reconstruction loss (weighted by BETA).
    kl_loss = -0.5 * tf.reduce_mean(
        tf.reduce_sum(
            (1 + sigma - tf.math.pow(mu, 2) - tf.math.exp(sigma)), axis=1))
    return K.mean(reconstruction_loss + BETA * kl_loss)


#========================================================================================================

# initialize the number of epochs to train for, initial learning rate,

train_datagen = ImageDataGenerator(
    rescale=1. / 255, validation_split=0.2)  # set validation split

# class_mode='input' makes the generator yield (image, image) pairs — the
# autoencoder's target is its own input.
train_generator = train_datagen.flow_from_directory(
    './mycar/vae/imagFace',
    target_size=(height, width),
    batch_size=BS,
    class_mode='input',
    subset='training')  # set as training data

validation_generator = train_datagen.flow_from_directory(
    './mycar/vae/imagFace',  # same directory as training data
    target_size=(height, width),
    batch_size=BS,
    class_mode='input',
    subset='validation')  # set as validation data

# construct our convolutional autoencoder
print("[INFO] building autoencoder...")
opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
if autoenc:
    autoencoder.compile(loss=kl_reconstruction_loss,
예제 #22
0
def main(style,
         style_weight=4,
         content_weight=1.0,
         tv_weight=1e-6,
         image_size=256):
    """Train a fast style-transfer network for `style` over the content
    images, periodically saving weights and sample output images.
    """
    img_width = img_height = image_size

    style_image_path = get_style_img_path(style)

    # Image-transform net plus the loss network wrapped around it.
    net = nets.image_transform_net(img_width, img_height, tv_weight)
    model = nets.loss_net(net.output, net.input, img_width, img_height,
                          style_image_path, content_weight, style_weight)
    model.summary()

    # NOTE(review): despite the name, nb_epoch counts *images* streamed
    # (2 passes over 82785 content images at batch size 1), not epochs.
    nb_epoch = 82785 * 2
    train_batchsize = 1
    train_image_path = "artfilter/images/content/"

    learning_rate = 1e-3  #1e-3
    optimizer = Adam()  # Adam(lr=learning_rate,beta_1=0.99)

    model.compile(
        optimizer,
        dummy_loss)  # Dummy loss since we are learning from regularizes

    datagen = ImageDataGenerator()

    dummy_y = np.zeros(
        (train_batchsize, img_width, img_height,
         3))  # Dummy output, not used since we use regularizers to train

    #model.load_weights(style+'_weights.h5',by_name=False)

    # Set skip_to > 0 to resume a run part-way through the image stream.
    skip_to = 0

    i = 0
    t1 = time.time()
    for x in datagen.flow_from_directory(train_image_path,
                                         class_mode=None,
                                         batch_size=train_batchsize,
                                         target_size=(img_width, img_height),
                                         shuffle=False):
        if i > nb_epoch:
            break

        # Fast-forward past already-processed images when resuming.
        if i < skip_to:
            i += train_batchsize
            if i % 1000 == 0:
                print("skip to: %d" % i)

            continue

        hist = model.train_on_batch(x, dummy_y)

        # Log loss and elapsed time every 50 images.
        if i % 50 == 0:
            print(hist, (time.time() - t1))
            t1 = time.time()

        # Every 500 images: dump input/output previews and save weights.
        if i % 500 == 0:
            print("epoc: ", i)
            val_x = net.predict(x)

            display_img(i, x[0], style)
            display_img(i, val_x[0], style, True)
            model.save_weights(style + '_weights.h5')

        i += train_batchsize
예제 #23
0
import os
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

# Read the image directory path(s) from the command-line arguments.
files = sys.argv[1:]

# Load the trained model.
net = load_model(r'C:\Users\user\Documents/model.h5')
IMAGE_SIZE = (224, 224)

# batch_size=1, shuffle=False: predictions keep the directory's file order.
test_datagen = ImageDataGenerator(fill_mode='wrap')
test_batches = test_datagen.flow_from_directory(files[0],
                                                target_size=IMAGE_SIZE,
                                                interpolation='bicubic',
                                                class_mode='categorical',
                                                shuffle=False,
                                                batch_size=1)


def filename(dir):
    """Recursively collect absolute paths of all ``.png`` files under *dir*.

    Paths are returned with forward slashes so they are usable on both
    Windows and POSIX systems.

    NOTE: the original comment claimed this gathered .wav files; the filter
    below has always matched .png images only.
    """
    files = []
    for root, _, names in os.walk(dir):
        for f in names:
            absfile = os.path.join(root, f)
            if absfile.endswith(".png"):
                # Normalize Windows backslashes to forward slashes.
                files.append(absfile.replace("\\", "/"))
    return files
# ===== Example #24 (snippet separator; original marker "예제 #24", score 0) =====
    layers.Dense(1),
    layers.Activation('sigmoid')
])
model.compile(loss='binary_crossentropy',
              optimizer='adam', metrics=['accuracy'])
img_width, img_height = 250, 250
batch_size = 1
# Count samples per split (two classes: cat / dog).
nb_train_samples = len(os.listdir(relative('train/cat')) +
                       os.listdir(relative('train/dog')))
nb_validation_samples = len(os.listdir(
    relative('val/cat')) + os.listdir(relative('val/dog')))
nb_test_samples = len(os.listdir(relative('test/cat')) +
                      os.listdir(relative('test/dog')))


# Pixel rescaling only; one generator shared by all three splits.
data_gen = ImageDataGenerator(rescale=1./255)
train_generator = data_gen.flow_from_directory(
    relative('train'), batch_size=batch_size, target_size=(img_width, img_height), class_mode='binary')

val_generator = data_gen.flow_from_directory(
    relative('val'), batch_size=batch_size, target_size=(img_width, img_height), class_mode='binary')

# BUG FIX: the test generator previously read from 'val', silently evaluating
# on the validation split instead of the held-out test set.
test_generator = data_gen.flow_from_directory(
    relative('test'), batch_size=batch_size, target_size=(img_width, img_height), class_mode='binary')

epochs = 10
# BUG FIX: steps per epoch must cover the whole split each epoch, i.e.
# samples // batch_size; the original divided by `epochs`, so every epoch
# saw only a tenth of the data.
history = model.fit_generator(train_generator,
                              steps_per_epoch=nb_train_samples // batch_size,
                              validation_data=val_generator, epochs=epochs,
                              validation_steps=nb_validation_samples // batch_size)
# Heavy augmentation for training: random rotations, shears, zooms, shifts
# and horizontal flips; pixel values rescaled to [0, 1].
train_data_generator = ImageDataGenerator(rescale=1. / 255,
                                          rotation_range=30,
                                          shear_range=0.3,
                                          zoom_range=0.3,
                                          width_shift_range=0.4,
                                          height_shift_range=0.4,
                                          horizontal_flip=True,
                                          fill_mode='nearest')

# Validation data gets only the rescaling, never augmentation.
validation_data_generator = ImageDataGenerator(rescale=1. / 255)

# Single-channel (grayscale) batches with one-hot labels.
# `train_data_dir`, `img_rows`, `img_cols` and `batch_size` are defined
# elsewhere in the file.
train_generator = train_data_generator.flow_from_directory(
    train_data_dir,
    color_mode='grayscale',
    target_size=(img_rows, img_cols),
    batch_size=batch_size,
    class_mode='categorical',
    shuffle=True)

# NOTE(review): shuffling the validation stream is harmless for aggregate
# metrics but unusual — confirm it is intentional.
validation_generator = validation_data_generator.flow_from_directory(
    validation_data_dir,
    color_mode='grayscale',
    target_size=(img_rows, img_cols),
    batch_size=batch_size,
    class_mode='categorical',
    shuffle=True)

model = Sequential()

# Feature Learning Layer 0
# ===== Example #26 (snippet separator; original marker "예제 #26", score 0) =====
function every time it reads an image, we use this function to be consistent with how
the pre-trained model is created
"""
# Generator with no augmentation; `preprocess_input` applies the same pixel
# normalization the pre-trained backbone was trained with.
data_generator = ImageDataGenerator(preprocessing_function=preprocess_input)


"""
we use the flow from directory command, we tell it what directory the data is in, 
target_size = what size image we want,
batch size = how many images to read in at a time,
and that we tell it we're classifying data into different categories(class_mode='categorical')
more information about the choice of batch size in the slides
"""
# NOTE(review): the ragged argument indentation below is harmless (it sits
# inside parentheses) but should be normalized.
train_generator = data_generator.flow_from_directory(
        'train',
        target_size=target_size,
       batch_size = 25,
        class_mode='categorical')
"""
we do the as above to setup the way to read the validation data
that creates a validtion generator
"""
validation_generator = data_generator.flow_from_directory(
        'valid',
        target_size=target_size,
        batch_size = 25,
        class_mode='categorical')
"""
the ImageDataGenerator is especially valuable when working with
large data sets because we don't need to hold the whole data set
in memory at once 
    print(layer, layer.trainable)

# Freeze the pre-trained backbone so only the new head is trained.
my_new_model.layers[0].trainable = False

my_new_model.compile(optimizer='sgd',
                     loss='categorical_crossentropy',
                     metrics=['accuracy'])

from tensorflow.python.keras.applications.inception_v3 import preprocess_input
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator

# InceptionV3 preprocessing plus horizontal-flip augmentation, shared by both
# the training and validation streams.
data_generator = ImageDataGenerator(preprocessing_function=preprocess_input,
                                    horizontal_flip=True)

train_generator = data_generator.flow_from_directory(
    'C:/Users/w10007346/Pictures/Celeb_sets/train',
    target_size=(178, 218),
    batch_size=12,
    class_mode='categorical')

validation_generator = data_generator.flow_from_directory(
    'C:/Users/w10007346/Pictures/Celeb_sets/valid',
    target_size=(178, 218),
    batch_size=12,
    class_mode='categorical')

# NOTE(review): steps_per_epoch / validation_steps are hard-coded; they should
# equal samples // batch_size for the actual directory contents — confirm.
my_new_model.fit_generator(train_generator,
                           epochs=20,
                           steps_per_epoch=2667,
                           validation_data=validation_generator,
                           validation_steps=667)
# ===== Example #28 (snippet separator; original marker "예제 #28", score 0) =====
    # === Generators ===

    # Training stream: geometric + brightness augmentation on top of the
    # backbone's own preprocessing function.
    train_datagen = ImageDataGenerator(shear_range=0.2,
                                       zoom_range=0.2,
                                       rotation_range=30,
                                       height_shift_range=0.2,
                                       width_shift_range=0.2,
                                       horizontal_flip=True,
                                       brightness_range=[0.8, 1.2],
                                       preprocessing_function=preprocess_input)

    # Validation stream: preprocessing only, no augmentation.
    valid_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)

    train_generator = train_datagen.flow_from_directory(training_dir,
                                                        target_size=(HEIGHT,
                                                                     WIDTH),
                                                        batch_size=batch_size)
    # Validation uses half the training batch size.
    valid_generator = valid_datagen.flow_from_directory(
        validating_dir,
        target_size=(HEIGHT, WIDTH),
        batch_size=batch_size // 2)

    # === Model ===

    # VGG16 backbone (not ResNet, despite the original comment) without its
    # classification head; 'avg' pooling yields a flat feature vector.
    model = Sequential()
    model.add(VGG16(include_top=False, pooling='avg', weights='imagenet'))

    # New classification head on top of the frozen-or-finetuned backbone.
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.2))
# ===== Example #29 (snippet separator; original marker "예제 #29", score 0) =====
# Classifier head: the convolutional stack built above this fragment feeds a
# small fully-connected network ending in a 4-way softmax.
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))  # heavy dropout to curb overfitting in the dense layer
model.add(Dense(4))
model.add(Activation('softmax'))

model.compile(loss=tf.keras.losses.categorical_crossentropy,
              optimizer='adam',
              metrics=['accuracy'])

# One shared generator (pixel rescaling only) for all three splits.
datagen = ImageDataGenerator(rescale=1. / 255)

train_generator = datagen.flow_from_directory(train_dir,
                                              target_size=(img_width,
                                                           img_height),
                                              batch_size=batch_size,
                                              class_mode='categorical')

val_generator = datagen.flow_from_directory(val_dir,
                                            target_size=(img_width,
                                                         img_height),
                                            batch_size=batch_size,
                                            class_mode='categorical')

test_generator = datagen.flow_from_directory(test_dir,
                                             target_size=(img_width,
                                                          img_height),
                                             batch_size=batch_size,
                                             class_mode='categorical')
# ===== Example #30 (snippet separator; original marker "예제 #30", score 0) =====
# In[6]:


# Rich augmentation for training; `preprocess_input` normalizes pixels the
# way the pre-trained backbone expects.
train_datagen = ImageDataGenerator(preprocessing_function=preprocess_input,
                                   rotation_range=40,
                                   width_shift_range=0.2,
                                   height_shift_range=0.2,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   channel_shift_range=10,
                                   horizontal_flip=True,
                                   fill_mode='nearest')
train_batches = train_datagen.flow_from_directory(DATASET_PATH + '/train',
                                                  target_size=IMAGE_SIZE,
                                                  interpolation='bicubic',
                                                  class_mode='categorical',
                                                  shuffle=True,
                                                  batch_size=BATCH_SIZE)


# First check 1 batch of uncropped images

# In[7]:


# Pull one batch to sanity-check the tensor shape
# (presumably (BATCH_SIZE, *IMAGE_SIZE, 3) — confirm against IMAGE_SIZE).
batch_x, batch_y = next(train_batches)
batch_x.shape


# In[8]:
# ===== Example #31 (snippet separator; original marker "예제 #31", score 0) =====
        plt.subplot(5, 5, i + 1)
        plt.imshow(np.squeeze(images[i].numpy().astype('uint8')))
        plt.title(train_ds.class_names[labels[i]])
        plt.axis('off')

# In[10]:
# Image augmentation for the binary classifier: rescaling plus mild
# rotation/shear/zoom on the training stream.

training_dir = "X:/miniproject/train"
training_generator = ImageDataGenerator(rescale=1. / 255,
                                        rotation_range=15,
                                        shear_range=0.2,
                                        zoom_range=0.2)
train_generator = training_generator.flow_from_directory(training_dir,
                                                         target_size=(200,
                                                                      200),
                                                         batch_size=4,
                                                         class_mode='binary')

# In[11]:

# Validation: rescaling only, no augmentation.
validation_dir = "X:/miniproject/val"
validation_generator = ImageDataGenerator(rescale=1. / 255)
valid_generator = validation_generator.flow_from_directory(validation_dir,
                                                           target_size=(200,
                                                                        200),
                                                           batch_size=4,
                                                           class_mode='binary')

# In[12]: