Пример #1
0
# Load pre-saved image arrays and labels from .npy dumps.
# NOTE(review): allow_pickle=True executes arbitrary objects on load — confirm
# these .npy files come from a trusted source.
x = np.load('../../data/npy/LPD_train_x1.npy', allow_pickle=True)
y = np.load('../../data/npy/LPD_train_y1.npy', allow_pickle=True)
target = np.load('../../data/npy/target1.npy', allow_pickle=True)

# EfficientNet-specific input preprocessing.
from tensorflow.keras.applications.efficientnet import preprocess_input
x = preprocess_input(x)
target = preprocess_input(target)

# print(x.shape)
# print(y.shape)
# print(target.shape)

# Augmentation generator used for the training split only.
idg = ImageDataGenerator(
    zoom_range = 0.1,
    height_shift_range=0.1,
    width_shift_range=0.1,
    rotation_range=32 
)

# Identity generator (no augmentation) for validation/test data.
idg2 = ImageDataGenerator()

from sklearn.model_selection import train_test_split
# 90/10 train/validation split; fixed random_state for reproducibility.
x_train, x_val, y_train, y_val = train_test_split(x, y, train_size = 0.9, random_state = 128, shuffle = True)

# Training hyperparameters.
bts = 128  # batch size
optimizer = Adam(learning_rate = 1e-3)

train_generator = idg.flow(x_train, y_train, batch_size = bts, seed=1024)
valid_generator = idg2.flow(x_val, y_val)
test_generator = idg2.flow(target)
# Reshape flat grayscale images into the backend's expected tensor layout.
# NOTE(review): img_rows, img_cols, x_test, train_labels, and test_labels are
# not defined in this snippet — presumably set earlier in the original file;
# confirm before running.
if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)

# One-hot encode integer class labels.
number_of_classes = 10
train_labels = tf.keras.utils.to_categorical(train_labels, number_of_classes)
test_labels = tf.keras.utils.to_categorical(test_labels, number_of_classes)

# Training-time augmentation; pixels rescaled to [0, 1].
train_datagen = ImageDataGenerator(rescale=1. / 255,
                                   rotation_range=40,
                                   width_shift_range=0.2,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   fill_mode='nearest')

val_datagen = ImageDataGenerator(rescale=1. / 255.0)

# fit() is only required for featurewise statistics; it is a no-op for the
# options configured above, but harmless.
train_datagen.fit(x_train)
val_datagen.fit(x_test)

train_iterator = train_datagen.flow(x_train, train_labels, batch_size=128)
test_iterator = val_datagen.flow(x_test, test_labels, batch_size=128)

model = Sequential()
model.add(
    Conv2D(32,
           3,
Пример #3
0
def train_model(epochs=100, id=''):
    """Build, train, and save a small CNN for cats-vs-dogs binary
    classification.

    Parameters
    ----------
    epochs : int
        Number of training epochs.
    id : str
        Prefix for the saved model filename. (NOTE: shadows the builtin
        ``id``; the name is kept for caller compatibility.)

    Returns
    -------
    keras History object with per-epoch loss/metric values.
    """
    # Four conv/max-pool stages, dropout for regularisation, and a dense
    # head ending in a single sigmoid unit (binary output).
    model = tf.keras.models.Sequential([
        tf.keras.layers.Conv2D(32, (3, 3),
                               activation='relu',
                               input_shape=(150, 150, 3)),
        tf.keras.layers.MaxPooling2D(2, 2),
        tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D(2, 2),
        tf.keras.layers.Conv2D(128, (3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D(2, 2),
        tf.keras.layers.Conv2D(128, (3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D(2, 2),
        tf.keras.layers.Dropout(0.5),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(512, activation='relu'),
        tf.keras.layers.Dense(1, activation='sigmoid')
    ])

    model.compile(
        # `learning_rate` replaces the deprecated `lr` keyword argument.
        optimizer=RMSprop(learning_rate=0.001),
        loss='binary_crossentropy',
        metrics=['acc'],
    )

    print(model.summary())

    # Augmented training pipeline; pixel values rescaled to [0, 1].
    TRAINING_DIR = r"data/cats-v-dogs/training/"
    train_datagen = ImageDataGenerator(
        rescale=1. / 255,
        rotation_range=40,
        width_shift_range=0.2,
        height_shift_range=0.2,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True,
        fill_mode='nearest',
    )

    train_generator = train_datagen.flow_from_directory(
        TRAINING_DIR,
        batch_size=256,
        class_mode='binary',
        target_size=(150, 150),
    )

    # Validation data is only rescaled, never augmented.
    VALIDATION_DIR = r"data/cats-v-dogs/validation/"
    validation_datagen = ImageDataGenerator(rescale=1. / 255)
    validation_generator = validation_datagen.flow_from_directory(
        VALIDATION_DIR,
        batch_size=32,
        class_mode='binary',
        target_size=(150, 150),
    )

    # `Model.fit` accepts generators directly; `fit_generator` (and its
    # `use_multiprocessing` flag) is deprecated and removed in recent
    # Keras releases.
    history = model.fit(
        train_generator,
        epochs=epochs,
        verbose=1,
        validation_data=validation_generator,
    )

    os.makedirs(os.path.join("assets", "models", "cats-v-dogs"), exist_ok=True)
    model.save(
        os.path.join("assets", "models", "cats-v-dogs", f"{id}-model.h5"))

    return history
Пример #4
0
# Define parameters
MODELS_PATH = '../models/'
MODEL_NAME = 'bs_xception_model_gb'
REPORTS_PATH = '../reports/'
NUM_CLASSES = 4
TARGET_SIZE = (299, 299)  # Xception's native input resolution
BATCH_SIZE = 64

# Get test subset
# NOTE(review): `args` is presumably an argparse/notebook dict defined
# earlier in the original file; confirm.
print('Loading image data...')
label_df = pd.read_csv(args["csv"])
test_df = label_df[label_df['subset'] == 'test']

# Preprocess test data: rescale to [0, 1] and apply the custom
# `gaussian_blur` preprocessing function to each image.
print('Preprocess test data...')
test_datagen = ImageDataGenerator(rescale=1. / 255,
                                  preprocessing_function=gaussian_blur)

# shuffle=False keeps predictions aligned with test_df row order.
test_generator = test_datagen.flow_from_dataframe(test_df,
                                                  directory=args["dataset"],
                                                  x_col='file_path',
                                                  y_col='label',
                                                  class_mode='categorical',
                                                  target_size=TARGET_SIZE,
                                                  shuffle=False,
                                                  batch_size=BATCH_SIZE)

# Evaluate model
print('Load model...')
model = load_model(MODELS_PATH + MODEL_NAME + '.h5')

print('Calculate test accuracy...')
# NOTE(review): `!ls` is IPython/Jupyter shell syntax, not valid plain Python.
!ls

# Load one image; OpenCV reads BGR, so convert to RGB for display/augmentation.
X = cv2.imread('ferrari-spider-indian-theluxecafe.jpg')
X = cv2.cvtColor(X, cv2.COLOR_BGR2RGB)
plt.imshow(X)

print(X.shape)

IMAGE_SIZE = X.shape

# Add a leading batch dimension: (H, W, C) -> (1, H, W, C).
X = np.expand_dims(X, axis=0)

print(X.shape)

# Placeholder label array (uninitialised values).
y = np.ndarray([1])
print(y.shape)

from tensorflow.keras.preprocessing.image import ImageDataGenerator

datagen = ImageDataGenerator(shear_range = 50.5)

datagen.fit(X)
batch_size = 10
# flow() returns an iterator, not an array — printing it shows the
# iterator object, not image data.
X_batch = datagen.flow(X, batch_size=batch_size)

print(X_batch)

# NOTE(review): `show` is not defined in this snippet — presumably a helper
# defined elsewhere; confirm.
show(X_batch, batch_size)

Пример #6
0
# Second column of `train` holds the letter feature; one-hot encode it.
x_letter = train[:, 1]
x_letter = np.reshape(x_letter, (-1, 1))
en = OneHotEncoder()
x_letter = en.fit_transform(x_letter).toarray()

# First column holds the target class; one-hot encode it as well.
y = train[:, 0]
y = np.reshape(y, (-1, 1))
en = OneHotEncoder()
y = en.fit_transform(y).toarray()

print(x.shape)
print(x_letter.shape)
print(y.shape)

# Mild geometric augmentation for the image inputs.
image_generator = ImageDataGenerator(width_shift_range=0.1,
                                     height_shift_range=0.1,
                                     zoom_range=[0.8, 1.2],
                                     shear_range=10)

# Keep a copy of the original images before appending augmented ones.
x_total = x.copy()

def augment(x):
    """Produce one augmented copy of every image in ``x``.

    Each image is passed through the module-level ``image_generator``
    exactly once.

    Parameters
    ----------
    x : ndarray, shape (num_images, H, W[, C])
        Batch of images to augment.

    Returns
    -------
    list of ndarray
        One augmented image per input image, in input order.
    """
    aug_list = []
    for i in range(x.shape[0]):
        num_aug = 0
        tmp = x[i]
        # flow() expects a batch dimension, so promote the single image.
        tmp = tmp.reshape((1, ) + tmp.shape)
        for x_aug in image_generator.flow(tmp, batch_size=1):
            # flow() yields forever; stop after one augmented sample.
            if num_aug >= 1:
                break
            aug_list.append(x_aug[0])
            num_aug += 1  # BUGFIX: counter was never incremented (infinite loop)
    return aug_list  # BUGFIX: result was built but discarded
Пример #7
0
    tf.keras.layers.Dense(
        1, activation='sigmoid')  # 1 output neuron b/c binary, note sigmoid
])

# NOTE(review): `lr` is the deprecated spelling of `learning_rate` in
# recent Keras versions.
model.compile(optimizer=RMSprop(lr=0.001),
              loss='binary_crossentropy',
              metrics=['acc'])

# # NOTE:
#
# In the cell below you **MUST** use a batch size of 10 (`batch_size=10`) for the `train_generator` and the `validation_generator`. Using a batch size greater than 10 will exceed memory limits on the Coursera platform.

# In[93]:

TRAINING_DIR = '/tmp/cats-v-dogs/training/'
train_datagen = ImageDataGenerator(rescale=1.0 / 255.)

# NOTE: YOU MUST USE A BATCH SIZE OF 10 (batch_size=10) FOR THE
# TRAIN GENERATOR.
train_generator = train_datagen.flow_from_directory(TRAINING_DIR,
                                                    batch_size=10,
                                                    class_mode='binary',
                                                    target_size=(150, 150))

# NOTE(review): validation_datagen is created here, but the (truncated)
# validation_generator below appears to use train_datagen instead — likely
# a copy/paste slip; confirm against the original file.
VALIDATION_DIR = '/tmp/cats-v-dogs/testing/'
validation_datagen = ImageDataGenerator(rescale=1.0 / 255.)
# NOTE: YOU MUST USE A BACTH SIZE OF 10 (batch_size=10) FOR THE
# VALIDATION GENERATOR.
validation_generator = train_datagen.flow_from_directory(VALIDATION_DIR,
                                                         batch_size=10,
Пример #8
0
# Marina Joel
# Created (10/16/20)
# Updated (12/22/20)
#
# Offline data augmentation: expand the training set to ~10x its size by
# drawing single-sample augmented batches from an ImageDataGenerator.

from data_loader import load_dataset
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from configs import path, input_shape, batch_size
import numpy as np

# query user for dataset name
dataset = input('Enter dataset to be used (brain_mri, ddsm, lidc)\n')
# load dataset
x_train, y_train, x_test, y_test = load_dataset(dataset, path, aug=False)

# Flip/rotate augmentation pipeline.
datagen = ImageDataGenerator(vertical_flip=True,
                             horizontal_flip=True,
                             rotation_range=20,
                             fill_mode="nearest")
datagen.fit(x_train)

# Each appended element is a batch of size 1 (shape (1, ...)), so
# x_train_aug holds 10 * len(x_train) single-image batches.
x_train_aug = []
y_train_aug = []

batches = 0
for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=1):
    x_train_aug.append(x_batch)
    y_train_aug.append(y_batch)
    batches += 1
    if batches >= 10 * len(x_train):
        # we need to break the loop by hand because
        # the generator loops indefinitely
        break
Пример #9
0
# Assemble the full model from a pretrained base and a custom head.
model=Model(inputs=base_model.input,outputs=preds)

# Print a summary representation of your model
model.summary()


# Freeze the first 87 layers (pretrained features stay fixed)...
for layer in model.layers[:87]:
    layer.trainable=False


# ...and fine-tune everything from layer 87 onward.
for layer in model.layers[87:]:
    layer.trainable=True


train_datagen=ImageDataGenerator(preprocessing_function=preprocess_input) #included in our dependencies


# local path: BigSetFull
# training_data_path = '/media/lukas/TeraTest/temp/alltours/BigSetFull'

# training_data_path = '/data'

# paperspace P4000/P5000
# training_data_path = '/data/BigSetFull'
# training_data_path = '/data/small'
# training_data_path = '/data/medium'

training_data_path = '/data/train'

Пример #10
0
class RCNN():
    """Fine-tune a MobileNetV2 backbone for region/image classification.

    Loads image crops from ``config.BASE_PATH``, trains a small dense head
    on top of a frozen ImageNet backbone, evaluates, and saves the model
    plus the fitted label encoder.
    """

    def __init__(self):
        # Training hyperparameters.
        self.init_lr = 1e-4  # initial learning rate for Adam
        self.epochs = 5
        self.bs = 2  # batch size
        # ImageNet-pretrained backbone without its classification head.
        self.baseModel = MobileNetV2(weights="imagenet",
                                     include_top=False,
                                     input_tensor=Input(shape=(224, 224, 3)))
        self.model = None
        # Training-time augmentation pipeline.
        self.aug = ImageDataGenerator(rotation_range=20,
                                      zoom_range=0.15,
                                      width_shift_range=0.2,
                                      height_shift_range=0.2,
                                      shear_range=0.15,
                                      horizontal_flip=True,
                                      fill_mode="nearest")
        self.trainX, self.trainY = None, None
        self.testX, self.testY = None, None
        self.H = None  # training history
        self.lb = None  # fitted LabelBinarizer
        self.build_model()

    def load_dataset(self):
        """Load images from config.BASE_PATH, encode labels, and split into
        stratified train/test sets. Returns self for chaining."""
        imagePaths = list(paths.list_images(config.BASE_PATH))
        data = []
        labels = []

        for imagePath in imagePaths:
            # The class label is the name of the immediate parent folder.
            label = imagePath.split(os.path.sep)[-2]
            image = load_img(imagePath, target_size=config.INPUT_DIMS)
            image = img_to_array(image)
            image = preprocess_input(image)

            data.append(image)
            labels.append(label)
        data = np.array(data, dtype="float32")
        labels = np.array(labels)
        self.lb = LabelBinarizer()
        labels = self.lb.fit_transform(labels)
        # NOTE(review): for >2 classes LabelBinarizer already one-hot
        # encodes, so to_categorical would double-encode; this pattern is
        # only correct for a two-class label set — confirm.
        labels = to_categorical(labels)
        (self.trainX, self.testX, self.trainY,
         self.testY) = train_test_split(data,
                                        labels,
                                        test_size=0.20,
                                        stratify=labels,
                                        random_state=42)
        return self

    def build_model(self):
        """Attach a pooling + dense classification head to the frozen
        MobileNetV2 backbone. Returns self for chaining."""
        headModel = self.baseModel.output
        headModel = AveragePooling2D(pool_size=(7, 7))(headModel)
        headModel = Flatten(name="flatten")(headModel)
        headModel = Dense(128, activation="relu")(headModel)
        headModel = Dropout(0.5)(headModel)
        headModel = Dense(len(config.LABELS), activation="softmax")(headModel)

        self.model = Model(inputs=self.baseModel.input, outputs=headModel)
        # Freeze the backbone; only the new head is trained.
        for layer in self.baseModel.layers:
            layer.trainable = False
        return self

    def summary(self):
        """Print the model architecture."""
        self.model.summary()

    def compile(self):
        """Compile the model with Adam. Returns self for chaining.

        NOTE(review): `lr` is the deprecated spelling of `learning_rate`,
        and binary_crossentropy with a softmax head sized to
        len(config.LABELS) is only conventional for two classes — confirm.
        """
        print("[+] Model is compiling...")
        opt = Adam(lr=self.init_lr)
        self.model.compile(loss="binary_crossentropy",
                           optimizer=opt,
                           metrics=["accuracy"])
        return self

    def train(self):
        """Train on augmented batches; stores the History in self.H.
        Returns self for chaining."""
        print("[+] Model is training...")
        self.H = self.model.fit(self.aug.flow(self.trainX,
                                              self.trainY,
                                              batch_size=self.bs),
                                steps_per_epoch=len(self.trainX) // self.bs,
                                validation_data=(self.testX, self.testY),
                                validation_steps=len(self.testX) // self.bs,
                                epochs=self.epochs)
        return self

    def evaluate(self):
        """Report test-set metrics, save the model and label encoder, and
        plot the training curves to test.png."""
        print("[INFO] evaluating network...")
        predIdxs = self.model.predict(self.testX, batch_size=self.bs)

        # for each image in the testing set we need to find the index of the
        # label with corresponding largest predicted probability
        predIdxs = np.argmax(predIdxs, axis=1)

        # show a nicely formatted classification report
        print(
            classification_report(self.testY.argmax(axis=1),
                                  predIdxs,
                                  target_names=self.lb.classes_))

        # serialize the model to disk
        print("[+] saving mask detector model...")
        self.model.save(config.MODEL_PATH, save_format="h5")

        # serialize the label encoder to disk
        print("[+] saving label encoder...")
        f = open(config.ENCODER_PATH, "wb")
        f.write(pickle.dumps(self.lb))
        f.close()

        # plot the training loss and accuracy
        N = self.epochs
        plt.style.use("ggplot")
        plt.figure()
        plt.plot(np.arange(0, N), self.H.history["loss"], label="train_loss")
        plt.plot(np.arange(0, N), self.H.history["val_loss"], label="val_loss")
        plt.plot(np.arange(0, N),
                 self.H.history["accuracy"],
                 label="train_acc")
        plt.plot(np.arange(0, N),
                 self.H.history["val_accuracy"],
                 label="val_acc")
        plt.title("Training Loss and Accuracy")
        plt.xlabel("Epoch #")
        plt.ylabel("Loss/Accuracy")
        plt.legend(loc="lower left")
        plt.savefig('test.png')
Пример #11
0
#from google.colab import drive

#drive.mount('/content/drive')

# import drive files
#from google.colab import drive

#drive.mount('/content/drive')

# grab the list of images in our dataset directory, then initialize
# the list of data (i.e., images) and class images
print("[INFO] loading images...")

# Augmented training pipeline; pixels rescaled to [0, 1].
train_datagen = ImageDataGenerator(rescale=1. / 255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)

# Test data is only rescaled, never augmented.
test_datagen = ImageDataGenerator(rescale=1. / 255)

training_set = train_datagen.flow_from_directory(TRAIN_PATH,
                                                 target_size=(224, 224),
                                                 batch_size=BS,
                                                 class_mode='categorical')

testing_set = test_datagen.flow_from_directory(TEST_PATH,
                                               target_size=(224, 224),
                                               batch_size=BS,
                                               class_mode='categorical')

# implementing the MobileNetV2 network
print('train_i {}, val_i {}, test_i {}'.format(count_train,count_val,count_test))

# Create per-class subdirectories for each split.
# NOTE(review): subClassHelper and the *_dir variables are defined elsewhere
# in the original file; confirm.
#Subclass train_i
subClassHelper(train_i_dir)
#Subclass val_i
subClassHelper(val_i_dir)
#Subclass test_i
subClassHelper(test_i_dir)

# this is the augmentation configuration we will use for training
# train_datagen = ImageDataGenerator(rescale=1. / 255,
#                                     shear_range=0.2,
#                                     zoom_range=0.2,
#                                     horizontal_flip=True)
# val_datagen = ImageDataGenerator(rescale=1./255)
train_datagen = ImageDataGenerator(featurewise_center=True)
	# specify imagenet mean values for centering
# Setting .mean directly skips the need to call fit(); these are the
# standard ImageNet per-channel means (RGB order).
train_datagen.mean = [123.68, 116.779, 103.939]

train_generator = train_datagen.flow_from_directory(
        train_i_dir,
        target_size=(IMAGE_SIZE, IMAGE_SIZE), 
        batch_size = BATCH_SIZE, 
        class_mode='binary')

# validation_generator = val_datagen.flow_from_directory(
#         val_i_dir,
#         target_size=(IMAGE_SIZE, IMAGE_SIZE), 
#         batch_size = BATCH_SIZE, 
#         class_mode='binary')
Пример #13
0
def _conv_stack(t, filters, kernel, depth):
    """Apply `depth` identical conv(stride 1, same padding) + BN + ReLU
    layers to tensor `t` and return the result."""
    for _ in range(depth):
        t = layers.Conv2D(filters, (kernel, kernel), (1, 1),
                          padding='same',
                          data_format="channels_last")(t)
        t = layers.BatchNormalization(3)(t)
        t = layers.Activation('relu')(t)
    return t


def _maybe_pool(t, mode):
    """Optional max-pooling: mode 0 = none, 1 = 2x2/stride 2,
    otherwise 3x3/stride 2."""
    if mode == 0:
        return t
    size = (2, 2) if mode == 1 else (3, 3)
    return layers.MaxPooling2D(size, (2, 2),
                               data_format="channels_last")(t)


def get_values(x):
    """Build, train, and evaluate a CNN on CIFAR-100 from an encoded
    architecture vector.

    Parameters
    ----------
    x : sequence of int
        Flat architecture encoding: x[0:3] = (filters, kernel, pooling)
        for conv block A; x[3:11] = (filters, kernel, layer-count,
        pooling) for blocks B and C; x[11:13] = fully-connected layer
        on/off switches.

    Returns
    -------
    float
        Test-set accuracy of the trained model.
    """
    # Decode the architecture vector into named parameters.
    arch_param = {}
    i = 0
    arch_param['f1'] = x[0]
    arch_param['k1'] = x[1]
    arch_param['p1'] = x[2]
    while i < 2:
        arch_param['f%s' % (i + 2)] = x[i * 4 + 3]
        arch_param['k%s' % (i + 2)] = x[i * 4 + 4]
        arch_param['l%s' % (i + 2)] = x[i * 4 + 5]
        arch_param['p%s' % (i + 2)] = x[i * 4 + 6]
        i += 1  # BUGFIX: missing increment made this loop infinite

    arch_param['fc1'] = x[11]
    arch_param['fc2'] = x[12]

    # Model Training Configuration
    num_classes = 100  #10 or 100 depending on type of CIFAR
    batch_size = 128
    no_epochs = 20
    learning_rate = 0.1
    input_shape = (32, 32, 3)  #depending on CIFAR or imagenet

    # Truncate the results file. Nothing is written to it in this function,
    # so close immediately instead of leaking the descriptor.
    open("CNN.txt", "w").close()

    # Load data
    (x_train, y_train), (x_test, y_test) = cifar100.load_data()
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train, x_test = normalize(x_train, x_test)
    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_test = keras.utils.to_categorical(y_test, num_classes)

    # Data augmentation (only used by the commented-out fit() call below).
    datagen = ImageDataGenerator(
        featurewise_center=False,  # set input mean to 0 over the dataset
        samplewise_center=False,  # set each sample mean to 0
        featurewise_std_normalization=False,  # divide by dataset std
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # apply ZCA whitening
        rotation_range=15,  # random rotation (degrees)
        width_shift_range=0.1,  # random horizontal shift (fraction)
        height_shift_range=0.1,  # random vertical shift (fraction)
        horizontal_flip=True,  # randomly flip images
        vertical_flip=False)  # no vertical flips
    # (std, mean, and principal components if ZCA whitening is applied).
    datagen.fit(x_train)

    # Defining the model architecture. `inputs` avoids shadowing the
    # builtin `input`.
    inputs = keras.Input(
        shape=input_shape, dtype='float32'
    )  # Edit shape depending on whether it's cifar or imagenet

    # Block A: single strided conv (stem), then optional pooling.
    convA = layers.Conv2D(arch_param['f1'],
                          (arch_param['k1'], arch_param['k1']), (4, 4),
                          padding='valid',
                          data_format="channels_last")(inputs)
    convA = layers.BatchNormalization(3)(convA)
    convA = layers.Activation('relu')(convA)
    poolA = _maybe_pool(convA, arch_param['p1'])

    # Blocks B and C: 1-3 conv layers each (l == 2 or 3 means that many
    # layers; any other value means a single layer), then optional pooling.
    depth_b = arch_param['l2'] if arch_param['l2'] in (2, 3) else 1
    convB = _conv_stack(poolA, arch_param['f2'], arch_param['k2'], depth_b)
    poolB = _maybe_pool(convB, arch_param['p2'])

    depth_c = arch_param['l3'] if arch_param['l3'] in (2, 3) else 1
    convC = _conv_stack(poolB, arch_param['f3'], arch_param['k3'], depth_c)
    poolC = _maybe_pool(convC, arch_param['p3'])

    # Optional fully-connected layers sized from the flattened width.
    flatten = layers.Flatten()(poolC)
    if arch_param['fc1'] == 1:
        # BUGFIX: KerasTensor exposes .shape, not .output_shape.
        fc1 = layers.Dense(int(arch_param['fc1'] *
                               flatten.shape[1]))(flatten)
        fc1 = layers.BatchNormalization(3)(fc1)
        fc1 = layers.Activation('relu')(fc1)
    else:
        fc1 = flatten
    if arch_param['fc2'] == 1:
        fc2 = layers.Dense(int(arch_param['fc2'] * fc1.shape[1]))(fc1)
        fc2 = layers.BatchNormalization(3)(fc2)
        fc2 = layers.Activation('relu')(fc2)
    else:
        fc2 = fc1

    output = layers.Dense(num_classes)(fc2)
    output = layers.Activation('softmax')(output)

    model = keras.Model(inputs=inputs, outputs=output)

    model.summary()

    # BUGFIX: the network ends in an explicit softmax, so the loss must
    # treat outputs as probabilities (from_logits=False); `lr` is the
    # deprecated spelling of `learning_rate`.
    model.compile(optimizer=keras.optimizers.Adam(learning_rate=learning_rate),
                  loss=keras.losses.CategoricalCrossentropy(from_logits=False),
                  metrics=['accuracy'])

    history = model.fit(x_train,
                        y_train,
                        batch_size=batch_size,
                        epochs=no_epochs,
                        verbose=2,
                        validation_data=(x_test, y_test))
    #If data augmentation is needed
    #history = model.fit(datagen.flow(x_train, y_train, batch_size=batch_size), steps_per_epoch = x_train.shape[0] // batch_size, epochs=no_epochs, verbose=2, validation_data=(x_test, y_test))

    #If you want to save the weights
    #model.save_weights('vgg_16.h5')

    # Generate generalization metrics
    test_loss, test_acc = model.evaluate(x_test, y_test, verbose=2)  #Verbosity
    print(test_acc)

    return test_acc
Пример #14
0
            label = float(label)
            labels.append(label)
    images = np.array(images)

    labels = np.array(labels)
    return images, labels


# Load image/label arrays via the project-level get_data helper.
training_images, training_labels = get_data(train_path)
print(training_images.shape, training_labels.shape)
testing_images, testing_labels = get_data(test_path)
print(training_labels)
# Add a trailing channel axis: (N, 28, 28) -> (N, 28, 28, 1).
training_images = np.expand_dims(training_images, axis=-1)
testing_images = np.expand_dims(testing_images, axis=-1)
print(testing_images.shape, testing_labels.shape)
# Rescale pixels to [0, 1]; no augmentation.
train_data = ImageDataGenerator(rescale=1 / 255.)
test_data = ImageDataGenerator(rescale=1 / 255.)

train_gen = train_data.flow(training_images, training_labels, batch_size=32)
# Small CNN; 25-way softmax (second positional arg of Dense is activation).
model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(32, 3, activation="relu", input_shape=(28, 28, 1)),
    tf.keras.layers.MaxPooling2D(),
    tf.keras.layers.Conv2D(32, 3, activation="relu"),
    tf.keras.layers.MaxPooling2D(),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128, activation="relu"),
    tf.keras.layers.Dense(25, "softmax")
])
# sparse_categorical_crossentropy expects integer class labels.
model.compile(optimizer="adam",
              loss="sparse_categorical_crossentropy",
              metrics=["acc"])
# Encode string labels, then one-hot encode the binary column.
# NOTE(review): `data` and `labels` are defined earlier in the original
# file; confirm.
lb = LabelBinarizer()
labels = lb.fit_transform(labels)
labels = to_categorical(labels)

# partition the data into training and testing splits using 80% of
# the data for training and the remaining 20% for testing
(trainX, testX, trainY, testY) = train_test_split(data, labels,
	test_size=0.20, stratify=labels, random_state=42)

# construct the training image generator for data augmentation
aug = ImageDataGenerator(
	rotation_range=20,
	zoom_range=0.15,
	width_shift_range=0.2,
	height_shift_range=0.2,
	shear_range=0.15,
	horizontal_flip=True,
	fill_mode="nearest")

# load the MobileNetV2 network, ensuring the head FC layer sets are
# left off (MobileNetV2 is optimized for use on mobile devices)
baseModel = MobileNetV2(weights="imagenet", include_top=False,
	input_tensor=Input(shape=(224, 224, 3)))

# construct the head of the model that will be placed on top of the
# the base model

headModel = baseModel.output
headModel = AveragePooling2D(pool_size=(7, 7))(headModel)
Пример #16
0
labels = np.array(labels)

# split dataset for training and validation data
(trainX, testX, trainY, testY) = train_test_split(data,
                                                  labels,
                                                  test_size=0.2,
                                                  random_state=42)

# One-hot encode the binary labels.
trainY = to_categorical(trainY, num_classes=2)  # [[1, 0], [0, 1], [0, 1], ...]
testY = to_categorical(testY, num_classes=2)

# augmenting datset
aug = ImageDataGenerator(rotation_range=25,
                         width_shift_range=0.1,
                         height_shift_range=0.1,
                         shear_range=0.2,
                         zoom_range=0.2,
                         horizontal_flip=True,
                         fill_mode="nearest")


# define model
def build(width, height, depth, classes):
    model = Sequential()
    inputShape = (height, width, depth)
    chanDim = -1

    if K.image_data_format(
    ) == "channels_first":  #Returns a string, either 'channels_first' or 'channels_last'
        inputShape = (depth, height, width)
        chanDim = 1
Пример #17
0
    args = parser.parse_args()

    # Pull hyperparameters from the command line.
    bs = args.batch_size
    lr = args.learning_rate
    size = (args.size, args.size)
    shape = (args.size, args.size, 3)
    epochs = args.epochs

    # Load and preprocess data
    train_dir = os.path.join(args.data_dir, 'train')
    test_dir = os.path.join(args.data_dir, 'test')
    valid_dir = os.path.join(args.data_dir, 'validation')

    # NOTE(review): the validation generator applies the same augmentation
    # as training — validation data is usually only rescaled; confirm this
    # is intentional.
    train_datagen = ImageDataGenerator(rescale=1./255, rotation_range=5, zoom_range=0.2, \
                                       shear_range=0.2, brightness_range=[0.9, 1.1], \
                                       horizontal_flip=True)
    valid_datagen = ImageDataGenerator(rescale=1./255, rotation_range=5, zoom_range=0.2, \
                                       shear_range=0.2, brightness_range=[0.9, 1.1], \
                                       horizontal_flip=True)
    test_datagen = ImageDataGenerator(rescale=1. / 255)

    train_generator = train_datagen.flow_from_directory(train_dir,
                                                        target_size=size,
                                                        shuffle=True,
                                                        batch_size=bs,
                                                        class_mode='binary')
    valid_generator = valid_datagen.flow_from_directory(valid_dir,
                                                        target_size=size,
                                                        shuffle=True,
                                                        batch_size=bs,
@author: Suman
"""
# Importing the libraries
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.preprocessing.image import ImageDataGenerator,load_img,img_to_array
from tensorflow.keras.layers import Dense, Input, Dropout, GlobalAveragePooling2D, Flatten, Conv2D, BatchNormalization, Activation, MaxPooling2D
from tensorflow.keras.models import Model, Sequential
import numpy as np
tf.__version__

# Part 1 - Data Preprocessing

# Generating images for the Training set (rescale to [0, 1] + light
# geometric augmentation)
train_datagen = ImageDataGenerator(rescale = 1./255,
                                   shear_range = 0.2,
                                   zoom_range = 0.2,
                                   horizontal_flip = True)

# Generating images for the Test set (rescale only, no augmentation)
test_datagen = ImageDataGenerator(rescale = 1./255)

# Creating the Training set: 48x48 grayscale images, one class per folder
training_set = train_datagen.flow_from_directory('images/train',
                                                 target_size = (48, 48),
                                                 batch_size = 32,
                                                 color_mode='grayscale',
                                                 class_mode = 'categorical')

# Creating the Test set
valid_set = test_datagen.flow_from_directory('images/validation',
                                            target_size = (48,48),
from tensorflow.keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img

# Augmentation pipeline: small rotations plus shear/zoom jitter.
datagen = ImageDataGenerator(
    rotation_range=10,
    shear_range=0.3,
    zoom_range=0.2,
)

folder = 'Z'
img_name = 'Z280'

# Load one source image and prepend a batch axis so the generator can
# consume it as a batch of size 1.
source = load_img(f'Data/{folder}/{img_name}.jpg')
x = img_to_array(source)
x = x.reshape((1,) + x.shape)

# datagen.flow yields forever; stop once 5001 augmented copies have been
# written to disk.  NOTE(review): the image is read from 'Data/...' but
# the copies are saved to '../Data/Z' — confirm the differing relative
# paths are intentional.
for count, _batch in enumerate(
        datagen.flow(x, batch_size=1, save_to_dir='../Data/Z',
                     save_prefix='Znew', save_format='jpeg'),
        start=1):
    if count > 5000:
        break
Пример #20
0
    loaded_model.fit(train_generator,
                     steps_per_epoch=train_step,
                     epochs=epochs,
                     validation_data=test_generator,
                     validation_steps=val_step)

    get_report(loaded_model, X_test, y_test)
    return loaded_model


if __name__ == '__main__':
    # Raw MNIST digits: X_* are uint8 images of shape (N, 28, 28).
    (X_train, y_train), (X_test, y_test) = mnist.load_data()

    # Create data generators with Data augmentation
    train_gen = ImageDataGenerator(shear_range=0.3,
                                   width_shift_range=0.08,
                                   height_shift_range=0.08,
                                   zoom_range=0.08)
    # Evaluation data is left unaugmented.
    test_gen = ImageDataGenerator()

    # Training hyper-parameters; the step counts are whole batches per
    # epoch (any remainder smaller than a batch is dropped).
    epochs = 5
    batch_size = 128
    train_step = X_train.shape[0] // batch_size
    val_step = X_test.shape[0] // batch_size
    prms = {
        'epochs': epochs,
        'batch_size': batch_size,
        'train_step': train_step,
        'val_step': val_step
    }

    # Train a base model
Пример #21
0
def train():
    """Train the U-Net segmentation model with paired augmentation.

    Loads the training images/masks, normalizes the images with the
    train-set mean/std, holds out 10% for validation, fits the model on
    jointly-augmented (image, mask) batches, then saves the best/last
    weights plus the training history (JSON and dice-coefficient plot).
    """
    print('Loading and preprocessing train data...')
    print('-' * 30)

    imgs_train, imgs_mask_train = load_train_data()
    print('Loaded train images: ', imgs_train.shape, imgs_train.dtype,
          imgs_mask_train.shape, imgs_mask_train.dtype)
    print('-' * 30)

    mean = np.mean(imgs_train)  # mean for data centering
    std = np.std(imgs_train)  # std for data normalization

    # Normalization of the train set (Exp 1)
    imgs_train = imgs_train.astype(np.float32)
    imgs_train -= mean
    imgs_train /= std

    imgs_mask_train = imgs_mask_train.astype(np.float32)

    print('Train test split')
    X_train, X_test, y_train, y_test = train_test_split(imgs_train,
                                                        imgs_mask_train,
                                                        test_size=0.1)

    print('-' * 30)
    print('Data Augmentation Start')
    # The same settings drive both generators so images and masks get
    # identical geometric transforms.
    data_gen_args = dict(shear_range=0.1,
                         rotation_range=20,
                         width_shift_range=0.1,
                         height_shift_range=0.1,
                         zoom_range=0.3,
                         fill_mode='constant',
                         horizontal_flip=True,
                         vertical_flip=True,
                         cval=0)
    image_datagen = ImageDataGenerator(**data_gen_args)
    mask_datagen = ImageDataGenerator(**data_gen_args)

    seed = 1
    image_datagen.fit(X_train, augment=True, seed=seed)
    mask_datagen.fit(y_train, augment=True, seed=seed)

    # BUG FIX: flow() must also receive the shared seed, otherwise the
    # two generators draw *different* random transforms (and shuffle
    # orders) and the image/mask pairs stop corresponding.
    image_generator = image_datagen.flow(X_train,
                                         batch_size=BATCH_SIZE,
                                         seed=seed)
    mask_generator = mask_datagen.flow(y_train,
                                       batch_size=BATCH_SIZE,
                                       seed=seed)

    train = zip(image_generator, mask_generator)

    print('-' * 30)
    print('Data Augmentation End')
    print('-' * 30)

    print('Creating and compiling model...')
    print('-' * 30)

    model = unet()
    # Save the weights of the best validation-loss epoch only.
    model_checkpoint = ModelCheckpoint(
        '/data/flavio/anatiel/models/dissertacao/final_tests/original_augment_unet_exp5_2_best.h5',
        monitor='val_loss',
        save_best_only=True,
        mode="min")

    print('Fitting model...')
    print('-' * 30)
    # BUG FIX: `train` is a generator, so `batch_size` must not be
    # passed to fit() (TF rejects it for generator input), and
    # steps_per_epoch counts *batches*, not samples, and must be an int.
    history = model.fit(
        train,
        epochs=EPOCHS,
        verbose=1,
        shuffle=True,
        validation_data=(X_test, y_test),
        steps_per_epoch=int(np.ceil(X_train.shape[0] / BATCH_SIZE)),
        callbacks=[model_checkpoint])

    # Also keep the final-epoch weights alongside the best checkpoint.
    model.save(
        '/data/flavio/anatiel/models/dissertacao/final_tests/original_augment_unet_exp5_2_last.h5'
    )

    # convert the history.history dict to a pandas DataFrame:
    hist_df = pd.DataFrame(history.history)

    # save to json:
    hist_json_file = '/data/flavio/anatiel/models/dissertacao/final_tests/original_augment_unet_exp5_2.json'
    with open(hist_json_file, mode='w') as f:
        hist_df.to_json(f)
    print("history saved")

    # Plot train/val dice coefficient per epoch and save the figure.
    plt.plot(history.history['dice_coef'])
    plt.plot(history.history['val_dice_coef'])
    plt.title('Model dice coeff')
    plt.ylabel('Dice coeff')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Val'], loc='upper left')
    # save plot to file
    plt.savefig(
        '/data/flavio/anatiel/models/dissertacao/final_tests/original_augment_unet_exp5_2.png'
    )
# NOTE(review): data_root is defined earlier in the script; the
# concatenation below assumes it ends with a path separator — confirm.
image_path = data_root   # image data set path
train_dir = image_path + "train"
validation_dir = image_path + "valid"
test_dir = image_path + "test"

# Input resolution and schedule used by the evaluation run below.
im_height = 224
im_width = 224
batch_size = 16
epochs = 5
def pre_function(img):
    """Scale pixel values from [0, 255] to [-1, 1]."""
    # Equivalent to the two-step form: img /= 255; img = (img - 0.5) * 2.
    return (img / 255. - 0.5) * 2.0

# Test-set generator: only the [-1, 1] preprocessing, no augmentation.
test_image_generator = ImageDataGenerator(preprocessing_function=pre_function)

test_data_gen = test_image_generator.flow_from_directory(directory=test_dir,
                                                              batch_size=batch_size,
                                                              shuffle=True,
                                                              target_size=(im_height, im_width),
                                                              class_mode='categorical')
total_val = test_data_gen.n  # number of test images discovered on disk

# Two-class classifier restored from a transfer-learning checkpoint.
model = resnet50(num_classes=2, include_top=True)
model.load_weights(filepath="E:/machine_learning/Resnet/resnet50_new/save_weights/transfer/resNet50_fpn_56-256_transfer_0.002lr.ckpt")

# using keras low level api for training
# from_logits=False because the model output is already a softmax.
loss_object = tf.keras.losses.CategoricalCrossentropy(from_logits=False)
test_loss = tf.keras.metrics.Mean(name='test_loss')
test_accuracy = tf.keras.metrics.CategoricalAccuracy(name='test_accuracy')
Пример #23
0
# Class-name -> integer-label mapping.  NOTE(review): 'helthy' looks like
# a typo for 'healthy', but the key must match whatever get_data() emits —
# do not rename without checking the data pipeline.
labels = {'helthy': 0, 'bacteria': 1, 'virus': 2}

path_train, y_train = get_data(train_folder)
path_valid, y_valid = get_data(valid_folder)

# Convert to arrays so they can be fancy-indexed by a permutation below.
path_train = np.array(path_train)
y_train = np.array(y_train)

path_valid = np.array(path_valid)
y_valid = np.array(y_valid)

# Augmentation: mild rotation/shifts, pixels rescaled to [0, 1].
datagen = ImageDataGenerator(
    featurewise_center=False,
    featurewise_std_normalization=False,
    rotation_range=20,
    width_shift_range=0.1,
    height_shift_range=0.1,
    horizontal_flip=False,
    rescale=1 / 255,
)

# Shuffle the training paths and labels together with one permutation.
permutation_train = np.random.permutation(path_train.shape[0])
path_train = path_train[permutation_train]
y_train = y_train[permutation_train]

# Shuffle the validation paths and labels together with one permutation.
permutation_valid = np.random.permutation(path_valid.shape[0])
path_valid = path_valid[permutation_valid]
y_valid = y_valid[permutation_valid]
Пример #24
0
losses = []
val_losses = []

# Augmentation configuration: only small horizontal/vertical shifts and
# horizontal flips are enabled; every normalization/whitening option is
# explicitly off, and 33% of the data is reserved for validation.
_augment_config = dict(
    featurewise_center=False,
    samplewise_center=False,
    featurewise_std_normalization=False,
    samplewise_std_normalization=False,
    zca_whitening=False,
    zca_epsilon=1e-06,
    rotation_range=0,
    width_shift_range=0.1,
    height_shift_range=0.1,
    shear_range=0.,
    zoom_range=0.,
    channel_shift_range=0.,
    fill_mode='nearest',
    cval=0.,
    horizontal_flip=True,
    vertical_flip=False,
    rescale=None,
    preprocessing_function=None,
    data_format=None,
    validation_split=0.33,
)
datagen = ImageDataGenerator(**_augment_config)

# Compute any dataset-wide statistics (none are enabled above, but the
# fit() call is kept for parity with the featurewise options).
datagen.fit(x_train)
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid')
])

# print(model.summary())

# Binary cross-entropy matches the single sigmoid output unit.
# NOTE(review): 'lr' is the legacy argument name; newer TF spells it
# 'learning_rate'.
model.compile(optimizer=RMSprop(lr=0.001),
              loss=binary_crossentropy,
              metrics=['accuracy'])

# Data pre-processing
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# Both generators only rescale pixels to [0, 1]; no augmentation.
train_datagen = ImageDataGenerator(rescale=1.0 / 255.)
valid_datagen = ImageDataGenerator(rescale=1.0 / 255.)

train_generator = train_datagen.flow_from_directory(train_dir,
                                                    batch_size=20,
                                                    class_mode='binary',
                                                    target_size=(150, 150))

validation_generator = valid_datagen.flow_from_directory(validation_dir,
                                                         batch_size=20,
                                                         class_mode='binary',
                                                         target_size=(150,
                                                                      150))

history = model.fit(train_generator,
                    validation_data=validation_generator,
Пример #26
0
    def __init__(
        self,
        path: str,
        split: str = "train",
        batch_size: int = 2,
        scale: int = 4,
        shuffle: bool = False,
    ) -> None:
        """Method to build the dataset generator object

        Args:
            path (str): Path to the main directory
            split (str, optional): Data split to utilize. Defaults to "train".
            batch_size (int, optional): Batch size for the model. Defaults to 2.
            scale (int, optional): Image upsampling ratio. Defaults to 4.
            shuffle (bool, optional): whether to shuffle the data files. Defaults to False.

        Note:
            Only the "train" and "val" splits are handled; any other value
            leaves ``dataset`` unassigned, so the final attribute
            assignment would raise NameError.
        """
        self.scale = scale
        # Batches per epoch; only set for the "val" split, where the
        # underlying generators would otherwise yield forever.
        self.epoch_size = 0

        if split == "train":
            # Make single image object retriever function
            builder = tfds.ImageFolder(path)
            dataset = builder.as_dataset(split=split,
                                         as_supervised=False,
                                         shuffle_files=shuffle)
            # Make image pairs (one LR/HR pair per source image).
            dataset = dataset.map(
                self.pair_maker,
                num_parallel_calls=tf.data.experimental.AUTOTUNE)
            # Make batches; drop the ragged final batch during training.
            dataset = dataset.batch(batch_size,
                                    drop_remainder=split == "train")

        elif split == "val":
            LR_datagen = ImageDataGenerator()
            HR_datagen = ImageDataGenerator()

            # Provide the same seed for reproducibility
            seed = 1

            # Data generator for LR images
            LR_generator = LR_datagen.flow_from_directory(
                f"{path}/{split}/input",
                class_mode=None,
                seed=seed,
                target_size=(512, 512),
                color_mode="rgb",
                batch_size=1,
                shuffle=shuffle,
            )
            # Data generator for HR images; same seed so file order
            # matches the LR generator's.
            HR_generator = HR_datagen.flow_from_directory(
                f"{path}/{split}/gt",
                class_mode=None,
                seed=seed,
                target_size=(2048, 2048),
                color_mode="rgb",
                batch_size=1,
                shuffle=shuffle,
            )
            assert len(HR_generator.filenames) == len(
                LR_generator.filenames
            ), "ensure equal number of HR and LR images"

            # size of validation dataset
            val_size = len(HR_generator.filenames)

            # combine generators into one which yields LR-HR pair
            combined_generator = zip(LR_generator, HR_generator)
            dataset = tf.data.Dataset.from_generator(
                lambda: combined_generator,
                output_signature=(
                    tf.TensorSpec(shape=(1, None, None, 3), dtype=tf.float32),
                    tf.TensorSpec(shape=(1, None, None, 3), dtype=tf.float32),
                ),
            )
            dataset = dataset.batch(batch_size,
                                    drop_remainder=split == "train")

            # Rescale the images and drop the extra leading dimension.
            dataset = dataset.map(
                dimension_adjuster,
                num_parallel_calls=tf.data.experimental.AUTOTUNE)
            # ImageDataGenerator yields samples indefinitely, so cap one
            # epoch at ceil(val_size / batch_size) steps.
            self.epoch_size = (val_size // batch_size + 1 if val_size %
                               batch_size else val_size // batch_size)

        self.dataset = dataset
Пример #27
0
# output = input feature map + stacked convolution/maxpooling layers + fully
# connected layer + sigmoid output layer
model = Model(img_input, output)

model.summary()

from tensorflow.keras.optimizers import RMSprop

# Binary cross-entropy matches the single sigmoid output.
# NOTE(review): 'lr' is the legacy argument name; newer TF spells it
# 'learning_rate'.
model.compile(loss='binary_crossentropy',
              optimizer=RMSprop(lr=0.001),
              metrics=['acc'])

from tensorflow.keras.preprocessing.image import ImageDataGenerator

# All images will be rescaled by 1./255
train_datagen = ImageDataGenerator(rescale=1. / 255)
test_datagen = ImageDataGenerator(rescale=1. / 255)

# Flow training images in batches of 20 using train_datagen generator
train_generator = train_datagen.flow_from_directory(
    train_dir,  # This is the source directory for training images
    target_size=(150, 150),  # All images will be resized to 150x150
    batch_size=20,
    # Since we use binary_crossentropy loss, we need binary labels
    class_mode='binary')

# Flow validation images in batches of 20 using test_datagen generator
validation_generator = test_datagen.flow_from_directory(validation_dir,
                                                        target_size=(150, 150),
                                                        batch_size=20,
                                                        class_mode='binary')
Пример #28
0
# Per-channel means (RGB order) used for mean subtraction.
_R_MEAN = 123.68
_G_MEAN = 116.78
_B_MEAN = 103.94


def pre_function(img):
    """Subtract the per-channel RGB means from ``img``.

    The subtraction broadcasts over the last axis, so any array whose
    trailing dimension is 3 (H, W, 3 images included) is accepted.
    """
    channel_means = [_R_MEAN, _G_MEAN, _B_MEAN]
    return img - channel_means


# data generator with data augmentation
train_image_generator = ImageDataGenerator(horizontal_flip=True,
                                           preprocessing_function=pre_function)

# Validation pipeline: mean subtraction only, no augmentation.
validation_image_generator = ImageDataGenerator(
    preprocessing_function=pre_function)

train_data_gen = train_image_generator.flow_from_directory(
    directory=train_dir,
    batch_size=batch_size,
    shuffle=True,
    target_size=(im_height, im_width),
    class_mode='categorical')
total_train = train_data_gen.n  # number of training images discovered

# get class dict
class_indices = train_data_gen.class_indices
    layers.Dropout(0.5),
    layers.Dense(10, activation='softmax')
])

# Wrap Adam so Horovod averages gradients across all workers.
opt = hvd.DistributedOptimizer(tf.optimizers.Adam(0.01),
                               backward_passes_per_step=1,
                               average_aggregated_gradients=True)

model.compile(optimizer=opt,
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

# Broadcast rank 0's initial variables so every worker starts from
# identical weights.
callbacks = [hvd.callbacks.BroadcastGlobalVariablesCallback(0)]

# Only rank 0 writes checkpoints, to avoid workers clobbering each other.
if hvd.rank() == 0:
    callbacks.append(
        tf.keras.callbacks.ModelCheckpoint('./checkpoint-{epoch}.h5'))

datagen = ImageDataGenerator(horizontal_flip=True)
# verbose is truthy only on rank 0, so just one worker prints progress.
model.fit(datagen.flow(x_train, y_train, batch_size=8),
          callbacks=callbacks,
          epochs=3,
          verbose=(hvd.rank() == 0))

# Evaluate on the held-out test set on rank 0 only.
if hvd.rank() == 0:
    test_data = np.load('data_test.npz')
    x_test, y_test = test_data['x_test'], test_data['y_test']
    preds = model.predict(x_test)
    acc_score = accuracy_score(y_test[:, 0], np.argmax(preds, axis=1))
    print(f'Model accuracy is {acc_score}')
Пример #30
0
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image

# Load a single pre-saved image tensor from disk.
fpath = './data/sample/dog-800.npy'
img_tensor = np.load(fpath)
test00 = np.expand_dims(img_tensor, 0)          # add a leading batch axis
test01 = np.append(test00, test00 / 2, axis=0)  # batch of 2: original + half-intensity copy
print('img_tensor=', img_tensor.shape)
gen_test = np.copy(test00)

# Create the augmentation generator
datagen = ImageDataGenerator(shear_range=0.5,
                             zoom_range=[0.8, 2.0],
                             rotation_range=15,
                             width_shift_range=0.1,
                             height_shift_range=0.1,
                             horizontal_flip=True,
                             vertical_flip=False)

# prepare iterator (uncomment save_to_dir to also write samples to disk)
itr = datagen.flow(
    test01, y=['dog', 'dog'],
    batch_size=1)  # , save_to_dir='./data/sample', save_prefix='gen_dog')

# Create the figure
fig = plt.figure(figsize=(30, 30))
Y = []
# plot a 4x4 grid of augmented samples
for i in range(16):
    plt.subplot(4, 4, i + 1)