Example #1
def train(tr_gen):
    """
    Constructs and trains a CNN
    Several Convolution2D and MaxPooling2D layers,
    then one hidden dense layer.
    The last layer does softmax to the number of classes.
    All layers but last use relu activation.
    Uses Adam optimzer.

    Args
            tr_gen (DirectoryIterator): The loaded data and labels
    Returns
            model (Sequential): A compiled and trained model
    """

    # derive some constants from the generator
    num_classes = len(tr_gen.class_indices)

    # build a neural network
    model = Sequential()
    # first layer
    model.add(
        Convolution2D(filters=16,
                      kernel_size=(3, 3),
                      input_shape=(IMG_DIM, IMG_DIM, 3),
                      activation='relu',
                      name='input'))
    # pool to reduce number of features
    model.add(MaxPooling2D(pool_size=2))
    # additional layers
    model.add(Convolution2D(32, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=2))
    model.add(Convolution2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=2))
    # flatten into single vector
    model.add(Flatten())
    # a hidden dense layer
    model.add(Dense(512, activation='relu'))
    # last layer
    model.add(Dense(num_classes, activation='softmax', name='output'))

    # compile
    model.compile(optimizer=Adam(lr=LEARN_RATE),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    # train; lots of options to tweak in this call
    model.fit_generator(tr_gen, epochs=EPOCHS)

    return model
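
# A minimal usage sketch (not from the original source), assuming IMG_DIM,
# LEARN_RATE and EPOCHS are module-level constants and the images live in a
# hypothetical class-per-subdirectory folder 'data/train'.
from tensorflow.keras.preprocessing.image import ImageDataGenerator

train_datagen = ImageDataGenerator(rescale=1. / 255)
tr_gen = train_datagen.flow_from_directory('data/train',
                                           target_size=(IMG_DIM, IMG_DIM),
                                           batch_size=32,
                                           class_mode='categorical')
trained_model = train(tr_gen)
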
Example #2
def main():
    batch_size = 54
    epochs = 100
    img_height = 50
    img_width = 50
    train_set, test_set, valid_set = data(batch_size, img_height, img_width)

    model = Sequential([
        Flatten(),
        Dense(1250, activation='sigmoid'),
        Dense(512, activation='sigmoid'),
        Dense(10, activation='sigmoid'),
        Dense(1, activation='sigmoid')

    ])
    print("Built model successfully!~")
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    history = model.fit_generator(
        train_set,
        steps_per_epoch=5003 // batch_size,
        epochs=epochs,
        validation_data=valid_set,
        validation_steps=2001 // batch_size
    )

    json_str = model.to_json()
    with open(r'C:\Users\user1\PycharmProjects\gender-classification-1\Multilayer Perceptron\models\MLP_model.json',
              'w') as outfile:
        json.dump(json.loads(json_str), outfile, indent=4)
        model.save_weights(
            r"C:\Users\user1\PycharmProjects\gender-classification-1\Multilayer Perceptron\models\weights_MLP_model.h5",
            save_format="h5")
    print("Saved model to disk")
    print('\n# Evaluate on test data')
    results_test = model.evaluate_generator(test_set)
    print('test loss, test acc:', results_test)
    acc = history.history['accuracy']
    val_acc = history.history['val_accuracy']
    loss = history.history['loss']
    val_loss = history.history['val_loss']
    epochs_range = range(epochs)
    plt.figure(figsize=(6, 6))
    plt.subplot(1, 2, 1)
    plt.plot(epochs_range, acc, label='Training Accuracy')
    plt.plot(epochs_range, val_acc, label='Validation Accuracy')
    plt.legend(loc='lower right')
    plt.title('Training and Validation Accuracy')
    plt.subplot(1, 2, 2)
    plt.plot(epochs_range, loss, label='Training Loss')
    plt.plot(epochs_range, val_loss, label='Validation Loss')
    plt.legend(loc='upper right')
    plt.title('Training and Validation Loss')
    plt.show()
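
# Hedged sketch (not part of the original script): reloading the MLP that
# main() writes out as an architecture JSON plus an HDF5 weights file. The
# paths simply mirror the ones used above.
import json
from tensorflow.keras.models import model_from_json

def load_saved_mlp(json_path, weights_path):
    with open(json_path) as infile:
        model = model_from_json(json.dumps(json.load(infile)))
    model.load_weights(weights_path)
    model.compile(optimizer='adam', loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model
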
def upload():
    from tensorflow.keras.preprocessing.image import ImageDataGenerator
    train_datagen = ImageDataGenerator(rescale = 1./255,shear_range = 0.2,zoom_range = 0.2,horizontal_flip = True)
    test_datagen = ImageDataGenerator(rescale = 1)
    x_train = train_datagen.flow_from_directory(r'C:\Users\Washifa\Desktop\dataset\train_set',target_size=(64,64),batch_size=32,class_mode='binary')
    x_test = test_datagen.flow_from_directory(r'C:\Users\Washifa\Desktop\dataset\test_set',target_size=(64,64),batch_size=32,class_mode='binary')

    print(x_train.class_indices)
    import tensorflow as tf
    from tensorflow.python.keras.layers import Dense
    from tensorflow.keras import Sequential
    from tensorflow.keras.layers import Convolution2D
    from tensorflow.keras.layers import MaxPooling2D
    from tensorflow.keras.layers import Flatten
    model = Sequential()
    model.add(Convolution2D(32,(3,3),input_shape = (64,64,3)))
    model.add(MaxPooling2D(pool_size = (2,2)))
    model.add(Flatten())
    model.add(Dense(units = 128,kernel_initializer = 'uniform'))
    model.add(Dense(units = 1,kernel_initializer = 'uniform'))
    model.compile(loss='binary_crossentropy',optimizer = "adam",metrics = ["accuracy"])
    model.fit_generator(x_train,validation_data=x_test, steps_per_epoch=10)
    y = ''
    if request.method == 'POST':
        f = request.files['image']
        print("current path")
        basepath = os.path.dirname(__file__)
        print("current path", basepath)
        filepath = os.path.join(basepath, 'uploads', 'image', f.filename)
        print("upload folder is ", filepath)
        f.save(filepath)
        
        test_datagen = ImageDataGenerator(rescale = 1./255)
        vals = ['cancer','no cancer']
        print(vals)
        test_generator = test_datagen.flow_from_directory(r'C:\Users\Washifa\Desktop\pavithra\uploads',target_size =(64,64),class_mode ='categorical',batch_size = 32)
        print(test_generator)
        pred = model.predict_generator(test_generator)
        print(pred)
        
        y=str(vals[np.argmax(pred)])
        os.remove(filepath)
    return y
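
# Hedged sketch (not in the source): upload() reads Flask's global `request`
# object and returns a prediction string, so it is presumably registered as a
# route on a Flask app roughly along these lines.
from flask import Flask, request

app = Flask(__name__)
app.add_url_rule('/predict', 'upload', upload, methods=['GET', 'POST'])
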
Example #4
def train(X, y):
    N = np.unique(y).size

    model = Sequential()
    model.add(Reshape((64, 64, 1)))
    model.add(Convolution2D(32, (3, 3), input_shape = (64, 64), activation = 'relu'))
    model.add(MaxPooling2D(pool_size = (2, 2)))
    model.add(Convolution2D(16, (3, 3),  activation = 'relu'))
    model.add(MaxPooling2D(pool_size = (2, 2)))
    model.add(Flatten())
    model.add(Dense(units = 128, activation = 'relu'))
    model.add(Dense(units = 128, activation = 'relu'))
    model.add(Dense(units = N, activation = 'softmax'))

    model.compile(optimizer = 'adam', metrics = ['accuracy'], loss = 'sparse_categorical_crossentropy')

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.1)

    number_of_training_samples = len(list(X_train))
    number_of_testing_samples = len(list(X_test))
    X_train = X_train.reshape((number_of_training_samples, 64, 64, 1))
    X_test = X_test.reshape((number_of_testing_samples, 64, 64, 1))

    datagen = ImageDataGenerator(width_shift_range=0.1,
                                height_shift_range=0.1,
                                zoom_range=0.2,
                                shear_range=0.1,
                                rotation_range=10.)

    datagen.fit(X_train)
    batches = datagen.flow(X_train, y_train, batch_size = 15)
    X_batch, y_batch = next(batches)

    model.fit_generator(datagen.flow(X_train, y_train, batch_size = 50),
                            steps_per_epoch = 2 * number_of_training_samples,
                            epochs=1, validation_data = (X_test, y_test))

    return model
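
# Hedged usage sketch (not in the original): train() expects a stack of
# 64x64 single-channel images and integer class labels, matching the
# Reshape layer and the sparse_categorical_crossentropy loss.
import numpy as np

X = np.random.rand(500, 64, 64).astype('float32')   # hypothetical images
y = np.random.randint(0, 5, size=500)                # hypothetical labels
trained = train(X, y)
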
Example #5
    verbose=1,
    restore_best_weights=True)  # keeps the best weights once training stops

# In[ ]:

ReduceLR = ReduceLROnPlateau(patience=3, verbose=1)

# In[ ]:

callbacks = [earlystop, checkpoint, ReduceLR]

# In[ ]:

history = model.fit_generator(datagen.flow(x_train, y_train, batch_size=32),
                              epochs=10,
                              verbose=1,
                              callbacks=callbacks,
                              validation_data=(x_valid, y_validate))

# In[ ]:

import pickle

pickle_out = open("Trained_cnn_history.pickle", "wb")
pickle.dump(history.history, pickle_out)
pickle_out.close()

# In[ ]:

pickle_in = open("Trained_cnn_history.pickle", "rb")
saved_history = pickle.load(pickle_in)
class ModelBidirectDNA():
    def __init__(self, params):
        """
        Initializes the model before training.
        """

        # defines where to save the model's checkpoints
        self.results_base_dir = params['result_base_dir']

        self.pretrained_model = params.get('pretrained_model', None)
        if self.pretrained_model is not None:
            # pretrained model load params from pickle
            print("loading model")
            train_dir = "/"
            train_dir = train_dir.join(
                params['pretrained_model'].split("/")[:-1])
            print(train_dir)
            with open(os.path.join(train_dir, "network_params"),
                      'rb') as params_pickle:
                self.params = pickle.load(params_pickle)
            self.params['result_base_dir'] = self.results_base_dir
        else:
            ## new model
            self.params = params

        self.seeds = [42, 101, 142, 23, 53]
        self.learning_rate = self.params['lr']
        self.batch_size = self.params['batch_size']
        weight_decay = self.params['weight_decay']

        # Architecture --- emoji network
        weight_init = tf.keras.initializers.glorot_uniform
        recurrent_init = tf.keras.initializers.orthogonal(seed=42)

        # Model definition
        self.model = Sequential()
        self.model.add(
            Masking(mask_value=[1., 0., 0., 0., 0.],
                    input_shape=(self.params['maxlen'],
                                 self.params['vocabulary_len'])))
        self.model.add(
            tf.keras.layers.Conv1D(
                self.params['conv_num_filter'],
                self.params['conv_kernel_size'],
                activation='relu',
                kernel_regularizer=tf.keras.regularizers.l2(weight_decay),
                kernel_initializer=weight_init(self.seeds[2]),
                activity_regularizer=tf.keras.regularizers.l2(weight_decay)))
        self.model.add(tf.keras.layers.MaxPool1D())
        self.model.add(
            tf.keras.layers.Dropout(self.params['dropout_1_rate'],
                                    seed=self.seeds[0]))
        self.model.add(
            tf.keras.layers.Conv1D(
                self.params['conv_num_filter'],
                self.params['conv_kernel_size'],
                activation='relu',
                kernel_regularizer=tf.keras.regularizers.l2(weight_decay),
                kernel_initializer=weight_init(self.seeds[3]),
                activity_regularizer=tf.keras.regularizers.l2(weight_decay)))
        self.model.add(tf.keras.layers.MaxPool1D())
        self.model.add(
            Bidirectional(
                LSTM(int(self.params['lstm_units']),
                     return_sequences=False,
                     dropout=self.params['lstm_input_dropout'],
                     kernel_initializer=weight_init(self.seeds[0]),
                     recurrent_initializer=recurrent_init,
                     kernel_regularizer=l2(self.params['weight_decay']))))
        self.model.add(
            Dropout(self.params['lstm_output_dropout'], seed=self.seeds[2]))
        self.model.add(
            Dense(8,
                  activation='relu',
                  kernel_initializer=weight_init(self.seeds[0])))
        self.model.add(
            Dropout(self.params['dense_dropout_rate'], seed=self.seeds[3]))
        self.model.add(
            Dense(1,
                  activation='sigmoid',
                  kernel_initializer=weight_init(self.seeds[4]),
                  kernel_regularizer=l2(self.params['weight_decay'])))

        # Check if the user wants a pre-trained model. If yes load the weights
        if self.pretrained_model is not None:
            self.model.load_weights(self.pretrained_model)

    def build(self, logger=None):
        """
        It compiles the model by defining optimizer, loss and learning rate
        """
        optimizer = tf.keras.optimizers.RMSprop(lr=self.learning_rate,
                                                clipnorm=1.0)
        self.model.compile(loss='binary_crossentropy',
                           optimizer=optimizer,
                           metrics=['accuracy', f1_m, precision_m, recall_m])
        if (logger is not None):
            self.model.summary(print_fn=lambda x: logger.info(x))
        else:
            self.model.summary()

        # Print params onto the logger
        if logger is not None:
            logger.info("\n" + json.dumps(self.params, indent=4))

    def fit(self,
            X_tr,
            y_tr,
            epochs,
            callbacks_list,
            validation_data,
            shuffle=True):
        """
        Fit the model with the provided data and returns the results
        Inputs:
        - X_tr: samples
        - y_tr: labels related to the samples
        - epochs: number of epochs before stopping the training
        - callbacks_list
        - validation_data: data the model is validated on each time a epoch is completed
        - shuffle: if the dataset has to be shuffled before being fed into the network

        Outputs:
        - history: it contains the results of the training
        """

        callbacks_list = self._get_callbacks()
        history = self.model.fit(x=X_tr,
                                 y=y_tr,
                                 epochs=epochs,
                                 shuffle=True,
                                 batch_size=self.batch_size,
                                 callbacks=callbacks_list,
                                 validation_data=validation_data)
        trained_epochs = callbacks_list[0].stopped_epoch - callbacks_list[
            0].patience + 1 if callbacks_list[0].stopped_epoch != 0 else epochs
        return history, trained_epochs

    def fit_early_stopping_by_loss_val(self,
                                       X_tr,
                                       y_tr,
                                       epochs,
                                       early_stopping_loss,
                                       callbacks_list,
                                       validation_data,
                                       shuffle=True):
        """
        Train model until current validation loss reaches holdout training loss specified by early_stopping_loss parameter. 
        
        Algorithm 7.3 (Ian Goodfellow, Yoshua Bengio, and Aaron Courville. 2016. Deep Learning. The MIT Press, pp. 246-250.)
        
        Params:
        -------
            :X_tr: training samples
            :y_tr: training labels
            :epochs: number of epochs training is performed on
            :early_stopping_loss: threshold loss - Once reached this loss the training is stopped
            :callbacks_list: list of callbacks to use in the training phase
            :validation_data: data to evaluate the model on at the end of each epoch
            :shuffle: if True, it shuffles data before starting the training
        
        """
        print(f"early stopping loss: {early_stopping_loss}")
        callbacks_list = self._get_callbacks(train=True)
        callbacks_list.append(
            EarlyStoppingByLossVal(monitor='val_loss',
                                   value=early_stopping_loss))
        history = self.model.fit(x=X_tr,
                                 y=y_tr,
                                 epochs=epochs,
                                 batch_size=self.batch_size,
                                 shuffle=True,
                                 callbacks=callbacks_list,
                                 validation_data=validation_data)
        return history

    def evaluate(self, features, labels):
        """
        Evaluates the trained model on the provided data
        Inputs:
        - features: sample of data to validate
        - labels: classes the data belong to
        Outputs:
        - loss
        - accuracy
        - f1_score
        - precision
        - recall
        """
        loss, accuracy, f1_score, precision, recall = self.model.evaluate(
            features, labels, verbose=0)
        metrics_value = [loss, accuracy, f1_score, precision, recall]

        results_dict = dict(zip(self.model.metrics_names, metrics_value))
        return results_dict

    def print_metric(self, name, value):
        print('{}: {}'.format(name, value))

    def save_weights(self):
        """
        Saves the network params, the model's weights (HDF5) and the architecture (JSON)
        """
        with open(os.path.join(self.results_base_dir, "network_params"),
                  'wb') as params_pickle:
            pickle.dump(self.params, params_pickle)

        self.model.save_weights(
            os.path.join(self.results_base_dir, 'my_model_weights.h5'))
        model_json = self.model.to_json()
        with open(os.path.join(self.results_base_dir, "model.json"),
                  "w") as json_file:
            json_file.write(model_json)

    def fit_generator(self,
                      generator,
                      steps_per_epoch,
                      epochs,
                      validation_data=None,
                      shuffle=True,
                      callbacks_list=None):
        """
        Train the model for the same number of update steps as in the holdout validation phase
        
        Algorithm 7.2(Ian Goodfellow, Yoshua Bengio, and Aaron Courville. 2016. Deep Learning. The MIT Press, pp. 246-250.)
        """
        history = self.model.fit_generator(
            generator,
            steps_per_epoch,
            epochs,
            shuffle=False,
            callbacks=self._get_callbacks(train=True),
            validation_data=validation_data)
        return history

    def _get_callbacks(self, train=True):
        """
        It defines the callbacks for this specific architecture
        """
        callbacks_list = [
            keras.callbacks.EarlyStopping(monitor='val_loss',
                                          patience=10,
                                          restore_best_weights=True),
            keras.callbacks.ModelCheckpoint(filepath=os.path.join(
                self.results_base_dir, 'model_checkpoint_weights.h5'),
                                            monitor='val_loss',
                                            save_best_only=True,
                                            verbose=0),
            keras.callbacks.CSVLogger(
                os.path.join(self.results_base_dir, 'history.csv')),
            keras.callbacks.ReduceLROnPlateau(patience=10,
                                              monitor='val_loss',
                                              factor=0.75,
                                              verbose=1,
                                              min_lr=5e-6)
        ]
        return callbacks_list

    def predict(self,
                x_test,
                batch_size: int = 32,
                verbose: int = 0) -> np.array:
        """
        Wrapper method for the Keras model's method 'predict'

        Params:
        -------
            :x_test: test samples
            :batch_size: default=32
            :verbose: verbosity level
        """
        return self.model.predict(
            x_test,
            batch_size=batch_size,
            verbose=verbose,
        ).ravel()

    def predict_classes(self,
                        x_test,
                        batch_size: int = 32,
                        verbose: int = 1) -> np.array:
        """
        Wrapper method for the Keras model's method 'predict_classes'

        Params:
        -------
            :x_test: test samples
            :batch_size: default=32
            :verbose: verbosity level

        Raise:
            Exception
        """
        try:
            return self.model.predict_classes(x_test)
        except Exception as err:
            print(f"EXCEPTION-RAISED: {err}")
            sys.exit(-1)
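
# Hedged sketch (not from the source): one plausible implementation of the
# EarlyStoppingByLossVal callback appended in fit_early_stopping_by_loss_val()
# above: stop training once the monitored loss drops to `value`.
import warnings
from tensorflow.keras.callbacks import Callback

class EarlyStoppingByLossVal(Callback):
    def __init__(self, monitor='val_loss', value=0.0, verbose=1):
        super().__init__()
        self.monitor = monitor
        self.value = value
        self.verbose = verbose

    def on_epoch_end(self, epoch, logs=None):
        current = (logs or {}).get(self.monitor)
        if current is None:
            warnings.warn(f"EarlyStoppingByLossVal requires {self.monitor} in logs")
            return
        if current <= self.value:
            if self.verbose:
                print(f"Epoch {epoch}: early stopping, {self.monitor} = {current:.4f}")
            self.model.stop_training = True
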
Example #7
    class_mode='binary')
model = Sequential()
model.add(
    layers.Conv2D(200, (3, 3), input_shape=(100, 100, 3), activation='relu'))
model.add(layers.MaxPooling2D(
    pool_size=(2, 2),
    strides=2))  # if strides is not given, it defaults to the pool size
model.add(layers.Conv2D(32, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D(pool_size=(2, 2), strides=2))
model.add(layers.Flatten())
model.add(layers.Dense(units=128, activation='relu'))
model.add(layers.Dense(units=128, activation='relu'))

model.add(layers.Dense(units=1, activation='sigmoid'))
model.compile(optimizer='adam', loss='mse')
model.fit_generator(train_set, epochs=200, steps_per_epoch=10)
#1
for repeat in range(1, 20):
    img1 = image.load_img(
        'C:\\Users\\ahmed\\PycharmProjects\\untitled\\catanddog\\test1\\{}.jpg'
        .format(repeat),
        target_size=(100, 100))
    img = image.img_to_array(img1)
    img = img / 255
    img = np.expand_dims(img, axis=0)
    prediction = model.predict_classes(img)
    plt.text(20,
             62,
             prediction,
             color='red',
             fontsize=18,
Example #8
valid_gen = data_gen.flow_from_directory(spath,
                                         target_size=(224, 224),
                                         batch_size=batchSize,
                                         class_mode="categorical",
                                         subset="validation")

model = Sequential()
MobNet = MobileNetV2((224, 224, 3), include_top=False)
model.add(MobNet)
model.add(AveragePooling2D((7, 7)))
model.add(Flatten())
model.add(Dense(128, activation="relu"))
model.add(Dropout(0.5))
model.add(Dense(2, activation="softmax"))

model.compile(optimizer="adam", loss="binary_crossentropy", metrics="accuracy")

his = model.fit_generator(generator=train_gen,
                          steps_per_epoch=len(train_gen) // batchSize,
                          validation_data=valid_gen,
                          validation_steps=len(valid_gen) // batchSize,
                          epochs=epochNum)
model.save("mask_detector.h5", save_format='h5')

plt.style.use("ggplot")
plt.figure()
plt.plot(range(epochNum), his.history['loss'], label="loss")
plt.plot(range(epochNum), his.history['accuracy'], label="accuracy")

plt.show()
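
# Hedged inference sketch (not part of the original): loading the saved
# detector and scoring one 224x224 image, assuming the training generator
# rescaled inputs to [0, 1]; 'sample.jpg' is a hypothetical file.
import numpy as np
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image

detector = load_model("mask_detector.h5")
img = image.load_img("sample.jpg", target_size=(224, 224))
x = np.expand_dims(image.img_to_array(img) / 255.0, axis=0)
probs = detector.predict(x)[0]
print("predicted class index:", np.argmax(probs), probs)
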
Example #9
        featurewise_center=False,  # set input mean to 0 over the dataset 
        samplewise_center=False,  # set each sample mean to 0 
        featurewise_std_normalization=False,  # divide inputs by std of the dataset
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # dimension reduction
        rotation_range=0.5,  # randomly rotate images in the range of 0.5 degrees
        zoom_range = 0.5, # randomly zoom images by up to 50%
        width_shift_range=0.5,  # randomly shift images horizontally by up to 50%
        height_shift_range=0.5,  # randomly shift images vertically by up to 50%
        horizontal_flip=False,  # randomly flip images
        vertical_flip=False)  # randomly flip images

datagen.fit(X_train)

# fit the model
history = model.fit_generator(datagen.flow(X_train,Y_train, batch_size=batch_size), epochs = epochs, validation_data = (X_val,y_val), steps_per_epoch=X_train.shape[0] // batch_size)

#
plt.plot(history.history['val_loss'], color='purple', label="validation loss")
plt.title("Test Loss")
plt.xlabel("Number of Epochs")
plt.ylabel("Loss")
plt.legend()
plt.show()

# confusion matrix
import seaborn as sns

Y_pred = model.predict(X_val)
 
Y_pred_classes = np.argmax(Y_pred,axis = 1) 
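
# Hedged completion sketch (not in the original snippet): building and
# plotting the confusion matrix, assuming y_val holds one-hot encoded
# validation labels as used in the fit_generator call above; np, plt and sns
# come from the surrounding script.
from sklearn.metrics import confusion_matrix

Y_true = np.argmax(y_val, axis=1)
cm = confusion_matrix(Y_true, Y_pred_classes)
plt.figure(figsize=(8, 8))
sns.heatmap(cm, annot=True, fmt='d', cmap='Blues')
plt.xlabel('Predicted label')
plt.ylabel('True label')
plt.show()
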
Example #10
model.add(Dense(1024))
model.add(Activation("relu"))
model.add(BatchNormalization())
model.add(Dropout(0.5))

# softmax classifier
model.add(Dense(len(classes)))
model.add(Activation("softmax"))
# -

# Compile model with optimizer, loss function, and metrics
optimizer = Adam(learning_rate=learning_rate, epsilon=1e-08, clipnorm=1.0)
model.compile(optimizer=optimizer,
              loss='binary_crossentropy',
              metrics=['accuracy'])

# Train the model
model.fit_generator(train_generator,
                    steps_per_epoch=steps_per_epoch,
                    epochs=num_epochs,
                    validation_data=valid_generator,
                    validation_steps=10,
                    callbacks=[AmlLogger()],
                    verbose=1)

# Save the output model
if not os.path.exists(output_dir):
    os.makedirs(output_dir)
model.save(os.path.join(output_dir, 'model.h5'))
shutil.copyfile('classes.txt', os.path.join(output_dir, 'classes.txt'))
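
# Hedged sketch (not from the source): a minimal version of the AmlLogger
# callback used in the training call above, forwarding per-epoch metrics to
# the current Azure ML run; in the original script it would be defined before
# model.fit_generator is invoked.
from azureml.core import Run
from tensorflow.keras.callbacks import Callback

class AmlLogger(Callback):
    def __init__(self):
        super().__init__()
        self.run = Run.get_context()

    def on_epoch_end(self, epoch, logs=None):
        for name, value in (logs or {}).items():
            self.run.log(name, float(value))
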
class RECOGNIZE_FLOWERS():
    def __init__(self):
        self.num_epochs = 3
        self.batch_size = 40
        self.input_height = 500
        self.input_width = 500
        self.channels = 3
        self.input_shape = (self.input_height, self.input_width, self.channels)

        self.train_dir = "Flower_images/data/train"
        self.train_files = []
        for i in range(0, 200):
            self.train_files.append(str(i) + ".jpg")
        self.labels = pd.read_csv("Flower_images/data/train.csv")
        self.train_labels = self.labels[:280]
        self.train_labels_dict = {
            i: j
            for i, j in zip(self.train_labels["image_id"],
                            self.train_labels["category"])
        }

        self.validation_files = []
        for i in range(200, 280):
            self.validation_files.append(str(i) + ".jpg")
        self.steps_per_epoch = 5

    def buildAndCompileModel(self):
        self.model = Sequential()
        self.model.add(
            Conv2D(16, (3, 3), padding='same', input_shape=self.input_shape))
        self.model.add(Activation('relu'))
        self.model.add(Conv2D(16, (3, 3)))
        self.model.add(Activation('relu'))
        self.model.add(Conv2D(16, (3, 3)))
        self.model.add(Activation('relu'))

        self.model.add(Flatten())
        self.model.add(Dense(256))
        self.model.add(Activation('relu'))
        self.model.add(Dense(102, activation='sigmoid'))
        self.model.compile(optimizer='adam',
                           loss="binary_crossentropy",
                           metrics=["accuracy"])

    def train_model(self):
        training_generator = self.image_datagenerator(self.train_files)
        validation_generator = self.image_datagenerator(self.validation_files)

        self.model.fit_generator(training_generator,
                                 steps_per_epoch=self.steps_per_epoch,
                                 validation_data=validation_generator,
                                 validation_steps=2,
                                 epochs=self.num_epochs)

        self.model.predict_generator(validation_generator, verbose=1)

    def image_datagenerator(self, input_filenames):
        input_files = []
        for i in input_filenames:
            input_files.append(self.train_dir + "/" + i)
        counter = 0
        random.shuffle(input_files)
        while True:
            images = np.zeros(
                (self.batch_size, self.input_width, self.input_height, 3))
            labels = []
            if counter + self.batch_size >= len(input_files):
                counter = 0
            for i in range(self.batch_size):
                img = str(input_files[counter + i])
                images[i] = np.array(Image.open(img)) / 255.0
                file_number = img.replace("Flower_images/data/train/",
                                          "").replace(".jpg", "")
                labels.append(self.train_labels_dict[int(file_number)])
            yield (images, labels)
            counter += self.batch_size
                  metrics=['accuracy'])
    if i == 0:
        model.summary()

    # set callbacks
    mcloss_cb = callbacks.ModelCheckpoint(filepath=path + os.sep + str(i) + os.sep + 'lowValLoss.hdf5', monitor='val_loss', save_best_only=True)
    mcacc_cb = callbacks.ModelCheckpoint(filepath=path + os.sep + str(i) + os.sep + 'bestAcc.hdf5', monitor='val_acc', save_best_only=True)
    esloss_cb = callbacks.EarlyStopping(patience=1)

    cbs = []
    cbs.append(mcloss_cb)
    cbs.append(mcacc_cb)
    cbs.append(esloss_cb)

    # fits the model on batches with real-time data augmentation:
    his = model.fit_generator(datagen.flow(x_train, y_train, batch_size=8, save_to_dir=path + os.sep + str(i) + os.sep, save_prefix='train'), validation_data=(x_test,y_test),
                              callbacks= cbs, steps_per_epoch=100, epochs=epoch, verbose=1)

    accs[i,0:len(his.history['acc'])] = his.history['acc']
    val_accs[i,0:len(his.history['val_acc'])] = his.history['val_acc']
    loss[i,0:len(his.history['loss'])] = his.history['loss']
    val_loss[i,0:len(his.history['val_loss'])] = his.history['val_loss']

    np.save(path + os.sep + 'acc', accs)
    np.save(path + os.sep + 'val_acc', val_accs)
    np.save(path + os.sep + 'val_loss', val_loss)
    np.save(path + os.sep + 'loss', loss)
    i += 1


np.save(path + os.sep + 'acc', accs)
np.save(path + os.sep + 'val_acc', val_accs)
Example #13
valid = train_datagen.flow_from_directory(directory=train_dir, target_size=(img_height, img_lenght),
                                          class_mode="categorical", batch_size=32, subset="validation")

mobilenet = MobileNetV2(weights = "imagenet",include_top = False,input_shape=(150,150,3))
for layer in mobilenet.layers:
    layer.trainable = False

model = Sequential()
model.add(mobilenet)
model.add(Flatten())
model.add(Dense(2,activation="sigmoid"))
model.compile(optimizer="adam",loss="categorical_crossentropy",metrics ="accuracy")
checkpoint = ModelCheckpoint("moblenet_facemask.h5",monitor="val_accuracy",save_best_only=True,verbose=1)
earlystop = EarlyStopping(monitor="val_accuracy",patience=5,verbose=1)
history = model.fit_generator(generator=train,steps_per_epoch=len(train)// 32,validation_data=valid,
                             validation_steps = len(valid)//32,callbacks=[checkpoint,earlystop],epochs=15)
model.evaluate_generator(valid)
model.save("face_mask.h5")
pred = model.predict_classes(valid)
pred[:15]
#check

#without mask
mask = "../input/with-and-without-mask/"
plt.figure(figsize=(8, 7))
label = {0: "With Mask", 1: "Without Mask"}
color_label = {0: (0, 255, 0), 1: (0, 0, 255)}
cascade = cv2.CascadeClassifier("../input/frontalface/haarcascade_frontalface_default.xml")
count = 0
i = "../input/with-and-without-mask/mask9.jpg"
Example #14
model.add(Dropout(0.4))
model.add(LSTM(100, activation = 'tanh', recurrent_activation = 'sigmoid', unroll = False, use_bias = True, recurrent_dropout = 0,return_sequences= True))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(1, activation = 'linear'))
'''
adadelta = Adadelta(learning_rate=1.0, rho=0.95)
model.compile(loss='mse',
              optimizer=adadelta,
              metrics=[tf.keras.metrics.MeanAbsolutePercentageError()])
model.summary()

##### TRAINING #####
history = model.fit_generator(train_gen,
                              epochs=100,
                              verbose=2,
                              shuffle=False,
                              validation_data=test_gen)

##### PLOTTING LOSS ######
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='test')
plt.legend()
plt.show()
score = model.evaluate_generator(test_gen, verbose=1)
print()
print('Test loss:', score[0])
print('Test MAPE:', score[1])
print()

###### RESHAPE ACTUAL DATA #######
Example #15
def train(ticker):

    #Initializing the Paths
    create_dir(model_dir + '/' + ticker)

    output = {}
    present_date = date.today()
    prev_date = date.today() - timedelta(days=5457)
    dataset = get_stock_data(ticker,
                             start_date=prev_date,
                             end_date=present_date,
                             drop_na=True).reset_index(drop=True)

    dataset.to_csv(model_dir + '/' + ticker + '/' + str(ticker) + '.csv',
                   index=False)

    scaler = MinMaxScaler(feature_range=(0, 1))
    scaled = scaler.fit_transform(dataset.values)

    pickle.dump(scaler, open(model_dir + '/' + ticker + '/scaler.pkl', 'wb'))

    ##### TRAIN TEST SPLITTING #####
    '''
    train_gen = TimeseriesGenerator(scaled, scaled[:,0:4], start_index = 0, end_index = int(len(scaled) * 0.90), length = 1, batch_size = 256)
    test_gen = TimeseriesGenerator(scaled, scaled[:,0:4], start_index = int(len(scaled) * 0.90), end_index = int(len(scaled) - 1), length = 1, batch_size = 256)
    '''
    train_gen = TimeseriesGenerator(scaled,
                                    scaled[:, :4],
                                    start_index=0,
                                    end_index=int(len(scaled) * 0.85),
                                    length=5,
                                    batch_size=16)
    test_gen = TimeseriesGenerator(scaled,
                                   scaled[:, :4],
                                   start_index=int(len(scaled) * 0.85),
                                   end_index=int(len(scaled) - 1),
                                   length=5,
                                   batch_size=16)
    ##### MODEL CREATION ######
    '''
    model = Sequential()
    model.add(Conv1D(18, kernel_size=3, activation='relu', padding = 'valid', strides=1, input_shape=(1,18), data_format='channels_first'))
    model.add(Conv1D(18, kernel_size=3, activation='relu', padding = 'valid', strides=1))
    #model.add(MaxPooling1D(pool_size=2))
    model.add(LSTM(100, activation = 'tanh', recurrent_activation = 'sigmoid', unroll = False, use_bias = True, recurrent_dropout = 0, return_sequences=True))
    model.add(Dropout(0.4))
    model.add(LSTM(100, activation = 'tanh', recurrent_activation = 'sigmoid', unroll = False, use_bias = True, recurrent_dropout = 0,return_sequences=True))
    model.add(Dropout(0.4))
    model.add(LSTM(100, activation = 'tanh', recurrent_activation = 'sigmoid', unroll = False, use_bias = True, recurrent_dropout = 0,return_sequences= True))
    model.add(Dropout(0.2))
    model.add(Flatten())
    model.add(Dense(4, activation = 'linear'))
    adadelta = Adadelta(learning_rate=1.0, rho=0.95)
    model.compile(loss= 'mae', optimizer = adadelta, metrics=[tf.keras.metrics.MeanAbsolutePercentageError()])
    #model.summary()
    '''
    from tensorflow.keras.regularizers import l1
    model = Sequential()
    model.add(
        Conv1D(18,
               kernel_size=3,
               activation='relu',
               padding='valid',
               strides=1,
               input_shape=(5, 18),
               data_format='channels_first'))
    model.add(
        Conv1D(18,
               kernel_size=4,
               activation='relu',
               padding='valid',
               strides=1))
    #model.add(MaxPooling1D(pool_size=2))
    model.add(
        LSTM(100,
             activation='tanh',
             recurrent_activation='sigmoid',
             unroll=False,
             use_bias=True,
             recurrent_dropout=0,
             return_sequences=True,
             kernel_regularizer=l1(0.001)))
    model.add(Dropout(0.4))
    model.add(
        LSTM(50,
             activation='tanh',
             recurrent_activation='sigmoid',
             unroll=False,
             use_bias=True,
             recurrent_dropout=0,
             return_sequences=True,
             activity_regularizer=l1(0.001)))
    model.add(Dropout(0.4))
    model.add(
        LSTM(25,
             activation='tanh',
             recurrent_activation='sigmoid',
             unroll=False,
             use_bias=True,
             recurrent_dropout=0,
             return_sequences=True,
             activity_regularizer=l1(0.001)))
    model.add(Dropout(0.2))
    model.add(Flatten())
    model.add(Dense(4, activation='linear'))
    adadelta = Adadelta(learning_rate=1.0, rho=0.95)

    model.compile(loss='mse',
                  optimizer=adadelta,
                  metrics=[tf.keras.metrics.RootMeanSquaredError()])
    model.summary()
    ##### TRAINING #####
    my_callbacks = [
        #tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', patience=4, factor=0.2, min_lr=0.001),
        tf.keras.callbacks.ModelCheckpoint(
            filepath=model_dir + "/" + ticker + "/model" + ".h5",
            save_weights_only=True,
            save_best_only=True,
            monitor="val_root_mean_squared_error",
            mode="min"),
    ]
    history = model.fit_generator(train_gen,
                                  epochs=300,
                                  verbose=0,
                                  shuffle=True,
                                  validation_data=test_gen,
                                  callbacks=my_callbacks)

    ##### PLOTTING LOSS ######
    '''plt.plot(history.history['loss'], label='train')
    plt.plot(history.history['val_loss'], label='test')
    plt.legend()
    plt.show()
    score = model.evaluate_generator(test_gen, verbose = 1)
    print()
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])
    print()'''

    ###### RESHAPE ACTUAL DATA #######
    actual_train = reshape_actual(train_gen)
    predictions_train = model.predict_generator(train_gen, verbose=0)
    print(predictions_train)

    ##### RMSE FOR TRAIN #####
    rmse_train = math.sqrt(
        mean_squared_error(actual_train[:], predictions_train[:]))
    print(rmse_train)

    ###### TEST DATA ######
    actual_test = reshape_actual(test_gen)
    predictions_test = model.predict_generator(test_gen, verbose=0)
    rmse_test = math.sqrt(
        mean_squared_error(actual_test[:], predictions_test[:]))
    print(rmse_test)

    output["Accuracy"] = {
        "Train": round(rmse_train * 100, 2),
        "Test": round(rmse_test * 100, 2)
    }

    ###### PLOT TEST ######
    output["Train"] = plot_them_graphs(actual_train, predictions_train,
                                       "train", ticker, scaler)
    output["Test"] = plot_them_graphs(actual_test, predictions_test, "test",
                                      ticker, scaler)

    ##### SAVE IT!!!!!! #####

    model_json = model.to_json()
    with open(model_dir + "/" + ticker + "/model" + ".json", "w") as json_file:
        json_file.write(model_json)
    #model.save_weights(model_dir + "/" + ticker + "/model" + ".h5")
    print("Saved model to disk")

    data = {"name": ticker, "date": present_date.strftime("%d-%b-%Y")}

    with open(model_dir + "/" + ticker + '/data.json', 'w') as outfile:
        json.dump(data, outfile)

    return output
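
# Hedged sketch (not part of the original module): one plausible
# implementation of the reshape_actual() helper used above, stacking the
# target batches of a TimeseriesGenerator into a single 2-D array.
import numpy as np

def reshape_actual(generator):
    targets = [generator[i][1] for i in range(len(generator))]
    return np.concatenate(targets, axis=0)
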
Example #16
model.add(Flatten())
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))

model.add(Dense(1, activation='sigmoid'))
model.summary()

model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])

#Model Fitting
history = model.fit_generator(generator=trainData,
                              steps_per_epoch=100,
                              epochs=10,
                              validation_data=testData,
                              validation_steps=40)

#plot for Loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epochs')
plt.legend(['train', 'test'], loc='upper left')
plt.show()

#test Image
test = r'D:\Bhushan\casting_512x512\def_front\cast_def_0_180.jpeg'
img = image.load_img(test, target_size=(128, 128))
Example #17
    MaxPool2D(pool_size=2),

    Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), activation='relu'),
    BatchNormalization(),
    MaxPool2D(pool_size=2),

    Flatten(),
    Dense(units=num_classes, activation='softmax'),
])

loss = keras.losses.CategoricalCrossentropy()
opt = keras.optimizers.Adam(lr=0.000001)
best_model = ModelCheckpoint(
    'dogs-iter1model.h5', save_best_only=True, verbose=1)
model.compile(optimizer='adam', loss=loss, metrics=['accuracy'])
H = model.fit_generator(aug_generator.flow(train_X, train_Y, batch_size=batch_size),
                        epochs=epochs, validation_data=(test_X, test_Y), callbacks=[best_model])

# testing
test_loss, test_acc = model.evaluate(test_X, test_Y)
print('test_acc:', test_acc)

# plot
epochs = range(1, len(H.history['acc']) + 1)

plt.figure(1, figsize=(8, 5))
plt.title("Training and test accuracy")
plt.ylabel('accuracy')
plt.xlabel('epochs')
plt.plot(epochs, H.history['acc'], 'r', label='train')
plt.plot(epochs, H.history['val_acc'], 'b', label='test')
plt.legend()
Example #18
model.compile(optimizer='Adam',
              loss='mse',
              metrics=[tf.keras.metrics.RootMeanSquaredError()])

datagen = ImageDataGenerator(featurewise_center=True,
                             featurewise_std_normalization=True,
                             rotation_range=20,
                             width_shift_range=0.2,
                             zoom_range=0.4,
                             height_shift_range=0.2,
                             horizontal_flip=True)
# compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
datagen.fit(Images)
# fits the model on batches with real-time data augmentation:
model.fit_generator(datagen.flow(Images, Labels, batch_size=32), epochs=30)

#history = model.fit(Images, Labels, epochs=25, shuffle=True, validation_split=0.15)

#PRE-PROCESSING TEST IMAGES

directory = '/content/drive/My Drive/giara/jukil/test/'

test_images = []

for image_file in os.listdir(directory):  # Extracting the file name of the image from the Class Label folder
    image = cv2.imread(directory + '/' + image_file)  # Reading the image (OpenCV)
    image = cv2.resize(
Example #19
classifier.add(Dense(units = 1, activation = 'sigmoid'))

classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])

train_datagen = ImageDataGenerator(
        rescale=1./255,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True)

test_datagen = ImageDataGenerator(rescale=1./255)

train_set = train_datagen.flow_from_directory(
        'dataset/training_set',
        target_size=(64, 64),
        batch_size=32,
        class_mode='binary')

test_set = test_datagen.flow_from_directory(
        'dataset/test_set',
        target_size=(64, 64),
        batch_size=32,
        class_mode='binary')

classifier.fit_generator(
        train_set,
        steps_per_epoch=8000,
        epochs=25,
        validation_data=test_set,
        validation_steps=2000)
def create_model(x_valid, y_valid, datagen, val_datagen, x_train, y_train):
    # construct the search space
    conv1_numfilters = {{choice([32, 64, 128, 256])}}
    conv1_size = {{choice([(2, 2), (3, 3), (5, 5)])}}
    conv1_activation = {{choice([relu, tanh, elu])}}
    conv2_numfilters = {{choice([32, 64, 128, 256])}}
    conv2_size = {{choice([(2, 2), (3, 3), (5, 5)])}}
    conv2_activation = {{choice([relu, tanh, elu])}}
    conv3_numfilters = {{choice([32, 64, 128, 256])}}
    conv3_size = {{choice([(2, 2), (3, 3), (5, 5)])}}
    conv3_activation = {{choice([relu, tanh, elu])}}
    conv4_numfilters = {{choice([32, 64, 128, 256])}}
    conv4_size = {{choice([(2, 2), (3, 3), (5, 5)])}}
    conv4_activation = {{choice([relu, tanh, elu])}}
    maxpool1_size = {{choice([2, 4, 8])}}
    maxpool2_size = {{choice([2, 4, 8])}}
    dense_activation = {{choice([relu, tanh, elu])}}
    num_neurons = {{choice([64, 128, 256, 512])}}
    dropout = {{uniform(0, 1)}}
    learn_rate = {{uniform(0.00001, 0.01)}}
    batch_size = {{choice([16, 32, 64])}}
    # build model
    model = Sequential()
    model.add(
        Conv2D(conv1_numfilters,
               conv1_size,
               input_shape=(64, 64, 1),
               padding="same",
               activation=conv1_activation))
    model.add(
        Conv2D(conv2_numfilters,
               conv2_size,
               padding="same",
               activation=conv2_activation))
    model.add(BatchNormalization())
    model.add(MaxPool2D(pool_size=maxpool1_size))
    model.add(
        Conv2D(conv3_numfilters,
               conv3_size,
               padding="same",
               activation=conv3_activation))
    model.add(
        Conv2D(conv4_numfilters,
               conv4_size,
               padding="same",
               activation=conv4_activation))
    model.add(BatchNormalization())
    model.add(MaxPool2D(pool_size=maxpool2_size))
    model.add(Flatten())
    model.add(Dense(units=num_neurons, activation=dense_activation))
    model.add(Dropout(dropout))
    model.add(Dense(units=6, activation=softmax))
    model.compile(loss=sparse_categorical_crossentropy,
                  optimizer=Adam(lr=learn_rate, decay=0.001),
                  metrics=["accuracy"])
    model.summary()
    train_steps = x_train.shape[0] // batch_size
    valid_steps = x_valid.shape[0] // batch_size
    result = model.fit_generator(datagen.flow(x_train,
                                              y_train,
                                              batch_size=batch_size),
                                 epochs=80,
                                 verbose=2,
                                 validation_data=val_datagen.flow(
                                     x_valid, y_valid, batch_size=batch_size),
                                 steps_per_epoch=train_steps,
                                 validation_steps=valid_steps)
    # take max validation accuracy as metric
    validation_acc = np.amax(result.history['val_acc'])
    return {
        'loss': -validation_acc,
        'status': STATUS_OK,
        'model': model,
        'history.val_loss': result.history['val_loss'],
        'history.val_acc': result.history['val_acc']
    }
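
# Hedged usage sketch (not in the original): create_model() relies on the
# hyperas {{choice}}/{{uniform}} template syntax, so it would normally be
# driven by hyperas.optim.minimize together with a data() helper that returns
# the six arguments above (data() is assumed here, not shown in the snippet).
from hyperas import optim
from hyperopt import Trials, tpe

best_run, best_model = optim.minimize(model=create_model,
                                      data=data,            # assumed data() helper
                                      algo=tpe.suggest,
                                      max_evals=20,
                                      trials=Trials())
print("Best hyperparameters found:", best_run)
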
model.summary()

model.compile(optimizer='sgd',
              loss='mse',
              metrics=[tf.keras.metrics.RootMeanSquaredError()])
checkpointer = ModelCheckpoint(
    filepath='/content/drive/My Drive/giara/best_model_7class.hdf5',
    verbose=1,
    save_best_only=True)
csv_logger = CSVLogger('/content/drive/My Drive/giara/history_7class.log')

history = model.fit_generator(train_generator,
                              steps_per_epoch=nb_train_samples // batch_size,
                              validation_data=validation_generator,
                              validation_steps=nb_validation_samples //
                              batch_size,
                              epochs=6,
                              verbose=1,
                              callbacks=[csv_logger, checkpointer])

model.save('model_trained_7class.hdf5')

class_map_3 = train_generator.class_indices
class_map_3


def plot_rmse(history, title):
    plt.title(title)
    plt.plot(history.history['root_mean_squared_error'])
    plt.plot(history.history['val_root_mean_squared_error'])
    plt.ylabel('root_mean_squared_error')
from tensorflow.keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale = 1./255,shear_range = 0.2,zoom_range = 0.2,horizontal_flip = True)
test_datagen = ImageDataGenerator(rescale = 1)
x_train = train_datagen.flow_from_directory(r'C:\Users\Washifa\Desktop\D\trainset',target_size=(64,64),batch_size=32,class_mode='binary')
x_test = test_datagen.flow_from_directory(r'C:\Users\Washifa\Desktop\D\testset',target_size=(64,64),batch_size=32,class_mode='binary')

print(x_train.class_indices)
import tensorflow as tf
from tensorflow.python.keras.layers import Dense
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Convolution2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Flatten
model = Sequential()
model.add(Convolution2D(32,(3,3),input_shape = (64,64,3)))
model.add(MaxPooling2D(pool_size = (2,2)))
model.add(Flatten())
model.add(Dense(units = 128,kernel_initializer = 'uniform'))
model.add(Dense(units = 1,kernel_initializer = 'uniform'))
model.compile(loss='binary_crossentropy',optimizer = "adam",metrics = ["accuracy"])
model.fit_generator(x_train,validation_data=x_test, steps_per_epoch=10)
model.save(r'C:\Users\Washifa\Desktop\test\test.h5')
def main():
    ### pre-declared parameters for training
    batch_size = 64
    epochs = 250
    IMG_HEIGHT = 200
    IMG_WIDTH = 200
    ### all data sets to be used as the train, validation and test sets
    train_image_generator = ImageDataGenerator(
        rescale=1. / 255,
        rotation_range=45,
        width_shift_range=.15,
        height_shift_range=.15,
        horizontal_flip=True,
        zoom_range=0.5)  # Generator for our training data
    validation_image_generator = ImageDataGenerator(
        rescale=1. / 255)  # Generator for our validation data
    test_image_generator = ImageDataGenerator(rescale=1. / 255)
    train_data_gen = train_image_generator.flow_from_directory(
        batch_size=batch_size,
        directory=
        r"C:\Users\evgen\Desktop\DL_Git_forAllProj\CNN\dataset\train_set",
        shuffle=True,
        target_size=(IMG_HEIGHT, IMG_WIDTH),
        class_mode='binary')
    val_data_gen = validation_image_generator.flow_from_directory(
        batch_size=batch_size,
        directory=
        r"C:\Users\evgen\Desktop\DL_Git_forAllProj\CNN\dataset\val_set",
        target_size=(IMG_HEIGHT, IMG_WIDTH),
        class_mode='binary')
    test_data_gen = test_image_generator.flow_from_directory(
        batch_size=batch_size,
        directory=
        r"C:\Users\evgen\Desktop\DL_Git_forAllProj\CNN\dataset\train_set",
        target_size=(IMG_HEIGHT, IMG_WIDTH),
        class_mode='binary')
    ###building the model
    model = Sequential([
        Conv2D(16,
               3,
               padding='same',
               activation='relu',
               input_shape=(IMG_HEIGHT, IMG_WIDTH, 3)),
        MaxPooling2D(),
        Dropout(0.2),
        Conv2D(32, 3, padding='same', activation='relu'),
        MaxPooling2D(),
        Conv2D(64, 3, padding='same', activation='relu'),
        MaxPooling2D(),
        Conv2D(128, 3, padding='same', activation='relu'),
        MaxPooling2D(),
        Dropout(0.2),
        Flatten(),
        Dense(512, activation='relu'),
        Dense(1, activation='sigmoid')
    ])
    ### compiling the model
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    history = model.fit_generator(train_data_gen,
                                  steps_per_epoch=6003 // batch_size,
                                  epochs=epochs,
                                  validation_data=val_data_gen,
                                  validation_steps=2001 // batch_size)

    ### summary of the model after training
    print('\nhistory dict:', history.history)
    ###saving the model and weights as a json and h5 files
    json_str = model.to_json()
    with open(
            r'C:\Users\evgen\Desktop\n_models\saved_model_250ep_w_dropout_data_rich.json',
            'w') as outfile:
        json.dump(json.loads(json_str), outfile,
                  indent=4)  # Save the json on a file
        model.save_weights(
            r"C:\Users\evgen\Desktop\n_models\weights_250ep_w_dropout_data_rich.h5",
            save_format="h5")
    print("Saved model to disk")
    ###evaluating the model on the test data
    print('\n# Evaluate on test data')
    results_test = model.evaluate_generator(test_data_gen)
    print('test loss, test acc:', results_test)
    ### plotting the training and validation curves
    acc = history.history['accuracy']
    val_acc = history.history['val_accuracy']
    loss = history.history['loss']
    val_loss = history.history['val_loss']
    epochs_range = range(epochs)
    plt.figure(figsize=(6, 6))
    plt.subplot(1, 2, 1)
    plt.plot(epochs_range, acc, label='Training Accuracy')
    plt.plot(epochs_range, val_acc, label='Validation Accuracy')
    plt.legend(loc='lower right')
    plt.title('Training and Validation Accuracy')
    plt.subplot(1, 2, 2)
    plt.plot(epochs_range, loss, label='Training Loss')
    plt.plot(epochs_range, val_loss, label='Validation Loss')
    plt.legend(loc='upper right')
    plt.title('Training and Validation Loss')
    plt.show()
    RNN(HIDDEN_SIZE),
    layers.RepeatVector(3),
    RNN(128, return_sequences=True),
    layers.TimeDistributed(layers.Dense(len(CHARS), activation='softmax'))
])

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.summary()

train_generator = encode_generator(training_generator, BATCH_SIZE)

hist = model.fit_generator(train_generator,
                           steps_per_epoch=STEPS_PER_EPOCH,
                           epochs=EPOCHS,
                           verbose=1,
                           use_multiprocessing=True,
                           workers=-2,
                           callbacks=callbacks,
                           validation_data=train_generator, validation_steps=30)

score = model.evaluate_generator(encode_generator(
    test_generator, BATCH_SIZE), steps=STEPS_PER_EPOCH)
print(score)

config = build_config(MODEL_NAME, LEARNING_RATE, BATCH_SIZE,
                      EPOCHS, STEPS_PER_EPOCH, score[0], score[1])
wrapper = ModelWrapper(model, config=config)
wrapper.save_model()
Example #25
train_datagen = ImageDataGenerator(rescale=1.0 / 255.0,
                                   validation_split=0.2,
                                   rotation_range=10)
train_generator = train_datagen.flow(x=x_shuffled,
                                     y=y_shuffled,
                                     batch_size=batch_size,
                                     subset='training')  # set as training data

validation_generator = train_datagen.flow(
    x=x_shuffled, y=y_shuffled, batch_size=batch_size,
    subset='validation')  # set as validation data

#training
nb_epochs = 5
model.fit_generator(train_generator,
                    validation_data=validation_generator,
                    epochs=nb_epochs)

#testing
import os
x_test = []
for filename in os.listdir('D:/desktop/med_hw2/test_image/'):
    img = cv2.imread(os.path.join('D:/desktop/med_hw2/test_image/', filename))
    if img is not None:
        img = cv2.resize(img, (224, 224), interpolation=cv2.INTER_AREA)
        x_test.append(img)
x_test_np = np.array(x_test)
y_predict = model.predict(x_test_np)
import csv

fn = sorted(os.listdir('D:/desktop/med_hw2/test_image/'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPool2D(2, 2))

model.add(Conv2D(128, (3, 3), activation='relu'))
#model.add(MaxPool2D(2, 2))

model.add(Flatten())

#model.add(Dense(512, activation='relu'))

model.add(Dense(256, activation='relu'))

model.add(Dense(128, activation='relu'))

model.add(Dense(64, activation='relu'))

model.add(Dense(32, activation='relu'))

model.add(Dense(1, activation='sigmoid'))

print(model.summary())

model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])
history = model.fit_generator(generator=train_data_generator,
                              steps_per_epoch=len(train_data_generator),
                              epochs=20,
                              validation_data=validation_data_generator,
                              validation_steps=len(validation_data_generator))
Example #27
model.add(MaxPooling2D((2, 2), padding='same'))
# model.add(Dropout(0.2))
model.add(Conv2D(32, (6, 6), padding='same', activation='relu'))
model.add(MaxPooling2D((2, 2), padding='same'))
model.add(Flatten())
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(4, activation='softmax'))

model.compile(loss="categorical_crossentropy",
              optimizer='adam',
              metrics=['accuracy'])

history = model.fit_generator(
    train_generator,
    workers=100,
    steps_per_epoch=63,  # 2000 images over 32 batch size
    validation_data=valid_generator,
    epochs=20)

scores = model.evaluate_generator(valid_generator)
print("\n\n1 - Accuracy 1st test: " + str(scores[1]))
"""scores = model.evaluate_generator(valid_generator2)
print("\n\n1 - Accuracy 2nd test: "+str(scores[1]))
scores = model.evaluate_generator(valid_generator3)
print("\n\n1 - Accuracy 3rd test: "+str(scores[1])) """

loss1 = history.history['loss']
val_loss1 = history.history['val_loss']

plt.plot(loss1, color="red")
plt.plot(val_loss1, color="blue")
Example #28
def train_binary_model(path,
                       epochs=100,
                       ft_epochs=100,
                       learning_rate=0.01,
                       classes_to_match: Union[int, List[int]] = 0,
                       classes_to_drop: Union[int, List[int]] = None):
    """
    Train a smaller binary model for empty/not empty classification and save it under the given path. The method first
    loads the datasets using :py:doc:`generate_datasets.py <training.generate_datasets.py>` methods. Then the model is
    trained, saved and finally evaluated.

    Training is run in two steps: It is first trained with synthetic data and then finetuned with real data. Early
    stopping is used to prevent overfitting.

    Args:
        path(str): The directory to save the trained model to.
        epochs(int): The number of epochs. (Default value = 100)
        ft_epochs(int): The number of finetuning epochs. (Default value = 100)
        learning_rate(float): The learning rate for the Adadelta optimizer. (Default value = 0.01)
        classes_to_match(Union[int, list[int]]): The classes to match as class 1. (Default value = 0)
        classes_to_drop(Union[int, list[int]]): The classes to drop from the dataset. (Default value = None)

    Returns:
        None

    """
    os.makedirs(path, exist_ok=True)
    concat_machine, concat_hand, concat_out, real_training, real_validation = load_datasets(
        TRANSFORMED_DATASET_NAMES)

    batch_size = 192
    train_generator = ToBinaryGenerator(concat_machine.train,
                                        concat_hand.train,
                                        concat_out.train,
                                        classes_to_match=classes_to_match,
                                        classes_to_drop=classes_to_drop,
                                        batch_size=batch_size,
                                        shuffle=True,
                                        truncate=True)

    dev_generator = ToBinaryGenerator(concat_machine.test,
                                      concat_hand.test,
                                      concat_out.test,
                                      classes_to_match=classes_to_match,
                                      classes_to_drop=classes_to_drop,
                                      batch_size=batch_size,
                                      shuffle=True,
                                      truncate=True)

    ft_train_generator = ToBinaryGenerator(real_training.train,
                                           classes_to_match=classes_to_match,
                                           classes_to_drop=classes_to_drop,
                                           batch_size=batch_size,
                                           shuffle=True,
                                           truncate=True)

    ft_dev_generator = ToBinaryGenerator(real_training.test,
                                         classes_to_match=classes_to_match,
                                         classes_to_drop=classes_to_drop,
                                         batch_size=batch_size,
                                         shuffle=True,
                                         truncate=True)

    test_generator = ToBinaryGenerator(real_validation.test,
                                       classes_to_match=classes_to_match,
                                       classes_to_drop=classes_to_drop,
                                       batch_size=batch_size,
                                       shuffle=False)

    # Run training on the GPU
    with tf.device('/GPU:0'):
        # Keras Model
        print("Creating model..")
        model = Sequential()
        model.add(Conv2D(16, (5, 5), strides=2, input_shape=(28, 28, 1)))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(4, 4)))
        model.add(Conv2D(32, (2, 2)))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Flatten())  # 32
        model.add(Dense(64, activation='relu'))
        model.add(Dropout(0.25))
        model.add(Dense(64, activation='relu'))
        model.add(Dense(1, activation='sigmoid'))

        # def mean_pred(_, y):
        #     return keras.backend.mean(y)

        print("Compiling model..")
        model.compile(
            # the output layer applies a sigmoid, so the loss receives probabilities, not logits
            loss=keras.losses.BinaryCrossentropy(from_logits=False),
            optimizer=keras.optimizers.Adadelta(learning_rate),
            metrics=[keras.metrics.binary_accuracy, 'mse'],
        )
        print(model.summary())

        print("Training model")
        model.fit_generator(train_generator,
                            validation_data=dev_generator,
                            epochs=epochs,
                            callbacks=[
                                # the compiled metric is binary_accuracy, so Keras logs
                                # val_binary_accuracy rather than val_accuracy
                                EarlyStopping(monitor='val_binary_accuracy',
                                              restore_best_weights=True,
                                              patience=3,
                                              min_delta=0.0001),
                            ])

        print("Finetuning model")
        # validate the finetuning step on the held-out real data rather than on the
        # finetuning training generator itself
        model.fit_generator(ft_train_generator,
                            validation_data=ft_dev_generator,
                            epochs=ft_epochs,
                            callbacks=[
                                EarlyStopping(monitor='val_binary_accuracy',
                                              restore_best_weights=True,
                                              patience=3,
                                              min_delta=0.0001),
                            ])

        models.save_model(model, os.path.join(path, "model.h5"), save_format='h5')

        print("Evaluating")
        print(
            "Training dev",
            list(
                zip(model.metrics_names,
                    model.evaluate_generator(dev_generator))))
        print(
            "Finetuning dev",
            list(
                zip(model.metrics_names,
                    model.evaluate_generator(ft_dev_generator))))
        print(
            "Test",
            list(
                zip(model.metrics_names,
                    model.evaluate_generator(test_generator))))
        evaluate(model, test_generator, binary=True)
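# Hedged usage sketch: the output directory and epoch counts below are illustrative
# assumptions, not values taken from the original example.
if __name__ == '__main__':
    train_binary_model('models/empty_not_empty/',
                       epochs=50,
                       ft_epochs=20,
                       learning_rate=0.01,
                       classes_to_match=0)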
Example #29
0
model.add(MaxPool2D(pool_size=3, strides=2))

model.add(
    Conv2D(256, kernel_size=3, strides=2, padding='SAME', activation='relu'))
model.add(
    Conv2D(256, kernel_size=3, strides=1, padding='SAME', activation='relu'))
model.add(
    Conv2D(256, kernel_size=3, strides=1, padding='SAME', activation='relu'))
model.add(MaxPool2D(pool_size=3, strides=2))
model.add(Dropout(rate=0.3))

model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
print(model.summary())
opt = tf.keras.optimizers.Adam()
ES = EarlyStopping(monitor='val_loss',
                   patience=3,
                   verbose=1,
                   restore_best_weights=True)
LRR = ReduceLROnPlateau(monitor='val_loss', factor=0.2, verbose=1)
model.compile(optimizer=opt, loss='binary_crossentropy', metrics=['accuracy'])
history = model.fit_generator(train_gen,
                              steps_per_epoch=steps,
                              epochs=15,
                              callbacks=[ES, LRR],
                              validation_data=val_gen)

score = model.evaluate_generator(test_gen)
print(score)
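# `history` is captured above but never used; a hedged sketch of plotting the loss
# curves, assuming matplotlib.pyplot is imported as plt as in the other examples.
plt.plot(history.history['loss'], label='Training loss')
plt.plot(history.history['val_loss'], label='Validation loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()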
    zca_whitening=False,  # apply ZCA whitening
    rotation_range=10,  # randomly rotate images in the range (degrees, 0 to 180)
    zoom_range=0.1,  # randomly zoom images
    width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
    height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
    horizontal_flip=False,  # randomly flip images horizontally
    vertical_flip=False)  # randomly flip images vertically

datagen.fit(x_train)

history = model.fit_generator(datagen.flow(x_train,
                                           y_train,
                                           batch_size=batch_size),
                              epochs=epochs,
                              validation_data=(x_val, y_val),
                              verbose=2,
                              steps_per_epoch=x_train.shape[0] // batch_size,
                              callbacks=[learning_rate_reduction])

df_test = pd.read_csv(os.path.join(raw_data_dir, project, test_file))
df_test /= 255
df_test = df_test.values.reshape(-1, 28, 28, 1)

results = model.predict(df_test)

# select the index with the maximum probability
results = np.argmax(results, axis=1)
results = pd.Series(results, name="Label")
submission = pd.concat([pd.Series(range(1, 28001), name="ImageId"), results],
                       axis=1)
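# The snippet stops after assembling the submission DataFrame; writing it to disk is an
# assumed final step and the filename is illustrative.
submission.to_csv('submission.csv', index=False)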