Code Example #1
def train_classifier() -> tf.keras.Model:
    model = get_classification_model()
    model = compile_model(model)

    train, val, test = create_datasets()
    train = configure_dataset_for_performance(train, BATCH_SIZE)
    val = configure_dataset_for_performance(val, BATCH_SIZE)
    test = configure_dataset_for_performance(test, BATCH_SIZE)

    log_dir = "logs/fit/" + datetime.now().strftime("%Y%m%d-%H%M%S")
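    # callbacks: early stopping on val_loss (restoring the best weights), a NaN guard,
    # TensorBoard logging, and live loss plotting via PlotLossesKeras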
    callbacks = [
        EarlyStopping(monitor="val_loss",
                      patience=5,
                      restore_best_weights=True),
        TerminateOnNaN(),
        TBCallback(log_dir=log_dir, histogram_freq=1),
        PlotLossesKeras(),
    ]
    fit_kwargs = dict(
        epochs=20,
        shuffle=True,
        callbacks=callbacks,
    )
    history = model.fit(train, validation_data=val, **fit_kwargs)

    plot_history(history.history)

    scores = model.evaluate(test)
    metric_dict = dict(zip(model.metrics_names, scores))
    print("Test metrics:", metric_dict)

    return model
Code Example #2
    def KerasModel_train(self, trainX, trainY, model_path):
        """
        :param trainX: training data set
        :param trainY: expect value of training data
        :param testX: test data set
        :param testY: expect value of test data
        :param model_path: h5 file to store the trained model
        :param override: override existing models
        :return: model after training
        """
        input_dim = trainX[0].shape[1]
        output_dim = trainY.shape[1]
        # build the stacked LSTM model
        model = Sequential()
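        # NOTE: this snippet uses legacy Keras 1.x argument names (output_dim, input_dim,
        # dropout_U, nb_epoch); on Keras 2+ these roughly correspond to units, input_shape,
        # recurrent_dropout and epochs.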
        # first LSTM layer: maps input_dim-dimensional inputs to lstm_output_dim-dimensional outputs;
        # the recurrent dropout (dropout_U) helps reduce overfitting
        model.add(
            LSTM(output_dim=self.lstm_output_dim,
                 input_dim=input_dim,
                 activation=self.activation_lstm,
                 dropout_U=self.drop_out,
                 return_sequences=True))
        for i in range(self.lstm_layer - 2):
            model.add(
                LSTM(output_dim=self.lstm_output_dim,
                     activation=self.activation_lstm,
                     dropout_U=self.drop_out,
                     return_sequences=True))
        # return_sequences must be False on the last LSTM layer so its 2-D output can feed the Dense layers
        model.add(
            LSTM(output_dim=self.lstm_output_dim,
                 activation=self.activation_lstm,
                 dropout_U=self.drop_out))
        # fully connected layers on top of the LSTM output
        for i in range(self.dense_layer - 1):
            model.add(
                Dense(output_dim=self.lstm_output_dim,
                      activation=self.activation_dense))
            model.add(Dropout(self.drop_out))
        model.add(Dense(output_dim=output_dim,
                        activation=self.activation_last))
        # configure the learning process
        model.compile(loss=self.loss,
                      optimizer=self.optimizer,
                      metrics=['accuracy'])
        # train the model for a fixed number of epochs
        # model.fit(x=trainX, y=trainY, batch_size=self.nb_epoch, verbose=2, shuffle=False)
        model.fit(x=trainX,
                  y=trainY,
                  nb_epoch=self.nb_epoch,
                  batch_size=self.batch_size,
                  callbacks=[PlotLossesKeras()])
        # model.fit(x=trainX, y=trainY, nb_epoch=self.nb_epoch, batch_size=self.batch_size, validation_data=(testX, testY))

        ## TODO: evaluate by feeding timestep inputs sequentially
        score = model.evaluate(trainX, trainY, self.batch_size)
        # print("Model evaluation: {}".format(score))
        # persist the trained model to model_path
        save_model(model, model_path)
Code Example #3
    def fitmodel(self, x_train, x_test, y_train, y_test):
        """self.history = self.model.fit(x_train, [x_train, np_utils.to_categorical(y_train)],
			epochs=self.epochs,
			batch_size=self.batch_size,
			shuffle=True,
			validation_data=(x_test, [x_test, np_utils.to_categorical(y_test)]),
			callbacks=[PlotLossesKeras()])"""
        self.history = self.model.fit(
            x_train,
            x_train,
            epochs=self.epochs,
            batch_size=self.batch_size,
            shuffle=True,
            validation_data=(x_test, x_test),
            callbacks=[PlotLossesKeras()],
        )
Code Example #4
def test_keras():
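    # PlotLossesKeras can route its output through custom handlers passed via `outputs`;
    # CheckOutput is presumably a test helper that asserts on the logged metrics.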
    callback = PlotLossesKeras(outputs=(CheckOutput(), ))
    model = Sequential()
    model.add(LSTM(5, input_shape=(1, NUM_OF_GENERATED)))
    model.add(Dense(NUM_OF_GENERATED, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    X_train, Y_train = generate_data()
    X_test, Y_test = generate_data()
    model.fit(X_train,
              Y_train,
              epochs=2,
              validation_data=(X_test, Y_test),
              callbacks=[callback],
              verbose=False)
Code Example #5
def training(EPOCHS, VERBOSITY, MODEL_FILE, TRAINING_LOGS_FILE, training_generator, validation_generator, model):       
    history = model.fit_generator(training_generator,
                                steps_per_epoch=len(training_generator), 
                                validation_data=validation_generator,
                                validation_steps=len(validation_generator),
                                epochs=EPOCHS,
                                verbose=VERBOSITY,
                                callbacks=[PlotLossesKeras(),
                                            ModelCheckpoint(MODEL_FILE,
                                                            monitor='val_acc',
                                                            verbose=VERBOSITY,
                                                            save_best_only=True,
                                                            mode='max'),
                                            CSVLogger(TRAINING_LOGS_FILE,
                                                    append=False,
                                                    separator=';')])
    return history
Code Example #6
    def _get_callback_functions(self):
        early_stopping = EarlyStopping(monitor='val_loss', patience=3)
        plot_losses = PlotLossesKeras()
        tb_callback = TensorBoard(log_dir=self.tb_logs_path,
                                  histogram_freq=0,
                                  write_graph=True,
                                  write_images=False,
                                  embeddings_freq=0,
                                  embeddings_layer_names=None,
                                  embeddings_metadata=None)

        checkpoint = ModelCheckpoint(self.weights_loc,
                                     monitor='val_loss',
                                     verbose=1,
                                     save_best_only=True,
                                     mode='min')
        return [early_stopping, plot_losses, tb_callback, checkpoint]
Code Example #7
File: train_model.py  Project: samarth491/ECG_ID
def train_cnn():
    df = pd.read_csv('../normalised_data.csv')
    N = len(df.columns) - 1
    N = str(N)

    df[N] = df[N].apply(str)

    print(
        "============================= DataFrame Loaded ============================"
    )

    X = df.iloc[:, :-1]
    y = df.iloc[:, -1:]

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

    X_train = np.expand_dims(X_train, 2)
    X_test = np.expand_dims(X_test, 2)
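    # add a trailing channel axis so the CNN receives input of shape (samples, features, 1)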

    y_train = pd.get_dummies(y_train)
    y_test = pd.get_dummies(y_test)

    _, feature, depth = X_train.shape
    number_of_patients = y_train.shape[1]
    model = make_model(feature, depth, number_of_patients)

    print(
        "\n======================== Model Architecture Defined =======================\n"
    )

    model.summary()
    num_epochs = 10

    history = model.fit(X_train,
                        y_train,
                        epochs=num_epochs,
                        callbacks=[PlotLossesKeras()],
                        validation_split=0.25)
    score = model.evaluate(X_test, y_test, verbose=1)

    model.save('../trained_model')

    print(
        "\n============================ Training Completed ===========================\n"
    )
Code Example #8
class An_xyz():
    global xyz
    batch_size = 40
    xyz = Sequential()
    # Step 1 - Convolution
    xyz.add(Conv2D(32, (3, 3), input_shape=(256, 256, 3), activation='relu'))
    # Step 2 - Pooling
    xyz.add(MaxPooling2D(pool_size=(2, 2)))
    # Adding a second convolutional layer
    xyz.add(Conv2D(48, (3, 3), activation='relu'))
    xyz.add(MaxPooling2D(pool_size=(2, 2)))
    #Adding third convolutional layer
    xyz.add(Conv2D(32, (3, 3), activation='relu'))
    xyz.add(MaxPooling2D(pool_size=(2, 2)))
    # Step 3 - Flattening
    xyz.add(Flatten())
    # Step 4 - Full connection
    xyz.add(Dense(units=2, activation='relu'))
    xyz.add(Dense(units=2, activation='softmax'))
    # Compiling the CNN
    xyz.compile(optimizer='adam',
                loss='categorical_crossentropy',
                metrics=['accuracy'])
    # Part 2 - Fitting the CNN to the images
    from keras.preprocessing.image import ImageDataGenerator
    train_datagen = ImageDataGenerator(rescale=1. / 255,
                                       shear_range=0.2,
                                       zoom_range=0.2,
                                       horizontal_flip=True)
    test_datagen = ImageDataGenerator(rescale=1. / 255)
    training_set = train_datagen.flow_from_directory(
        'C:\\Users\\anany\\OneDrive\\Desktop\\an_python\\dataset\\face_dataset_train'
    )
    test_set = test_datagen.flow_from_directory(
        'C:\\Users\\anany\\OneDrive\\Desktop\\an_python\\dataset\\face_dataset_test'
    )
    xyz.fit_generator(training_set,
                      steps_per_epoch=400 // batch_size,
                      epochs=10,
                      callbacks=[PlotLossesKeras()],
                      validation_data=test_set,
                      validation_steps=200 // batch_size)
    # Part 3 - Making new predictions
    x = training_set.class_indices
Code Example #9
    def __init__(self, nb_epochs=10, image_size=320, early_stopping_patience=3,
                 n_valid_samples=2560, optimizer='adam',
                 batch_size=16, depth=4, channels=32, n_blocks=2, augment_images=True, debug_sample_size=None):
        self.model_name = 'resnet'
        self.optimizer = 'adagrad'
        self.weight_file_path = MODEL_BINARIES_PATH + self.model_name + '.h5'
        self.n_valid_samples = n_valid_samples
        self.nb_epochs = nb_epochs
        self.image_size = image_size
        self.augment_images = augment_images
        self.batch_size = batch_size
        self.depth = depth
        self.channels = channels
        self.n_blocks = n_blocks

        tb_callback = TensorBoard(log_dir=TB_LOGS_PATH, histogram_freq=0, write_graph=True,
                                  write_images=False, embeddings_freq=0, embeddings_layer_names=None,
                                  embeddings_metadata=None)
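        # besides TensorBoard logging, the callbacks below add live loss plotting, early stopping,
        # LR reduction on plateau, and best-weights checkpointing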

        self.callbacks = [tb_callback, PlotLossesKeras()]
        self.callbacks.append(EarlyStopping(monitor='val_loss', patience=early_stopping_patience))
        self.callbacks.append(ReduceLROnPlateau(monitor='val_loss',
                                                patience=2,
                                                verbose=1,
                                                factor=0.1,
                                                min_lr=0.0001))
        self.callbacks.append(ModelCheckpoint(self.weight_file_path, monitor='val_loss', save_best_only=True))

        if debug_sample_size is not None:
            self.debug_sample_size = debug_sample_size
        self.load_data()
        self.model = self.create_resnet_network(input_size=self.image_size,
                                                channels=self.channels,
                                                n_blocks=self.n_blocks,
                                                depth=self.depth)

        self.model.compile(optimizer=self.optimizer,
                           loss=iou_bce_loss,
                           metrics=['accuracy', mean_iou])
Code Example #10
###### We are going to drop the first channel of y because we don't need background info anymore
y_train = y_train[:,:,:,1:] 
masks/=255
masks = np.concatenate((masks[:,:,:,np.newaxis],masks[:,:,:,np.newaxis],
                        #masks[:,:,:,np.newaxis],masks[:,:,:,np.newaxis]
                        ), axis=3)

"""
Callbacks and model internals
"""

loss    = tf.keras.losses.categorical_crossentropy
metrics = [cu.iou_loss, cu.build_iou_for(label=0)]

optimizer     = tf.keras.optimizers.Adam(lr=learning_rate)
plot_losses   = PlotLossesKeras(outputs=[MatplotlibPlot(figpath=f'models/{model_name}/metrics.png')])
nan_terminate = tf.keras.callbacks.TerminateOnNaN()
ReduceLR      = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', 
                                                     factor=0.1, patience=patience_LR,
                                                     verbose=1, mode='auto', min_delta=0.0001, 
                                                     cooldown=0, min_lr=0)
early_stop    = tf.keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0, 
                                                 patience=patience_training, 
                                                 verbose=2, mode='auto', baseline=None,
                                                 restore_best_weights=False)


csv_logger = tf.keras.callbacks.CSVLogger(f'models/{model_name}/training_log.csv', append=True)

checkpoint = tf.keras.callbacks.ModelCheckpoint(f'models/{model_name}/{model_name}.h5',
                             monitor='val_loss', verbose=1, save_best_only=True, 
Code Example #11
#output layer
model.add(Dense(7, activation= 'softmax'))

#optimizing the layers
opt = Adam(lr=0.0005)

#compilation 
model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])

#summary
model.summary()

epochs = 15
steps_per_epoch = train_generator.n // train_generator.batch_size
validation_steps = validation_generator.n // validation_generator.batch_size

checkpoint = ModelCheckpoint("model_weights.h5", monitor='val_accuracy', save_weights_only=True,
                             mode='max', verbose=1)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1,
                              patience=2, min_lr=0.00001, mode='auto')
callbacks = [PlotLossesKeras(), checkpoint, reduce_lr]
history = model.fit(
    x=train_generator,
    steps_per_epoch=steps_per_epoch,
    epochs=epochs,
    validation_data = validation_generator,
    validation_steps = validation_steps,
    callbacks=callbacks
)

Code Example #12
#
# The batch size defines the number of samples that will be propagated through the network.
#
# Advantages of using a batch size < number of all samples:
#
# * It requires less memory, since the network is trained on only a few samples at a time.
#   That's especially important if you are not able to fit the whole dataset in your machine's memory.
#
# * Typically networks train faster with mini-batches, because the weights are updated after every mini-batch rather than only once per pass over the full dataset.
#
# Disadvantages of using a batch size < number of all samples:
#
# * The smaller the batch the less accurate the estimate of the gradient will be.

print("[INFO] training model...")
history = model.fit(X_train, y_train, epochs=100, batch_size=15, verbose=2, validation_data=(X_test, y_test), callbacks=[PlotLossesKeras()])
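# A hedged illustration of the batch-size trade-off described above (hypothetical toy
# data and layer sizes, not part of the original script): time the same small model at
# a small and a large batch size and compare wall-clock time and final loss.
import time
import numpy as np
import tensorflow as tf

X_demo = np.random.rand(2000, 10).astype("float32")
y_demo = np.random.rand(2000, 1).astype("float32")

for bs in (15, 500):
    demo_model = tf.keras.Sequential([
        tf.keras.layers.Dense(32, activation="relu", input_shape=(10,)),
        tf.keras.layers.Dense(1),
    ])
    demo_model.compile(optimizer="adam", loss="mse")
    start = time.time()
    demo_history = demo_model.fit(X_demo, y_demo, epochs=5, batch_size=bs, verbose=0)
    print("batch_size=%d: %.1fs, final loss=%.4f"
          % (bs, time.time() - start, demo_history.history["loss"][-1]))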


#loss_history = np.array(history)
#np.savetxt("loss_history.txt", loss_history, delimiter=",")

# Plot metrics
print(history.history.keys())

# "Loss"
plt.figure()
plt.plot(history.history['mean_squared_error'])
plt.plot(history.history['val_mean_squared_error'])
plt.title('model MSE')
plt.ylabel('mean squared error')
plt.xlabel('epoch')
Code Example #13
    optimizer='rmsprop',
    loss='categorical_crossentropy',
    metrics=['accuracy'],
)

tbCallBack = callbacks.TensorBoard(log_dir='./TensorBoard',
                                   histogram_freq=0,
                                   write_graph=True,
                                   write_images=True)

model.fit(train,
          train_target,
          epochs=10,
          batch_size=100,
          validation_data=(test, test_target),
          callbacks=[tbCallBack, PlotLossesKeras()])
score = model.evaluate(test, test_target, batch_size=100)

# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% #
# Super simple k-NN

neigh = KNeighborsClassifier()  # default K of 5 turned out to be good
neigh.fit(train, train_target)

k_pred = neigh.predict(test)
k_accuracy = accuracy_score(test_target, k_pred)

print(gnb_accuracy)  # 23.95% (+/- 10.6%)
print(score)  # 46.61 (+/- 1.94%)
print(k_accuracy)  # 40.43 (+/- 0.04%)
Code Example #14
def initScript():
    #	global myArgs, arguments, nbExamples, lastSample, epochs, df, lossFunction, optimizer, activation, Lr, dumpedModelFileName, rmse, batch_size, validation_split, shuffle, earlyStoppingPatience, plotMetrics
    global myArgs, df, plotResolution, pictureFileResolution
    global optimizerName, lossFunctionName, myMetrics, modelTrainingCallbacks, dataIsNormalized, monitoredData
    plotResolution = 150
    pictureFileResolution = 600

    # fix random seed for reproducibility
    seed = 7
    np.random.seed(seed)

    rmse = root_mean_squared_error

    arguments = initArgs()
    Allow_GPU_Memory_Growth()
    pda.options.display.max_rows = 20  #Prints the first max_rows/2 and the last max_rows/2 of each dataframe
    pda.options.display.width = None  #Automatically adjust the display width of the terminal

    myArgs = copyArgumentsToStructure(arguments)

    myArgs.nbExamples = int(myArgs.nbExamples)
    myArgs.epochs = int(myArgs.epochs)
    myArgs.batch_size = int(myArgs.batch_size)

    if myArgs.dataFileName:
        df = pda.read_table(
            myArgs.dataFileName, delim_whitespace=True,
            comment='#')  # The column names are inferred from the data file
        #		df = pda.read_table('dataset10-nCh10.txt', delim_whitespace=True, comment='#', skiprows=[1,2] ) # To read the data from 'dataset*-nCh*.txt'
        myArgs.nbExamples = df.shape[0]
    else:
        X = np.linspace(0, myArgs.lastSample, myArgs.nbExamples)
        df = pda.DataFrame(columns=['X_train', 'y_train'])
        df[df.columns[0]] = X
        df[df.columns[1]] = -5 * X + 10

    dataIsNormalized = False
    if myArgs.lossFunction == 'mse':
        # MSE needs NORMALIZATION
        PrintInfo("Doing Pandas dataframe normalization ...",
                  quiet=myArgs.quiet)
        #	df[ df.columns[0] ] = keras.utils.normalize( df.values )[:,0]
        #	df[ df.columns[1] ] = keras.utils.normalize( df.values )[:,1]
        df = (df - df.mean()) / df.std()
        PrintInfo("DONE.", quiet=myArgs.quiet)
        dataIsNormalized = True

    optimizerName = myArgs.optimizer
    lossFunctionName = myArgs.lossFunction.lower()

    if myArgs.lossFunction == 'mse' and myArgs.epochs < 10: myArgs.epochs = 15

    if myArgs.batch_size == -1:
        if myArgs.nbExamples > 1e2:
            myArgs.batch_size = int(myArgs.nbExamples / myArgs.epochs)
        else:
            myArgs.batch_size = myArgs.nbExamples


#			myArgs.epochs = int(myArgs.nbExamples / 4)

    import keras.optimizers
    if myArgs.Lr:
        if myArgs.optimizer == 'sgd':
            myArgs.optimizer = keras.optimizers.sgd(myArgs.Lr)
        elif myArgs.optimizer == 'rmsprop':
            myArgs.optimizer = keras.optimizers.RMSprop(myArgs.Lr)
        elif myArgs.optimizer == 'adam':
            myArgs.optimizer = keras.optimizers.Adam(myArgs.Lr)
        elif myArgs.optimizer == 'adadelta':
            myArgs.optimizer = keras.optimizers.Adadelta(lr=myArgs.Lr)

    myMetrics = []
    if myArgs.lossFunction == 'mae':
        myMetrics += ['mse']
        myMetrics += [rmse]
    elif myArgs.lossFunction == 'mse':
        myMetrics += [rmse]
        myMetrics += ['mae']
    elif myArgs.lossFunction == 'rmse':
        myArgs.lossFunction = rmse
        myMetrics += ['mse']
        myMetrics += ['mae']
    #myMetrics += [ 'accuracy' ]

    if myArgs.earlyStoppingPatience == -1 and not myArgs.plotMetrics:
        modelTrainingCallbacks = None
    else:
        modelTrainingCallbacks = []

    if myArgs.earlyStoppingPatience != -1:
        from keras.callbacks import EarlyStopping
        if myArgs.nbExamples < 10: monitoredData = 'loss'
        else: monitoredData = 'val_loss'

        modelTrainingCallbacks += [
            EarlyStopping(monitor=monitoredData,
                          patience=myArgs.earlyStoppingPatience)
        ]
        PrintInfo("The monitored data for early stopping is : " +
                  monitoredData)
        PrintInfo("modelTrainingCallbacks = " + str(modelTrainingCallbacks))

    if isnotebook() and myArgs.plotMetrics:  # The metrics can only be plotted in a Jupyter notebook
        from livelossplot import PlotLossesKeras
        modelTrainingCallbacks += [PlotLossesKeras()]
Code Example #15
File: baseline3.py  Project: yangyongjx/mmWave-2
cp_callback = tf.keras.callbacks.ModelCheckpoint(
    checkpoint_path, monitor='val_loss',mode='min', verbose=1, save_best_only=False)

csv_logger = CSVLogger('/content/drive/My Drive/Colab Notebooks/trial3_data/cnn_data_trial3data/training1.log', append=True, separator=',')

batch_size = 16
epochs = 400

# training
history = model.fit(X_train, y_train,
                    batch_size=batch_size,
                    epochs=epochs,
                    initial_epoch = 0,
                    verbose=1,
                    shuffle=True,
                    validation_data=(X_valid, y_valid), callbacks=[es, cp_callback, csv_logger, PlotLossesKeras()])                    
    
# save model
model.save("/content/drive/My Drive/Colab Notebooks/trial3_data/cnn_data_trial3data/model_1_1.h5")
print("Saved model_1.h5\n")
model.save_weights("/content/drive/My Drive/Colab Notebooks/trial3_data/cnn_data_trial3data/model_1_1weights.h5")
model_json = model.to_json()
with open("/content/drive/My Drive/Colab Notebooks/trial3_data/cnn_data_trial3data/model_1_1architecture.json", "w") as json_file:
    json_file.write(model_json)

# evaluating model
X_test, y_test = get_valid_data(mode="test")
X_test = X_test.reshape(-1, 46, 500, 1)
y_test = y_test.reshape(-1, 61)

train_loss, train_acc = model.evaluate(X_train, y_train, verbose=0)
Code Example #16
def fit_stacked_model(model, inputX, inputy, validX, validy):
    
    X = [inputX for _ in range(len(model.input))]
    X_v = [validX for _ in range(len(model.input))]
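    # the stacked model has one input branch per sub-model, so the same array is fed to every branch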
    
    checkpoint_path = "/content/drive/My Drive/Colab Notebooks/stack/cpCombined5-{epoch:04d}.ckpt"
    checkpoint_dir = os.path.dirname(checkpoint_path)
    cp_callback = tf.keras.callbacks.ModelCheckpoint(
        checkpoint_path, monitor='val_loss', mode='min', verbose=1, save_best_only=True)
    
    csv_logger = CSVLogger('/content/drive/My Drive/Colab Notebooks/stack/training5.log', append=True, separator=',')
    model.load_weights('/content/drive/My Drive/Colab Notebooks/stack/cpCombined5-0030.ckpt')
    # fit model
    history = model.fit(X, inputy, epochs=200, initial_epoch=30, verbose=1, validation_data=(X_v, validy), callbacks=[cp_callback, csv_logger, PlotLossesKeras()])
    return history
Code Example #17
# Functional model API
type(base_resnet)

# Add head aka last layer
x = layers.Flatten()(base_resnet.output)
x = layers.Dense(256, activation='relu')(x)
x = layers.Dropout(0.2)(x)
x = layers.Dense(256, activation='relu')(x)
x = layers.Dropout(0.2)(x)
x = layers.Dense(256, activation='relu')(x)
x = layers.Dropout(0.2)(x)
x = layers.Dense(256, activation='relu')(x)
x = layers.Dropout(0.2)(x)
predictions = layers.Dense(10, activation = 'softmax')(x)

# put model together
head_model = Model(inputs = base_resnet.input, outputs = predictions)
head_model.compile(optimizer='adam', loss=losses.sparse_categorical_crossentropy, metrics=['accuracy'])
# %%
es = EarlyStopping(monitor='loss', min_delta=0.01, patience=3)

head_model.fit(X_train, y_train,
          epochs=2,
          batch_size=32,
          validation_split=0.2,
          callbacks=[es, PlotLossesKeras()],
          verbose=1)
# %%
head_model.evaluate(X_test, y_test)
# %%
head_model.save(root_dir.joinpath("models", "resnet-transfer"))
Code Example #18
width_shift_range = 0.3,
height_shift_range=0.3,
rotation_range=30)

# Save the model according to the conditions  
checkpoint = ModelCheckpoint("vgg1921.h5", monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', period=1)
early = EarlyStopping(monitor='val_acc', min_delta=0, patience=10, verbose=1, mode='auto')

# Train the model 
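# note: samples_per_epoch and nb_val_samples are legacy Keras 1 argument names
# (roughly steps_per_epoch and validation_steps in Keras 2)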
model_final.fit_generator(
    train_generator,
    samples_per_epoch=nb_train_samples,
    epochs=epochs,
    validation_data=validation_generator,
    nb_val_samples=nb_validation_samples,
    callbacks=[PlotLossesKeras(), checkpoint, early])
plot_model(model2, to_file='model.png',show_shapes =True)

######Load IMAGE

# load an image from file
image = load_img('result.jpg', target_size=(256, 256,3))
# convert the image pixels to a numpy array
image = img_to_array(image)
plt.imshow(image)
image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
yhat=model2.predict(image)

disease_dict={'Tomato___Bacterial_spot': 0,
 'Tomato___Early_blight': 1,
 'Tomato___Late_blight': 2,
Code Example #19
#decoded = layers.Dense(150, activation='relu')(encoded)
#decoded = layers.Dense(250, activation='relu')(decoded)
#decoded = layers.Dense(500, activation='relu')(decoded)
#decoded = layers.Dense(1000, activation='relu')(decoded)
#decoded = layers.Dense(28 * 28, activation='sigmoid')(decoded)

#autoencoder = Sequential()
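# functional-API autoencoder: maps input_img to its reconstruction `decoded`
# (the encoder/decoder layers are defined earlier in the script; the commented lines above show one variant)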
autoencoder = keras.Model(input_img, decoded)
autoencoder.compile(optimizer='adam', loss='mean_absolute_error')
autoencoder.summary()
#autoencoder.fit(trainX, trainX,
 #               epochs=100,
 #               batch_size=1,
  #              shuffle=True)

history = autoencoder.fit(trainX, trainX, batch_size=128, epochs=100, validation_split=0.1, callbacks=[PlotLossesKeras()])

#encoder = keras.Model(input_img, encoded)
#encoded_input = keras.Input(shape=(width * width * 3,))
#decoder_layer = autoencoder.layers[-1]
#decoder = keras.Model(encoded_input, decoder_layer(encoded_input))


#encoded_imgs = autoencoder.predict(trainX_noise)

import matplotlib.pyplot as plt

encoded_imgs = autoencoder.predict(testX)

n = 10  # How many digits we will display
plt.figure(figsize=(40, 8))
Code Example #20
modelcnn.add(keras.layers.Flatten())
modelcnn.add(keras.layers.Dense(3, activation='softmax'))



# Compiling the model - adaDelta - Adaptive learning
modelcnn.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adadelta(), metrics=['accuracy'])

#Print model summary
print(modelcnn.summary())
# Training and evaluating
batch_size = 50
num_epoch = 10
#Model fit
model_log = modelcnn.fit(np.array(x_train_reshape), np.array(y_train), batch_size=batch_size, epochs=num_epoch, verbose=1,
                         validation_data=(np.array(x_test_reshape), np.array(y_test)), callbacks=[PlotLossesKeras()])

#Train and Test score
train_score = modelcnn.evaluate(np.array(x_train_reshape), np.array(y_train), verbose=1)
test_score = modelcnn.evaluate(np.array(x_test_reshape), np.array(y_test), verbose=1)
print('Train accuracy CNN:', train_score[1])
print('Test accuracy CNN:', test_score[1])

#Plotting of the model training history

# list all data in history
print(model_log.history.keys())
# summarize history for accuracy
plt.plot(model_log.history['acc'])
#plt.plot(model_log.history['val_acc'])
plt.title('model accuracy')
Code Example #21
File: active_learning.py  Project: b73f9/VRNN2018
def train(build_model,
          X,
          Y,
          picker,
          initial_data_idx,
          metrices_names,
          model_name,
          initial_data_size=1000,
          steps=100,
          epochs=100,
          batch_size=128,
          is_ensemble=False):

    # set variables
    labeled_idx = initial_data_idx
    metrices_results = []
    x_train, x_test = X
    y_train, y_test = Y
    #set parameters
    interval = x_train.shape[0] // steps
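    # interval: (presumably) how many new examples the picker adds to the labelled pool at each step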
    x_steps = np.arange(1, steps + 1)
    filepath = './saved_models/best_model{}.h5'.format(model_name)
    callbacks = [
        PlotLossesKeras(max_cols=3),
        EarlyStopping(monitor="val_loss", patience=3),
        ModelCheckpoint(filepath=filepath,
                        monitor='val_loss',
                        save_best_only=True),
    ]

    for i in tqdm(range(1, steps + 1)):
        np.random.shuffle(labeled_idx)
        print("Iteration number: {}, size of training set: {}".format(
            i, len(labeled_idx)))

        #create & fit model
        model = build_model()
        model.fit(x=x_train[labeled_idx],
                  y=y_train[labeled_idx],
                  validation_data=[x_test, y_test],
                  callbacks=callbacks,
                  epochs=epochs,
                  batch_size=batch_size)

        # evaluate model
        # model.load_weights(filepath)  # skip the result and the indices
        result = model.evaluate(x_test, y_test)
        metrices_results.append(result)

        # whether the picker is an ensemble picker or not
        # decrease uncertainty
        if labeled_idx.shape[0] != x_train.shape[0]:
            if not is_ensemble:
                labeled_idx = picker(model, x_train, labeled_idx, interval)
            else:
                labeled_idx = picker(build_model, [x_train, y_train],
                                     labeled_idx, interval)

    # plot results
    n_rows = (len(metrices_results[0]) - 1) // 3 + 1
    n_cols = 3
    fig, ax = plt.subplots(n_rows, n_cols, figsize=[20, 10])
    fig.autofmt_xdate(rotation=70)
    for i, result in enumerate(zip(*metrices_results)):
        first_idx = i // 3
        second_idx = i % 3
        ax[first_idx][second_idx].grid(True)
        ax[first_idx][second_idx].set_title(metrices_names[i].upper() +
                                            "_TEST")
        ax[first_idx][second_idx].set_xlabel("DATA SIZE")
        ax[first_idx][second_idx].set_ylabel(metrices_names[i])
        ax[first_idx][second_idx].set_xticks(interval * x_steps)
        ax[first_idx][second_idx].plot(x_steps * interval, result)

    plt.show()

    return metrices_results
Code Example #22
model.summary()

#opt = keras.optimizers.Adadelta(0.1,decay=1e-4)
ams=keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=True)
model.compile(optimizer = ams, loss = 'categorical_crossentropy', metrics = ['accuracy'])
datagen = ImageDataGenerator(
    rotation_range=15,
    horizontal_flip=True,
    width_shift_range=0.1,
    height_shift_range=0.1,
    #zoom_range=0.3
    )
datagen.fit(X_train)

log=model.fit_generator(datagen.flow(X_train, Y_train, batch_size=32),
                    steps_per_epoch = len(X_train) / 32, epochs=150, callbacks=[PlotLossesKeras()], validation_data=(vx, vy))
#log=cnn.fit(x_train ,y_train,validation_split=0.2, callbacks=[PlotLossesKeras()], epochs = 50)
plt.plot(log.history['acc'])
plt.plot(log.history['val_acc'])
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
plt.plot(log.history['loss'])
plt.plot(log.history['val_loss'])
plt.title('model error')
plt.ylabel('error')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
Code Example #23
cp_callback = tf.keras.callbacks.ModelCheckpoint(
    checkpoint_path, monitor='val_loss',mode='min', verbose=1, save_best_only=True)
csv_logger = CSVLogger('/content/drive/My Drive/Colab Notebooks/lstm_models/training2.log', append=True, separator=',')

# training
history = model.fit_generator(
    generate_array(mode="train"),
    steps_per_epoch = 863,
    validation_data = (X_valid, Y_valid),
    validation_steps = 863,
    epochs = 200,
    verbose=1,
    shuffle=True,
    initial_epoch = 0,
    callbacks=[es, cp_callback, csv_logger, PlotLossesKeras()])
	
# plot loss and accuracy graphs for visualisation
val_acc = history.history['val_acc']
acc = history.history['acc']
loss = history.history['loss']
val_loss = history.history['val_loss']

epoch_num = np.arange(0, len(val_acc), dtype=int)
plot1, = plt.plot(epoch_num, acc)
plot2, = plt.plot(epoch_num, val_acc)
plt.legend([plot1, plot2],['training accuracy', 'validation accuracy'])
plt.show()
plot1, = plt.plot(epoch_num, loss)
plot2, = plt.plot(epoch_num, val_loss)
plt.legend([plot1, plot2],['training loss', 'validation loss'])
Code Example #24
    img = transform.resize(image, new_size)
    testX = np.append(testX, img)
testX = np.reshape(testX, (len(testY), width * width * 3))
#resizeImages(testX, testY, parasitized_test_path, uninfected_test_path)

shuffle(trainX, trainY)
shuffle(testX, testY)

print(trainY)

#trainX /= 255.0
#testX /= 255.0

model = Sequential()
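# simple fully connected classifier on flattened (width * width * 3) image vectors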
model.add(Dense(200, input_dim=(width * width * 3), activation="sigmoid"))
model.add(Dense(300, activation="sigmoid"))
model.add(Dense(300, activation="sigmoid"))
model.add(Dense(300, activation="sigmoid"))
model.add(Dense(10, activation="sigmoid"))
 
model.add(Dense(1, activation="sigmoid"))

#model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
model.compile(loss=tf.keras.losses.KLD, optimizer='rmsprop', metrics=['accuracy'])

history = model.fit(trainX, trainY, batch_size=1, epochs=1000, validation_split=0.1, callbacks=[PlotLossesKeras()])

model.evaluate(testX, testY, batch_size=1,callbacks=[PlotLossesKeras()])

print(testX.shape)
print(trainX.shape)
Code Example #25
def train_model(model: models.Model,
                signal_data: np.array,
                label_data: np.array,
                file_path: str = None,
                epochs: int = None,
                add_class_weights=False,
                plot_progress_no_callbacks=False):
    """
    Args:
        file_path: specify where to save the model for future use
        add_class_weights: if set, adds 'balanced' class weights to account
            for skewed data (see tensorflow docs)
        plot_progress_no_callbacks: if set, detailed training progress stats
            are shown during training, but all other callbacks are deactivated
            due to incompatibility
    """
    if not plot_progress_no_callbacks:
        early = EarlyStopping(monitor="val_accuracy",
                              mode="max",
                              patience=5,
                              verbose=1)
        redonplat = ReduceLROnPlateau(monitor="val_accuracy",
                                      mode="max",
                                      patience=3,
                                      verbose=2)

        callbacks = [early, redonplat]

        if file_path:
            checkpoint = ModelCheckpoint(file_path,
                                         monitor='val_accuracy',
                                         verbose=1,
                                         save_best_only=True,
                                         mode='max')
            callbacks.append(checkpoint)
    else:
        callbacks = [PlotLossesKeras()]

    kwargs = {'x': signal_data,
              'y': label_data,
              'batch_size': 256,
              'epochs': epochs or 10,
              'callbacks': callbacks,
              'validation_split': 0.1}

    if add_class_weights:
        classes = np.sort(np.unique(label_data))
        class_weights = class_weight.compute_class_weight(
            class_weight='balanced',
            classes=classes,
            y=label_data)
        weight_dict = dict(zip(np.sort(np.unique(label_data)), class_weights))

        # In case classes are not contiguous starting from 0, fill those
        # weights for compatibility with tensorflow
        for i in range(max(weight_dict.keys())):
            if i not in weight_dict:
                weight_dict[i] = 0

        kwargs['class_weight'] = weight_dict

    model.fit(**kwargs)
Code Example #26
    target_size=(IMAGE_WIDTH, IMAGE_HEIGHT),
    batch_size=BATCH_SIZE,
    class_mode="categorical",
    shuffle=False)

import warnings
warnings.filterwarnings("ignore")

# Training
H = model.fit_generator(
    training_generator,
    steps_per_epoch=len(training_generator.filenames) // BATCH_SIZE,
    epochs=EPOCHS,
    validation_data=validation_generator,
    validation_steps=len(validation_generator.filenames) // BATCH_SIZE,
    callbacks=[PlotLossesKeras(), CSVLogger(TRAINING_LOGS_FILE,
                                            append=False,
                                            separator=";")], 
    verbose=1)

#model.save_weights(MODEL_FILE)

N = EPOCHS
plt.style.use("seaborn-white")
plt.figure()
plt.plot(np.arange(0, N), H.history["loss"], label="train_loss")
plt.plot(np.arange(0, N), H.history["val_loss"], label="test_loss")
plt.title("Training Loss on Dataset")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend(loc="lower left")
Code Example #27
# second hidden layer
model.add(Dense(128, activation='sigmoid', kernel_regularizer=l1(1e-5)))
model.add(Dropout(0.25))

# output layer
model.add(Dense(output_classes, activation='softmax'))

# compile the model
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

# In[17]:

callbacks = [
    PlotLossesKeras(),
    EarlyStopping(monitor='val_loss', patience=10),
    ReduceLROnPlateau(monitor='val_loss', patience=5, factor=1 / 3)
]

# In[18]:

#plot_model(model, show_shapes=True, show_layer_names=True)
model.summary()

# In[19]:

model.fit(X_train,
          y_train,
          batch_size=64,
          epochs=100,
Code Example #28
model.compile(optimizer=adawarm, loss=loss_dict[mode], metrics=['acc'])
# model.compile(optimizer='adam',loss='binary_crossentropy',metrics=['acc'])

es = keras.callbacks.EarlyStopping(monitor='val_loss',
                                   patience=10,
                                   restore_best_weights=True)
cp = keras.callbacks.ModelCheckpoint('best_acc_model.h5', monitor='val_acc')
csvl = keras.callbacks.CSVLogger('train_log.csv')

history = model.fit([ind_array, seg_array, param],
                    y_enc,
                    epochs=epochs,
                    batch_size=batch_size,
                    validation_split=0.2,
                    callbacks=[es, cp, csvl, PlotLossesKeras()])

model.save_weights('best_model.h5')

# log = pd.read_csv('NUS.csv')

plt.figure()
plt.title('Loss')
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.legend(['loss', 'val_loss'])
plt.grid()
plt.figure()
plt.title('Acc')
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
Code Example #29
    
    model_lvh.add(Dense(2, activation='sigmoid'))
#   

    weights = {True: 3,
               False: 1}
    
    model_lvh.compile(loss='categorical_crossentropy',
                  optimizer=Adam(lr=0.0012,),
                  metrics=['accuracy'])

    history = model_lvh.fit(x_train, y_train, 
                        batch_size=64, 
                        epochs=100, 
                        validation_data=(x_test, y_test),
                        callbacks=[PlotLossesKeras()],
                        verbose=1,
                        class_weight=weights)

#%% Model save/load

model_lvh = load_model(WDIR + "source/src/mdl/lvh_cnn.hdf5")
#model.save(WDIR + "source/src/mdl/lvh.hdf5")

#%% Model evaluation

y_pred = model_lvh.predict(x_test)
y_pred = np.argmax(y_pred, axis=1)

y_true = np.argmax(y_test, axis=1)
Code Example #30
def initKeras() :
	global optimizerName, lossFunctionName, myMetrics, modelTrainingCallbacks

	myArgs.epochs = int( myArgs.epochs )

	if myArgs.outputLayersUnits == -1 : myArgs.outputLayersUnits = nbOutputVariables
	if myArgs.hiddenLayersUnits == -1 : myArgs.hiddenLayersUnits = nbInputVariables * 2

	optimizerName = myArgs.optimizer
	lossFunctionName = myArgs.lossFunction.lower()

	if myArgs.lossFunction == 'mse' and myArgs.epochs < 10 : myArgs.epochs = 15

	if myArgs.batch_size_Fraction == -1 :
		if nbExperiments / myArgs.epochs < 1 :
			if nbExperiments > 1e6 :
				myArgs.batch_size_Fraction = 0.1
			else:
				myArgs.batch_size_Fraction = 1
		else :
			myArgs.batch_size_Fraction = 1 / myArgs.epochs

	if not myArgs.batch_size_Fraction :
		PrintError( "The chosen batch_size is %f, too small compared to %d" %(nbExperiments/myArgs.epochs, myArgs.epochs) )
		Exit(1, myArgs.md)

	import keras.optimizers
	if myArgs.Lr :
		if   myArgs.optimizer == 'sgd' :
			myArgs.optimizer = keras.optimizers.sgd( lr = myArgs.Lr )
		elif myArgs.optimizer == 'rmsprop' :
			myArgs.optimizer = keras.optimizers.RMSprop( lr = myArgs.Lr )
		elif myArgs.optimizer == 'adam' :
			myArgs.optimizer = keras.optimizers.Adam( lr = myArgs.Lr )
		elif myArgs.optimizer == 'adadelta' :
			myArgs.optimizer = keras.optimizers.Adadelta( lr = myArgs.Lr )
	
	rmse = root_mean_squared_error
	myMetrics = []
	if   myArgs.lossFunction == 'mae' :
		myMetrics += [ 'mse' ]
		myMetrics += [ rmse ]
	elif myArgs.lossFunction == 'mse' :
		myMetrics += [ rmse ]
		myMetrics += [ 'mae' ]
	elif myArgs.lossFunction == 'rmse' :
		myArgs.lossFunction = rmse
		myMetrics += [ 'mse' ]
		myMetrics += [ 'mae' ]
	#myMetrics += [ 'accuracy' ]

	if myArgs.earlyStoppingPatience == -1 and not myArgs.plotMetricsLive :
		modelTrainingCallbacks = None
	else :
		modelTrainingCallbacks = []
	
	if myArgs.earlyStoppingPatience != -1 :
		from keras.callbacks import EarlyStopping
		if nbExperiments < 10 : monitoredData = 'loss'
		else : monitoredData = 'val_loss'

		modelTrainingCallbacks += [ EarlyStopping( monitor= monitoredData, patience = myArgs.earlyStoppingPatience ) ]
		PrintInfo( "The monitored data for early stopping is : " + monitoredData )
		PrintInfo( "modelTrainingCallbacks = " + str(modelTrainingCallbacks) )

	if isnotebook() and myArgs.plotMetricsLive : # The metrics can only be plotted in a jupyter notebook
		from livelossplot import PlotLossesKeras
		modelTrainingCallbacks += [ PlotLossesKeras() ]