def train_and_plot_model(train_x, val_x):
    '''
    Train the convolutional autoencoder, plot the training/validation loss curves,
    and save the trained model to disk.
    '''
    no_epochs = 20
    init_lr = 1e-3
    batch_size = 64

    # construct our convolutional autoencoder
    logger.info("Starting building of Conv-Autoencoder")
    autoencoder = ConvAutoencoder.build(256, 256, 3)
    autoencoder.compile(loss="mse", optimizer=Adam(learning_rate=init_lr))

    # train the convolutional autoencoder
    H = autoencoder.fit(train_x,
                        train_x,
                        validation_data=(val_x, val_x),
                        epochs=no_epochs,
                        batch_size=batch_size)

    # plotting and saving the training history
    N = np.arange(0, no_epochs)
    plt.style.use('fivethirtyeight')
    plt.figure()
    plt.plot(N, H.history["loss"], label="train_loss")
    plt.plot(N, H.history["val_loss"], label="val_loss")
    plt.title("Training Loss and Accuracy")
    plt.xlabel("Epoch #")
    plt.ylabel("Loss/Accuracy")
    plt.legend(loc="lower left")
    plt.savefig("plot.png")

    # serialize the autoencoder model to disk
    logger.info("Saving model in .h5 format")
    autoencoder.save("autoenc2.h5", save_format="h5")
def train(args):
    # initialize the number of epochs to train for and batch size
    EPOCHS = 25
    BS = 32

    # load the MNIST dataset
    print("[INFO] loading MNIST dataset...")
    ((trainX, _), (testX, _)) = tensorflow.keras.datasets.mnist.load_data()

    # add a channel dimension to every image in the dataset, then scale
    # the pixel intensities to the range [0, 1]
    trainX = np.expand_dims(trainX, axis=-1)
    testX = np.expand_dims(testX, axis=-1)
    trainX = trainX.astype("float32") / 255.0
    testX = testX.astype("float32") / 255.0

    # construct our convolutional autoencoder
    print("[INFO] building autoencoder...")
    (encoder, decoder, autoencoder) = ConvAutoencoder.build(28, 28, 1)
    opt = tensorflow.keras.optimizers.Adam(learning_rate=1e-3)
    autoencoder.compile(loss="mse", optimizer=opt)

    # train the convolutional autoencoder
    H = autoencoder.fit(trainX, trainX,
                        validation_data=(testX, testX),
                        epochs=EPOCHS,
                        batch_size=BS)

    return H, autoencoder
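
# Illustrative driver (added; not in the original example): call train(), report the
# final losses, and persist the fitted model. "args" is whatever argparse namespace
# the surrounding script builds; it is not used inside train() itself.
H, autoencoder = train(args)
print("final train loss: {:.4f}".format(H.history["loss"][-1]))
print("final val loss:   {:.4f}".format(H.history["val_loss"][-1]))
autoencoder.save("mnist_conv_autoencoder.h5")
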
Example #3
 def train(self):
     # construct our convolutional autoencoder
     print("[INFO] building autoencoder...")
     (encoder, decoder, self.autoencoder) = ConvAutoencoder.build(28, 28, 1)
     opt = Adam(learning_rate=1e-3)
     self.autoencoder.compile(loss="mse", optimizer=opt)
     # train the convolutional autoencoder
     self.H = self.autoencoder.fit(self.trainXNoisy,
                                   self.trainX,
                                   validation_data=(self.testXNoisy,
                                                    self.testX),
                                   epochs=self.EPOCHS,
                                   batch_size=self.BS)
Example #4
 def get(self):
     if self.train_config.MODEL == self.train_config.BASIC:
         return Basic(self.train_config)
     if self.train_config.MODEL == self.train_config.UNET:
         return Unet(self.train_config.channels,
                     input_size=(self.train_config.img_width,
                                 self.train_config.img_height,
                                 self.train_config.channels),
                     pretrained_weights=None)
     if self.train_config.MODEL == self.train_config.CCGAN:
         return CCGAN()
     if self.train_config.MODEL == self.train_config.CONV:
         return ConvAutoencoder.build(self.train_config.img_width,
                                      self.train_config.img_height,
                                      self.train_config.channels,
                                      filters=(16, 32, 64))

trainXNoisyBorder, testXNoisyBorder = Noises.border(
    trainX[numberOfTrainX * 4:numberOfTrainX * 5],
    testX[numberOfTestX * 4:numberOfTestX * 5])
trainXNoisyNoNoise = trainX[numberOfTrainX * 5:]
testXNoisyNoNoise = testX[numberOfTestX * 5:]

trainXNoisy = np.concatenate(
    (trainXNoisyGaussian, trainXNoisySpeckle, trainXNoisySaltAndPepper,
     trainXNoisyBlock, trainXNoisyBorder, trainXNoisyNoNoise))
testXNoisy = np.concatenate(
    (testXNoisyGaussian, testXNoisySpeckle, testXNoisySaltAndPepper,
     testXNoisyBlock, testXNoisyBorder, testXNoisyNoNoise))
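
# The Noises helpers used above (gaussian, speckle, saltAndPepper, block, border) are
# not shown in this example. A minimal sketch of what the Gaussian variant might look
# like, assuming float images scaled to [0, 1]; the real implementation may differ.
class Noises:
    @staticmethod
    def gaussian(train_images, test_images, sigma=0.5):
        # add zero-mean Gaussian noise and clip back into the valid pixel range
        noisy_train = np.clip(
            train_images + np.random.normal(0.0, sigma, train_images.shape), 0.0, 1.0)
        noisy_test = np.clip(
            test_images + np.random.normal(0.0, sigma, test_images.shape), 0.0, 1.0)
        return noisy_train, noisy_test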

# initialize convolutional autoencoder
cae = ConvAutoencoder()
file_path = os.path.join('models', 'convnetDenoisingAE' + args["noise"] + '.h5')
if os.path.isfile(file_path):
    # loading existing model
    print("Loading existing model...")
    autoencoder = cae.load_model(file_path)
else:
    # construct our convolutional autoencoder
    print("[INFO] building autoencoder...")
    autoencoder = cae.build(28, 28, 1, args["noise"])
    opt = Adam(learning_rate=1e-3)
    autoencoder.compile(loss="mse", optimizer=opt)
    # train the convolutional autoencoder
    H = autoencoder.fit(trainXNoisy,
                        trainX,
                        validation_data=(testXNoisy, testX),
                        epochs=EPOCHS,    # EPOCHS and BS are assumed to be set earlier in the script
                        batch_size=BS)
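
    # Plausible continuation (added; not in the original snippet): persist the freshly
    # trained model so the os.path.isfile() check above can reuse it on the next run.
    # Standard Keras model saving is assumed here.
    autoencoder.save(file_path)
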
# build the training and testing splits
(trainX, testX) = train_test_split(images, test_size=0.2, random_state=42)

trainX = trainX.reshape(-1, 28, 28, 1)
trainX = trainX.astype('float32')
trainX /= 255.0

testX = testX.reshape(-1, 28, 28, 1)
testX = testX.astype('float32')
testX /= 255.0


# build the model
print("[INFO] building autoencoder...")
(encoder, decoder, autoencoder) = ConvAutoencoder.build(28, 28, 1)

# set up the optimizer
opt = tf.keras.optimizers.Adam(learning_rate=lr, decay=lr / epochs)
# note: accuracy is not a very meaningful metric for a reconstruction task
autoencoder.compile(loss='mse', optimizer=opt, metrics=['acc'])

# train the autoencoder
H = autoencoder.fit(trainX, trainX,
                    validation_data=(testX, testX),
                    epochs=epochs,
                    batch_size=batch_size)

print("[INFO] making predictions...")
decoded = autoencoder.predict(testX)
vis = visualize_predictions(decoded, testX)
cv2.imwrite("recon_vis.png", vis)

N = np.arange(0, epochs)
plt.style.use("ggplot")
    def build(self, height, width, depth, filters=(32, 64), latentDim=16):
        inputShape = (height, width, depth)
        inputs = Input(shape=inputShape)

        # load the pre-trained denoising autoencoders from disk
        print("Loading existing autoencoders...")

        file_path = os.path.join('models', 'convnetDenoisingAEgaussian.h5')
        if os.path.isfile(file_path):
            cae = ConvAutoencoder()
            autoencoder_gaussian = cae.load_model(file_path)
        file_path = os.path.join('models', 'convnetDenoisingAEspeckle.h5')
        if os.path.isfile(file_path):
            cae = ConvAutoencoder()
            autoencoder_speckle = cae.load_model(file_path)
        file_path = os.path.join('models', 'convnetDenoisingAEsaltAndPepper.h5')
        if os.path.isfile(file_path):
            cae = ConvAutoencoder()
            autoencoder_saltAndPepper = cae.load_model(file_path)
        file_path = os.path.join('models', 'convnetDenoisingAEblock.h5')
        if os.path.isfile(file_path):
            cae = ConvAutoencoder()
            autoencoder_block = cae.load_model(file_path)
        file_path = os.path.join('models', 'convnetDenoisingAEborder.h5')
        if os.path.isfile(file_path):
            cae = ConvAutoencoder()
            autoencoder_border = cae.load_model(file_path)
        file_path = os.path.join('models', 'convnetDenoisingAEnoNoise.h5')
        if os.path.isfile(file_path):
            cae = ConvAutoencoder()
            autoencoder_no_noise = cae.load_model(file_path)

        # run the same input through each pre-trained denoiser
        x_gaussian = autoencoder_gaussian(inputs)
        x_speckle = autoencoder_speckle(inputs)
        x_saltAndPepper = autoencoder_saltAndPepper(inputs)
        x_block = autoencoder_block(inputs)
        x_border = autoencoder_border(inputs)
        x_noNoise = autoencoder_no_noise(inputs)

        # concatenate the denoiser outputs
        combined = concatenate([
            x_gaussian, x_speckle, x_saltAndPepper, x_block, x_border,
            x_noNoise
        ])

        # collapse the six concatenated outputs back to a single channel and
        # reshape to match the 28x28x1 input/output size
        combined = Dense(1, input_shape=(6, ))(combined)
        combined = Reshape((28, 28, 1))(combined)

        # apply RELU => LINEAR (Don't know if this is needed)
        x = Activation("relu")(combined)
        x = Activation("linear")(x)

        # our model is the combination of the pre-trained autoencoders
        self.model = Model(inputs=inputs, outputs=x)

        return self.model
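
# Illustrative usage sketch (added; not part of the original example). The class that
# owns build() is not shown above, so EnsembleDenoiser below is a hypothetical name;
# inputs are 28x28x1 images preprocessed like the earlier MNIST examples.
from tensorflow.keras.optimizers import Adam

ensemble = EnsembleDenoiser()    # hypothetical owner of the build() method above
model = ensemble.build(28, 28, 1)
model.compile(loss="mse", optimizer=Adam(learning_rate=1e-3))
model.summary()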