Example #1
    def train_classifier(self, epoch=300, load=False):

        self.collect_sampled_data_classification()

        #classification model
        input_dt, encoded_d, out = self.encoder1D()
        self.classifier = Model(input_dt, out)
        self.classifier.compile(optimizer=Adam(lr=1e-3),
                                loss="categorical_crossentropy",
                                metrics=['accuracy'])
        self.classifier.summary()
        plot_model(self.classifier, to_file='readme/classifier.png')

        if not load:
            plot_losses = util.PlotLosses()
            self.classifier.fit(self.D_classify,
                                to_categorical(self.M_label_classify),
                                epochs=epoch,
                                batch_size=32,
                                shuffle=True,
                                validation_split=0.2,
                                callbacks=[plot_losses])

            self.classifier.save('readme/classifier.h5')
        else:
            print("Trained model loaded")
            self.classifier = load_model('readme/classifier.h5')
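All four examples pass a util.PlotLosses() callback to fit, but its implementation is not part of these snippets. For reference only, a minimal live-plotting callback along those lines could look like the sketch below; this is an assumption about what util.PlotLosses does, not its actual code.

import matplotlib.pyplot as plt
from keras.callbacks import Callback

class PlotLosses(Callback):
    """Minimal stand-in for util.PlotLosses: redraw loss curves after each epoch."""

    def on_train_begin(self, logs=None):
        self.losses, self.val_losses = [], []

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        self.losses.append(logs.get('loss'))
        self.val_losses.append(logs.get('val_loss'))
        plt.clf()
        plt.plot(self.losses, label='loss')
        plt.plot(self.val_losses, label='val_loss')
        plt.legend()
        plt.pause(0.001)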
Example #2
    def train_regressor(self, epoch, load=False):

        self.collect_resampled_data_inversion()

        #regression model
        input_dt, encoded_d, out = self.encoder1D()
        decoded_m = self.decoder2D(encoded_d)

        self.regressor = Model(input_dt, decoded_m)
        self.regressor.compile(optimizer=Adam(lr=1e-3),
                               loss="mean_squared_error",
                               metrics=['mse'])
        self.regressor.summary()
        plot_model(self.regressor, to_file='readme/regressor.png')

        if not load:
            plot_losses = util.PlotLosses()
            self.regressor.fit(self.D_regression,
                               self.M_regression,
                               epochs=epoch,
                               batch_size=32,
                               shuffle=True,
                               validation_split=0.2,
                               callbacks=[plot_losses])

            self.regressor.save('readme/regressor.h5')
        else:
            print("Trained model loaded")
            self.regressor = load_model('readme/regressor.h5')
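Once train_regressor has written readme/regressor.h5, the model can be reloaded for inference independently of this class. A minimal sketch follows; the (128, 1) input shape is only a placeholder assumption, since the real shape comes from encoder1D and collect_resampled_data_inversion.

import numpy as np
from keras.models import load_model

# Hypothetical inference sketch; the (128, 1) input shape is an assumption.
regressor = load_model('readme/regressor.h5')
d_new = np.random.rand(4, 128, 1).astype('float32')   # placeholder data batch
m_pred = regressor.predict(d_new, batch_size=4)
print(m_pred.shape)                                    # predicted 2D model grids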
Example #3
    def train_var_autoencoder2D(self, x_train, load=False):
        #set loss function, optimizer and compile
        input_image, encoded_image, z_mean, z_log_var = self.encoder2D()
        decoded_image = self.decoder2D(encoded_image)

        #define the variational loss and mse loss (equal weighting)
        def vae_loss(input_image, decoded_image):
            recons_loss = K.sum(mse(input_image, decoded_image))
            kl_loss = (-0.5) * K.sum(
                1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
            return K.mean(recons_loss + kl_loss)

        #add custom loss
        get_custom_objects().update({"vae_loss": vae_loss})

        self.AE_m2m = Model(input_image, decoded_image)
        opt = keras.optimizers.Adam(lr=1e-3)
        self.AE_m2m.compile(optimizer=opt, loss=vae_loss)

        #get summary of architecture parameters and plot arch. diagram
        self.AE_m2m.summary()
        plot_model(self.AE_m2m, to_file='AE_m2m_var.png')

        #train the neural network
        if not load:
            plot_losses = util.PlotLosses()
            self.AE_m2m.fit(x_train,
                            x_train,
                            epochs=100,
                            batch_size=32,
                            shuffle=True,
                            validation_split=0.3,
                            callbacks=[plot_losses])
            #save trained model
            self.AE_m2m.save('AE_m2m_var.h5')
        else:
            #load an already trained model
            print("Trained model loaded")
            self.AE_m2m = load_model('AE_m2m_var.h5')

        #set the encoder model
        self.AE_m2z = Model(input_image, encoded_image)

        #set the decoder model
        zm_dec = Input(shape=(self.z_dim, ))
        _ = self.AE_m2m.layers[17](zm_dec)
        for i in range(18, 30):
            _ = self.AE_m2m.layers[i](_)
        decoded_image_ = self.AE_m2m.layers[30](_)
        self.AE_z2m = Model(zm_dec, decoded_image_)
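The custom vae_loss above adds a reconstruction term to the KL divergence between the approximate posterior N(z_mean, exp(z_log_var)) and a standard normal prior. The KL term can be sanity-checked in isolation with plain NumPy, as in this small sketch (toy values, not data from the examples):

import numpy as np

# KL(N(mu, sigma^2) || N(0, 1)) = -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
mu = np.array([0.5, -0.3])
log_var = np.array([0.1, -0.2])
kl = -0.5 * np.sum(1.0 + log_var - mu**2 - np.exp(log_var))
print(kl)   # roughly 0.18 for these toy values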
Example #4
    def train_autoencoder2D(self, x_train, load=False):
        #set loss function, optimizer and compile
        input_image, encoded_image = self.encoder2D()
        decoded_image = self.decoder2D(encoded_image)

        self.AE_m2m = Model(input_image, decoded_image)
        opt = keras.optimizers.Adam(lr=1e-3)
        self.AE_m2m.compile(optimizer=opt, loss="mse", metrics=['mse'])

        #get summary of architecture parameters and plot arch. diagram
        self.AE_m2m.summary()
        plot_model(self.AE_m2m, to_file='AE_m2m.png')

        #train the neural network
        if not load:
            plot_losses = util.PlotLosses()
            self.AE_m2m.fit(x_train,
                            x_train,
                            epochs=100,
                            batch_size=32,
                            shuffle=True,
                            validation_split=0.3,
                            callbacks=[plot_losses])
            #save trained model
            self.AE_m2m.save('AE_m2m.h5')
        else:
            #load an already trained model
            #some bug here, self.AE_m2z and self.AE_z2m not working
            #when model is loaded.
            print("Trained model loaded")
            self.AE_m2m = load_model('AE_m2m.h5')

        #set the encoder model
        self.AE_m2z = Model(input_image, encoded_image)

        #set the decoder model
        zm_dec = Input(shape=(self.z_dim, ))
        _ = self.AE_m2m.layers[14](zm_dec)
        for i in range(15, 27):
            _ = self.AE_m2m.layers[i](_)
        decoded_image_ = self.AE_m2m.layers[27](_)
        self.AE_z2m = Model(zm_dec, decoded_image_)
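The comment in the load branch above notes that self.AE_m2z and self.AE_z2m do not work once the model is loaded from disk. One likely cause is that the tensors returned by the fresh encoder2D() call belong to a newly built, untrained graph rather than to the loaded model. A possible workaround, sketched below, rebuilds the sub-models from the loaded model's own layers; the bottleneck index 13 is an assumption chosen only to mirror the decoder indices 14..27 used above.

from keras.layers import Input
from keras.models import Model, load_model

# Hedged workaround sketch -- layer index 13 as the encoder output is an assumption.
loaded = load_model('AE_m2m.h5')
AE_m2z = Model(loaded.input, loaded.layers[13].output)

zm_dec = Input(shape=loaded.layers[14].input_shape[1:])
x = zm_dec
for i in range(14, 28):
    x = loaded.layers[i](x)
AE_z2m = Model(zm_dec, x)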
    def train_autoencoder2D_dual(self, x_train_reg, x_train, load=False):
        #autoencoder for regression
        input_image_reg, encoded_image_reg = self.encoder2D()
        decoded_image_reg = self.decoder2D(encoded_image_reg)

        self.AE_m2m_reg = Model(input_image_reg, decoded_image_reg)
        opt = keras.optimizers.Adam(lr=1e-3)
        self.AE_m2m_reg.compile(optimizer=opt, loss="mse", metrics=['mse'])

        self.AE_m2m_reg.summary()

        #autoencoder for reconstruction
        input_image, encoded_image = self.encoder2D()
        decoded_image = self.decoder2D(encoded_image)

        self.AE_m2m = Model(input_image, decoded_image)
        opt = keras.optimizers.Adam(lr=1e-3)
        self.AE_m2m.compile(optimizer=opt, loss="mse", metrics=['mse'])

        self.AE_m2m.summary()

        #train the neural network alternatingly
        totalEpoch = 150
        plot_losses1 = util.PlotLosses()
        plot_losses2 = util.PlotLosses()
        history1 = History()
        history2 = History()

        AE_reg = np.zeros([totalEpoch, 4])
        AE = np.zeros([totalEpoch, 4])

        for i in range(totalEpoch):
            #train main reg model
            self.AE_m2m_reg.fit(x_train_reg,
                                x_train,
                                epochs=1,
                                batch_size=128,
                                shuffle=True,
                                validation_split=0.2,
                                callbacks=[
                                    plot_losses1,
                                    EarlyStopping(monitor='loss', patience=60),
                                    history1
                                ])
            #copy loss
            AE_reg[i, :] = np.squeeze(
                np.asarray(list(history1.history.values())))

            #copy weights from the reg model to the recons model
            copy_idxs = range(14, 27 + 1)
            for c in copy_idxs:
                self.AE_m2m.layers[c].set_weights(
                    self.AE_m2m_reg.layers[c].get_weights())

            #train model recons AE
            self.AE_m2m.fit(x_train,
                            x_train,
                            epochs=1,
                            batch_size=128,
                            shuffle=True,
                            validation_split=0.2,
                            callbacks=[
                                plot_losses2,
                                EarlyStopping(monitor='loss', patience=60),
                                history2
                            ])

            #copy recons into the reg model
            for c in copy_idxs:
                self.AE_m2m_reg.layers[c].set_weights(
                    self.AE_m2m.layers[c].get_weights())

            #copy loss
            AE[i, :] = np.squeeze(np.asarray(list(history2.history.values())))

            #write the loss figure to disk every epoch for monitoring
            figs = util.plotAllLosses(AE_reg, AE)
            figs.savefig('Dual_Losses.png')

        #set the encoder model
        self.AE_m2z_reg = Model(input_image_reg, encoded_image_reg)
        self.AE_m2z = Model(input_image, encoded_image)

        #set the decoder model
        zm_dec = Input(shape=(self.z_dim, ))
        _ = self.AE_m2m.layers[14](zm_dec)
        for i in range(15, 27):
            _ = self.AE_m2m.layers[i](_)
        decoded_image_ = self.AE_m2m.layers[27](_)
        self.AE_z2m = Model(zm_dec, decoded_image_)
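The dual-training loop above alternates between the regression and reconstruction autoencoders and synchronises their decoder weights layer by layer with get_weights/set_weights. That pattern in isolation looks like the following self-contained toy sketch (two small stand-in models, not the autoencoders above):

import numpy as np
from keras.models import Sequential
from keras.layers import Dense

# Toy illustration of the layer-wise weight-copy pattern used in the dual loop.
a = Sequential([Dense(4, activation='relu', input_shape=(3,)), Dense(2)])
b = Sequential([Dense(4, activation='relu', input_shape=(3,)), Dense(2)])

for i in range(len(a.layers)):
    b.layers[i].set_weights(a.layers[i].get_weights())

x = np.random.rand(5, 3).astype('float32')
assert np.allclose(a.predict(x), b.predict(x))   # identical outputs after the copy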