Example #1
    def generate(self, X, y, reject=True, same_size=True):
        """Run the generator graph and return the generated inputs."""
        inputs = X.copy()
        targets = one_hot_encoding(y, self.num_classes)
        noise = self.noise_generator.generate(X, y, self.block)
        feed_dict = {
            self.inputs: inputs,
            self.targets: targets,
            self.prior_noise: noise,
            self.masks: np.broadcast_to(self.mask, shape=inputs.shape),
            self.is_training: False,  # inference mode
            self.dis_l2_scale: self.cur_dis_l2_scale,
        }
        return self.session.run(self.gen_inputs, feed_dict=feed_dict)
        """
Example #2
    def pre_training(self, X, y):
        """One generator pre-training step; returns the pre-training error."""
        batch_size = X.shape[0]
        noise = np.random.uniform(size=(batch_size, self.prior_dim))
        feed_dict = {
            self.inputs: X,
            self.prior_noise: noise,
            self.targets: one_hot_encoding(y, self.num_classes),
            self.masks: np.broadcast_to(self.mask, shape=X.shape),
            self.dis_l2_scale: self.cur_dis_l2_scale,
            self.is_gen: 1.,
            # min log(1 - D) has vanishing gradients early in training,
            # so feed ones as source targets and maximize log D instead.
            self.source_targets: np.ones((batch_size, 1)),
        }
        self.session.run(self.pretraining_optimizer, feed_dict=feed_dict)
        return self.session.run(self.pretraining_error, feed_dict=feed_dict)
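The vanishing-gradient comment refers to the standard non-saturating GAN trick: with D = sigmoid(logit), the gradient of log(1 - D) with respect to the logit is -D, which goes to zero exactly when the discriminator confidently rejects fakes, while the gradient of log D is 1 - D, which stays near one. A small numeric illustration (self-contained, not part of the original code):

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

# Early in training the discriminator easily rejects fakes: D(G(z)) ~ 0.
logit = -6.0                      # discriminator logit for a fake sample
d = sigmoid(logit)                # D(G(z)) ~ 0.0025

grad_saturating = -d              # d/dlogit of log(1 - D): vanishes as D -> 0
grad_nonsaturating = 1.0 - d      # d/dlogit of log(D): stays ~1 as D -> 0
print(grad_saturating, grad_nonsaturating)   # ~ -0.0025 vs ~ 0.9975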
Example #3
    def train_discriminator_step(self, X, y):
        """One discriminator update: fake samples first, then real ones.

        Returns the source (real/fake) error and the classification error.
        """
        batch_size = X.shape[0]
        noise = self.noise_generator.generate(X, y, self.block)
        feed_dict = {
            self.inputs: X,
            self.prior_noise: noise,
            self.targets: one_hot_encoding(y, self.num_classes),
            self.masks: np.broadcast_to(self.mask, shape=X.shape),
            self.is_training: True,
            self.dis_l2_scale: self.cur_dis_l2_scale,
        }

        # Update on generated samples, labeled 0 (fake)...
        feed_dict[self.is_gen] = 1.
        feed_dict[self.source_targets] = np.zeros((batch_size, 1))
        self.session.run(self.discriminator_optimizer, feed_dict=feed_dict)

        # ...then on real samples, labeled 1.
        feed_dict[self.is_gen] = 0.
        feed_dict[self.source_targets] = np.ones((batch_size, 1))
        self.session.run(self.discriminator_optimizer, feed_dict=feed_dict)

        return self.session.run([self.source_error, self.classification_error],
                                feed_dict=feed_dict)
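A sketch of how the steps above might be wired into one training epoch. The minibatch helper and the generator-step method name (train_generator_step) are assumptions for illustration; the code shown on this page only defines the discriminator step:

import numpy as np

def iterate_minibatches(X, y, batch_size=100, shuffle=True):
    """Hypothetical batching helper; yields (X_batch, y_batch) slices."""
    idx = np.arange(X.shape[0])
    if shuffle:
        np.random.shuffle(idx)
    for start in range(0, len(idx), batch_size):
        batch = idx[start:start + batch_size]
        yield X[batch], y[batch]

def train_epoch(gan, X, y, batch_size=100):
    """One epoch, assuming `gan` exposes the methods shown above plus a
    hypothetical generator step with the same (X, y) signature."""
    errors = []
    for X_batch, y_batch in iterate_minibatches(X, y, batch_size):
        source_err, class_err = gan.train_discriminator_step(X_batch, y_batch)
        gan.train_generator_step(X_batch, y_batch)  # hypothetical name
        errors.append((source_err, class_err))
    return np.mean(errors, axis=0)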
Example #4
    def evaluate(self, X, y, batch_size=100):
        """Evaluate the underlying Keras model on one-hot encoded labels."""
        y = one_hot_encoding(y)
        return self.model.evaluate(X, y, batch_size=batch_size)
Example #5
    def fit(self, X, y, max_num_epochs=500, validation_split=0.2,
            batch_size=100, verbose=1):
        """Train the Keras model, stopping early when validation loss stalls."""
        y = one_hot_encoding(y)
        early_stopping = EarlyStopping(monitor='val_loss', patience=5)
        self.model.fit(X, y, validation_split=validation_split,
                       callbacks=[early_stopping], epochs=max_num_epochs,
                       batch_size=batch_size, verbose=verbose)
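A self-contained sketch of how Examples #4 and #5 fit together. The model architecture and data below are illustrative stand-ins for whatever self.model actually is in the wrapper class:

import numpy as np
from tensorflow import keras
from tensorflow.keras.callbacks import EarlyStopping

num_classes = 3
X = np.random.randn(1000, 20).astype(np.float32)  # illustrative data
y = np.random.randint(0, num_classes, size=1000)

# Minimal stand-in for self.model in the wrapper class.
model = keras.Sequential([
    keras.layers.Dense(64, activation='relu', input_shape=(20,)),
    keras.layers.Dense(num_classes, activation='softmax'),
])
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])

# fit(): one-hot encode the labels and train with early stopping.
y_onehot = keras.utils.to_categorical(y, num_classes)
early_stopping = EarlyStopping(monitor='val_loss', patience=5)
model.fit(X, y_onehot, validation_split=0.2, callbacks=[early_stopping],
          epochs=500, batch_size=100, verbose=1)

# evaluate(): same encoding, then the built-in Keras evaluation.
print(model.evaluate(X, y_onehot, batch_size=100))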