Example #1
 def evaluate_U(self, features, labels):
     predictions = self.model(features)
     if(self.robust_train == 1): # We only check with IBP if we need to
         logit_l, logit_u = analyzers.IBP(self, features, self.model.get_weights(), self.epsilon)
         #logit_l, logit_u = analyzers.IBP(self, features, self.model.trainable_variables, 0.0)
         v1 = tf.one_hot(labels, depth=10)
         v2 = 1 - tf.one_hot(labels, depth=10)
         worst_case = tf.math.add(tf.math.multiply(v2, logit_u), tf.math.multiply(v1, logit_l))
         worst_case = self.model.layers[-1].activation(worst_case)
         v_loss = losses.robust_potential_energy(labels, predictions, self.prior_mean,
                                            self.prior_var, self.q, self.loss_func,
                                            worst_case, self.robust_lambda)
         #v_loss = self.loss_func(labels, predictions, worst_case, self.robust_lambda)
         self.extra_metric(labels, worst_case)
     elif(self.robust_train == 2):
         v_loss = self.loss_func(labels, predictions, predictions, self.robust_lambda)
         worst_case = predictions
     else:
         v_loss = losses.normal_potential_energy(labels, predictions, self.prior_mean,
                                            self.prior_var, self.q, self.loss_func)
         #v_loss = self.loss_func(labels, predictions)
         worst_case = predictions
     self.U_metric(v_loss)
     res = self.U_metric.result()
     self.U_metric.reset_states()
     return res
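
A minimal, self-contained sketch of the worst-case logit construction used above, with made-up bounds standing in for the output of analyzers.IBP (the methods here hardcode depth=10; depth 4 is used below only for brevity):

    import tensorflow as tf

    labels  = tf.constant([1])                      # true class is 1
    logit_l = tf.constant([[0.2, 0.9, -0.1, 0.4]])  # assumed lower bounds
    logit_u = tf.constant([[0.8, 1.5, 0.6, 1.0]])   # assumed upper bounds

    v1 = tf.one_hot(labels, depth=4)  # selects the true class
    v2 = 1 - v1                       # selects every other class
    # Worst case in the eps-ball: the true logit drops to its lower bound
    # while every competing logit rises to its upper bound.
    worst_case = v2 * logit_u + v1 * logit_l
    print(worst_case.numpy())         # [[0.8 0.9 0.6 1. ]]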
Example #2
 def model_validate(self, features, labels):
     #self.model.set_weights(self.sample())
     predictions = self.model(features)
     if (self.robust_train == 1):  # We only check with IBP if we need to
         logit_l, logit_u = analyzers.IBP(self, features,
                                          self.model.get_weights(),
                                          self.epsilon)
         #logit_l, logit_u = analyzers.IBP(self, features, self.model.trainable_variables, 0.0)
         v1 = tf.one_hot(labels, depth=10)
         v2 = 1 - tf.one_hot(labels, depth=10)
         worst_case = tf.math.add(tf.math.multiply(v2, logit_u),
                                  tf.math.multiply(v1, logit_l))
         worst_case = self.model.layers[-1].activation(worst_case)
         v_loss = self.loss_func(labels, predictions)
         self.extra_metric(labels, worst_case)
     elif (self.robust_train == 2):
         v_loss = self.loss_func(labels, predictions)
         worst_case = predictions
     else:
         v_loss = self.loss_func(labels, predictions)
         worst_case = predictions
     if (self.mode == 'regression'):
         labels = tf.reshape(labels, predictions.shape)
     self.valid_metric(labels, predictions)
     self.valid_loss(v_loss)
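
The valid_loss / valid_metric pair follows the usual Keras accumulate, result, reset cycle, the same pattern evaluate_U uses in Example #1. A stand-alone illustration with stand-in Keras metrics (assuming the attributes above are metrics of this kind):

    import tensorflow as tf

    valid_loss   = tf.keras.metrics.Mean(name='valid_loss')
    valid_metric = tf.keras.metrics.SparseCategoricalAccuracy(name='valid_acc')

    labels      = tf.constant([1, 0])
    predictions = tf.constant([[0.1, 0.9], [0.8, 0.2]])

    valid_metric(labels, predictions)  # calling a metric updates its running state
    valid_loss(0.35)                   # accumulate a per-batch loss value
    print(valid_metric.result().numpy(), valid_loss.result().numpy())  # 1.0 0.35
    valid_loss.reset_states()          # clear before the next validation pass
    valid_metric.reset_states()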
Example #3
    def step(self, features, labels, lrate):
        # OPTIMIZATION PARAMETERS:
        alpha = lrate #self.alpha
        beta_1 = self.beta_1
        beta_2 = self.beta_2
        lam = self.lam

        N = self.N  # dataset size (e.g. 60000); alternatively float(self.batch_size)

        self.posterior_mean = self.model.get_weights()

        init_weights = []
        for i in range(len(self.posterior_mean)):
            var = tf.math.add(tf.math.sqrt(N*self.posterior_var[i]), lam)
            var = tf.math.reciprocal(var)
            sample = tf.random.normal(shape=self.posterior_var[i].shape, mean=0, stddev=1.0)
            sample = tf.math.multiply(var, sample)
            sample = tf.math.add(self.posterior_mean[i], sample)
            init_weights.append(sample)
        
        self.model.set_weights(init_weights)  # pass the list directly; ragged weight shapes cannot form a dense ndarray
        
        with tf.GradientTape(persistent=True) as tape:
            # Get the probabilities
            predictions = self.model(features)
            # Calculate the loss
            if(int(self.robust_train) == 0):
                loss = self.loss_func(labels, predictions)

            elif(int(self.robust_train) == 1):
                logit_l, logit_u = analyzers.IBP(self, features, self.model.trainable_variables, eps=self.epsilon)
                v1 = tf.one_hot(labels, depth=10)
                v2 = 1 - tf.one_hot(labels, depth=10)
                worst_case = tf.math.add(tf.math.multiply(v2, logit_u), tf.math.multiply(v1, logit_l))
                worst_case = self.model.layers[-1].activation(worst_case)
                output = (self.robust_lambda * predictions) + ((1-self.robust_lambda) * worst_case)
                loss =  self.loss_func(labels, output)

            elif(int(self.robust_train) == 2):
                features_adv = analyzers.PGD(self, features, self.attack_loss, eps=self.epsilon, num_models=-1)
                worst_case = self.model(features_adv)
                output = (self.robust_lambda * predictions) + ((1-self.robust_lambda) * worst_case)
                loss =  self.loss_func(labels, output)

            elif(int(self.robust_train) == 3):
                output = tf.zeros(predictions.shape)
                self.epsilon = max(0.0001, self.epsilon)
                self.eps_dist = tfp.distributions.Exponential(1.0/self.epsilon)
                for _mc_ in range(self.loss_monte_carlo):
                    eps = self.eps_dist.sample()
                    logit_l, logit_u = analyzers.IBP(self, features, self.model.trainable_variables, eps=eps)
                    v1 = tf.one_hot(labels, depth=10)
                    v2 = 1 - tf.one_hot(labels, depth=10)
                    v1 = tf.squeeze(v1)
                    v2 = tf.squeeze(v2)
                    worst_case = tf.math.add(tf.math.multiply(v2, logit_u), tf.math.multiply(v1, logit_l))
                    worst_case = self.model.layers[-1].activation(worst_case)
                    output += (1.0/self.loss_monte_carlo) * worst_case
                loss = self.loss_func(labels, output)

            elif(int(self.robust_train) == 4):
                output = tf.zeros(predictions.shape)
                self.epsilon = max(0.0001, self.epsilon)
                self.eps_dist = tfp.distributions.Exponential(1.0/self.epsilon)
                for _mc_ in range(self.loss_monte_carlo):
                    #eps = tfp.random.rayleigh([1], scale=self.epsilon)
                    eps = self.eps_dist.sample()
                    features_adv = analyzers.FGSM(self, features, self.attack_loss, eps=eps, num_models=-1)  # use the freshly sampled eps for this draw
                    worst_case = self.model(features_adv)
                    output += (1.0/self.loss_monte_carlo) * worst_case
                loss = self.loss_func(labels, output)
        weight_gradient = tape.gradient(loss, self.model.trainable_variables)
        g = weight_gradient  # keep as a Python list; the per-layer shapes are ragged
       
        # Process the gradient according to the reparameterization of Khan et al. (arXiv:2002.10060)
        g_mu = []
        g_s = []
        m_hat = []
        s_hat = []
        t = self.learning_rate
        for i in range(len(g)):
            # Appropriately scaled gradient updates, following Khan et al. (arXiv:2002.10060, ICLR 2020)
            g_mu.append((self.lam/60000)*self.posterior_mean[i] + g[i])
            g_s_comp2 = tf.math.multiply((60000*self.posterior_var[i]), (init_weights[i] - self.posterior_mean[i]))
            g_s_comp2 = tf.math.multiply(g_s_comp2, g[i])
            g_s.append((self.lam/60000) - self.posterior_var[i] + g_s_comp2)
            # Standard momentum update
            self.m[i] = (beta_1*self.m[i]) + ((1-beta_1)*(g_mu[i]))
            m_hat.append(self.m[i]/(1-beta_1))
            s_hat.append(self.posterior_var[i]/(1-beta_2))

        # Apply the effects from the updates
        for i in range(len(g)):
            self.posterior_mean[i] = self.posterior_mean[i] - t*(m_hat[i]/s_hat[i])
            comp_1 = (0.5 * ((1-beta_2)**2) * g_s[i])
            recip = tf.math.multiply(tf.math.reciprocal(self.posterior_var[i]), g_s[i])
            self.posterior_var[i] = self.posterior_var[i] + tf.math.multiply(comp_1, recip) 

        self.model.set_weights(self.posterior_mean)
        self.train_loss(loss)
        self.train_metric(labels, predictions)
        return self.posterior_mean, self.posterior_var
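
The sampling loop at the top of this step draws each weight tensor from a diagonal Gaussian with standard deviation 1/(sqrt(N*s) + lam), per the scheme the comments attribute to Khan et al. (arXiv:2002.10060). An isolated sketch with toy shapes and values:

    import tensorflow as tf

    N, lam = 60000.0, 1.0                  # dataset size and prior term (toy values)
    posterior_mean = tf.zeros([3, 2])
    posterior_var  = tf.fill([3, 2], 0.01)

    # stddev = 1 / (sqrt(N * var) + lam), matching the reciprocal above
    scale  = tf.math.reciprocal(tf.math.sqrt(N * posterior_var) + lam)
    sample = posterior_mean + scale * tf.random.normal(posterior_var.shape)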
Example #4
    def step(self, features, labels, lrate):
        # Define the GradientTape context
        with tf.GradientTape(persistent=True) as tape:
            tape.watch(self.posterior_mean)

            predictions = self.model(features)

            if (not self.robust_train):
                worst_case = predictions
                loss = self.loss_func(labels, predictions)

            elif (int(self.robust_train) == 1):
                logit_l, logit_u = analyzers.IBP(
                    self,
                    features,
                    self.model.trainable_variables,
                    eps=self.epsilon)
                v1 = tf.one_hot(labels, depth=10)
                v2 = 1 - tf.one_hot(labels, depth=10)
                worst_case = tf.math.add(tf.math.multiply(v2, logit_u),
                                         tf.math.multiply(v1, logit_l))
                worst_case = self.model.layers[-1].activation(worst_case)
                output = (self.robust_lambda * predictions) + (
                    (1 - self.robust_lambda) * worst_case)
                loss = self.loss_func(labels, output)

            elif (int(self.robust_train) == 2):
                features_adv = analyzers.PGD(self,
                                             features,
                                             self.attack_loss,
                                             eps=self.epsilon,
                                             num_models=-1)
                worst_case = self.model(features_adv)
                output = (self.robust_lambda * predictions) + (
                    (1 - self.robust_lambda) * worst_case)
                loss = self.loss_func(labels, output)
                #self.train_rob(labels, worst_case)

            elif (int(self.robust_train) == 3):
                output = tf.zeros(predictions.shape)
                self.epsilon = max(0.0001, self.epsilon)
                self.eps_dist = tfp.distributions.Exponential(
                    1.0 / float(self.epsilon))
                for _mc_ in range(self.loss_monte_carlo):
                    eps = self.eps_dist.sample()
                    #eps = tfp.random.rayleigh([1], scale=self.epsilon)
                    logit_l, logit_u = analyzers.IBP(
                        self,
                        features,
                        self.model.trainable_variables,
                        eps=eps)
                    v1 = tf.one_hot(labels, depth=10)
                    v2 = 1 - tf.one_hot(labels, depth=10)
                    v1 = tf.squeeze(v1)
                    v2 = tf.squeeze(v2)
                    worst_case = tf.math.add(tf.math.multiply(v2, logit_u),
                                             tf.math.multiply(v1, logit_l))
                    worst_case = self.model.layers[-1].activation(worst_case)
                    output += (1.0 / self.loss_monte_carlo) * worst_case
                loss = self.loss_func(labels, output)

            elif (int(self.robust_train) == 4):
                output = tf.zeros(predictions.shape)
                self.epsilon = max(0.0001, self.epsilon)
                self.eps_dist = tfp.distributions.Exponential(
                    1.0 / float(self.epsilon))
                for _mc_ in range(self.loss_monte_carlo):
                    #eps = tfp.random.rayleigh([1], scale=self.epsilon)
                    eps = self.eps_dist.sample()
                    features_adv = analyzers.FGSM(self,
                                                  features,
                                                  self.attack_loss,
                                                  eps=eps,  # use the freshly sampled eps
                                                  num_models=-1)
                    worst_case = self.model(features_adv)
                    output += (1.0 / self.loss_monte_carlo) * worst_case
                loss = self.loss_func(labels, output)

        # Get the gradients
        weight_gradient = tape.gradient(loss, self.model.trainable_variables)
        weights = self.model.get_weights()
        new_weights = []
        for i in range(len(weight_gradient)):
            wg = tf.math.multiply(weight_gradient[i], lrate)
            m = tf.math.subtract(weights[i], wg)
            new_weights.append(m)

        if self.record:
            self.weights_stack.append(new_weights)

        self.model.set_weights(new_weights)
        self.posterior_mean = new_weights

        self.train_loss(loss)
        self.train_metric(labels, predictions)
        #self.train_rob(labels, worst_case)
        return self.posterior_mean, self.posterior_var
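
Stripped of the robust-loss branches, the tail of this step is one step of plain gradient descent applied through get_weights / set_weights. A runnable toy version; the model, data, and loss below are placeholders, not the library's own:

    import tensorflow as tf

    model = tf.keras.Sequential([tf.keras.layers.Dense(2)])
    features = tf.random.normal([8, 4])
    labels = tf.constant([0, 1, 0, 1, 0, 1, 0, 1])
    loss_func = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
    lrate = 0.1

    with tf.GradientTape() as tape:
        loss = loss_func(labels, model(features))   # first call also builds the model
    grads = tape.gradient(loss, model.trainable_variables)
    new_weights = [v.numpy() - lrate * g.numpy()
                   for v, g in zip(model.trainable_variables, grads)]
    model.set_weights(new_weights)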
Example #5
    def step(self, features, labels, lrate):
        """
        Initial sampling for BBB
        """
        init_weights = []
        noise_used = []
        for i in range(len(self.posterior_mean)):
            noise = tf.random.normal(shape=self.posterior_var[i].shape,
                                     mean=tf.zeros(
                                         self.posterior_var[i].shape),
                                     stddev=1.0)
            var_add = tf.multiply(softplus(self.posterior_var[i]), noise)
            #var_add = tf.multiply(self.posterior_mean[i], noise)
            w = tf.math.add(self.posterior_mean[i], var_add)
            noise_used.append(noise)
            init_weights.append(w)
        self.model.set_weights(init_weights)

        # Define the GradientTape context
        with tf.GradientTape(
                persistent=True
        ) as tape:  # Below we add an extra variable for IBP
            tape.watch(self.posterior_mean)
            tape.watch(self.posterior_var)
            #tape.watch(init_weights)
            predictions = self.model(features)
            """
            We support a few different things for auto-diff including adversarial training.
            """
            if (self.robust_train == 0):
                loss, kl_comp = losses.KL_Loss(labels, predictions,
                                               self.model.trainable_variables,
                                               self.prior_mean, self.prior_var,
                                               self.posterior_mean,
                                               self.posterior_var,
                                               self.loss_func, self.kl_weight)
            elif (int(self.robust_train) == 1):
                # Get the probabilities
                logit_l, logit_u = analyzers.IBP(
                    self,
                    features,
                    self.model.trainable_variables,
                    eps=self.epsilon)
                #!*! TODO: Undo the hardcoding of depth in this function
                v1 = tf.one_hot(labels, depth=10)
                v2 = 1 - tf.one_hot(labels, depth=10)
                worst_case = tf.math.add(tf.math.multiply(v2, logit_u),
                                         tf.math.multiply(v1, logit_l))

                # Now we have the worst case softmax probabilities (or output)
                worst_case = self.model.layers[-1].activation(worst_case)
                output = (self.robust_lambda * predictions) + (
                    (1 - self.robust_lambda) * worst_case)
                # Calculate the loss
                loss, kl_comp = losses.KL_Loss(labels, output,
                                               self.model.trainable_variables,
                                               self.prior_mean, self.prior_var,
                                               self.posterior_mean,
                                               self.posterior_var,
                                               self.loss_func, self.kl_weight)

            elif (int(self.robust_train) == 2):
                features_adv = analyzers.FGSM(self,
                                              features,
                                              self.attack_loss,
                                              eps=self.epsilon,
                                              num_models=-1)
                # Get the probabilities
                worst_case = self.model(features_adv)
                output = (self.robust_lambda * predictions) + (
                    (1 - self.robust_lambda) * worst_case)
                # Calculate the loss
                loss, kl_comp = losses.KL_Loss(labels, output,
                                               self.model.trainable_variables,
                                               self.prior_mean, self.prior_var,
                                               self.posterior_mean,
                                               self.posterior_var,
                                               self.loss_func, self.kl_weight)

            elif (int(self.robust_train) == 3):
                output = tf.zeros(predictions.shape)
                self.epsilon = max(0.0001, self.epsilon)
                self.eps_dist = tfp.distributions.Exponential(1.0 /
                                                              self.epsilon)
                for _mc_ in range(self.loss_monte_carlo):
                    #eps = tfp.random.rayleigh([1], scale=self.epsilon)
                    eps = self.eps_dist.sample()
                    logit_l, logit_u = analyzers.IBP(
                        self,
                        features,
                        self.model.trainable_variables,
                        eps=eps)
                    v1 = tf.one_hot(labels, depth=10)
                    v2 = 1 - tf.one_hot(labels, depth=10)
                    v1 = tf.squeeze(v1)
                    v2 = tf.squeeze(v2)
                    worst_case = tf.math.add(tf.math.multiply(v2, logit_u),
                                             tf.math.multiply(v1, logit_l))
                    worst_case = self.model.layers[-1].activation(worst_case)
                    output += (1.0 / self.loss_monte_carlo) * worst_case

                loss, kl_comp = losses.KL_Loss(labels, output,
                                               self.model.trainable_variables,
                                               self.prior_mean, self.prior_var,
                                               self.posterior_mean,
                                               self.posterior_var,
                                               self.loss_func, self.kl_weight)

            elif (int(self.robust_train) == 4):
                output = tf.zeros(predictions.shape)
                self.epsilon = max(0.0001, self.epsilon)
                self.eps_dist = tfp.distributions.Exponential(1.0 /
                                                              self.epsilon)
                for _mc_ in range(self.loss_monte_carlo):
                    #eps = tfp.random.rayleigh([1], scale=self.epsilon)
                    eps = self.eps_dist.sample()
                    features_adv = analyzers.FGSM(self,
                                                  features,
                                                  self.attack_loss,
                                                  eps=eps,  # use the freshly sampled eps
                                                  num_models=-1)
                    worst_case = self.model(features_adv)
                    output += (1.0 / self.loss_monte_carlo) * worst_case
                loss, kl_comp = losses.KL_Loss(labels, output,
                                               self.model.trainable_variables,
                                               self.prior_mean, self.prior_var,
                                               self.posterior_mean,
                                               self.posterior_var,
                                               self.loss_func, self.kl_weight)
        # Get the gradients
        weight_gradient = tape.gradient(loss, self.model.trainable_variables)
        mean_gradient = tape.gradient(loss, self.posterior_mean)
        var_gradient = tape.gradient(loss, self.posterior_var)

        posti_mean_grad = []
        posti_var_grad = []
        for i in range(len(mean_gradient)):
            weight_gradient[i] = tf.cast(weight_gradient[i], 'float32')
            mean_gradient[i] = tf.cast(mean_gradient[i], 'float32')
            f = tf.math.add(weight_gradient[i], mean_gradient[i])
            posti_mean_grad.append(f)
            v = tf.math.divide(
                noise_used[i],
                1 + tf.math.exp(tf.math.multiply(self.posterior_var[i], -1)))
            v = tf.math.multiply(v, weight_gradient[i])
            v = tf.math.add(v, var_gradient[i])
            posti_var_grad.append(v)

        # APPLICATION OF WEIGHTS
        new_posti_var = []
        new_posti_mean = []
        for i in range(len(mean_gradient)):
            pdv = tf.math.multiply(posti_var_grad[i], lrate)
            pdm = tf.math.multiply(posti_mean_grad[i], lrate)
            v = tf.math.subtract(self.posterior_var[i], pdv)
            m = tf.math.subtract(self.posterior_mean[i], pdm)
            new_posti_var.append(v)
            new_posti_mean.append(m)

        self.train_loss(loss)
        self.train_metric(labels, predictions)
        #self.train_rob(labels, worst_case)
        self.kl_component(kl_comp)
        self.posterior_mean = new_posti_mean
        self.posterior_var = new_posti_var
        return new_posti_mean, new_posti_var
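
The sampling here uses the Bayes-by-Backprop reparameterisation w = mu + softplus(rho) * eps with eps ~ N(0, 1), and the correction noise / (1 + exp(-rho)) applied to the weight gradient is exactly eps * sigmoid(rho), the derivative of softplus. A quick numeric check:

    import tensorflow as tf

    rho = tf.constant([-1.0, 0.0, 2.0])   # raw variance parameters
    mu  = tf.zeros_like(rho)
    eps = tf.random.normal(rho.shape)

    w = mu + tf.math.softplus(rho) * eps          # sampled weights
    factor = eps / (1.0 + tf.math.exp(-rho))      # as in the var-gradient step
    same = eps * tf.math.sigmoid(rho)             # eps * d softplus(rho) / d rho
    print(bool(tf.reduce_all(tf.abs(factor - same) < 1e-6)))  # True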
Example #6
    def step(self, features, labels, lrate):
        for features, labels in self.train_ds.take(self.batches):  # note: shadows the features/labels arguments
            # Define the GradientTape context
            with tf.GradientTape(persistent=True) as tape:   # Below we add an extra variable for IBP
                tape.watch(self.posterior_mean) 
                predictions = self.model(features)
                if(self.robust_train == 0):
                    loss = losses.normal_potential_energy(labels, predictions, self.prior_mean,
                                               self.prior_var, self.q, self.loss_func)

                elif(int(self.robust_train) == 1):
                    logit_l, logit_u = analyzers.IBP(self, features, self.model.trainable_variables, eps=self.epsilon)
                    #!*! TODO: Undo the hardcoding of depth in this function
                    v1 = tf.one_hot(labels, depth=10)
                    v2 = 1 - tf.one_hot(labels, depth=10)
                    worst_case = tf.math.add(tf.math.multiply(v2, logit_u), tf.math.multiply(v1, logit_l))
                    # Now we have the worst case softmax probabilities
                    worst_case = self.model.layers[-1].activation(worst_case)
                    output = (self.robust_lambda * predictions) + ((1-self.robust_lambda) * worst_case)
                    #loss =  self.loss_func(labels, output)
                    loss = losses.normal_potential_energy(labels, predictions, self.prior_mean,
                                               self.prior_var, self.q, self.loss_func)

                elif(int(self.robust_train) == 2):
                    features_adv = analyzers.FGSM(self, features, self.attack_loss, eps=self.epsilon, num_models=-1)
                    # Get the probabilities
                    worst_case = self.model(features_adv)
                    # Calculate the loss
                    output = (self.robust_lambda * predictions) + ((1-self.robust_lambda) * worst_case)
                    #loss =  self.loss_func(labels, output)
                    loss = losses.normal_potential_energy(labels, predictions, self.prior_mean,
                                               self.prior_var, self.q, self.loss_func)

                elif(int(self.robust_train) == 3):
                    output = tf.zeros(predictions.shape)
                    self.epsilon = max(0.0001, self.epsilon)
                    self.eps_dist = tfp.distributions.Exponential(1.0/self.epsilon)
                    for _mc_ in range(self.loss_monte_carlo):
                        #eps = tfp.random.rayleigh([1], scale=self.epsilon)
                        eps = self.eps_dist.sample()
                        logit_l, logit_u = analyzers.IBP(self, features, self.model.trainable_variables, eps=eps)
                        v1 = tf.one_hot(labels, depth=10)
                        v2 = 1 - tf.one_hot(labels, depth=10)
                        v1 = tf.squeeze(v1)
                        v2 = tf.squeeze(v2)
                        worst_case = tf.math.add(tf.math.multiply(v2, logit_u), tf.math.multiply(v1, logit_l))
                        worst_case = self.model.layers[-1].activation(worst_case)
                        output += (1.0/self.loss_monte_carlo) * worst_case
                    loss = losses.normal_potential_energy(labels, predictions, self.prior_mean,
                                               self.prior_var, self.q, self.loss_func)     
                elif(int(self.robust_train) == 4):
                    self.epsilon = max(0.0001, self.epsilon)
                    self.eps_dist = tfp.distributions.Exponential(1.0/self.epsilon)
                    output = tf.zeros(predictions.shape)
                    for _mc_ in range(self.loss_monte_carlo):
                        #eps = tfp.random.rayleigh([1], scale=self.epsilon)
                        eps = self.eps_dist.sample()
                        features_adv = analyzers.FGSM(self, features, self.attack_loss, eps=eps, num_models=-1)  # use the freshly sampled eps
                        worst_case = self.model(features_adv)
                        output += (1.0/self.loss_monte_carlo) * worst_case
                    loss = losses.normal_potential_energy(labels, predictions, self.prior_mean,
                                               self.prior_var, self.q, self.loss_func)     

            # Get the gradients
            weight_gradient = tape.gradient(loss, self.model.trainable_variables)
            temp_p = []
            for i in range(len(weight_gradient)):
                wg = tf.math.multiply(weight_gradient[i], lrate/self.batches)
                temp_p.append(tf.math.add(self.p[i], wg)) # maybe come back and make this subtraction
            self.p = temp_p  # keep as a Python list; the per-layer shapes are ragged

        self.train_loss(loss)
        self.train_metric(labels, predictions)
        #self.train_rob(labels, worst_case)
        return self.posterior_mean, self.posterior_var
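
The closing loop only accumulates scaled gradients into the auxiliary momentum self.p, much like the momentum half-step of an HMC-style leapfrog integrator (which would normally subtract, as the inline comment already flags). In isolation, with toy buffers:

    import tensorflow as tf

    lrate, batches = 0.01, 10
    p     = [tf.zeros([2, 2]), tf.zeros([2])]   # toy momentum buffers
    grads = [tf.ones([2, 2]), tf.ones([2])]     # stand-in weight gradients

    # p <- p + (lrate / batches) * grad, element-wise per weight tensor
    p = [pi + (lrate / batches) * gi for pi, gi in zip(p, grads)]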
Example #7
    def step(self, features, labels, lrate):
        alpha = lrate
        beta_1 = self.beta_1
        beta_2 = self.beta_2
        lam = self.lam

        posti_var = self.posterior_var
        posti_mean = self.posterior_mean

        N = float(self.batch_size)  # batch size

        with tf.GradientTape(persistent=True) as tape:
            # Get the probabilities
            predictions = self.model(features)
            # Calculate the loss
            if (int(self.robust_train) == 0):
                loss = self.loss_func(labels, predictions)
            elif (int(self.robust_train) == 1):
                logit_l, logit_u = analyzers.IBP(
                    self,
                    features,
                    self.model.trainable_variables,
                    eps=self.epsilon)
                v1 = tf.one_hot(labels, depth=10)
                v2 = 1 - tf.one_hot(labels, depth=10)
                worst_case = tf.math.add(tf.math.multiply(v2, logit_u),
                                         tf.math.multiply(v1, logit_l))
                worst_case = self.model.layers[-1].activation(worst_case)
                loss = self.loss_func(labels, predictions, worst_case,
                                      self.robust_lambda)
                #self.train_rob(labels, worst_case)
            elif (int(self.robust_train) == 2):
                features_adv = analyzers.FGSM(self,
                                              features,
                                              self.attack_loss,
                                              eps=self.epsilon,
                                              num_models=-1)
                # Get the probabilities
                worst_case = self.model(features_adv)
                # Calculate the loss
                loss = self.loss_func(labels, predictions, worst_case,
                                      self.robust_lambda)

        weight_gradient = tape.gradient(loss, self.model.trainable_variables)
        g = weight_gradient  # keep as a Python list; the per-layer shapes are ragged

        sq_grad = []
        for i in range(len(weight_gradient)):
            sq_grad.append(
                tf.math.multiply(weight_gradient[i], weight_gradient[i]))
            self.m[i] = (beta_1 * self.m[i]) + ((1 - beta_1) *
                                                (g[i] +
                                                 ((lam * posti_mean[i]) / N)))
            posti_var[i] = (beta_2 * posti_var[i]) + ((1 - beta_2) *
                                                      (sq_grad[i]))


        for i in range(len(weight_gradient)):
            m_ = self.m[i] / (1 - beta_1)
            s_ = np.sqrt(posti_var[i]) + lam / N
            posti_mean[i] = posti_mean[i] - (alpha * (m_ / s_))

        self.model.set_weights(posti_mean)
        self.train_loss(loss)
        self.train_metric(labels, predictions)
        return posti_mean, posti_var
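
On scalars, the update tail is an Adam-style step: a bias-corrected first moment divided by a root-second-moment scale, with lam/N acting as damping. A toy numeric version, all values illustrative:

    import numpy as np

    alpha, beta_1, beta_2, lam, N = 0.01, 0.9, 0.999, 1.0, 128.0
    g, m, s, mean = 0.5, 0.0, 0.0, 1.0   # gradient, moments, and one weight

    m = beta_1 * m + (1 - beta_1) * (g + lam * mean / N)
    s = beta_2 * s + (1 - beta_2) * g * g
    m_hat = m / (1 - beta_1)                       # bias correction as used above
    mean  = mean - alpha * m_hat / (np.sqrt(s) + lam / N)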
Example #8
    def step(self, features, labels, lrate):
        # OPTIMIZATION PARAMETERS:
        alpha = lrate  #self.alpha
        beta_1 = self.beta_1
        beta_2 = self.beta_2
        lam = self.lam

        N = self.N  # dataset size (e.g. 60000); alternatively float(self.batch_size)

        self.posterior_mean = self.model.get_weights()

        v1 = tf.one_hot(labels, depth=10)
        v2 = 1 - tf.one_hot(labels, depth=10)

        init_weights = []
        for i in range(len(self.posterior_mean)):
            var = tf.math.add(tf.math.sqrt(N * self.posterior_var[i]), lam)
            var = tf.math.reciprocal(var)
            sample = tf.random.normal(shape=self.posterior_var[i].shape,
                                      mean=0,
                                      stddev=1.0)
            sample = tf.math.multiply(var, sample)
            sample = tf.math.add(self.posterior_mean[i], sample)
            init_weights.append(sample)

        self.model.set_weights(init_weights)

        with tf.GradientTape(persistent=True) as tape:
            # Get the probabilities
            predictions = self.model(features)
            # Calculate the loss
            if (int(self.robust_train) == 0):
                loss = self.loss_func(labels, predictions)

            elif (int(self.robust_train) == 1):
                logit_l, logit_u = analyzers.IBP(
                    self,
                    features,
                    self.model.trainable_variables,
                    eps=self.epsilon)
                worst_case = tf.math.add(tf.math.multiply(v2, logit_u),
                                         tf.math.multiply(v1, logit_l))
                worst_case = self.model.layers[-1].activation(worst_case)
                output = (self.robust_lambda * predictions) + (
                    (1 - self.robust_lambda) * worst_case)
                loss = self.loss_func(labels, output)

            elif (int(self.robust_train) == 2):
                features_adv = analyzers.PGD(self,
                                             features,
                                             self.attack_loss,
                                             eps=self.epsilon,
                                             num_models=-1)
                # Get the probabilities
                worst_case = self.model(features_adv)
                # Calculate the loss
                output = (self.robust_lambda * predictions) + (
                    (1 - self.robust_lambda) * worst_case)
                loss = self.loss_func(labels, output)

            elif (int(self.robust_train) == 3):
                output = tf.zeros(predictions.shape)
                self.epsilon = max(0.0001, self.epsilon)
                self.eps_dist = tfp.distributions.Exponential(1.0 /
                                                              self.epsilon)
                for _mc_ in range(self.loss_monte_carlo):
                    #eps = tfp.random.rayleigh([1], scale=self.epsilon)
                    eps = self.eps_dist.sample()
                    logit_l, logit_u = analyzers.IBP(
                        self,
                        features,
                        self.model.trainable_variables,
                        eps=eps)
                    v1 = tf.one_hot(labels, depth=10)
                    v2 = 1 - tf.one_hot(labels, depth=10)
                    v1 = tf.squeeze(v1)
                    v2 = tf.squeeze(v2)
                    worst_case = tf.math.add(tf.math.multiply(v2, logit_u),
                                             tf.math.multiply(v1, logit_l))
                    worst_case = self.model.layers[-1].activation(worst_case)
                    output += (1.0 / self.loss_monte_carlo) * worst_case
                loss = self.loss_func(labels, output)

            elif (int(self.robust_train) == 4):
                output = tf.zeros(predictions.shape)
                self.epsilon = max(0.0001, self.epsilon)
                self.eps_dist = tfp.distributions.Exponential(1.0 /
                                                              self.epsilon)
                for _mc_ in range(self.loss_monte_carlo):
                    #eps = tfp.random.rayleigh([1], scale=self.epsilon)
                    eps = self.eps_dist.sample()
                    features_adv = analyzers.FGSM(self,
                                                  features,
                                                  self.attack_loss,
                                                  eps=eps,  # use the freshly sampled eps
                                                  num_models=-1)
                    worst_case = self.model(features_adv)
                    output += (1.0 / self.loss_monte_carlo) * worst_case
                loss = self.loss_func(labels, output)

        weight_gradient = tape.gradient(loss, self.model.trainable_variables)
        g = weight_gradient  # keep as a Python list; the per-layer shapes are ragged

        sq_grad = []
        for i in range(len(weight_gradient)):
            sq_grad.append(
                tf.math.multiply(weight_gradient[i], weight_gradient[i]))
            self.m[i] = (beta_1 * self.m[i]) + ((1 - beta_1) * (g[i] + (
                (lam * self.posterior_mean[i]) / N)))
            self.posterior_var[i] = (beta_2 * self.posterior_var[i]) + (
                (1 - beta_2) * (sq_grad[i]))

        #print("sq: ", sq_grad)
        sq_grad = np.asarray(sq_grad)
        self.m = np.asarray(self.m)
        self.posterior_var = np.asarray(self.posterior_var)

        for i in range(len(weight_gradient)):
            m_ = self.m[i] / (1 - beta_1)
            s_ = np.sqrt(self.posterior_var[i]) + lam / N
            self.posterior_mean[i] = self.posterior_mean[i] - (alpha *
                                                               (m_ / s_))

        self.model.set_weights(self.posterior_mean)
        self.train_loss(loss)
        self.train_metric(labels, predictions)
        #self.posterior_mean = posti_mean
        #self.posterior_var = posti_var
        return self.posterior_mean, self.posterior_var
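
A hedged usage sketch: once step has produced (posterior_mean, posterior_var), predictions can be averaged over several weight samples drawn with the same rule as the sampling at the top of the step. The model, shapes, and constants below are stand-ins, not part of the library's API:

    import tensorflow as tf

    N, lam, samples = 60000.0, 1.0, 5
    model = tf.keras.Sequential([tf.keras.layers.Dense(10)])
    features = tf.random.normal([4, 8])
    _ = model(features)                        # build the weights

    posterior_mean = model.get_weights()
    posterior_var  = [0.01 * tf.ones(w.shape) for w in posterior_mean]

    avg = tf.zeros([4, 10])
    for _ in range(samples):
        # mean + N(0, 1) / (sqrt(N * var) + lam), one draw per weight tensor
        ws = [tf.constant(m) + tf.random.normal(m.shape) / (tf.math.sqrt(N * v) + lam)
              for m, v in zip(posterior_mean, posterior_var)]
        model.set_weights(ws)
        avg += tf.nn.softmax(model(features)) / samples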