def optimizer(self):
    return optimizers.Bop(
        fp_optimizer=tf.keras.optimizers.Adam(0.01),  # updates the full-precision variables
        threshold=self.threshold,
        gamma=tf.keras.optimizers.schedules.ExponentialDecay(
            self.gamma, self.decay_step, self.gamma_decay, staircase=True
        ),
    )
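All of these snippets tune the same two Bop hyperparameters: gamma, the adaptivity rate of the gradient moving average, and threshold, the flip threshold. For reference, here is a minimal NumPy sketch of the update rule described in the Bop paper (Helwegen et al., 2019, "Latent Weights Do Not Exist: Rethinking Binarized Neural Network Optimization"); it is an illustration of the rule, not the library implementation, and the default values are only examples:

import numpy as np

def bop_step(w, grad, m, gamma=1e-4, threshold=1e-8):
    # w: binary weights in {-1, +1}; m: per-weight moving average of gradients.
    m = (1 - gamma) * m + gamma * grad
    # Flip a weight only when the accumulated signal is strong enough
    # (|m| > threshold) and points against the current sign of the weight.
    flip = (np.abs(m) > threshold) & (np.sign(m) == w)
    return np.where(flip, -w, w), m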
Example 2
def optimizer(self):
    # 1281167 is the size of the ImageNet-1k training set, so decay_step
    # is the total number of optimizer updates over the full training run.
    decay_step = self.epochs * 1281167 // self.batch_size
    lr = tf.keras.optimizers.schedules.PolynomialDecay(
        self.lr_start, decay_step, end_learning_rate=self.lr_end, power=1.0
    )
    gamma = tf.keras.optimizers.schedules.PolynomialDecay(
        self.gamma_start, decay_step, end_learning_rate=self.gamma_end, power=1.0
    )

    # Older larq releases used a single Bop optimizer that wrapped the
    # full-precision optimizer directly:
    # return optimizers.Bop(
    #     tf.keras.optimizers.Adam(lr), threshold=self.threshold, gamma=gamma
    # )

    return lq.optimizers.CaseOptimizer(
        (
            optimizers.Bop.is_binary_variable,
            optimizers.Bop(threshold=self.threshold, gamma=gamma, name="Bop"),
        ),
        default_optimizer=tf.keras.optimizers.Adam(lr),  # for FP weights
    )
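The CaseOptimizer assigns each variable to the first optimizer whose predicate accepts it: Bop.is_binary_variable routes the binary kernels of larq's quantized layers to Bop, and everything else (biases, batch-norm parameters, any full-precision weights) falls through to Adam. A minimal, self-contained usage sketch; the model architecture and hyperparameter values here are made up for illustration:

import tensorflow as tf
import larq as lq

model = tf.keras.Sequential([
    lq.layers.QuantDense(
        10,
        input_shape=(784,),
        input_quantizer="ste_sign",
        kernel_quantizer="ste_sign",
        kernel_constraint="weight_clip",
    ),
])
optimizer = lq.optimizers.CaseOptimizer(
    (lq.optimizers.Bop.is_binary_variable,
     lq.optimizers.Bop(threshold=1e-8, gamma=1e-4)),
    default_optimizer=tf.keras.optimizers.Adam(1e-3),
)
model.compile(optimizer=optimizer, loss="sparse_categorical_crossentropy")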
Example 3
def optimizer(self):
    return lq.optimizers.CaseOptimizer(
        (
            optimizers.Bop.is_binary_variable,
            optimizers.Bop(threshold=self.threshold, gamma=self.gamma, name="Bop"),
        ),
        default_optimizer=tf.keras.optimizers.Adam(self.lr),  # for FP weights
    )
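The predicate passed to CaseOptimizer is simply a callable that takes a tf.Variable and returns a bool, so dispatch is not limited to Bop.is_binary_variable. A hypothetical custom predicate; the name and the matching rule are illustrative assumptions, not larq API:

def is_latent_binary_kernel(variable):
    # Match kernels of layers that larq names "quant_..." (e.g. QuantDense).
    return "quant" in variable.name and "kernel" in variable.name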
Example 4

def optimizer(self):
    # Hard-coded variant: 100 epochs over the 1281167 ImageNet training images.
    decay_step = 100 * 1281167 // self.batch_size
    lr = tf.keras.optimizers.schedules.PolynomialDecay(
        2.5e-3, decay_step, end_learning_rate=2.5e-6, power=1.0
    )
    gamma = tf.keras.optimizers.schedules.PolynomialDecay(
        5e-4, decay_step, end_learning_rate=2.5e-6, power=1.0
    )
    return optimizers.Bop(
        tf.keras.optimizers.Adam(lr), threshold=1e-7, gamma=gamma
    )
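In these ImageNet experiments, 1281167 is the number of training images in ILSVRC-2012, so epochs * 1281167 // batch_size is the total number of optimizer updates; with power=1.0, PolynomialDecay interpolates linearly and reaches its end value exactly at the last step. A quick worked example (the batch size is an assumption):

epochs, num_images, batch_size = 100, 1281167, 1024
decay_step = epochs * num_images // batch_size
print(decay_step)  # 125113 updates from 2.5e-3 down to 2.5e-6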
Example 5
def optimizer(self):
    decay_step = self.epochs * 1281167 // self.batch_size  # total training steps
    lr = tf.keras.optimizers.schedules.PolynomialDecay(
        self.lr_start, decay_step, end_learning_rate=self.lr_end, power=1.0
    )
    gamma = tf.keras.optimizers.schedules.PolynomialDecay(
        self.gamma_start, decay_step, end_learning_rate=self.gamma_end, power=1.0
    )
    return optimizers.Bop(
        tf.keras.optimizers.Adam(lr), threshold=self.threshold, gamma=gamma
    )
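Examples 1, 4, 5, and 7 use the older larq API, in which Bop wraps the full-precision optimizer through its first (fp_optimizer) argument. In more recent larq releases Bop no longer accepts fp_optimizer and is combined with Adam through a CaseOptimizer instead, as in Examples 2, 3, and 6. Assuming such a version, a sketch of the equivalent of the method above:

def optimizer(self):
    decay_step = self.epochs * 1281167 // self.batch_size
    lr = tf.keras.optimizers.schedules.PolynomialDecay(
        self.lr_start, decay_step, end_learning_rate=self.lr_end, power=1.0
    )
    gamma = tf.keras.optimizers.schedules.PolynomialDecay(
        self.gamma_start, decay_step, end_learning_rate=self.gamma_end, power=1.0
    )
    return lq.optimizers.CaseOptimizer(
        (optimizers.Bop.is_binary_variable,
         optimizers.Bop(threshold=self.threshold, gamma=gamma, name="Bop")),
        default_optimizer=tf.keras.optimizers.Adam(lr),
    )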
Example 6
def optimizer(self):
    return lq.optimizers.CaseOptimizer(
        (
            optimizers.Bop.is_binary_variable,
            optimizers.Bop(
                threshold=self.threshold,
                gamma=tf.keras.optimizers.schedules.ExponentialDecay(
                    self.gamma, self.decay_step, self.gamma_decay, staircase=True
                ),
                name="Bop",
            ),
        ),
        default_optimizer=tf.keras.optimizers.Adam(self.lr),  # for FP weights
    )
Example 7

def optimizer(self):
    return optimizers.Bop(
        fp_optimizer=tf.keras.optimizers.Adam(0.01),
        threshold=self.threshold,
        gamma=self.gamma,  # constant gamma, no schedule
    )
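Examples 1 and 6 schedule gamma with a staircase ExponentialDecay, which holds gamma constant within each decay_step window and then drops it by a factor of gamma_decay. A small self-contained check; the specific values are assumptions for illustration:

import tensorflow as tf

gamma = tf.keras.optimizers.schedules.ExponentialDecay(
    1e-4, decay_steps=1000, decay_rate=0.1, staircase=True
)
print(gamma(0).numpy(), gamma(999).numpy(), gamma(1000).numpy())
# 1e-4, 1e-4, 1e-5: constant within each 1000-step window, then a 10x drop.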