Example #1
0
  def __init__(self, X, Y, training=True, global_step=None):
    """Build the model state around input tensor X and label tensor Y.

    Args:
      X: input tensor; non-float32 inputs are cast and rescaled to [-1, 1].
      Y: label tensor, stored unchanged.
      training: whether the graph is built in training mode.
      global_step: accepted for interface compatibility; not used here.
    """
    self.shapeX = X.get_shape().as_list()
    self.shapeY = Y.get_shape().as_list()

    # float32 inputs are assumed to be preprocessed already (e.g. MNIST);
    # anything else (e.g. uint8 CIFAR10 / SVHN images) gets a simple
    # [0,255] -> [-1,1] rescale after casting.
    if X.dtype == tf.float32:
      print('Input data type is float32, we assume they are preprocessed already')
    else:
      X = tf.cast(X, tf.float32)
      print('Input data dype is not float32, perform simple preprocess [0,255]->[-1,1]')
      X = X / 127.5 - 1

    # Activation list seeded with the (possibly rescaled) input, then quantized.
    self.H = [X]
    self._QA(X)

    self.Y = Y

    self.lossFunc = Option.lossFunc
    self.L2 = Option.L2

    self.initializer = myInitializer.variance_scaling_initializer(
      factor=1.0, mode='FAN_IN', uniform=True)

    self.is_training = training
    self.GPU = Option.GPU

    # Weight bookkeeping: raw weights, quantized weights, and their ops.
    self.W, self.W_q = [], []
    self.W_clip_op, self.W_q_op = [], []
Example #2
0
    def __init__(self, X, Y, training=True, global_step=None):
        """Build the model state around input tensor X and label tensor Y.

        Args:
            X: input tensor; non-float32 inputs are cast to float32 and
                rescaled (dataset-dependent, see below).
            Y: label tensor, stored unchanged.
            training: whether the graph is built in training mode.
            global_step: accepted for interface compatibility; not used here.
        """
        self.shapeX = X.get_shape().as_list()
        self.shapeY = Y.get_shape().as_list()

        # Non-float32 inputs are cast and given a simple rescale:
        # MNIST is mapped with /256, every other dataset with /128 - 1.
        if X.dtype != tf.float32:
            X = tf.cast(X, tf.float32)
            X = X / 256. if Option.dataSet == 'MNIST' else X / 128. - 1

        # Activation list seeded with the (possibly rescaled) input, then quantized.
        self.H = [X]
        self._QA(X)
        self.Y = Y

        self.use_batch_norm = Option.use_batch_norm
        self.lossFunc = Option.lossFunc
        self.L2 = Option.L2

        self.initializer = myInitializer.variance_scaling_initializer(
            factor=1.0, mode='FAN_IN', uniform=True)

        self.is_training = training
        self.GPU = Option.GPU

        # Weight bookkeeping: raw weights, quantized weights, and their ops.
        self.W, self.W_q = [], []
        self.W_clip_op, self.W_q_op = [], []