Code example #1
File: models.py — project: freudh/lbt
 def get_layers(self):
     """LeNet-style quantized stack: three conv stages, then two dense layers."""
     def conv(name, ksize, padding):
         # Shared conv settings: stride 1 everywhere; bits/decay from self.
         return dfxp.Conv2d_q(
             name=name,
             bits=self.bits,
             ksize=ksize,
             strides=[1, 1, 1, 1],
             padding=padding,
             weight_decay=self.weight_decay,
         )

     def dense(name, in_units, units):
         return dfxp.Dense_q(
             name=name,
             bits=self.bits,
             in_units=in_units,
             units=units,
             weight_decay=self.weight_decay,
         )

     def pool():
         # 2x2 non-overlapping max-pool.
         return dfxp.MaxPool_q(
             ksize=[1, 2, 2, 1],
             strides=[1, 2, 2, 1],
             padding='VALID',
         )

     return [
         conv('conv1', [5, 5, 1, 6], 'SAME'),
         dfxp.ReLU_q(),
         pool(),
         conv('conv2', [5, 5, 6, 16], 'VALID'),
         dfxp.ReLU_q(),
         pool(),
         conv('conv3', [5, 5, 16, 120], 'VALID'),
         dfxp.ReLU_q(),
         dfxp.Flatten_q(120),
         dfxp.Dropout_q(self.dropout, self.training),
         dense('dense1', 120, 84),
         dfxp.ReLU_q(),
         dfxp.Dropout_q(self.dropout, self.training),
         dense('softmax', 84, 10),
     ]
Code example #2
 def get_layers(self):
     """MNIST-sized quantized net: conv -> batch-norm -> ReLU -> pool -> two dense."""
     # Arguments shared by every trainable quantized layer in this network.
     shared = dict(
         bits=self.bits,
         training=self.training,
         weight_decay=self.weight_decay,
     )
     layers = [
         dfxp.Conv2d_q(
             name='conv',
             ksize=[5, 5, 1, 20],
             strides=[1, 1, 1, 1],
             padding='VALID',
             **shared,
         ),
         # NOTE(review): the scope name below is misspelled ('normolization')
         # but is kept byte-for-byte so previously saved variables/checkpoints
         # still resolve.
         dfxp.BatchNorm_q(
             name='batch_normolization',
             num_features=20,
             **shared,
         ),
         dfxp.ReLU_q(),
         dfxp.MaxPool_q(
             ksize=[1, 2, 2, 1],
             strides=[1, 2, 2, 1],
             padding='VALID',
         ),
         dfxp.Flatten_q(12 * 12 * 20),
         dfxp.Dense_q(
             name='dense1',
             in_units=12 * 12 * 20,
             units=100,
             **shared,
         ),
         dfxp.Dense_q(
             name='dense2',
             in_units=100,
             units=10,
             **shared,
         ),
     ]
     return layers
Code example #3
File: models.py — project: freudh/lbt
    def get_layers(self):
        """VGG-style quantized net: three conv-conv-pool stages, then three dense layers."""
        def conv(name, cin, cout):
            # All convs are 3x3, stride 1, SAME padding; bits/decay from self.
            return dfxp.Conv2d_q(
                name=name,
                bits=self.bits,
                ksize=[3, 3, cin, cout],
                strides=[1, 1, 1, 1],
                padding='SAME',
                weight_decay=self.weight_decay,
            )

        def dense(name, in_units, units):
            return dfxp.Dense_q(
                name=name,
                bits=self.bits,
                in_units=in_units,
                units=units,
                weight_decay=self.weight_decay,
            )

        def pool():
            # 3x3 window with stride 2 — halves each spatial dimension.
            return dfxp.MaxPool_q(
                ksize=[1, 3, 3, 1],
                strides=[1, 2, 2, 1],
                padding='SAME',
            )

        def drop():
            return dfxp.Dropout_q(self.dropout, self.training)

        relu = dfxp.ReLU_q

        return [
            # stage 1 (no dropout before the first conv)
            conv('conv1-1', 3, 128), relu(),
            conv('conv1-2', 128, 128), relu(),
            pool(),

            # stage 2
            drop(),
            conv('conv2-1', 128, 256), relu(),
            conv('conv2-2', 256, 256), relu(),
            pool(),

            # stage 3
            drop(),
            conv('conv3-1', 256, 512), relu(),
            conv('conv3-2', 512, 512), relu(),
            pool(),

            dfxp.Flatten_q(512 * 4 * 4),

            # classifier head
            drop(), dense('dense1', 512 * 4 * 4, 1024), relu(),
            drop(), dense('dense2', 1024, 1024), relu(),
            drop(), dense('softmax', 1024, 10),
        ]
Code example #4
File: models.py — project: freudh/lbt
    def get_layers(self):
        """Quantized CIFAR-style net: three conv/pool stages, then two dense layers."""
        def conv(name, cin, cout):
            # Every conv is 5x5, stride 1, SAME padding; bits/decay from self.
            return dfxp.Conv2d_q(
                name=name,
                bits=self.bits,
                ksize=[5, 5, cin, cout],
                strides=[1, 1, 1, 1],
                padding='SAME',
                weight_decay=self.weight_decay,
            )

        def dense(name, in_units, units):
            return dfxp.Dense_q(
                name=name,
                bits=self.bits,
                in_units=in_units,
                units=units,
                weight_decay=self.weight_decay,
            )

        def pool():
            # 3x3 window with stride 2 — halves each spatial dimension.
            return dfxp.MaxPool_q(
                ksize=[1, 3, 3, 1],
                strides=[1, 2, 2, 1],
                padding='SAME',
            )

        def drop():
            return dfxp.Dropout_q(self.dropout, self.training)

        return [
            # conv1 (no dropout before the first conv)
            conv('conv1', 3, 64),
            dfxp.ReLU_q(),
            pool(),

            # conv2
            drop(),
            conv('conv2', 64, 128),
            dfxp.ReLU_q(),
            pool(),

            # conv3
            drop(),
            conv('conv3', 128, 128),
            dfxp.ReLU_q(),
            pool(),

            dfxp.Flatten_q(128 * 4 * 4),

            # dense1
            drop(),
            dense('dense1', 128 * 4 * 4, 400),
            dfxp.ReLU_q(),

            # softmax
            drop(),
            dense('softmax', 400, 10),
        ]
Code example #5
File: trainer.py — project: hixio-mh/LOW-BIT-TRAINING
    def __init__(self, model, dataset, dataset_name, logger, params):
        """Build a per-layer-inspectable training graph for a quantized net.

        Each layer's input tensor (``conv_in``, ``batch_in``, ``scale_in``,
        ...) is stored on ``self`` so that gradients with respect to
        intermediate activations can be fetched, alongside the extra tensors
        each layer's ``forward`` returns.

        Args:
            model, dataset, dataset_name: not used in this constructor —
                presumably part of a shared trainer interface; TODO confirm.
            logger: stored on ``self.logger`` for later use.
            params: hyper-parameter object; fields read here are ``n_epoch``,
                ``exp_path``, ``lr``, ``lr_decay_epoch``, ``lr_decay_factor``,
                ``momentum`` and ``bits``.
        """

        self.n_epoch = params.n_epoch
        self.exp_path = params.exp_path

        self.logger = logger

        # Private graph: everything below is isolated from the default graph.
        self.graph = tf.Graph()
        with self.graph.as_default():
            global_step = tf.train.get_or_create_global_step()

            # NOTE(review): the scheduler is constructed but the optimizer is
            # given the constant params.lr, so the decay schedule is never
            # actually applied here — confirm whether that is intentional.
            self.lr_scheduler = LearningRateScheduler(params.lr,
                                                      params.lr_decay_epoch,
                                                      params.lr_decay_factor)
            optimizer = tf.train.MomentumOptimizer(params.lr, params.momentum)

            # NOTE(review): unused — apparent leftovers of a multi-tower
            # (multi-GPU) setup.
            tower_grads, tower_loss = [], []

            with tf.variable_scope(tf.get_variable_scope()):

                # `avatar` is not defined in this chunk; presumably a data
                # feeding module imported at the top of the file — TODO confirm.
                images = avatar.batch_data()
                # Reshape to NHWC 28x28x1 and promote to float64.
                images = tf.cast(tf.reshape(images, [-1, 28, 28, 1]),
                                 dtype=tf.float64)
                lables = avatar.batch_lable()  # [sic] spelling matches the avatar API

                # Quantized conv; its forward() returns five tensors — the
                # output plus what appear to be weight/scale/quantized-weight
                # tensors (names suggest so; verify against dfxp).
                conv = dfxp.Conv2d_q(name='conv',
                                     bits=params.bits,
                                     training=False,
                                     ksize=[5, 5, 1, 20],
                                     strides=[1, 1, 1, 1],
                                     padding='VALID')
                self.conv_in = images
                self.batch_in, self.conv_w, self.x_s, self.w_s, self.conv_w_q = conv.forward(
                    self.conv_in)
                # NOTE(review): normalization is built with training=True while
                # every other layer here uses training=False — verify.
                batch = dfxp.Normalization_q(name='batch',
                                             bits=params.bits,
                                             num_features=20,
                                             training=True)
                self.scale_in = batch.forward(self.batch_in)
                scale = dfxp.Rescale_q(name='scale',
                                       bits=params.bits,
                                       training=False,
                                       num_features=20)
                self.relu_in, self.scale_w, self.scale_b = scale.forward(
                    self.scale_in)
                relu = dfxp.ReLU_q()
                self.pool_in = relu.forward(self.relu_in)
                pool = dfxp.MaxPool_q(ksize=[1, 2, 2, 1],
                                      strides=[1, 2, 2, 1],
                                      padding='VALID')
                # 28x28 input through a 5x5 VALID conv gives 24x24; 2x2 pooling
                # halves it to 12x12 with 20 channels, hence 12*12*20 features.
                flat = dfxp.Flatten_q(12 * 12 * 20)
                self.fc1_in = pool.forward(self.pool_in)
                self.flat = flat.forward(self.fc1_in)
                fc1 = dfxp.Dense_q(name='dense1',
                                   bits=params.bits,
                                   training=False,
                                   in_units=12 * 12 * 20,
                                   units=100)
                self.fc2_in, self.w1, self.b1 = fc1.forward(self.flat)
                fc2 = dfxp.Dense_q(name='dense2',
                                   bits=params.bits,
                                   training=False,
                                   in_units=100,
                                   units=10)
                self.softmax_in, self.w2, self.b2 = fc2.forward(self.fc2_in)
                # Per-example cross-entropy on the dense2 logits.
                self.loss = tf.nn.softmax_cross_entropy_with_logits(
                    labels=lables, logits=self.softmax_in)
                # Gradients of the loss w.r.t. each stored layer input, for
                # inspecting the back-propagated signal at each stage.
                self.conv_indiff = tf.gradients(self.loss, self.conv_in)
                self.batch_indiff = tf.gradients(self.loss, self.batch_in)
                self.scale_indiff = tf.gradients(self.loss, self.scale_in)
                self.train_step = optimizer.minimize(self.loss)

            self.init_op = tf.global_variables_initializer()
            self.saver = tf.train.Saver()

            # NOTE(review): no summary ops are created above, so merge_all()
            # returns None here — confirm summaries are added elsewhere.
            self.summary = tf.summary.merge_all()
            self.graph.finalize()