Example #1
def conv2d_fixed_padding(inputs, filters, kernel_size, strides, data_format,
                         name):
    """Strided 2-D convolution with explicit padding."""
    # The padding is consistent and is based only on `kernel_size`, not on the
    # dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone).
    if strides > 1:
        inputs = fixed_padding(inputs, kernel_size, data_format)

    # Input channel count; axis 1 assumes NCHW ('channels_first') data layout.
    c_in = inputs.get_shape().as_list()[1]

    def _compute_fans(shape):
        """Computes the number of input and output units for a weight shape.

        Args:
          shape: Integer shape tuple or TF tensor shape.

        Returns:
          A tuple of scalars (fan_in, fan_out).
        """
        if len(shape) < 1:  # Just to avoid errors for constants.
            fan_in = fan_out = 1
        elif len(shape) == 1:
            fan_in = fan_out = shape[0]
        elif len(shape) == 2:
            fan_in = shape[0]
            fan_out = shape[1]
        else:
            # Assuming convolution kernels (2D, 3D, or more).
            # kernel shape: (..., input_depth, depth)
            receptive_field_size = 1.
            for dim in shape[:-2]:
                receptive_field_size *= dim
            fan_in = shape[-2] * receptive_field_size
            fan_out = shape[-1] * receptive_field_size
        return fan_in, fan_out

    shape = [kernel_size, kernel_size, c_in, filters]
    fan_in, fan_out = _compute_fans(shape)

    with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
        # Kernel initialized from a truncated normal and quantized to 16 bits.
        W = tf.Variable(fBits(
            tf.truncated_normal(shape=shape,
                                stddev=1.0 / tf.sqrt(float(fan_in))), 16),
                        name='conv2d/kernel')

    # fw (a helper defined elsewhere) produces the quantized kernel used below.
    w_q = fw(W)
    if strides == 1:
        padding = 'SAME'
    else:
        padding = 'VALID'
    inputs = tf.nn.conv2d(inputs,
                          w_q,
                          strides=[1, 1, strides, strides],
                          padding=padding,
                          data_format='NCHW',
                          name=name)

    # fe2 (defined elsewhere) post-processes the convolution output.
    inputs = fe2(inputs)
    return inputs
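
The helpers fixed_padding, fBits, fw, and fe2 are defined elsewhere in the repository and are not shown in this snippet. Purely as an illustration, a DoReFa-style fixed-point quantizer with a straight-through estimator might look like the hypothetical sketch below; the real fBits may behave differently.

import tensorflow as tf

def quantize_bits(x, bits):
    """Hypothetical stand-in for fBits: snap `x` onto a fixed-point grid with
    2**bits steps while letting gradients flow through unchanged."""
    scale = float(2 ** bits)
    rounded = tf.round(x * scale) / scale
    # Straight-through estimator: the forward pass uses the rounded value,
    # the backward pass sees the identity.
    return x + tf.stop_gradient(rounded - x)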
Example #2
    def __getitem__(self, slice_spec):
        """Basic indexing, returns a `TensorTrain` containing the specified region.

        Examples:
          >>> a = t3f.random_tensor((2, 3, 4))
          >>> a[1, :, :]
          is a 2D TensorTrain 3 x 4.
          >>> a[1:2, :, :]
          is a 3D TensorTrain 1 x 3 x 4.
        """
        if len(slice_spec) != self.ndims():
            raise ValueError('Expected %d indices, got %d' %
                             (self.ndims(), len(slice_spec)))
        new_tt_cores = []
        remainder = None
        for i in range(self.ndims()):
            curr_core = self.tt_cores[i]
            if self.is_tt_matrix():
                raise NotImplementedError
            else:
                sliced_core = curr_core[:, slice_spec[i], :]
                if len(curr_core.get_shape()) != len(sliced_core.get_shape()):
                    # This index is specified exactly and we want to collapse this axis.
                    if remainder is None:
                        remainder = sliced_core
                    else:
                        remainder = tf.matmul(remainder, sliced_core)
                else:
                    if remainder is not None:
                        # Fold the remainder from the previously collapsed
                        # cores into the current core.
                        sliced_core = tf.einsum('ab,bid->aid', remainder,
                                                sliced_core)
                        remainder = None
                    new_tt_cores.append(sliced_core)

        if remainder is not None:
            # The remainder obtained from collapsing the last cores.
            new_tt_cores[-1] = tf.einsum('aib,bd->aid', new_tt_cores[-1],
                                         remainder)
            remainder = None
        # TODO: infer the output ranks and shape.

        # Quantize every core of the sliced TensorTrain to 8 bits.
        for i in range(len(new_tt_cores)):
            new_tt_cores[i] = fBits(new_tt_cores[i], 8)
        return TensorTrain(new_tt_cores)
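
Based on the docstring above, this patched __getitem__ would be used like the following sketch (assuming the t3f package with the patch applied is importable):

import t3f

a = t3f.random_tensor((2, 3, 4))  # 3D TensorTrain of shape 2 x 3 x 4
b = a[1, :, :]                    # 2D TensorTrain of shape 3 x 4
c = a[1:2, :, :]                  # 3D TensorTrain of shape 1 x 3 x 4
# With the patch above, the cores of b and c are quantized to 8 bits by fBits.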
Example #3
def matrix_with_random_cores(shape,
                             tt_rank=2,
                             mean=0.,
                             stddev=1.,
                             dtype=tf.float32,
                             name='t3f_matrix_with_random_cores'):
    """Generate a TT-matrix of given shape with N(mean, stddev^2) cores.

  Args:
    shape: 2d array, shape[0] is the shape of the matrix row-index,
      shape[1] is the shape of the column index.
      shape[0] and shape[1] should have the same number of elements (d)
      Also supports omitting one of the dimensions for vectors, e.g.
        matrix_with_random_cores([[2, 2, 2], None])
      and
        matrix_with_random_cores([None, [2, 2, 2]])
      will create an 8-element column and row vectors correspondingly.
    tt_rank: a number or a (d+1)-element array with ranks.
    mean: a number, the mean of the normal distribution used for
      initializing TT-cores.
    stddev: a number, the standard deviation of the normal distribution used
      for initializing TT-cores.
    dtype: [tf.float32] dtype of the resulting matrix.
    name: string, name of the Op.

  Returns:
    TensorTrain containing a TT-matrix of size
      np.prod(shape[0]) x np.prod(shape[1])
  """
    # TODO: good distribution to init training.
    # In case the shape is immutable.
    shape = list(shape)
    # In case shape represents a vector, e.g. [None, [2, 2, 2]]
    if shape[0] is None:
        shape[0] = np.ones(len(shape[1]), dtype=int)
    # In case shape represents a vector, e.g. [[2, 2, 2], None]
    if shape[1] is None:
        shape[1] = np.ones(len(shape[0]), dtype=int)
    shape = np.array(shape)
    tt_rank = np.array(tt_rank)
    _validate_input_parameters(is_tensor=False, shape=shape, tt_rank=tt_rank)

    num_dims = shape[0].size
    if tt_rank.size == 1:
        tt_rank = tt_rank * np.ones(num_dims - 1)
        tt_rank = np.concatenate([[1], tt_rank, [1]])

    tt_rank = tt_rank.astype(int)
    tt_cores = [None] * num_dims
    with tf.name_scope(name):
        for i in range(num_dims):
            curr_core_shape = (tt_rank[i], shape[0][i], shape[1][i],
                               tt_rank[i + 1])
            tt_cores[i] = tf.random_normal(curr_core_shape,
                                           mean=mean,
                                           stddev=stddev,
                                           dtype=dtype)
            # Quantize each core to 8 bits (first/last and middle cores alike).
            tt_cores[i] = fBits(tt_cores[i], 8)

        return TensorTrain(tt_cores, shape, tt_rank)
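
A usage sketch for the function above, following its docstring; the modes and rank below are illustrative and assume the definitions above are in scope:

# 8 x 27 TT-matrix: row modes (2, 2, 2), column modes (3, 3, 3), TT-rank 4.
mat = matrix_with_random_cores([[2, 2, 2], [3, 3, 3]], tt_rank=4)

# Omitting one side gives an 8-element column vector, as the docstring notes.
col = matrix_with_random_cores([[2, 2, 2], None])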
Example #4
def resnet_model_fn(features, labels, mode, params):
    """Our model_fn for ResNet to be used with our Estimator."""
    tf.summary.image('images', features, max_outputs=6)

    network = resnet_model.imagenet_resnet_v2(params['resnet_size'],
                                              _LABEL_CLASSES,
                                              params['data_format'])

    logits = network(inputs=features,
                     is_training=(mode == tf.estimator.ModeKeys.TRAIN))

    predictions = {
        'classes': tf.argmax(logits, axis=1),
        'probabilities': tf.nn.softmax(logits, name='softmax_tensor')
    }

    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)

    # Calculate loss, which includes softmax cross entropy and L2 regularization.
    cross_entropy = tf.losses.softmax_cross_entropy(logits=logits,
                                                    onehot_labels=labels)

    #cross_entropy = -tf.reduce_sum(labels*tf.log(logits))

    #cross_entropy = tf.losses.sparse_softmax_cross_entropy(
    #logits=logits, labels=labels)

    # Create a tensor named cross_entropy for logging purposes.
    tf.identity(cross_entropy, name='cross_entropy')
    tf.summary.scalar('cross_entropy', cross_entropy)

    # Add weight decay to the loss. We exclude the batch norm variables because
    # doing so leads to a small improvement in accuracy.
    loss = cross_entropy + _WEIGHT_DECAY * tf.add_n([
        tf.nn.l2_loss(v)
        for v in tf.trainable_variables() if 'BatchNorm' not in v.name
    ])

    if mode == tf.estimator.ModeKeys.TRAIN:
        # The reference schedule scales the learning rate linearly with the
        # batch size (0.1 at batch size 256); a fixed value is used here instead.
        #initial_learning_rate = 0.1 * params['batch_size'] / 256
        initial_learning_rate = 0.05
        batches_per_epoch = _NUM_IMAGES['train'] / params['batch_size']
        global_step = tf.train.get_or_create_global_step()

        # Decay the learning rate at 30, 60, 80, and 90 epochs.
        boundaries = [
            int(batches_per_epoch * epoch) for epoch in [30, 60, 80, 90]
        ]
        #int(batches_per_epoch * epoch) for epoch in [20, 30, 40, 50]]
        values = [
            initial_learning_rate * decay
            for decay in [1, 0.12, 0.06, 0.03, 0.03]
        ]
        learning_rate = tf.train.piecewise_constant(
            tf.cast(global_step, tf.int32), boundaries, values)

        # Piecewise-constant scale fed to the gradient quantizer fg below.
        g_values = [128., 128., 32., 8., 2.]
        g_scale = tf.train.piecewise_constant(tf.cast(global_step, tf.int32),
                                              boundaries, g_values)
        tf.identity(g_scale, name='g_scale')

        # flr (a quantization helper defined elsewhere) is applied to the learning rate.
        learning_rate = flr(learning_rate)
        # Create a tensor named learning_rate for logging purposes.
        tf.identity(learning_rate, name='learning_rate')
        tf.summary.scalar('learning_rate', learning_rate)

        optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate,
                                               momentum=_MOMENTUM)

        # Batch norm requires update_ops to be added as a train_op dependency.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        gradTrainBatch = optimizer.compute_gradients(loss)

        grad = []
        var = []
        for grad_and_vars in gradTrainBatch:
            grad.append(grad_and_vars[0])
            var.append(grad_and_vars[1])

        def QuantizeG(gradTrainBatch):
            # Keep the first conv and dense-layer gradients at full precision,
            # quantize BatchNorm gradients with fgBN and all others with fg
            # using the g_scale schedule defined above.
            grads = []
            for grad_and_vars in gradTrainBatch:
                if grad_and_vars[1].name == 'conv2d/kernel:0' or grad_and_vars[
                        1].name.find('dense') > -1:

                    grads.append([grad_and_vars[0] * 1.0, grad_and_vars[1]])
                elif grad_and_vars[1].name.find('BatchNorm') > -1:

                    grads.append(
                        [fgBN(grad_and_vars[0], 1.0), grad_and_vars[1]])

                else:
                    grads.append(
                        [fg(grad_and_vars[0], 1.0, g_scale), grad_and_vars[1]])

            return grads

        gradTrainBatch = QuantizeG(gradTrainBatch)

        Mom_Q = []
        Mom_W = []

        w_vars = tf.trainable_variables()
        for w_var in w_vars:
            if w_var.name == (
                    'conv2d/kernel:0') or w_var.name.find('dense') > -1:
                # First conv and dense-layer weights stay at full precision.
                Mom_W.append(tf.assign(w_var, w_var))
            else:
                # All other weights are re-quantized to 24 bits after each step.
                Mom_W.append(tf.assign(w_var, fBits(w_var, 24)))

        with tf.control_dependencies(update_ops):
            train_op = optimizer.apply_gradients(gradTrainBatch,
                                                 global_step=global_step)
            opt_slot_name = optimizer.get_slot_names()
            train_vars = tf.trainable_variables()
            for train_var in train_vars:
                mom_var = optimizer.get_slot(train_var, opt_slot_name[0])
                if train_var.name == ('conv2d/kernel:0'
                                      ) or train_var.name.find('dense') > -1:
                    # Momentum for the first conv and dense layers is left untouched.
                    pass
                else:
                    # Quantize momentum accumulators of all other layers to 13 bits.
                    Mom_Q.append(tf.assign(mom_var, fBits(mom_var, 13)))

            train_op = tf.group([train_op, Mom_Q, Mom_W])

    else:
        train_op = None

    accuracy = tf.metrics.accuracy(tf.argmax(labels, axis=1),
                                   predictions['classes'])
    accuracy5 = tf.metrics.mean(
        tf.nn.in_top_k(logits, tf.argmax(labels, axis=1), k=5))

    metrics = {'accuracy': accuracy, 'accuracy5': accuracy5}

    # Create a tensor named train_accuracy for logging purposes.
    tf.identity(accuracy[1], name='train_accuracy')
    tf.summary.scalar('train_accuracy', accuracy[1])

    return tf.estimator.EstimatorSpec(mode=mode,
                                      predictions=predictions,
                                      loss=loss,
                                      train_op=train_op,
                                      eval_metric_ops=metrics)
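
A minimal wiring sketch for this model_fn with tf.estimator; the model_dir, input_fn, and parameter values below are placeholders, and only the params keys read above (resnet_size, data_format, batch_size) are required:

classifier = tf.estimator.Estimator(
    model_fn=resnet_model_fn,
    model_dir='/tmp/resnet_model',      # placeholder path
    params={
        'resnet_size': 50,              # forwarded to imagenet_resnet_v2
        'data_format': 'channels_first',
        'batch_size': 256,              # used to derive batches_per_epoch
    })
# classifier.train(input_fn=train_input_fn)  # train_input_fn not shown here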