Example #1
 def __call__(self, kwargs=None):
     input_tensor = ModelAssign(kwargs, 'input_tensor', None)
     self.input_shape = input_tensor.get_shape()[1:]
     with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE) as scope:
         output = util.flatten(input_tensor)
     self.output_shape = output.get_shape()[1:]
     self.output_tensor = output
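These snippets all fetch their configuration through a ModelAssign helper that the hits themselves do not show. A minimal sketch, assuming it is nothing more than a dictionary lookup with a default (the real helper may do more validation):

def ModelAssign(kwargs, key, default=None):
    # Hypothetical sketch: read a configuration value, falling back to a default.
    if kwargs is None:
        return default
    return kwargs.get(key, default)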
Example #2
 def __call__(self, kwargs=None, method='training'):
     logits = ModelAssign(kwargs, 'logits', None)
     labels = ModelAssign(kwargs, 'labels', None)
     with tf.device("/cpu:0"):
         accuracy = tf.keras.metrics.top_k_categorical_accuracy(labels,
                                                                logits,
                                                                k=self.k)
     self.output_tensor = accuracy
Example #3
 def __init__(self, kwargs):
     """
     Accuracy.
     :param kwargs:
     """
     Layer.__init__(self, kwargs)
     self.name = ModelAssign(kwargs, 'name', None)
     self.logits = ModelAssign(kwargs, 'logits', None)
     self.labels = ModelAssign(kwargs, 'labels', None)
     self.output_tensor = None
Example #4
 def __init__(self, kwargs):
     """
     Top-k accuracy for large-scale image classification (e.g., ImageNet).
     :param kwargs: Configurations
     """
     Layer.__init__(self, kwargs)
     self.name = ModelAssign(kwargs, 'name', None)
     self.logits = ModelAssign(kwargs, 'logits', None)
     self.labels = ModelAssign(kwargs, 'labels', None)
     self.k = ModelAssign(kwargs, 'k', 5)
     self.output_tensor = None
Example #5
    def __init__(self, kwargs):
        Layer.__init__(self, kwargs)
        self.input_shape = None
        self.name = ModelAssign(kwargs, 'name', None)
        self.label_smoothing = ModelAssign(kwargs, 'label_smoothing', 0.0)
        self.output_tensor = None

        self.num_trainable_parameters = 0
        self.num_non_trainable_parameters = 0
        self.shared_trainable_parameters = 0
        self.MACs = 0
Example #6
 def __call__(self, kwargs=None, method='training'):
     """
     Call the accuracy class and return a metric tensor.
     :param kwargs: configurations
     :param method: scope.
     :return:
     """
     logits = ModelAssign(kwargs, 'logits', None)
     labels = ModelAssign(kwargs, 'labels', None)
     self.output_tensor = tf.reduce_mean(
         tf.cast(tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1)),
                 dtype=tf.float32))
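A quick numeric check of the top-1 accuracy formula above, as a minimal TF1-style sketch with made-up logits and one-hot labels:

import tensorflow as tf

logits = tf.constant([[2.0, 0.1], [0.3, 1.5], [0.9, 0.2]])
labels = tf.constant([[1.0, 0.0], [0.0, 1.0], [0.0, 1.0]])  # one-hot
accuracy = tf.reduce_mean(
    tf.cast(tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1)),
            dtype=tf.float32))
with tf.Session() as sess:
    print(sess.run(accuracy))  # two of three argmax matches -> ~0.6667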
Example #7
    def __init__(self, kwargs):
        Layer.__init__(self, kwargs)
        self.name = ModelAssign(kwargs, 'name', None)
        self.axis = ModelAssign(kwargs, 'axis', -1)
        self.activation = ModelAssign(kwargs, 'activation', None)
        self.input_shape = None
        self.output_shape = None
        self.output_tensor = None

        self.num_trainable_parameters = 0
        self.num_non_trainable_parameters = 0
        self.shared_trainable_parameters = 0
        self.MACs = 0
Example #8
 def __call__(self, kwargs=None):
     input_tensor = ModelAssign(kwargs, 'input_tensor', None)
     self.input_shape = input_tensor.get_shape()[1:]
     self.is_training = ModelAssign(kwargs, 'is_training')
     with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE) as scope:
         output = batch_normalization(input_tensor,
                                      is_training=self.is_training,
                                      activation='linear',
                                      trainable=self.trainable)
     self.output_shape = output.get_shape()[1:]
     self.output_tensor = output
     self.MACs = int(self.output_shape[0]) * int(
         self.output_shape[1]) * int(self.output_shape[2]) * 2
Example #9
    def __call__(self, kwargs=None):
        input_tensor = ModelAssign(kwargs, 'input_tensor', None)
        self.input_shape = input_tensor.get_shape()[1:]
        self.skip_from_names = ModelAssign(kwargs, 'skip_from_names', None)
        skip_from = ModelAssign(kwargs, 'skip_from', None)
        self.is_training = ModelAssign(kwargs, 'is_training')
        initializer = ModelAssign(kwargs, 'initializer',
                                  G.BACKEND_DEFAULT_CONV_INITIALIZER)
        regularizer = ModelAssign(kwargs, 'regularizer',
                                  G.BACKEND_DEFAULT_REGULARIZER)
        regularizer_strength = ModelAssign(kwargs, 'regularizer_strength',
                                           1e-4)

        # infer filters during compiling
        self.filters = int(
            input_tensor.get_shape()[-1]) * self.depthwise_multiplier

        with tf.variable_scope(self.name) as scope:
            self.output_tensor = util.depthwise_conv2d(
                inputs=input_tensor,
                depth_multiplier=self.depthwise_multiplier,
                kernel_size=self.kernel_size,
                strides=self.strides,
                padding=self.padding,
                batchnorm=self.batchnorm,
                activation=self.activation,
                initializer=initializer(),
                regularizer=regularizer(regularizer_strength),
                use_bias=self.use_bias,
                is_training=self.is_training,
                mode=G.EXEC_CONV_MODE)

            self.output_shape = self.output_tensor.get_shape()[1:]
            # Normal
            self.num_trainable_parameters += (self.kernel_size *
                                              self.kernel_size) * self.filters
            if self.use_bias:
                self.num_trainable_parameters += self.filters
            self.MACs += self.kernel_size * self.kernel_size * self.filters * int(
                self.output_shape[0]) * int(self.output_shape[1])
            if self.batchnorm and not G.DISABLE_BATCHNORM_COUNT:
                self.num_trainable_parameters += 2 * self.filters
                self.num_non_trainable_parameters += 2 * self.filters
                self.MACs += int(self.output_shape[0]) * int(
                    self.output_shape[1]) * int(self.output_shape[2]) * 2

            self.mem_cost = self.num_non_trainable_parameters + self.num_trainable_parameters
            self.peak_activation_mem += int(self.input_shape[0]) * int(
                self.input_shape[1]) * int(self.input_shape[2])
            self.peak_activation_mem += int(self.output_shape[0]) * int(
                self.output_shape[1]) * int(self.output_shape[2])
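Plugging assumed numbers (not from the source) into the depthwise parameter and MAC counts above: with kernel_size=3, depthwise_multiplier=1 and a 112x112x32 input at stride 1, filters = 32 * 1 = 32, giving 288 weights and about 3.6M multiply-accumulates:

k, mult, c_in = 3, 1, 32        # kernel size, depthwise multiplier, input channels
h, w = 112, 112                 # output spatial size ('same' padding, stride 1)
filters = c_in * mult           # 32, inferred exactly as in the snippet above
params = k * k * filters        # 288 trainable weights (no bias)
macs = k * k * filters * h * w  # 3612672 multiply-accumulates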
Example #10
 def __call__(self, kwargs=None):
     """
     Call the attention module and return an output tensor.
     :param kwargs: configurations
     :return: an output tensor after feeding the input into the attention model
     """
     input_tensor = ModelAssign(kwargs, 'input_tensor', None)
     self.input_shape = input_tensor.get_shape()[1:]
     if self.method == 'softmax':
         with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE):
             self.outputs = util.softmax_self_attention(inputs=input_tensor)
     else:
         raise NotImplementedError
Example #11
    def __init__(self, kwargs):
        Layer.__init__(self, kwargs)
        self.name = ModelAssign(kwargs, 'name', None)
        self.input_shape = Str2List(ModelAssign(kwargs, 'input_shape', None))
        self.dtype = ModelAssign(kwargs, 'dtype', 'float32')
        self.output_shape = None
        self.output_tensor = None

        self.mean = ModelAssign(kwargs, 'mean', None)
        self.std = ModelAssign(kwargs, 'std', None)

        self.num_trainable_parameters = 0
        self.num_non_trainable_parameters = 0
        self.shared_trainable_parameters = 0
        self.MACs = 0
Example #12
    def __call__(self, kwargs=None):
        input_tensor = ModelAssign(kwargs, 'input_tensor', None)
        self.input_shape = input_tensor.get_shape()[1:]
        self.dropout_tensor = tf.placeholder(dtype=tf.float32)
        if self.dropout > 1e-12:
            output = tf.nn.dropout(input_tensor,
                                   keep_prob=1 - self.dropout_tensor,
                                   name="dropout")
        else:
            output = input_tensor
        self.output_shape = output.get_shape()[1:]
        self.output_tensor = output

        if self.dropout > 1e-12:
            return {'dropout': self.dropout_tensor}
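A hypothetical usage of the dict this __call__ returns (layer and x are stand-in names, not from the source): the placeholder is fed the drop rate at training time and 0.0 at evaluation time, since keep_prob is computed as 1 minus the fed value:

feeds = layer({'input_tensor': x})        # {'dropout': <placeholder>} when rate > 0
if feeds:
    train_feed = {feeds['dropout']: 0.5}  # drop half the activations
    eval_feed = {feeds['dropout']: 0.0}   # keep everything at evaluation time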
Example #13
    def __call__(self, kwargs=None):
        skip_from = ModelAssign(kwargs, 'skip_from', None)
        self.skip_from_names = ModelAssign(kwargs, 'skip_from_names', None)
        input_tensor = ModelAssign(kwargs, 'input_tensor', None)
        self.input_shape = input_tensor.get_shape()[1:]
        self.is_training = ModelAssign(kwargs, 'is_training', None)
        print(self.is_training)
        in_dim = int(input_tensor.get_shape()[-1])
        initializer = ModelAssign(kwargs, 'initializer',
                                  G.BACKEND_DEFAULT_FC_INITIALIZER)
        regularizer = ModelAssign(kwargs, 'regularizer',
                                  G.BACKEND_DEFAULT_REGULARIZER)
        regularizer_strength = ModelAssign(kwargs, 'regularizer_strength',
                                           1e-4)
        print("Intializing Dense Layer with L2-reg=%f" % regularizer_strength)
        with tf.variable_scope(self.name) as scope:
            output = util.dense(input_tensor,
                                units=self.units,
                                activation=self.activation,
                                batchnorm=self.batchnorm,
                                initializer=initializer(),
                                regularizer=regularizer(regularizer_strength),
                                is_training=self.is_training,
                                trainable=self.trainable,
                                use_bias=self.use_bias)
            self.num_trainable_parameters += (in_dim + 1) * self.units
            self.MACs += int(in_dim) * self.units
            if self.batchnorm:
                self.num_non_trainable_parameters += 2 * self.units
                self.num_trainable_parameters += 2 * self.units
                self.MACs += int(self.units) * 2

            if self.dropout > 1e-12:
                self.dropout_tensor = tf.placeholder(dtype=tf.float32,
                                                     shape=())
                output = tf.nn.dropout(output,
                                       keep_prob=1 - self.dropout_tensor,
                                       name="dropout")
            self.output_tensor = output
            self.output_shape = self.output_tensor.get_shape()[1:]
            self.mem_cost = self.num_non_trainable_parameters + self.num_trainable_parameters
            self.peak_activation_mem += int(self.input_shape[0])
            self.peak_activation_mem += int(self.output_shape[0])

            if self.dropout > 1e-12:
                return {'dropout': self.dropout_tensor}
Example #14
    def __init__(self, kwargs):
        """
        Concatenation. Concatenate two tensors if possible.
        :param kwargs: configurations for concatenation layers.
        """
        Layer.__init__(self, kwargs)
        self.name = ModelAssign(kwargs, 'name', None)
        self.activation = ModelAssign(kwargs, 'activation', None)
        self.input_shape = None
        self.output_shape = None
        self.output_tensor = None

        self.num_trainable_parameters = 0
        self.num_non_trainable_parameters = 0
        self.shared_trainable_parameters = 0
        self.mem_cost = 0
        self.MACs = 0
        self.peak_activation_mem = 0
Example #15
    def __call__(self, kwargs=None):
        input_tensor = ModelAssign(kwargs, 'input_tensor', None)
        is_training = ModelAssign(kwargs, 'is_training')
        if not isinstance(input_tensor, list):
            raise NotImplementedError("The input tensor must be a list of tensors")
        # Add trainable parameter to control scale
        base_filters = int(input_tensor[-1].get_shape()[-1])
        input_tensor_scaled = []
        with tf.variable_scope(self.name) as scope:
            idx = 0
            for tensor in input_tensor:
                scale = tf.get_variable(name='scale_%d' % idx,
                                        dtype=tf.float32,
                                        shape=(),
                                        initializer=tf.initializers.ones())
                current_filter = int(tensor.get_shape()[-1])
                if G.EXEC_CONV_MODE == 'relu-conv-bn':
                    activation_ = self.activation
                else:
                    activation_ = 'linear'
                if current_filter != base_filters:
                    tensor = convolution2d(tensor,
                                           filters=base_filters,
                                           kernel_size=1,
                                           padding='same',
                                           activation=activation_,
                                           batchnorm=True,
                                           strides=1,
                                           use_bias=False,
                                           is_training=is_training,
                                           mode=G.EXEC_CONV_MODE)
                    print(tensor)
                print(scale)
                scaled_tensor = tensor * scale
                input_tensor_scaled.append(scaled_tensor)
                idx += 1

            output = tf.add_n(input_tensor_scaled, name='added_tensor')
            if G.EXEC_CONV_MODE == 'conv-bn-relu':
                output = apply_activation(output, self.activation)

            self.output_tensor = output
        self.output_shape = self.output_tensor.get_shape()[1:]
Example #16
    def __call__(self, kwargs=None):
        """
        Call the attention module and return an output tensor.
        :param kwargs: configurations
        :return: an output tensor after feeding the input into the attention model
        """
        queries = ModelAssign(kwargs, 'queries', None)
        keys = ModelAssign(kwargs, 'keys', None)
        values = ModelAssign(kwargs, 'values', None)

        if self.method == 'softmax':
            with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE):
                self.outputs = util.multihead_attention(
                    queries=queries,
                    keys=keys,
                    values=values,
                    num_heads=self.num_heads,
                    dropout_rate=self.dropout_rate,
                    causality=self.casuality)
        else:
            raise NotImplementedError
Example #17
    def __call__(self, kwargs=None, renorm=False):
        """
        Call the concat module and return the output tensor.
        :param kwargs: configurations.
        :param renorm: Renorm activations for concatenation. (experimental feature)
        :return:
        """
        input_tensor = ModelAssign(kwargs, 'input_tensor', None)

        if renorm:
            renormed_inputs = []
            # Calculate l2-norm sum
            l2_norm = []
            for t in input_tensor:
                l2_norm.append(tf.nn.l2_loss(t))
            l2_norm = tf.reduce_mean(l2_norm)
            for t in input_tensor:
                renormed_inputs.append(tf.nn.l2_normalize(t) * l2_norm)
            input_tensor = renormed_inputs

        with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE) as scope:
            try:
                output = concat(input_tensor, axis=self.axis)
            except Exception as e:
                print(input_tensor)
                raise e
        try:
            self.output_shape = output.get_shape()[1:]
        except Exception as e:
            print(self.name)
            raise e
        # Calculate Mem Cost
        if not isinstance(input_tensor, list):
            t = input_tensor
            tshape = t.get_shape()[1:]
            if len(tshape) == 3:
                self.mem_cost += int(tshape[0]) * int(tshape[1]) * int(
                    tshape[2])
            elif len(tshape) == 2:
                self.mem_cost += int(tshape[0]) * int(tshape[1])
        else:
            for t in input_tensor:
                tshape = t.get_shape()[1:]
                if len(tshape) == 3:
                    self.mem_cost += int(tshape[0]) * int(tshape[1]) * int(
                        tshape[2])
                elif len(tshape) == 2:
                    self.mem_cost += int(tshape[0]) * int(tshape[1])
        self.peak_activation_mem = self.mem_cost
        self.output_tensor = output
Example #18
 def __init__(self, kwargs):
     """
     Initialization of the Attention Model Class
     :param kwargs: arguments for configuration
     """
     super(Attention, self).__init__(kwargs)
     self.method = ModelAssign(kwargs, 'method', 'softmax')
     self.name = ModelAssign(kwargs, 'name', None)
     self.input = ModelAssign(kwargs, 'input', None)
     self.num_heads = ModelAssign(kwargs, 'num_heads', 1)
     self.dropout_rate = ModelAssign(kwargs, 'dropout_rate', 0.00)
     self.casuality = ModelAssign(kwargs, 'casuality', False)
     self.outputs = None
Example #19
    def __init__(self, kwargs):
        Layer.__init__(self, kwargs)
        self.name = ModelAssign(kwargs, 'name', None)
        self.input = ModelAssign(kwargs, 'input', None)
        self.units = ModelAssign(kwargs, 'units', 10)
        self.use_bias = ModelAssign(kwargs, 'use_bias', True)
        self.batchnorm = ModelAssign(kwargs, 'batchnorm', False)
        self.trainable = ModelAssign(kwargs, 'trainable', True)
        self.activation = ModelAssign(kwargs, 'activation', 'relu')
        self.dropout = ModelAssign(kwargs, 'dropout', 0.0)
        self.skip_from_names = None
        self.input_shape = None
        self.output_shape = None
        self.output_tensor = None
        self.dropout_tensor = None

        # Params
        self.num_trainable_parameters = 0
        self.num_non_trainable_parameters = 0
        self.shared_trainable_parameters = 0
        self.MACs = 0
        self.mem_cost = 0
        self.peak_activation_mem = 0
Example #20
    def __call__(self, kwargs=None):
        """
        Call the activation module and return the output tensor.
        :param kwargs: configurations.
        :return:
        """
        input_tensor = ModelAssign(kwargs, 'input_tensor', None)

        with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE) as scope:
            try:
                output = apply_activation(input_tensor, self.activation)
            except Exception as e:
                print(input_tensor)
                raise e
            self.output_shape = output.get_shape()[1:]
            self.MACs = int(self.output_shape[0]) * int(
                self.output_shape[1]) * int(self.output_shape[2])

        # For activation we do not need to calculate the memory cost since they are usually fused into conv/fc layers.
        self.mem_cost = 0
        self.peak_activation_mem = self.mem_cost
        self.output_tensor = output
Example #21
class SoftmaxLoss(Layer):
    def __init__(self, kwargs):
        Layer.__init__(self, kwargs)
        self.input_shape = None
        self.name = ModelAssign(kwargs, 'name', None)
        self.label_smoothing = ModelAssign(kwargs, 'label_smoothing', 0.0)
        self.output_tensor = None

        self.num_trainable_parameters = 0
        self.num_non_trainable_parameters = 0
        self.shared_trainable_parameters = 0
        self.MACs = 0

    def __call__(self, kwargs=None, mode="training"):
        self.label = ModelAssign(kwargs, 'label', None)
        self.prediction = ModelAssign(kwargs, 'input', None)
        self.input_shape = self.label.get_shape()[1:]
        self.output_tensor = tf.losses.softmax_cross_entropy(
            self.label, self.prediction, label_smoothing=self.label_smoothing)

    def summary(self):
        format_str = '|SoftmaxLoss(%s)' % self.name + ' ' * (17 -
                                                             len(self.name))
        conv_str = "%s" % (self.input_shape)
        space = " " * (36 - len(conv_str))
        format_str += "|" + conv_str + space
        ts = '%s' % 0
        tstr = '|      ' + ts + ' ' * (22 - len(ts))
        format_str += tstr
        ts = '%s' % 0
        tstr = '|      ' + ts + ' ' * (26 - len(ts))
        format_str += tstr
        ts = '%s' % 0
        tstr = '|      ' + ts + ' ' * (15 - len(ts))
        format_str += tstr
        ts = '%s' % None
        tstr = '|      ' + ts + ' ' * (14 - len(ts)) + '|'
        format_str += tstr
        print(format_str)
Example #22
 def __call__(self, kwargs=None):
     input_tensor = ModelAssign(kwargs, 'input_tensor', None)
     self.input_shape = input_tensor.get_shape()[1:]
     output = input_tensor
     self.output_shape = output.get_shape()[1:]
     self.output_tensor = output
Example #23
 def __init__(self, kwargs):
     Layer.__init__(self, kwargs)
     self.name = ModelAssign(kwargs, 'name', None)
     self.output_tensor = None
Example #24
 def __call__(self, kwargs=None):
     self.output_tensor = tf.nn.softmax(ModelAssign(kwargs, 'input'))
Example #25
 def __call__(self, kwargs=None):
     self.output_tensor = ModelAssign(kwargs, 'input')
Example #26
    def __call__(self, kwargs=None):
        input_tensor = ModelAssign(kwargs, 'input_tensor', None)
        self.input_shape = input_tensor.get_shape()[1:]
        self.skip_from_names = ModelAssign(kwargs, 'skip_from_names', None)
        input_filters = int(self.input_shape[-1])
        skip_from = ModelAssign(kwargs, 'skip_from', None)
        initializer = ModelAssign(kwargs, 'initializer',
                                  G.BACKEND_DEFAULT_CONV_INITIALIZER)
        regularizer = ModelAssign(kwargs, 'regularizer',
                                  G.BACKEND_DEFAULT_REGULARIZER)
        self.is_training = ModelAssign(kwargs, 'is_training')
        regularizer_strength = ModelAssign(kwargs, 'regularizer_strength',
                                           1e-4)
        # infer filters during compiling
        if self.filters == 1:
            self.filters = int(input_tensor.get_shape()[-1])
        print("Intializing Conv Layer with L2-reg=%f" % regularizer_strength)

        with tf.variable_scope(self.name) as scope:
            if not self.hyper:
                output = util.convolution2d(
                    inputs=input_tensor,
                    kernel_size=self.kernel_size,
                    filters=self.filters,
                    strides=self.strides,
                    padding=self.padding,
                    batchnorm=self.batchnorm,
                    activation=self.activation,
                    initializer=initializer(),
                    regularizer=regularizer(regularizer_strength),
                    use_bias=self.use_bias,
                    is_training=self.is_training,
                    mode=G.EXEC_CONV_MODE)
                self.output_shape = output.get_shape()[1:]
                # Normal
                self.num_trainable_parameters += (self.kernel_size *
                                                  self.kernel_size *
                                                  input_filters) * self.filters
                if self.use_bias:
                    self.num_trainable_parameters += self.filters
                # FLOPS-MAC
                self.MACs += int(self.input_shape[0]) * int(self.input_shape[1]) * \
                             self.kernel_size * self.kernel_size * input_filters * self.filters / self.strides / self.strides
                if self.batchnorm and not G.DISABLE_BATCHNORM_COUNT:
                    self.num_trainable_parameters += 2 * self.filters
                    self.num_non_trainable_parameters += 2 * self.filters
                    self.MACs += int(self.output_shape[0]) * int(
                        self.output_shape[1]) * int(self.output_shape[2]) * 2
            else:
                raise NotImplementedError

            self.mem_cost = self.num_non_trainable_parameters + self.num_trainable_parameters
            self.peak_activation_mem += int(self.input_shape[0]) * int(
                self.input_shape[1]) * int(self.input_shape[2])
            self.peak_activation_mem += int(self.output_shape[0]) * int(
                self.output_shape[1]) * int(self.output_shape[2])

            if self.dropout > 1e-12:
                self.dropout_tensor = tf.placeholder(dtype=tf.float32)
                output = tf.nn.dropout(output,
                                       keep_prob=1 - self.dropout_tensor,
                                       name="dropout")

            self.output_tensor = output

            ret_dict = {}
            if self.hyper:
                ret_dict.update({
                    'layerinfo': {
                        self.layer_info: [
                            self.kernel_size, self.kernel_size, input_filters,
                            self.filters
                        ]
                    }
                })
            if self.dropout > 1e-12:
                ret_dict.update({'dropout': self.dropout_tensor})
            return ret_dict
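A worked instance of the convolution MAC formula used above, with assumed shapes: a 3x3 convolution on a 32x32 input with 16 input and 32 output channels at stride 1:

h, w = 32, 32          # input spatial size
k = 3                  # kernel size
c_in, c_out = 16, 32   # input / output channels
strides = 1
macs = h * w * k * k * c_in * c_out // (strides * strides)
print(macs)            # 4718592 multiply-accumulates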
Example #27
    def __init__(self, kwargs):
        Layer.__init__(self, kwargs)
        self.name = ModelAssign(kwargs, 'name', None)
        self.input = ModelAssign(kwargs, 'input', None)
        self.filters = ModelAssign(kwargs, 'filters', 32)
        self.kernel_size = ModelAssign(kwargs, 'kernel_size', 3)
        self.strides = ModelAssign(kwargs, 'strides', 1)
        self.padding = ModelAssign(kwargs, 'padding', 'SAME')
        self.hyper = ModelAssign(kwargs, 'hyper', False)
        self.batchnorm = ModelAssign(kwargs, 'batchnorm', False)
        self.activation = ModelAssign(kwargs, 'activation', 'relu')
        self.use_bias = ModelAssign(kwargs, 'use_bias', True)
        self.num_trainable_parameters = 0
        self.num_non_trainable_parameters = 0
        self.shared_trainable_parameters = 0
        self.skip_from_names = None
        self.MACs = 0
        self.peak_activation_mem = 0

        self.dropout = ModelAssign(kwargs, 'dropout', 0.0)
        self.dropout_tensor = None
        # HyperNetwork parameters
        if not self.hyper:
            self.zdims = None
            self.layer_info = None
            self.basic_block_size = None
            self.hidden = None
        else:
            raise NotImplementedError

        self.input_shape = None
        self.output_shape = None

        self.output_tensor = None
Example #28
    def __init__(self, kwargs):
        Layer.__init__(self, kwargs)
        self.name = ModelAssign(kwargs, 'name', None)
        self.input = ModelAssign(kwargs, 'input', None)
        self.filters = ModelAssign(kwargs, 'filters', 32)
        self.kernel_size = ModelAssign(kwargs, 'kernel_size', 3)
        self.strides = ModelAssign(kwargs, 'strides', 1)
        self.padding = ModelAssign(kwargs, 'padding', 'SAME')
        self.hyper = ModelAssign(kwargs, 'hyper', False)
        self.batchnorm = ModelAssign(kwargs, 'batchnorm', False)
        self.activation = ModelAssign(kwargs, 'activation', 'relu')
        self.dropout = ModelAssign(kwargs, 'dropout', 0.0)
        self.dropout_tensor = None
        self.use_bias = ModelAssign(kwargs, 'use_bias', True)
        self.num_trainable_parameters = 0
        self.num_non_trainable_parameters = 0
        self.shared_trainable_parameters = 0
        self.mem_cost = 0
        self.peak_activation_mem = 0
        self.skip_from_names = None
        self.MACs = 0
        # HyperNetwork parameters
        if not self.hyper:
            self.zdims = None
            self.layer_info = None
            self.basic_block_size = None
            self.hidden = None
        else:
            self.zdims = ModelAssign(kwargs, 'hyper_zdims', 4)
            self.layer_info = tf.placeholder(dtype=tf.float32,
                                             shape=[1, self.zdims],
                                             name=self.name + 'layer_info')
            self.basic_block_size = Str2List(
                ModelAssign(kwargs, 'hyper_basic_block_size', None))
            self.hidden = ModelAssign(kwargs, 'hyper_hidden', 16)

        self.input_shape = None
        self.output_shape = None

        self.output_tensor = None
Example #29
 def __call__(self, kwargs=None, mode="training"):
     self.label = ModelAssign(kwargs, 'label', None)
     self.prediction = ModelAssign(kwargs, 'input', None)
     self.input_shape = self.label.get_shape()[1:]
     self.output_tensor = tf.losses.softmax_cross_entropy(
         self.label, self.prediction, label_smoothing=self.label_smoothing)
Example #30
    def __call__(self, kwargs=None):
        input_tensor = ModelAssign(kwargs, 'input_tensor', None)
        is_training = ModelAssign(kwargs, 'is_training')
        if not isinstance(input_tensor, list):
            raise NotImplementedError("The input tensor list must have exactly two tensors")
        op1 = input_tensor[0]
        op2 = input_tensor[1]

        if G.data_format == 'channels_last':
            _filter_len1 = int(op1.get_shape()[-1])
            _filter_len2 = int(op2.get_shape()[-1])
            # try to guess strides
            op1_feat_size = int(op1.get_shape()[1])
            op2_feat_size = int(op2.get_shape()[1])
        else:
            _filter_len1 = int(op1.get_shape()[1])
            _filter_len2 = int(op2.get_shape()[1])
            # try to guess strides
            op1_feat_size = int(op1.get_shape()[-1])
            op2_feat_size = int(op2.get_shape()[-1])

        if op1_feat_size % op2_feat_size != 0:
            tf.logging.warning(
                "op1_feat_size must be divisible by op2_feat_size, but got %d vs %d; please double-check"
                % (op1_feat_size, op2_feat_size))
        strides = op1_feat_size // op2_feat_size
        tf.logging.info(
            "Inferred strides=%d for op1_feat_size=%d vs op2_feat_size=%d"
            % (strides, op1_feat_size, op2_feat_size))
        if _filter_len1 == _filter_len2 and strides == 1:
            with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE) as scope:
                output = op1 + op2
                if G.EXEC_CONV_MODE == 'conv-bn-relu':
                    output = apply_activation(output, self.activation)

                self.mem_cost = self.num_non_trainable_parameters + self.num_trainable_parameters
                self.input_shape = op1.get_shape()
                self.output_shape = op2.get_shape()
                self.peak_activation_mem += int(self.input_shape[1]) * int(self.input_shape[2]) * int(
                    self.input_shape[3])
                self.peak_activation_mem += int(self.output_shape[1]) * int(self.output_shape[2]) * int(
                    self.output_shape[3])

        else:
            with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE) as scope:
                if G.EXEC_CONV_MODE == 'relu-conv-bn':
                    activation_ = self.activation
                    print("Activation is conducted at conv...")
                else:
                    activation_ = 'linear'
                    print("Activation is conducted at add...")

                print("Intializing Conv Layer with Default L2-reg.")
                id_mapping = convolution2d(op1,
                                           filters=_filter_len2,
                                           kernel_size=1,
                                           padding='same',
                                           activation=activation_,
                                           batchnorm=True,
                                           strides=strides,
                                           use_bias=False,
                                           is_training=is_training,
                                           mode=G.EXEC_CONV_MODE)
                output = id_mapping + op2
                if G.EXEC_CONV_MODE == 'conv-bn-relu':
                    output = apply_activation(output, self.activation)
                self.mem_cost = self.num_non_trainable_parameters + self.num_trainable_parameters
                self.input_shape = id_mapping.get_shape()
                self.output_shape = op2.get_shape()
                self.peak_activation_mem += int(self.input_shape[1]) * int(self.input_shape[2]) * int(
                    self.input_shape[3])
                self.peak_activation_mem += int(self.output_shape[1]) * int(self.output_shape[2]) * int(
                    self.output_shape[3])

            self.num_trainable_parameters = _filter_len2 * _filter_len1
            self.num_non_trainable_parameters = 2 * _filter_len2
            self.MACs = op2_feat_size * op2_feat_size * _filter_len1 * _filter_len2
            self.output_shape = output.get_shape()

        self.output_tensor = output
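The stride inference above can be checked with concrete sizes (assumed, not from the source): a 32x32 feature map skip-connected into a 16x16 one yields stride 2, which the 1x1 projection convolution then uses:

op1_feat_size, op2_feat_size = 32, 16
assert op1_feat_size % op2_feat_size == 0  # otherwise the code above only warns
strides = op1_feat_size // op2_feat_size   # 2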