def conv2d(h_num_filters,
           h_filter_width,
           h_stride=1,
           h_dilation_rate=1,
           h_use_bias=True,
           h_padding='SAME'):

    def compile_fn(di, dh):
        conv = tf.keras.layers.Conv2D(dh['num_filters'],
                                      dh['filter_width'],
                                      dh['stride'],
                                      use_bias=dh['use_bias'],
                                      dilation_rate=dh['dilation_rate'],
                                      padding=dh['padding'])

        def forward_fn(di, is_training=True):
            return {'out': conv(di['in'])}

        return forward_fn

    return siso_tensorflow_eager_module(
        'Conv2D', compile_fn, {
            'num_filters': h_num_filters,
            'filter_width': h_filter_width,
            'stride': h_stride,
            'dilation_rate': h_dilation_rate,
            'use_bias': h_use_bias,
            'padding': h_padding
        })
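For context, each h_* argument is a hyperparameter object whose chosen value is delivered to compile_fn through the dh dictionary. A minimal usage sketch, assuming DeepArchitect-style Discrete hyperparameters (the import path is an assumption, not shown in the example):

import deep_architect.hyperparameters as hp  # assumed import path

conv_module = conv2d(h_num_filters=hp.Discrete([32, 64, 128]),
                     h_filter_width=hp.Discrete([1, 3, 5]))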
def separable_conv2d(h_num_filters,
                     h_filter_width,
                     h_stride=1,
                     h_dilation_rate=1,
                     h_depth_multiplier=1,
                     h_use_bias=True,
                     h_padding='SAME'):

    def compile_fn(di, dh):

        conv_op = tf.keras.layers.SeparableConv2D(
            dh['num_filters'],
            dh['filter_width'],
            strides=dh['stride'],
            dilation_rate=dh['dilation_rate'],
            depth_multiplier=dh['depth_multiplier'],
            use_bias=dh['use_bias'],
            padding=dh['padding'])

        def fn(di, is_training=True):
            return {'out': conv_op(di['in'])}

        return fn

    return siso_tensorflow_eager_module(
        'SeparableConv2D', compile_fn, {
            'num_filters': h_num_filters,
            'filter_width': h_filter_width,
            'stride': h_stride,
            'use_bias': h_use_bias,
            'dilation_rate': h_dilation_rate,
            'depth_multiplier': h_depth_multiplier,
            'padding': h_padding
        })
def relu():
    def compile_fn(di, dh):
        def forward_fn(di, is_training=True):
            return {'out': tf.nn.relu(di['in'])}

        return forward_fn

    return siso_tensorflow_eager_module('ReLU', compile_fn, {})
def flatten():
    def compile_fn(di, dh):
        # Functional tf.layers ops raise under eager execution; use the Keras
        # layer object instead.
        flatten_op = tf.keras.layers.Flatten()

        def forward_fn(di, is_training=True):
            return {'out': flatten_op(di['in'])}

        return forward_fn

    return siso_tensorflow_eager_module('Flatten', compile_fn, {})
def global_pool2d():
    def compile_fn(di, dh):
        def forward_fn(di, is_training=True):
            return {'out': tf.reduce_mean(di['in'], [1, 2])}

        return forward_fn

    return siso_tensorflow_eager_module('GlobalAveragePool', compile_fn, {})
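The mean over axes [1, 2] above computes exactly what tf.keras.layers.GlobalAveragePooling2D does; a quick equivalence check, assuming eager execution is enabled:

import numpy as np

x = tf.constant(np.random.randn(2, 8, 8, 16).astype('float32'))
manual = tf.reduce_mean(x, [1, 2])                    # shape (2, 16)
pooled = tf.keras.layers.GlobalAveragePooling2D()(x)  # shape (2, 16)
# np.allclose(manual.numpy(), pooled.numpy()) -> True (up to float tolerance)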
Example #6
def global_pool():
    def compile_fn(di, dh):
        def fn(di, is_training):
            with tf.device('/gpu:0'):
                return {'out': tf.reduce_mean(di['in'], [1, 2])}

        return fn

    return htfe.siso_tensorflow_eager_module('GlobalPool', compile_fn, {})
Example #7
def relu():
    def compile_fn(di, dh):
        def fn(di, is_training=True):
            with tf.device('/gpu:0'):
                return {'out': tf.nn.relu(di['in'])}

        return fn

    return htfe.siso_tensorflow_eager_module('ReLU', compile_fn, {})
def batch_normalization():
    def compile_fn(di, dh):
        bn = tf.keras.layers.BatchNormalization(momentum=0.9, epsilon=1e-5)

        def forward_fn(di, is_training):
            return {'out': bn(di['in'], training=is_training)}

        return forward_fn

    return siso_tensorflow_eager_module('BatchNormalization', compile_fn, {})
def global_pool2d():

    def compile_fn(di, dh):
        pool = tf.keras.layers.GlobalAveragePooling2D()

        def forward_fn(di, is_training=True):
            return {'out': pool(di['in'])}

        return forward_fn

    return siso_tensorflow_eager_module('GlobalAveragePool', compile_fn, {})
def fc_layer(h_num_units):
    def compile_fn(di, dh):
        fc = tf.keras.layers.Dense(dh['num_units'])

        def forward_fn(di, is_training=True):
            return {'out': fc(di['in'])}

        return forward_fn

    return siso_tensorflow_eager_module('FCLayer', compile_fn,
                                        {'num_units': h_num_units})
Example #11
def conv2D(filter_size, channels, name, weight_sharer):
    def compile_fn(di, dh):
        conv_fn = lambda: tf.keras.layers.Conv2D(channels, filter_size)
        conv = weight_sharer.get(name, conv_fn)

        def fn(di, is_training=True):
            return {'out': conv(di['in'])}

        return fn

    return siso_tensorflow_eager_module('Conv2D', compile_fn, {})
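The weight_sharer object used in this and the following examples is not defined in these snippets; conceptually it is a name-keyed cache of layer objects, so that every sampled architecture reuses the same weights for a given name. A minimal sketch of such a cache (hypothetical; the real implementation presumably also persists weights, which is what the load_weights calls further below rely on):

class WeightSharer:

    def __init__(self):
        self.name_to_layer = {}
        self.name_to_weights = {}

    def get(self, name, layer_fn, weight_fn=None):
        # Build the layer on the first request; hand back the cached one afterwards.
        if name not in self.name_to_layer:
            self.name_to_layer[name] = layer_fn()
        return self.name_to_layer[name]

    def load_weights(self, name):
        # Previously saved weights for `name`, or None on a cache miss.
        return self.name_to_weights.get(name)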
Example #12
def dropout(keep_prob):
    def compile_fn(di, dh):
        def fn(di, is_training=True):
            if is_training:
                with tf.device('/gpu:0'):
                    out = tf.nn.dropout(di['in'], keep_prob)
            else:
                out = di['in']
            return {'out': out}

        return fn

    return htfe.siso_tensorflow_eager_module('Dropout', compile_fn, {})
def dropout(h_keep_prob):
    def compile_fn(di, dh):
        def forward_fn(di, is_training=True):
            if is_training:
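                # TF1 semantics: the second argument of tf.nn.dropout is the
                # KEEP probability (in TF2 it is the drop rate instead).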
                out = tf.nn.dropout(di['in'], dh['keep_prob'])
            else:
                out = di['in']
            return {'out': out}

        return forward_fn

    return siso_tensorflow_eager_module('Dropout', compile_fn,
                                        {'keep_prob': h_keep_prob})
Example #14
def global_convolution(h_num_filters):
    def compile_fn(di, dh):
        _, h, w, _ = di['in'].shape.as_list()
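        # The kernel spans the full spatial extent, so with VALID padding the
        # output is 1x1 in space: a global, learned pooling over the feature map.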
        conv = tf.keras.layers.Conv2D(dh['num_filters'], [h, w],
                                      use_bias=False,
                                      padding='VALID')

        def forward_fn(di, is_training=True):
            return {'out': conv(di['in'])}

        return forward_fn

    return htfe.siso_tensorflow_eager_module('GlobalConv2D', compile_fn, {
        'num_filters': h_num_filters,
    })
def max_pool2d(h_kernel_size, h_stride=1, h_padding='SAME'):
    def compile_fn(di, dh):
        pool = tf.keras.layers.MaxPooling2D(dh['kernel_size'],
                                            dh['stride'],
                                            padding=dh['padding'])

        def forward_fn(di, is_training=True):
            return {'out': pool(di['in'])}

        return forward_fn

    return siso_tensorflow_eager_module('MaxPool2D', compile_fn, {
        'kernel_size': h_kernel_size,
        'stride': h_stride,
        'padding': h_padding
    })
Example #16
def check_filters(filters, stride=1):
    def compile_fn(di, dh):
        num_filters = di['in'].shape[-1].value
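        # Insert a 1x1 projection (ResNet-style shortcut) only when the channel
        # count or the stride has to change; otherwise pass the input through.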
        if num_filters != filters or stride > 1:
            conv = tf.keras.layers.Conv2D(filters,
                                          1,
                                          strides=stride,
                                          padding='SAME')
        else:
            conv = None

        def forward_fn(di, is_training=True):
            return {'out': di['in'] if conv is None else conv(di['in'])}

        return forward_fn

    return htfe.siso_tensorflow_eager_module('CheckFilters', compile_fn, {})
Example #17
def avg_pool(h_kernel_size, h_stride):
    def compile_fn(di, dh):
        def fn(di, is_training=True):
            with tf.device('/gpu:0'):
                return {
                    'out':
                    tf.nn.avg_pool(
                        di['in'], [1, dh['kernel_size'], dh['kernel_size'], 1],
                        [1, dh['stride'], dh['stride'], 1], 'SAME')
                }

        return fn

    return htfe.siso_tensorflow_eager_module('AvgPool', compile_fn, {
        'kernel_size': h_kernel_size,
        'stride': h_stride,
    })
def min_pool2d(h_kernel_size, h_stride=1, h_padding='SAME'):

    def compile_fn(di, dh):
        pool = tf.keras.layers.MaxPooling2D(dh['kernel_size'],
                                            dh['stride'],
                                            padding=dh['padding'])
        negate = tf.keras.layers.Lambda(lambda x: -1 * x, name='negate')

        def forward_fn(di, is_training=True):
            return {'out': negate(pool(negate(di['in'])))}

        return forward_fn

    return siso_tensorflow_eager_module('MinPool2D', compile_fn, {
        'kernel_size': h_kernel_size,
        'stride': h_stride,
        'padding': h_padding
    })
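The min-pool above exploits the identity min(x) = -max(-x). A quick numeric check, assuming eager execution:

import numpy as np

x = tf.constant(np.random.randn(1, 4, 4, 1).astype('float32'))
pool = tf.keras.layers.MaxPooling2D(2, 2, padding='same')
min_pooled = -pool(-x)  # element-wise minimum over each 2x2 window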
def dropout(h_keep_prob):

    class Dropout(tf.keras.layers.Layer):

        def __init__(self, rate, seed=None, **kwargs):
            super(Dropout, self).__init__(**kwargs)
            self.rate = rate
            self.seed = seed
            self.supports_masking = True

        def call(self, inputs, training=None):
            if training is None:
                training = True

            def dropped_inputs():
                return tf.nn.dropout(inputs, 1 - self.rate, seed=self.seed)

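            # `tf_utils` and `array_ops` are assumed to come from TensorFlow's
            # internal modules: tensorflow.python.keras.utils.tf_utils and
            # tensorflow.python.ops.array_ops.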
            output = tf_utils.smart_cond(
                training, dropped_inputs, lambda: array_ops.identity(inputs))
            return output

        def compute_output_shape(self, input_shape):
            return input_shape

        def get_config(self):
            config = {'rate': self.rate, 'seed': self.seed}
            base_config = super(Dropout, self).get_config()
            return dict(list(base_config.items()) + list(config.items()))

    def compile_fn(di, dh):
        dropout_op = Dropout(1 - dh['keep_prob'])

        def forward_fn(di, is_training=True):
            out = dropout_op(di['in'])
            return {'out': out}

        return forward_fn

    return siso_tensorflow_eager_module('Dropout', compile_fn,
                                        {'keep_prob': h_keep_prob})
Example #20
def keras_batch_normalization(name='default', weight_sharer=None):
    name = name + '_bn'

    def compile_fn(di, dh):
        bn = weight_sharer.get(name, tf.keras.layers.BatchNormalization,
                               lambda layer: layer.get_weights())
        if not bn.built:
            with tf.device('/gpu:0'):
                bn.build(di['in'].get_shape())
                weights = weight_sharer.load_weights(name)
                if weights is not None:
                    bn.set_weights(weights)

        def fn(di, is_training):
            with tf.device('/gpu:0'):
                return {'out': bn(di['in'], training=is_training)}

        return fn

    return htfe.siso_tensorflow_eager_module('BatchNormalization', compile_fn,
                                             {})
Example #21
def fc_layer(num_classes, name, weight_sharer):
    name = name + '_fc_layer_' + str(num_classes)

    def compile_fn(di, dh):
        fc = weight_sharer.get(name,
                               lambda: tf.keras.layers.Dense(num_classes),
                               lambda layer: layer.get_weights())
        if not fc.built:
            with tf.device('/gpu:0'):
                fc.build(di['in'].get_shape())
                weights = weight_sharer.load_weights(name)
                if weights is not None:
                    fc.set_weights(weights)

        def fn(di, is_training=True):
            with tf.device('/gpu:0'):
                return {'out': fc(di['in'])}

        return fn

    return htfe.siso_tensorflow_eager_module('FC_Layer', compile_fn, {})
Example #22
def conv2D_depth_separable(filter_size, name, weight_sharer, out_filters=None):
    def compile_fn(di, dh):
        (_, _, _, channels) = di['in'].get_shape().as_list()
        channels = channels if out_filters is None else out_filters
        conv_fn = lambda: tf.keras.layers.SeparableConv2D(
            channels, filter_size, padding='same')
        shared_name = name + '_dsep_' + str(filter_size)
        conv = weight_sharer.get(shared_name, conv_fn,
                                 lambda layer: layer.get_weights())
        if not conv.built:
            with tf.device('/gpu:0'):
                conv.build(di['in'].get_shape())
                # Load under the same key the layer was registered with.
                weights = weight_sharer.load_weights(shared_name)
                if weights is not None:
                    conv.set_weights(weights)

        def fn(di, is_training=True):
            with tf.device('/gpu:0'):
                return {'out': conv(di['in'])}

        return fn

    return htfe.siso_tensorflow_eager_module('Conv2DSeparable', compile_fn, {})