Example #1
    def __init__(self, name='ExitFlowModule'):
        super(ExitFlowModule, self).__init__(name=name)

        with self._enter_variable_scope():
            self.resconv_ex1 = snt.Conv2D(output_channels=1024,
                                          kernel_shape=1,
                                          stride=2,
                                          name='resconv_ex1')
            self.bn_resex1 = snt.BatchNorm(name='bn_resex1')
            self.sepconv_ex1 = snt.SeparableConv2D(output_channels=728,
                                                   channel_multiplier=1,
                                                   kernel_shape=3,
                                                   name='sepconv_ex1')
            self.bn_sepex1 = snt.BatchNorm(name='bn_sepex1')
            self.sepconv_ex2 = snt.SeparableConv2D(output_channels=1024,
                                                   channel_multiplier=1,
                                                   kernel_shape=3,
                                                   name='sepconv_ex2')
            self.bn_sepex2 = snt.BatchNorm(name='bn_sepex2')
            self.sepconv_ex3 = snt.SeparableConv2D(output_channels=1536,
                                                   channel_multiplier=1,
                                                   kernel_shape=3,
                                                   name='sepconv_ex3')
            self.bn_sepex3 = snt.BatchNorm(name='bn_sepex3')
            self.sepconv_ex4 = snt.SeparableConv2D(output_channels=2048,
                                                   channel_multiplier=1,
                                                   kernel_shape=3,
                                                   name='sepconv_ex4')
            self.bn_sepex4 = snt.BatchNorm(name='bn_sepex4')
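
The constructor only creates the sub-modules; a minimal _build sketch (not part of the original example) assuming they are wired as in the Xception exit flow could look like this:

    def _build(self, inputs, is_training=True):
        # Strided 1x1 projection for the residual branch.
        residual = self.bn_resex1(self.resconv_ex1(inputs),
                                  is_training=is_training)

        # Main branch: two separable conv / batch-norm stages, then downsample.
        net = tf.nn.relu(inputs)
        net = self.bn_sepex1(self.sepconv_ex1(net), is_training=is_training)
        net = tf.nn.relu(net)
        net = self.bn_sepex2(self.sepconv_ex2(net), is_training=is_training)
        net = tf.nn.max_pool(net, ksize=[1, 3, 3, 1],
                             strides=[1, 2, 2, 1], padding='SAME')
        net = net + residual

        # Final separable convolutions of the exit flow.
        net = self.bn_sepex3(self.sepconv_ex3(net), is_training=is_training)
        net = tf.nn.relu(net)
        net = self.bn_sepex4(self.sepconv_ex4(net), is_training=is_training)
        net = tf.nn.relu(net)
        return net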
Example #2
    def __init__(self,
                 num_chans,
                 sampling_rate,
                 num_filters,
                 pooling_stride,
                 act='tanh',
                 verbose=False,
                 name="cnn"):
        super(CNN, self).__init__(name=name)

        self._pool1 = DownsampleAlongW(pooling_stride,
                                       padding='VALID',
                                       verbose=verbose)
        self._pool2 = DownsampleAlongW(pooling_stride,
                                       padding='VALID',
                                       verbose=verbose)
        self._act = Activation(act, verbose=verbose)

        with self._enter_variable_scope():

            def clip_getter(getter, name, *args, **kwargs):
                var = getter(name, *args, **kwargs)
                clip_var = tf.clip_by_norm(var, 1)

                return clip_var

            self._l1_conv = snt.Conv2D(num_filters, [1, sampling_rate >> 1])
            self._l2_depthconv = snt.DepthwiseConv2D(
                1, (num_chans, 1),
                padding=snt.VALID,
                custom_getter={'w': clip_getter})
            self._l3_sepconv = snt.SeparableConv2D(num_filters, 1,
                                                   [1, sampling_rate >> 3])
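
The layer shapes (temporal filters of length sampling_rate/2, a depthwise convolution spanning all num_chans electrodes, then a separable convolution) resemble the EEGNet family of EEG classifiers. A minimal _build sketch under that assumption (the original _build is not shown):

    def _build(self, inputs):
        # Temporal convolution, then depthwise spatial filtering across the
        # electrode axis, activation and the first pooling stage.
        net = self._l1_conv(inputs)
        net = self._l2_depthconv(net)
        net = self._act(net)
        net = self._pool1(net)

        # Separable convolution block followed by the second pooling stage.
        net = self._l3_sepconv(net)
        net = self._act(net)
        net = self._pool2(net)
        return net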
Example #3
    def __init__(self, num, name='MiddleFlowModule'):
        self.num = str(num)
        super(MiddleFlowModule, self).__init__(name=name)

        with self._enter_variable_scope():
            self.sepconv_m1 = snt.SeparableConv2D(output_channels=728,
                                                  channel_multiplier=1,
                                                  kernel_shape=3,
                                                  name='sepconv_m{}1'.format(
                                                      self.num))
            self.bn_sepm1 = snt.BatchNorm(name='bn_sepm{}1'.format(self.num))
            self.sepconv_m2 = snt.SeparableConv2D(output_channels=728,
                                                  channel_multiplier=1,
                                                  kernel_shape=3,
                                                  name='sepconv_m{}2'.format(
                                                      self.num))
            self.bn_sepm2 = snt.BatchNorm(name='bn_sepm{}2'.format(self.num))
            self.sepconv_m3 = snt.SeparableConv2D(output_channels=728,
                                                  channel_multiplier=1,
                                                  kernel_shape=3,
                                                  name='sepconv_m{}3'.format(
                                                      self.num))
            self.bn_sepm3 = snt.BatchNorm(name='bn_sepm{}3'.format(self.num))
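
A minimal _build sketch (not part of the original example) assuming the standard Xception middle-flow wiring, i.e. three relu/sepconv/batch-norm stages with an identity skip connection:

    def _build(self, inputs, is_training=True):
        net = tf.nn.relu(inputs)
        net = self.bn_sepm1(self.sepconv_m1(net), is_training=is_training)
        net = tf.nn.relu(net)
        net = self.bn_sepm2(self.sepconv_m2(net), is_training=is_training)
        net = tf.nn.relu(net)
        net = self.bn_sepm3(self.sepconv_m3(net), is_training=is_training)
        # Identity residual: all stages keep 728 channels and SAME padding.
        return net + inputs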
Example #4
    def __init__(self, output_channels, num, name='EntryFlowModule'):
        self.output_channels = output_channels
        self.num = str(num)

        super(EntryFlowModule, self).__init__(name=name)

        with self._enter_variable_scope():
            self.resconv_e1 = snt.Conv2D(output_channels=self.output_channels,
                                         kernel_shape=1,
                                         stride=2,
                                         name='resconv_e{}'.format(self.num))
            self.bn_rese1 = snt.BatchNorm(name='bn_rese{}'.format(self.num))
            self.sepconv_e1 = snt.SeparableConv2D(
                output_channels=self.output_channels,
                channel_multiplier=1,
                kernel_shape=3,
                name='sepconv_e{}1'.format(self.num))
            self.bn_sepe1 = snt.BatchNorm(name='bn_sepe{}1'.format(self.num))
            self.sepconv_e2 = snt.SeparableConv2D(
                output_channels=self.output_channels,
                channel_multiplier=1,
                kernel_shape=3,
                name='sepconv_e{}2'.format(self.num))
            self.bn_sepe2 = snt.BatchNorm(name='bn_sepe{}2'.format(self.num))
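
A minimal _build sketch (not part of the original example) assuming the usual Xception entry-flow block: a strided 1x1 projection on the skip path and a max-pooled separable-conv branch:

    def _build(self, inputs, is_training=True):
        # Strided 1x1 projection on the skip path.
        residual = self.bn_rese1(self.resconv_e1(inputs),
                                 is_training=is_training)

        # Main path: two separable conv / batch-norm stages, then a strided
        # max-pool to match the residual's spatial resolution.
        net = self.bn_sepe1(self.sepconv_e1(inputs), is_training=is_training)
        net = tf.nn.relu(net)
        net = self.bn_sepe2(self.sepconv_e2(net), is_training=is_training)
        net = tf.nn.max_pool(net, ksize=[1, 3, 3, 1],
                             strides=[1, 2, 2, 1], padding='SAME')
        return net + residual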
Example #5
    def _build(self, inputs, is_training, depthwise=False):
        if depthwise:
            net = snt.SeparableConv2D(output_channels=self.output_channels,
                                      channel_multiplier=8,
                                      kernel_shape=self.kernel_shape,
                                      stride=self.stride,
                                      padding=self.padding,
                                      use_bias=self.use_bias)(inputs)
        else:
            # `regularizers` is assumed to be defined elsewhere in the
            # enclosing module of the original code.
            net = snt.Conv2D(output_channels=self.output_channels,
                             kernel_shape=self.kernel_shape,
                             stride=self.stride,
                             padding=self.padding,
                             use_bias=self.use_bias,
                             regularizers=regularizers)(inputs)
        if self.use_batch_norm:
            bn = snt.BatchNorm(scale=self.use_scale)
            net = bn(net, is_training=is_training, test_local_stats=False)
        if self.activation_fn is not None:
            net = self.activation_fn(net)
        return net
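
A hypothetical call site for this block; the class name ConvBlock and its constructor arguments are placeholders, since the original snippet only shows _build:

# ConvBlock and its constructor signature are placeholders (not shown above).
images = tf.placeholder(tf.float32, [None, 64, 64, 3])
block = ConvBlock(output_channels=32, kernel_shape=3, stride=1,
                  padding=snt.SAME, use_bias=False)
features = block(images, is_training=True, depthwise=True)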
Example #6
    def conv_layer(cls,
                   inputs,
                   is_training,
                   out_filters,
                   filter_size,
                   channel_multiplier,
                   separable=False):
        with tf.variable_scope('inp_conv_1'):
            net = snt.Conv2D(out_filters, kernel_shape=1)(inputs)
            net = snt.BatchNormV2(scale=True, decay_rate=0.9,
                                  eps=1e-5)(net, is_training=is_training)
            net = tf.nn.relu(net)

        with tf.variable_scope(f'out_conv_{out_filters}'):
            if separable:
                net = snt.SeparableConv2D(out_filters, channel_multiplier,
                                          filter_size)(net)
            else:
                net = snt.Conv2D(out_filters, kernel_shape=filter_size)(net)
            net = snt.BatchNormV2(scale=True, decay_rate=0.9,
                                  eps=1e-5)(net, is_training=is_training)
        return net
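
A hypothetical invocation of this helper; SomeModel is a placeholder for the enclosing class, which is not shown, and conv_layer appears to be written as a classmethod:

# SomeModel is a placeholder name for the class that defines conv_layer.
net = SomeModel.conv_layer(inputs,
                           is_training=True,
                           out_filters=64,
                           filter_size=3,
                           channel_multiplier=1,
                           separable=True)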
Example #7
    def __init__(self,
                 act=None,
                 pool=None,
                 with_memory=True,
                 summ=None,
                 residual=True,
                 log=False,
                 name="model"):
        super(Model, self).__init__(name=name)

        self._with_memory = with_memory
        self._summ = summ
        self._residual = residual
        self._num_blocks = 6

        self._log = log

        with self._enter_variable_scope():
            self._act = Activation(act, verbose=True)
            self._pool = Pooling(pool, padding='VALID', verbose=True)

            if self._residual:
                self._convs = [
                    snt.Conv2D(eval("FLAGS.num_outputs_block_%d" % (i + 1)),
                               FLAGS.filter_size,
                               padding=snt.VALID,
                               use_bias=False) for i in range(self._num_blocks)
                ]

                self._sepconvs = [
                    snt.SeparableConv2D(
                        eval("FLAGS.num_outputs_block_%d" % (i + 1)),
                        1,
                        FLAGS.filter_size,
                        padding=snt.SAME,
                        use_bias=False) for i in range(self._num_blocks)
                ]
            else:
                self._sepconvs = [
                    snt.SeparableConv2D(
                        eval("FLAGS.num_outputs_block_%d" % (i + 1)),
                        1,
                        FLAGS.filter_size,
                        padding=snt.VALID,
                        use_bias=False) for i in range(self._num_blocks)
                ]

            self._seq = snt.Sequential([
                snt.Linear(output_size=FLAGS.num_outputs_dense), tf.nn.relu,
                snt.Linear(output_size=FLAGS.num_classes)
            ])

            if self._with_memory:
                print("Model with memory enabled")

                config = {
                    "height": FLAGS.memory_height,
                    "width": FLAGS.memory_width,
                    "input_size": 32,  # very dangerous: hard-coded
                    "num_iters": FLAGS.num_iterations,
                    "learning_rate": FLAGS.lr_som
                }

                self._som = SOM(**config)
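
A hypothetical instantiation; Activation, Pooling, SOM, and the FLAGS values are project-specific and assumed to be defined elsewhere, and the _build signature is not shown:

# Placeholder usage; the actual _build arguments may differ.
model = Model(act='relu', pool='max', with_memory=False,
              summ=None, residual=True, log=False)
logits = model(images)  # assumes _build takes a single image batch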