class Downsampler(snt.AbstractModule):

    def __init__(self, dim, factor=2, filter_size=5, act='relu', pool='avg',
                 name='downsampler'):
        super(Downsampler, self).__init__(name=name)
        self._act = Activation(act, verbose=True)
        self._pool = Pooling(pool, 2, verbose=True)
        with self._enter_variable_scope():
            # Channel counts shrink from dim[-1] * factor**2 back down to
            # dim[-1]; each pooling stage halves the spatial resolution.
            self._conv = snt.Conv2D(dim[-1] * factor * factor, filter_size,
                                    use_bias=False)
            self._conv2 = snt.Conv2D(dim[-1] * factor, filter_size,
                                     use_bias=False)
            self._conv3 = snt.Conv2D(dim[-1], filter_size, use_bias=False)
            self._seq = snt.Sequential([
                self._conv, self._pool, self._act,
                self._conv2, self._pool, self._act,
                self._conv3, self._pool, self._act
            ])

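# --- Usage sketch (not part of the original module) --------------------------
# Assumes Downsampler._build simply applies self._seq and that Pooling(pool, 2)
# halves each spatial dimension; shapes below are illustrative. Three stride-2
# pooling stages shrink each spatial side 8x while the channel count returns
# to dim[-1].
def _downsampler_usage_sketch():
    dim = [None, 64, 64, 3]                   # hypothetical NHWC input shape
    images = tf.placeholder(tf.float32, dim)
    downsampler = Downsampler(dim)
    features = downsampler(images)            # expected: [None, 8, 8, 3]
    return features
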
class CNN(snt.AbstractModule):

    def __init__(self, num_chans, sampling_rate, num_filters, pooling_stride,
                 act='tanh', verbose=False, name="cnn"):
        super(CNN, self).__init__(name=name)
        self._pool1 = DownsampleAlongW(pooling_stride, padding='VALID',
                                       verbose=verbose)
        self._pool2 = DownsampleAlongW(pooling_stride, padding='VALID',
                                       verbose=verbose)
        self._act = Activation(act, verbose=verbose)
        with self._enter_variable_scope():

            def clip_getter(getter, name, *args, **kwargs):
                # Constrain the kernel to unit norm; applied transparently
                # whenever the variable is fetched.
                var = getter(name, *args, **kwargs)
                clip_var = tf.clip_by_norm(var, 1)
                return clip_var

            # Temporal convolution spanning half a second of samples.
            self._l1_conv = snt.Conv2D(num_filters, [1, sampling_rate >> 1])
            # Spatial (across-channels) depthwise convolution with
            # norm-clipped weights.
            self._l2_depthconv = snt.DepthwiseConv2D(
                1, (num_chans, 1), padding=snt.VALID,
                custom_getter={'w': clip_getter})
            self._l3_sepconv = snt.SeparableConv2D(num_filters, 1,
                                                   [1, sampling_rate >> 3])

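# --- Sketch: how the norm-clipping custom getter behaves ---------------------
# Not part of the original module. Every read of a variable routed through the
# getter sees tf.clip_by_norm(w, 1) instead of the raw variable, so the
# constraint is re-applied on every forward pass rather than once at creation
# time. The same mechanism works with a plain TF1 variable scope:
def _clip_getter_sketch():
    def clip_getter(getter, name, *args, **kwargs):
        return tf.clip_by_norm(getter(name, *args, **kwargs), 1)

    with tf.variable_scope('demo', custom_getter=clip_getter):
        w = tf.get_variable('w', [4, 4])   # reads return the clipped tensor
    return w
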
class Classifier(snt.AbstractModule):

    def __init__(self, sampling_rate, filter_size=3, num_filters=32,
                 pooling_stride=2, pool='avg', act='elu', name="classifier"):
        super(Classifier, self).__init__(name=name)
        num_classes = 2
        self._act = Activation(act, verbose=True)
        self._pool = Downsample1D(2)
        self._bf = snt.BatchFlatten()
        regularizers = {
            "w": tf.contrib.layers.l2_regularizer(scale=0.1),
            "b": tf.contrib.layers.l2_regularizer(scale=0.1)
        }
        with self._enter_variable_scope():
            self._l1_conv = snt.Conv1D(num_filters, filter_size + 2)
            self._l2_sepconv = snt.SeparableConv1D(num_filters << 1, 1,
                                                   filter_size)
            self._lin1 = snt.Linear(256, regularizers=regularizers)
            self._lin2 = snt.Linear(num_classes, regularizers=regularizers)

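# --- Sketch: consuming the Sonnet regularizers --------------------------------
# Not part of the original module. The l2_regularizer terms attached to the
# Linear layers land in the standard TF1 regularization collection; a
# hypothetical training script would fold them into the loss like this:
def _regularization_loss_sketch(data_loss):
    reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    if reg_losses:
        return data_loss + tf.add_n(reg_losses)
    return data_loss
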
class Adaptor(snt.AbstractModule):

    def __init__(self, num_filters=32, filter_size=5, act='', name="adaptor"):
        super(Adaptor, self).__init__(name=name)
        self._bf = snt.BatchFlatten()
        self._pool = Downsample1D(2)
        self._act = Activation(act, verbose=True)
        with self._enter_variable_scope():
            # Filter count doubles (<< 1) then quadruples (<< 2) while the
            # kernel narrows from filter_size + 2 down to filter_size - 2.
            self._l1_conv = snt.Conv1D(num_filters, filter_size + 2)
            self._l2_conv = snt.Conv1D(num_filters << 1, filter_size)
            self._l3_conv = snt.Conv1D(num_filters << 2, filter_size - 2)

class ReducedClassifier(snt.AbstractModule):

    def __init__(self, num_classes=2, act='elu', name="reduced_classifier"):
        super(ReducedClassifier, self).__init__(name=name)
        self._act = Activation(act, verbose=True)
        self._bf = snt.BatchFlatten()
        regularizers = {
            "w": tf.contrib.layers.l2_regularizer(scale=0.1),
            "b": tf.contrib.layers.l2_regularizer(scale=0.1)
        }
        with self._enter_variable_scope():
            self._lin1 = snt.Linear(256, regularizers=regularizers)
            self._lin2 = snt.Linear(num_classes, regularizers=regularizers)

class Discriminator(snt.AbstractModule):

    def __init__(self, act='elu', name="discriminator"):
        super(Discriminator, self).__init__(name=name)
        self._act = Activation(act, verbose=True)
        with self._enter_variable_scope():
            # Three-layer MLP head: 1 -> 256 -> 64 -> 1.
            self._d_w1 = tf.get_variable(
                'd_w1', [1, 256],
                initializer=tf.truncated_normal_initializer(stddev=1))
            self._d_b1 = tf.get_variable(
                'd_b1', [256], initializer=tf.constant_initializer(0))
            self._d_w2 = tf.get_variable(
                'd_w2', [256, 64],
                initializer=tf.truncated_normal_initializer(stddev=1))
            self._d_b2 = tf.get_variable(
                'd_b2', [64], initializer=tf.constant_initializer(0))
            self._d_w3 = tf.get_variable(
                'd_w3', [64, 1],
                initializer=tf.truncated_normal_initializer(stddev=1))
            self._d_b3 = tf.get_variable(
                'd_b3', [1], initializer=tf.constant_initializer(0))

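# --- Sketch: a plausible forward pass for these variables --------------------
# Not from the original source; the real _build is not shown here. The weight
# shapes (1 -> 256 -> 64 -> 1) suggest a small MLP ending in a single
# unbounded logit. Assumes Activation instances are callable modules.
def _discriminator_forward_sketch(disc, x):
    # disc: a Discriminator instance; x: [batch, 1] float tensor.
    h1 = disc._act(tf.matmul(x, disc._d_w1) + disc._d_b1)
    h2 = disc._act(tf.matmul(h1, disc._d_w2) + disc._d_b2)
    return tf.matmul(h2, disc._d_w3) + disc._d_b3   # logit, shape [batch, 1]
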
class Adaptor(snt.AbstractModule):

    def __init__(self, num_filters=32, filter_size=5, act='', name="adaptor"):
        super(Adaptor, self).__init__(name=name)
        initializers = {
            'w': tf.truncated_normal_initializer(stddev=0.04),
            'b': tf.zeros_initializer()
        }
        self._act = Activation(act, verbose=True)
        self._pool = Downsample2D(2)
        with self._enter_variable_scope():
            self._l1_conv = snt.Conv2D(num_filters, filter_size,
                                       initializers=initializers)
            self._l2_conv = snt.Conv2D(num_filters << 1, filter_size,
                                       initializers=initializers)
            self._l3_conv = snt.Conv2D(num_filters << 2, filter_size,
                                       initializers=initializers)

class Mapper(snt.AbstractModule):

    def __init__(self, filter_size=3, num_filters=32, pooling_stride=2,
                 act='tanh', summ=None, name="mapper"):
        super(Mapper, self).__init__(name=name)
        self._pool = Downsample2D(pooling_stride)
        self._act = Activation(act, verbose=True)
        self._bf = snt.BatchFlatten()
        self._summ = summ
        initializers = {
            'w': tf.truncated_normal_initializer(stddev=0.02),
            'b': tf.zeros_initializer()
        }
        with self._enter_variable_scope():
            self._l1_conv = snt.Conv2D(num_filters, filter_size)
            self._l2_conv = snt.Conv2D(num_filters << 1, filter_size)
            self._lin1 = snt.Linear(256, initializers=initializers)
            self._lin2 = snt.Linear(1, initializers=initializers)

class Upsampler(snt.AbstractModule):

    def __init__(self, dim, factor=2, filter_size=5, num_filters=16,
                 act='relu', name='upsampler'):
        super(Upsampler, self).__init__(name=name)
        self._act = Activation(act=act, verbose=True)
        with self._enter_variable_scope():
            # Each stride-2 transposed convolution doubles the spatial
            # resolution; the requested output_shape grows by `factor`
            # per stage.
            self._conv = snt.Conv2DTranspose(
                num_filters, [e * factor for e in dim[1:-1]], filter_size,
                stride=2, use_bias=False)
            dim2 = [dim[0], dim[1] * factor, dim[2] * factor, num_filters]
            self._conv2 = snt.Conv2DTranspose(
                num_filters, [e * factor for e in dim2[1:-1]], filter_size,
                stride=2, use_bias=False)
            dim3 = [dim[0], dim[1] * factor * factor,
                    dim[2] * factor * factor, num_filters]
            self._conv3 = snt.Conv2DTranspose(
                num_filters, [e * factor for e in dim3[1:-1]], filter_size,
                stride=2, use_bias=False)
            self._seq = snt.Sequential([
                self._conv, self._act,
                self._conv2, self._act,
                self._conv3, self._act,
            ])

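# --- Sketch: output-shape bookkeeping -----------------------------------------
# Not part of the original module. With stride 2 and factor 2, the three
# requested Conv2DTranspose output shapes track the doubling explicitly; for
# a hypothetical dim = (None, 8, 8, 3):
#   stage 1: (8, 8)   -> (16, 16)   from dim[1:-1]  scaled by factor
#   stage 2: (16, 16) -> (32, 32)   from dim2[1:-1] scaled by factor
#   stage 3: (32, 32) -> (64, 64)   from dim3[1:-1] scaled by factor
def _upsampler_shape_sketch(dim=(None, 8, 8, 3), factor=2):
    h, w = dim[1], dim[2]
    stages = []
    for _ in range(3):
        h, w = h * factor, w * factor
        stages.append((h, w))
    return stages   # [(16, 16), (32, 32), (64, 64)] for the example above
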
def __init__(self, dim, factor=2, filter_size=5, act='relu', name="extractor"): super(Extractor, self).__init__(name=name) self._act = Activation(act, verbose=True) with self._enter_variable_scope(): self._conv = snt.Conv2D(dim[-1] * factor * factor, filter_size, use_bias=False) self._conv2 = snt.Conv2D(dim[-1] * factor, filter_size, use_bias=False) self._conv3 = snt.Conv2D(dim[-1], filter_size, use_bias=False) self._seq = snt.Sequential([ self._conv, self._act, self._conv2, self._act, self._conv3, self._act ])
class Model(snt.AbstractModule):

    def __init__(self, act=None, pool=None, with_memory=True, summ=None,
                 residual=True, log=False, name="model"):
        super(Model, self).__init__(name=name)
        self._with_memory = with_memory
        self._summ = summ
        self._residual = residual
        self._num_blocks = 6
        self._log = log
        with self._enter_variable_scope():
            self._act = Activation(act, verbose=True)
            self._pool = Pooling(pool, padding='VALID', verbose=True)
            # Per-block output widths are read from
            # FLAGS.num_outputs_block_1 .. FLAGS.num_outputs_block_6.
            block_outputs = [
                getattr(FLAGS, "num_outputs_block_%d" % (i + 1))
                for i in range(self._num_blocks)
            ]
            if self._residual:
                self._convs = [
                    snt.Conv2D(block_outputs[i], FLAGS.filter_size,
                               padding=snt.VALID, use_bias=False)
                    for i in range(self._num_blocks)
                ]
                self._sepconvs = [
                    snt.SeparableConv2D(block_outputs[i], 1,
                                        FLAGS.filter_size,
                                        padding=snt.SAME, use_bias=False)
                    for i in range(self._num_blocks)
                ]
            else:
                self._sepconvs = [
                    snt.SeparableConv2D(block_outputs[i], 1,
                                        FLAGS.filter_size,
                                        padding=snt.VALID, use_bias=False)
                    for i in range(self._num_blocks)
                ]
            self._seq = snt.Sequential([
                snt.Linear(output_size=FLAGS.num_outputs_dense),
                tf.nn.relu,
                snt.Linear(output_size=FLAGS.num_classes)
            ])
            if self._with_memory:
                print("Model with memory enabled")
                config = {
                    "height": FLAGS.memory_height,
                    "width": FLAGS.memory_width,
                    "input_size": 32,  # dangerous: hard-coded
                    "num_iters": FLAGS.num_iterations,
                    "learning_rate": FLAGS.lr_som
                }
                self._som = SOM(**config)

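# --- Sketch: FLAGS this constructor expects ------------------------------------
# Not part of the original module; the flag names are taken from the lookups
# above, but the default values are illustrative, not the authors' settings.
def _define_model_flags_sketch():
    flags = tf.app.flags
    for i in range(6):
        flags.DEFINE_integer('num_outputs_block_%d' % (i + 1), 32,
                             'output channels of block %d' % (i + 1))
    flags.DEFINE_integer('filter_size', 3, 'conv kernel size')
    flags.DEFINE_integer('num_outputs_dense', 128, 'dense layer width')
    flags.DEFINE_integer('num_classes', 10, 'number of output classes')
    flags.DEFINE_integer('memory_height', 10, 'SOM grid height')
    flags.DEFINE_integer('memory_width', 10, 'SOM grid width')
    flags.DEFINE_integer('num_iterations', 100, 'SOM training iterations')
    flags.DEFINE_float('lr_som', 0.1, 'SOM learning rate')
    return flags.FLAGS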