Example #1
    def call(self, inputs, **kwargs):
        outputs = self.bert(inputs, **kwargs)

        if len(self.output_0_layers) > 0:
            layers = []
            layers.append(self.output_0_layers[0](outputs[0]))

            len_output_0_layers = len(self.output_0_layers)
            for i in range(1, len_output_0_layers):
                layers.append(self.output_0_layers[i](layers[i - 1]))

            concat = tf.keras.layers.Concatenate()([layers[-1], outputs[1]])

            concat_dropout = self.dropout(concat,
                                          training=kwargs.get(
                                              "training", False))

            logits = self.classifier(concat_dropout)
        else:
            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output,
                                         training=kwargs.get(
                                             "training", False))
            logits = self.classifier(pooled_output)

        # add hidden states and attention if they are here
        outputs = (logits, ) + outputs[2:]

        return outputs  # logits, (hidden_states), (attentions)
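A note on the calling convention: the method returns a tuple whose first element is the logits, so a hypothetical caller (assuming this method lives on a subclassed Keras model wrapping a TFBertMainLayer as self.bert) would unpack it like so:

outputs = model(input_ids, training=True)
logits = outputs[0]  # optional hidden states / attentions follow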
Example #2
 def _make_layer(self, block, out_channels, num_blocks, stride):
     strides = [stride] + [1] * (num_blocks - 1)
     layers = []
     for stride in strides:
         layers.append(block(self.in_channels, out_channels, stride))
         self.in_channels = out_channels
     return tf.keras.Sequential(layers)
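A minimal sketch of how a helper like this is typically wired up inside a ResNet-style tf.keras.Model subclass; BasicBlock is a hypothetical tf.keras.layers.Layer taking (in_channels, out_channels, stride):

# The first block in each stage downsamples via `stride`; the rest use
# stride 1, matching strides = [stride] + [1] * (num_blocks - 1).
self.in_channels = 64
self.stage2 = self._make_layer(BasicBlock, 128, num_blocks=2, stride=2)
self.stage3 = self._make_layer(BasicBlock, 256, num_blocks=2, stride=2)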
Example #3
def parse_model(d, ch, model):  # model_dict, input_channels(3)
    logger.info('\n%3s%18s%3s%10s  %-40s%-30s' %
                ('', 'from', 'n', 'params', 'module', 'arguments'))
    anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d[
        'width_multiple']
    na = (len(anchors[0]) //
          2) if isinstance(anchors, list) else anchors  # number of anchors
    no = na * (nc + 5)  # number of outputs = anchors * (classes + 5)

    layers, save, c2 = [], [], ch[-1]  # layers, savelist, ch out
    for i, (f, n, m,
            args) in enumerate(d['backbone'] +
                               d['head']):  # from, number, module, args
        m_str = m
        m = eval(m) if isinstance(m, str) else m  # eval strings
        for j, a in enumerate(args):
            try:
                args[j] = eval(a) if isinstance(a, str) else a  # eval strings
            except NameError:  # leave non-evaluable strings untouched
                pass

        n = max(round(n * gd), 1) if n > 1 else n  # depth gain
        if m in [
                nn.Conv2d, Conv, Bottleneck, SPP, DWConv, MixConv2d, Focus,
                CrossConv, BottleneckCSP, C3
        ]:
            c1, c2 = ch[f], args[0]
            c2 = make_divisible(c2 * gw, 8) if c2 != no else c2

            args = [c1, c2, *args[1:]]
            if m in [BottleneckCSP, C3]:
                args.insert(2, n)
                n = 1
        elif m is nn.BatchNorm2d:
            args = [ch[f]]
        elif m is Concat:
            c2 = sum([ch[-1 if x == -1 else x + 1] for x in f])
        elif m is Detect:
            args.append([ch[x + 1] for x in f])
            if isinstance(args[1], int):  # number of anchors
                args[1] = [list(range(args[1] * 2))] * len(f)
        else:
            c2 = ch[f]

        tf_m = eval('tf_' + m_str.replace('nn.', ''))
        m_ = keras.Sequential([tf_m(*args, w=model.model[i][j]) for j in range(n)]) if n > 1 \
                else tf_m(*args, w=model.model[i])  # module

        torch_m_ = nn.Sequential(
            *[m(*args) for _ in range(n)]) if n > 1 else m(*args)  # module
        t = str(m)[8:-2].replace('__main__.', '')  # module type
        np = sum([x.numel() for x in torch_m_.parameters()])  # number params
        m_.i, m_.f, m_.type, m_.np = i, f, t, np  # attach index, 'from' index, type, number params
        logger.info('%3s%18s%3s%10.0f  %-40s%-30s' %
                    (i, f, n, np, t, args))  # print
        save.extend(x % i for x in ([f] if isinstance(f, int) else f)
                    if x != -1)  # append to savelist
        layers.append(m_)
        ch.append(c2)
    return keras.Sequential(layers), sorted(save)
Example #4
def build_mlp(dim_list,
              activation='relu',
              batch_norm='batch',
              dropout=0,
              final_nonlinearity=True):
    ''' Build an MLP as a Sequential model.
        Input: a list of layer dimensions, e.g. [in, hidden..., out]
    '''
    layers = []
    for i in range(len(dim_list) - 1):
        dim_in, dim_out = dim_list[i], dim_list[i + 1]
        layers.append(
            Dense(dim_out,
                  activation="linear",
                  kernel_initializer=initializers.RandomNormal()))
        final_layer = (i == len(dim_list) - 2)
        if not final_layer or final_nonlinearity:
            if batch_norm == 'batch':
                layers.append(BatchNormalization())
            if activation == 'relu':
                layers.append(ReLU())
            elif activation == 'leakyrelu':
                layers.append(LeakyReLU())
        if dropout > 0:
            layers.append(Dropout(rate=dropout))
    return Sequential(layers)
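For example, a classifier head could be assembled as follows (the layer sizes are illustrative):

# Hidden layers get BatchNorm + ReLU; the final Dense stays linear
# because final_nonlinearity=False skips the norm/activation pair.
# Note that, as written, dropout is appended after every Dense,
# including the last one.
mlp = build_mlp([512, 256, 128, 10],
                activation='relu',
                batch_norm='batch',
                dropout=0.5,
                final_nonlinearity=False)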
Example #5
    def _make_layer(self, block, out_ch, num_block, stride):
        strides = [stride] + [1] * (num_block - 1)
        layers = []

        for stride in strides:
            layers.append(block(self.in_ch, out_ch, stride))
            self.in_ch = out_ch * block.expansion
        return tf.keras.Sequential(layers)
Example #6
 def enumerate_layers(self, feature_extractor) -> List[int]:
     layers = []
     k = 0
     for n, m in self.model.named_modules():
         if re.search(r'\d+$', n):
             if isinstance(m, feature_extractor):
                 layers.append(k)
             k += 1
     return layers
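A standalone sketch of the same idea, run against torchvision's VGG-16 purely for illustration (the model choice is an assumption, not part of the original):

import re
import torch.nn as nn
from torchvision.models import vgg16

model = vgg16()  # untrained weights are fine for structural inspection
k, conv_positions = 0, []
for name, module in model.named_modules():
    if re.search(r'\d+$', name):  # only numbered (Sequential-child) modules
        if isinstance(module, nn.Conv2d):
            conv_positions.append(k)
        k += 1
print(conv_positions)  # positions of the conv layers among numbered modules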
Example #7
 def __init__(self, input_dim: int, num_layers: int, dropout: float = 0.2):
     super(DanSequenceToVector, self).__init__(input_dim)
     # TODO(students): start
     self._dropout = dropout
     self._num_layers = num_layers
     self._input_dim = input_dim
     layers = []
     for i in range(self._num_layers):
         # Define the DAN layers
         layers.append(tf.keras.layers.Dense(input_dim, activation='relu'))
     self._layers = layers
Example #8
def create_model():
    layers = [tf.keras.layers.Flatten(input_shape=(28, 28))]
    layers.extend([
        tf.keras.layers.Dense(32,
                              activation='tanh',
                              kernel_initializer="random_normal")
        for i in range(10)
    ])
    # layers.extend([tf.keras.layers.Dense(32, activation='tanh', kernel_initializer="glorot_normal") for i in range(10)])
    # layers.append(tf.keras.layers.Dense(10, activation='softmax'))
    layers.append(tf.keras.layers.Dense(10))
    return tf.keras.models.Sequential(layers)
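Because the final Dense(10) emits raw logits, any loss attached to this model needs from_logits=True. A minimal training sketch (dataset and hyperparameters are illustrative):

model = create_model()
model.compile(
    optimizer='adam',
    # from_logits=True: the last layer has no softmax
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=['accuracy'])
(x_train, y_train), _ = tf.keras.datasets.mnist.load_data()
model.fit(x_train / 255.0, y_train, epochs=1)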
Example #9
 def create_discriminator_graph(self):

   layers = []

   with tf.variable_scope("adver"):
     layers.append(tf.layers.Dense(
         units=240,
         activation=tf.nn.relu,
         name="adver/lay1"))
     layers.append(tf.layers.Dense(
         units=1,
         kernel_initializer=tf.random_normal_initializer(-0.005, 0.005),
         name="adver/lay3"))
   return layers
Example #10
def unet_v2(input_shape=(256, 256, 1),
            num_classes=2,
            activation='relu',
            upsampling_method='bilinear',
            downsampling_method='max_pool',
            depth=5,
            num_first_filters=32):
    """
    Modified version of the original unet with more convolutions and bilinear upsampling
    """
    logger.debug("building model unet_v2 with args %s" % (str(locals())))
    inputs = Input(input_shape)

    y = inputs
    layers = []

    features = [
        int(pow(2, math.log2(num_first_filters))) for i in range(depth)
    ]

    for k, f in enumerate(features):
        print("encoder k=%d, features=%d" % (k, f))
        y = conv(y, f, activation=activation)
        y = conv(y, f, activation=activation)
        layers.append(y)
        if k != (len(features) - 1):
            y = downsample(y,
                           method=downsampling_method,
                           activation=activation)

    for k, f in enumerate(reversed(features[:-1])):
        print('decoder: k=%d, features=%d' % (k, f))
        y = upsample(y, method=upsampling_method, activation=activation)
        y = conv(y, f, kernel_size=(1, 1), norm=None, activation=activation)
        y = K.concatenate([layers[-(k + 2)], y])
        y = conv(y, f, norm=None, activation=activation)
        y = conv(y, f, activation=activation)

    y = conv(y, features[0], norm=False, activation=activation)
    y = conv(y, features[0], norm=False, activation=activation)

    base_model = Model(inputs, y)
    y = conv(y, num_classes, kernel_size=(1, 1), activation=None, norm=None)
    return Model(inputs, y), base_model
Example #11
def make_layer(block,
               inplanes,
               planes,
               blocks,
               stride=1,
               dilation=1,
               norm_layer=None):
    if norm_layer is None:
        norm_layer = layers.BatchNormalization

    downsample = None  # avoid a NameError when no downsampling is needed
    if stride != 1 or inplanes != planes * block.expansion:
        downsample = K.Sequential([
            conv1x1(planes * block.expansion, stride),
            norm_layer(),  # planes * block.expansion),
        ])

    layers = []
    layers.append(
        block(
            inplanes,
            planes,
            stride=stride,
            downsample=downsample,
            groups=1,
            base_width=64,
            dilation=dilation,
            norm_layer=norm_layer,
        ))
    inplanes = planes * block.expansion

    for _ in range(1, blocks):
        layers.append(
            block(
                inplanes,
                planes,
                groups=1,
                base_width=64,
                dilation=dilation,
                norm_layer=norm_layer,
            ))

    return K.Sequential(layers)
Example #12
def unet(input_shape=(256, 256, 1),
         num_classes=3,
         depth=5,
         activation='relu',
         num_first_filters=64):
    """ 
        https://arxiv.org/pdf/1505.04597.pdf
    """
    logger.debug("building model unet with args %s" % (str(locals())))
    inputs = Input(input_shape)

    y = inputs
    layers = []

    features = [
        int(pow(2,
                math.log2(num_first_filters) + i)) for i in range(depth)
    ]

    for k, num_filters in enumerate(features):
        y = conv(y, num_filters, activation=activation)
        y = conv(y, num_filters, activation=activation)
        layers.append(y)

        if k != (len(features) - 1):
            y = downsample(y, method='max_pool')

        print("encoder - features: %d, shape: %s" %
              (num_filters, str(y.shape)))

    for k, num_filters in enumerate(reversed(features[:-1])):
        y = upsample(y, method='conv', activation=activation)
        y = K.concatenate([y, layers[-(k + 2)]])
        y = conv(y, num_filters, activation=activation)
        y = conv(y, num_filters, activation=activation)
        print("decoder - features: %d, shape: %s" %
              (num_filters, str(y.shape)))

    base_model = Model(inputs, y)
    y = conv(y, num_classes, kernel_size=(1, 1), activation=None, norm=None)
    return Model(inputs, y), base_model
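A hypothetical call, assuming the conv/downsample/upsample helpers used above are in scope:

# Build a 3-class segmentation net; base_model exposes the features
# before the final 1x1 classification conv.
model, base_model = unet(input_shape=(256, 256, 1), num_classes=3)
model.summary()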
Example #13
def encoder(pretrained=True):
    """unet encoder
       orig src : https://www.tensorflow.org/tutorials/images/segmentation
    """
    if pretrained:
        mobilenet = MobileNetV2(input_shape=[None, None, 1], include_top=False)
        layers = ["block_{}_expand_relu".format(x) for x in (1, 3, 6, 13)]
        layers.append("block_16_project")
        layers = [mobilenet.get_layer(layer).output for layer in layers]
        enc = Model(inputs=mobilenet.input, outputs=layers)
        enc.trainable = False  # freeze the pretrained encoder
    else:
        n_filter = 64
        enc = [
            encode(n_filter * 1, pool=False, norm=False),
            encode(n_filter * 2, pool=False, norm=True),
            encode(n_filter * 4, pool=False, norm=True),
            encode(n_filter * 8, pool=False, norm=True),
            encode(n_filter * 16, pool=True, norm=True),
        ]
    return enc
Example #14
    def __init__(self, emb_size, depth, max_seq_len, heads , bucket_size , num_hash, ff_chunks , causal = False):
        super().__init__()
        self.emb_size = emb_size
        self.depth = depth
        
        # plain feed-forward
        ff_caller = lambda: Feed_Forward(emb_size)

        # LSH attention, the core piece.
        lsh_caller = lambda: MH_LSHAttention(emb_size, heads, bucket_size, num_hash, causal = causal)
    
        layers = []

        for _ in range(depth):
            f = lsh_caller()
            #chunk feed forward
            g = Chunk_feedforward_Normalization(LayerNormalization, ff_caller(),ff_chunks)
            # see the PyTorch reversible-block code for reference.
            layers.append(Revnet(f, g))
        
        # see the reference implementation for this as well.
        self.model_layers = Revnet_Layers(layers)
Example #15
    def call(self, graph_inputs):
        input_atom, input_bond, atom_graph, bond_graph, num_nbs, node_mask, _, _ = graph_inputs
        #calculate the initial atom features using only its own features (no neighbors)
        atom_features = self.atom_features(input_atom)
        layers = []
        for i in range(self.depth):
            fatom_nei = tf.gather_nd(atom_features, tf.dtypes.cast(atom_graph,tf.int64)) #(batch, #atoms, max_nb, hidden)
            fbond_nei = tf.gather_nd(input_bond, tf.dtypes.cast(bond_graph, tf.int64)) #(batch, #atoms, max_nb, #bond features)
            h_nei_atom = self.nei_atom(fatom_nei) #(batch, #atoms, max_nb, hidden)
            h_nei_bond = self.nei_bond(fbond_nei) #(batch, #atoms, max_nb, hidden)
            h_nei = h_nei_atom * h_nei_bond #(batch, #atoms, max_nb, hidden)
            mask_nei = K.reshape(tf.sequence_mask(K.reshape(num_nbs, [-1]), self.max_nb, dtype=tf.float32), [K.shape(input_atom)[0],-1, self.max_nb,1])
            f_nei = K.sum(h_nei * mask_nei, axis=-2, keepdims=False) #(batch, #atoms, hidden) sum across atoms
            f_self = self.self_atom(atom_features) #(batch, #atoms, hidden)

            layers.append(f_nei * f_self * self.node_reshape(node_mask))#, -1))
            l_nei = K.concatenate([fatom_nei, fbond_nei], axis=3) #(batch, #atoms, max_nb, )
            pre_label = self.label_U2(l_nei)
            nei_label = K.sum(pre_label * mask_nei, axis=-2, keepdims=False)
            new_label = K.concatenate([atom_features, nei_label], axis=2)

            atom_features = self.label_U1(new_label)
        kernels = layers[-1]
        return kernels
Example #16
    def _make_block(self, btype, block, filters, kernel, stride, pre_block):
        """
        创建ResBlock小块
        """

        layers = []
        # Assemble the blocks.
        if btype == 'basic':
            for i in range(block):
                if i == 0:
                    layers.append(
                        BasicResBlockUpdate(filter_num=[filters, filters],
                                            kernel_size=[kernel, kernel],
                                            strides=stride,
                                            input_channels=pre_block))
                else:
                    layers.append(
                        BasicResBlockUpdate(filter_num=[filters, filters],
                                            kernel_size=[kernel, kernel],
                                            strides=[stride[1], stride[1]],
                                            input_channels=pre_block))

        elif btype == 'bottleneck':
            for i in range(block):
                if i == 0:
                    layers.append(
                        BottleneckResBlock(filter_num=[filters, filters],
                                           kernel_size=[kernel, kernel],
                                           strides=stride,
                                           input_channels=pre_block))
                else:
                    layers.append(
                        BottleneckResBlock(filter_num=[filters, filters],
                                           kernel_size=[kernel, kernel],
                                           strides=[stride[1], stride[1]],
                                           input_channels=pre_block))
        else:
            print('Unknown block type!')
            return None

        return keras.Sequential(layers)
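A hypothetical call from the enclosing model's __init__ (argument values are illustrative):

# Three bottleneck blocks; the first one is strided, the rest reuse
# stride[1], and pre_block carries the incoming channel count.
self.stage3 = self._make_block('bottleneck', block=3, filters=128,
                               kernel=3, stride=[2, 1], pre_block=64)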
Example #17
def resu(x, iterations, mid_filters=12, out_filters=3):
    layers = []

    y_in = rebnconv(x, filters=out_filters, dirate=1)
    layers.append(y_in)

    y = y_in

    for i in range(iterations):
        y = rebnconv(y, filters=mid_filters, dirate=1)
        layers.append(y)
        y = pool(y)

    y = rebnconv(y, filters=mid_filters, dirate=1)
    layers.append(y)

    y = rebnconv(y, filters=mid_filters, dirate=2)

    for i in range(iterations):
        y = rebnconv(tf.concat([y, layers[-(i + 1)]], axis=-1), filters=mid_filters, dirate=1)
        y = upsample(y)

    y = rebnconv(tf.concat([y, layers[0]], axis=-1), filters=out_filters, dirate=1)
    return y + y_in
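A hypothetical call, assuming the rebnconv/pool/upsample helpers used above are in scope and x is a feature-map tensor:

# One RSU-style encoder/decoder block with a residual connection
# (return y + y_in) back to the input-resolution features.
y = resu(x, iterations=4, mid_filters=12, out_filters=3)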
Example #18
 def _make_layer(self, block, num_blocks, in_channels, out_channels):
     layers = []
     for i in range(0, num_blocks):
         layers.append(block(in_channels, out_channels))
     return nn.Sequential(*layers)
Example #19
 def _make_stage(self, num_stages, out_channels):
     layers = []
     layers.append(ShuffleNetUnit(out_channels, stride=2, se=self.se))
     for i in range(num_stages):
         layers.append(ShuffleNetUnit(out_channels, stride=1, se=self.se))
     return Sequential(layers)
Example #20
def getSequential_model(configuration, relu_max_value=None, relu_negative_slope=0.0):
	layers = []
	
	# Create dense layers with the "relu" function
	count = 0
	last_neurons_per_layer = 0		
	for neurons_per_layer in configuration:	
		# First Layer
		if count == 0:
			layers.append(keras.layers.Dense(neurons_per_layer, activation='linear', use_bias=True, name='dense_' + str(count), input_shape=(2,)))
			layers.append(keras.layers.ReLU(max_value=relu_max_value, negative_slope=relu_negative_slope))
		# Last Layer
		elif (count == len(configuration) - 1):
			layers.append(keras.layers.Dense(neurons_per_layer, activation='linear', use_bias=True, name='dense_' + str(count)))
		# Average Pooling Layer
		elif neurons_per_layer == 'pa':
			layers.append(keras.layers.Reshape((last_neurons_per_layer, 1)))
			layers.append(keras.layers.AveragePooling1D(pool_size=2, strides=None, padding='valid'))
			layers.append(keras.layers.Flatten())
		# Max Pooling Layer
		elif neurons_per_layer == 'pm':
			layers.append(keras.layers.Reshape((last_neurons_per_layer, 1)))
			layers.append(keras.layers.MaxPooling1D(pool_size=2, strides=None, padding='valid'))
			layers.append(keras.layers.Flatten())
		# Middle Layers
		else:
			layers.append(keras.layers.Dense(neurons_per_layer, activation='linear', use_bias=True, name='dense_' + str(count)))
			layers.append(keras.layers.ReLU(max_value=relu_max_value, negative_slope=relu_negative_slope))
		last_neurons_per_layer = neurons_per_layer
		count += 1
	
	return keras.models.Sequential(layers)
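A sample configuration (the values are illustrative): integers are Dense widths, 'pa' inserts average pooling, and 'pm' inserts max pooling; the input shape is hard-coded to (2,) in the first layer:

# Dense(8) + ReLU, average-pool the 8 activations down to 4,
# then a linear Dense(1) output layer.
model = getSequential_model([8, 'pa', 1], relu_negative_slope=0.01)
model.summary()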
Example #21
 def make_layer(self, channel, block, stride):
     layers = []
     layers.append(Bottleneck(channel=channel, stride=stride, downsample=True))
     for i in range(1, block):
         layers.append(Bottleneck(channel))
     return tf.keras.Sequential(layers)