Example #1
def res(x):
    # Two-conv residual block; `f`, `activation`, and `ResidualStart` are
    # assumed to come from the enclosing module.
    x = ResidualStart()(x)
    x1 = Conv2D(f, 3, strides=1, padding='same')(x)
    x1 = BatchNormalization()(x1)
    x1 = Lambda(activation)(x1)
    x1 = Conv2D(f, 3, strides=1, padding='same')(x1)
    x1 = BatchNormalization()(x1)
    return Add()([x1, x])
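
A minimal driver for the block above is sketched here; `f`, `activation`, and `ResidualStart` are not defined in the snippet, so the values below (64 filters, the backend ReLU, a pass-through `ResidualStart`) are placeholder assumptions:

from keras.layers import Input, Conv2D, BatchNormalization, Lambda, Add, Layer
from keras.models import Model
import keras.backend as K

f = 64               # assumed filter count
activation = K.relu  # assumed activation wrapped by Lambda

class ResidualStart(Layer):
    # Placeholder identity layer standing in for the real ResidualStart.
    def call(self, inputs):
        return inputs

inp = Input(shape=(32, 32, 64))  # channel count must equal f so the Add works
model = Model(inp, res(inp))
model.summary()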
Example #2
def convolutional_block(X, f, filters, stage, block, s=2):
    # ResNet convolutional block: a three-conv main path plus a projection
    # shortcut (1x1 conv, stride s) so both branches match at the Add.

    # defining name basis
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    F1, F2, F3 = filters
    X_shortcut = X

    X = Conv2D(F1, (1, 1),
               strides=(s, s),
               name=conv_name_base + '2a',
               padding="valid",
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2a')(X)
    X = Activation('relu')(X)

    X = Conv2D(F2, (f, f),
               strides=(1, 1),
               padding='same',
               name=conv_name_base + '2b',
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2b')(X)
    X = Activation('relu')(X)

    X = Conv2D(F3, (1, 1),
               strides=(1, 1),
               padding='valid',
               name=conv_name_base + '2c',
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2c')(X)

    X_shortcut = Conv2D(F3, (1, 1),
                        strides=(s, s),
                        padding='valid',
                        name=conv_name_base + '1',
                        kernel_initializer=glorot_uniform(seed=0))(X_shortcut)
    X_shortcut = BatchNormalization(axis=3,
                                    name=bn_name_base + '1')(X_shortcut)

    X = Add()([X, X_shortcut])
    X = Activation('relu')(X)

    return X
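
A hedged usage sketch for the block above; the shapes and filter sizes are illustrative, and the imports are simply the names the function body relies on:

from keras.layers import Input, Conv2D, BatchNormalization, Activation, Add
from keras.initializers import glorot_uniform
from keras.models import Model

X_in = Input(shape=(56, 56, 64))
# Illustrative call: halve the spatial size (s=2) and widen to 256 channels.
X_out = convolutional_block(X_in, f=3, filters=[64, 64, 256],
                            stage=2, block='a', s=2)
Model(X_in, X_out).summary()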
Example #3
def _conv_block(x, out_filters, bneck_filters, groups, kernel_size, strides):
    # Pre-activation bottleneck block with an optional projection shortcut;
    # `_preact_conv` and `Scoping` are defined elsewhere in the source project.
    scope = Scoping.get_global_scope()
    if int(x.shape[-1]) != out_filters or strides > 1:
        with scope.name_scope('shortcut'):
            shortcut = _preact_conv(x, out_filters, 1, strides)
    else:
        shortcut = x

    with scope.name_scope('in'):
        pi = _preact_conv(x, bneck_filters, 1, 1)

    with scope.name_scope('bneck'):
        pi = _preact_conv(pi, bneck_filters, kernel_size, strides, groups)

    with scope.name_scope('out'):
        pi = _preact_conv(pi, out_filters, 1, 1)

    x = Add(name=scope+'add_shortcut')([shortcut, pi])
    return x
Example #4
def identity_block(X, f, filters, stage, block):
    # ResNet identity block: the shortcut is added back unprojected, so the
    # input channel count must already equal F3.

    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'
    F1, F2, F3 = filters

    X_shortcut = X

    X = Conv2D(filters=F1,
               kernel_size=(1, 1),
               strides=(1, 1),
               padding='valid',
               name=conv_name_base + '2a',
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2a')(X)
    X = Activation('relu')(X)

    X = Conv2D(filters=F2,
               kernel_size=(f, f),
               strides=(1, 1),
               padding='same',
               name=conv_name_base + '2b',
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2b')(X)
    X = Activation('relu')(X)

    X = Conv2D(filters=F3,
               kernel_size=(1, 1),
               strides=(1, 1),
               padding='valid',
               name=conv_name_base + '2c',
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2c')(X)

    X = Add()([X, X_shortcut])
    X = Activation('relu')(X)

    return X
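
Because the shortcut is unprojected, the input channel count must match F3; a minimal sketch with illustrative shapes:

from keras.layers import Input, Conv2D, BatchNormalization, Activation, Add
from keras.initializers import glorot_uniform
from keras.models import Model

X_in = Input(shape=(56, 56, 256))  # channels must equal F3 = 256
X_out = identity_block(X_in, f=3, filters=[64, 64, 256], stage=2, block='b')
Model(X_in, X_out).summary()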
Example #5
    def build(self):

        gru = GRU(units=self.input_shape[-1],
                  return_sequences=False,
                  name='gru')
        bn = BatchNormalization()
        decoder_1 = Dense(units=128, activation='sigmoid', name='decoder-1')
        decoder_2 = Dense(units=self.output_dim,
                          activation='sigmoid',
                          name='decoder-2')
        self.layers.append(gru)
        self.layers.append(decoder_1)
        self.layers.append(decoder_2)

        activations = {}
        for i in range(self.historical_len):
            s_input = self.input_s[i]
            t_input = self.input_t[i]
            if i == 0:
                # hs = bn(s_input)
                hs = gru(s_input)
                activations['s-r-{}-o'.format(i)] = hs
                hs = Reshape((1, -1))(hs)
                activations['s-r-{}'.format(i)] = hs

                # ht = bn(t_input)
                ht = gru(t_input)
                activations['t-r-{}-o'.format(i)] = ht
                ht = Reshape((1, -1))(ht)
                activations['t-r-{}'.format(i)] = ht

            else:
                # hs = bn(s_input)
                hs = gru(
                    Add()([activations['s-r-{}-o'.format(i - 1)], s_input]))
                activations['s-r-{}-o'.format(i)] = hs
                hs = Reshape((1, -1))(hs)
                activations['s-r-{}'.format(i)] = hs

                # ht = bn(t_input)
                ht = gru(
                    Add()([activations['t-r-{}-o'.format(i - 1)], t_input]))
                activations['t-r-{}-o'.format(i)] = ht
                ht = Reshape((1, -1))(ht)
                activations['t-r-{}'.format(i)] = ht

        for i in range(self.historical_len):
            idx = self.historical_len - 1 - i
            s_input = self.input_s[idx]
            t_input = self.input_t[idx]
            if i == 0:
                # hs = bn(s_input)
                hs = gru(s_input)
                activations['s-l-{}-o'.format(i)] = hs
                hs = Reshape((1, -1))(hs)
                activations['s-l-{}'.format(i)] = hs

                # ht = bn(t_input)
                ht = gru(t_input)
                activations['t-l-{}-o'.format(i)] = ht
                ht = Reshape((1, -1))(ht)
                activations['t-l-{}'.format(i)] = ht

            else:
                # hs = bn(s_input)
                hs = gru(
                    Add()([activations['s-l-{}-o'.format(i - 1)], s_input]))
                activations['s-l-{}-o'.format(i)] = hs
                hs = Reshape((1, -1))(hs)
                activations['s-l-{}'.format(i)] = hs

                # ht = bn(t_input)
                ht = gru(
                    Add()([activations['t-l-{}-o'.format(i - 1)], t_input]))
                activations['t-l-{}-o'.format(i)] = ht
                ht = Reshape((1, -1))(ht)
                activations['t-l-{}'.format(i)] = ht

        s_h_r = concatenate(
            [activations['s-r-{}'.format(i)] for i in range(self.historical_len)],
            axis=1)
        s_h_l = concatenate(
            [activations['s-l-{}'.format(i)] for i in range(self.historical_len)],
            axis=1)
        t_h_r = concatenate(
            [activations['t-r-{}'.format(i)] for i in range(self.historical_len)],
            axis=1)
        t_h_l = concatenate(
            [activations['t-l-{}'.format(i)] for i in range(self.historical_len)],
            axis=1)

        s_c = concatenate([s_h_r, s_h_l], axis=1)
        t_c = concatenate([t_h_r, t_h_l], axis=1)
        s_c = Activation('relu')(Reshape((-1, ))(s_c))
        t_c = Activation('relu')(Reshape((-1, ))(t_c))

        # diff = tf.square(tf.subtract(s_c, t_c))
        # tmp = diff.get_shape().as_list()[-1]
        # self.weight = RepeatVector(tmp)(self.weight)
        # self.weight = tf.reshape(self.weight, [-1, tmp])
        # self.diff = tf.multiply(diff, self.weight)

        self.output_s = decoder_2(decoder_1(s_c))
        self.output_t = decoder_2(decoder_1(t_c))

        self.variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        self.vars = {var.name: var for var in self.variables}

        self._loss()
        self._gradients()

        self.opt_op = self.optimizer.minimize(self.loss)
Example #6
from tensorflow.contrib.keras.api.keras.layers import Conv2D, MaxPooling2D, Flatten
from tensorflow.contrib.keras.api.keras.layers import Input, LSTM, Embedding, Dense, concatenate, Add
#from keras.layers import Conv2D, MaxPooling2D, Flatten, Layer
#from keras.layers import Input, LSTM, Embedding, Dense, concatenate
from tensorflow.contrib.keras.api.keras.models import Model, Sequential
from tensorflow.python.layers.base import Layer

input1 = Input(shape=(16,))
x1 = Dense(8, activation='relu')(input1)
input2 = Input(shape=(32,))
x2 = Dense(8, activation='relu')(input2)
add_layer = Add()            # keep a handle on the layer so its nodes can be inspected
added = add_layer([x1, x2])  # equivalent to added = keras.layers.add([x1, x2])

print("")
print("")
print("---- iterate inbound nodes and tensors ----")
print("")
print("")
print("added")
for n in added.inbound_nodes:
	print("")
	print("In-Node ", id(n), n.get_config())
for n in added.outbound_nodes:
	print("")
	print("Out-Node ",  id(n), n.get_config())


out = Dense(4)(added)
model = Model(inputs=[input1, input2], outputs=out)
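
A quick smoke test of the merged model with random inputs (batch size and compile settings are placeholders; the input shapes come from the Input definitions above):

import numpy as np
model.compile(optimizer='sgd', loss='mse')
pred = model.predict([np.random.rand(2, 16), np.random.rand(2, 32)])
print(pred.shape)  # expected: (2, 4), from the final Dense(4)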