def test_batchnorm_convnet_no_center_no_scale():
    model = Sequential()
    norm = normalization.BatchNormalization(axis=-1,
                                            center=False,
                                            scale=False,
                                            input_shape=(3, 4, 4),
                                            momentum=0.8)
    model.add(norm)
    model.compile(loss='mse', optimizer='sgd')

    # centered on 5.0, variance 10.0
    x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 3, 4, 4))
    model.fit(x, x, epochs=4, verbose=0)
    out = model.predict(x)

    assert_allclose(np.mean(out, axis=(0, 2, 3)), 0.0, atol=1e-1)
    assert_allclose(np.std(out, axis=(0, 2, 3)), 1.0, atol=1e-1)
Example #2
 def conv3D_and_bn(self,
                   input_tensor,
                   filters,
                   kernel_size,
                   padding_method='same',
                   activation_method=None,
                   kernel_initializer_method='he_normal'):
     output_tensor = Conv3D(
         filters,
         kernel_size,
         padding=padding_method,
         activation=activation_method,
         kernel_initializer=kernel_initializer_method)(input_tensor)
     output_tensor = normalization.BatchNormalization(
         axis=-1)(output_tensor)
     output_tensor = Activation('relu')(output_tensor)
     return output_tensor
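For context, here is a standalone sketch of the Conv3D -> BatchNormalization -> ReLU pattern this helper wraps; the input shape and filter count are assumptions, not taken from the original code.
# Hypothetical standalone sketch of the same pattern; shape and filter count
# are illustrative assumptions only.
from keras.layers import Input, Conv3D, Activation
from keras.layers import normalization
from keras.models import Model

volume = Input(shape=(16, 16, 16, 1))  # single-channel 3D patch (assumption)
x = Conv3D(32, (3, 3, 3), padding='same', kernel_initializer='he_normal')(volume)
x = normalization.BatchNormalization(axis=-1)(x)
x = Activation('relu')(x)
model = Model(inputs=volume, outputs=x)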
def test_batchnorm_correctness_2d():
    model = Sequential()
    norm = normalization.BatchNormalization(axis=1,
                                            input_shape=(10, 6),
                                            momentum=0.8)
    model.add(norm)
    model.compile(loss='mse', optimizer='rmsprop')

    # centered on 5.0, variance 10.0
    x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 10, 6))
    model.fit(x, x, epochs=5, verbose=0)
    out = model.predict(x)
    out -= np.reshape(K.eval(norm.beta), (1, 10, 1))
    out /= np.reshape(K.eval(norm.gamma), (1, 10, 1))

    assert_allclose(out.mean(axis=(0, 2)), 0.0, atol=1.1e-1)
    assert_allclose(out.std(axis=(0, 2)), 1.0, atol=1.1e-1)
Example #4
 def __init__(self):
     super(HasList, self).__init__()
     self.layer_list = data_structures.List([core.Dense(3)])
     self.layer_list.append(core.Dense(4))
     self.layer_list.extend(
         [core.Dense(5),
          core.Dense(6, kernel_regularizer=tf.reduce_sum)])
     self.layer_list += [
         core.Dense(7, bias_regularizer=tf.reduce_sum),
         core.Dense(8)
     ]
     self.layer_list += (data_structures.List([core.Dense(9)]) +
                         data_structures.List([core.Dense(10)]))
     self.layer_list.extend(
         data_structures.List(list([core.Dense(11)]) + [core.Dense(12)]))
     self.layers_with_updates = data_structures.List(
         (normalization.BatchNormalization(), ))
Example #5
def test_batchnorm_convnet():
    np.random.seed(1337)
    model = Sequential()
    norm = normalization.BatchNormalization(axis=1, input_shape=(3, 4, 4),
                                            momentum=0.8)
    model.add(norm)
    model.compile(loss='mse', optimizer='sgd')

    # centered on 5.0, variance 10.0
    x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 3, 4, 4))
    model.fit(x, x, epochs=4, verbose=0)
    out = model.predict(x)
    out -= np.reshape(K.eval(norm.beta), (1, 3, 1, 1))
    out /= np.reshape(K.eval(norm.gamma), (1, 3, 1, 1))

    assert_allclose(np.mean(out, axis=(0, 2, 3)), 0.0, atol=1e-1)
    assert_allclose(np.std(out, axis=(0, 2, 3)), 1.0, atol=1e-1)
Example #6
def test_batchnorm_config():
    norm = normalization.BatchNormalization(input_shape=(10, 10),
                                            mode=1,
                                            epsilon=0.1,
                                            momentum=0.9)
    conf = norm.get_config()
    del conf['cache_enabled']
    del conf['trainable']
    del conf['custom_name']
    conf_target = {
        "input_shape": (10, 10),
        "name": normalization.BatchNormalization.__name__,
        "epsilon": 0.1,
        "mode": 1,
        "momentum": 0.9
    }
    assert (conf == conf_target)
Example #7
 def __init__(self):
     super(HasList, self).__init__()
     self.layer_list = tf.__internal__.tracking.wrap([core.Dense(3)])
     self.layer_list.append(core.Dense(4))
     self.layer_list.extend(
         [core.Dense(5),
          core.Dense(6, kernel_regularizer=tf.reduce_sum)])
     self.layer_list += [
         core.Dense(7, bias_regularizer=tf.reduce_sum),
         core.Dense(8)
     ]
     self.layer_list += (tf.__internal__.tracking.wrap([core.Dense(9)]) +
                         tf.__internal__.tracking.wrap([core.Dense(10)]))
     self.layer_list.extend(
         tf.__internal__.tracking.wrap(
             list([core.Dense(11)]) + [core.Dense(12)]))
     self.layers_with_updates = tf.__internal__.tracking.wrap(
         [normalization.BatchNormalization()])
Example #8
def test_batchnorm_mode_0_convnet():
    model = Sequential()
    norm_m0 = normalization.BatchNormalization(mode=0,
                                               axis=1,
                                               input_shape=(3, 4, 4))
    model.add(norm_m0)
    model.compile(loss='mse', optimizer='sgd')

    # centered on 5.0, variance 10.0
    X = np.random.normal(loc=5.0, scale=10.0, size=(1000, 3, 4, 4))
    model.fit(X, X, nb_epoch=5, verbose=0)
    out = norm_m0.call(K.variable(X))
    out -= K.reshape(norm_m0.beta, (1, 3, 1, 1))
    out /= K.reshape(norm_m0.gamma, (1, 3, 1, 1))
    np_out = K.function([K.learning_phase()], [out])([1.])[0]

    assert_allclose(np.mean(np_out, axis=(0, 2, 3)), 0.0, atol=1e-1)
    assert_allclose(np.std(np_out, axis=(0, 2, 3)), 1.0, atol=1e-1)
Example #9
def test_batchnorm_mode_0_or_2():
    for mode in [0, 2]:
        model = Sequential()
        norm_m0 = normalization.BatchNormalization(mode=mode,
                                                   input_shape=(10, ),
                                                   momentum=0.8)
        model.add(norm_m0)
        model.compile(loss='mse', optimizer='sgd')

        # centered on 5.0, variance 10.0
        X = np.random.normal(loc=5.0, scale=10.0, size=(1000, 10))
        model.fit(X, X, nb_epoch=4, verbose=0)
        out = model.predict(X)
        out -= K.eval(norm_m0.beta)
        out /= K.eval(norm_m0.gamma)

        assert_allclose(out.mean(), 0.0, atol=1e-1)
        assert_allclose(out.std(), 1.0, atol=1e-1)
Example #10
def target_layers(params):
    layers = []
    if params.nb_hidden:
        layer = kcore.Dense(params.nb_hidden,
                            activation='linear',
                            init='glorot_uniform')
        layers.append(('h1', layer))
        if params.batch_norm:
            layer = knorm.BatchNormalization()
            layers.append(('h1b', layer))
        layer = kcore.Activation(params.activation)
        layers.append(('h1a', layer))
        if params.drop_out:
            layer = kcore.Dropout(params.drop_out)
            layers.append(('h1d', layer))
    layer = kcore.Dense(1, activation='sigmoid', init='glorot_uniform')
    layers.append(('o', layer))
    return layers
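A hedged sketch of how the returned (name, layer) pairs might be stacked into a Sequential model; the original snippet does not show how the list is consumed, so `params`, the input dimension, and the shape-fixing first layer below are assumptions.
# Hypothetical assembly; all parameter values and the first shape-defining
# layer are assumptions for illustration only.
from argparse import Namespace
from keras.models import Sequential
from keras.layers import core as kcore

params = Namespace(nb_hidden=64, batch_norm=True, drop_out=0.25, activation='relu')

model = Sequential()
model.add(kcore.Dense(128, input_dim=500, activation='relu', init='glorot_uniform'))
for name, layer in target_layers(params):
    model.add(layer)
model.compile(optimizer='sgd', loss='binary_crossentropy')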
Example #11
def test_batchnorm_mode_0_convnet():
    model = Sequential()
    norm_m0 = normalization.BatchNormalization(mode=0,
                                               axis=1,
                                               input_shape=(3, 4, 4))
    model.add(norm_m0)
    model.compile(loss='mse', optimizer='sgd')

    # centered on 5.0, variance 10.0
    X = np.random.normal(loc=5.0, scale=10.0, size=(1000, 3, 4, 4))
    model.fit(X, X, nb_epoch=5, verbose=0)
    norm_m0.input = K.variable(X)
    out = (norm_m0.get_output(train=True) -
           K.reshape(norm_m0.beta,
                     (1, 3, 1, 1))) / K.reshape(norm_m0.gamma, (1, 3, 1, 1))

    assert_allclose(K.eval(K.mean(out, axis=(0, 2, 3))), 0.0, atol=1e-1)
    assert_allclose(K.eval(K.std(out, axis=(0, 2, 3))), 1.0, atol=1e-1)
Example #12
    def test_weight_init(self):
        """
        Test weight initialization
        """
        norm_m1 = normalization.BatchNormalization(input_shape=(10,), mode=1,
                                                   weights=[np.ones(10), np.ones(10), np.zeros(10), np.zeros(10)])

        for inp in [self.input_1, self.input_2, self.input_3]:
            norm_m1.input = inp
            out = (norm_m1.get_output(train=True) - np.ones(10)) / 1.
            self.assertAlmostEqual(out.mean().eval(), 0.0)
            if inp.std() > 0.:
                self.assertAlmostEqual(out.std().eval(), 1.0, places=2)
            else:
                self.assertAlmostEqual(out.std().eval(), 0.0, places=2)

        assert_allclose(norm_m1.gamma.eval(), np.ones(10))
        assert_allclose(norm_m1.beta.eval(), np.ones(10))
Example #13
def facial_detection():
    model = Sequential()
    model.add(normalization.BatchNormalization(input_shape=(96, 96, 1)))
    model.add(
        Convolution2D(24,
                      5,
                      5,
                      border_mode='same',
                      init='he_normal',
                      input_shape=(96, 96, 1),
                      dim_ordering='tf'))
    model.add(Activation('relu'))
    model.add(
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), border_mode='valid'))

    model.add(Convolution2D(36, 5, 5))
    model.add(Activation('relu'))
    model.add(
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), border_mode='valid'))

    model.add(Convolution2D(48, 5, 5))
    model.add(Activation('relu'))
    model.add(
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), border_mode='valid'))

    model.add(Convolution2D(64, 3, 3))
    model.add(Activation('relu'))
    model.add(
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), border_mode='valid'))

    model.add(Convolution2D(64, 3, 3))
    model.add(Activation('relu'))

    model.add(GlobalAveragePooling2D())

    model.add(Dense(500, activation='relu'))
    model.add(Dense(90, activation='relu'))
    model.add(Dense(30))
    model.compile(optimizer='rmsprop', loss='mse', metrics=['accuracy'])
    checkpointer = ModelCheckpoint(filepath='face_model.h5',
                                   verbose=1,
                                   save_best_only=True)
    epochs = 30
Example #14
def transition_block(x,
                     stage,
                     nb_filter,
                     compression=1.0,
                     dropout_rate=None,
                     weight_decay=1E-4):
    ''' Apply BatchNorm, 1x1 convolution, and average pooling, with optional compression and dropout.
        # Arguments
            x: input tensor
            stage: index for dense block
            nb_filter: number of filters
            compression: calculated as 1 - reduction. Reduces the number of feature maps in the transition block.
            dropout_rate: dropout rate
            weight_decay: weight decay factor
    '''

    eps = 1.1e-5
    conv_name_base = 'conv' + str(stage) + '_blk'
    relu_name_base = 'relu' + str(stage) + '_blk'
    pool_name_base = 'pool' + str(stage)

    x = normalization.BatchNormalization(epsilon=eps,
                                         axis=concat_axis,
                                         scale=True,
                                         name=conv_name_base + '_bn')(x)
    #x = Scale(axis=concat_axis, name=conv_name_base+'_scale')(x)
    x = Activation('relu', name=relu_name_base)(x)
    x = Convolution2D(int(nb_filter * compression),
                      1,
                      1,
                      name=conv_name_base,
                      bias=False)(x)

    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    x = AveragePooling2D((2, 2), strides=(2, 2), name=pool_name_base)(x)

    return x
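A possible call site between two dense blocks; note that `concat_axis` is referenced but not defined in the snippet, so the sketch assumes a channels-last value, and the input feature-map shape is illustrative.
# Hypothetical usage; `concat_axis` and the input shape are assumptions
# (the original module is expected to define concat_axis itself).
from keras.layers import Input

concat_axis = -1                    # channels-last assumption
feats = Input(shape=(32, 32, 64))   # output of a preceding dense block (assumption)
x = transition_block(feats, stage=1, nb_filter=64, compression=0.5, dropout_rate=0.2)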
Example #15
    def test_mode_0(self):
        """
        Test the function of mode 0. Need to be somewhat lenient with the
        equality assertions because of the epsilon trick used to avoid NaNs.
        """
        norm_m0 = normalization.BatchNormalization((10, ), momentum=0.5)

        norm_m0.input = self.input_1
        out = (norm_m0.get_output(train=True) - norm_m0.beta) / norm_m0.gamma
        self.assertAlmostEqual(out.mean().eval(), 0.0)
        self.assertAlmostEqual(out.std().eval(), 1.0, places=2)

        self.assertAlmostEqual(norm_m0.running_mean, 4.5)
        self.assertAlmostEqual(norm_m0.running_std.eval(),
                               np.arange(10).std(),
                               places=2)

        norm_m0.input = self.input_2
        out = (norm_m0.get_output(train=True) - norm_m0.beta) / norm_m0.gamma
        self.assertAlmostEqual(out.mean().eval(), 0.0)
        self.assertAlmostEqual(out.std().eval(), 0.0, places=2)

        #Values calculated by hand
        self.assertAlmostEqual(norm_m0.running_mean, 2.25)
        self.assertAlmostEqual(norm_m0.running_std.eval(),
                               0.5 * np.arange(10).std(),
                               places=2)

        out_test = (norm_m0.get_output(train=False) -
                    norm_m0.beta) / norm_m0.gamma
        self.assertAlmostEqual(out_test.mean().eval(),
                               -2.25 / (0.5 * np.arange(10).std()),
                               places=2)
        self.assertAlmostEqual(out_test.std().eval(), 0.0, places=2)

        norm_m0.input = self.input_3
        out = (norm_m0.get_output(train=True) - norm_m0.beta) / norm_m0.gamma
        self.assertAlmostEqual(out.mean().eval(), 0.0)
        self.assertAlmostEqual(out.std().eval(), 0.0, places=2)
Example #16
def cpg_layers(params):
    layers = []
    if params.drop_in:
        layer = kcore.Dropout(params.drop_in)
        layers.append(('xd', layer))
    nb_layer = len(params.nb_filter)
    w_reg = kr.WeightRegularizer(l1=params.l1, l2=params.l2)
    for l in range(nb_layer):
        layer = kconv.Convolution2D(nb_filter=params.nb_filter[l],
                                    nb_row=1,
                                    nb_col=params.filter_len[l],
                                    activation=params.activation,
                                    init='glorot_uniform',
                                    W_regularizer=w_reg,
                                    border_mode='same')
        layers.append(('c%d' % (l + 1), layer))
        layer = kconv.MaxPooling2D(pool_size=(1, params.pool_len[l]))
        layers.append(('p%d' % (l + 1), layer))

    layer = kcore.Flatten()
    layers.append(('f1', layer))
    if params.drop_out:
        layer = kcore.Dropout(params.drop_out)
        layers.append(('f1d', layer))
    if params.nb_hidden:
        layer = kcore.Dense(params.nb_hidden,
                            activation='linear',
                            init='glorot_uniform')
        layers.append(('h1', layer))
        if params.batch_norm:
            layer = knorm.BatchNormalization()
            layers.append(('h1b', layer))
        layer = kcore.Activation(params.activation)
        layers.append(('h1a', layer))
        if params.drop_out:
            layer = kcore.Dropout(params.drop_out)
            layers.append(('h1d', layer))
    return layers
Example #17
def test_shared_batchnorm():
    '''Test that a BN layer can be shared
    across different data streams.
    '''
    # Test single layer reuse
    bn = normalization.BatchNormalization(input_shape=(10, ))
    x1 = Input(shape=(10, ))
    bn(x1)

    x2 = Input(shape=(10, ))
    y2 = bn(x2)

    x = np.random.normal(loc=5.0, scale=10.0, size=(2, 10))
    model = Model(x2, y2)
    model.compile('sgd', 'mse')
    model.train_on_batch(x, x)

    # Test model-level reuse
    x3 = Input(shape=(10, ))
    y3 = model(x3)
    new_model = Model(x3, y3)
    new_model.compile('sgd', 'mse')
    new_model.train_on_batch(x, x)
Example #18
def get_model3(p, em_mat, rmat, word_ind, em_dim, isl, osl):
    """
    Bi-LSTM, BatchNorm, MaxPool, 1-Layer Enc + BN, 1-Layer-Dec + BN + TDD-tanh
    """
    do = p['dropout']
    rdo = p['rec_dropout']
    for w in rmat.keys():
        op_dim = len(rmat[w])
        break
    fine_tune = True if p['learn_embed'] == 1 else False
    si = Input(shape=(isl, ), dtype='int32')
    embedding_layer = Embedding(input_dim=len(word_ind) + 1,
                                output_dim=em_dim,
                                input_length=isl,
                                weights=[em_mat],
                                trainable=fine_tune)(si)

    encoded = Bidirectional(
        LSTM(p['l1'], dropout=do, recurrent_dropout=rdo,
             return_sequences=True))(embedding_layer)
    bn_enc = normalization.BatchNormalization()(encoded)

    pool_rnn = Lambda(lambda x: ke.max(x, axis=1))(bn_enc)
    decode_inp = RepeatVector(osl)(pool_rnn)
    print('em_dim=' + str(em_dim))
    print('op_dim=' + str(op_dim))

    decoded = Bidirectional(
        LSTM(p['l2'], dropout=do, recurrent_dropout=rdo,
             return_sequences=True))(decode_inp)
    td = TimeDistributed(Dense(op_dim, activation='tanh'))(decoded)

    s2s_model = Model(inputs=[si], outputs=[td])
    print('Starting to compile the model ...')
    s2s_model.compile(optimizer='adam', loss=myloss)
    return s2s_model, True
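A hedged call sketch for the builder above; every hyper-parameter, the toy vocabulary, and the embedding/target matrices below are placeholders, and `myloss` is assumed to be defined elsewhere in the original module.
# Hypothetical call; all values are illustrative placeholders only.
import numpy as np

p = {'dropout': 0.2, 'rec_dropout': 0.2, 'l1': 128, 'l2': 128, 'learn_embed': 0}
word_ind = {'the': 1, 'cat': 2}                      # toy vocabulary (assumption)
em_dim = 50
em_mat = np.random.rand(len(word_ind) + 1, em_dim)   # embedding matrix (assumption)
rmat = {'the': np.zeros(300)}                        # target vectors; op_dim is read from here
model, compiled = get_model3(p, em_mat, rmat, word_ind, em_dim, isl=20, osl=10)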
Example #19
def test_that_trainable_disables_updates():
    val_a = np.random.random((10, 4))
    val_out = np.random.random((10, 4))

    a = Input(shape=(4, ))
    layer = normalization.BatchNormalization(input_shape=(4, ))
    b = layer(a)
    model = Model(a, b)

    model.trainable = False
    assert not model.updates

    model.compile('sgd', 'mse')
    assert not model.updates

    x1 = model.predict(val_a)
    model.train_on_batch(val_a, val_out)
    x2 = model.predict(val_a)
    assert_allclose(x1, x2, atol=1e-7)

    model.trainable = True
    model.compile('sgd', 'mse')
    assert model.updates

    model.train_on_batch(val_a, val_out)
    x2 = model.predict(val_a)
    assert np.abs(np.sum(x1 - x2)) > 1e-5

    layer.trainable = False
    model.compile('sgd', 'mse')
    assert not model.updates

    x1 = model.predict(val_a)
    model.train_on_batch(val_a, val_out)
    x2 = model.predict(val_a)
    assert_allclose(x1, x2, atol=1e-7)
Example #20
def test_batchnorm_weight_init():
    """
    Test weight initialization
    """
    np.random.seed(1337)
    norm_m1 = normalization.BatchNormalization(
        input_shape=(10, ),
        mode=1,
        weights=[np.ones(10),
                 np.ones(10),
                 np.zeros(10),
                 np.zeros(10)])

    for inp in [input_1, input_2, input_3]:
        norm_m1.input = K.variable(inp)
        out = (norm_m1.get_output(train=True) - np.ones(10)) / 1.
        assert_allclose(K.eval(K.mean(out)), 0.0, atol=1e-1)
        if inp.std() > 0.:
            assert_allclose(K.eval(K.std(out)), 1.0, atol=1e-1)
        else:
            assert_allclose(K.eval(K.std(out)), 0.0, atol=1e-1)

    assert_allclose(K.eval(norm_m1.gamma), np.ones(10), atol=1e-1)
    assert_allclose(K.eval(norm_m1.beta), np.ones(10), atol=1e-1)
Example #21
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255

# Build the convolutional neural network

#layer1
model = Sequential()
model.add(Conv2D(filters=32,kernel_size=(3,3),
                 padding='same',activation='relu',strides=(1,1),input_shape=(32,32,3)))#,kernel_regularizer=regularizers.l2(0.01)))
model.add(Conv2D(32,(3,3),activation='relu',padding='same'))
model.add(MaxPooling2D(pool_size=(2,2),padding='same',strides=(2,2)))
model.add(normalization.BatchNormalization(epsilon=1e-06,mode=0,axis=-1,momentum=0.9,weights=None,beta_initializer='zero',gamma_initializer='one'))
#model.add(Dropout(0.5))
#layer2
model.add(Conv2D(filters=64,kernel_size=(3,3),
                 padding='same',activation='relu',strides=(1,1)))
model.add(Conv2D(64,(3,3),activation='relu',padding='same'))
model.add(MaxPooling2D(pool_size=(2,2),padding='same',strides=(2,2)))
model.add(normalization.BatchNormalization(epsilon=1e-06,mode=0,axis=-1,momentum=0.9,weights=None,beta_initializer='zero',gamma_initializer='one'))
#model.add(Dropout(0.5))
#layer3
model.add(Conv2D(filters=128,kernel_size=(3,3),
                 padding='same',activation='relu',strides=(1,1)))
model.add(Conv2D(128,(3,3),activation='relu',padding='same'))
model.add(MaxPooling2D(pool_size=(2,2),padding='same',strides=(2,2)))
model.add(normalization.BatchNormalization(epsilon=1e-06,mode=0,axis=-1,momentum=0.9,weights=None,beta_initializer='zero',gamma_initializer='one'))
#model.add(Dropout(0.5))
Example #22
Input_0 = Input(shape=(canales, img_ancho, img_alto))

#Start with a bank of 64 7x7 filters that convolve with the image
#to extract features of the cacao pods
tronco_conv1 = Convolution2D(64, 7, 7, subsample=(2, 2),
                             border_mode='same')(Input_0)

#Apply 2x2 subsampling to reduce the size of the convolved image
tronco_pool1 = MaxPooling2D(pool_size=(2, 2),
                            strides=[2, 2],
                            border_mode='valid',
                            dim_ordering='default')(tronco_conv1)

#Now the ResNet block begins
rama1_res_BN1 = normalization.BatchNormalization(mode=0, axis=-1)(tronco_pool1)

rama1_res_activation1 = Activation('relu')(rama1_res_BN1)

rama1_dp1 = Dropout(0.5)(rama1_res_activation1)

rama1_res_conv1 = Convolution2D(64, 3, 3, subsample=(1, 1),
                                border_mode='same')(rama1_dp1)

rama1_res_BN2 = normalization.BatchNormalization(mode=0,
                                                 axis=-1)(rama1_res_conv1)

rama1_res_activation2 = Activation('relu')(rama1_res_BN2)

rama1_dp2 = Dropout(0.5)(rama1_res_activation2)
Example #23
def get_unet(n_ch,patch_height,patch_width):
    inputs = Input((n_ch,patch_height, patch_width))
    conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(inputs)#'valid'
    conv1 = Dropout(0.3)(conv1)
    #conv1 = normalization.BatchNormalization(epsilon=1e-06, mode=1, axis=-1, momentum=0.9, weights=None, beta_init='zero', gamma_init='one')(conv1)
    conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv1)

    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = Conv2D(64, (3, 3), padding='same')(pool1) #,activation='relu', padding='same')(pool1)
    conv2 = normalization.BatchNormalization(epsilon=2e-05, axis=1, momentum=0.9, weights=None, beta_initializer='zero', gamma_initializer='one')(conv2)
    conv2 = Activation('relu')(conv2)
    conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv2)#,W_regularizer=l2(0.01), b_regularizer=l2(0.01))(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Conv2D(128, (3, 3), padding='same')(pool2)   #, activation='relu', padding='same')(pool2)
    conv3 = normalization.BatchNormalization(epsilon=2e-05,axis=1, momentum=0.9, weights=None, beta_initializer='zero', gamma_initializer='one')(conv3)
    conv3 = Activation('relu')(conv3)
    #conv3 = Dropout(0.3)(conv3)
    conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv3)#,W_regularizer=l2(0.01), b_regularizer=l2(0.01))(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = Conv2D(256, (3, 3), padding='same')(pool3)   #, activation='relu', padding='same')(pool2)
    conv4 = normalization.BatchNormalization(epsilon=2e-05,axis=1, momentum=0.9, weights=None, beta_initializer='zero', gamma_initializer='one')(conv4)
    conv4 = Activation('relu')(conv4)
    conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv4)#,W_regularizer=l2(0.01), b_regularizer=l2(0.01))(conv3)
    #conv4 = Dropout(0.3)(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
    
    #
    conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(pool4)   #, activation='relu', padding='same')(pool2)
    conv5 = normalization.BatchNormalization(epsilon=2e-05,axis=1, momentum=0.9, weights=None, beta_initializer='zero', gamma_initializer='one')(conv5)
    conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(conv5)
    #conv5 = Dropout(0.3)(conv5)

    up1 = concatenate([UpSampling2D(size=(2, 2))(conv5), conv4], axis=1)
    conv6 = Conv2D(256, (3, 3), activation='relu', padding='same')(up1)
    conv6 = Dropout(0.3)(conv6)
    conv6 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv6)

    up2 = concatenate([UpSampling2D(size=(2, 2))(conv6), conv3], axis=1)
    conv7 = Conv2D(128, (3, 3), activation='relu', padding='same')(up2)
    conv7 = Dropout(0.3)(conv7)
    conv7 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv7)

    up3 = concatenate([UpSampling2D(size=(2, 2))(conv7), conv2], axis=1)
    conv8 = Conv2D(64, (3, 3), activation='relu', padding='same')(up3)
    conv8 = Dropout(0.3)(conv8)
    conv8 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv8)

    up4 = concatenate([UpSampling2D(size=(2, 2))(conv8), conv1], axis=1)
    conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(up4)
    conv9 = Dropout(0.3)(conv9)
    conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv9)
   


    conv10 = Conv2D(2, (1, 1), activation='relu',padding='same')(conv9)

    conv10 = core.Reshape((2,patch_height*patch_width))(conv10)
    conv10 = core.Permute((2,1))(conv10)

    act = Activation('softmax')(conv10)

    model = Model(inputs=inputs, outputs=act)
    return model
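A brief usage sketch; the channel count and patch size are assumptions, and since the skip connections concatenate along axis=1, the Keras image data format is assumed to be 'channels_first'.
# Hypothetical usage; the patch size must be divisible by 16 so the four
# pooling/upsampling stages line up, and 'channels_first' ordering is assumed.
from keras import backend as K

K.set_image_data_format('channels_first')
unet = get_unet(n_ch=1, patch_height=48, patch_width=48)
unet.compile(optimizer='adam', loss='categorical_crossentropy')
unet.summary()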
Example #24
def test_TimeDistributed():
    # first, test with Dense layer
    model = Sequential()
    model.add(wrappers.TimeDistributed(core.Dense(2), input_shape=(3, 4)))
    model.add(core.Activation('relu'))
    model.compile(optimizer='rmsprop', loss='mse')
    model.fit(np.random.random((10, 3, 4)),
              np.random.random((10, 3, 2)),
              epochs=1,
              batch_size=10)

    # test config
    model.get_config()

    # test when specifying a batch_input_shape
    test_input = np.random.random((1, 3, 4))
    test_output = model.predict(test_input)
    weights = model.layers[0].get_weights()

    reference = Sequential()
    reference.add(
        wrappers.TimeDistributed(core.Dense(2), batch_input_shape=(1, 3, 4)))
    reference.add(core.Activation('relu'))
    reference.compile(optimizer='rmsprop', loss='mse')
    reference.layers[0].set_weights(weights)

    reference_output = reference.predict(test_input)
    assert_allclose(test_output, reference_output, atol=1e-05)

    # test with Embedding
    model = Sequential()
    model.add(
        wrappers.TimeDistributed(embeddings.Embedding(5, 6),
                                 batch_input_shape=(10, 3, 4),
                                 dtype='int32'))
    model.compile(optimizer='rmsprop', loss='mse')
    model.fit(np.random.randint(5, size=(10, 3, 4), dtype='int32'),
              np.random.random((10, 3, 4, 6)),
              epochs=1,
              batch_size=10)

    # compare to not using batch_input_shape
    test_input = np.random.randint(5, size=(10, 3, 4), dtype='int32')
    test_output = model.predict(test_input)
    weights = model.layers[0].get_weights()

    reference = Sequential()
    reference.add(
        wrappers.TimeDistributed(embeddings.Embedding(5, 6),
                                 input_shape=(3, 4),
                                 dtype='int32'))
    reference.compile(optimizer='rmsprop', loss='mse')
    reference.layers[0].set_weights(weights)

    reference_output = reference.predict(test_input)
    assert_allclose(test_output, reference_output, atol=1e-05)

    # test with Conv2D
    model = Sequential()
    model.add(
        wrappers.TimeDistributed(convolutional.Conv2D(5, (2, 2),
                                                      padding='same'),
                                 input_shape=(2, 4, 4, 3)))
    model.add(core.Activation('relu'))
    model.compile(optimizer='rmsprop', loss='mse')
    model.train_on_batch(np.random.random((1, 2, 4, 4, 3)),
                         np.random.random((1, 2, 4, 4, 5)))

    model = model_from_json(model.to_json())
    model.summary()

    # test stacked layers
    model = Sequential()
    model.add(wrappers.TimeDistributed(core.Dense(2), input_shape=(3, 4)))
    model.add(wrappers.TimeDistributed(core.Dense(3)))
    model.add(core.Activation('relu'))
    model.compile(optimizer='rmsprop', loss='mse')

    model.fit(np.random.random((10, 3, 4)),
              np.random.random((10, 3, 3)),
              epochs=1,
              batch_size=10)

    # test wrapping Sequential model
    model = Sequential()
    model.add(core.Dense(3, input_dim=2))
    outer_model = Sequential()
    outer_model.add(wrappers.TimeDistributed(model, input_shape=(3, 2)))
    outer_model.compile(optimizer='rmsprop', loss='mse')
    outer_model.fit(np.random.random((10, 3, 2)),
                    np.random.random((10, 3, 3)),
                    epochs=1,
                    batch_size=10)

    # test with functional API
    x = Input(shape=(3, 2))
    y = wrappers.TimeDistributed(model)(x)
    outer_model = Model(x, y)
    outer_model.compile(optimizer='rmsprop', loss='mse')
    outer_model.fit(np.random.random((10, 3, 2)),
                    np.random.random((10, 3, 3)),
                    epochs=1,
                    batch_size=10)

    # test with BatchNormalization
    model = Sequential()
    model.add(
        wrappers.TimeDistributed(normalization.BatchNormalization(center=True,
                                                                  scale=True),
                                 name='bn',
                                 input_shape=(10, 2)))
    model.compile(optimizer='rmsprop', loss='mse')
    # Assert that mean and variance are 0 and 1.
    td = model.layers[0]
    assert np.array_equal(td.get_weights()[2], np.array([0, 0]))
    assert np.array_equal(td.get_weights()[3], np.array([1, 1]))
    # Train
    model.train_on_batch(np.random.normal(loc=2, scale=2, size=(1, 10, 2)),
                         np.broadcast_to(np.array([0, 1]), (1, 10, 2)))
    # Assert that mean and variance changed.
    assert not np.array_equal(td.get_weights()[2], np.array([0, 0]))
    assert not np.array_equal(td.get_weights()[3], np.array([1, 1]))
    # Verify input_map has one mapping from inputs to reshaped inputs.
    uid = _object_list_uid(model.inputs)
    assert len(td._input_map.keys()) == 1
    assert uid in td._input_map
    assert K.int_shape(td._input_map[uid]) == (None, 2)
Example #25
    def build_model(self):
        inputs = Input((self.patch_height, self.patch_width, 1))
        conv1 = Conv2D(32, (3, 3), padding='same')(inputs)  # 'valid'
        conv1 = LeakyReLU(alpha=0.3)(conv1)
        conv1 = Dropout(0.2)(conv1)
        conv1 = normalization.BatchNormalization(
            epsilon=2e-05,
            axis=1,
            momentum=0.9,
            weights=None,
            beta_initializer='RandomNormal',
            gamma_initializer='one')(conv1)
        conv1 = Conv2D(32, (3, 3), dilation_rate=2, padding='same')(conv1)
        conv1 = LeakyReLU(alpha=0.3)(conv1)
        conv1 = Conv2D(32, (3, 3), dilation_rate=4, padding='same')(conv1)
        conv1 = LeakyReLU(alpha=0.3)(conv1)
        pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

        # pool1 = normalization.BatchNormalization(epsilon=1e-06, mode=1, axis=-1, momentum=0.9, weights=None, beta_init='zero', gamma_init='one')(pool1)
        conv2 = Conv2D(64, (3, 3), padding='same')(
            pool1)  # ,activation='relu', padding='same')(pool1)
        conv2 = normalization.BatchNormalization(
            epsilon=2e-05,
            axis=1,
            momentum=0.9,
            weights=None,
            beta_initializer='RandomNormal',
            gamma_initializer='one')(conv2)
        conv2 = LeakyReLU(alpha=0.3)(conv2)
        conv2 = Dropout(0.2)(conv2)
        conv2 = Conv2D(64, (3, 3), dilation_rate=2, padding='same')(conv2)
        conv2 = LeakyReLU(alpha=0.3)(conv2)
        conv2 = Conv2D(64, (3, 3), dilation_rate=4, padding='same')(
            conv2)  # ,W_regularizer=l2(0.01), b_regularizer=l2(0.01))(conv2)
        conv2 = LeakyReLU(alpha=0.3)(conv2)
        pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

        # crop = Cropping2D(cropping=((int(3 * patch_height / 8), int(3 * patch_height / 8)), (int(3 * patch_width / 8), int(3 * patch_width / 8))))(conv1)
        # conv3 = concatenate([crop,pool2], axis=1)
        conv3 = Conv2D(128, (3, 3), padding='same')(
            pool2)  # , activation='relu', padding='same')(conv3)
        conv3 = normalization.BatchNormalization(
            epsilon=2e-05,
            axis=1,
            momentum=0.9,
            weights=None,
            beta_initializer='RandomNormal',
            gamma_initializer='one')(conv3)
        conv3 = LeakyReLU(alpha=0.3)(conv3)
        conv3 = Dropout(0.2)(conv3)
        conv3 = Conv2D(128, (3, 3), dilation_rate=2, padding='same')(
            conv3)  # ,W_regularizer=l2(0.01), b_regularizer=l2(0.01))(conv3)
        conv3 = normalization.BatchNormalization(
            epsilon=2e-05,
            axis=1,
            momentum=0.9,
            weights=None,
            beta_initializer='RandomNormal',
            gamma_initializer='one')(conv3)
        conv3 = LeakyReLU(alpha=0.3)(conv3)

        conv3 = Conv2D(128, (3, 3), dilation_rate=4, padding='same')(conv3)
        conv3 = normalization.BatchNormalization(
            epsilon=2e-05,
            axis=1,
            momentum=0.9,
            weights=None,
            beta_initializer='RandomNormal',
            gamma_initializer='one')(conv3)
        conv3 = LeakyReLU(alpha=0.3)(conv3)

        # up1 = UpSampling2D(size=(2, 2))(conv3)
        up1 = concatenate([UpSampling2D(size=(2, 2))(conv3), conv2], axis=3)
        conv4 = Conv2D(64, (3, 3), padding='same')(up1)
        conv4 = LeakyReLU(alpha=0.3)(conv4)
        conv4 = Dropout(0.2)(conv4)
        conv4 = Conv2D(64, (3, 3), padding='same')(conv4)
        conv4 = LeakyReLU(alpha=0.3)(conv4)
        # conv4 = normalization.BatchNormalization(epsilon=1e-06, mode=1, axis=-1, momentum=0.9, weights=None, beta_init='zero', gamma_init='one')(conv4)
        #
        # up2 = UpSampling2D(size=(2, 2))(conv4)
        up2 = concatenate([UpSampling2D(size=(2, 2))(conv4), conv1], axis=3)
        conv5 = Conv2D(32, (3, 3), padding='same')(up2)
        conv5 = LeakyReLU(alpha=0.3)(conv5)
        conv5 = Dropout(0.2)(conv5)
        conv5 = Conv2D(32, (3, 3), padding='same')(conv5)
        conv5 = LeakyReLU(alpha=0.3)(conv5)

        conv6 = Conv2D(self.num_seg_class + 1, (1, 1), padding='same')(conv5)
        conv6 = LeakyReLU(alpha=0.3)(conv6)
        # conv6 = normalization.BatchNormalization(epsilon=1e-06, mode=1, axis=-1, momentum=0.9, weights=None, beta_init='zero', gamma_init='one')(conv6)

        # for tensorflow
        # conv6 = core.Reshape((patch_height*patch_width,num_lesion_class+1))(conv6)
        # for theano
        conv6 = core.Reshape((self.patch_height * self.patch_width,
                              self.num_seg_class + 1))(conv6)
        #conv6 = core.Permute((2, 1))(conv6)
        ############
        act = Activation('softmax')(conv6)

        model = Model(inputs=inputs, outputs=act)
        model.compile(optimizer='adam',
                      loss='categorical_crossentropy',
                      metrics=['categorical_accuracy'])
        # self.config.checkpoint = "C:\\Users\\kk\\Desktop\\Optic-Disc-Unet-master\\Optic-Disc-Unet-master\\experiments\\OpticDisc\\checkpoint"
        plot_model(model,
                   to_file=os.path.join(self.config.checkpoint, "model.png"),
                   show_shapes=True)
        self.model = model
Example #26
def unet_model_MultiScale():
    inputs = Input(config["input_shape"])
    conv1 = Conv3D(32, (3, 3, 3), activation='relu', padding='same')(inputs)
    conv1 = Conv3D(32, (3, 3, 3), padding='same')(conv1)
    conv1 = normalization.BatchNormalization(epsilon=2e-05,
                                             axis=1,
                                             momentum=0.9,
                                             weights=None,
                                             beta_initializer='zero',
                                             gamma_initializer='one')(conv1)
    conv1 = core.Activation('relu')(conv1)
    pool1 = MaxPooling3D(pool_size=config["pool_size"])(conv1)

    conv2 = Conv3D(64, (3, 3, 3), activation='relu', padding='same')(pool1)
    conv2 = Conv3D(64, (3, 3, 3), padding='same')(conv2)
    conv2 = normalization.BatchNormalization(epsilon=2e-05,
                                             axis=1,
                                             momentum=0.9,
                                             weights=None,
                                             beta_initializer='zero',
                                             gamma_initializer='one')(conv2)
    conv2 = core.Activation('relu')(conv2)

    pool2_1 = MaxPooling3D(pool_size=config["pool_size"])(conv2)
    conv3_1 = Conv3D(128, (3, 3, 3), activation='relu',
                     padding='same')(pool2_1)
    conv3_1 = Conv3D(128, (3, 3, 3), activation='relu',
                     padding='same')(conv3_1)

    pool2_2 = MaxPooling3D(pool_size=(4, 4, 4))(conv2)
    conv3_2 = Conv3D(128, (3, 3, 3), activation='relu',
                     padding='same')(pool2_2)
    conv3_2 = Conv3D(128, (3, 3, 3), activation='relu',
                     padding='same')(conv3_2)

    fuse = concatenate(
        [UpSampling3D(size=config["pool_size"])(conv3_2), conv3_1], axis=1)
    conv3_f = Conv3D(128, (3, 3, 3), activation='relu', padding='same')(fuse)

    up4 = concatenate([UpSampling3D(size=config["pool_size"])(conv3_f), conv2],
                      axis=1)
    conv4 = Conv3D(64, (3, 3, 3), activation='relu', padding='same')(up4)
    conv4 = Conv3D(64, (3, 3, 3), activation='relu', padding='same')(conv4)

    up5 = concatenate([UpSampling3D(size=config["pool_size"])(conv4), conv1],
                      axis=1)
    conv5 = Conv3D(32, (3, 3, 3), activation='relu', padding='valid')(up5)
    conv5 = Conv3D(32, (3, 3, 3), activation='relu', padding='valid')(conv5)

    conv6 = Conv3D(config["n_labels"], (1, 1, 1))(conv5)
    conv6 = core.Reshape((1, out_w * out_w * out_w))(conv6)
    conv6 = core.Permute((2, 1))(conv6)
    #conv6 =
    act = Activation('sigmoid')(conv6)
    model = Model(inputs=inputs, outputs=act)

    #model.compile(optimizer=Adam(lr=config["initial_learning_rate"]), loss='categorical_crossentropy',metrics=['fbeta_score'])
    model.compile(optimizer=Adam(lr=config["initial_learning_rate"]),
                  loss=dice_coef_loss,
                  metrics=[dice_coef])
    return model
Example #27
 def test_save_weights(self):
     norm = normalization.BatchNormalization(input_shape=(10, 10), mode=1, epsilon=0.1)
     weights = norm.get_weights()
     assert(len(weights) == 4)
     norm.set_weights(weights)
Example #28
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')

batch_size = 16
nb_classes = 2
nb_epoch = 100

# number of convolutional filters to use
nb_filters = 32
# size of pooling area for max pooling
pool_size = (4, 4)
# convolution kernel size
kernel_size = (3, 3)

model = Sequential()
model.add(normalization.BatchNormalization(input_shape=input_shape))
model.add(
    Convolution2D(32, kernel_size[0], kernel_size[1], border_mode='valid'))
model.add(Activation('relu'))
model.add(
    Convolution2D(32, kernel_size[0], kernel_size[1], border_mode='valid'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=pool_size))

model.add(normalization.BatchNormalization())
model.add(
    Convolution2D(64, kernel_size[0], kernel_size[1], border_mode='valid'))
model.add(Activation('relu'))
model.add(
    Convolution2D(64, kernel_size[0], kernel_size[1], border_mode='valid'))
model.add(Activation('relu'))
Example #29
(x_train, y_train), (x_test, y_test) = cifar10.load_data()

y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

model = Sequential()

model.add(
    Conv2D(32, (3, 3),
           input_shape=x_train.shape[1:],
           kernel_initializer='glorot_normal',
           bias_initializer=keras.initializers.Constant(0.1),
           padding='same'))
model.add(LeakyReLU())
model.add(normalization.BatchNormalization())
model.add(Dropout(0.2))
model.add(
    Conv2D(32, (3, 3),
           padding='same',
           kernel_initializer='glorot_normal',
           bias_initializer=keras.initializers.Constant(0.1)))
model.add(LeakyReLU())
model.add(normalization.BatchNormalization())
model.add(MaxPooling2D(pool_size=(3, 3), strides=2))
model.add(Dropout(0.2))

model.add(
    Conv2D(64, (3, 3),
           padding='same',
           kernel_initializer='glorot_normal',
Example #30
 def __init__(self):
   super(HasTuple, self).__init__()
   self.layer_list = (
       core.Dense(3), core.Dense(4),
       core.Dense(5, kernel_regularizer=tf.reduce_sum))
   self.layers_with_updates = (normalization.BatchNormalization(),)