Example #1
# Imports assumed by this snippet (the enclosing class and module header are not shown):
# from keras.models import Model
# from keras.layers import Input, Conv2D, MaxPooling2D, BatchNormalization, Flatten, Dense, Dropout
# from keras.initializers import random_normal, zeros, ones
# from keras.regularizers import l2
    def create_model(self, pkeep=0.5, enable_bn=True):
        inputs = Input(shape=self._input_shape)
        # Block pattern: Conv2D -> (optional BatchNormalization) -> MaxPooling2D.
        # NOTE: no activation is set on the conv layers; the original scaffolding
        # comments here suggested an Activation('relu') step was intended.
        conv1 = Conv2D( filters=96, kernel_size=(7, 7),
                        strides=(4, 4),
                        padding="valid",
                        kernel_initializer=random_normal(stddev=0.01),
                        kernel_regularizer=l2(self._weight_decay),
                        bias_initializer=zeros())(inputs)
        if enable_bn:
            conv1 = BatchNormalization(axis=self._channel_axis, momentum=0.9997)(conv1)
        pool1 = MaxPooling2D(pool_size=3, strides=2)(conv1)

        conv2 = Conv2D( filters=256, 
                        kernel_size=(5, 5),
                        strides=(1, 1),
                        padding="same",
                        kernel_initializer=random_normal(stddev=0.01),
                        kernel_regularizer=l2(self._weight_decay),
                        bias_initializer=ones())(pool1)
        if enable_bn:
            conv2 = BatchNormalization(axis=self._channel_axis, momentum=0.9997)(conv2)
        pool2 = MaxPooling2D(pool_size=3, strides=2)(conv2)

        conv3 = Conv2D( filters=384, 
                        kernel_size=(3, 3),
                        strides=(1, 1),
                        padding="same",
                        kernel_initializer=random_normal(stddev=0.01),
                        kernel_regularizer=l2(self._weight_decay),
                        bias_initializer=zeros())(pool2)
        if enable_bn:
            conv3 = BatchNormalization(axis=self._channel_axis, momentum=0.9997)(conv3)
        pool3 = MaxPooling2D(pool_size=3, strides=2)(conv3)

        flatten = Flatten()(pool3)

        full1 = Dense(512,  kernel_regularizer=l2(self._weight_decay),
                            bias_initializer=ones(),
                            kernel_initializer=random_normal(stddev=0.005))(flatten)
        # Keras Dropout takes the fraction of units to DROP; `pkeep` names a keep
        # probability, so convert it here.
        drop1 = Dropout(rate=1 - pkeep)(full1)
        full2 = Dense(512,  kernel_regularizer=l2(self._weight_decay),
                            bias_initializer=ones(),
                            kernel_initializer=random_normal(stddev=0.005))(drop1)
        drop2 = Dropout(rate=1 - pkeep)(full2)

        predictions_g = Dense(units=2, kernel_initializer=random_normal(stddev=0.01), bias_initializer=zeros(), name="Gender_Prediction",
                              activation="softmax")(drop2)
        predictions_a = Dense(units=101, kernel_initializer=random_normal(stddev=0.01), bias_initializer=zeros(), name="Age_Prediction",
                              activation="softmax")(drop2)

        model = Model(inputs=inputs, outputs=[predictions_g, predictions_a])

        return model
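
A minimal usage sketch for the two-headed model above (the instance name `net` and the data variables are hypothetical, not part of the original snippet); each named softmax head gets its own categorical cross-entropy loss:

model = net.create_model(pkeep=0.5, enable_bn=True)
model.compile(optimizer="sgd",
              loss={"Gender_Prediction": "categorical_crossentropy",
                    "Age_Prediction": "categorical_crossentropy"},
              metrics=["accuracy"])
# model.fit(x, {"Gender_Prediction": y_gender, "Age_Prediction": y_age}, ...)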
Example #2
# Imports assumed by this snippet (the enclosing class and module header are not shown):
# import math
# from keras import backend as K
# from keras.models import Model
# from keras.layers import Input, Conv2D, Conv2DTranspose, Lambda
# from keras.initializers import ones, VarianceScaling
    def G_encoder(self, res):
        scope_name = '%dx%d_G_Encoder' % (res, res)
        with K.name_scope(scope_name):
            dlatents_mapping = Input(batch_shape=(None, self.model_num * 2,
                                                  self.latent_size),
                                     name=scope_name + '_latent')
            const_scale = Input(batch_shape=(None, 1, 1, 1),
                                name=scope_name + '_const')
            # feed a constant scale of 1 so the transposed conv below emits the learned const tensor

            if res == self.min_resolution:
                with K.name_scope('Const'):
                    x = Conv2DTranspose(self.latent_size,
                                        kernel_size=4,
                                        use_bias=False,
                                        kernel_initializer=ones(),
                                        name='const')(
                                            const_scale)  # [batch, 4, 4, 512]
                    dlatent_res = Lambda(lambda x: x[:, 0, :])(
                        dlatents_mapping)
                    x = self.layer_epilogue(x, dlatent_res)
                with K.name_scope('Conv'):
                    x = Conv2D(self.channel_dict[res],
                               kernel_size=3,
                               strides=1,
                               kernel_initializer=VarianceScaling(
                                   math.sqrt(2)),
                               padding='same')(x)
                    dlatent_res = Lambda(lambda x: x[:, 1, :])(
                        dlatents_mapping)
                    x = self.layer_epilogue(x, dlatent_res)
            else:
                # x should be [batch, res//2, res//2, ch]
                x = self._G_encoders[res // 2]([dlatents_mapping, const_scale])
                with K.name_scope('Conv0_up'):
                    x = self.upscale2d_conv2d(x, res)
                    dlatent_res = Lambda(
                        lambda x: x[:, int(math.log2(res) * 2 - 4), :])(
                            dlatents_mapping)
                    x = self.layer_epilogue(x, dlatent_res)
                with K.name_scope('Conv1'):
                    x = Conv2D(self.channel_dict[res],
                               kernel_size=3,
                               strides=1,
                               kernel_initializer=VarianceScaling(
                                   math.sqrt(2)),
                               padding='same')(x)
                    dlatent_res = Lambda(
                        lambda x: x[:, int(math.log2(res) * 2 - 3), :])(
                            dlatents_mapping)
                    x = self.layer_epilogue(x, dlatent_res)

            return Model(inputs=[dlatents_mapping, const_scale],
                         outputs=x,
                         name=scope_name)
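
The Lambda slices above select one per-layer style vector out of dlatents_mapping by index. A small illustration of that index arithmetic (assuming min_resolution is 4, as in StyleGAN):

import math

for res in (4, 8, 16, 32, 64):
    if res == 4:
        idx = (0, 1)                          # Const block, then Conv
    else:
        idx = (int(math.log2(res) * 2 - 4),   # Conv0_up
               int(math.log2(res) * 2 - 3))   # Conv1
    print(res, idx)
# 4 (0, 1), 8 (2, 3), 16 (4, 5), 32 (6, 7), 64 (8, 9)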
Example #3
    ],
)
def test_parameters_by_signature(instance, signature_filter, params):
    assert parameters_by_signature(instance, signature_filter) == params


##################################################
# `keras_initializer_to_dict` Scenarios
##################################################
@pytest.mark.parametrize(
    ["initializer", "initializer_dict"],
    [
        #################### Normal Initializers ####################
        pytest.param(initializers.zeros(), dict(class_name="zeros"), id="zero_0"),
        pytest.param(initializers.Zeros(), dict(class_name="zeros"), id="zero_1"),
        pytest.param(initializers.ones(), dict(class_name="ones"), id="one_0"),
        pytest.param(initializers.Ones(), dict(class_name="ones"), id="one_1"),
        pytest.param(initializers.constant(), dict(class_name="constant", value=0), id="c_0"),
        pytest.param(initializers.Constant(5), dict(class_name="constant", value=5), id="c_1"),
        pytest.param(
            initializers.RandomNormal(0.1),
            dict(class_name="random_normal", mean=0.1, stddev=0.05, seed=None),
            id="rn_0",
        ),
        pytest.param(
            initializers.random_normal(mean=0.2, stddev=0.003, seed=42),
            dict(class_name="random_normal", mean=0.2, stddev=0.003, seed=42),
            id="rn_1",
        ),
        pytest.param(
            initializers.RandomUniform(maxval=0.1),
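
For comparison, Keras's built-in serializer (not the `keras_initializer_to_dict` helper exercised above) emits a nested dictionary rather than the flat form asserted here; a quick sketch:

from keras import initializers

print(initializers.serialize(initializers.Ones()))
# {'class_name': 'Ones', 'config': {}}
print(initializers.serialize(initializers.RandomNormal(mean=0.2, stddev=0.003, seed=42)))
# {'class_name': 'RandomNormal', 'config': {'mean': 0.2, 'stddev': 0.003, 'seed': 42}}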
Example #4
def test_one(tensor_shape):
    _runner(initializers.ones(), tensor_shape, target_mean=1., target_max=1.)
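
`_runner` itself is not part of this excerpt; a sketch of the helper's usual shape in the Keras test suite (an assumption, not the verbatim implementation):

import numpy as np
from keras import backend as K

def _runner(init, shape, target_mean=None, target_std=None,
            target_max=None, target_min=None):
    # materialize the initializer and pull the values back as a NumPy array
    output = K.get_value(K.variable(init(shape)))
    if target_mean is not None:
        assert abs(np.mean(output) - target_mean) < 1e-4
    if target_std is not None:
        assert abs(np.std(output) - target_std) < 1e-2
    if target_max is not None:
        assert abs(np.max(output) - target_max) < 1e-4
    if target_min is not None:
        assert abs(np.min(output) - target_min) < 1e-4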
Example #5
File: NN5.py  Project: CheChem/BNN
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD
from keras import initializers


x_train = np.loadtxt('X.txt', delimiter=",")
y_train = np.loadtxt('Y.txt', delimiter=",")

W = []
for i in range(1):  # single run; widen the range to collect more runs
    model = Sequential()
    # input_dim is only meaningful on the first layer; later layers infer their input size
    model.add(Dense(input_dim=2, units=3, use_bias=False, activation='relu', kernel_initializer=initializers.ones()))
    model.add(Dense(units=2, use_bias=False, activation='relu', kernel_initializer=initializers.ones()))
    model.add(Dense(units=1, use_bias=False, activation=None, kernel_initializer=initializers.ones()))

    model.compile(loss='mse', optimizer=SGD(lr=0.000005))
    model.fit(x_train, y_train, batch_size=10, epochs=1000)

    W0 = model.get_weights()  # list of one (in, out) weight matrix per layer
    W1 = W0[0].reshape([1, 6], order='F')
    W2 = W0[1].reshape([1, 6], order='F')
    W3 = W0[2].reshape([1, 2], order='F')
    WW = np.hstack((W1, W2, W3))
    W.append(WW)

WW = np.vstack(W)
np.savetxt('W_5.txt', WW, delimiter=",")
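
Reading the saved matrix back for later analysis (a small sketch; each row holds the 6 + 6 + 2 = 14 flattened weights of one run):

W_loaded = np.atleast_2d(np.loadtxt('W_5.txt', delimiter=","))
print(W_loaded.shape)  # (number_of_runs, 14)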

Example #6
# Model_1
# Imports assumed by this snippet (not shown in the excerpt):
# from keras.layers import Input, Conv2D, BatchNormalization, PReLU, MaxPooling2D
# from keras import initializers
inpt = Input(shape=(param["c0"], param["h0"], param["w0"]))  # channels_first layout
# 1
x = Conv2D(filters=param["n1"],
           kernel_size=(param["k"], param["k"]),
           strides=param["cs"],
           padding="same",
           activation=None,
           use_bias=False,
           kernel_initializer=initializers.random_normal(0.0, 0.01))(inpt)
x = BatchNormalization(axis=1,
                       center=True,
                       beta_initializer=initializers.zeros(),
                       scale=True,
                       gamma_initializer=initializers.ones(),
                       epsilon=10**-8,
                       momentum=0.9)(x)
x = PReLU(alpha_initializer=initializers.zeros())(x)
x = MaxPooling2D(pool_size=(param["k"], param["k"]),
                 strides=param["ps"],
                 padding="same")(x)
# x shape here: (96, 48, 48), i.e. (n1, h, w) with channels_first
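# That shape follows from "same" padding: out = ceil(in / stride). A quick check,
# assuming h0 = w0 = 96 with cs = 1 and ps = 2 (the param values are not shown
# in this excerpt):
import math
assert math.ceil(96 / 1) == 96   # conv keeps the spatial size at stride cs=1
assert math.ceil(96 / 2) == 48   # max-pool halves it at stride ps=2 -> (96, 48, 48)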
# 2
d2 = d(c=param["n1"],
       n=param["n2"],
       k=param["k"],
       w=int(x.shape[2]),
       speed=param["speed"])
x = Conv2D(filters=d2,
           kernel_size=(param["k"], 1),