def buildMICCAIModel(inputSz):
    regulPen = l2(0.001)
    #regulPen = l1_l2(l1=0.01, l2=0.01)  # previously 0.01/0.01 (22.5.2017)

    ## Architecture
    model = Sequential()

    model.add(Dense(50, input_dim=inputSz,
                    kernel_initializer=initializers.he_normal(seed=None),
                    kernel_regularizer=regulPen))
    # Activations tried previously for the hidden layers (sigmoid, tanh, linear):
    #model.add(LeakyReLU(alpha=0.3))
    #model.add(PReLU(alpha_initializer='zeros', alpha_regularizer=None, alpha_constraint=None, shared_axes=None))
    #model.add(Dropout(0.5))
    model.add(Activation('relu'))

    model.add(Dense(50, kernel_initializer=initializers.he_normal(seed=None), kernel_regularizer=regulPen))
    model.add(Activation('relu'))

    model.add(Dense(50, kernel_initializer=initializers.he_normal(seed=None), kernel_regularizer=regulPen))
    model.add(Activation('relu'))

    model.add(Dense(5, kernel_initializer=initializers.he_normal(seed=None), kernel_regularizer=regulPen))  # linear output
    ada = Adagrad(lr=0.001)
    sgd = SGD(lr=0.0001, decay=1e-4, momentum=0.9, nesterov=False)  # unused alternative
    #earlyStopping = callbacks.EarlyStopping(monitor='val_loss', patience=0, verbose=0, mode='auto')

    model.compile(loss='mse', optimizer=ada)
    return model
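A minimal invocation sketch (the feature dimension here is a placeholder, not a value from the original project):

model = buildMICCAIModel(inputSz=128)
model.summary()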
def wide_residual_network(img_input,classes_num,depth,k):
    print('Wide-Resnet %dx%d' %(depth, k))
    n_filters  = [16, 16*k, 32*k, 64*k]
    n_stack    = (depth - 4) // 6

    def conv3x3(x,filters):
        return Conv2D(filters=filters, kernel_size=(3,3), strides=(1,1), padding='same',
                      kernel_initializer=he_normal(),
                      kernel_regularizer=regularizers.l2(weight_decay))(x)

    def residual_block(x,out_filters,increase_filter=False):
        global in_filters
        if increase_filter:
            first_stride = (2,2)
        else:
            first_stride = (1,1)
        pre_bn   = BatchNormalization(momentum=0.9, epsilon=1e-5)(x)
        pre_relu = Activation('relu')(pre_bn)
        conv_1 = Conv2D(out_filters,kernel_size=(3,3),strides=first_stride,padding='same',
            kernel_initializer=he_normal(),kernel_regularizer=regularizers.l2(weight_decay))(pre_relu)
        bn_1   = BatchNormalization(momentum=0.9, epsilon=1e-5)(conv_1)
        relu1  = Activation('relu')(bn_1)
        conv_2 = Conv2D(out_filters, kernel_size=(3,3), strides=(1,1), padding='same',
            kernel_initializer=he_normal(),kernel_regularizer=regularizers.l2(weight_decay))(relu1)
        if increase_filter or in_filters != out_filters:
            projection = Conv2D(out_filters,kernel_size=(1,1),strides=first_stride,padding='same',
                kernel_initializer=he_normal(),kernel_regularizer=regularizers.l2(weight_decay))(pre_relu)
            block = add([conv_2, projection])
        else:
            block = add([conv_2,x])
        return block

    def wide_residual_layer(x,out_filters,increase_filter=False):
        global in_filters
        x = residual_block(x,out_filters,increase_filter)
        in_filters = out_filters
        for _ in range(1,int(n_stack)):
            x = residual_block(x,out_filters)
        return x


    x = conv3x3(img_input,n_filters[0])
    x = wide_residual_layer(x,n_filters[1])
    x = wide_residual_layer(x,n_filters[2],increase_filter=True)
    x = wide_residual_layer(x,n_filters[3],increase_filter=True)
    x = BatchNormalization(momentum=0.9, epsilon=1e-5)(x)
    x = Activation('relu')(x)
    x = GlobalAveragePooling2D()(x)
    x = Dense(classes_num,activation='softmax',kernel_initializer=he_normal(),
        kernel_regularizer=regularizers.l2(weight_decay))(x)
    return x
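For reference, a minimal sketch of how this builder could be wired up, assuming the usual Keras imports shown in the other snippets (Input, Model). The builder reads in_filters and weight_decay from module scope, so both must be seeded first; WRN-28-10 is shown as an illustration:

weight_decay = 5e-4                    # read by the builder from module scope
in_filters = 16                        # stem width; must be set before the first residual_block call
img_input = Input(shape=(32, 32, 3))
output = wide_residual_network(img_input, classes_num=10, depth=28, k=10)
model = Model(img_input, output)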
Example #4
def model_original():

    dropout = 0.2
    kernel_size = (3, 3)
    pool_size = (2, 2)
    image_size = 256
    target_size = (256, 256)
    epochs = 120
    name = 'model-1'
    batch_size = 64
    filepath = './models/' + name + '.{epoch:02d}-{val_acc:.2f}.hdf5'

    #receptive field size = prevLayerRCF + (K-1) * jumpSize
    #featOut = ceil((featIn + 2*padding - K)/strideLen)+1
    #jumpOut = (featInit-featOut)/featOut-1  OR  stride*JumpIn

    model = Sequential()
    model.add(
        Conv2D(128,
               kernel_size=kernel_size,
               padding='same',
               input_shape=(image_size, image_size, 3),
               kernel_initializer=initializers.he_normal(seed=None)))
    model.add(Activation('relu'))
    model.add(BatchNormalization(momentum=0.99, epsilon=0.001))
    #model.add(MaxPooling2D(pool_size=pool_size))
    model.add(Dropout(dropout))
    #receptive field size = 1 + (3-1) * 1 = 3
    #real Filter size = 3
    #c = 1

    model.add(
        Conv2D(128,
               kernel_size=kernel_size,
               padding='same',
               kernel_initializer=initializers.he_normal(seed=None)))
    model.add(Activation('relu'))
    model.add(BatchNormalization(momentum=0.99, epsilon=0.001))
    model.add(MaxPooling2D(pool_size=pool_size))
    model.add(Dropout(dropout))
    #RFS = 3 + 2 * 1 = 5
    #FilterSize = 3
    #c = 3 / 5 = 0.6

    model.add(
        Conv2D(256,
               kernel_size=kernel_size,
               padding='same',
               kernel_initializer=initializers.he_normal(seed=None)))
    model.add(Activation('relu'))
    model.add(BatchNormalization(momentum=0.99, epsilon=0.001))
    #model.add(MaxPooling2D(pool_size=pool_size))
    model.add(Dropout(dropout))
    #RFS = 5+2=7
    #c = 3 / 7 = 0.42

    model.add(
        Conv2D(256,
               kernel_size=kernel_size,
               padding='same',
               kernel_initializer=initializers.he_normal(seed=None)))
    model.add(Activation('relu'))
    model.add(BatchNormalization(momentum=0.99, epsilon=0.001))
    model.add(MaxPooling2D(pool_size=pool_size))
    model.add(Dropout(dropout))
    #RFS= 9
    #c = 3 / 9 = 0.33

    model.add(
        Conv2D(512,
               kernel_size=kernel_size,
               padding='same',
               kernel_initializer=initializers.he_normal(seed=None)))
    model.add(Activation('relu'))
    model.add(BatchNormalization(momentum=0.99, epsilon=0.001))
    #model.add(MaxPooling2D(pool_size=pool_size))
    model.add(Dropout(dropout))
    #RFS = 11
    #c = 3/11 = 0.2727

    model.add(
        Conv2D(512,
               kernel_size=kernel_size,
               padding='same',
               kernel_initializer=initializers.he_normal(seed=None)))
    model.add(Activation('relu'))
    model.add(BatchNormalization(momentum=0.99, epsilon=0.001))
    model.add(MaxPooling2D(pool_size=pool_size))
    model.add(Dropout(dropout))
    #RFS = 13
    #c = 3/13 = 0.23

    model.add(
        Conv2D(1024,
               kernel_size=kernel_size,
               padding='same',
               kernel_initializer=initializers.he_normal(seed=None)))
    model.add(Activation('relu'))
    model.add(BatchNormalization(momentum=0.99, epsilon=0.001))
    model.add(MaxPooling2D(pool_size=pool_size))
    model.add(Dropout(dropout))
    #RFS = 15
    #c = 3/15 = 0.2

    model.add(
        Conv2D(1024,
               kernel_size=kernel_size,
               padding='same',
               kernel_initializer=initializers.he_normal(seed=None)))
    model.add(Activation('relu'))
    model.add(BatchNormalization(momentum=0.99, epsilon=0.001))
    model.add(MaxPooling2D(pool_size=pool_size))
    model.add(Dropout(dropout))
    #RFS = 17
    #c = 3/17 = 0.17
    #this is perfect: it should get as close to 1/6 = 0.16 as possible without going below
    #it might be worth using a stride length > 1, which would increase the receptive field size and therefore let the model see more of the big picture
    #this may also mean removing some of the deeper convolutional layers, which could be offset by increasing the kernel size

    model.add(
        Flatten())  # this converts our 3D feature maps to 1D feature vectors

    model.add(
        Dense(256, kernel_initializer=initializers.lecun_normal(seed=None)))
    model.add(Activation('relu'))
    model.add(Dropout(dropout))

    model.add(
        Dense(128, kernel_initializer=initializers.lecun_normal(seed=None)))
    model.add(Activation('relu'))
    model.add(Dropout(dropout))

    model.add(
        Dense(64, kernel_initializer=initializers.lecun_normal(seed=None)))
    model.add(Activation('relu'))
    model.add(Dropout(dropout))

    model.add(
        Dense(32, kernel_initializer=initializers.lecun_normal(seed=None)))
    model.add(Activation('relu'))
    model.add(Dropout(dropout))

    #it might be good to try freezing all convolution layers or all layers except last 32 Dense and training that specific layer to be more accurate.

    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    trainAndSaveGenerator(model, epochs, name, target_size, batch_size,
                          filepath)
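The running receptive-field totals in the comments above add (K - 1) * jump per conv with the jump held at 1. A small illustrative helper (not from the original project) for the full recurrence, where each layer's stride multiplies the jump for all later layers:

def receptive_field(layers):
    # layers: sequence of (kernel_size, stride) tuples, input to output
    rfs, jump = 1, 1
    for k, s in layers:
        rfs += (k - 1) * jump   # field grows by (K-1) times the current jump
        jump *= s               # stride compounds the jump for later layers
    return rfs

# e.g. two same-padded 3x3 convs, a 2x2/2 pool, then another 3x3 conv:
print(receptive_field([(3, 1), (3, 1), (2, 2), (3, 1)]))  # -> 10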
Example #5
#     if epoch <150:
#         return 0.001
#     if epoch <180:
#         return 0.0001
#     return 0.00001

# he_normal = truncated_normal

# build model
input = Input(shape=[32, 32, 3])

# Block 1
x = Conv2D(8, (3, 3),
           padding='same',
           kernel_regularizer=keras.regularizers.l2(weight_decay),
           kernel_initializer=he_normal(),
           name='block1_conv1',
           input_shape=[32, 32, 3])(input)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Conv2D(8, (3, 3),
           padding='same',
           kernel_regularizer=keras.regularizers.l2(weight_decay),
           kernel_initializer=he_normal(),
           name='block1_conv2')(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)

# Block 2
x = Conv2D(16, (3, 3),
Example #6
    X_train = train_set_x_orig / 255
    Y_train = np.squeeze(
        np_utils.to_categorical(train_set_y_orig.T, num_classes=6))

    X_test = test_set_x_orig / 255
    Y_test = np.squeeze(
        np_utils.to_categorical(test_set_y_orig.T, num_classes=6))
    #print(X_train.shape[1:])

    np.random.seed(12)
    X_input = Input(X_train.shape[1:])
    X = Conv2D(6, (5, 5),
               strides=(2, 2),
               padding='valid',
               name='CONV_1',
               kernel_initializer=initializers.he_normal())(X_input)
    X = Activation('relu')(X)
    X = MaxPooling2D((2, 2), strides=(2, 2), name='MAX_POOL_1')(X)

    X = ZeroPadding2D((1, 1))(X)
    X = Conv2D(16, (5, 5),
               strides=(1, 1),
               padding='valid',
               name='CONV_2',
               kernel_initializer=initializers.he_normal())(X)
    X = Activation('relu')(X)
    X = MaxPooling2D((2, 2), strides=(2, 2), name='MAX_POOL_2')(X)

    X = Flatten()(X)
    X = Dense(120,
              activation='relu',
            id="o_1",
        ),
        pytest.param(initializers.Identity(1.1),
                     dict(class_name="identity", gain=1.1),
                     id="i_0"),
        pytest.param(initializers.identity(),
                     dict(class_name="identity", gain=1.0),
                     id="i_1"),
        #################### VarianceScaling ####################
        pytest.param(initializers.glorot_normal(),
                     dict(class_name="glorot_normal", seed=None),
                     id="gn_0"),
        pytest.param(initializers.glorot_uniform(42),
                     dict(class_name="glorot_uniform", seed=42),
                     id="gu_0"),
        pytest.param(initializers.he_normal(),
                     dict(class_name="he_normal", seed=None),
                     id="hn_0"),
        pytest.param(initializers.he_uniform(42),
                     dict(class_name="he_uniform", seed=42),
                     id="hu_0"),
        pytest.param(initializers.lecun_normal(),
                     dict(class_name="lecun_normal", seed=None),
                     id="ln_0"),
        pytest.param(initializers.lecun_uniform(42),
                     dict(class_name="lecun_uniform", seed=42),
                     id="lu_0"),
    ],
)
def test_keras_initializer_to_dict(initializer, initializer_dict):
    assert get_concise_params_dict(
def bottleneck(x):
    channels = self.growth_rate * 4
    x = bn_relu(x)
    x = Conv2D(channels, kernel_size=(1,1), strides=(1,1), padding='same', kernel_initializer=he_normal(), kernel_regularizer=regularizers.l2(self.weight_decay), use_bias=False)(x)
    x = bn_relu(x)
    x = Conv2D(self.growth_rate, kernel_size=(3,3), strides=(1,1), padding='same', kernel_initializer=he_normal(), kernel_regularizer=regularizers.l2(self.weight_decay), use_bias=False)(x)
    return x
Example #9
# First time run, no unlocking
conv_base.trainable = False

# Let's see it
print('Summary')
print(conv_base.summary())

# Let's construct that top layer replacement
x = conv_base.output
x = AveragePooling2D(pool_size=(8, 8))(x)
x = Dropout(0.4)(x)
x = Flatten()(x)
x = Dense(256,
          activation='relu',
          kernel_initializer=initializers.he_normal(seed=None),
          kernel_regularizer=regularizers.l2(.0005))(x)
x = Dropout(0.5)(x)
# Essential to have another layer for better accuracy
x = Dense(128,
          activation='relu',
          kernel_initializer=initializers.he_normal(seed=None))(x)
x = Dropout(0.25)(x)
predictions = Dense(constants.NUM_CLASSES,
                    kernel_initializer="glorot_uniform",
                    activation='softmax')(x)

print('Stacking New Layers')
model = Model(inputs=conv_base.input, outputs=predictions)

# Load checkpoint if one is found
def net():
    inputs = Input(shape=(config.IMAGE_SIZE, config.IMAGE_SIZE, config.NUM_CHANNELS))
    x = Conv2D(config.FILTER_NUM[0], (1, 3), padding='same', kernel_initializer=he_normal())(inputs)
    x = Conv2D(config.FILTER_NUM[0], (3, 1), padding='same', kernel_initializer=he_normal())(x)
    x = BatchNormalization()(x)
    # x = Activation(relu6)(x)
    x = PReLU()(x)
    x = MaxPooling2D(pool_size=(3, 3), strides=2, padding='same')(x)

    x = Conv2D(config.FILTER_NUM[1], (1, 3), padding='same', kernel_initializer=he_normal())(x)
    x = Conv2D(config.FILTER_NUM[1], (3, 1), padding='same', kernel_initializer=he_normal())(x)
    x = BatchNormalization()(x)
    # x = Activation(relu6)(x)
    x = PReLU()(x)
    x = MaxPooling2D(pool_size=(3, 3), strides=2, padding='same')(x)

    x = Conv2D(config.FILTER_NUM[2], (1, 3), padding='same', kernel_initializer=he_normal())(x)
    x = Conv2D(config.FILTER_NUM[2], (3, 1), padding='same', kernel_initializer=he_normal())(x)
    x = BatchNormalization()(x)
    # x = Activation(relu6)(x)
    x = PReLU()(x)
    x = MaxPooling2D(pool_size=(3, 3), strides=2, padding='same')(x)

    x = Conv2D(config.FILTER_NUM[3], (1, 3), padding='same', kernel_initializer=he_normal())(x)
    x = Conv2D(config.FILTER_NUM[3], (3, 1), padding='same', kernel_initializer=he_normal())(x)
    x = BatchNormalization()(x)
    # x = Activation(relu6)(x)
    x = PReLU()(x)
    x = Conv2D(config.FILTER_NUM[4], (1, 3), padding='same', kernel_initializer=he_normal())(x)
    x = Conv2D(config.FILTER_NUM[4], (3, 1), padding='same', kernel_initializer=he_normal())(x)
    x = BatchNormalization()(x)
    # x = Activation(relu6)(x)
    x = PReLU()(x)
    x = MaxPooling2D(pool_size=(3, 3), strides=2, padding='same')(x)

    x = Conv2D(config.FILTER_NUM[5], (1, 3), padding='same', kernel_initializer=he_normal())(x)
    x = Conv2D(config.FILTER_NUM[5], (3, 1), padding='same', kernel_initializer=he_normal())(x)
    x = BatchNormalization()(x)
    # x = Activation(relu6)(x)
    x = PReLU()(x)
    x = Conv2D(config.FILTER_NUM[6], (1, 3), padding='same', kernel_initializer=he_normal())(x)
    x = Conv2D(config.FILTER_NUM[6], (3, 1), padding='same', kernel_initializer=he_normal())(x)
    x = BatchNormalization()(x)
    # x = Activation(relu6)(x)
    x = PReLU()(x)
    x = MaxPooling2D(pool_size=(3, 3), strides=2, padding='same')(x)

    x = Flatten()(x)
    x = Dense(config.FILTER_NUM[7], kernel_regularizer=l2(0.005), kernel_initializer=he_normal())(x)
    x = BatchNormalization()(x)
    # x = Activation(relu6)(x)
    x = PReLU()(x)
    x = Dropout(0.5)(x)

    y = Dense(config.NUM_LABELS, activation='softmax', kernel_initializer=he_normal())(x)
    model = Model(inputs=inputs, outputs=y)
    return model
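net() reads everything from an external config module that is not shown; an illustrative stand-in (all values are placeholders) that makes the snippet self-contained:

class config:  # stand-in for the unshown config module
    IMAGE_SIZE = 64
    NUM_CHANNELS = 3
    FILTER_NUM = [32, 64, 128, 256, 256, 512, 512, 1024]
    NUM_LABELS = 10

model = net()
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])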
Example #11
def group_conv(x, planes, stride):
    h = planes // cardinality
    groups = []
    for i in range(cardinality):
        group = Lambda(lambda z: z[:, :, :, i * h: i * h + h])(x)
        groups.append(Conv2D(h, kernel_size=(3,3), strides=stride, kernel_initializer=he_normal(), kernel_regularizer=regularizers.l2(weight_decay), padding='same', use_bias=False)(group))
    x = concatenate(groups)
    return x
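The Lambda slicing above emulates a grouped convolution one channel group at a time. In newer tf.keras (TF 2.3 and later), Conv2D accepts a groups argument, so the same block can be written directly; a sketch of that alternative:

from tensorflow.keras.layers import Conv2D

def group_conv_native(x, planes, stride, cardinality):
    # one grouped conv instead of `cardinality` sliced convs plus concatenate
    return Conv2D(planes, kernel_size=(3, 3), strides=stride, padding='same',
                  groups=cardinality, use_bias=False)(x)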
Example #12
from keras.models import Sequential
from keras.layers import Dense
from keras.initializers import he_normal, normal

from deepreplay.callbacks import ReplayData
from deepreplay.replay import Replay
from deepreplay.plot import compose_animations, compose_plots

import matplotlib.pyplot as plt
import pandas as pd
from sklearn.preprocessing import StandardScaler

# Fetch the data file from the Data Folder at https://archive.ics.uci.edu/ml/datasets/spambase

group_name = 'spam'

df = pd.read_csv('spambase.data', header=None)
X, y = df.iloc[:, :57].values, df.iloc[:, 57].values
X = StandardScaler().fit_transform(X)

he_initializer = he_normal(seed=42)
normal_initializer = normal(seed=42)

replaydata = ReplayData(X, y, filename='spambase_dataset.h5', group_name=group_name)

model = Sequential()
model.add(Dense(input_dim=57,
                units=10,
                kernel_initializer=he_initializer,
                activation='tanh'))
model.add(Dense(units=2,
                kernel_initializer=normal_initializer,
                activation='linear',
                name='hidden'))
model.add(Dense(units=1,
                kernel_initializer=normal_initializer,
def transition(x, inchannels):
    outchannels = int(inchannels * compression)
    x = bn_relu(x)
    x = Conv2D(outchannels, kernel_size=(1,1), strides=(1,1), padding='same', kernel_initializer=he_normal(), kernel_regularizer=regularizers.l2(weight_decay), use_bias=False)(x)
    x = AveragePooling2D((2,2), strides=(2,2))(x)
    return x, outchannels

def single(x):
    x = bn_relu(x)
    x = Conv2D(growth_rate, kernel_size=(3,3), strides=(1,1), padding='same', kernel_initializer=he_normal(), kernel_regularizer=regularizers.l2(weight_decay), use_bias=False)(x)
    return x
def densenet(img_input,classes_num):

    def bn_relu(x):
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        return x

    def bottleneck(x):
        channels = growth_rate * 4
        x = bn_relu(x)
        x = Conv2D(channels,kernel_size=(1,1),strides=(1,1),padding='same',kernel_initializer=he_normal(),kernel_regularizer=regularizers.l2(weight_decay),use_bias=False)(x)
        x = bn_relu(x)
        x = Conv2D(growth_rate,kernel_size=(3,3),strides=(1,1),padding='same',kernel_initializer=he_normal(),kernel_regularizer=regularizers.l2(weight_decay),use_bias=False)(x)
        return x

    def single(x):
        x = bn_relu(x)
        x = Conv2D(growth_rate,kernel_size=(3,3),strides=(1,1),padding='same',kernel_initializer=he_normal(),kernel_regularizer=regularizers.l2(weight_decay),use_bias=False)(x)
        return x

    def transition(x, inchannels):
        outchannels = int(inchannels * compression)
        x = bn_relu(x)
        x = Conv2D(outchannels,kernel_size=(1,1),strides=(1,1),padding='same',kernel_initializer=he_normal(),kernel_regularizer=regularizers.l2(weight_decay),use_bias=False)(x)
        x = AveragePooling2D((2,2), strides=(2, 2))(x)
        return x, outchannels

    def dense_block(x,blocks,nchannels):
        concat = x
        for i in range(blocks):
            x = bottleneck(concat)
            concat = concatenate([x,concat], axis=-1)
            nchannels += growth_rate
        return concat, nchannels

    def dense_layer(x):
        return Dense(classes_num,activation='softmax',kernel_initializer=he_normal(),kernel_regularizer=regularizers.l2(weight_decay))(x)


    nblocks = (depth - 4) // 6 
    nchannels = growth_rate * 2

    x = Conv2D(nchannels,kernel_size=(3,3),strides=(1,1),padding='same',kernel_initializer=he_normal(),kernel_regularizer=regularizers.l2(weight_decay),use_bias=False)(img_input)

    x, nchannels = dense_block(x,nblocks,nchannels)
    x, nchannels = transition(x,nchannels)
    x, nchannels = dense_block(x,nblocks,nchannels)
    x, nchannels = transition(x,nchannels)
    x, nchannels = dense_block(x,nblocks,nchannels)
    x, nchannels = transition(x,nchannels)
    x = bn_relu(x)
    x = GlobalAveragePooling2D()(x)
    x = dense_layer(x)
    return x
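A minimal invocation sketch: densenet reads depth, growth_rate, compression and weight_decay from enclosing scope, so they must be defined first. The values below are the common DenseNet-40 settings, used here purely as an illustration:

depth, growth_rate, compression, weight_decay = 40, 12, 0.5, 1e-4
img_input = Input(shape=(32, 32, 3))
model = Model(img_input, densenet(img_input, classes_num=10))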
Example #16
def model_3():

    image_size = 400
    dropout = 0.2
    kernel_size = (3, 3)
    pool_size = (2, 2)
    name = 'model3'
    epochs = 120

    model = Sequential()
    model.add(
        Conv2D(32,
               kernel_size=kernel_size,
               padding='same',
               input_shape=(image_size, image_size, 3),
               kernel_initializer=initializers.he_normal(seed=None)))
    model.add(Activation('relu'))
    model.add(BatchNormalization(momentum=0.99, epsilon=0.001))
    model.add(MaxPooling2D(pool_size=pool_size))
    model.add(Dropout(dropout))

    model.add(
        Conv2D(32,
               kernel_size=kernel_size,
               padding='same',
               kernel_initializer=initializers.he_normal(seed=None)))
    model.add(Activation('relu'))
    model.add(BatchNormalization(momentum=0.99, epsilon=0.001))
    model.add(MaxPooling2D(pool_size=pool_size))
    model.add(Dropout(dropout))

    model.add(
        Conv2D(64,
               kernel_size=kernel_size,
               padding='same',
               kernel_initializer=initializers.he_normal(seed=None)))
    model.add(Activation('relu'))
    model.add(BatchNormalization(momentum=0.99, epsilon=0.001))
    model.add(MaxPooling2D(pool_size=pool_size))
    model.add(Dropout(dropout))

    model.add(
        Conv2D(64,
               kernel_size=kernel_size,
               padding='same',
               kernel_initializer=initializers.he_normal(seed=None)))
    model.add(Activation('relu'))
    model.add(BatchNormalization(momentum=0.99, epsilon=0.001))
    model.add(MaxPooling2D(pool_size=pool_size))
    model.add(Dropout(dropout))

    model.add(
        Conv2D(128,
               kernel_size=kernel_size,
               padding='same',
               kernel_initializer=initializers.he_normal(seed=None)))
    model.add(Activation('relu'))
    model.add(BatchNormalization(momentum=0.99, epsilon=0.001))
    model.add(MaxPooling2D(pool_size=pool_size))
    model.add(Dropout(dropout))

    model.add(
        Conv2D(128,
               kernel_size=kernel_size,
               padding='same',
               kernel_initializer=initializers.he_normal(seed=None)))
    model.add(Activation('relu'))
    model.add(BatchNormalization(momentum=0.99, epsilon=0.001))
    model.add(MaxPooling2D(pool_size=pool_size))
    model.add(Dropout(dropout))

    model.add(
        Conv2D(256,
               kernel_size=kernel_size,
               padding='same',
               kernel_initializer=initializers.he_normal(seed=None)))
    model.add(Activation('relu'))
    model.add(BatchNormalization(momentum=0.99, epsilon=0.001))
    model.add(MaxPooling2D(pool_size=pool_size))
    model.add(Dropout(dropout))

    model.add(
        Conv2D(256,
               kernel_size=kernel_size,
               padding='same',
               kernel_initializer=initializers.he_normal(seed=None)))
    model.add(Activation('relu'))
    model.add(BatchNormalization(momentum=0.99, epsilon=0.001))
    model.add(MaxPooling2D(pool_size=pool_size))
    model.add(Dropout(dropout))

    model.add(
        Flatten())  # this converts our 3D feature maps to 1D feature vectors

    #added extra dense 512 layer on top
    model.add(
        Dense(512, kernel_initializer=initializers.lecun_normal(seed=None)))
    model.add(Activation('relu'))
    model.add(Dropout(dropout))

    model.add(
        Dense(256, kernel_initializer=initializers.lecun_normal(seed=None)))
    model.add(Activation('relu'))
    model.add(Dropout(dropout))

    model.add(
        Dense(128, kernel_initializer=initializers.lecun_normal(seed=None)))
    model.add(Activation('relu'))
    model.add(Dropout(dropout))

    model.add(
        Dense(64, kernel_initializer=initializers.lecun_normal(seed=None)))
    model.add(Activation('relu'))
    model.add(Dropout(dropout))

    model.add(
        Dense(32, kernel_initializer=initializers.lecun_normal(seed=None)))
    model.add(Activation('relu'))
    model.add(Dropout(dropout))

    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    trainAndSave(model, epochs, name)
Example #17
def ResNet(input_shape=(28, 28, 1), classes=10):

    X_input = Input(input_shape)

    X = ZeroPadding2D((2, 2))(X_input)

    # Stage 1
    X = Conv2D(32, (5, 5),
               strides=(1, 1),
               name='conv1',
               kernel_initializer=he_normal(seed=None))(X)
    X = BatchNormalization(axis=3, name='bn_conv1')(X)
    X = Activation('relu')(X)
    X = MaxPooling2D((2, 2), strides=(2, 2))(X)
    print('1, X', X.shape)

    # Stage 2
    X = convolutional_block(X,
                            f=3,
                            filters=[32, 32, 64],
                            stage=2,
                            block='a',
                            s=1)
    X = identity_block(X, 3, [32, 32, 64], stage=2, block='b')
    X = identity_block(X, 3, [32, 32, 64], stage=2, block='c')
    print('2, X', X.shape)

    # Stage 3
    X = convolutional_block(X,
                            f=3,
                            filters=[32, 32, 128],
                            stage=3,
                            block='a',
                            s=2)
    X = identity_block(X, 3, [32, 32, 128], stage=3, block='b')
    X = identity_block(X, 3, [32, 32, 128], stage=3, block='c')
    X = identity_block(X, 3, [32, 32, 128], stage=3, block='d')
    X = identity_block(X, 3, [32, 32, 128], stage=3, block='e')
    X = identity_block(X, 3, [32, 32, 128], stage=3, block='f')
    print('3, X', X.shape)

    # Stage 4
    X = convolutional_block(X,
                            f=3,
                            filters=[64, 64, 256],
                            stage=4,
                            block='a',
                            s=2)
    X = identity_block(X, 3, [64, 64, 256], stage=4, block='b')
    X = identity_block(X, 3, [64, 64, 256], stage=4, block='c')
    print('4, X', X.shape)

    # AVGPOOL
    X = AveragePooling2D((2, 2), name='avg_pool')(X)
    print('average pool, X', X.shape)

    # output layer
    X = Flatten()(X)
    X = Dense(classes,
              activation='softmax',
              name='fc' + str(classes),
              kernel_initializer=he_normal(seed=None))(X)

    model = Model(inputs=X_input, outputs=X, name='ResNet')

    return model
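Illustrative usage (convolutional_block and identity_block are assumed to be defined elsewhere in the same file):

model = ResNet(input_shape=(28, 28, 1), classes=10)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])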
Example #18
    def residual_block(x,planes,stride=(1,1)):

        D = int(math.floor(planes * (base_width/64.0)))
        C = cardinality

        shortcut = x
        
        y = Conv2D(D*C,kernel_size=(1,1),strides=(1,1),padding='same',kernel_initializer=he_normal(),kernel_regularizer=regularizers.l2(weight_decay),use_bias=False)(shortcut)
        y = add_common_layer(y)

        y = group_conv(y,D*C,stride)
        y = add_common_layer(y)

        y = Conv2D(planes*expansion, kernel_size=(1,1), strides=(1,1), padding='same', kernel_initializer=he_normal(),kernel_regularizer=regularizers.l2(weight_decay),use_bias=False)(y)
        y = add_common_layer(y)

        if stride != (1,1) or inplanes != planes * expansion:
            shortcut = Conv2D(planes * expansion, kernel_size=(1,1), strides=stride, padding='same', kernel_initializer=he_normal(),kernel_regularizer=regularizers.l2(weight_decay),use_bias=False)(x)
            shortcut = BatchNormalization(momentum=0.9, epsilon=1e-5)(shortcut)
        y = add([y,shortcut])
        y = Activation('relu')(y)
        return y
Example #19
    def build_model(self):
        weight_decay = 0.0005

        inputs = Input(shape=self.x_shape)

        x = Conv2D(64,
                   3,
                   padding='same',
                   kernel_regularizer=regularizers.l2(weight_decay),
                   kernel_initializer=he_normal())(inputs)
        x = Activation('relu')(x)
        x = BatchNormalization()(x)
        x = Conv2D(64,
                   3,
                   padding='same',
                   kernel_regularizer=regularizers.l2(weight_decay),
                   kernel_initializer=he_normal())(x)
        x = Activation('relu')(x)
        x = BatchNormalization()(x)
        x = MaxPooling2D()(x)

        x = Conv2D(128,
                   3,
                   padding='same',
                   kernel_regularizer=regularizers.l2(weight_decay),
                   kernel_initializer=he_normal())(x)
        x = Activation('relu')(x)
        x = BatchNormalization()(x)
        x = Conv2D(128,
                   3,
                   padding='same',
                   kernel_regularizer=regularizers.l2(weight_decay),
                   kernel_initializer=he_normal())(x)
        x = Activation('relu')(x)
        x = BatchNormalization()(x)
        x = MaxPooling2D()(x)

        x = SeparableConv2D(256,
                            3,
                            padding='same',
                            kernel_regularizer=regularizers.l2(weight_decay),
                            kernel_initializer=he_normal())(x)
        x = Activation('relu')(x)
        x = BatchNormalization()(x)
        x = Dropout(0.4)(x)
        x = SeparableConv2D(256,
                            3,
                            padding='same',
                            kernel_regularizer=regularizers.l2(weight_decay),
                            kernel_initializer=he_normal())(x)
        x = Activation('relu')(x)
        x = BatchNormalization()(x)
        x = MaxPooling2D()(x)

        x = self.py_block(x)

        # x = GlobalAveragePooling2D()(x)
        x = GlobalMaxPooling2D()(x)
        outputs = Dense(self.num_class, activation='softmax')(x)

        model_ = Model(inputs=inputs, outputs=outputs)
        # model_ = multi_gpu_model(model_, gpus=2)

        plot_model(model_,
                   "./pyramid.png",
                   show_shapes=True,
                   show_layer_names=True,
                   dpi=120)

        model_.summary()

        return model_
Example #20
def conv3x3(x, filters):
    x = Conv2D(filters=filters, kernel_size=(3,3), strides=(1,1), padding='same', kernel_initializer=he_normal(), kernel_regularizer=regularizers.l2(weight_decay), use_bias=False)(x)
    return add_common_layer(x)

def residual_block(x, out_filters, increase_filter=False):
    if increase_filter:
        first_stride = (2,2)
    else:
        first_stride = (1,1)
    pre_bn   = BatchNormalization()(x)
    pre_relu = Activation('relu')(pre_bn)
    conv_1 = Conv2D(out_filters, kernel_size=(3,3), strides=first_stride, padding='same', kernel_initializer=he_normal(), kernel_regularizer=regularizers.l2(self.weight_decay))(pre_relu)
    bn_1   = BatchNormalization()(conv_1)
    relu1  = Activation('relu')(bn_1)
    conv_2 = Conv2D(out_filters, kernel_size=(3,3), strides=(1,1), padding='same', kernel_initializer=he_normal(), kernel_regularizer=regularizers.l2(self.weight_decay))(relu1)
    if increase_filter or in_filters != out_filters:
        projection = Conv2D(out_filters, kernel_size=(1,1), strides=first_stride, padding='same', kernel_initializer=he_normal(), kernel_regularizer=regularizers.l2(self.weight_decay))(x)
        block = add([conv_2, projection])
    else:
        block = add([conv_2, x])
    return block
Example #22
def dense_layer(x):
    return Dense(classes_num, activation='softmax', kernel_initializer=he_normal(), kernel_regularizer=regularizers.l2(weight_decay))(x)
Example #23
def AtzoriNet(input_shape,
              classes,
              n_pool='average',
              n_dropout=0.,
              n_l2=0.0005,
              n_init='glorot_normal',
              batch_norm=False):
    """ Creates the Deep Neural Network architecture described in the paper of Manfredo Atzori:
    Deep Learning with Convolutional Neural Networks Applied to Electromyography Data: A Resource for the Classification of Movements for Prosthetic Hands
    https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5013051/
    
    Arguments:
        input_shape -- tuple, dimensions of the input in the form (height, width, channels)
        classes -- integer, number of classes to be classified, defines the dimension of the softmax unit
        n_pool -- string, pool method to be used {'max', 'average'}
        n_dropout -- float, rate of dropping units
        n_l2 -- float, amount of weight decay regularization
        n_init -- string, type of kernel initializer {'glorot_normal', 'glorot_uniform', 'he_normal', 'he_uniform', 'normal', 'uniform'}
        batch_norm -- boolean, whether BatchNormalization is applied to the input
    
    Returns:
        model -- keras.models.Model (https://keras.io)
    """

    if n_init == 'glorot_normal':
        kernel_init = initializers.glorot_normal(seed=0)
    elif n_init == 'glorot_uniform':
        kernel_init = initializers.glorot_uniform(seed=0)
    elif n_init == 'he_normal':
        kernel_init = initializers.he_normal(seed=0)
    elif n_init == 'he_uniform':
        kernel_init = initializers.he_uniform(seed=0)
    elif n_init == 'normal':
        kernel_init = initializers.normal(seed=0)
    elif n_init == 'uniform':
        kernel_init = initializers.uniform(seed=0)
    # kernel_init = n_init
    kernel_regl = regularizers.l2(n_l2)

    ## Block 0 [Input]
    X_input = Input(input_shape, name='b0_input')
    X = X_input
    if batch_norm:
        X = BatchNormalization()(X)

    ## Block 1 [Pad -> Conv -> ReLU -> Dropout]
    X = ZeroPadding2D((0, 4))(X)
    X = Conv2D(32, (1, 10),
               padding='valid',
               kernel_regularizer=kernel_regl,
               kernel_initializer=kernel_init,
               name='b1_conv2d_32_1x10')(X)
    X = Activation('relu', name='b1_relu')(X)
    X = Dropout(n_dropout, name='b1_dropout')(X)

    ## Block 2 [Pad -> Conv -> ReLU -> Dropout -> Pool]
    X = ZeroPadding2D((1, 1))(X)
    X = Conv2D(32, (3, 3),
               padding='valid',
               kernel_regularizer=kernel_regl,
               kernel_initializer=kernel_init,
               name='b2_conv2d_32_3x3')(X)
    X = Activation('relu', name='b2_relu')(X)
    X = Dropout(n_dropout, name='b2_dropout')(X)
    if n_pool == 'max':
        X = MaxPooling2D((3, 3), strides=(3, 3), name='b2_pool')(X)
    else:
        X = AveragePooling2D((3, 3), strides=(3, 3), name='b2_pool')(X)

    ## Block 3 [Pad -> Conv -> ReLU -> Dropout -> Pool]
    X = ZeroPadding2D((2, 2))(X)
    X = Conv2D(64, (5, 5),
               padding='valid',
               kernel_regularizer=kernel_regl,
               kernel_initializer=kernel_init,
               name='b3_conv2d_64_5x5')(X)
    X = Activation('relu', name='b3_relu')(X)
    X = Dropout(n_dropout, name='b3_dropout')(X)
    if n_pool == 'max':
        X = MaxPooling2D((3, 3), strides=(3, 3), name='b3_pool')(X)
    else:
        X = AveragePooling2D((3, 3), strides=(3, 3), name='b3_pool')(X)

    ## Block 4 [Pad -> Conv -> ReLU -> Dropout]
    X = ZeroPadding2D((2, 0))(X)
    X = Conv2D(64, (5, 1),
               padding='valid',
               kernel_regularizer=kernel_regl,
               kernel_initializer=kernel_init,
               name='b4_conv2d_64_5x1')(X)
    X = Activation('relu', name='b4_relu')(X)
    X = Dropout(n_dropout, name='b4_dropout')(X)

    ## Block 5 [Pad -> Conv -> Softmax]
    X = Conv2D(classes, (1, 1),
               padding='same',
               kernel_regularizer=kernel_regl,
               kernel_initializer=kernel_init,
               name='b5_conv2d_{}_1x1'.format(classes))(X)
    X = Activation('softmax', name='b5_soft')(X)
    X = Reshape((-1, ), name='b5_reshape')(X)

    model = Model(inputs=X_input, outputs=X, name='AtzoriNet')

    return model
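An illustrative instantiation following the docstring (the input shape and dropout rate below are placeholders, not values from the paper):

model = AtzoriNet(input_shape=(10, 15, 1), classes=53,
                  n_pool='average', n_dropout=0.15, n_l2=0.0005,
                  n_init='glorot_normal', batch_norm=False)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])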
Example #24
        def build_network(self):
            s = keras_layers.Input(shape=self.nn.input_dims,
                                   dtype='float32',
                                   name='s')

            if self.nn.input_type == INPUT_TYPE_OBSERVATION_VECTOR:
                x = keras_layers.Dense(
                    self.nn.fc_layers_dims[0],
                    activation='relu',
                    kernel_initializer=keras_init.he_normal())(s)

            else:  # self.input_type == INPUT_TYPE_STACKED_FRAMES
                x = keras_layers.Conv2D(
                    filters=32,
                    kernel_size=(8, 8),
                    strides=4,
                    name='conv1',
                    kernel_initializer=keras_init.he_normal())(s)
                x = keras_layers.BatchNormalization(epsilon=1e-5,
                                                    name='conv1_bn')(x)
                x = keras_layers.Activation('relu', name='conv1_bn_ac')(x)

                x = keras_layers.Conv2D(
                    filters=64,
                    kernel_size=(4, 4),
                    strides=2,
                    name='conv2',
                    kernel_initializer=keras_init.he_normal())(x)
                x = keras_layers.BatchNormalization(epsilon=1e-5,
                                                    name='conv2_bn')(x)
                x = keras_layers.Activation('relu', name='conv2_bn_ac')(x)

                x = keras_layers.Conv2D(
                    filters=128,
                    kernel_size=(3, 3),
                    strides=1,
                    name='conv3',
                    kernel_initializer=keras_init.he_normal())(x)
                x = keras_layers.BatchNormalization(epsilon=1e-5,
                                                    name='conv3_bn')(x)
                x = keras_layers.Activation('relu', name='conv3_bn_ac')(x)

                x = keras_layers.Flatten()(x)

            x = keras_layers.Dense(
                self.nn.fc_layers_dims[-1],
                activation='relu',
                kernel_initializer=keras_init.he_normal())(x)

            q_values = keras_layers.Dense(
                self.nn.n_actions,
                name='q_values',
                kernel_initializer=keras_init.glorot_normal())(x)

            q_values_model = keras_models.Model(inputs=s, outputs=q_values)

            #############################

            a_indices_one_hot = keras_layers.Input(shape=(self.nn.n_actions, ),
                                                   dtype='float32',
                                                   name='a_indices_one_hot')

            x = keras_layers.Multiply()([q_values, a_indices_one_hot])
            # sum over the action axis only, keeping one Q-value per sample
            q_chosen_a = keras_layers.Lambda(
                lambda z: keras_backend.sum(z, axis=1, keepdims=True),
                output_shape=(1, ))(x)

            model = keras_models.Model(inputs=[s, a_indices_one_hot],
                                       outputs=q_chosen_a)
            optimizer = keras_get_optimizer(self.nn.optimizer_type,
                                            self.nn.ALPHA)
            model.compile(optimizer=optimizer, loss='mse')

            return model, q_values_model
Example #25
def convolutional_block_3(X, f, filters, stage, block, s=2, seed=None):
    """
    Implementation of the convolutional block:
    X, input from the previous layer will be use for two routes:
    1. the main one, which goes through 3 layers of Conv/BatchNormalization/Relu and leading to different dimension as X_input
    2. the short cut one, which after one pass through conv/batchnorm (to get the dimensions of X at the last layer of the main path)
    will be added back into the main path (before last Relu activation).
    
    Arguments:
    X: input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev) 
    (m = number of samples, n_H = height of the image, n_W = width of the image, n_C = number of channels, prev = previous layer)
    f: integer, specifying the shape of the middle CONV's window for the main path
    filters: python list of 3 integers, defining the number of filters for each CONV layers of the main path
    stage: integer, used to name the layers, depending on their position in the network
    block: string/character, used to name the layers, depending on their position in the network
    seed: none by default, but if you want reproducible weight initialization you set it up with an integer. 
    
    Returns:
    X -- output of the identity block, tensor of shape (n_H, n_W, n_C)
    """

    # defining name basis
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    # Retrieve Filters
    F1, F2, F3 = filters

    # Save the input value
    X_shortcut = X

    ##### MAIN PATH #####
    # First component of main path
    X = Conv2D(F1, (1, 1),
               strides=(s, s),
               name=conv_name_base + '2a',
               kernel_initializer=he_normal(seed=seed))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2a')(X)
    X = Activation('relu')(X)

    # Second component of main path
    X = Conv2D(filters=F2, kernel_size=(f,f), strides=(1,1), padding='same', name= conv_name_base + '2b', \
             kernel_initializer = he_normal(seed=seed))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2b')(X)
    X = Activation('relu')(X)

    # Third component of main path
    X = Conv2D(filters=F3, kernel_size=(1,1), strides=(1,1), padding='valid', name= conv_name_base + '2c', \
               kernel_initializer = he_normal(seed=seed))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2c')(X)

    ##### SHORTCUT PATH ####
    X_shortcut = Conv2D(filters=F3, kernel_size=(1,1), strides=(s,s), padding='valid',name=conv_name_base + '1', \
                       kernel_initializer = he_normal(seed=seed))((X_shortcut))
    X_shortcut = BatchNormalization(axis=3,
                                    name=bn_name_base + '1')((X_shortcut))

    # Final step: Add shortcut value to main path, and pass it through a RELU activation (≈2 lines)
    X = Add()([X, X_shortcut])
    X = Activation('relu')(X)

    return X
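An illustrative call matching the docstring's contract, assuming the same Keras imports as above (the tensor shape is a placeholder):

X_in = Input((28, 28, 64))
X_out = convolutional_block_3(X_in, f=3, filters=[32, 32, 128], stage=2, block='a', s=2)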
    def custom_convolution_layer(initial_model,
                                 base,
                                 width,
                                 strides=(1, 1),
                                 dropout_rate=0.0,
                                 expand=True):
        number_of_filters = int(base * width)

        if expand:
            model = Conv2D(filters=number_of_filters,
                           kernel_size=(3, 3),
                           padding='same',
                           strides=strides,
                           use_bias=False,
                           kernel_initializer=initializers.he_normal(),
                           kernel_regularizer=regularizers.l2(weight_decay))(
                               initial_model)
        else:
            model = initial_model

        model = BatchNormalization(axis=channel_axis,
                                   momentum=0.1,
                                   epsilon=1e-5,
                                   gamma_initializer="uniform")(model)
        model = Activation(activation='relu')(model)

        model = Conv2D(filters=number_of_filters,
                       kernel_size=(3, 3),
                       padding='same',
                       use_bias=False,
                       kernel_initializer=initializers.he_normal(),
                       kernel_regularizer=regularizers.l2(weight_decay))(model)

        if expand:
            skip_layer = Conv2D(filters=number_of_filters,
                                kernel_size=(1, 1),
                                padding='same',
                                strides=strides,
                                use_bias=False,
                                kernel_initializer=initializers.he_normal(),
                                kernel_regularizer=regularizers.l2(
                                    weight_decay))(initial_model)
            model = Add()([model, skip_layer])
        else:
            if dropout_rate > 0.0:
                model = Dropout(rate=dropout_rate)(model)

            model = BatchNormalization(axis=channel_axis,
                                       momentum=0.1,
                                       epsilon=1e-5,
                                       gamma_initializer="uniform")(model)
            model = Activation(activation='relu')(model)

            model = Conv2D(
                filters=number_of_filters,
                kernel_size=(3, 3),
                padding='same',
                use_bias=False,
                kernel_initializer=initializers.he_normal(),
                kernel_regularizer=regularizers.l2(weight_decay))(model)

            model = Add()([initial_model, model])

        return (model)
Example #27
from keras.models import Model
from keras.layers import Conv2D, Conv2DTranspose, Input, Lambda, BatchNormalization, Activation
from keras.layers.merge import concatenate
from keras.initializers import he_normal

from ResNet import identity_block, conv_block

init = he_normal(seed=1)


def side_out(x, factor):
    x = Conv2D(1, (1, 1), activation=None, padding='same')(x)

    kernel_size = (2 * factor, 2 * factor)
    x = Conv2DTranspose(1,
                        kernel_size,
                        strides=factor,
                        padding='same',
                        use_bias=False,
                        activation=None,
                        kernel_initializer=init)(x)
    return x


def side_out_2(x, factor):
    x = Conv2D(1, (1, 1), activation=None, padding='same')(x)

    kernel_size = (factor, factor)
    x = Conv2DTranspose(1,
                        kernel_size,
                        strides=factor,
Example #28
def CNN_model():

    model = Sequential()

    #layer 1
    model.add(
        Conv2D(
            128,
            kernel_size=kernel_size,
            strides=stride,
            padding='valid',
            kernel_initializer=initializers.he_normal(seed=sd),
            data_format="channels_last",  #(batch, height, width, channels)
            kernel_regularizer=regularizers.l2(
                0.01),  #or kernel_regularizer=None.
            use_bias=bias,
            input_shape=(256, 256, 3)))
    model.add(BatchNormalization(epsilon=eps, axis=-1))
    model.add(Activation('relu'))
    model.add(Dropout(rate=dropout_rate_layers, seed=sd))
    model.add(
        MaxPooling2D(pool_size=maxpool_size,
                     strides=maxpool_size,
                     data_format="channels_last"))

    #layer 2
    model.add(
        Conv2D(
            64,
            kernel_size=kernel_size,
            strides=stride,
            padding='valid',
            kernel_initializer=initializers.he_normal(seed=sd),
            data_format="channels_last",  #(batch, height, width, channels)
            kernel_regularizer=regularizers.l2(
                0.01),  #or kernel_regularizer=None.
            use_bias=bias))
    model.add(BatchNormalization(epsilon=eps, axis=-1))
    model.add(Activation('relu'))
    model.add(Dropout(rate=dropout_rate_layers, seed=sd))
    model.add(
        MaxPooling2D(
            pool_size=maxpool_size,
            # strides=maxpool_size,
            data_format="channels_last"))

    #layer 3
    model.add(
        Conv2D(
            32,
            kernel_size=kernel_size,
            #strides=stride,
            padding='valid',
            kernel_initializer=initializers.he_normal(seed=sd),
            data_format="channels_last",  #(batch, height, width, channels)
            kernel_regularizer=regularizers.l2(
                0.01),  #or kernel_regularizer=None.
            use_bias=None))
    model.add(BatchNormalization(epsilon=eps, axis=-1))
    model.add(Activation('relu'))
    model.add(Dropout(rate=dropout_rate_layers, seed=sd))
    model.add(
        MaxPooling2D(
            pool_size=maxpool_size,
            # strides=maxpool_size,
            data_format="channels_last"))

    model.add(Flatten())

    # now the fully connected layers!

    model.add(
        Dense(128,
              activation='relu',
              use_bias=bias,
              kernel_initializer=initializers.he_normal(seed=sd),
              kernel_regularizer=regularizers.l2(0.01)))
    model.add(Dropout(rate=dropout_rate_dense, seed=sd))
    model.add(Dense(num_classes, activation='softmax'))

    adam = Adam(lr=learning_rate, decay=lr_decay)
    model.compile(loss='categorical_crossentropy',
                  optimizer=adam,
                  metrics=['accuracy'])

    return model
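CNN_model() pulls its hyper-parameters from module scope; an illustrative set of stand-ins (placeholders, not the original project's values) that lets the snippet run:

kernel_size, stride, maxpool_size = (3, 3), (1, 1), (2, 2)
sd, eps, bias = 42, 1e-3, True
dropout_rate_layers, dropout_rate_dense = 0.25, 0.5
num_classes, learning_rate, lr_decay = 10, 1e-3, 1e-6
model = CNN_model()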
Example #29
def build_model(x_train,weight_decay,dropout):
    model = Sequential()

    # Block 1
    model.add(Conv2D(64, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),
                     kernel_initializer=he_normal(), name='block1_conv1', input_shape=x_train.shape[1:]))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(64, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),
                     kernel_initializer=he_normal(), name='block1_conv2'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool'))

    # Block 2
    model.add(Conv2D(128, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),
                     kernel_initializer=he_normal(), name='block2_conv1'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(128, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),
                     kernel_initializer=he_normal(), name='block2_conv2'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool'))

    # Block 3
    model.add(Conv2D(256, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),
                     kernel_initializer=he_normal(), name='block3_conv1'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(256, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),
                     kernel_initializer=he_normal(), name='block3_conv2'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(256, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),
                     kernel_initializer=he_normal(), name='block3_conv3'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(256, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),
                     kernel_initializer=he_normal(), name='block3_conv4'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool'))

    # Block 4
    model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),
                     kernel_initializer=he_normal(), name='block4_conv1'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),
                     kernel_initializer=he_normal(), name='block4_conv2'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),
                     kernel_initializer=he_normal(), name='block4_conv3'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),
                     kernel_initializer=he_normal(), name='block4_conv4'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool'))

    # Block 5
    model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),
                     kernel_initializer=he_normal(), name='block5_conv1'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),
                     kernel_initializer=he_normal(), name='block5_conv2'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),
                     kernel_initializer=he_normal(), name='block5_conv3'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),
                     kernel_initializer=he_normal(), name='block5_conv4'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool'))

    # Using CIFAR-10 as the example; adjust this part for other datasets
    model.add(Flatten(name='flatten'))
    model.add(Dense(4096, use_bias=True, kernel_regularizer=keras.regularizers.l2(weight_decay),
                    kernel_initializer=he_normal(), name='fc_cifa10'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(dropout))
    model.add(
        Dense(4096, kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='fc2'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(dropout))
    model.add(Dense(10, kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(),
                    name='predictions_cifa10'))
    model.add(BatchNormalization())
    model.add(Activation('softmax'))
    return model
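Illustrative usage on CIFAR-10-shaped data (the weight decay and dropout values are placeholders):

import numpy as np
x_train = np.zeros((1, 32, 32, 3), dtype='float32')
model = build_model(x_train, weight_decay=5e-4, dropout=0.5)
model.summary()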
Example #30
def dense_layer(x):
    return Dense(classes_num,
                 activation='softmax',
                 kernel_initializer=he_normal(),
                 kernel_regularizer=regularizers.l2(self.weight_decay))(x)
Example #31
def model_1():

    image_size = 256
    dropout = 0.2
    kernel_size = (3, 3)
    pool_size = (2, 2)
    name = 'model1'
    epochs = 120
    stride = (2, 2)

    #receptive field size = prevLayerRCF + (K-1) * jumpSize
    #featOut = ceil((featIn + 2*padding - K)/strideLen)+1
    #jumpOut = (featInit-featOut)/featOut-1  OR  stride*JumpIn

    model = Sequential()
    model.add(
        Conv2D(32,
               kernel_size=kernel_size,
               padding='same',
               strides=stride,
               input_shape=(image_size, image_size, 3),
               kernel_initializer=initializers.he_normal(seed=None)))
    model.add(Activation('relu'))
    model.add(BatchNormalization(momentum=0.99, epsilon=0.001))
    #model.add(MaxPooling2D(pool_size=pool_size))
    model.add(Dropout(dropout))
    #RFS= 1 + 2*1= 3
    #c=3/3=1

    model.add(
        Conv2D(32,
               kernel_size=kernel_size,
               padding='same',
               strides=stride,
               kernel_initializer=initializers.he_normal(seed=None)))
    model.add(Activation('relu'))
    model.add(BatchNormalization(momentum=0.99, epsilon=0.001))
    #model.add(MaxPooling2D(pool_size=pool_size))  # disabled: the stride-2 convs already downsample; with the pools too, a 256px input collapses before the last block
    model.add(Dropout(dropout))
    #RFS= 3+2^2 = 7
    #c=3/7

    model.add(
        Conv2D(64,
               kernel_size=kernel_size,
               padding='same',
               strides=stride,
               kernel_initializer=initializers.he_normal(seed=None)))
    model.add(Activation('relu'))
    model.add(BatchNormalization(momentum=0.99, epsilon=0.001))
    #model.add(MaxPooling2D(pool_size=pool_size))
    model.add(Dropout(dropout))
    #RFS= 7 + 2^3=15
    #c=3/15=0.2

    model.add(
        Conv2D(64,
               kernel_size=kernel_size,
               padding='same',
               strides=stride,
               kernel_initializer=initializers.he_normal(seed=None)))
    model.add(Activation('relu'))
    model.add(BatchNormalization(momentum=0.99, epsilon=0.001))
    #model.add(MaxPooling2D(pool_size=pool_size))  # disabled, see above
    model.add(Dropout(dropout))
    #RFS = 15 + 2^4=31

    model.add(
        Conv2D(128,
               kernel_size=kernel_size,
               padding='same',
               strides=stride,
               kernel_initializer=initializers.he_normal(seed=None)))
    model.add(Activation('relu'))
    model.add(BatchNormalization(momentum=0.99, epsilon=0.001))
    #model.add(MaxPooling2D(pool_size=pool_size))
    model.add(Dropout(dropout))
    #RFS = 31 +2^5=63

    model.add(
        Conv2D(128,
               kernel_size=kernel_size,
               padding='same',
               strides=stride,
               kernel_initializer=initializers.he_normal(seed=None)))
    model.add(Activation('relu'))
    model.add(BatchNormalization(momentum=0.99, epsilon=0.001))
    #model.add(MaxPooling2D(pool_size=pool_size))  # disabled, see above
    model.add(Dropout(dropout))
    #RFS = 63 + 2^6 = 127

    model.add(
        Conv2D(256,
               kernel_size=kernel_size,
               padding='same',
               strides=stride,
               kernel_initializer=initializers.he_normal(seed=None)))
    model.add(Activation('relu'))
    model.add(BatchNormalization(momentum=0.99, epsilon=0.001))
    #model.add(MaxPooling2D(pool_size=pool_size))  # disabled, see above
    model.add(Dropout(dropout))
    #RFS = 127 + 2^7 = 255
    '''
    model.add(Conv2D(256, kernel_size=kernel_size, padding='same', strides=stride, kernel_initializer=initializers.he_normal(seed=None)))
    model.add(Activation('relu'))
    model.add(BatchNormalization(momentum=0.99, epsilon=0.001))
    model.add(MaxPooling2D(pool_size=pool_size))
    model.add(Dropout(dropout))
    #RFS = 255 + 2^8 = 511
    '''

    model.add(
        Flatten())  # this converts our 3D feature maps to 1D feature vectors

    model.add(
        Dense(256, kernel_initializer=initializers.lecun_normal(seed=None)))
    model.add(Activation('relu'))
    model.add(Dropout(dropout))

    model.add(
        Dense(128, kernel_initializer=initializers.lecun_normal(seed=None)))
    model.add(Activation('relu'))
    model.add(Dropout(dropout))

    model.add(
        Dense(64, kernel_initializer=initializers.lecun_normal(seed=None)))
    model.add(Activation('relu'))
    model.add(Dropout(dropout))

    model.add(
        Dense(32, kernel_initializer=initializers.lecun_normal(seed=None)))
    model.add(Activation('relu'))
    model.add(Dropout(dropout))

    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    trainAndSave(model, epochs, name, target_size=(image_size, image_size))
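The receptive-field notes above can be checked mechanically. A minimal sketch (not from the source) of the two formulas, walked over the seven stride-2 convolutions of model_1:

# rf_out = rf_in + (K - 1) * jump_in;  jump_out = jump_in * stride
def receptive_field(layers):
    rf, jump = 1, 1
    for k, s in layers:            # layers listed input-to-output as (kernel, stride)
        rf += (k - 1) * jump
        jump *= s
    return rf

print(receptive_field([(3, 2)] * 7))   # -> 255, matching the last #RFS comment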
Example #32
    def densenet(self, img_input, classes_num):
        def bn_relu(x):
            x = BatchNormalization()(x)
            x = Activation('relu')(x)
            return x

        def bottleneck(x):
            channels = self.growth_rate * 4
            x = bn_relu(x)
            x = Conv2D(channels,
                       kernel_size=(1, 1),
                       strides=(1, 1),
                       padding='same',
                       kernel_initializer=he_normal(),
                       kernel_regularizer=regularizers.l2(self.weight_decay),
                       use_bias=False)(x)
            x = bn_relu(x)
            x = Conv2D(self.growth_rate,
                       kernel_size=(3, 3),
                       strides=(1, 1),
                       padding='same',
                       kernel_initializer=he_normal(),
                       kernel_regularizer=regularizers.l2(self.weight_decay),
                       use_bias=False)(x)
            return x

        def single(x):
            x = bn_relu(x)
            x = Conv2D(self.growth_rate,
                       kernel_size=(3, 3),
                       strides=(1, 1),
                       padding='same',
                       kernel_initializer=he_normal(),
                       kernel_regularizer=regularizers.l2(self.weight_decay),
                       use_bias=False)(x)
            return x

        def transition(x, inchannels):
            outchannels = int(inchannels * self.compression)
            x = bn_relu(x)
            x = Conv2D(outchannels,
                       kernel_size=(1, 1),
                       strides=(1, 1),
                       padding='same',
                       kernel_initializer=he_normal(),
                       kernel_regularizer=regularizers.l2(self.weight_decay),
                       use_bias=False)(x)
            x = AveragePooling2D((2, 2), strides=(2, 2))(x)
            return x, outchannels

        def dense_block(x, blocks, nchannels):
            concat = x
            for i in range(blocks):
                x = bottleneck(concat)
                concat = concatenate([x, concat], axis=-1)
                nchannels += self.growth_rate
            return concat, nchannels

        def dense_layer(x):
            return Dense(classes_num,
                         activation='softmax',
                         kernel_initializer=he_normal(),
                         kernel_regularizer=regularizers.l2(
                             self.weight_decay))(x)

        nblocks = (self.depth - 4) // 6
        nchannels = self.growth_rate * 2

        x = Conv2D(nchannels,
                   kernel_size=(3, 3),
                   strides=(1, 1),
                   padding='same',
                   kernel_initializer=he_normal(),
                   kernel_regularizer=regularizers.l2(self.weight_decay),
                   use_bias=False)(img_input)

        x, nchannels = dense_block(x, nblocks, nchannels)
        x, nchannels = transition(x, nchannels)
        x, nchannels = dense_block(x, nblocks, nchannels)
        x, nchannels = transition(x, nchannels)
        x, nchannels = dense_block(x, nblocks, nchannels)
        x, nchannels = transition(x, nchannels)
        x = bn_relu(x)
        x = GlobalAveragePooling2D()(x)
        x = dense_layer(x)
        return x
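Each pass through dense_block concatenates growth_rate new feature maps, so channels grow linearly, and each transition then compresses them. A pure-arithmetic sketch of the bookkeeping (the values below are typical DenseNet-BC choices, not taken from the source):

growth_rate, depth, compression = 12, 100, 0.5
nblocks = (depth - 4) // 6                  # 16 bottlenecks per dense block
channels = growth_rate * 2                  # stem conv output, as in the code above
for _ in range(3):                          # three dense_block + transition pairs
    channels += nblocks * growth_rate       # each bottleneck adds growth_rate maps
    channels = int(channels * compression)  # transition compresses by 0.5
print(channels)                             # -> 171 entering the final bn_relu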
Example #33
def test_he_normal(tensor_shape):
    fan_in, _ = initializers._compute_fans(tensor_shape)
    std = np.sqrt(2. / fan_in)
    _runner(initializers.he_normal(), tensor_shape,
            target_mean=0., target_std=std)
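For reference, the std the test checks comes from He et al.'s rule std = sqrt(2 / fan_in), where fan_in for a conv kernel (rows, cols, in_ch, out_ch) is rows * cols * in_ch. A numpy-only sketch of the same arithmetic (the kernel shape is an assumption; note that Keras's he_normal additionally truncates the normal at two standard deviations, while this sketch uses a plain normal just to show the scale rule):

import numpy as np

kernel_shape = (3, 3, 64, 128)            # hypothetical conv kernel shape
fan_in = int(np.prod(kernel_shape[:-1]))  # 3 * 3 * 64 = 576
std = np.sqrt(2.0 / fan_in)
sample = np.random.normal(0.0, std, size=kernel_shape)
print(std, sample.std())                  # empirical std approaches the target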
Example #34
def dense_layer(x):
    return Dense(1,
                 activation='sigmoid',
                 kernel_initializer=he_normal(),
                 kernel_regularizer=regularizers.l2(weight_decay))(x)
Example #35
def get_unitized_resnet(input_shape, stack_nb, weight_decay=0.0005):
    inputs = KL.Input(shape=input_shape, name='inputs')
    get_layer = GetLayer()

    def residual_block(_tensor, filters):
        increase_filter = filters[0] != filters[1]
        output_filters = filters[1]
        if increase_filter:
            projection = get_layer(
                KL.Conv2D,
                output_filters,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding='same',
                kernel_initializer=KI.he_normal(),
                kernel_regularizer=KR.l2(weight_decay))(_tensor)
        else:
            projection = _tensor

        _tensor = get_layer(Unitization, axis=AXIS)(_tensor)
        _tensor = get_layer(KL.Activation, 'relu')(_tensor)
        _tensor = get_layer(KL.Conv2D,
                            output_filters,
                            kernel_size=(3, 3),
                            strides=(1, 1),
                            padding='same',
                            kernel_initializer=KI.he_normal(),
                            kernel_regularizer=KR.l2(weight_decay))(_tensor)

        _tensor = get_layer(Unitization, axis=AXIS)(_tensor)
        _tensor = get_layer(KL.Activation, 'relu')(_tensor)
        _tensor = get_layer(KL.Conv2D,
                            output_filters,
                            kernel_size=(3, 3),
                            strides=(1, 1),
                            padding='same',
                            kernel_initializer=KI.he_normal(),
                            kernel_regularizer=KR.l2(weight_decay))(_tensor)

        return get_layer(KL.Add)([_tensor, projection])

    tensor = get_layer(KL.Conv2D,
                       filters=16,
                       kernel_size=(3, 3),
                       strides=(1, 1),
                       padding='same',
                       kernel_initializer=KI.he_normal(),
                       kernel_regularizer=KR.l2(weight_decay))(inputs)

    for _ in range(0, stack_nb):
        tensor = residual_block(tensor, [16, 16])

    tensor = residual_block(tensor, [16, 32])
    for _ in range(1, stack_nb):
        tensor = residual_block(tensor, [32, 32])

    tensor = residual_block(tensor, [32, 64])
    for _ in range(1, stack_nb):
        tensor = residual_block(tensor, [64, 64])

    tensor = get_layer(Unitization, axis=AXIS)(tensor)
    tensor = get_layer(KL.Activation, 'relu')(tensor)
    # tensor = get_layer(KL.GlobalAveragePooling2D)(tensor)
    return inputs, tensor
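get_unitized_resnet returns the input tensor and the pre-pooling features (the GlobalAveragePooling2D call is commented out), so a classification head still has to be attached. A hedged usage sketch, assuming the snippet's custom GetLayer, Unitization, and AXIS names are importable from its repository:

inputs, tensor = get_unitized_resnet(input_shape=(32, 32, 3), stack_nb=3)
tensor = KL.GlobalAveragePooling2D()(tensor)
outputs = KL.Dense(10, activation='softmax')(tensor)
model = keras.models.Model(inputs, outputs)
model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])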
Example #36
    def build_model(self):
        # Build the network of vgg for 10 classes with massive dropout and weight decay as described in the paper.

        # build model
        model = Sequential()
        # Block 1
        model.add(
            Conv2D(64, (3, 3),
                   padding='same',
                   kernel_regularizer=keras.regularizers.l2(self.weight_decay),
                   kernel_initializer=he_normal(),
                   name='block1_conv1',
                   input_shape=self.input_shape))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(
            Conv2D(64, (3, 3),
                   padding='same',
                   kernel_regularizer=keras.regularizers.l2(self.weight_decay),
                   kernel_initializer=he_normal(),
                   name='block1_conv2'))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool'))

        # Block 2
        model.add(
            Conv2D(128, (3, 3),
                   padding='same',
                   kernel_regularizer=keras.regularizers.l2(self.weight_decay),
                   kernel_initializer=he_normal(),
                   name='block2_conv1'))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(
            Conv2D(128, (3, 3),
                   padding='same',
                   kernel_regularizer=keras.regularizers.l2(self.weight_decay),
                   kernel_initializer=he_normal(),
                   name='block2_conv2'))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool'))

        # Block 3
        model.add(
            Conv2D(256, (3, 3),
                   padding='same',
                   kernel_regularizer=keras.regularizers.l2(self.weight_decay),
                   kernel_initializer=he_normal(),
                   name='block3_conv1'))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(
            Conv2D(256, (3, 3),
                   padding='same',
                   kernel_regularizer=keras.regularizers.l2(self.weight_decay),
                   kernel_initializer=he_normal(),
                   name='block3_conv2'))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(
            Conv2D(256, (3, 3),
                   padding='same',
                   kernel_regularizer=keras.regularizers.l2(self.weight_decay),
                   kernel_initializer=he_normal(),
                   name='block3_conv3'))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(
            Conv2D(256, (3, 3),
                   padding='same',
                   kernel_regularizer=keras.regularizers.l2(self.weight_decay),
                   kernel_initializer=he_normal(),
                   name='block3_conv4'))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool'))

        # Block 4
        model.add(
            Conv2D(512, (3, 3),
                   padding='same',
                   kernel_regularizer=keras.regularizers.l2(self.weight_decay),
                   kernel_initializer=he_normal(),
                   name='block4_conv1'))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(
            Conv2D(512, (3, 3),
                   padding='same',
                   kernel_regularizer=keras.regularizers.l2(self.weight_decay),
                   kernel_initializer=he_normal(),
                   name='block4_conv2'))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(
            Conv2D(512, (3, 3),
                   padding='same',
                   kernel_regularizer=keras.regularizers.l2(self.weight_decay),
                   kernel_initializer=he_normal(),
                   name='block4_conv3'))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(
            Conv2D(512, (3, 3),
                   padding='same',
                   kernel_regularizer=keras.regularizers.l2(self.weight_decay),
                   kernel_initializer=he_normal(),
                   name='block4_conv4'))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool'))

        # Block 5
        model.add(
            Conv2D(512, (3, 3),
                   padding='same',
                   kernel_regularizer=keras.regularizers.l2(self.weight_decay),
                   kernel_initializer=he_normal(),
                   name='block5_conv1'))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(
            Conv2D(512, (3, 3),
                   padding='same',
                   kernel_regularizer=keras.regularizers.l2(self.weight_decay),
                   kernel_initializer=he_normal(),
                   name='block5_conv2'))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(
            Conv2D(512, (3, 3),
                   padding='same',
                   kernel_regularizer=keras.regularizers.l2(self.weight_decay),
                   kernel_initializer=he_normal(),
                   name='block5_conv3'))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(
            Conv2D(512, (3, 3),
                   padding='same',
                   kernel_regularizer=keras.regularizers.l2(self.weight_decay),
                   kernel_initializer=he_normal(),
                   name='block5_conv4'))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool'))

        # model modification for cifar-10
        model.add(Flatten(name='flatten'))
        model.add(
            Dense(4096,
                  use_bias=True,
                  kernel_regularizer=keras.regularizers.l2(self.weight_decay),
                  kernel_initializer=he_normal(),
                  name='fc_cifa10'))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(Dropout(0.5))
        model.add(
            Dense(4096,
                  kernel_regularizer=keras.regularizers.l2(self.weight_decay),
                  kernel_initializer=he_normal(),
                  name='fc2'))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(Dropout(0.5))
        model.add(
            Dense(10,
                  kernel_regularizer=keras.regularizers.l2(self.weight_decay),
                  kernel_initializer=he_normal(),
                  name='predictions_cifa10'))
        model.add(BatchNormalization())
        model.add(Activation('softmax'))
        return model
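build_model reads input_shape and weight_decay from self. A minimal sketch of an enclosing class (the class name and values are assumptions, not from the source):

class VGGCifar10:
    input_shape = (32, 32, 3)
    weight_decay = 5e-4
    build_model = build_model   # the method defined above, assumed at module scope

model = VGGCifar10().build_model()
model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])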
Example #37
@pytest.mark.parametrize(
    ["initializer", "initializer_dict"],
    [
        # (earlier parametrize cases truncated in the source)
        pytest.param(
            initializers.orthogonal(gain=1.2, seed=42),
            dict(class_name="orthogonal", gain=1.2, seed=42),
            id="o_1",
        ),
        pytest.param(initializers.Identity(1.1), dict(class_name="identity", gain=1.1), id="i_0"),
        pytest.param(initializers.identity(), dict(class_name="identity", gain=1.0), id="i_1"),
        #################### VarianceScaling ####################
        pytest.param(
            initializers.glorot_normal(), dict(class_name="glorot_normal", seed=None), id="gn_0"
        ),
        pytest.param(
            initializers.glorot_uniform(42), dict(class_name="glorot_uniform", seed=42), id="gu_0"
        ),
        pytest.param(initializers.he_normal(), dict(class_name="he_normal", seed=None), id="hn_0"),
        pytest.param(
            initializers.he_uniform(42), dict(class_name="he_uniform", seed=42), id="hu_0"
        ),
        pytest.param(
            initializers.lecun_normal(), dict(class_name="lecun_normal", seed=None), id="ln_0"
        ),
        pytest.param(
            initializers.lecun_uniform(42), dict(class_name="lecun_uniform", seed=42), id="lu_0"
        ),
    ],
)
def test_keras_initializer_to_dict(initializer, initializer_dict):
    assert get_concise_params_dict(keras_initializer_to_dict(initializer)) == initializer_dict

Example #38
    include_top=False,
    input_shape=(height, width, constants.NUM_CHANNELS)
)

# First time run, no unlocking
conv_base.trainable = False

# Let's see it
print('Summary')
print(conv_base.summary())

# Let's construct that top layer replacement
x = conv_base.output
x = AveragePooling2D(pool_size=(7, 7))(x)
x = Flatten()(x)
x = Dense(256, activation='relu', kernel_initializer=initializers.he_normal(seed=None), kernel_regularizer=regularizers.l2(.0005))(x)
x = Dropout(0.5)(x)
# Essential to have another layer for better accuracy
x = Dense(128,activation='relu', kernel_initializer=initializers.he_normal(seed=None))(x)
x = Dropout(0.25)(x)
predictions = Dense(constants.NUM_CLASSES,  kernel_initializer="glorot_uniform", activation='softmax')(x)

print('Stacking New Layers')
model = Model(inputs = conv_base.input, outputs=predictions)

# Load checkpoint if one is found
if os.path.exists(weights_file):
        print ("loading ", weights_file)
        model.load_weights(weights_file)

# Get all model callbacks
Example #39
def wide_residual_network(img_input, classes_num, depth, k):
    print('Wide-Resnet %dx%d' % (depth, k))
    n_filters = [16, 16 * k, 32 * k, 64 * k]
    n_stack = (depth - 4) // 6

    def conv3x3(x, filters):
        return Conv2D(filters=filters,
                      kernel_size=(3, 3),
                      strides=(1, 1),
                      padding='same',
                      kernel_initializer=he_normal(),
                      kernel_regularizer=regularizers.l2(weight_decay))(x)

    def residual_block(x, out_filters, increase_filter=False):
        global in_filters
        if increase_filter:
            first_stride = (2, 2)
        else:
            first_stride = (1, 1)
        pre_bn = BatchNormalization(momentum=0.9, epsilon=1e-5)(x)
        pre_relu = Activation('relu')(pre_bn)
        conv_1 = Conv2D(
            out_filters,
            kernel_size=(3, 3),
            strides=first_stride,
            padding='same',
            kernel_initializer=he_normal(),
            kernel_regularizer=regularizers.l2(weight_decay))(pre_relu)
        bn_1 = BatchNormalization(momentum=0.9, epsilon=1e-5)(conv_1)
        relu1 = Activation('relu')(bn_1)
        conv_2 = Conv2D(
            out_filters,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding='same',
            kernel_initializer=he_normal(),
            kernel_regularizer=regularizers.l2(weight_decay))(relu1)
        if increase_filter or in_filters != out_filters:
            projection = Conv2D(
                out_filters,
                kernel_size=(1, 1),
                strides=first_stride,
                padding='same',
                kernel_initializer=he_normal(),
                kernel_regularizer=regularizers.l2(weight_decay))(pre_relu)
            block = add([conv_2, projection])
        else:
            block = add([conv_2, x])
        return block

    def wide_residual_layer(x, out_filters, increase_filter=False):
        global in_filters
        x = residual_block(x, out_filters, increase_filter)
        in_filters = out_filters
        for _ in range(1, int(n_stack)):
            x = residual_block(x, out_filters)
        return x

    x = conv3x3(img_input, n_filters[0])
    x = wide_residual_layer(x, n_filters[1])
    x = wide_residual_layer(x, n_filters[2], increase_filter=True)
    x = wide_residual_layer(x, n_filters[3], increase_filter=True)
    x = BatchNormalization(momentum=0.9, epsilon=1e-5)(x)
    x = Activation('relu')(x)
    x = GlobalAveragePooling2D()(x)
    x = Dense(classes_num,
              activation='softmax',
              kernel_initializer=he_normal(),
              kernel_regularizer=regularizers.l2(weight_decay))(x)
    return x
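wide_residual_network reads two module-level globals, weight_decay and in_filters, the latter before any assignment inside the function. A hedged usage sketch for a WRN-28-10 on CIFAR-10-shaped input (values assumed):

weight_decay = 5e-4   # used by every l2 regularizer above
in_filters = 16       # read by residual_block before wide_residual_layer sets it

img_input = Input(shape=(32, 32, 3))
output = wide_residual_network(img_input, classes_num=10, depth=28, k=10)
model = Model(img_input, output)   # depth must satisfy (depth - 4) % 6 == 0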
Example #40
    def GenerateModel(self):
        b_f = 128
        # ENCODER
        input_ = Input(shape=(56, 56, 1))

        encoder_hidden1 = Conv2D(filters=b_f,
                                 kernel_size=2,
                                 strides=(2, 2),
                                 padding='valid',
                                 kernel_initializer='he_normal')(input_)
        encoder_hidden1 = BatchNormalization()(encoder_hidden1)
        encoder_hidden1 = Activation('relu')(encoder_hidden1)

        encoder_hidden2 = Conv2D(
            filters=b_f * 2,
            kernel_size=2,
            strides=(2, 2),
            padding='valid',
            kernel_initializer='he_normal')(encoder_hidden1)
        encoder_hidden2 = BatchNormalization()(encoder_hidden2)
        encoder_hidden2 = Activation('relu')(encoder_hidden2)

        encoder_hidden3 = Conv2D(
            filters=b_f * 4,
            kernel_size=2,
            strides=(2, 2),
            padding='valid',
            kernel_initializer='he_normal')(encoder_hidden2)
        encoder_hidden3 = BatchNormalization()(encoder_hidden3)
        encoder_hidden3 = Activation('relu')(encoder_hidden3)

        encoder_hidden4 = Flatten()(encoder_hidden3)

        # Latent Representation Distribution, P(z)
        z_mean = Dense(self.latent_dim,
                       activation='linear',
                       kernel_initializer=initializers.he_normal(
                           seed=None))(encoder_hidden4)
        z_std_sq_log = Dense(self.latent_dim,
                             activation='linear',
                             kernel_initializer=initializers.he_normal(
                                 seed=None))(encoder_hidden4)

        # Sampling z from P(z)
        def sample_z(args):
            mu, std_sq_log = args
            epsilon = K.random_normal(shape=(K.shape(mu)[0], self.latent_dim),
                                      mean=0.,
                                      stddev=1.)
            z = mu + epsilon * K.sqrt(K.exp(std_sq_log))
            return z

        z = Lambda(sample_z)([z_mean, z_std_sq_log])

        # DECODER
        decoder_hidden0 = Dense(
            K.int_shape(encoder_hidden4)[1],
            activation='relu',
            kernel_initializer=initializers.he_normal(seed=None))(z)
        decoder_hidden0 = Reshape(
            K.int_shape(encoder_hidden3)[1:])(decoder_hidden0)

        decoder_hidden1 = Conv2DTranspose(
            filters=b_f * 4,
            kernel_size=2,
            strides=(2, 2),
            padding='valid',
            kernel_initializer='he_normal')(decoder_hidden0)
        decoder_hidden1 = BatchNormalization()(decoder_hidden1)
        decoder_hidden1 = Activation('relu')(decoder_hidden1)

        decoder_hidden2 = Conv2DTranspose(
            filters=b_f * 2,
            kernel_size=2,
            strides=(2, 2),
            padding='valid',
            kernel_initializer='he_normal')(decoder_hidden1)
        decoder_hidden2 = BatchNormalization()(decoder_hidden2)
        decoder_hidden2 = Activation('relu')(decoder_hidden2)

        decoder_hidden3 = Conv2DTranspose(
            filters=b_f,
            kernel_size=2,
            strides=(2, 2),
            padding='valid',
            kernel_initializer='he_normal')(decoder_hidden2)
        decoder_hidden3 = BatchNormalization()(decoder_hidden3)
        decoder_hidden3 = Activation('relu')(decoder_hidden3)

        decoder_hidden4 = Conv2D(
            filters=1,
            kernel_size=1,
            strides=(1, 1),
            padding='valid',
            kernel_initializer='he_normal')(decoder_hidden3)
        decoder_hidden4 = Activation('sigmoid')(decoder_hidden4)
        # MODEL
        vae = Model(input_, decoder_hidden4)

        # Encoder Model
        encoder = Model(inputs=input_, outputs=[z_mean, z_std_sq_log])

        # Decoder Model
        no_of_encoder_layers = len(encoder.layers)
        no_of_vae_layers = len(vae.layers)

        decoder_input = Input(shape=(self.latent_dim, ))
        decoder_hidden = vae.layers[no_of_encoder_layers + 1](decoder_input)

        for i in np.arange(no_of_encoder_layers + 2, no_of_vae_layers - 1):
            decoder_hidden = vae.layers[i](decoder_hidden)
        decoder_hidden = vae.layers[no_of_vae_layers - 1](decoder_hidden)
        decoder = Model(decoder_input, decoder_hidden)

        self.VAE = vae
        self.Encoder = encoder
        self.Decoder = decoder
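GenerateModel wires up the reparameterization sampler but no objective. The usual VAE loss for this z_mean / z_std_sq_log parameterization adds a reconstruction term to KL(N(mu, sigma^2) || N(0, 1)) = -0.5 * sum(1 + log sigma^2 - mu^2 - sigma^2). A hedged sketch, not part of the source class (z_mean and z_std_sq_log would have to remain in scope when compiling):

def vae_loss(y_true, y_pred):
    # per-pixel binary cross-entropy, summed over the image
    recon = K.sum(K.binary_crossentropy(K.batch_flatten(y_true),
                                        K.batch_flatten(y_pred)), axis=-1)
    # KL divergence of the approximate posterior from the unit-Gaussian prior
    kl = -0.5 * K.sum(1 + z_std_sq_log - K.square(z_mean) - K.exp(z_std_sq_log),
                      axis=-1)
    return recon + kl

# vae.compile(optimizer='adam', loss=vae_loss)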
Example #41
def conv3x3(x, filters):
    return Conv2D(filters=filters, kernel_size=(3,3), strides=(1,1), padding='same',
                  kernel_initializer=he_normal(),
                  kernel_regularizer=regularizers.l2(self.weight_decay))(x)
Example #42
# datasetSize = 0.75 returns three-quarters of the dataset.

# Expand the dimensions of the image to have a channel dimension. (nx128x128) ==> (nx128x128x1)
train_x = train_x.reshape(train_x.shape[0], train_x.shape[1], train_x.shape[2],
                          1)
test_x = test_x.reshape(test_x.shape[0], test_x.shape[1], test_x.shape[2], 1)

# Normalize the matrices.
train_x = train_x / 255.
test_x = test_x / 255.

model = Sequential()
model.add(
    Conv2D(filters=64,
           kernel_size=[7, 7],
           kernel_initializer=initializers.he_normal(seed=1),
           activation="relu",
           input_shape=(128, 128, 1)))
# Dim = (122x122x64)
model.add(BatchNormalization())
model.add(AveragePooling2D(pool_size=[2, 2], strides=2))
# Dim = (61x61x64)
model.add(
    Conv2D(filters=128,
           kernel_size=[7, 7],
           strides=2,
           kernel_initializer=initializers.he_normal(seed=1),
           activation="relu"))
# Dim = (28x28x128)
model.add(BatchNormalization())
model.add(AveragePooling2D(pool_size=[2, 2], strides=2))
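The Dim comments follow the valid-padding size formula floor((n - k) / s) + 1; a quick check (not from the source):

def conv_out(n, k, s=1):
    return (n - k) // s + 1      # 'valid' padding

n = conv_out(128, 7)             # 122: 7x7 conv, stride 1
n = conv_out(n, 2, 2)            # 61:  2x2 average pool, stride 2
n = conv_out(n, 7, 2)            # 28:  7x7 conv, stride 2
n = conv_out(n, 2, 2)            # 14:  2x2 average pool, stride 2
print(n)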
Example #43
import tensorflow as tf
import keras.backend as K
from keras.engine.topology import InputSpec
from keras.engine.topology import Layer
from keras.layers.merge import _Merge
from keras.layers import *
from keras import activations
from keras import initializers
from keras.models import Model,Sequential
import numpy as np
from layers import *

linear, linear_init = activations.linear,       initializers.he_normal()
relu,   relu_init = activations.relu,         initializers.he_normal()
lrelu,  lrelu_init = lambda x: K.relu(x, 0.2),  initializers.he_normal()


def vlrelu(x): return K.relu(x, 0.3)


def G_convblock(
    net,
    num_filter,
    filter_size,
    actv,
    init,
    pad='same',
    use_wscale=True,
    use_pixelnorm=True,
    use_batchnorm=False,
    name=None):
Example #44
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')

# data preprocessing: subtract the per-channel ImageNet means (RGB order)
x_train[:,:,:,0] = (x_train[:,:,:,0]-123.680)
x_train[:,:,:,1] = (x_train[:,:,:,1]-116.779)
x_train[:,:,:,2] = (x_train[:,:,:,2]-103.939)
x_test[:,:,:,0] = (x_test[:,:,:,0]-123.680)
x_test[:,:,:,1] = (x_test[:,:,:,1]-116.779)
x_test[:,:,:,2] = (x_test[:,:,:,2]-103.939)

# build model
model = Sequential()

# Block 1
model.add(Conv2D(64, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block1_conv1', input_shape=x_train.shape[1:]))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block1_conv2'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool'))

# Block 2
model.add(Conv2D(128, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block2_conv1'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(128, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), kernel_initializer=he_normal(), name='block2_conv2'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool'))