# In[ ]:


y, label_encoder = prepare_labels(train_df['Id'])
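# Note: prepare_labels is not defined in this excerpt. A minimal, hedged sketch of what
# such a helper typically does (label-encode the Id column, then one-hot encode it);
# the original implementation may differ:
# from sklearn.preprocessing import LabelEncoder
# from keras.utils import np_utils
#
# def prepare_labels(y):
#     label_encoder = LabelEncoder()
#     integer_encoded = label_encoder.fit_transform(y.values)
#     onehot_encoded = np_utils.to_categorical(integer_encoded)
#     return onehot_encoded, label_encoder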

# In[ ]:


y.shape

# In[ ]:


model = Sequential()

model.add(Conv2D(32, (7, 7), strides = (1, 1), name = 'conv0', input_shape = (100, 100, 3)))

model.add(BatchNormalization(axis = 3, name = 'bn0'))
model.add(Activation('relu'))

model.add(MaxPooling2D((2, 2), name='max_pool'))
model.add(Conv2D(64, (3, 3), strides = (1,1), name="conv1"))
model.add(Activation('relu'))
model.add(AveragePooling2D((3, 3), name='avg_pool'))

model.add(Flatten())
model.add(Dense(500, activation="relu", name='rl'))
model.add(Dropout(0.8))
model.add(Dense(y.shape[1], activation='softmax', name='sm'))

model.compile(loss='categorical_crossentropy', optimizer="adam", metrics=['accuracy'])
# Example #2
# validation_data_dir = '/data/keloli/PicLib/Kaggle/DogsVSCats/TestData'

# dimensions of our images.
img_width, img_height = 150, 150
nb_train_samples = 2000
nb_validation_samples = 800
epochs = 50
batch_size = 64

if K.image_data_format() == 'channels_first':
    input_shape = (3, img_width, img_height)
else:
    input_shape = (img_width, img_height, 3)

model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
print(type(X_train))

# In[ ]:

Y_train = np_utils.to_categorical(y_train, 10)
Y_test = np_utils.to_categorical(y_test, 10)

print(Y_train[:10])

# In[ ]:

model = Sequential()

# In[ ]:

model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(3, 32, 32)))
#model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(32, (3, 3), activation='relu'))
#model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())

# In[ ]:

model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
def unet_model():
    
    
    inputs = Input((2, img_size, img_size))
    conv1 = Conv2D(64, (3, 3), activation='relu', padding='same') (inputs)
    batch1 = BatchNormalization(axis=1)(conv1)
    conv1 = Conv2D(64, (3, 3), activation='relu', padding='same') (batch1)
    batch1 = BatchNormalization(axis=1)(conv1)
    pool1 = MaxPooling2D((2, 2)) (batch1)
    
    conv2 = Conv2D(128, (3, 3), activation='relu', padding='same') (pool1)
    batch2 = BatchNormalization(axis=1)(conv2)
    conv2 = Conv2D(128, (3, 3), activation='relu', padding='same') (batch2)
    batch2 = BatchNormalization(axis=1)(conv2)
    pool2 = MaxPooling2D((2, 2)) (batch2)
    
    conv3 = Conv2D(256, (3, 3), activation='relu', padding='same') (pool2)
    batch3 = BatchNormalization(axis=1)(conv3)
    conv3 = Conv2D(256, (3, 3), activation='relu', padding='same') (batch3)
    batch3 = BatchNormalization(axis=1)(conv3)
    pool3 = MaxPooling2D((2, 2)) (batch3)
    
    conv4 = Conv2D(512, (3, 3), activation='relu', padding='same') (pool3)
    batch4 = BatchNormalization(axis=1)(conv4)
    conv4 = Conv2D(512, (3, 3), activation='relu', padding='same') (batch4)
    batch4 = BatchNormalization(axis=1)(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2)) (batch4)
    
    conv5 = Conv2D(1024, (3, 3), activation='relu', padding='same') (pool4)
    batch5 = BatchNormalization(axis=1)(conv5)
    conv5 = Conv2D(1024, (3, 3), activation='relu', padding='same') (batch5)
    batch5 = BatchNormalization(axis=1)(conv5)
    
    up6 = Conv2DTranspose(512, (2, 2), strides=(2, 2), padding='same') (batch5)
    up6 = concatenate([up6, conv4], axis=1)
    conv6 = Conv2D(512, (3, 3), activation='relu', padding='same') (up6)
    batch6 = BatchNormalization(axis=1)(conv6)
    conv6 = Conv2D(512, (3, 3), activation='relu', padding='same') (batch6)
    batch6 = BatchNormalization(axis=1)(conv6)
    
    up7 = Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same') (batch6)
    up7 = concatenate([up7, conv3], axis=1)
    conv7 = Conv2D(256, (3, 3), activation='relu', padding='same') (up7)
    batch7 = BatchNormalization(axis=1)(conv7)
    conv7 = Conv2D(256, (3, 3), activation='relu', padding='same') (batch7)
    batch7 = BatchNormalization(axis=1)(conv7)
    
    up8 = Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same') (batch7)
    up8 = concatenate([up8, conv2], axis=1)
    conv8 = Conv2D(128, (3, 3), activation='relu', padding='same') (up8)
    batch8 = BatchNormalization(axis=1)(conv8)
    conv8 = Conv2D(128, (3, 3), activation='relu', padding='same') (batch8)
    batch8 = BatchNormalization(axis=1)(conv8)
    
    up9 = Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same') (batch8)
    up9 = concatenate([up9, conv1], axis=1)
    conv9 = Conv2D(64, (3, 3), activation='relu', padding='same') (up9)
    batch9 = BatchNormalization(axis=1)(conv9)
    conv9 = Conv2D(64, (3, 3), activation='relu', padding='same') (batch9)
    batch9 = BatchNormalization(axis=1)(conv9)

    conv10 = Conv2D(1, (1, 1), activation='sigmoid')(batch9)

    model = Model(inputs=[inputs], outputs=[conv10])

    model.compile(optimizer=Adam(lr=LR), loss=dice_coef_loss, metrics=[dice_coef])

    
    return model
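
# dice_coef_loss and dice_coef are referenced in unet_model() but not defined in this
# excerpt. A common, hedged sketch of these helpers (smoothed Dice coefficient and its
# loss); the original implementation may differ:
from keras import backend as K

def dice_coef(y_true, y_pred, smooth=1.0):
    # flatten both masks and compute the smoothed Dice overlap
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)

def dice_coef_loss(y_true, y_pred):
    # minimize 1 - Dice so that higher overlap gives lower loss
    return 1. - dice_coef(y_true, y_pred)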
# Example #5
image_input = Input(shape=input_shape)
ip = Pyramidnet(image_input)
ip = MaxPooling2D(pool_size=(2, 2),
                  strides=None,
                  padding='valid',
                  data_format=None)(ip)
ip = Pyramidnet(ip)
ip = MaxPooling2D(pool_size=(2, 2),
                  strides=None,
                  padding='valid',
                  data_format=None)(ip)
ip = Pyramidnet(ip)
ip = fire_incept(ip, fire=32, intercept=32)
ip = fire_squeeze(ip, fire=32, intercept=32)

ip = Conv2D(64, (3, 3))(ip)
ip = BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001)(ip)
ip = LeakyReLU(alpha=0.1)(ip)

ip = Flatten()(ip)

ip = Dense(512)(ip)
ip = BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001)(ip)
ip = LeakyReLU(alpha=0.1)(ip)
ip = Dropout(0.5)(ip)

ip = Dense(256)(ip)
ip = BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001)(ip)
ip = LeakyReLU(alpha=0.1)(ip)
ip = Dropout(0.2)(ip)
#plt.show()
#dictionary = {speckle_labels_n: speckle_labels_mn_n for speckle_labels_n, speckle_labels_mn_n in zip(speckle_labels, speckle_labels_mn)}

X_train, X_test, y_train, y_test = train_test_split(speckle_data, speckle_labels, test_size=0.1, random_state=42)

X_train = X_train.reshape(-1, img_height, img_width, 1)
X_test = X_test.reshape(-1, img_height, img_width, 1)
input_shape = (img_height, img_width, 1)

y_train = y_train.reshape(-1, img_height_test, img_width_test, 1)
y_test = y_test.reshape(-1, img_height_test, img_width_test, 1)
input_shape_test = (img_height_test, img_width_test, 1)


reconstruction = Sequential()
reconstruction.add(Conv2D(128, 1, activation = 'relu', input_shape=input_shape))
reconstruction.add(MaxPooling2D(pool_size = (2,2)))
reconstruction.add(Dropout(0.25))
reconstruction.add(Conv2D(128, 1, activation = 'relu'))
reconstruction.add(Reshape((1, 128*128*128), input_shape=(128, 128, 128)))
reconstruction.add(Dense(16384, activation='relu'))
reconstruction.add(Dropout(0.25))
reconstruction.add(Dense(4096, activation='relu'))
reconstruction.add(Dropout(0.25))
reconstruction.add(Dense(4096, activation='relu'))
reconstruction.add(Dropout(0.25))
reconstruction.add(Reshape((64, 64, 1), input_shape=(1, 4096)))
reconstruction.add(Conv2D(64, 1, activation = 'relu', input_shape=input_shape_test))
reconstruction.add(Dropout(0.25))
reconstruction.add(Conv2D(32, 1, activation = 'relu'))
reconstruction.add(Dropout(0.25))
X_train, X_test,y_train,y_test=get_train_test()

max_len=13

X_train= X_train.reshape(X_train.shape[0],20,max_len,1)
X_test= X_test.reshape(X_test.shape[0],20,max_len,1)

y_train_hot = to_categorical(y_train)
y_test_hot = to_categorical(y_test)

print(np.shape(X_train))

model=Sequential()

model.add(Conv2D(32, kernel_size=(2,2), activation="relu",
          input_shape=(20,max_len,1)))
model.add(Conv2D(64,kernel_size=(2,2),activation="relu"))
model.add(Conv2D(128,kernel_size=(2,2),activation="relu"))

model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())

model.add(Dense(128,activation="relu"))
model.add(Dropout(0.25))
model.add(Dense(64,activation="relu"))
model.add(Dropout(0.25))
model.add(Dense(4,activation="softmax"))

model.compile(loss=keras.losses.categorical_crossentropy,
             optimizer=keras.optimizers.Adadelta(),
# Example #8
DROPOUT = 0.5
weight_decay = None

batch_size = 128
epochs = 1
FN = "mnist"

regularizer = l2(weight_decay) if weight_decay else None

basic_model = Sequential(name='basic')

basic_model.add(
    Conv2D(nb_filters,
           (nb_conv, nb_conv),
           padding='valid',
           input_shape=(img_rows, img_cols, img_color)))
basic_model.add(Activation('relu'))
basic_model.add(Conv2D(nb_filters, (nb_conv, nb_conv)))
basic_model.add(Activation('relu'))
basic_model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
basic_model.add(Dropout(0.25))

basic_model.add(Conv2D(nb_filters * 2, (nb_conv, nb_conv), padding='same'))
basic_model.add(Activation('relu'))
basic_model.add(Conv2D(nb_filters * 2, (nb_conv, nb_conv)))
basic_model.add(Activation('relu'))
basic_model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
basic_model.add(Dropout(0.25))
	def stem(self, X, filters, stage='stem', size=3, train=True): #Stem block for inputs and heads
		stem = Conv2D(filters=filters, kernel_size=(size,size), strides=(1,1), padding='same', data_format='channels_last', name='Conv_' + stage, kernel_initializer=glorot_uniform(), 
			trainable=train, kernel_regularizer=regularizers.l2(self.lambd))(X)
		stem = BatchNormalization(axis=-1, name="bn_block" + stage + '_1', epsilon=1e-5)(stem)
		stem = Activation('relu')(stem)
		return stem
# Example #10
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

model = Sequential()
model.add(
    Conv2D(32,
           kernel_size=(5, 5),
           padding='same',
           activation='relu',
           use_bias=False,
           input_shape=input_shape))
model.add(
    Conv2D(32,
           kernel_size=(3, 3),
           padding='same',
           activation='relu',
           use_bias=False,
           input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))

model.compile(loss=keras.losses.categorical_crossentropy,
# Example #11
# The data, split between train and test sets:
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# Convert class vectors to binary class matrices.
print(y_train[0:3])
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
print(y_train[0:3])


model = Sequential()
model.add(Conv2D(32, (3, 3), padding='same', input_shape=x_train.shape[1:]))
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Flatten())
model.add(Dense(512))
# Example #12
x_test = x_test.reshape(10000, 28, 28, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255

print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

y_train = keras.utils.to_categorical(y_train, num_classifications)
y_test = keras.utils.to_categorical(y_test, num_classifications)

model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
                 activation='relu',
                 input_shape=(28, 28, 1)))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classifications, activation='softmax'))

model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])

model.fit(x_train, y_train,
          batch_size=batch_size,
# Example #13
def SqueezeUNet(inputs,
                num_classes=None,
                deconv_ksize=3,
                dropout=0.5,
                activation='sigmoid'):
    """SqueezeUNet is a implementation based in SqueezeNetv1.1 and unet for semantic segmentation
    :param inputs: input layer.
    :param num_classes: number of classes.
    :param deconv_ksize: (width and height) or integer of the 2D deconvolution window.
    :param dropout: dropout rate
    :param activation: type of activation at the top layer.
    :returns: SqueezeUNet model
    """
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    if num_classes is None:
        num_classes = K.int_shape(inputs)[channel_axis]

    x01 = Conv2D(64, (3, 3),
                 strides=(2, 2),
                 padding='same',
                 activation='relu',
                 name='conv1')(inputs)
    x02 = MaxPooling2D(pool_size=(3, 3),
                       strides=(2, 2),
                       name='pool1',
                       padding='same')(x01)

    x03 = fire_module(x02, fire_id=2, squeeze=16, expand_1x1=64, expand_3x3=64)
    x04 = fire_module(x03, fire_id=3, squeeze=16, expand_1x1=64, expand_3x3=64)
    x05 = MaxPooling2D(pool_size=(3, 3),
                       strides=(2, 2),
                       name='pool3',
                       padding="same")(x04)

    x06 = fire_module(x05,
                      fire_id=4,
                      squeeze=32,
                      expand_1x1=128,
                      expand_3x3=128)
    x07 = fire_module(x06,
                      fire_id=5,
                      squeeze=32,
                      expand_1x1=128,
                      expand_3x3=128)
    x08 = MaxPooling2D(pool_size=(3, 3),
                       strides=(2, 2),
                       name='pool5',
                       padding="same")(x07)

    x09 = fire_module(x08,
                      fire_id=6,
                      squeeze=48,
                      expand_1x1=192,
                      expand_3x3=192)
    x10 = fire_module(x09,
                      fire_id=7,
                      squeeze=48,
                      expand_1x1=192,
                      expand_3x3=192)
    x11 = fire_module(x10,
                      fire_id=8,
                      squeeze=64,
                      expand_1x1=256,
                      expand_3x3=256)
    x12 = fire_module(x11,
                      fire_id=9,
                      squeeze=64,
                      expand_1x1=256,
                      expand_3x3=256)

    if dropout != 0.0:
        x12 = Dropout(dropout)(x12)

    up1 = concatenate([
        Conv2DTranspose(192, deconv_ksize, strides=(1, 1),
                        padding='same')(x12),
        x10,
    ],
                      axis=channel_axis)
    up1 = fire_module(up1, fire_id=10, squeeze=48, expand=192)

    up2 = concatenate([
        Conv2DTranspose(128, deconv_ksize, strides=(1, 1),
                        padding='same')(up1),
        x08,
    ],
                      axis=channel_axis)
    up2 = fire_module(up2, fire_id=11, squeeze=32, expand=128)

    up3 = concatenate([
        Conv2DTranspose(64, deconv_ksize, strides=(2, 2), padding='same')(up2),
        x05,
    ],
                      axis=channel_axis)
    up3 = fire_module(up3, fire_id=12, squeeze=16, expand=64)

    up4 = concatenate([
        Conv2DTranspose(32, deconv_ksize, strides=(2, 2), padding='same')(up3),
        x02,
    ],
                      axis=channel_axis)
    up4 = fire_module(up4, fire_id=13, squeeze=16, expand=32)
    up4 = UpSampling2D(size=(2, 2))(up4)

    x = concatenate([up4, x01], axis=channel_axis)
    x = Conv2D(64, (3, 3), strides=(1, 1), padding='same',
               activation='relu')(x)
    x = UpSampling2D(size=(2, 2))(x)
    x = Conv2D(num_classes, (1, 1), activation=activation)(x)

    return Model(inputs=inputs, outputs=x)
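
# A hedged usage sketch for SqueezeUNet (assumes fire_module and the Keras imports used
# above are in scope; the input size and compile settings below are illustrative
# assumptions, not from the original source):
# inputs = Input(shape=(256, 256, 3))
# model = SqueezeUNet(inputs, num_classes=1, dropout=0.5, activation='sigmoid')
# model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])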
# Example #14
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

model = Sequential()
model.add(Conv2D(128, kernel_size=(kernel_size, kernel_size),
                 activation='relu',
                 input_shape=input_shape))
model.add(Conv2D(128, (kernel_size, kernel_size), activation='relu'))
model.add(Conv2D(96, (kernel_size, kernel_size), activation='relu'))
model.add(MaxPooling2D(pool_size=(pool_size, pool_size)))
model.add(Conv2D(64, kernel_size=(kernel_size, kernel_size), activation='relu'))
model.add(Conv2D(64, (kernel_size, kernel_size), activation='relu'))
model.add(Conv2D(64, (kernel_size, kernel_size), activation='relu'))
model.add(Dropout(Dropout_rate))
model.add(Flatten())
model.add(Dense(64, activation='relu'))
model.add(Dropout(Dropout_rate))
model.add(Dense(num_classes, activation='softmax'))

model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adam(),
# Define the CNN model ----------------------------------------
# Includes:
# 1. 3x3 convolution filters,
# 2. layer weight regularizers (L2 regularization),
# 3. ELU activation,
# 4. Batch normalization,
# 5. Max pooling,
# 6. Dropout
# Hyperparameters: the placement of dropout (and its rate) and of batch normalization can be changed
WEIGHT_DECAY = 1e-4

model = Sequential()
# Conv layer 1
model.add(
    Conv2D(32, (3, 3), padding='same', kernel_regularizer=regularizers.l2(WEIGHT_DECAY), input_shape=x_train.shape[1:]))
model.add(Activation('elu'))
model.add(BatchNormalization())
# Conv layer 2
model.add(Conv2D(32, (3, 3), padding='same', kernel_regularizer=regularizers.l2(WEIGHT_DECAY)))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
# Conv layer 3
model.add(Conv2D(64, (3, 3), padding='same', kernel_regularizer=regularizers.l2(WEIGHT_DECAY)))
model.add(Activation('elu'))
model.add(BatchNormalization())
# Conv layer 4
model.add(Conv2D(64, (3, 3), padding='same', kernel_regularizer=regularizers.l2(WEIGHT_DECAY)))
model.add(Activation('elu'))
save_dir = os.path.join(os.getcwd(), 'saved_models')
model_name = 'test3_nopad.h5'

(x_train, y_train), (x_test, y_test) = cifar10.load_data()
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

y_train = keras.utils.to_categorical(y_train, 10)
y_test = keras.utils.to_categorical(y_test, 10)
with K.tf.device('/gpu:0'):
	model = Sequential()

# 32x32x32
	model.add(Conv2D(32, (3, 3), padding='valid', input_shape=x_train.shape[1:]))
	model.add(Activation('relu'))
	model.add(Conv2D(32, (3, 3), padding='valid'))
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(2, 2)))

# 16x16x64
	model.add(Conv2D(64, (3, 3), padding='same'))
	model.add(Activation('relu'))
	model.add(Conv2D(64, (3, 3), padding='same'))
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(2, 2)))

# 8x8x128

	model.add(Conv2D(128, (3, 3), padding='same'))
# print(np.shape(X_validation))

import tensorflow
from tensorflow import keras
from keras.models import Sequential
from keras.layers import Conv2D, Activation, MaxPooling2D, Dense, Flatten, Dropout
from keras.preprocessing.image import ImageDataGenerator
from IPython.display import display
from PIL import Image
from keras.models import load_model
import numpy as np
from sklearn import metrics

print(tensorflow.__version__)
classifier = Sequential()
classifier.add(Conv2D(64, (3, 3), input_shape=(150, 150, 3)))
classifier.add(Activation('relu'))
classifier.add(MaxPooling2D(pool_size=(2, 2)))
classifier.add(Conv2D(64, (3, 3)))
classifier.add(Activation('relu'))
classifier.add(MaxPooling2D(pool_size=(2, 2)))
classifier.add(Conv2D(64, (3, 3)))
classifier.add(Activation('relu'))
classifier.add(MaxPooling2D(pool_size=(2, 2)))
classifier.add(Flatten())
classifier.add(Dense(32))
classifier.add(Activation('relu'))
classifier.add(Dropout(0.5))
classifier.add(Dense(1))
classifier.add(Activation('sigmoid'))
classifier.compile(optimizer='adam',
  x["x"] = x["x"].apply(fix_pos)
  x["y"] = x["y"].astype(int)

  y = x.iloc[:, -2:]
  x = x.iloc[:, 1:-2]

  img_x = np.zeros(shape = (x.shape[0], 25, 25, 1, ))
  for key, value in beacon_coords.items():
    img_x[:, value[0], value[1], 0] -= x[key].values/200
    #print(key, value)
    train_x, val_x, train_y, val_y = train_test_split(img_x, y, test_size = .2, shuffle = False)

  inputs = Input(shape=(train_x.shape[1], train_x.shape[2], 1))

# a layer instance is callable on a tensor, and returns a tensor
  a = Conv2D(12, kernel_size=(7,7), activation='relu', padding = "valid", data_format="channels_last")(inputs)
  a = MaxPooling2D(2)(a)
  a = Conv2D(12, kernel_size=(5,5), activation='relu', padding = "valid", data_format="channels_last")(a)
  a = MaxPooling2D(2)(a)
#a = Conv2D(12, kernel_size=(3,3), activation='relu', padding = "valid", data_format="channels_last")(a)
#a = MaxPooling2D(2)(a)
  a = Dense(24, activation='relu')(Flatten()(a))
  predictions = Dense(2, activation='relu')(a)

# This creates a model that includes
# the Input layer, the convolutional layers and two Dense layers
  model = Model(inputs=inputs, outputs=predictions)
  model.compile(optimizer=SGD(args.learning_rate,args.momentum),
              loss=rmse,
              metrics=['mse'])
  hist = model.fit(x = train_x, y = train_y, validation_data = (val_x,val_y), epochs=100, batch_size=20,  verbose=1)
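
# rmse is used as the loss above but not defined in this excerpt. A minimal, hedged
# sketch of a root-mean-squared-error loss in Keras backend terms (the original may differ):
from keras import backend as K

def rmse(y_true, y_pred):
    # square the error per output, average over the last axis, then take the root
    return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))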
'''
'''
x = Convolution2D(32*2**1, 3, 3, activation='relu')(x)
x = Convolution2D(32*2**1, 3, 3, activation='relu')(x)
x = MaxPooling2D((2, 2))(x)
'''


model = Sequential()

for i in range(4):
    model.add(Conv2D(
        # filters: the number of convolution kernels (i.e. the dimensionality of the output)
        filters=32*2**i,
        # kernel_size: a single integer or a list/tuple of two integers giving the width and height of the convolution kernel.
        # If a single integer, the same length is used along every spatial dimension.
        kernel_size=(3, 3),
        # activation: the activation function, given as the name of a predefined activation or an element-wise function.
        # If this argument is not given, no activation is applied (i.e. the linear activation a(x)=x).
        activation='relu',
        input_shape=(height, width, 3)))

    model.add(Conv2D(
        # filters: the number of convolution kernels (i.e. the dimensionality of the output)
        filters=32*2**i,
        # kernel_size: a single integer or a list/tuple of two integers giving the width and height of the convolution kernel.
        # If a single integer, the same length is used along every spatial dimension.
        kernel_size=(3, 3),
        # activation: the activation function, given as the name of a predefined activation or an element-wise function.
        # If this argument is not given, no activation is applied (i.e. the linear activation a(x)=x).
        activation='relu'))
# Example #20
	def CreateModel(self):
		'''
		Define the CNN/LSTM/CTC model using the Keras functional API.
		Input layer: a sequence of 200-dimensional feature vectors; the maximum length of one utterance is set to 1600 (about 16 s).
		Hidden layers: 3x3 convolution layers
		Hidden layers: pooling layers with a pooling window of size 2
		Hidden layers: Dropout layers that drop 20% of the neurons to reduce overfitting
		Hidden layers: fully connected layers
		Target output layer: a fully connected layer with self.MS_OUTPUT_SIZE units and softmax activation
		Output layer: a custom CTC layer that uses the CTC loss to produce connectionist temporal classification outputs
		
		'''
		# Each frame is represented by 13 MFCC features plus their 13 first-order and 13 second-order deltas; the maximum signal sequence length is 1500
		input_data = Input(name='the_input', shape=(self.AUDIO_LENGTH, self.AUDIO_FEATURE_LENGTH, 1))
		
		layer_h1 = Conv2D(32, (3,3), use_bias=True, activation='relu', padding='same', kernel_initializer='he_normal')(input_data) # convolution layer
		layer_h1 = Dropout(0.1)(layer_h1)
		layer_h2 = Conv2D(32, (3,3), use_bias=True, activation='relu', padding='same', kernel_initializer='he_normal')(layer_h1) # convolution layer
		layer_h3 = MaxPooling2D(pool_size=2, strides=None, padding="valid")(layer_h2) # pooling layer
		#layer_h3 = Dropout(0.2)(layer_h2) # randomly drop some connections to reduce overfitting
		layer_h3 = Dropout(0.1)(layer_h3)
		layer_h4 = Conv2D(64, (3,3), use_bias=True, activation='relu', padding='same', kernel_initializer='he_normal')(layer_h3) # convolution layer
		layer_h4 = Dropout(0.2)(layer_h4)
		layer_h5 = Conv2D(64, (3,3), use_bias=True, activation='relu', padding='same', kernel_initializer='he_normal')(layer_h4) # convolution layer
		layer_h6 = MaxPooling2D(pool_size=2, strides=None, padding="valid")(layer_h5) # pooling layer
		
		layer_h6 = Dropout(0.2)(layer_h6)
		layer_h7 = Conv2D(128, (3,3), use_bias=True, activation='relu', padding='same', kernel_initializer='he_normal')(layer_h6) # convolution layer
		layer_h7 = Dropout(0.3)(layer_h7)
		layer_h8 = Conv2D(128, (3,3), use_bias=True, activation='relu', padding='same', kernel_initializer='he_normal')(layer_h7) # convolution layer
		layer_h9 = MaxPooling2D(pool_size=2, strides=None, padding="valid")(layer_h8) # pooling layer
		
		layer_h9 = Dropout(0.3)(layer_h9)
		layer_h10 = Conv2D(128, (3,3), use_bias=True, activation='relu', padding='same', kernel_initializer='he_normal')(layer_h9) # convolution layer
		layer_h10 = Dropout(0.4)(layer_h10)
		layer_h11 = Conv2D(128, (3,3), use_bias=True, activation='relu', padding='same', kernel_initializer='he_normal')(layer_h10) # convolution layer
		layer_h12 = MaxPooling2D(pool_size=1, strides=None, padding="valid")(layer_h11) # pooling layer
		
		#test=Model(inputs = input_data, outputs = layer_h12)
		#test.summary()
		
		layer_h10 = Reshape((200, 3200))(layer_h12) # Reshape layer
		#layer_h5 = LSTM(256, activation='relu', use_bias=True, return_sequences=True)(layer_h4) # LSTM layer
		#layer_h6 = Dropout(0.2)(layer_h5) # randomly drop some connections to reduce overfitting
		layer_h10 = Dropout(0.4)(layer_h10)
		layer_h11 = Dense(128, activation="relu", use_bias=True, kernel_initializer='he_normal')(layer_h10) # fully connected layer
		layer_h11 = Dropout(0.5)(layer_h11)
		layer_h12 = Dense(self.MS_OUTPUT_SIZE, use_bias=True, kernel_initializer='he_normal')(layer_h11) # fully connected layer
		
		y_pred = Activation('softmax', name='Activation0')(layer_h12)
		model_data = Model(inputs = input_data, outputs = y_pred)
		#model_data.summary()
		
		labels = Input(name='the_labels', shape=[self.label_max_string_length], dtype='float32')
		input_length = Input(name='input_length', shape=[1], dtype='int64')
		label_length = Input(name='label_length', shape=[1], dtype='int64')
		# Keras doesn't currently support loss functions with extra parameters,
		# so the CTC loss is implemented in a Lambda layer (a sketch of such a lambda follows this method)
		
		#layer_out = Lambda(ctc_lambda_func,output_shape=(self.MS_OUTPUT_SIZE, ), name='ctc')([y_pred, labels, input_length, label_length])#(layer_h6) # CTC
		loss_out = Lambda(self.ctc_lambda_func, output_shape=(1,), name='ctc')([y_pred, labels, input_length, label_length])
		
		
		
		model = Model(inputs=[input_data, labels, input_length, label_length], outputs=loss_out)
		
		#model.summary()
		
		# clipnorm seems to speed up convergence
		#sgd = SGD(lr=0.0001, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=5)
		ada_d = Adadelta(lr = 0.01, rho = 0.95, epsilon = 1e-06)
		
		#model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=sgd)
		model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer = ada_d)
		
		
		# captures output of softmax so we can decode the output during visualization
		test_func = K.function([input_data], [y_pred])
		
		print('[*Info] Model created and compiled successfully')
		return model, model_data
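	# self.ctc_lambda_func is referenced above but not shown in this excerpt. A minimal,
	# hedged sketch of such a method, assuming it simply wraps K.ctc_batch_cost (the
	# original implementation may differ):
	#
	# def ctc_lambda_func(self, args):
	# 	y_pred, labels, input_length, label_length = args
	# 	# K.ctc_batch_cost expects (labels, y_pred, input_length, label_length)
	# 	return K.ctc_batch_cost(labels, y_pred, input_length, label_length)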
                                   target_size=(224,224),
                                   subset='training')

validgen = gen.flow_from_dataframe(train,
                                   directory='./images/comp/',
                                   x_col='fileid',
                                   y_col='Type1',
                                   class_mode='categorical',
                                   target_size=(224,224),
                                   subset='validation')

testdatagen = ImageDataGenerator(vertical_flip=False)
testgen = testdatagen.flow_from_dataframe(test, directory='./images/comp/', x_col='fileid', y_col='Type1', target_size=(224,224))

model = Sequential()
model.add(Conv2D(32, kernel_size=2, padding='same', activation='relu',
                 input_shape=(224, 224, 3)))
model.add(Conv2D(32, kernel_size=2, activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, kernel_size=2, activation='relu'))
model.add(Conv2D(64, kernel_size=2, activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())

model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))

model.add(Dense(18, activation='softmax'))
def inception_resnet_block(X, scale, block_type, block_idx, activation='relu'):
    if block_type == 'block35':
        branch_0 = Conv2D(32, 1, name='block35/branch_0/'+str(block_idx), padding='same')(X)
        branch_0 = BatchNormalization(axis=3, name='block35/bn_branch_0/'+str(block_idx))(branch_0)
        branch_0 = Activation('relu', name='block35/branch_0/relu/'+str(block_idx))(branch_0)

        branch_1 = Conv2D(32, 1, name='block35/branch_1_1/'+str(block_idx), padding='same')(X)
        branch_1 = BatchNormalization(axis=3, name='block35/bn_branch_1_1/'+str(block_idx))(branch_1)
        branch_1 = Activation('relu', name='block35/branch_1_1/relu/'+str(block_idx))(branch_1)

        branch_1 = Conv2D(32, 3, name='block35/branch_1_2/'+str(block_idx), padding='same')(branch_1)
        branch_1 = BatchNormalization(axis=3, name='block35/bn_branch_1_2/'+str(block_idx))(branch_1)
        branch_1 = Activation('relu', name='block35/branch_1_2/relu/'+str(block_idx))(branch_1)

        branch_2 = Conv2D(32, 1, name='block35/branch_2_1/'+str(block_idx), padding='same')(X)
        branch_2 = BatchNormalization(axis=3, name='block35/bn_branch_2_1/'+str(block_idx))(branch_2)
        branch_2 = Activation('relu', name='block35/branch_2_1/relu/'+str(block_idx))(branch_2)

        branch_2 = Conv2D(48, 3, name='block35/branch_2_2/'+str(block_idx), padding='same')(branch_2)
        branch_2 = BatchNormalization(axis=3, name='block35/bn_branch_2_2/'+str(block_idx))(branch_2)
        branch_2 = Activation('relu', name='block35/branch_2_2/relu/'+str(block_idx))(branch_2)

        branch_2 = Conv2D(64, 3, name='block35/branch_2_3/'+str(block_idx), padding='same')(branch_2)
        branch_2 = BatchNormalization(axis=3, name='block35/bn_branch_2_3/'+str(block_idx))(branch_2)
        branch_2 = Activation('relu', name='block35/branch_2_3/relu/'+str(block_idx))(branch_2)

        branches = [branch_0, branch_1, branch_2]

    elif block_type == 'block17':
        branch_0 = Conv2D(192, 1, name='block17/branch_0/'+str(block_idx), padding='same')(X)
        branch_0 = BatchNormalization(axis=3, name='block17/bn_branch_0/'+str(block_idx))(branch_0)
        branch_0 = Activation('relu', name='block17/branch_0/relu/'+str(block_idx))(branch_0)

        branch_1 = Conv2D(128, 1, name='block17/branch_1_1/'+str(block_idx), padding='same')(X)
        branch_1 = BatchNormalization(axis=3, name='block17/bn_branch_1_1/'+str(block_idx))(branch_1)
        branch_1 = Activation('relu', name='block17/branch_1_1/relu/'+str(block_idx))(branch_1)

        branch_1 = Conv2D(160, [1, 7], name='block17/branch_1_2/'+str(block_idx), padding='same')(branch_1)
        branch_1 = BatchNormalization(axis=3, name='block17/bn_branch_1_2/'+str(block_idx))(branch_1)
        branch_1 = Activation('relu', name='block17/branch_1_2/relu/'+str(block_idx))(branch_1)

        branch_1 = Conv2D(192, [7, 1], name='block17/branch_1_3/'+str(block_idx), padding='same')(branch_1)
        branch_1 = BatchNormalization(axis=3, name='block17/bn_branch_1_3/'+str(block_idx))(branch_1)
        branch_1 = Activation('relu', name='block17/branch_1_3/relu/'+str(block_idx))(branch_1)
        branches = [branch_0, branch_1]

    elif block_type == 'block8':

        branch_0 = Conv2D(192, 1, name='block8/branch_0/'+str(block_idx), padding='same')(X)
        branch_0 = BatchNormalization(axis=3, name='block8/bn_branch_0/'+str(block_idx))(branch_0)
        branch_0 = Activation('relu', name='block8/branch_0/relu/'+str(block_idx))(branch_0)

        branch_1 = Conv2D(192, 1, name='block8/branch_1_1/'+str(block_idx), padding='same')(X)
        branch_1 = BatchNormalization(axis=3, name='block8/bn_branch_1_1/'+str(block_idx))(branch_1)
        branch_1 = Activation('relu', name='block8/branch_1_1/relu/'+str(block_idx))(branch_1)

        branch_1 = Conv2D(224, [1, 3], name='block8/branch_1_2/'+str(block_idx), padding='same')(branch_1)
        branch_1 = BatchNormalization(axis=3, name='block8/bn_branch_1_2/'+str(block_idx))(branch_1)
        branch_1 = Activation('relu', name='block8/branch_1_2/relu/'+str(block_idx))(branch_1)

        branch_1 = Conv2D(256, [3, 1], name='block8/branch_1_3/'+str(block_idx), padding='same')(branch_1)
        branch_1 = BatchNormalization(axis=3, name='block8/bn_branch_1_3/'+str(block_idx))(branch_1)
        branch_1 = Activation('relu', name='block8/branch_1_3/relu/'+str(block_idx))(branch_1)
        branches = [branch_0, branch_1]
    else:
        raise ValueError('unknown block type')

    block_name = block_type + '_' + str(block_idx)
    mixed = Concatenate(axis=3, name=block_name + '_mixed')(branches)
    up = Conv2D(K.int_shape(X)[3], 1, name=block_name + '_conv', padding='same')(mixed)

    X = Lambda(lambda inputs, scale: inputs[0] + inputs[1] * scale, output_shape=K.int_shape(X)[1:],arguments={'scale': scale}, name=block_name+"_Lambda")([X, up])
    if activation is not None:
        X = Activation(activation, name=block_name + '_ac')(X)

    return X
# Example #23
def U_Net(img_rows, img_cols, color_type=1, num_class=1):

    nb_filter = [32, 64, 128, 256, 512]
    act = 'elu'

    # Handle Dimension Ordering for different backends
    global bn_axis
    if K.image_data_format() == 'channels_last':
        bn_axis = 3
        img_input = Input(shape=(img_rows, img_cols, color_type),
                          name='main_input')
    else:
        bn_axis = 1
        img_input = Input(shape=(color_type, img_rows, img_cols),
                          name='main_input')

    conv1_1 = standard_unit(img_input, stage='11', nb_filter=nb_filter[0])
    pool1 = MaxPooling2D((2, 2), strides=(2, 2), name='pool1')(conv1_1)

    conv2_1 = standard_unit(pool1, stage='21', nb_filter=nb_filter[1])
    pool2 = MaxPooling2D((2, 2), strides=(2, 2), name='pool2')(conv2_1)

    conv3_1 = standard_unit(pool2, stage='31', nb_filter=nb_filter[2])
    pool3 = MaxPooling2D((2, 2), strides=(2, 2), name='pool3')(conv3_1)

    conv4_1 = standard_unit(pool3, stage='41', nb_filter=nb_filter[3])
    pool4 = MaxPooling2D((2, 2), strides=(2, 2), name='pool4')(conv4_1)

    conv5_1 = standard_unit(pool4, stage='51', nb_filter=nb_filter[4])

    up4_2 = Conv2DTranspose(nb_filter[3], (2, 2),
                            strides=(2, 2),
                            name='up42',
                            padding='same')(conv5_1)
    conv4_2 = concatenate([up4_2, conv4_1], name='merge42', axis=bn_axis)
    conv4_2 = standard_unit(conv4_2, stage='42', nb_filter=nb_filter[3])

    up3_3 = Conv2DTranspose(nb_filter[2], (2, 2),
                            strides=(2, 2),
                            name='up33',
                            padding='same')(conv4_2)
    conv3_3 = concatenate([up3_3, conv3_1], name='merge33', axis=bn_axis)
    conv3_3 = standard_unit(conv3_3, stage='33', nb_filter=nb_filter[2])

    up2_4 = Conv2DTranspose(nb_filter[1], (2, 2),
                            strides=(2, 2),
                            name='up24',
                            padding='same')(conv3_3)
    conv2_4 = concatenate([up2_4, conv2_1], name='merge24', axis=bn_axis)
    conv2_4 = standard_unit(conv2_4, stage='24', nb_filter=nb_filter[1])

    up1_5 = Conv2DTranspose(nb_filter[0], (2, 2),
                            strides=(2, 2),
                            name='up15',
                            padding='same')(conv2_4)
    conv1_5 = concatenate([up1_5, conv1_1], name='merge15', axis=bn_axis)
    conv1_5 = standard_unit(conv1_5, stage='15', nb_filter=nb_filter[0])

    unet_output = Conv2D(num_class, (1, 1),
                         activation='sigmoid',
                         name='output',
                         kernel_initializer='he_normal',
                         padding='same',
                         kernel_regularizer=l2(1e-4))(conv1_5)

    model = Model(inputs=img_input, outputs=unet_output)

    return model
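
# standard_unit is referenced throughout U_Net but not defined in this excerpt. A hedged
# sketch of the usual two-convolution block in UNet-style code (assumes Conv2D and l2 are
# imported as used above; the original block may also include dropout):
def standard_unit(input_tensor, stage, nb_filter, kernel_size=3):
    # two stacked convolutions with ELU activation and L2 regularization
    x = Conv2D(nb_filter, (kernel_size, kernel_size), activation='elu', padding='same',
               kernel_initializer='he_normal', kernel_regularizer=l2(1e-4),
               name='conv' + stage + '_1')(input_tensor)
    x = Conv2D(nb_filter, (kernel_size, kernel_size), activation='elu', padding='same',
               kernel_initializer='he_normal', kernel_regularizer=l2(1e-4),
               name='conv' + stage + '_2')(x)
    return x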
def InceptionResnetV2(X_input):
    #X_input = Input(input_shape)
    #print( X_input )
    # 299 X 299 X 3  ->   # 149 X 149 X 32
    X = Cropping2D(cropping=((11, 11), (11, 11)))(X_input)
    X = Conv2D(32, 3, strides=2, name='conv1', padding='valid')(X)
    X = BatchNormalization(axis=3, name='bn1')(X)
    X = Activation('relu', name="arelu1")(X)
    # 149 X 149 X 32   ->  # 147 x 147 X 32
    X = Conv2D(32, 3, name='conv2', padding='valid')(X)
    X = BatchNormalization(axis=3, name='bn2')(X)
    X = Activation('relu', name="arelu2")(X)
    # 147 x 147 X 32   ->    # 147 X 147 X 64
    X = Conv2D(64, 3, name='conv3', padding='same')(X)
    X = BatchNormalization(axis=3, name='bn3')(X)
    X = Activation('relu',name="arelu3")(X)
    # 147 X 147 X 64   ->    # 73 X 73 X 64
    X = MaxPooling2D(3, strides=2, name="mp1")(X)
    # 73 X 73 X 64    ->    # 73 X 73 X 80
    X = Conv2D(80, 1, name='conv4', padding='valid')(X)
    X = BatchNormalization(axis=3, name='bn4')(X)
    X = Activation('relu', name="arelu4")(X)
    # 73 X 73 X 80    ->    # 71 X 71 X 192
    X = Conv2D(192, 3, name='conv5', padding='valid')(X)
    X = BatchNormalization(axis=3, name='bn5')(X)
    X = Activation('relu', name="arelu5")(X)
    # 71 X 71 X 192  ->  # 35 X 35 X 192
    X = MaxPooling2D(3, strides=2, name="mp2")(X)
    # 35 X 35 X 192 -> 35 X 35 X 96
    branch_0 = Conv2D(96, 1, name='mixed_5b/branch_0', padding='same')(X)
    branch_0 = BatchNormalization(axis=3, name='mixed_5b/bn_branch_0')(branch_0)
    branch_0 = Activation('relu',name="arelu6")(branch_0)

    # 35 X 35 X 192 -> 35 X 35 X 64
    branch_1 = Conv2D(48, 1, name='mixed_5b/branch_1_1', padding='same')(X)
    branch_1 = BatchNormalization(axis=3, name='mixed_5b/bn_branch_1_1')(branch_1)
    branch_1 = Activation('relu',name="arelu7")(branch_1)

    branch_1 = Conv2D(64, 5, name='mixed_5b/branch_1_2', padding='same')(branch_1)
    branch_1 = BatchNormalization(axis=3, name='mixed_5b/bn_branch_1_2')(branch_1)
    branch_1 = Activation('relu',name="arelu8")(branch_1)

    # 35 X 35 X 192 -> 35 X 35 X 96
    branch_2 = Conv2D(64, 1, name='mixed_5b/branch_2_1', padding='same')(X)
    branch_2 = BatchNormalization(axis=3, name='mixed_5b/bn_branch_2_1')(branch_2)
    branch_2 = Activation('relu',name="arelu9")(branch_2)

    branch_2 = Conv2D(96, 3, name='mixed_5b/branch_2_2', padding='same')(branch_2)
    branch_2 = BatchNormalization(axis=3, name='mixed_5b/bn_branch_2_2')(branch_2)
    branch_2 = Activation('relu',name="arelu10")(branch_2)

    branch_2 = Conv2D(96, 3, name='mixed_5b/branch_2_3', padding='same')(branch_2)
    branch_2 = BatchNormalization(axis=3, name='mixed_5b/bn_branch_2_3')(branch_2)
    branch_2 = Activation('relu',name="arelu11")(branch_2)

    # 35 X 35 X 192 -> 35 X 35 X 64
    branch_pool = AveragePooling2D(3, strides=1, padding='same',name="ap1")(X)
    branch_pool = Conv2D(64, 1, name='mixed_5b/branch_pool_1', padding='same')(branch_pool)
    branch_pool = BatchNormalization(axis=3, name='mixed_5b/bn_branch_pool_1')(branch_pool)
    branch_pool = Activation('relu',name="arelu12")(branch_pool)

    branches = [branch_0, branch_1, branch_2, branch_pool]
    X = Concatenate(axis=3, name='mixed_5b')(branches)  # 35 X 35 X 320

    # 10x block35
    #for block_idx in range(1, 11):
    #    X = inception_resnet_block(X, scale=0.17, block_type='block35', block_idx=block_idx)

    branch_0 = Conv2D(384, 3, strides=2, name='mixed_6a/branch_0', padding='valid')(X)
    branch_0 = BatchNormalization(axis=3, name='mixed_6a/bn_branch_0')(branch_0)
    branch_0 = Activation('relu',name="arelu13")(branch_0)

    branch_1 = Conv2D(256, 1, name='mixed_6a/branch_1_1', padding='same')(X)
    branch_1 = BatchNormalization(axis=3, name='mixed_6a/bn_branch_1_1')(branch_1)
    branch_1 = Activation('relu',name="arelu14")(branch_1)

    branch_1 = Conv2D(256, 3, name='mixed_6a/branch_1_2', padding='same')(branch_1)
    branch_1 = BatchNormalization(axis=3, name='mixed_6a/bn_branch_1_2')(branch_1)
    branch_1 = Activation('relu',name="arelu15")(branch_1)

    branch_1 = Conv2D(384, 3, strides=2, name='mixed_6a/branch_1_3', padding='valid')(branch_1)
    branch_1 = BatchNormalization(axis=3, name='mixed_6a/bn_branch_1_3')(branch_1)
    branch_1 = Activation('relu',name="arelu16")(branch_1)

    branch_pool = MaxPooling2D(3, strides=2, padding='valid', name="mp4")(X)
    branches = [branch_0, branch_1, branch_pool]
    X = Concatenate(axis=3, name='mixed_6a')(branches)

    #for block_idx in range(1, 21):
    #    X = inception_resnet_block(X, scale=0.1, block_type='block17', block_idx=block_idx)

    branch_0 = Conv2D(256, 1, name='mixed_7a/branch_0', padding='same')(X)
    branch_0 = BatchNormalization(axis=3, name='mixed_7a/bn_branch_0')(branch_0)
    branch_0 = Activation('relu',name="arelu17")(branch_0)

    branch_0 = Conv2D(384, 3, strides=2, name='mixed_7a/branch_0_1', padding='valid')(branch_0)
    branch_0 = BatchNormalization(axis=3, name='mixed_7a/bn_branch_0_1')(branch_0)
    branch_0 = Activation('relu',name="arelu18")(branch_0)

    branch_1 = Conv2D(256, 1, name='mixed_7a/branch_1_1', padding='same')(X)
    branch_1 = BatchNormalization(axis=3, name='mixed_7a/bn_branch_1_1')(branch_1)
    branch_1 = Activation('relu',name="arelu19")(branch_1)

    branch_1 = Conv2D(288, 3, strides=2, name='mixed_7a/branch_1_2', padding='valid')(branch_1)
    branch_1 = BatchNormalization(axis=3, name='mixed_7a/bn_branch_1_2')(branch_1)
    branch_1 = Activation('relu',name="arelu20")(branch_1)

    branch_2 = Conv2D(256, 1, name='mixed_7a/branch_2_1', padding='same')(X)
    branch_2 = BatchNormalization(axis=3, name='mixed_7a/bn_branch_2_1')(branch_2)
    branch_2 = Activation('relu',name="arelu21")(branch_2)

    branch_2 = Conv2D(288, 3, name='mixed_7a/branch_2_2', padding='same')(branch_2)
    branch_2 = BatchNormalization(axis=3, name='mixed_7a/bn_branch_2_2')(branch_2)
    branch_2 = Activation('relu',name="arelu22")(branch_2)

    branch_2 = Conv2D(320, 3, strides=2, name='mixed_7a/branch_2_3', padding='valid')(branch_2)
    branch_2 = BatchNormalization(axis=3, name='mixed_7a/bn_branch_2_3')(branch_2)
    branch_2 = Activation('relu',name="arelu23")(branch_2)

    branch_pool = MaxPooling2D(3, strides=2, padding='valid',name="mp5")(X)
    branches = [branch_0, branch_1, branch_2, branch_pool]
    X = Concatenate(axis=3, name='mixed_7a')(branches)

    #for block_idx in range(1, 10):
    #    X = inception_resnet_block(X, scale=0.2, block_type='block8', block_idx=block_idx)

    X = inception_resnet_block(X, scale=1., activation=None, block_type='block8', block_idx=10)

    X = Conv2D(1536, 1, name='conv_7b', padding='same')(X)
    X = BatchNormalization(axis=3, name='bn7b')(X)
    X = Activation('relu',name="arelu24")(X)
    X = AveragePooling2D(K.int_shape(X)[1:3], strides=1, padding='valid', name="ap3")(X)
    X = Flatten()(X)
    X = Dropout(1.0)(X)
    X = Dense(128,name='embeddings' )(X)
    X = Lambda( lambda x: K.l2_normalize(x, axis=1), name="LambdaForNormalization")(X)
    #model = Model( inputs = X_input, outputs=X, name="FaceRecoModel")
    return X
# Example #25
                                                      'images/train',
                                                      seed=mySeed,
                                                      target_size=(224, 224),
                                                      class_mode='categorical',
                                                      batch_size=batch_size)

mobileNetV2 = MobileNetV2(include_top=False,
                          weights='imagenet',
                          pooling=None,
                          input_shape=(224, 224, 3))

model = Sequential([
    mobileNetV2,
    AveragePooling2D((7, 7)),
    Dropout(0.45, noise_shape=None, seed=mySeed),
    Conv2D(12, (1, 1), activation='softmax'),
    # BatchNormalization(axis=-1),
    # Conv2D(12, (1,1), activation='softmax'),
    # BatchNormalization(axis=-1),
    # Conv2D(12, (1,1), activation='softmax', kernel_regularizer=regularizers.l1(0.012), activity_regularizer=regularizers.l1(0.01)),
    Flatten(),
])

for layer in mobileNetV2.layers:
    layer.trainable = False

optimizer = keras.optimizers.Adam(lr=1e-3,
                                  beta_1=0.9,
                                  beta_2=0.999,
                                  epsilon=None,
                                  decay=0.0,
##Importing Keras Libraries and Packages

from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense

## Initialising the Convolutional Neural Network
classifier = Sequential()
classifier.add(Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation = 'relu')) #1. Convolution
classifier.add(MaxPooling2D(pool_size = (2, 2))) # Pooling

# #2. Adds a Second Convolutional layer
classifier.add(Conv2D(32, (3, 3), activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
classifier.add(Flatten()) #3. Flattens the Dataset

# #4. Full Connection
classifier.add(Dense(units = 128, activation = 'relu')) # units: the number of nodes in the layer
classifier.add(Dense(units = 1, activation = 'sigmoid')) # output layer: a single unit with 'sigmoid' activation for binary classification

# #5. Compiling the CNN
classifier.compile(optimizer = 'adam',
                   loss = 'binary_crossentropy', metrics  = ['accuracy'])

## II: Fitting the CNN to the images
from keras.preprocessing.image import ImageDataGenerator

# #1. Pre-Processing
train_datagen = ImageDataGenerator(rescale=1./255, shear_range = 0.2,
                                   zoom_range = 0.2, horizontal_flip = True)
### Aside from 'rescale', the other arguments perform data augmentation
# - shearing, zooming and flipping respectively;
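
# The excerpt ends before the actual fitting step. A hedged sketch of how such a
# generator is typically wired up and fed to the classifier (directory name, image size
# and step counts below are illustrative assumptions, not from the original):
# training_set = train_datagen.flow_from_directory('dataset/training_set',
#                                                  target_size=(64, 64),
#                                                  batch_size=32,
#                                                  class_mode='binary')
# classifier.fit_generator(training_set, steps_per_epoch=250, epochs=25)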
# Example #27
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
print(y_train.shape[0], 'train samples')
print(y_test.shape[0], 'test samples')
print("num_classes =", num_classes)

# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

model = Sequential()
model.add(
    Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))

model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])

model.fit(x_train,
          y_train,
          batch_size=batch_size,
from tensorflow.python.client import device_lib

print(device_lib.list_local_devices())

K.tensorflow_backend._get_available_gpus()

# the function to implement the reorganization layer (thanks to github.com/allanzelener/YAD2K)
def space_to_depth_x2(x):
    return tf.space_to_depth(x, block_size=2)
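
# In YOLOv2-style networks this function is normally applied through a Lambda layer on
# the passthrough/skip connection, e.g. (a hedged usage sketch, not from this excerpt):
# skip_connection = Lambda(space_to_depth_x2)(skip_connection)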


input_image = Input(shape=(IMAGE_H, IMAGE_W, 3))
true_boxes  = Input(shape=(1, 1, 1, TRUE_BOX_BUFFER , 4))

# Layer 1
x = Conv2D(1, (3,3), strides=(1,1), padding='same', name='conv_1', use_bias=False)(input_image)
x = BatchNormalization(name='norm_1')(x)
x = LeakyReLU(alpha=0.1)(x)
x = Conv2D(32, (1,1), strides=(1,1), padding='same', name='conv_1_1', use_bias=False)(x)
x = BatchNormalization(name='norm_1_1')(x)
x = LeakyReLU(alpha=0.1)(x)
x = MaxPooling2D(pool_size=(2, 2))(x)

# Layer 2
x = Conv2D(1, (3,3), strides=(1,1), padding='same', name='conv_2', use_bias=False)(x)
x = BatchNormalization(name='norm_2')(x)
x = LeakyReLU(alpha=0.1)(x)
x = Conv2D(64, (1,1), strides=(1,1), padding='same', name='conv_2_1', use_bias=False)(x)
x = BatchNormalization(name='norm_2_1')(x)
x = LeakyReLU(alpha=0.1)(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
pool_size = (2, 2)
input_shape = X_train.shape[1:]

# Here is the actual neural network #
# Sequential model: a simplified form of the functional API; the simplest linear, unbranched,
# end-to-end structure, i.e. a plain stack of layers.
# A Sequential model can be created by passing a list of layers to the Sequential constructor,
# or by adding layers one at a time with the .add() method.
model = Sequential()
# Normalizes incoming inputs. First layer needs the input shape to work
# This layer re-normalizes the previous layer's activations over each batch, so that its output
# has a mean close to 0 and a standard deviation close to 1.
# For each neuron's input it computes the mean and variance and then normalizes.
model.add(BatchNormalization(input_shape=input_shape))

# Below layers were re-named for easier reading of the model summary; this is not necessary
# Conv Layer 1
model.add(Conv2D(8, (3, 3), padding='valid', strides=(1,1), activation = 'relu', name = 'Conv1'))

# Conv Layer 2
model.add(Conv2D(16, (3, 3), padding='valid', strides=(1,1), activation = 'relu', name = 'Conv2'))

# Pooling 1
model.add(MaxPooling2D(pool_size=pool_size))

# Conv Layer 3
model.add(Conv2D(16, (3, 3), padding='valid', strides=(1,1), activation = 'relu', name = 'Conv3'))
model.add(Dropout(0.2))

# Conv Layer 4
model.add(Conv2D(32, (3, 3), padding='valid', strides=(1,1), activation = 'relu', name = 'Conv4'))
model.add(Dropout(0.2))
# Example #30
from readData import *
from keras.models import Sequential
from keras.layers import Dense, Flatten, Lambda, BatchNormalization, Conv2D, Cropping2D, MaxPooling2D, Dropout

model = Sequential()
model.add(Lambda(lambda x: x / 255.0 - 0.5, input_shape=(160, 320, 3)))
model.add(Cropping2D(cropping=((50, 20), (0, 0))))
model.add(Conv2D(24, (5, 5), strides=(2, 2), activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(36, (5, 5), strides=(2, 2), activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(48, (5, 5), strides=(2, 2), activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dense(100, activation='relu'))
model.add(Dropout(.2))
model.add(Dense(50, activation='relu'))
model.add(Dropout(.2))
model.add(Dense(10, activation='relu'))
model.add(Dropout(.2))
model.add(Dense(1))

model.compile(loss='mse', optimizer='adam')
history_object = model.fit_generator(
    train_generator,
    steps_per_epoch=int(len(train_samples) / 32),
    validation_data=validation_generator,