Example #1
#1-1. Data preprocessing
scaler = StandardScaler()
x = scaler.fit_transform(x)
# print(x[0])

x = x.reshape(-1, 5, 2, 1)
# print(x.shape)          # (442,5,2,1)

x_train, x_test, y_train, y_test = train_test_split(x,
                                                    y,
                                                    random_state=99,
                                                    train_size=0.8)

#2. Model
input1 = Input(shape=(5, 2, 1))
dense1 = Conv2D(10, (2, 1), padding='same')(input1)
dense1 = Conv2D(20, (2, 1), padding='same')(dense1)
dense1 = Conv2D(40, (2, 1), padding='same')(dense1)
dense1 = Conv2D(60, (2, 1), padding='same')(dense1)
dense1 = Conv2D(80, (2, 1), padding='same')(dense1)
dense1 = MaxPooling2D(pool_size=2, padding='same')(dense1)
dense1 = Conv2D(70, (2, 1), padding='same')(dense1)
dense1 = Conv2D(50, (2, 1), padding='same')(dense1)
dense1 = Conv2D(30, (2, 1), padding='same')(dense1)
dense1 = Conv2D(10, (2, 1), padding='same')(dense1)
dense1 = MaxPooling2D(pool_size=2, padding='same')(dense1)
dense1 = Flatten()(dense1)
output1 = Dense(1)(dense1)

model = Model(inputs=input1, outputs=output1)
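
# The example stops after building the model; a minimal sketch of the next
# steps for this regression setup (the loss, batch size and epoch count below
# are assumptions, not part of the original example).
#3. Compile and fit
model.compile(loss='mse', optimizer='adam', metrics=['mae'])
model.fit(x_train, y_train,
          epochs=100,
          batch_size=32,
          validation_split=0.2,
          verbose=2)

#4. Evaluate
loss, mae = model.evaluate(x_test, y_test)
print('loss:', loss, '/ mae:', mae)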
def main(job_dir, **args):

    ##Setting up the path for saving logs
    logs_path = job_dir + 'logs/tensorboard-{}'.format(int(time.time()))

    logs_dir = job_dir + 'logs/tensorboard/'

    ##Using the GPU
    with tf.device('/device:GPU:0'):

        with file_io.FileIO(job_dir + 'dataset/train_data.pickle',
                            mode='rb') as file:
            train_set = pickle.load(file)

        with file_io.FileIO(job_dir + 'dataset/train_label.pickle',
                            mode='rb') as file:
            train_label = pickle.load(file)

        dense_layers = [0, 1, 2]
        layer_sizes = [8, 16, 32, 64]
        conv_layers = [1, 2, 3]

        for dense_layer in dense_layers:
            for layer_size in layer_sizes:
                for conv_layer in conv_layers:
                    NAME = "{}-conv-{}-nodes-{}-dense-{}".format(
                        conv_layer, layer_size, dense_layer, int(time.time()))

                    model = Sequential()

                    model.add(
                        Conv2D(layer_size, (3, 3), input_shape=(64, 64, 3)))
                    model.add(Activation('relu'))
                    model.add(MaxPooling2D(pool_size=(2, 2)))

                    for l in range(conv_layer - 1):
                        model.add(Conv2D(layer_size, (3, 3)))
                        model.add(Activation('relu'))
                        model.add(MaxPooling2D(pool_size=(2, 2)))

                    model.add(Flatten())
                    for _ in range(dense_layer):
                        model.add(Dense(layer_size))
                        model.add(Activation('relu'))

                    model.add(Dense(13))
                    model.add(Activation('sigmoid'))

                    tensorboard = callbacks.TensorBoard(log_dir=logs_dir +
                                                        "{}".format(NAME))

                    # checkpoint = ModelCheckpoint('model-checkpoint-{}.h5'.format(NAME),
                    #                              monitor='val_loss', verbose=1,
                    #                              save_best_only=True, mode='min')

                    model.compile(
                        loss='categorical_crossentropy',
                        optimizer='adam',
                        metrics=['accuracy'],
                    )

                    model.fit(train_set,
                              train_label,
                              batch_size=40,
                              epochs=15,
                              validation_split=0.3,
                              callbacks=[tensorboard])

                    model.save('model-{}.h5'.format(NAME))
                    # the saved .h5 file is binary, so copy it to the job
                    # directory with both handles opened in binary mode
                    with file_io.FileIO('model-{}.h5'.format(NAME),
                                        mode='rb') as input_f:
                        with file_io.FileIO(job_dir + 'model/' +
                                            'model-{}.h5'.format(NAME),
                                            mode='wb+') as output_f:
                            output_f.write(input_f.read())
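
# A minimal sketch of how main() might be invoked as a trainer entry point
# (the --job-dir flag name and the argparse wiring are assumptions, not shown
# in the original snippet).
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--job-dir', dest='job_dir', required=True,
                        help='GCS or local path used for logs, data and models')
    args = parser.parse_args()
    main(**vars(args))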
# Load data set
(x_train, y_train), (x_test, y_test) = cifar10.load_data()

# Normalize data set to 0-to-1 range
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255

# Convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, 10)
y_test = keras.utils.to_categorical(y_test, 10)

# Create a model and add layers
model = Sequential()
model.add(Conv2D(32, (3, 3), padding='same', activation='relu', input_shape=(32, 32, 3)))
# no padding on this layer, so the feature map shrinks slightly
model.add(Conv2D(32, (3, 3), activation='relu'))
# max pooling downsamples the feature maps and speeds up training
model.add(MaxPooling2D(pool_size=(2, 2)))
# dropout discourages neuron co-adaptation and reduces overfitting
model.add(Dropout(0.25))

model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
# flatten whenever going from convolutional layers to Dense layers, since Dense expects 1D feature vectors
model.add(Flatten())

model.add(Dense(512, activation="relu"))
print("y_test",y_test.shape)
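
# The excerpt stops after the first Dense layer; a sketch of how such a
# CIFAR-10 model is typically finished (the dropout rate, output layer and
# training hyperparameters below are assumptions).
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))  # 10 CIFAR-10 classes

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

model.fit(x_train, y_train,
          batch_size=64,
          epochs=30,
          validation_data=(x_test, y_test),
          shuffle=True)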


# 2. reshape train and test data to 4D arrays
#    note: the CNN expects 4D tensors as input for the
#          convolutional layers!
X_train = X_train.reshape(X_train.shape[0], 20, 11, 1)
X_test = X_test.reshape(X_test.shape[0], 20, 11, 1)

y_train_hot = np_utils.to_categorical(y_train)
y_test_hot = np_utils.to_categorical(y_test)


# 3. setup a CNN
model = Sequential()
model.add(Conv2D(20, kernel_size=(3, 3), activation='relu', input_shape=(20, 11, 1)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(40, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(50, activation='relu'))
model.add(Dense(3, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])

# 4. train the CNN
model.fit(X_train,
          y_train_hot,
          batch_size=100,
          epochs=50)
kernel_size = 3
latent_dim = 16
# Encoder/Decoder number of CNN layers and filters per layer
layer_filters = [32, 64]

# Build the Autoencoder Model
# First build the Encoder Model
inputs = Input(shape=input_shape, name='encoder_input')
x = inputs
# Stack of Conv2D blocks
# Notes:
# 1) Use Batch Normalization before ReLU on deep networks
# 2) Use MaxPooling2D as alternative to strides>1
# - faster but not as good as strides>1
for filters in layer_filters:
    x = Conv2D(filters=filters, kernel_size=kernel_size, strides=2, activation='relu', padding='same')(x)

# Shape info needed to build Decoder Model
shape = K.int_shape(x)

# Generate the latent vector
x = Flatten()(x)
latent = Dense(latent_dim, name='latent_vector')(x)

# Instantiate Encoder Model
encoder = Model(inputs, latent, name='encoder')
encoder.summary()

# Build the Decoder Model
latent_inputs = Input(shape=(latent_dim,), name='decoder_input')
x = Dense(shape[1] * shape[2] * shape[3])(latent_inputs)
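
# The decoder is cut off after the first Dense layer; a sketch of how it is
# usually completed to mirror the encoder above (the Conv2DTranspose stack and
# the single output channel are assumptions).
from keras.layers import Conv2DTranspose, Reshape

x = Reshape((shape[1], shape[2], shape[3]))(x)
# Transposed-convolution blocks in reverse filter order
for filters in layer_filters[::-1]:
    x = Conv2DTranspose(filters=filters, kernel_size=kernel_size,
                        strides=2, activation='relu', padding='same')(x)
outputs = Conv2DTranspose(filters=1, kernel_size=kernel_size,
                          activation='sigmoid', padding='same',
                          name='decoder_output')(x)

# Instantiate the Decoder Model and chain it with the encoder
decoder = Model(latent_inputs, outputs, name='decoder')
autoencoder = Model(inputs, decoder(encoder(inputs)), name='autoencoder')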
train_generator = generator(train_samples)
validation_generator = generator(validation_samples)
print('generators setup')

from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten, Conv2D, Lambda
from keras.layers.convolutional import Cropping2D
from keras.layers.normalization import BatchNormalization
from keras.layers.pooling import MaxPooling2D
from keras.optimizers import Adam

model = Sequential()
model.add(
    Cropping2D(cropping=((50, 20), (0, 0)), input_shape=(height, width, 3)))
model.add(Lambda(lambda x: x / 127.5 - 1))
model.add(Conv2D(24, (5, 5)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(36, (5, 5)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(48, (5, 5)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Conv2D(64, (3, 3)))
model.add(Flatten())
model.add(Dense(100))
model.add(Dense(50))
model.add(Dense(10))
model.add(Dense(1))
model.compile(optimizer=Adam(lr=0.001), loss='mse', metrics=['accuracy'])
#model.summary()
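
# Since train_generator and validation_generator were created above, training
# would typically use fit_generator; a sketch (the batch size of 32 and the
# epoch count are assumptions, and generator() is assumed to yield (X, y) batches).
model.fit_generator(train_generator,
                    steps_per_epoch=len(train_samples) // 32,
                    validation_data=validation_generator,
                    validation_steps=len(validation_samples) // 32,
                    epochs=5)
model.save('model.h5')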
Example #7
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)

print(y_train.shape)  # (60000, 10) -> one hot encoding 

x_train = x_train.reshape(60000, 28, 28, 1).astype('float32')/255.0   # reshape to 4D for the CNN input, cast to float32, and min-max normalize to [0, 1]
x_test = x_test.reshape(10000, 28, 28, 1).astype('float32')/255.0

# ____________ Model definition ____________

from keras.models import Sequential
from keras.layers import Dense, Flatten, Conv2D, MaxPool2D, Dropout, BatchNormalization

model= Sequential()

model.add(Conv2D(64, (3,3), input_shape = (28, 28, 1), padding='same'))   
model.add(BatchNormalization())
model.add(Conv2D(32, (3,3), activation = 'relu', strides= 1, padding='same'))  
model.add(BatchNormalization())
model.add(MaxPool2D(pool_size= 2))
model.add(Dropout(0.20))

model.add(Conv2D(64, (3,3), input_shape = (28, 28, 1), padding='same'))   
model.add(BatchNormalization())
model.add(Conv2D(64, (2,2), activation = 'relu', strides= 1, padding='same'))  
model.add(BatchNormalization())
model.add(MaxPool2D(pool_size= 2))
model.add(Dropout(0.3))

model.add(Conv2D(128, (3,3), activation = 'relu', strides= 1, padding='same'))  
model.add(BatchNormalization())
Example #8
	# 							weights=[embeddingMatrix],
	# 							input_length=maxLength,
	# 							trainable=False)

	sequenceInput = Input(shape=(maxLength, ), dtype='int32')
	embedding = Embedding(input_dim = len(wordIdMap) + 1, 
						output_dim = embeddingDim, 
						weights = [embeddingMatrix],
						input_length = maxLength,
						trainable = False)(sequenceInput)
	# embeddedSequences = embeddingLayer(sequenceInput)
	finalEmbeddedSeq = Reshape((maxLength, embeddingDim, 1))(embedding)



	conv_0 = Conv2D(numOfFilters, kernel_size=(filterSizes[0], embeddingDim), padding='valid', kernel_initializer='normal', activation='relu')(finalEmbeddedSeq)
	conv_1 = Conv2D(numOfFilters, kernel_size=(filterSizes[1], embeddingDim), padding='valid', kernel_initializer='normal', activation='relu')(finalEmbeddedSeq)
	conv_2 = Conv2D(numOfFilters, kernel_size=(filterSizes[2], embeddingDim), padding='valid', kernel_initializer='normal', activation='relu')(finalEmbeddedSeq)

	maxpool_0 = MaxPooling2D(pool_size=(maxLength - filterSizes[0] + 1, 1), strides=(1,1), padding='valid')(conv_0)
	maxpool_1 = MaxPooling2D(pool_size=(maxLength - filterSizes[1] + 1, 1), strides=(1,1), padding='valid')(conv_1)
	maxpool_2 = MaxPooling2D(pool_size=(maxLength - filterSizes[2] + 1, 1), strides=(1,1), padding='valid')(conv_2)

	concatenated_tensor = Concatenate(axis=1)([maxpool_0, maxpool_1, maxpool_2])
	flatten = Flatten()(concatenated_tensor)
	dropout = Dropout(drop)(flatten)
	output = Dense(units = numOfLabels, activation = 'softmax')(dropout)

	model = Model(inputs=sequenceInput, outputs=output)

	# checkpoint = ModelCheckpoint('weights.{epoch:03d}-{val_acc:.4f}.hdf5', monitor='val_acc', verbose=1, save_best_only=True, mode='auto')
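
	# The snippet stops before compilation; a minimal sketch (the optimizer and
	# metric choices are assumptions consistent with the softmax output above).
	model.compile(loss='categorical_crossentropy',
				optimizer='adam',
				metrics=['accuracy'])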
Example #9
    y = BatchNormalization()(y)
    y = Activation('relu')(y)

    y = Conv2D(filters, (f_size, f_size), padding='same')(y)
    y = BatchNormalization()(y)

    y = Add()([layer_input, y])

    return Activation('relu')(y)
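

# Only the tail of resnet_block appears above; for reference, a sketch of the
# full residual block consistent with that tail (the default filter count and
# kernel size are assumptions).
def resnet_block(layer_input, filters=64, f_size=3):
    y = Conv2D(filters, (f_size, f_size), padding='same')(layer_input)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)

    y = Conv2D(filters, (f_size, f_size), padding='same')(y)
    y = BatchNormalization()(y)

    y = Add()([layer_input, y])

    return Activation('relu')(y)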


IMG_HEIGHT, IMG_WIDTH = 128, 128

inputs = Input((None, None, 1))

x = Conv2D(64, 9, padding='same')(inputs)
x = BatchNormalization()(x)
x = Activation('relu')(x)

x = resnet_block(x)
x = resnet_block(x)
x = resnet_block(x)

outputs = Conv2D(1, 3, padding='same', activation='sigmoid')(x)

model = Model(inputs=inputs, outputs=outputs)
model.summary()

if LOAD_WEIGHTS:
    model.load_weights('model4.h5')
Example #10
    def CreateModel(self):
        '''
        Define the CNN/LSTM/CTC model using the Keras functional API.
        Input layer: a sequence of 200-dimensional feature vectors; the maximum
            length of one utterance is set to 1600 frames (about 16 s).
        Hidden layers: convolution + pooling layers, 3x3 kernels, pooling window size 2.
        Hidden layer: fully connected layer.
        Output layer: fully connected layer with self.MS_OUTPUT_SIZE neurons and softmax activation.
        CTC layer: uses the CTC loss as the loss function to realize connectionist temporal multi-output decoding.
        '''

        input_data = Input(name='the_input',
                           shape=(self.AUDIO_LENGTH, self.AUDIO_FEATURE_LENGTH,
                                  1))

        layer_h1 = Conv2D(32, (3, 3),
                          use_bias=False,
                          activation='relu',
                          padding='same',
                          kernel_initializer='he_normal')(input_data)  # convolution layer
        layer_h1 = Dropout(0.05)(layer_h1)
        layer_h2 = Conv2D(32, (3, 3),
                          use_bias=True,
                          activation='relu',
                          padding='same',
                          kernel_initializer='he_normal')(layer_h1)  # convolution layer
        layer_h3 = MaxPooling2D(pool_size=2, strides=None,
                                padding="valid")(layer_h2)  # pooling layer
        #layer_h3 = Dropout(0.2)(layer_h2)  # randomly drop some connections to prevent overfitting
        layer_h3 = Dropout(0.05)(layer_h3)
        layer_h4 = Conv2D(64, (3, 3),
                          use_bias=True,
                          activation='relu',
                          padding='same',
                          kernel_initializer='he_normal')(layer_h3)  # convolution layer
        layer_h4 = Dropout(0.1)(layer_h4)
        layer_h5 = Conv2D(64, (3, 3),
                          use_bias=True,
                          activation='relu',
                          padding='same',
                          kernel_initializer='he_normal')(layer_h4)  # convolution layer
        layer_h6 = MaxPooling2D(pool_size=2, strides=None,
                                padding="valid")(layer_h5)  # pooling layer

        layer_h6 = Dropout(0.1)(layer_h6)
        layer_h7 = Conv2D(128, (3, 3),
                          use_bias=True,
                          activation='relu',
                          padding='same',
                          kernel_initializer='he_normal')(layer_h6)  # convolution layer
        layer_h7 = Dropout(0.15)(layer_h7)
        layer_h8 = Conv2D(128, (3, 3),
                          use_bias=True,
                          activation='relu',
                          padding='same',
                          kernel_initializer='he_normal')(layer_h7)  # convolution layer
        layer_h9 = MaxPooling2D(pool_size=2, strides=None,
                                padding="valid")(layer_h8)  # pooling layer

        layer_h9 = Dropout(0.15)(layer_h9)
        layer_h10 = Conv2D(128, (3, 3),
                           use_bias=True,
                           activation='relu',
                           padding='same',
                           kernel_initializer='he_normal')(layer_h9)  # convolution layer
        layer_h10 = Dropout(0.2)(layer_h10)
        layer_h11 = Conv2D(128, (3, 3),
                           use_bias=True,
                           activation='relu',
                           padding='same',
                           kernel_initializer='he_normal')(layer_h10)  # convolution layer
        layer_h12 = MaxPooling2D(pool_size=1, strides=None,
                                 padding="valid")(layer_h11)  # pooling layer

        layer_h12 = Dropout(0.2)(layer_h12)
        layer_h13 = Conv2D(128, (3, 3),
                           use_bias=True,
                           activation='relu',
                           padding='same',
                           kernel_initializer='he_normal')(layer_h12)  # convolution layer
        layer_h13 = Dropout(0.2)(layer_h13)
        layer_h14 = Conv2D(128, (3, 3),
                           use_bias=True,
                           activation='relu',
                           padding='same',
                           kernel_initializer='he_normal')(layer_h13)  # convolution layer
        layer_h15 = MaxPooling2D(pool_size=1, strides=None,
                                 padding="valid")(layer_h14)  # pooling layer

        #test=Model(inputs = input_data, outputs = layer_h12)
        #test.summary()

        layer_h16 = Reshape((200, 3200))(layer_h15)  # Reshape layer
        #layer_h5 = LSTM(256, activation='relu', use_bias=True, return_sequences=True)(layer_h4)  # LSTM layer
        #layer_h6 = Dropout(0.2)(layer_h5)  # randomly drop some connections to prevent overfitting
        layer_h16 = Dropout(0.3)(layer_h16)
        layer_h17 = Dense(128,
                          activation="relu",
                          use_bias=True,
                          kernel_initializer='he_normal')(layer_h16)  # fully connected layer
        layer_h17 = Dropout(0.3)(layer_h17)
        layer_h18 = Dense(self.MS_OUTPUT_SIZE,
                          use_bias=True,
                          kernel_initializer='he_normal')(layer_h17)  # fully connected layer

        y_pred = Activation('softmax', name='Activation0')(layer_h18)
        model_data = Model(inputs=input_data, outputs=y_pred)
        #model_data.summary()

        labels = Input(name='the_labels',
                       shape=[self.label_max_string_length],
                       dtype='float32')
        input_length = Input(name='input_length', shape=[1], dtype='int64')
        label_length = Input(name='label_length', shape=[1], dtype='int64')
        # Keras doesn't currently support loss funcs with extra parameters
        # so CTC loss is implemented in a lambda layer

        #layer_out = Lambda(ctc_lambda_func,output_shape=(self.MS_OUTPUT_SIZE, ), name='ctc')([y_pred, labels, input_length, label_length])#(layer_h6) # CTC
        loss_out = Lambda(self.ctc_lambda_func, output_shape=(1, ),
                          name='ctc')(
                              [y_pred, labels, input_length, label_length])

        model = Model(inputs=[input_data, labels, input_length, label_length],
                      outputs=loss_out)

        model.summary()

        # clipnorm seems to speeds up convergence
        #sgd = SGD(lr=0.0001, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=5)
        #opt = Adadelta(lr = 0.01, rho = 0.95, epsilon = 1e-06)
        opt = Adam(lr=0.0001,
                   beta_1=0.9,
                   beta_2=0.999,
                   decay=0.0,
                   epsilon=10e-8)
        #model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=sgd)
        model.compile(loss={
            'ctc': lambda y_true, y_pred: y_pred
        },
                      optimizer=opt)

        # captures output of softmax so we can decode the output during visualization
        test_func = K.function([input_data], [y_pred])

        #print('[*Hint] Model created successfully, model compiled successfully')
        print('[*Info] Create Model Successful, Compiles Model Successful. ')
        return model, model_data
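
    # The model above wires the CTC loss through self.ctc_lambda_func, which is
    # not shown in this excerpt; a minimal sketch of a typical implementation
    # built on K.ctc_batch_cost (treat it as an assumption, not the original).
    def ctc_lambda_func(self, args):
        y_pred, labels, input_length, label_length = args
        return K.ctc_batch_cost(labels, y_pred, input_length, label_length)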
#Import the required packages
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense

#Initializing the CNN (classifier neural network)
classifier = Sequential()  #Sequential builds the model as a linear stack of layers

#--------------Step 1 - Convolution
#adding the first layer: input images are 64 x 64 pixels with 3 colour channels
#the input shape needs to match your data
#Conv2D is a 2D convolution layer that slides learned filters over the image
classifier.add(Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation='relu'))

#--------------Step 2 - Adding a second convolutional layer
classifier.add(Conv2D(32, (3, 3), activation='relu'))
classifier.add(
    MaxPooling2D(pool_size=(2, 2)))  #pooling halves the spatial dimensions of the feature maps

#--------------Step 3 Flattening
classifier.add(
    Flatten())  #flattening to a single array without multiple dimensions

#--------------Step 4 - Full connection
#adding the final two dense layers to map the extracted features to a single prediction
classifier.add(Dense(units=128, activation='relu'))
classifier.add(Dense(units=1, activation='sigmoid'))
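
# A minimal sketch of compiling and training this binary classifier with
# ImageDataGenerator (the directory names, step counts and hyperparameters
# below are assumptions, not part of the original snippet).
from keras.preprocessing.image import ImageDataGenerator

classifier.compile(optimizer='adam', loss='binary_crossentropy',
                   metrics=['accuracy'])

train_datagen = ImageDataGenerator(rescale=1. / 255, shear_range=0.2,
                                   zoom_range=0.2, horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1. / 255)

training_set = train_datagen.flow_from_directory('dataset/training_set',
                                                 target_size=(64, 64),
                                                 batch_size=32,
                                                 class_mode='binary')
test_set = test_datagen.flow_from_directory('dataset/test_set',
                                            target_size=(64, 64),
                                            batch_size=32,
                                            class_mode='binary')

classifier.fit_generator(training_set,
                         steps_per_epoch=250,
                         epochs=10,
                         validation_data=test_set,
                         validation_steps=63)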
Example #12
    def build_model(self):
        # Input
        inp = Input(shape=self.input_shape, name='input_1')

        x = Conv2D(32, kernel_size=3, padding='same', activation='relu')(inp)
        x = Conv2D(32, kernel_size=3, padding='same', activation='relu')(x)
        x = BatchNormalization()(x)
        x = MaxPool2D(pool_size=3, strides=2)(x)

        x = Conv2D(64, kernel_size=3, padding='same', activation='relu')(x)
        x = BatchNormalization()(x)
        x = Conv2D(64, kernel_size=3, padding='same', activation='relu')(x)
        x = BatchNormalization()(x)
        x = Conv2D(64, kernel_size=3, padding='same', activation='relu')(x)
        x = BatchNormalization()(x)
        x = MaxPool2D(pool_size=3, strides=2)(x)
        x = Dropout(0.3)(x)

        x = Conv2D(128, kernel_size=3, padding='same', activation='relu')(x)
        x = BatchNormalization()(x)
        x = Conv2D(128, kernel_size=3, padding='same', activation='relu')(x)
        x = BatchNormalization()(x)
        x = Conv2D(128, kernel_size=3, padding='same', activation='relu')(x)
        x = BatchNormalization()(x)
        x = MaxPool2D(pool_size=3, strides=2)(x)
        x = Dropout(0.3)(x)

        x = Conv2D(256, kernel_size=3, padding='same', activation='relu')(x)
        x = BatchNormalization()(x)
        x = Conv2D(256, kernel_size=3, padding='same', activation='relu')(x)
        x = BatchNormalization()(x)
        x = Conv2D(256, kernel_size=3, padding='same', activation='relu')(x)
        x = BatchNormalization()(x)
        x = MaxPool2D(pool_size=3, strides=2)(x)
        x = Dropout(0.3)(x)

        x = Flatten()(x)

        x = Dense(512, activation='relu')(x)
        x = Dropout(0.3)(x)
        x = Dense(512, activation='relu')(x)
        x = Dropout(0.3)(x)

        out = Dense(self.classes, activation='softmax')(x)

        model = Model(inputs=inp, outputs=out)
        #model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

        return model
def ResNet50(input_shape = (64, 64, 3), classes = 6):
    """
    Implementation of the popular ResNet50 with the following architecture:
    CONV2D -> BATCHNORM -> RELU -> MAXPOOL -> CONVBLOCK -> IDBLOCK*2 -> CONVBLOCK -> IDBLOCK*3
    -> CONVBLOCK -> IDBLOCK*5 -> CONVBLOCK -> IDBLOCK*2 -> AVGPOOL -> TOPLAYER

    Arguments:
    input_shape -- shape of the images of the dataset
    classes -- integer, number of classes

    Returns:
    model -- a Model() instance in Keras
    """
    
    # Define the input as a tensor with shape input_shape
    X_input = Input(input_shape)

    
    # Zero-Padding
    X = ZeroPadding2D((3, 3))(X_input)
    
    # Stage 1
    X = Conv2D(64, (7, 7), strides = (2, 2), name = 'conv1', kernel_initializer = glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis = 3, name = 'bn_conv1')(X)
    X = Activation('relu')(X)
    X = MaxPooling2D((3, 3), strides=(2, 2))(X)

    # Stage 2
    X = convolutional_block(X, f = 3, filters = [64, 64, 256], stage = 2, block='a', s = 1)
    X = identity_block(X, 3, [64, 64, 256], stage=2, block='b')
    X = identity_block(X, 3, [64, 64, 256], stage=2, block='c')

    # Stage 3
    X = convolutional_block(X, f = 3, filters = [128,128,512], stage = 3, block='a', s = 2)
    X = identity_block(X, 3,  [128,128,512], stage=3, block='b')
    X = identity_block(X, 3,  [128,128,512], stage=3, block='c')
    X = identity_block(X, 3,  [128,128,512], stage=3, block='d')

    # Stage 4
    X = convolutional_block(X, f = 3, filters = [256, 256, 1024], stage = 4, block='a', s = 2)
    X = identity_block(X, 3,  [256, 256, 1024], stage=4, block='b')
    X = identity_block(X, 3,  [256, 256, 1024], stage=4, block='c')
    X = identity_block(X, 3,  [256, 256, 1024], stage=4, block='d')
    X = identity_block(X, 3,  [256, 256, 1024], stage=4, block='e')
    X = identity_block(X, 3,  [256, 256, 1024], stage=4, block='f')

    # Stage 5
    X = convolutional_block(X, f = 3, filters =  [512, 512, 2048], stage = 5, block='a', s = 2)
    X = identity_block(X, 3,[512, 512, 2048], stage=5, block='b')
    X = identity_block(X, 3,[512, 512, 2048], stage=5, block='c')

    # AVGPOOL
    X = AveragePooling2D(pool_size=(2, 2))(X)

    # output layer
    X = Flatten()(X)
    X = Dense(classes, activation='softmax', name='fc' + str(classes), kernel_initializer = glorot_uniform(seed=0))(X)
    
    
    # Create model
    model = Model(inputs = X_input, outputs = X, name='ResNet50')

    return model
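
# A minimal usage sketch for the ResNet50 builder above (the optimizer and the
# commented-out training call are assumptions; X_train/Y_train would be 64x64
# RGB images with one-hot labels).
model = ResNet50(input_shape=(64, 64, 3), classes=6)
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])
# model.fit(X_train, Y_train, epochs=20, batch_size=32)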
def faceRecoModel(input_shape):
    """
    Implementation of the Inception model used for FaceNet
    
    Arguments:
    input_shape -- shape of the images of the dataset

    Returns:
    model -- a Model() instance in Keras
    """
        
    # Define the input as a tensor with shape input_shape
    X_input = Input(input_shape)

    # Zero-Padding
    X = ZeroPadding2D((3, 3))(X_input)
    
    # First Block
    X = Conv2D(64, (7, 7), strides = (2, 2), name = 'conv1')(X)
    X = BatchNormalization(axis = 1, name = 'bn1')(X)
    X = Activation('relu')(X)
    
    # Zero-Padding + MAXPOOL
    X = ZeroPadding2D((1, 1))(X)
    X = MaxPooling2D((3, 3), strides = 2)(X)
    
    # Second Block
    X = Conv2D(64, (1, 1), strides = (1, 1), name = 'conv2')(X)
    X = BatchNormalization(axis = 1, epsilon=0.00001, name = 'bn2')(X)
    X = Activation('relu')(X)
    
    # Zero-Padding
    X = ZeroPadding2D((1, 1))(X)

    # Third Block
    X = Conv2D(192, (3, 3), strides = (1, 1), name = 'conv3')(X)
    X = BatchNormalization(axis = 1, epsilon=0.00001, name = 'bn3')(X)
    X = Activation('relu')(X)
    
    # Zero-Padding + MAXPOOL
    X = ZeroPadding2D((1, 1))(X)
    X = MaxPooling2D(pool_size = 3, strides = 2)(X)
    
    # Inception 1: a/b/c
    X = inception_block_1a(X)
    X = inception_block_1b(X)
    X = inception_block_1c(X)
    
    # Inception 2: a/b
    X = inception_block_2a(X)
    X = inception_block_2b(X)
    
    # Inception 3: a/b
    X = inception_block_3a(X)
    X = inception_block_3b(X)
    
    # Top layer
    X = AveragePooling2D(pool_size=(3, 3), strides=(1, 1), data_format='channels_first')(X)
    X = Flatten()(X)
    X = Dense(128, name='dense_layer')(X)
    
    # L2 normalization
    X = Lambda(lambda  x: K.l2_normalize(x,axis=1))(X)

    # Create model instance
    model = Model(inputs = X_input, outputs = X, name='FaceRecoModel')
        
    return model
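
# FaceNet-style models like the one above are commonly trained with a triplet
# loss; a minimal sketch is given below (the margin alpha=0.2 and the use of
# TensorFlow ops are assumptions, not part of the original snippet).
import tensorflow as tf

def triplet_loss(y_true, y_pred, alpha=0.2):
    anchor, positive, negative = y_pred[0], y_pred[1], y_pred[2]
    # squared distances between the anchor and the positive/negative embeddings
    pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), axis=-1)
    neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), axis=-1)
    # hinge on the margin so positives sit closer than negatives by at least alpha
    basic_loss = tf.add(tf.subtract(pos_dist, neg_dist), alpha)
    return tf.reduce_sum(tf.maximum(basic_loss, 0.0))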
from keras.layers import Input, Dense, Conv2D, MaxPooling2D, UpSampling2D, Flatten, Reshape

(previsores_treinamento, _), (previsores_teste, _) = mnist.load_data()
previsores_treinamento = previsores_treinamento.reshape(
    (len(previsores_treinamento), 28, 28, 1))
previsores_teste = previsores_teste.reshape((len(previsores_teste), 28, 28, 1))

previsores_treinamento = previsores_treinamento.astype('float32') / 255
previsores_teste = previsores_teste.astype('float32') / 255

autoencoder = Sequential()

# Encoder
autoencoder.add(
    Conv2D(filters=16,
           kernel_size=(3, 3),
           activation='relu',
           input_shape=(28, 28, 1)))
autoencoder.add(MaxPooling2D(pool_size=(2, 2)))

autoencoder.add(
    Conv2D(filters=8, kernel_size=(3, 3), activation='relu', padding='same'))
autoencoder.add(MaxPooling2D(pool_size=(2, 2), padding='same'))

# 4, 4, 8
autoencoder.add(
    Conv2D(filters=8,
           kernel_size=(3, 3),
           activation='relu',
           padding='same',
           strides=(2, 2)))
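
# The excerpt ends inside the encoder; a sketch of how the decoder is usually
# completed with the UpSampling2D layer imported above (the exact filter counts
# and the training call are assumptions).
autoencoder.add(Conv2D(filters=8, kernel_size=(3, 3),
                       activation='relu', padding='same'))
autoencoder.add(UpSampling2D(size=(2, 2)))
autoencoder.add(Conv2D(filters=8, kernel_size=(3, 3),
                       activation='relu', padding='same'))
autoencoder.add(UpSampling2D(size=(2, 2)))
autoencoder.add(Conv2D(filters=16, kernel_size=(3, 3), activation='relu'))
autoencoder.add(UpSampling2D(size=(2, 2)))
autoencoder.add(Conv2D(filters=1, kernel_size=(3, 3),
                       activation='sigmoid', padding='same'))

autoencoder.compile(optimizer='adam', loss='binary_crossentropy')
# autoencoder.fit(previsores_treinamento, previsores_treinamento,
#                 epochs=10, batch_size=256,
#                 validation_data=(previsores_teste, previsores_teste))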
Example #16
    def _build_inception_model(self):

        #model = InceptionV3(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
        # create the base pre-trained model
        #base_model = InceptionV3(include_top=False, weights='imagenet', input_tensor=None, input_shape=state_size, pooling=None, classes=100)

        inputA = Input(shape=(22, 22, 1))

        # add a global spatial average pooling layer

        tower_1 = Conv2D(64, (1, 1), padding='same', activation='relu')(inputA)
        tower_1 = Conv2D(64, (3, 3), padding='same',
                         activation='relu')(tower_1)

        tower_2 = Conv2D(64, (1, 1), padding='same', activation='relu')(inputA)
        tower_2 = Conv2D(64, (5, 5), padding='same',
                         activation='relu')(tower_2)

        tower_3 = MaxPooling2D((3, 3), strides=(1, 1), padding='same')(inputA)
        tower_3 = Conv2D(64, (1, 1), padding='same',
                         activation='relu')(tower_3)

        output = keras.layers.concatenate([tower_1, tower_2, tower_3], axis=1)

        tower_1 = Conv2D(64, (1, 1), padding='same', activation='relu')(output)
        tower_1 = Conv2D(64, (3, 3), padding='same',
                         activation='relu')(tower_1)

        tower_2 = Conv2D(64, (1, 1), padding='same', activation='relu')(output)
        tower_2 = Conv2D(64, (5, 5), padding='same',
                         activation='relu')(tower_2)

        tower_3 = MaxPooling2D((3, 3), strides=(1, 1), padding='same')(output)
        tower_3 = Conv2D(64, (1, 1), padding='same',
                         activation='relu')(tower_3)

        output = keras.layers.concatenate([tower_1, tower_2, tower_3], axis=1)

        # let's add a fully-connected layer
        output = Flatten()(output)
        x = Dense(16, activation='relu')(output)
        x = Dense(16, activation='relu')(x)
        x = Dense(16, activation='relu')(x)
        x = Model(inputA, outputs=x)

        inputB = Input(shape=(state_info, ))
        y = Dense(16, activation="relu")(inputB)
        y = Dense(16, activation="relu")(y)
        y = Model(inputs=inputB, outputs=y)

        combined = concatenate([x.output, y.output])

        z = Dense(32, activation="relu")(combined)
        z = Dense(32, activation="linear")(z)

        # and a logistic layer -- let's say we have self.action_size classes
        predictions = Dense(self.action_size, activation='softmax')(z)

        # this is the model we will train
        model = Model(inputs=[inputA, inputB], outputs=predictions)

        model.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))
        model.summary()
        return model
def get_crfrnn_model_def():
    """ Returns the Keras CRF-RNN model definition.

    Currently, only 500 x 500 images are supported. However, one can get this to
    work with different image sizes by adjusting the parameters of the Cropping2D layers
    below.
    """

    channels, height, weight = 3, 500, 500

    # Input
    input_shape = (height, weight, 3)
    img_input = Input(shape=input_shape)

    # Add plenty of zero padding
    x = ZeroPadding2D(padding=(100, 100))(img_input)

    # VGG-16 convolution block 1
    x = Conv2D(64, (3, 3), activation='relu', padding='valid', name='conv1_1')(x)
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='conv1_2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool1')(x)

    # VGG-16 convolution block 2
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='conv2_1')(x)
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='conv2_2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool2', padding='same')(x)

    # VGG-16 convolution block 3
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='conv3_1')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='conv3_2')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='conv3_3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool3', padding='same')(x)
    pool3 = x

    # VGG-16 convolution block 4
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv4_1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv4_2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv4_3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool4', padding='same')(x)
    pool4 = x

    # VGG-16 convolution block 5
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv5_1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv5_2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv5_3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool5', padding='same')(x)

    # Fully-connected layers converted to convolution layers
    x = Conv2D(4096, (7, 7), activation='relu', padding='valid', name='fc6')(x)
    x = Dropout(0.5)(x)
    x = Conv2D(4096, (1, 1), activation='relu', padding='valid', name='fc7')(x)
    x = Dropout(0.5)(x)
    x = Conv2D(21, (1, 1), padding='valid', name='score-fr')(x)

    # Deconvolution
    score2 = Conv2DTranspose(21, (4, 4), strides=2, name='score2')(x)

    # Skip connections from pool4
    score_pool4 = Conv2D(21, (1, 1), name='score-pool4')(pool4)
    score_pool4c = Cropping2D((5, 5))(score_pool4)
    score_fused = Add()([score2, score_pool4c])
    score4 = Conv2DTranspose(21, (4, 4), strides=2, name='score4', use_bias=False)(score_fused)

    # Skip connections from pool3
    score_pool3 = Conv2D(21, (1, 1), name='score-pool3')(pool3)
    score_pool3c = Cropping2D((9, 9))(score_pool3)

    # Fuse things together
    score_final = Add()([score4, score_pool3c])

    # Final up-sampling and cropping
    upsample = Conv2DTranspose(21, (16, 16), strides=8, name='upsample', use_bias=False)(score_final)
    upscore = Cropping2D(((31, 37), (31, 37)))(upsample)

    output = CrfRnnLayer(image_dims=(height, weight),
                         num_classes=21,
                         theta_alpha=160.,
                         theta_beta=3.,
                         theta_gamma=3.,
                         num_iterations=10,
                         name='crfrnn')([upscore, img_input])

    # Build the model
    model = Model(img_input, output, name='crfrnn_net')

    return model
import cv2
from skimage.io import imshow
from keras.models import Sequential
from keras.layers import Conv2D,Conv2DTranspose, Cropping2D, Dense, Activation, Dropout, Flatten,MaxPooling2D, Merge, Average
from keras.preprocessing import image
from keras.applications.vgg16 import VGG16, preprocess_input, decode_predictions

# Number of classes
n_classes = 21
input_shape = (224, 224, 3)

#fcn-32s architecture

#block1
FCN32 = Sequential()
FCN32.add(Conv2D(64,(3, 3), activation='relu', input_shape=input_shape, padding='same',name = 'conv1_1'))
FCN32.add(Conv2D(64,(3, 3), activation='relu', name = 'conv1_2',padding='same'))
FCN32.add(MaxPooling2D(pool_size=(2,2), strides = (2,2), name = 'block1_pool'))

#block2
FCN32.add(Conv2D(128,(3, 3), activation='relu', name = 'conv2_1',padding='same'))
FCN32.add(Conv2D(128,(3, 3), activation='relu', name = 'conv2_2',padding='same'))
FCN32.add(MaxPooling2D(pool_size=(2,2), strides = (2,2), name = 'block2_pool'))

#block3
FCN32.add(Conv2D(256,(3, 3), activation='relu', name = 'conv3_1',padding='same'))
FCN32.add(Conv2D(256,(3, 3), activation='relu', name = 'conv3_2',padding='same'))
FCN32.add(Conv2D(256,(3, 3), activation='relu', name = 'conv3_3',padding='same'))
FCN32.add(MaxPooling2D(pool_size=(2,2), strides = (2,2), name = 'block3_pool'))

#block4
Example #19
X_train = X_train.reshape(X_train.shape[0], 28, 28, 1)
X_test = X_test.reshape(X_test.shape[0], 28, 28, 1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255

y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)

model = Sequential()

model.add(
    Conv2D(32, (3, 3),
           activation='relu',
           padding='same',
           input_shape=(28, 28, 1)))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.25))
model.add(Dense(10, activation='softmax'))

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
validation_generator = validation_datagen.flow_from_directory(
    validation_data_dir,
    color_mode='grayscale',
    target_size=(img_rows, img_cols),
    batch_size=batch_size,
    class_mode='categorical',
    shuffle=True)

model = Sequential()

# Block-1

model.add(
    Conv2D(32, (3, 3),
           padding='same',
           kernel_initializer='he_normal',
           input_shape=(img_rows, img_cols, 1)))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(
    Conv2D(32, (3, 3),
           padding='same',
           kernel_initializer='he_normal',
           input_shape=(img_rows, img_cols, 1)))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))

# Block-2
Example #21
    img = cv2.imread(DATA_PATH+"/train/Parasitized/{}".format(file))
    img = cv2.resize(img,dsize=(130,130),interpolation=cv2.INTER_AREA)
    cv2.imwrite(os.getcwd()+"/train/1/{}".format(file),img)
for file in tqdm(uni):
    img = cv2.imread(DATA_PATH+"/train/Uninfected/{}".format(file))
    img = cv2.resize(img,dsize=(130,130),interpolation=cv2.INTER_AREA)
    cv2.imwrite(os.getcwd()+"/train/0/{}".format(file),img)


# Deep Neural Network

# In[ ]:


model = Sequential()
model.add(Conv2D(filters=64,kernel_size=(5,5),activation='relu',input_shape=IMG_SIZE))
model.add(Conv2D(filters=32,kernel_size=(3,3),activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(BatchNormalization())
model.add(Conv2D(filters=16,kernel_size=(5,5),activation='relu'))
model.add(Conv2D(filters=8,kernel_size=(5,5),activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Conv2D(filters=4,kernel_size=(3,3),activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(BatchNormalization())

model.add(Flatten())
model.add(Dense(256,activation='relu'))
model.add(Dropout(0.25))
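
# The excerpt stops before the output layer; a sketch of a typical binary
# classification head for the Parasitized/Uninfected setup above (the sigmoid
# output and the compile settings are assumptions).
model.add(Dense(1, activation='sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.summary()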
    return mushroom_files, mushroom_targets

train_files, train_targets = load_dataset('data/train')
valid_files, valid_targets = load_dataset('data/valid')
test_files, test_targets = load_dataset('data/test')

train_tensors = paths_to_tensor(train_files).astype('float32')/255
valid_tensors = paths_to_tensor(valid_files).astype('float32')/255
test_tensors = paths_to_tensor(test_files).astype('float32')/255

print(train_tensors.shape)

model = Sequential()

model.add(BatchNormalization(input_shape=(150, 150, 3)))
model.add(Conv2D(filters=16, kernel_size=3, kernel_initializer='lecun_normal', activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(BatchNormalization())

model.add(Conv2D(filters=32, kernel_size=3, kernel_initializer='lecun_normal', activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(BatchNormalization())

model.add(Conv2D(filters=64, kernel_size=3, kernel_initializer='lecun_normal', activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(BatchNormalization())

model.add(Conv2D(filters=128, kernel_size=3, kernel_initializer='lecun_normal', activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(BatchNormalization())
def define_NN_architecture():
    wavelet_inputs = Input(shape=(248, 16, 1), name='wavelet_input')

    rms_inputs = Input(shape=(16, ), name='rms_input')
    RMS_out = BatchNormalization(
                        momentum=0.99,
                        epsilon=0.001,
                        center=True,
                        scale=True,
                        beta_initializer='zeros',
                        gamma_initializer='ones',
                        moving_mean_initializer='zeros',
                        moving_variance_initializer='ones'
                        )(rms_inputs)

    x = Conv2D(
                        32,
                        (3, 3),
                        padding='same',
                        )(wavelet_inputs)

    x = BatchNormalization(
                        momentum=0.99,
                        epsilon=0.001,
                        center=True,
                        scale=True,
                        beta_initializer='zeros',
                        gamma_initializer='ones',
                        moving_mean_initializer='zeros',
                        moving_variance_initializer='ones'
                        )(x)
    x = ReLU()(x)

    x_parallel = x

    x_parallel = MaxPooling2D((2, 2), padding='same')(x_parallel)

    x = Conv2D(
                        32,
                        (3, 3),
                        padding='same',
                        )(x)
    x = BatchNormalization(
                        momentum=0.99,
                        epsilon=0.001,
                        center=True,
                        scale=True,
                        beta_initializer='zeros',
                        gamma_initializer='ones',
                        moving_mean_initializer='zeros',
                        moving_variance_initializer='ones'
                        )(x)
    x = ReLU()(x)
    x = Dropout(0.5)(x)
    x = Conv2D(
                        32,
                        (3, 3),
                        strides=(2, 2),
                        padding='same',
                        )(x)

    x = keras.layers.concatenate([x, x_parallel], axis=3)

    x_parallel = x

    x_parallel = MaxPooling2D((2, 2), padding='same')(x_parallel)

    x = BatchNormalization(
                        momentum=0.99,
                        epsilon=0.001,
                        center=True,
                        scale=True,
                        beta_initializer='zeros',
                        gamma_initializer='ones',
                        moving_mean_initializer='zeros',
                        moving_variance_initializer='ones'
                        )(x)
    x = ReLU()(x)
    x = Dropout(0.5)(x)
    x = Conv2D(
                        32,
                        (3, 3),
                        padding='same',
                        )(x)

    x = BatchNormalization(
                        momentum=0.99,
                        epsilon=0.001,
                        center=True,
                        scale=True,
                        beta_initializer='zeros',
                        gamma_initializer='ones',
                        moving_mean_initializer='zeros',
                        moving_variance_initializer='ones'
                        )(x)
    x = ReLU()(x)
    x = Dropout(0.5)(x)
    x = Conv2D(
                        32,
                        (3, 3),
                        strides=(2, 2),
                        padding='same',
                        )(x)

    x = keras.layers.concatenate([x, x_parallel], axis=3)

    x = Flatten()(x)
    x = BatchNormalization(
                        momentum=0.99,
                        epsilon=0.001,
                        center=True,
                        scale=True,
                        beta_initializer='zeros',
                        gamma_initializer='ones',
                        moving_mean_initializer='zeros',
                        moving_variance_initializer='ones'
                        )(x)
    wavelet_out = ReLU()(x)

    combined_inputs = keras.layers.concatenate(
                [RMS_out, wavelet_out]
            )

    x = Dense(120,
              activation='relu'
              )(combined_inputs)

    x = Dropout(0.5)(x)

    predictions = Dense(18,
                        activation='softmax'
                        )(x)

    model = Model(inputs=[wavelet_inputs, rms_inputs],
                  outputs=predictions)
    return model
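
# A minimal usage sketch for define_NN_architecture() (the loss, optimizer and
# metric choices are assumptions consistent with the 18-way softmax output).
model = define_NN_architecture()
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.summary()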
Example #24
'''how many times should it repeat'''
epochs = 100    

'''dimensions of the images'''
img_rows, img_cols = 64, 64

"""Size"""
TrainSize = 606
TestSize =  140


# Initialising the CNN
classifier = Sequential()

# Step 1 - Convolution
classifier.add(Conv2D(32, (3, 3), input_shape = (img_rows, img_cols, 1), activation = 'relu'))
classifier.add(Dropout(rate = 0.3))

# Step 2 - Pooling
classifier.add(MaxPooling2D(pool_size = (2, 2)))
 
# Adding a third convolutional layer
classifier.add(Conv2D(64, (3, 3), activation='relu'))
classifier.add(MaxPooling2D(pool_size=(2, 2)))
classifier.add(Dropout(rate = 0.3))

# Step 3 - Flattening
classifier.add(Flatten())

# Step 4 - Full connection
classifier.add(Dense(units = 64, activation = 'relu'))
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.callbacks import TensorBoard
import numpy as np
import os
import random

# Setting up the model structure
model = Sequential()

model.add(
    Conv2D(32, (3, 3),
           padding='same',
           input_shape=(176, 200, 3),
           activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))

model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))

model.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))

model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.5))
from keras.utils.np_utils import to_categorical
from keras.optimizers import SGD
from keras.models import Model
from keras.layers import Dense, Conv2D, MaxPooling2D, Input, Reshape
from keras.layers import BatchNormalization, Dropout, Flatten, Activation, GlobalAveragePooling2D
from keras import regularizers
from keras.optimizers import Adam, Adadelta, RMSprop, SGD
from sklearn.model_selection import train_test_split  # needed for the split below
import matplotlib.pyplot as plt
import numpy as np
size=96
train_x = np.load("train.npy")
train_y = np.load("label.npy")
train_y=to_categorical(train_y)
X_train, X_test, y_train, y_test = train_test_split(train_x, train_y, test_size=0.3, random_state=0)
print(X_train.shape,X_test.shape,y_train.shape,y_test.shape)
input_data = Input(shape=[size ,size, 3])
conv1 = Conv2D(filters=32 , kernel_size=[3 , 3] , padding='same' , kernel_initializer='he_normal' , use_bias=True , activation='relu')(input_data)
conv1 = BatchNormalization()(conv1)
conv2 = Conv2D(filters=32 , kernel_size=[3 , 3] , padding='same' , kernel_initializer='he_normal' , use_bias=True , activation='relu')(conv1)
conv2 = BatchNormalization()(conv2)
pool1 = MaxPooling2D(pool_size=[2 ,2] , strides=[2 , 2])(conv2)
pool1 = Dropout(0.1)(pool1)

conv3 = Conv2D(filters=64 , kernel_size=[3 , 3] , padding='same' , kernel_initializer='he_normal' , use_bias=True , activation='relu')(pool1)
conv3 = BatchNormalization()(conv3)
conv4 = Conv2D(filters=64 , kernel_size=[3 , 3] , padding='same' , kernel_initializer='he_normal' , use_bias=True , activation='relu')(conv3)
conv4 = BatchNormalization()(conv4)
pool2 = MaxPooling2D(pool_size=[2 , 2] ,strides=[2 , 2])(conv4)
pool2 = Dropout(0.1)(pool2)

conv5 = Conv2D(filters=128 , kernel_size=[3 , 3] , padding='same' , kernel_initializer='he_normal' , use_bias=True , activation='relu')(pool2)
conv5 = BatchNormalization()(conv5)
Example #27
# dimensions of images.
img_width, img_height = 64,64 

train_data_dir = 'data/train'
validation_data_dir = 'data/test'
nb_train_samples = 25473
nb_validation_samples = 7000
epochs = 50
batch_size = 64

if K.image_data_format() == 'channels_first':
    input_shape = (3, img_width, img_height)
else:
    input_shape = (img_width, img_height, 3)
model = Sequential()
convout1 = Conv2D(32, kernel_size=6, strides=2, input_shape=input_shape)
model.add(convout1)
activ1 = Activation('relu')
model.add(activ1)
convout2 = Conv2D(64, kernel_size=5, strides=1)
model.add(convout2)
activ2 = Activation('relu')
model.add(activ2)
pool1 = MaxPooling2D(pool_size=(3, 3), strides=1)
model.add(pool1)

convout3 = Conv2D(128, kernel_size=4, strides=2)
model.add(convout3)
activ3 = Activation('relu')
model.add(activ3)
convout4 = Conv2D(128, kernel_size=3, strides=1)
# Installing Keras
# pip install --upgrade keras

# Part 1 - Building the CNN

# Importing the Keras libraries and packages
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from keras.optimizers import Adam

# Initialising the CNN
classifier = Sequential()

# Step 1 - Convolution
classifier.add(Conv2D(32, (3, 3), padding = 'same', input_shape = (32, 32, 3), activation = 'relu'))

# Step 2 - Pooling
classifier.add(MaxPooling2D(pool_size = (2, 2)))

# Adding more convolutional layers
classifier.add(Conv2D(32, (3, 3), padding = 'same', activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
classifier.add(Conv2D(64, (3, 3), padding = 'same', activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))


# Step 3 - Flattening
classifier.add(Flatten())

# Step 4 - Full connection
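
# The excerpt is cut off at Step 4; a sketch of a typical fully connected head
# and compile step for this 32x32 classifier (the unit counts, class count and
# learning rate are assumptions).
classifier.add(Dense(units=512, activation='relu'))
classifier.add(Dropout(0.5))
classifier.add(Dense(units=10, activation='softmax'))

classifier.compile(optimizer=Adam(lr=0.001),
                   loss='categorical_crossentropy',
                   metrics=['accuracy'])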
Example #29
x_test = [np.matrix(xi) for xi in x_test]
pca = TruncatedSVD(n_components=10).fit(x_train)
x_train = pca.transform(X_train)
x_test = pca.transform(X_test)
"""
############
## Construct model
#############
## check if first part can be trained using unsupervised set.
###############################
# 1. first learn representation
## get physionet data
# 2. then supervised learning.
model = Sequential()
model.add(
    Conv2D(16, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
model.add(Conv2D(8, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))  # dropout rate must lie in [0, 1); 0.2 is assumed here
model.add(Flatten())
model.add(Dense(8, activation='relu'))
model.add(Dropout(0.2))  # dropout rate must lie in [0, 1); 0.2 is assumed here
model.add(Dense(num_classes, activation='softmax'))

model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta(lr=0.000001),
              metrics=['accuracy'])

model.fit(x_train,
          y_train,
          batch_size=batch_size,
Example #30
def conv_block(input_tensor,
               kernel_size,
               filters,
               stage,
               block,
               strides=(2, 2)):
    '''conv_block is the block that has a conv layer at the shortcut
    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of the middle conv layer on the main path
        filters: list of integers, the nb_filters of the 3 conv layers on the main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
    Note that from stage 3, the first conv layer on the main path uses strides=(2, 2),
    and the shortcut should use strides=(2, 2) as well.
    '''
    eps = 1.1e-5
    nb_filter1, nb_filter2, nb_filter3 = filters
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'
    scale_name_base = 'scale' + str(stage) + block + '_branch'

    # Keras 2 API: kernel sizes as tuples, strides=/use_bias= keywords, and the
    # add() layer function (from keras.layers) instead of merge(mode='sum')
    x = Conv2D(nb_filter1, (1, 1),
               strides=strides,
               name=conv_name_base + '2a',
               use_bias=False)(input_tensor)
    x = BatchNormalization(epsilon=eps, axis=bn_axis,
                           name=bn_name_base + '2a')(x)
    x = Scale(axis=bn_axis, name=scale_name_base + '2a')(x)
    x = Activation('relu', name=conv_name_base + '2a_relu')(x)

    x = ZeroPadding2D((1, 1), name=conv_name_base + '2b_zeropadding')(x)
    x = Conv2D(nb_filter2, (kernel_size, kernel_size),
               name=conv_name_base + '2b',
               use_bias=False)(x)
    x = BatchNormalization(epsilon=eps, axis=bn_axis,
                           name=bn_name_base + '2b')(x)
    x = Scale(axis=bn_axis, name=scale_name_base + '2b')(x)
    x = Activation('relu', name=conv_name_base + '2b_relu')(x)

    x = Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c', use_bias=False)(x)
    x = BatchNormalization(epsilon=eps, axis=bn_axis,
                           name=bn_name_base + '2c')(x)
    x = Scale(axis=bn_axis, name=scale_name_base + '2c')(x)

    shortcut = Conv2D(nb_filter3, (1, 1),
                      strides=strides,
                      name=conv_name_base + '1',
                      use_bias=False)(input_tensor)
    shortcut = BatchNormalization(epsilon=eps,
                                  axis=bn_axis,
                                  name=bn_name_base + '1')(shortcut)
    shortcut = Scale(axis=bn_axis, name=scale_name_base + '1')(shortcut)

    x = add([x, shortcut], name='res' + str(stage) + block)
    x = Activation('relu', name='res' + str(stage) + block + '_relu')(x)
    return x
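
# A minimal usage sketch for conv_block (the input shape, bn_axis value and the
# custom Scale layer are assumptions; in the original file they are defined
# alongside this function).
# bn_axis = 3  # channels-last
# inputs = Input(shape=(56, 56, 64))
# x = conv_block(inputs, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))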