Example #1
y_test[0]

"""It is cat , coz (we saw from googlr the image @ 3rd index position is Cat.

[CIFAR-10 dataset](https://www.cs.toronto.edu/~kriz/cifar.html)

## **Let's Start Our CNN Model**
"""

'Convolutional Layer'
model= Sequential()
model.add(Conv2D(filters=32, kernel_size=(3,3), padding='same', activation='relu', input_shape=[32,32,3]))
model.add(Conv2D(filters=32, kernel_size=(3,3), padding='same', activation='relu'))

model.add(MaxPool2D(pool_size=(2,2), strides=2, padding='valid'))

model.add(Dropout(0.5))

'Flattening Layer'
model.add(Flatten())

'Dense Layer'
model.add(Dense(units=128, activation='relu'))

'Output Layer'
model.add(Dense(units=10, activation='softmax'))
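
'Compile & Train (hedged sketch)'
# Not part of the original notebook: a minimal compile/fit sketch for the
# model above, assuming integer CIFAR-10 labels and x_train/y_train from an
# earlier cell.
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
# model.fit(x_train, y_train, epochs=10, batch_size=64, validation_split=0.1)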

"""Filter: No of usage of filter depends on usage and hit and trial 

Kernal Size= 3*3  means (3*3 matrix Kernal)
Example #2
model_log_dir = os.path.join(log_dir, "model3")

# Model params
lr = 0.001
optimizer = Adam(learning_rate=lr)  # 'lr' is deprecated in recent Keras
epochs = 10
batch_size = 256

# Define the DNN
input_img = Input(shape=x_train.shape[1:])

x = Conv2D(filters=32, kernel_size=5, padding='same')(input_img)
x = Activation("relu")(x)
x = Conv2D(filters=32, kernel_size=5, padding='same')(x)
x = Activation("relu")(x)
x = MaxPool2D()(x)

x = Conv2D(filters=64, kernel_size=5, padding='same')(x)
x = Activation("relu")(x)
x = Conv2D(filters=64, kernel_size=5, padding='same')(x)
x = Activation("relu")(x)
x = MaxPool2D()(x)

x = Flatten()(x)
x = Dense(units=128)(x)
x = Activation("relu")(x)
x = Dense(units=num_classes)(x)
y_pred = Activation("softmax")(x)

# Build the model
model = Model(inputs=[input_img], outputs=[y_pred])
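
# Hedged continuation (not in the original snippet): wire up the params
# defined above; model_log_dir feeds a TensorBoard callback, and
# x_train/y_train are assumed from earlier in the script.
from tensorflow.keras.callbacks import TensorBoard

model.compile(loss="categorical_crossentropy",
              optimizer=optimizer,
              metrics=["accuracy"])
tb_cb = TensorBoard(log_dir=model_log_dir)
# model.fit(x_train, y_train, epochs=epochs, batch_size=batch_size,
#           validation_split=0.1, callbacks=[tb_cb])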
Example #3
File: test.py  Project: TienNHM/CRNN-OCR
        break

#%%
# pad each output label to maximum text length
valid_padded_txt = pad_sequences(valid_txt,
                                 maxlen=max_label_len,
                                 padding='post',
                                 value=len(char_list))
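
# For illustration only (hypothetical numbers): with max_label_len = 6 and
# len(char_list) = 30, pad_sequences([[4, 11, 2]], maxlen=6, padding='post',
# value=30) returns array([[ 4, 11,  2, 30, 30, 30]]).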

#%%
inputs = Input(shape=(32, 128, 1))

# convolution layer with kernel size (3,3)
conv_1 = Conv2D(64, (3, 3), activation='relu', padding='same')(inputs)
# pooling layer with kernel size (2,2)
pool_1 = MaxPool2D(pool_size=(2, 2), strides=2)(conv_1)

conv_2 = Conv2D(128, (3, 3), activation='relu', padding='same')(pool_1)
pool_2 = MaxPool2D(pool_size=(2, 2), strides=2)(conv_2)

conv_3 = Conv2D(256, (3, 3), activation='relu', padding='same')(pool_2)

conv_4 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv_3)
# pooling layer with kernel size (2,1)
pool_4 = MaxPool2D(pool_size=(2, 1))(conv_4)

conv_5 = Conv2D(512, (3, 3), activation='relu', padding='same')(pool_4)
# Batch normalization layer
batch_norm_5 = BatchNormalization()(conv_5)

conv_6 = Conv2D(512, (3, 3), activation='relu', padding='same')(batch_norm_5)
Example #4
 def nn_model(self):
     obs_input = Input(self.state_dim, name="im_obs")
     conv1 = Conv2D(
         filters=64,
         kernel_size=(3, 3),
         strides=(1, 1),
         padding="same",
         input_shape=self.state_dim,
         data_format="channels_last",
         activation="relu",
     )(obs_input)
     pool1 = MaxPool2D(pool_size=(3, 3), strides=1)(conv1)
     conv2 = Conv2D(
         filters=32,
         kernel_size=(3, 3),
         strides=(1, 1),
         padding="valid",
         activation="relu",
     )(pool1)
     pool2 = MaxPool2D(pool_size=(3, 3), strides=1)(conv2)
     conv3 = Conv2D(
         filters=16,
         kernel_size=(3, 3),
         strides=(1, 1),
         padding="valid",
         activation="relu",
     )(pool2)
     pool3 = MaxPool2D(pool_size=(3, 3), strides=1)(conv3)
     conv4 = Conv2D(
         filters=8,
         kernel_size=(3, 3),
         strides=(1, 1),
         padding="valid",
         activation="relu",
     )(pool3)
     pool4 = MaxPool2D(pool_size=(3, 3), strides=1)(conv4)
     flat = Flatten()(pool4)
     dense1 = Dense(16,
                    activation="relu",
                    kernel_initializer=self.weight_initializer)(flat)
     dropout1 = Dropout(0.3)(dense1)
     dense2 = Dense(8,
                    activation="relu",
                    kernel_initializer=self.weight_initializer)(dropout1)
     dropout2 = Dropout(0.3)(dense2)
     # action_dim[0] = 2
     output_val = Dense(
         self.action_dim[0],
         activation="relu",
         kernel_initializer=self.weight_initializer,
     )(dropout2)
     # Scale & clip x[i] to be in range [0, action_bound[i]]
     action_bound = copy.deepcopy(self.action_bound)
     mu_output = Lambda(
         lambda x: tf.clip_by_value(x * action_bound, 1e-9, action_bound),
         name="mu_output",
     )(output_val)
     std_output_1 = Dense(
         self.action_dim[0],
         activation="softplus",
         kernel_initializer=self.weight_initializer,
     )(dropout2)
     std_output = Lambda(lambda x: tf.clip_by_value(
         x * action_bound, 1e-9, action_bound / 2, name="std_output"))(
             std_output_1)
     return tf.keras.models.Model(inputs=obs_input,
                                  outputs=[mu_output, std_output],
                                  name="Actor")
Example #5
def attention_block(input,
                    input_channels=None,
                    output_channels=None,
                    encoder_depth=1):

    p = 1
    t = 2
    r = 1

    if input_channels is None:
        input_channels = input.get_shape()[-1]
    if output_channels is None:
        output_channels = input_channels

    # First Residual Block
    for i in range(p):
        input = residual_block(input)

    # Trunk Branch
    output_trunk = input
    for i in range(t):
        output_trunk = residual_block(output_trunk)

    # Soft Mask Branch

    ## encoder
    ### first down sampling
    output_soft_mask = MaxPool2D(padding='same')(input)
    for i in range(r):
        output_soft_mask = residual_block(output_soft_mask)

    skip_connections = []
    for i in range(encoder_depth - 1):
        ## skip connections
        output_skip_connection = residual_block(output_soft_mask)
        skip_connections.append(output_skip_connection)
        # print ('skip shape:', output_skip_connection.get_shape())

        ## down sampling
        output_soft_mask = MaxPool2D(padding='same')(output_soft_mask)
        for _ in range(r):
            output_soft_mask = residual_block(output_soft_mask)

    skip_connections = list(reversed(skip_connections))
    for i in range(encoder_depth - 1):
        for _ in range(r):
            output_soft_mask = residual_block(output_soft_mask)
        output_soft_mask = UpSampling2D()(output_soft_mask)
        output_soft_mask = Add()([output_soft_mask, skip_connections[i]])

    for i in range(r):
        output_soft_mask = residual_block(output_soft_mask)
    output_soft_mask = UpSampling2D()(output_soft_mask)

    ## Output
    output_soft_mask = Conv2D(input_channels, (1, 1))(output_soft_mask)
    output_soft_mask = Conv2D(input_channels, (1, 1))(output_soft_mask)
    output_soft_mask = Activation('sigmoid')(output_soft_mask)

    # Attention: (1 + output_soft_mask) * output_trunk
    output = Lambda(lambda x: x + 1)(output_soft_mask)

    output = Multiply()([output, output_trunk])

    # Last Residual Block
    for i in range(p):
        output = residual_block(output)

    return output
Example #6
train_path = './train'
test_path = './test'

train_gen, val_gen = train_val_generator(16,train_path,test_path)
input_shape = (64,64,3)

physical_devices = tf.config.list_physical_devices('GPU')
try:
  tf.config.experimental.set_memory_growth(physical_devices[0], True)
except:
  pass

X_in = Input(input_shape)
X = Conv2D(64, 3, activation = 'relu',padding = 'same')(X_in)
X = MaxPool2D(pool_size = (2,2))(X)
X = Dropout(0.5)(X)
X = Conv2D(32, 3, activation = 'relu',padding = 'same')(X)
X = MaxPool2D(pool_size = (2,2))(X)
X = Dropout(0.5)(X)
X = Conv2D(16, 3, activation = 'relu',padding = 'same')(X)
X = MaxPool2D(pool_size = (2,2))(X)
X = Flatten()(X)
X = Dense(32,activation = 'relu')(X)
X = Dropout(0.5)(X)
X_out = Dense(2, activation = 'softmax')(X)
tr_model = Model(X_in, X_out)

checkpoint = ModelCheckpoint('vanilla.h5', monitor = 'val_accuracy', verbose = 1,save_best_only = True)
early_stop = EarlyStopping(monitor = 'val_accuracy', min_delta = 0, patience = 5,verbose = 1,mode = 'auto')
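
# Hedged continuation (not in the original snippet): compile and fit with the
# callbacks defined above; the epoch count is an illustrative value.
tr_model.compile(optimizer='adam',
                 loss='categorical_crossentropy',
                 metrics=['accuracy'])
# tr_model.fit(train_gen, validation_data=val_gen, epochs=30,
#              callbacks=[checkpoint, early_stop])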
Example #7
def cnn_train():
    from gensim.models.word2vec import Word2Vec
    import numpy as np
    import pandas as pd
    import os
    import time  # used by time.time() further down
    from sklearn.feature_extraction.text import CountVectorizer
    from sklearn.metrics import confusion_matrix, accuracy_score
    from sklearn.model_selection import train_test_split

    from keras.preprocessing.text import Tokenizer
    from keras.preprocessing.sequence import pad_sequences
    from keras.initializers import Constant
    from keras.models import Model
    from keras.layers import Input, Reshape, concatenate
    from keras.utils.np_utils import to_categorical
    import re
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import Embedding, Concatenate, Dropout, Conv2D, MaxPool2D, Dense, Flatten
    from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
    from tensorflow.keras.models import load_model
    import pickle
    import matplotlib.pyplot as plt

    make_dir()
    print('Loading train data.')
    #f.write("Loading train data")

    ##### Modeling ######
    #train data load
    data = pd.read_csv('./train_data/single_20110224-20210224.csv')

    if os.path.isfile('./model/word2vec_100.model'):
        #Word2Vec_model=Worid2Vec.load_word2vec_format('./model/word2vec/model_100',binary=False, encoding='utf-8')
        Word2Vec_model = Word2Vec.load('./model/word2vec_100.model')
    else:
        Word2Vec_model = W2V()

    data.columns.to_list()
    data = data.drop_duplicates()
    del data['Unnamed: 0']
    print('Successfully loaded train data.')
    #f.write("Successfully loaded train data.")

    #tokenizer
    from keras.preprocessing.text import Tokenizer

    print("tokenizer를 생성합니다.")
    tokenizer = Tokenizer(num_words=1000, filters=',')
    print("tokenizer에게 1000개의 단어에 대한 dictionary를 만들도록 fit합니다")
    tokenizer.fit_on_texts(data['키워드'])
    print("tokenizer fit에 성공하였습니다.")

    # 532171

    print("만들어진 dictionary를 기준으로 텍스트를 숫자형으로 변환합니다.")
    text_sequence = tokenizer.texts_to_sequences(data['키워드'])
    max_length = max(len(l) for l in text_sequence)
    from keras.preprocessing.sequence import pad_sequences

    print(max_length, "를 최대길이로 pad_sequence를 시작합니다.")
    pad_text = pad_sequences(text_sequence, maxlen=max_length)
    y = pd.get_dummies(data['주제']).values

    from sklearn.model_selection import train_test_split
    x_train, x_test, y_train, y_test = train_test_split(pad_text,
                                                        y,
                                                        test_size=0.1)

    vocab_size = len(
        tokenizer.word_index) + 1  # add 1 for the 0 used as padding
    print("Finished pad_sequences; proceeding to embedding.")
    embedding_dim = 100
    input_length = max_length  # currently 1410
    print(input_length)
    max_features = 2000

    num_words = min(max_features, len(tokenizer.word_index)) + 1
    print(num_words)

    # first create a matrix of zeros, this is our embedding matrix
    embedding_matrix = np.zeros((num_words, embedding_dim))

    def get_vector(word):
        if word in Word2Vec_model.wv.index_to_key:
            return Word2Vec_model.wv[word]
        else:
            return None

    # for each word in our tokenizer, try to find that word in our w2v model
    for word, i in tokenizer.word_index.items():
        if i > max_features:
            continue
        embedding_vector = get_vector(word)
        if embedding_vector is not None:
            # we found the word - add that word's vector to the matrix
            embedding_matrix[i] = embedding_vector
        else:
            # doesn't exist, assign a random vector
            embedding_matrix[i] = np.random.randn(embedding_dim)

    sequence_length = max_length
    num_filters = 100

    inputs_2 = Input(shape=(sequence_length, ), dtype='int32')

    # note the `trainable=False`, later we will make this layer trainable
    embedding_layer_2 = Embedding(
        num_words,
        embedding_dim,
        embeddings_initializer=Constant(embedding_matrix),
        input_length=sequence_length,
        trainable=False)(inputs_2)

    reshape_2 = Reshape((sequence_length, embedding_dim, 1))(embedding_layer_2)

    conv_0_2 = Conv2D(num_filters,
                      kernel_size=(3, embedding_dim),
                      activation='relu',
                      kernel_regularizer='l2')(reshape_2)
    conv_1_2 = Conv2D(num_filters,
                      kernel_size=(4, embedding_dim),
                      activation='relu',
                      kernel_regularizer='l2')(reshape_2)
    conv_2_2 = Conv2D(num_filters,
                      kernel_size=(5, embedding_dim),
                      activation='relu',
                      kernel_regularizer='l2')(reshape_2)

    maxpool_0_2 = MaxPool2D(pool_size=(sequence_length - 3 + 1, 1),
                            strides=(1, 1),
                            padding='valid')(conv_0_2)
    maxpool_1_2 = MaxPool2D(pool_size=(sequence_length - 4 + 1, 1),
                            strides=(1, 1),
                            padding='valid')(conv_1_2)
    maxpool_2_2 = MaxPool2D(pool_size=(sequence_length - 5 + 1, 1),
                            strides=(1, 1),
                            padding='valid')(conv_2_2)

    concatenated_tensor_2 = Concatenate(axis=1)(
        [maxpool_0_2, maxpool_1_2, maxpool_2_2])
    flatten_2 = Flatten()(concatenated_tensor_2)

    dropout_2 = Dropout(0.5)(flatten_2)
    output_2 = Dense(units=7, activation='softmax')(dropout_2)

    model_2 = Model(inputs=inputs_2, outputs=output_2)
    model_2.compile(loss='categorical_crossentropy',
                    optimizer='adam',
                    metrics=['accuracy'])

    start = time.time()
    batch_size = 32
    early_stopping = EarlyStopping(monitor='val_loss',
                                   mode='auto',
                                   verbose=1,
                                   patience=10)
    history = model_2.fit(x_train,
                          y_train,
                          epochs=30,
                          batch_size=batch_size,
                          verbose=1,
                          validation_split=0.2,
                          callbacks=[early_stopping])

    filename = './model/cnn.h5'

    #with open(filename, 'wb') as filehandle:
    #    pickle.dump(model_2,filehandle, protocol=pickle.HIGHEST_PROTOCOL)
    model_2.save(filename)
    model = load_model(filename)
    print("\n\n#### 모델 학습 완료 ####")

    from keras.models import model_from_json

    model_json = model_2.to_json()
    with open("./model/cnn_model.json", "w") as json_file:
        json_file.write(model_json)

    model_2.save_weights("./model/cnn_model.h5")

    print("time : ", time.time() - start)
    print("cnn 모델 학습을 성공적으로 마무리하였습니다.")
    return "cnn 모델 학습을 성공적으로 마무리하였습니다."
Example #8
def model_extractor(activation_func, weight_decay=1e-4):
    # Creating a ZFNet Classifier
    model = Sequential()

    #Instantiating Layer 1
    model.add(
        Conv2D(96,
               kernel_size=(7, 7),
               strides=(2, 2),
               activation=activation_func,
               padding='same',
               kernel_constraint=unit_norm(),
               kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(MaxPool2D(pool_size=(3, 3), strides=(2, 2), name='pool1'))
    model.add(BatchNormalization())

    # #Instantiating Layer 2
    model.add(
        Conv2D(256,
               kernel_size=(5, 5),
               strides=(2, 2),
               activation=activation_func,
               padding='same',
               kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(MaxPool2D(pool_size=(2, 2), strides=(1, 1), name='pool2'))
    model.add(BatchNormalization())

    # #Instantiating Layer 3
    model.add(
        Conv2D(384,
               kernel_size=(3, 3),
               strides=(1, 1),
               activation=activation_func,
               padding='same',
               kernel_regularizer=regularizers.l2(weight_decay)))

    # #Instantiating Layer 4
    model.add(
        Conv2D(384,
               kernel_size=(3, 3),
               strides=(1, 1),
               activation=activation_func,
               padding='same',
               kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(BatchNormalization())

    # #Instantiating Layer 5
    model.add(
        Conv2D(256,
               kernel_size=(3, 3),
               strides=(1, 1),
               activation=activation_func,
               padding='same',
               kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(MaxPool2D(pool_size=(2, 2), strides=(1, 1), name='pool3'))
    model.add(BatchNormalization())

    model.add(Flatten())

    #Instantiating Layer 6
    model.add(Dense(4096, activation=activation_func))

    #Instantiating Layer 7
    model.add(Dense(4096, activation=activation_func))

    #Output Layer
    model.add(Dense(10, activation='softmax'))

    return model
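
# A hedged usage sketch (assumed input size; the snippet itself never fixes one):
# model = model_extractor('relu')
# model.build(input_shape=(None, 224, 224, 3))
# model.summary()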
Example #9
    def build(self):
        input_sig = Input(shape=self.input_shape)
        x = self._make_stem(input_sig,
                            stem_width=self.stem_width,
                            deep_stem=self.deep_stem)

        if self.preact is False:
            x = layers.BatchNormalization(axis=self.channel_axis,
                                          epsilon=1.001e-5)(x)
            x = layers.Activation(self.active)(x)
        if self.verbose: print('stem_out', x.shape)

        x = MaxPool2D(pool_size=3,
                      strides=2,
                      padding='same',
                      data_format='channels_last')(x)
        if self.verbose: print('MaxPool2D out', x.shape)

        if self.preact is True:
            x = layers.BatchNormalization(axis=self.channel_axis,
                                          epsilon=1.001e-5)(x)
            x = layers.Activation(self.active)(x)

        x = self._make_layer(x,
                             blocks=self.blocks_set[0],
                             filters=64,
                             stride=1,
                             is_first=False)
        if self.verbose: print('-' * 5, 'layer1 out', x.shape, '-' * 5)
        x = self._make_layer(x,
                             blocks=self.blocks_set[1],
                             filters=128,
                             stride=2)
        if self.verbose: print('-' * 5, 'layer2 out', x.shape, '-' * 5)
        x = self._make_layer(x,
                             blocks=self.blocks_set[2],
                             filters=256,
                             stride=2)
        if self.verbose: print('-' * 5, 'layer3 out', x.shape, '-' * 5)
        x = self._make_layer(x,
                             blocks=self.blocks_set[3],
                             filters=512,
                             stride=2)
        if self.verbose: print('-' * 5, 'layer4 out', x.shape, '-' * 5)

        concats = GlobalAveragePooling2D(name='avg_pool')(x)
        if self.verbose: print("pool_out:", concats.shape)

        if self.dropout_rate > 0:
            concats = Dropout(self.dropout_rate, noise_shape=None)(concats)

        fc_out = Dense(self.n_classes,
                       kernel_initializer='he_normal',
                       use_bias=False,
                       name='fc_NObias')(concats)
        if self.verbose: print("fc_out:", fc_out.shape)

        if self.fc_activation:
            fc_out = Activation(self.fc_activation)(fc_out)

        model = models.Model(inputs=input_sig, outputs=fc_out)

        if self.verbose:
            print("Resnest builded with input {}, output{}".format(
                input_sig.shape, fc_out.shape))
        if self.verbose: print('-------------------------------------------')
        if self.verbose: print('')

        return model
Example #10
    filters=16,
    kernel_size=[3, 3],
    strides=[2, 2],
    padding="same",
    dilation_rate=[1, 1],
    kernel_initializer=Constant(
        np.load(
            'weights_200/siamese_neural_congas_Mixed_6a_Branch_1_Conv2d_1c_3x3_weights'
        ).transpose(1, 2, 3, 0)),
    bias_initializer=Constant(
        np.load(
            'weights_200/siamese_neural_congas_1_Mixed_6a_Branch_1_Conv2d_1c_3x3_Conv2D_bias'
        )))(relu2_4)
relu2_5 = ReLU(max_value=6.)(conv2_5)

maxpool2_1 = MaxPool2D(pool_size=[3, 3], strides=[2, 2],
                       padding='same')(relu1_3)

concat2_1 = Concatenate(axis=3)([relu2_2, relu2_5, maxpool2_1])

# Block_03
conv3_1 = Conv2D(
    filters=32,
    kernel_size=[1, 1],
    strides=[1, 1],
    padding="same",
    dilation_rate=[1, 1],
    kernel_initializer=Constant(
        np.load(
            'weights_200/siamese_neural_congas_Mixed7a_Branch_0_Conv2d_0a_1x1_weights'
        ).transpose(1, 2, 3, 0)),
    bias_initializer=Constant(
Example #11
    directory=
    r"F:\Pycharm_projects\pneumonia detection with deep learning\-pneumonia-detection-with-deep-learning\chest_xray\train",
    target_size=(64, 64),
    class_mode="categorical")
model = tf.keras.Sequential()

model.add(Conv2DTranspose(filters=256, strides=1, kernel_size=2))
model.add(BatchNormalization())

model.add(Conv2D(filters=256, kernel_size=1, strides=1, activation="relu"))
model.add(BatchNormalization())

model.add(Conv2D(filters=256, kernel_size=1, strides=1, activation="relu"))
model.add(BatchNormalization())

model.add(MaxPool2D(pool_size=2, strides=1))
model.add(BatchNormalization())

model.add(Conv2DTranspose(filters=128, strides=1, kernel_size=2))
model.add(BatchNormalization())

model.add(Conv2D(filters=128, kernel_size=2, strides=1, activation="relu"))
model.add(BatchNormalization())

model.add(Conv2D(filters=128, kernel_size=2, strides=1, activation="relu"))
model.add(BatchNormalization())

model.add(MaxPool2D(pool_size=2, strides=1))
model.add(BatchNormalization())
model.add(Conv2D(filters=64, kernel_size=2, strides=1, activation="relu"))
model.add(BatchNormalization())
Example #12
def resnet(input_tensor, block_settings, use_bias, weight_decay, trainable, bn_trainable):
	'''
	https://arxiv.org/pdf/1512.03385.pdf
	Bottleneck architecture
	Arguments
		input_tensor:
		block_settings:
			[[64, 64, 256], [3, [2, 2]], [4, [2, 2]], [6, [2, 2]], [3, [2, 2]]] # Resnet 50, pool 64
			[[64, 64, 256], [3, [2, 2]], [4, [2, 2]], [23, [2, 2]], [3, [2, 2]]] # Resnet 101, pool 64
			[[64, 64, 256], [3, [2, 2]], [8, [2, 2]], [36, [2, 2]], [3, [2, 2]]] # Resnet 152, pool 64
		trainable:
	Return
		tensor:
	'''

	filters = np.array(block_settings[0])
	n_C2, strides_C2 = block_settings[1]
	tensors = []

	# C1
	tensor = Conv2D(
		filters=filters[0], 
		kernel_size=[7, 7], 
		strides=[2, 2], 
		padding='same', 
		use_bias=use_bias, 
		kernel_regularizer=regularizers.l2(weight_decay),
		trainable=trainable, 
		name='conv1')(input_tensor)
	tensor = BatchNormalization(trainable=bn_trainable, name='conv1_bn')(tensor)
	tensor = Activation('relu')(tensor)

	# C2
	tensor = MaxPool2D(pool_size=[3, 3], strides=strides_C2, padding='same')(tensor)
	tensor = conv_block(
		input_tensor=tensor, 
		kernel_size=[3, 3], 
		filters=filters, 
		strides=[1, 1], 
		block_name='stg1_blk0_', 
		use_bias=use_bias, 
		weight_decay=weight_decay, 
		trainable=trainable,
		bn_trainable=bn_trainable)
	for n in range(1, n_C2):
		tensor = identity_block(
			input_tensor=tensor, 
			kernel_size=[3, 3], 
			filters=filters, 
			block_name='stg1_blk'+str(n)+'_', 
			use_bias=use_bias, 
			weight_decay=weight_decay,
			trainable=trainable,
			bn_trainable=bn_trainable)

	tensors.append(tensor)

	# C34...
	for c in range(2, 2+len(block_settings[2:])):
		n_C, strides_C = block_settings[c]
		tensor = conv_block(
			input_tensor=tensor, 
			kernel_size=[3, 3], 
			filters=(2**(c-1))*filters, 
			strides=strides_C, 
			block_name='stg'+str(c)+'_blk0_', 
			use_bias=use_bias, 
			weight_decay=weight_decay,
			trainable=trainable,
			bn_trainable=bn_trainable)
		for n in range(1, n_C):
			tensor = identity_block(
				input_tensor=tensor, 
				kernel_size=[3, 3], 
				filters=(2**(c-1))*filters, 
				block_name='stg'+str(c)+'_blk'+str(n)+'_', 
				use_bias=use_bias, 
				weight_decay=weight_decay,
				trainable=trainable,
				bn_trainable=bn_trainable)

		tensors.append(tensor)

	return tensors
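
# A hedged usage sketch (assumes conv_block/identity_block exist in the same
# project): the ResNet-50 settings below yield one tensor per stage (C2..C5).
# C2, C3, C4, C5 = resnet(Input(shape=(224, 224, 3)),
#                         [[64, 64, 256], [3, [2, 2]], [4, [2, 2]], [6, [2, 2]], [3, [2, 2]]],
#                         use_bias=False, weight_decay=1e-4,
#                         trainable=True, bn_trainable=True)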
Example #13
def plotImages(images_arr):
    fig, axes = plt.subplots(1, 10, figsize=(20,20))
    axes = axes.flatten()
    for img, ax in zip( images_arr, axes):
        ax.imshow(img)
        ax.axis('off')
    plt.tight_layout()
    plt.show()

plotImages(imgs)
print(labels)

model = Sequential([
    Conv2D(filters=32, kernel_size=(3, 3), activation='relu', padding = 'same'),
    MaxPool2D(pool_size=(2, 2), strides=2),
    BatchNormalization(),
    Conv2D(filters=32, kernel_size=(3, 3), activation='relu', padding = 'same'),
    MaxPool2D(pool_size=(2, 2), strides=2),
    BatchNormalization(),
    Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding = 'same'),
    MaxPool2D(pool_size=(2, 2), strides=2),
    BatchNormalization(),
    Conv2D(filters=128, kernel_size=(3, 3), activation='relu', padding='same'),
    MaxPool2D(pool_size=(2, 2), strides=2),
    BatchNormalization(),
    Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding='same'),
    MaxPool2D(pool_size=(2, 2), strides=2),
    BatchNormalization(),
    Conv2D(filters=32, kernel_size=(3, 3), activation='relu', padding='same'),
    MaxPool2D(pool_size=(2, 2), strides=2),
Example #14
    def __init__(self):
        super(VGGModel, self).__init__()

        # Optimizer
        self.optimizer = tf.keras.optimizers.RMSprop(
            learning_rate=hp.learning_rate, momentum=hp.momentum)

        self.vgg16 = [
            # Block 1
            Conv2D(64,
                   3,
                   1,
                   padding="same",
                   activation="relu",
                   name="block1_conv1"),
            Conv2D(64,
                   3,
                   1,
                   padding="same",
                   activation="relu",
                   name="block1_conv2"),
            MaxPool2D(2, name="block1_pool"),
            # Block 2
            Conv2D(128,
                   3,
                   1,
                   padding="same",
                   activation="relu",
                   name="block2_conv1"),
            Conv2D(128,
                   3,
                   1,
                   padding="same",
                   activation="relu",
                   name="block2_conv2"),
            MaxPool2D(2, name="block2_pool"),
            # Block 3
            Conv2D(256,
                   3,
                   1,
                   padding="same",
                   activation="relu",
                   name="block3_conv1"),
            Conv2D(256,
                   3,
                   1,
                   padding="same",
                   activation="relu",
                   name="block3_conv2"),
            Conv2D(256,
                   3,
                   1,
                   padding="same",
                   activation="relu",
                   name="block3_conv3"),
            MaxPool2D(2, name="block3_pool"),
            # Block 4
            Conv2D(512,
                   3,
                   1,
                   padding="same",
                   activation="relu",
                   name="block4_conv1"),
            Conv2D(512,
                   3,
                   1,
                   padding="same",
                   activation="relu",
                   name="block4_conv2"),
            Conv2D(512,
                   3,
                   1,
                   padding="same",
                   activation="relu",
                   name="block4_conv3"),
            MaxPool2D(2, name="block4_pool"),
            # Block 5
            Conv2D(512,
                   3,
                   1,
                   padding="same",
                   activation="relu",
                   name="block5_conv1"),
            Conv2D(512,
                   3,
                   1,
                   padding="same",
                   activation="relu",
                   name="block5_conv2"),
            Conv2D(512,
                   3,
                   1,
                   padding="same",
                   activation="relu",
                   name="block5_conv3"),
            MaxPool2D(2, name="block5_pool")
        ]

        # ============================================================================

        # TODO: Make all layers in self.vgg16 non-trainable. This will freeze the
        #       pretrained VGG16 weights into place so that only the classification
        #       head is trained.

        for layer in self.vgg16:
            layer.trainable = False

        # TODO: Write a classification head for our 15-scene classification task.
        #       Hint: The layers Flatten and Dense are essential here.

        self.head = [
            Flatten(),
            Dense(64, activation='relu'),
            Dense(15, activation='softmax')
        ]
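
    # Hedged sketch, not from the original file: a forward pass that chains
    # the frozen VGG16 layers and then the classification head.
    def call(self, x):
        for layer in self.vgg16:
            x = layer(x)
        for layer in self.head:
            x = layer(x)
        return x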
Example #15
y_test = keras.utils.to_categorical(y_test, num_classes)
print(input_shape)

# Build the network
model = Sequential()
# First conv layer: 32 kernels of size 5x5, 'same' padding, relu activation, and the input tensor shape
model.add(
    Conv2D(filters=32,
           kernel_size=(5, 5),
           padding='same',
           activation='relu',
           input_shape=(28, 28, 1)))
model.add(
    Conv2D(filters=32, kernel_size=(5, 5), padding='same', activation='relu'))
# Pooling layer with a 2x2 pool size
model.add(MaxPool2D(pool_size=(2, 2)))
# Randomly drop a quarter of the connections to prevent overfitting
model.add(Dropout(0.25))
model.add(
    Conv2D(filters=64, kernel_size=(3, 3), padding='same', activation='relu'))
model.add(
    Conv2D(filters=64, kernel_size=(3, 3), padding='same', activation='relu'))
model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Dropout(0.25))
# Fully connected part: flatten first
model.add(Flatten())
# Hidden layer with its unit count and activation
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.25))
# Output layer
model.add(Dense(10, activation='softmax'))
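
# Hedged continuation (not in the original snippet): a typical compile step
# for this MNIST-style model.
# model.compile(optimizer='adam', loss='categorical_crossentropy',
#               metrics=['accuracy'])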
Example #16
from tensorflow.keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Flatten, Activation, AvgPool2D, MaxPool2D, GlobalAvgPool2D
from tensorflow.keras import models
from src.data.generators import create
from tensorflow import keras
import datetime
from matplotlib import pyplot as plt

train, test = create()

## Model ##

model = keras.Sequential()
model.add(Conv2D(filters=32, kernel_size=4, input_shape=(256, 256, 3)))
model.add(Activation('relu'))
model.add(MaxPool2D(2, 2))
model.add(Flatten())
model.add(Dense(15, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.summary()

## Callbacks##
log_dir = "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = keras.callbacks.TensorBoard(log_dir=log_dir,
                                                   histogram_freq=1)
early_Stopping = keras.callbacks.EarlyStopping(monitor='loss', patience=3)

opt = keras.optimizers.Adam(learning_rate=0.0001)
model.compile(optimizer=opt,
              loss='binary_crossentropy',
              metrics=[
Example #17
def down_sampling(x):
    num_filter = int(x.shape[-1] * 2)
    x = MaxPool2D(pool_size=(2, 2))(x)
    x = conv_block(x, num_filter)

    return x
Example #18
# These 4 dimensions are (number of samples, image height, image width, channels),
# so reshape the data to 4D; grayscale images have 1 channel, color images have 3
x_train = x_train.reshape(-1, 28, 28, 1) / 255.0
x_test = x_test.reshape(-1, 28, 28, 1) / 255.0
# Convert the train and test set labels to one-hot encoding
y_train = tf.keras.utils.to_categorical(y_train, num_classes=10)
y_test = tf.keras.utils.to_categorical(y_test, num_classes=10)

# Define the model input
inputs = Input(shape=(28, 28, 1))
x = Conv2D(filters=32,
           kernel_size=5,
           strides=1,
           padding='same',
           activation='relu')(inputs)
x = MaxPool2D(pool_size=2, strides=2, padding='same')(x)
x = Conv2D(64, 5, strides=1, padding='same', activation='relu')(x)
x = MaxPool2D(pool_size=2, strides=2, padding='same')(x)
x = Flatten()(x)
x = Dense(1024, activation='relu')(x)
x = Dropout(0.5)(x)
x = Dense(10, activation='softmax')(x)
# Define the model
model = Model(inputs, x)

# Define the optimizer
adam = Adam(learning_rate=1e-4)
# Set the optimizer and loss function; track accuracy during training
model.compile(optimizer=adam,
              loss='categorical_crossentropy',
              metrics=['accuracy'])
Example #19
def build_model(n_features_conv,
                input_shape,
                n_features_dense=[64, 16],
                n_output_classes=2):
    """
    artemp: this function builds a convolutional neural net
    """
    model = Sequential()

    block_index = 0
    conv_index = 0
    conv = Conv2D(n_features_conv[block_index],
                  3,
                  padding='same',
                  strides=(1, 1),
                  name='block{}_conv{}'.format(block_index + 1,
                                               conv_index + 1),
                  activation='relu',
                  input_shape=input_shape)
    model.add(conv)
    pool = MaxPool2D(pool_size=(3, 3),
                     strides=(2, 2),
                     padding='same',
                     name='maxpool{}'.format(block_index))
    model.add(pool)

    block_index += 1
    conv_index = 1
    conv = Conv2D(n_features_conv[block_index],
                  3,
                  padding='same',
                  strides=(1, 1),
                  name='block{}_conv{}'.format(block_index + 1,
                                               conv_index + 1),
                  activation='relu')
    model.add(conv)
    pool = MaxPool2D(pool_size=(3, 3),
                     strides=(2, 2),
                     padding='same',
                     name='maxpool{}'.format(block_index))
    model.add(pool)

    block_index += 1
    conv_index = 1
    conv = Conv2D(n_features_conv[block_index],
                  3,
                  padding='same',
                  strides=(1, 1),
                  name='block{}_conv{}'.format(block_index + 1,
                                               conv_index + 1),
                  activation='relu')
    model.add(conv)
    pool = MaxPool2D(pool_size=(3, 3),
                     strides=(2, 2),
                     padding='same',
                     name='maxpool{}'.format(block_index))
    model.add(pool)

    block_index += 1
    conv_index = 1
    conv = Conv2D(n_features_conv[block_index],
                  3,
                  padding='same',
                  strides=(1, 1),
                  name='block{}_conv{}'.format(block_index + 1,
                                               conv_index + 1),
                  activation='relu')
    model.add(conv)
    pool = MaxPool2D(pool_size=(3, 3),
                     strides=(2, 2),
                     padding='same',
                     name='maxpool{}'.format(block_index))
    model.add(pool)

    model.add(Flatten(name='flatten'))
    # artemp: Flattening is converting the data into a 1-dimensional array for inputting it to the next layer

    dense_block_index = 0
    dense = Dense(n_features_dense[dense_block_index],
                  name='dense{}'.format(dense_block_index + 1),
                  activation='relu')
    model.add(dense)

    # artemp: insert the drop out layer here (it can be inserted between 2 dense layers):
    # artemp: drop-out means a different set of neurons is removed for each CNN realization to regularize the model
    layer = Dropout(0.5)
    model.add(layer)

    dense_block_index += 1
    dense = Dense(n_features_dense[dense_block_index],
                  name='dense{}'.format(dense_block_index + 1),
                  activation='relu')
    model.add(dense)

    print('number of convolutional layers', block_index + 1)
    print('number of dense layers', dense_block_index + 1)

    classifier = Dense(n_output_classes,
                       name='classifier',
                       activation='sigmoid')
    # artemp: the default number of classes is 2 corresponding to the bright circle and the dim square
    model.add(classifier)

    return model
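
# A hedged usage sketch (hypothetical values): n_features_conv must supply one
# filter count per conv block (four blocks above).
# model = build_model(n_features_conv=[16, 32, 64, 128], input_shape=(64, 64, 1))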
Example #20
def get_model(hyper_params):
    """Generating ssd model for hyper params.
    inputs:
        hyper_params = dictionary

    outputs:
        ssd_model = tf.keras.model
    """
    # The initial scale factor is 20 in the paper.
    # Although this scale factor can make the loss NaN in some cases,
    # it was kept unchanged after some tests.
    scale_factor = 20.0
    reg_factor = 5e-4
    total_labels = hyper_params["total_labels"]
    # +1 for ratio 1
    len_aspect_ratios = [len(x) + 1 for x in hyper_params["aspect_ratios"]]
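    # e.g. (hypothetical values): aspect_ratios = [[2.0], [2.0, 3.0]] gives
    # len_aspect_ratios = [2, 3], since every feature-map location also gets
    # the implicit ratio-1 box.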
    #
    input = Input(shape=(None, None, 3), name="input")
    # conv1 block
    conv1_1 = Conv2D(64, (3, 3),
                     padding="same",
                     activation="relu",
                     kernel_initializer="glorot_normal",
                     kernel_regularizer=l2(reg_factor),
                     name="conv1_1")(input)
    conv1_2 = Conv2D(64, (3, 3),
                     padding="same",
                     activation="relu",
                     kernel_initializer="glorot_normal",
                     kernel_regularizer=l2(reg_factor),
                     name="conv1_2")(conv1_1)
    pool1 = MaxPool2D((2, 2), strides=(2, 2), padding="same",
                      name="pool1")(conv1_2)
    # conv2 block
    conv2_1 = Conv2D(128, (3, 3),
                     padding="same",
                     activation="relu",
                     kernel_initializer="glorot_normal",
                     kernel_regularizer=l2(reg_factor),
                     name="conv2_1")(pool1)
    conv2_2 = Conv2D(128, (3, 3),
                     padding="same",
                     activation="relu",
                     kernel_initializer="glorot_normal",
                     kernel_regularizer=l2(reg_factor),
                     name="conv2_2")(conv2_1)
    pool2 = MaxPool2D((2, 2), strides=(2, 2), padding="same",
                      name="pool2")(conv2_2)
    # conv3 block
    conv3_1 = Conv2D(256, (3, 3),
                     padding="same",
                     activation="relu",
                     kernel_initializer="glorot_normal",
                     kernel_regularizer=l2(reg_factor),
                     name="conv3_1")(pool2)
    conv3_2 = Conv2D(256, (3, 3),
                     padding="same",
                     activation="relu",
                     kernel_initializer="glorot_normal",
                     kernel_regularizer=l2(reg_factor),
                     name="conv3_2")(conv3_1)
    conv3_3 = Conv2D(256, (3, 3),
                     padding="same",
                     activation="relu",
                     kernel_initializer="glorot_normal",
                     kernel_regularizer=l2(reg_factor),
                     name="conv3_3")(conv3_2)
    pool3 = MaxPool2D((2, 2), strides=(2, 2), padding="same",
                      name="pool3")(conv3_3)
    # conv4 block
    conv4_1 = Conv2D(512, (3, 3),
                     padding="same",
                     activation="relu",
                     kernel_initializer="glorot_normal",
                     kernel_regularizer=l2(reg_factor),
                     name="conv4_1")(pool3)
    conv4_2 = Conv2D(512, (3, 3),
                     padding="same",
                     activation="relu",
                     kernel_initializer="glorot_normal",
                     kernel_regularizer=l2(reg_factor),
                     name="conv4_2")(conv4_1)
    conv4_3 = Conv2D(512, (3, 3),
                     padding="same",
                     activation="relu",
                     kernel_initializer="glorot_normal",
                     kernel_regularizer=l2(reg_factor),
                     name="conv4_3")(conv4_2)
    pool4 = MaxPool2D((2, 2), strides=(2, 2), padding="same",
                      name="pool4")(conv4_3)
    # conv5 block
    conv5_1 = Conv2D(512, (3, 3),
                     padding="same",
                     activation="relu",
                     kernel_initializer="glorot_normal",
                     kernel_regularizer=l2(reg_factor),
                     name="conv5_1")(pool4)
    conv5_2 = Conv2D(512, (3, 3),
                     padding="same",
                     activation="relu",
                     kernel_initializer="glorot_normal",
                     kernel_regularizer=l2(reg_factor),
                     name="conv5_2")(conv5_1)
    conv5_3 = Conv2D(512, (3, 3),
                     padding="same",
                     activation="relu",
                     kernel_initializer="glorot_normal",
                     kernel_regularizer=l2(reg_factor),
                     name="conv5_3")(conv5_2)
    pool5 = MaxPool2D((3, 3), strides=(1, 1), padding="same",
                      name="pool5")(conv5_3)
    # conv6 and conv7 converted from fc6 and fc7 and remove dropouts
    # These layers coming from modified vgg16 model
    # https://gist.github.com/weiliu89/2ed6e13bfd5b57cf81d6
    conv6 = Conv2D(1024, (3, 3),
                   dilation_rate=6,
                   padding="same",
                   activation="relu",
                   kernel_initializer="glorot_normal",
                   kernel_regularizer=l2(reg_factor),
                   name="conv6")(pool5)
    conv7 = Conv2D(1024, (1, 1),
                   strides=(1, 1),
                   padding="same",
                   activation="relu",
                   kernel_initializer="glorot_normal",
                   kernel_regularizer=l2(reg_factor),
                   name="conv7")(conv6)
    ############################ Extra Feature Layers Start ############################
    # conv8 block <=> conv6 block in paper caffe implementation
    conv8_1 = Conv2D(256, (1, 1),
                     strides=(1, 1),
                     padding="valid",
                     activation="relu",
                     kernel_initializer="glorot_normal",
                     kernel_regularizer=l2(reg_factor),
                     name="conv8_1")(conv7)
    conv8_2 = Conv2D(512, (3, 3),
                     strides=(2, 2),
                     padding="same",
                     activation="relu",
                     kernel_initializer="glorot_normal",
                     kernel_regularizer=l2(reg_factor),
                     name="conv8_2")(conv8_1)
    # conv9 block <=> conv7 block in paper caffe implementation
    conv9_1 = Conv2D(128, (1, 1),
                     strides=(1, 1),
                     padding="valid",
                     activation="relu",
                     kernel_initializer="glorot_normal",
                     kernel_regularizer=l2(reg_factor),
                     name="conv9_1")(conv8_2)
    conv9_2 = Conv2D(256, (3, 3),
                     strides=(2, 2),
                     padding="same",
                     activation="relu",
                     kernel_initializer="glorot_normal",
                     kernel_regularizer=l2(reg_factor),
                     name="conv9_2")(conv9_1)
    # conv10 block <=> conv8 block in paper caffe implementation
    conv10_1 = Conv2D(128, (1, 1),
                      strides=(1, 1),
                      padding="valid",
                      activation="relu",
                      kernel_initializer="glorot_normal",
                      kernel_regularizer=l2(reg_factor),
                      name="conv10_1")(conv9_2)
    conv10_2 = Conv2D(256, (3, 3),
                      strides=(1, 1),
                      padding="valid",
                      activation="relu",
                      kernel_initializer="glorot_normal",
                      kernel_regularizer=l2(reg_factor),
                      name="conv10_2")(conv10_1)
    # conv11 block <=> conv9 block in paper caffe implementation
    conv11_1 = Conv2D(128, (1, 1),
                      strides=(1, 1),
                      padding="valid",
                      activation="relu",
                      kernel_initializer="glorot_normal",
                      kernel_regularizer=l2(reg_factor),
                      name="conv11_1")(conv10_2)
    conv11_2 = Conv2D(256, (3, 3),
                      strides=(1, 1),
                      padding="valid",
                      activation="relu",
                      kernel_initializer="glorot_normal",
                      kernel_regularizer=l2(reg_factor),
                      name="conv11_2")(conv11_1)
    ############################ Extra Feature Layers End ############################
    # l2 normalization for each location in the feature map
    conv4_3_norm = L2Normalization(scale_factor)(conv4_3)
    #
    pred_bbox_deltas, pred_labels = get_head_from_outputs(
        hyper_params,
        [conv4_3_norm, conv7, conv8_2, conv9_2, conv10_2, conv11_2])
    #
    return Model(inputs=input, outputs=[pred_bbox_deltas, pred_labels])
Example #21
from artist import CustomImage, ImageBundle, InputImage, OutputImage
import pickle
import sys
from tensorflow.keras.layers import Input, MaxPool2D, Conv2D, BatchNormalization, Dropout, Flatten, Concatenate, Dense, Reshape, Activation, Lambda, LeakyReLU

import tensorflow as tf  # needed for tf.constant_initializer below

# Define the Sobel filter (note: this rebinds the sobel_x function name to a
# constant initializer built from its output)
sobel_x = tf.constant_initializer(sobel_x())

# TensorFlow expects 4D tensors of shape (samples, rows, cols, channels)
# Note that the first index (the sample index out of the batch) is stripped

model_input = Input(shape=(512, 512, 1))

Lambda_In = Lambda(lambda x: x / 255.)(model_input)
#Pool1 = MaxPool2D(pool_size=(2, 2))(model_input)  # (256, 256, 1)
Pool2 = MaxPool2D(pool_size=(8, 8))(Lambda_In)  # (64, 64, 1)
#Pool3 = MaxPool2D(pool_size=(2, 2))(Pool2)  # (64, 64, 1)
#Pool4 = MaxPool2D(pool_size=(2, 2))(Pool3)  # (32, 32, 1)
#Pool5 = MaxPool2D(pool_size=(2, 2))(Pool4)  # (16, 16, 1)

Conv21 = Conv2D(128, (3, 3),
                padding='same',
                kernel_initializer=sobel_x,
                data_format='channels_last')(Pool2)  # (64, 64, 128)
Activ21 = Activation('sigmoid')(Conv21)
BN21 = BatchNormalization(axis=2)(Activ21)
Drop21 = Dropout(0.1)(BN21)

Conv22 = Conv2D(128, (5, 5), padding='same',
                data_format='channels_last')(Drop21)
Activ22 = Activation('sigmoid')(Conv22)
Example #22
from tensorflow.keras.layers import MaxPool2D, Conv2D, Flatten, Dense, Dropout
from tensorflow.keras.models import Sequential

model1 = Sequential([
    Conv2D(
        10, 5, activation='relu', input_shape=(28, 28, 1)
    ),  # The first layer must specify input_shape; 10 is the number of feature maps, and the 5 (kernel size) equals (previous feature-map size) - (this layer's feature-map size) + 1
    MaxPool2D(
        pool_size=(2, 2)
    ),  # pool_size is the previous Conv2D layer's feature-map size divided by this MaxPool2D's output feature-map size
    Conv2D(20, 5, activation='relu'),
    MaxPool2D(pool_size=(2, 2)),
    Flatten(),
    Dropout(0.5),
    Dense(100, activation='relu'),
    Dense(10, activation='softmax')
])
model1.summary()
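
# Worked sizes for the stack above (what model1.summary() should report, given
# the default padding='valid'):
#   28x28x1 -> Conv2D(10, 5) -> 24x24x10   (28 - 5 + 1 = 24)
#   -> MaxPool2D(2)          -> 12x12x10
#   -> Conv2D(20, 5)         -> 8x8x20     (12 - 5 + 1 = 8)
#   -> MaxPool2D(2)          -> 4x4x20 -> Flatten -> 320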
Example #23
import matplotlib.pyplot as plt

tf.random.set_seed(42)
np.random.seed(42)

# X_train_full = X_train_full.astype(np.float32) / 255
# X_test = X_test.astype(np.float32) / 255

(X_train_full, y_train_full), (X_test, y_test) = load_minst()
X_train, X_valid = X_train_full[:-5000], X_train_full[-5000:]
y_train, y_valid = y_train_full[:-5000], y_train_full[-5000:]

encoder = Sequential([
    Reshape([28, 28, 1], input_shape=[28, 28, 1]),
    Conv2D(16, kernel_size=3, padding='same', activation='selu'),
    MaxPool2D(pool_size=2),
    Conv2D(32, kernel_size=3, padding='same', activation='selu'),
    MaxPool2D(pool_size=2),
    Conv2D(64, kernel_size=3, padding='same', activation='selu'),
    MaxPool2D(pool_size=2)
])

decoder = Sequential([
    Conv2DTranspose(32, kernel_size=3, strides=2, padding='valid', activation='selu', input_shape=[3, 3, 64]),
    Conv2DTranspose(16, kernel_size=3, strides=2, padding='same', activation='selu'),
    Conv2DTranspose(1, kernel_size=3, strides=2, padding='same', activation='sigmoid'),
    Reshape([28, 28])
])
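
# Shape check (pooling floors odd sizes): 28 -> 14 -> 7 -> 3 through the
# encoder, so its output is 3x3x64, matching the decoder's input_shape; the
# decoder inverts it: (3 - 1) * 2 + 3 = 7 ('valid'), then 7 * 2 = 14 and
# 14 * 2 = 28 ('same').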


def rounded_accuracy(y_true, y_pred):
Example #24
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.losses import sparse_categorical_crossentropy
from tensorflow.keras.layers import Conv2D, Flatten, MaxPool2D, BatchNormalization, Dense, Dropout

model = Sequential([
    Conv2D(filters=32,
           kernel_size=3,
           strides=2,
           activation=tf.nn.relu,
           input_shape=[32, 32, 3],
           data_format='channels_last',
           name='Conv1'),
    MaxPool2D(2, 2, name='MaxPool1'),
    Conv2D(filters=64,
           kernel_size=3,
           strides=2,
           activation=tf.nn.relu,
           name='Conv2'),
    Conv2D(filters=128,
           kernel_size=3,
           strides=2,
           activation=tf.nn.relu,
           name='Conv3'),
    Flatten(),
    Dense(units=512, activation=tf.nn.relu),
    Dense(units=10, activation=tf.nn.softmax)
])

model.compile(optimizer='adam',
        x = self.softmax(x)

        return x


# Variant 1: defining the model with Sequential

print("=== Variant 1 with Sequential ===")

model = Sequential()
model.add(
    Conv2D(32, (3, 3),
           padding='same',
           input_shape=(32, 32, 3),
           activation="relu"))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(512, activation="relu"))
model.add(Dense(NUM_CLASSES, activation='softmax'))

model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta(),
              metrics=["accuracy"])
model.fit(images_train, labels_train, batch_size=BATCH_SIZE, epochs=EPOCHS)
scores = model.evaluate(images_test, labels_test)

print('Loss:', scores[0])
Example #26
 def __init__(self, k=2):
     super(MP, self).__init__()
     self.m = MaxPool2D(pool_size=k, strides=k)
Example #27
File: cnnHeatMap.py  Project: meiTob/ML
def build_model(img_shape: Tuple[int, int, int], num_classes: int,
                optimizer: tf.keras.optimizers.Optimizer, learning_rate: float,
                filter_block1: int, kernel_size_block1: int,
                filter_block2: int, kernel_size_block2: int,
                filter_block3: int, kernel_size_block3: int,
                dense_layer_size: int,
                kernel_initializer: tf.keras.initializers.Initializer,
                activation_cls: tf.keras.layers.Activation,
                dropout_rate: float, use_batch_normalization: bool,
                use_dense: bool, use_global_pooling: bool) -> Model:
    input_img = Input(shape=img_shape)

    x = Conv2D(filters=filter_block1,
               kernel_size=kernel_size_block1,
               padding="same",
               kernel_initializer=kernel_initializer,
               name="heatmap1")(input_img)
    if use_batch_normalization:
        x = BatchNormalization()(x)
    x = activation_cls(x)
    x = Conv2D(filters=filter_block1,
               kernel_size=kernel_size_block1,
               padding="same",
               kernel_initializer=kernel_initializer)(x)
    if use_batch_normalization:
        x = BatchNormalization()(x)
    if dropout_rate:
        x = Dropout(rate=dropout_rate)(x)
    x = activation_cls(x)
    x = MaxPool2D()(x)

    x = Conv2D(filters=filter_block2,
               kernel_size=kernel_size_block2,
               padding="same",
               kernel_initializer=kernel_initializer)(x)
    if use_batch_normalization:
        x = BatchNormalization()(x)
    x = activation_cls(x)
    x = Conv2D(filters=filter_block2,
               kernel_size=kernel_size_block2,
               padding="same",
               kernel_initializer=kernel_initializer)(x)
    if use_batch_normalization:
        x = BatchNormalization()(x)
    if dropout_rate:
        x = Dropout(rate=dropout_rate)(x)
    x = activation_cls(x)
    x = MaxPool2D()(x)

    x = Conv2D(filters=filter_block3,
               kernel_size=kernel_size_block3,
               padding="same",
               kernel_initializer=kernel_initializer)(x)
    if use_batch_normalization:
        x = BatchNormalization()(x)
    x = activation_cls(x)
    x = Conv2D(filters=filter_block3,
               kernel_size=kernel_size_block3,
               padding="same",
               kernel_initializer=kernel_initializer)(x)
    if use_batch_normalization:
        x = BatchNormalization()(x)
    if dropout_rate:
        x = Dropout(rate=dropout_rate)(x)
    x = activation_cls(x)
    x = MaxPool2D()(x)

    if use_global_pooling:
        x = GlobalAveragePooling2D()(x)
    else:
        x = Flatten()(x)
    if use_dense:
        x = Dense(units=dense_layer_size,
                  kernel_initializer=kernel_initializer)(x)
        if use_batch_normalization:
            x = BatchNormalization()(x)
        x = activation_cls(x)
    x = Dense(units=num_classes, kernel_initializer=kernel_initializer)(x)
    y_pred = Activation("softmax")(x)

    model = Model(inputs=[input_img], outputs=[y_pred])

    opt = optimizer(learning_rate=learning_rate)

    model.compile(loss="categorical_crossentropy",
                  optimizer=opt,
                  metrics=["accuracy"])

    return model
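
# A hedged usage sketch with purely illustrative settings:
# model = build_model(img_shape=(64, 64, 3), num_classes=10,
#                     optimizer=tf.keras.optimizers.Adam, learning_rate=1e-3,
#                     filter_block1=32, kernel_size_block1=3,
#                     filter_block2=64, kernel_size_block2=3,
#                     filter_block3=128, kernel_size_block3=3,
#                     dense_layer_size=128, kernel_initializer='glorot_uniform',
#                     activation_cls=Activation('relu'),
#                     dropout_rate=0.3, use_batch_normalization=True,
#                     use_dense=True, use_global_pooling=False)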
Example #28
)  # On Colab/GPU, a higher batch size does not help and sometimes does not fit on the GPU (OOM).
ds_test = ds_test.cache()
ds_test = ds_test.prefetch(AUTO)

print(ds_test)

# Plug the input pipeline into Keras.
model = Sequential([
    Conv2D(filters=96,
           kernel_size=(11, 11),
           strides=(4, 4),
           activation=tf.nn.relu,
           data_format='channels_last',
           input_shape=(227, 227, 3)),
    BatchNormalization(),
    MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding='same'),
    Conv2D(filters=256,
           kernel_size=(5, 5),
           strides=(1, 1),
           activation=tf.nn.relu,
           padding="same"),
    BatchNormalization(),
    MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding='same'),
    Conv2D(filters=384,
           kernel_size=(3, 3),
           strides=(1, 1),
           activation=tf.nn.relu,
           padding="same"),
    BatchNormalization(),
    Conv2D(filters=384,
           kernel_size=(3, 3),
Example #29
def init_model():
    model = Sequential()
    model.add(Input(shape=ng_config.input_shape))
    model.add(
        Conv2D(
            filters=128,
            kernel_size=[3, 3],
            padding="same",
            activation="elu",
        ))
    model.add(
        Conv2D(
            filters=128,
            kernel_size=[3, 3],
            padding="same",
            activation="elu",
        ))
    model.add(MaxPool2D(pool_size=[2, 2], ))
    model.add(
        Conv2D(
            filters=256,
            kernel_size=[3, 3],
            padding="same",
            activation="elu",
        ))
    model.add(
        Conv2D(
            filters=256,
            kernel_size=[3, 3],
            padding="same",
            activation="elu",
        ))
    model.add(MaxPool2D(pool_size=[2, 2], ))
    model.add(Dropout(rate=0.25, ))
    model.add(
        Conv2D(
            filters=512,
            kernel_size=[3, 3],
            padding="same",
            activation="elu",
        ))
    model.add(
        Conv2D(
            filters=512,
            kernel_size=[3, 3],
            padding="same",
            activation="elu",
        ))
    model.add(MaxPool2D(pool_size=[2, 2], ))
    model.add(Dropout(rate=0.25, ))
    model.add(Flatten())
    model.add(Dense(
        units=1024,
        activation="elu",
    ))
    model.add(Dropout(rate=0.5, ))
    model.add(Dense(
        units=100,
        activation="softmax",
    ))
    return model
chanDim = -1

try:
    with tf.device('/device:GPU:7'):
        # specify the input size of the images
        images = Input((
            train_x.shape[1],
            train_x.shape[2],
            1,
        ))
        x = Conv2D(32, kernel_size=(3, 3), padding="same")(images)

        x = Activation("relu")(x)

        x = BatchNormalization(axis=chanDim)(x)
        x = MaxPool2D(pool_size=(3, 3))(x)
        x = Dropout(0.25)(x)

        x = Conv2D(64, (3, 3), padding="same")(x)
        x = Activation("relu")(x)
        x = BatchNormalization(axis=chanDim)(x)

        x = Conv2D(64, (3, 3), padding="same")(x)
        x = Activation("relu")(x)
        x = BatchNormalization(axis=chanDim)(x)
        x = MaxPool2D(pool_size=(2, 2))(x)
        x = Dropout(0.25)(x)

        x = Conv2D(128, (3, 3), padding="same")(x)
        x = Activation("relu")(x)
        x = BatchNormalization(axis=chanDim)(x)