Example #1
    def _create_model(self,
                      data_shape,
                      channel_size=5,
                      kernel_size=10,
                      activation='relu'):
        if self._is_regression:
            loss_func = 'mean_squared_error'
        else:
            loss_func = 'sparse_categorical_crossentropy'

        # note: channel_size is used as the kernel size here, while the
        # kernel_size argument goes unused
        conv1d_1 = Conv1D(filters=1,
                          kernel_size=channel_size,
                          padding='valid',
                          activation=activation,
                          input_shape=(data_shape[1], data_shape[2]))
        conv1d_2 = Conv1D(filters=1,
                          kernel_size=5,
                          padding='same',
                          activation='tanh')
        global_max_1d = GlobalMaxPooling1D()

        if self._with_functional_api:
            inputs = Input(name='layer_in',
                           shape=(data_shape[1], data_shape[2]))
            x1 = conv1d_1(inputs)
            x2 = conv1d_2(x1)
            x3 = global_max_1d(x2)
            if not self._is_regression:
                layer_out = Dense(name='layer_out', units=2)
                acti_out = Activation('softmax', name='acti_out')
                outputs = acti_out(layer_out(x3))
            else:
                outputs = x3

            model = Model(inputs=inputs,
                          outputs=outputs,
                          name='cnn_model_constructor')
        else:
            model = Sequential([conv1d_1, conv1d_2, global_max_1d],
                               name='cnn_seq_constructor')
            if not self._is_regression:
                model.add(Dense(name='layer_out', units=2))
                model.add(Activation('softmax', name='acti_out'))

        model.compile(optimizer='adam', loss=loss_func, metrics=['acc'])

        return model
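A minimal usage sketch (hypothetical shapes and random data) contrasting the two API branches that _create_model switches between; in the regression configuration both yield one value per sample:

import numpy as np
from tensorflow.keras.layers import Conv1D, GlobalMaxPooling1D, Input
from tensorflow.keras.models import Model, Sequential

data = np.random.rand(8, 50, 3).astype('float32')   # (batch, steps, channels)

# functional variant
inputs = Input(shape=(50, 3))
x = Conv1D(1, kernel_size=5, padding='valid', activation='relu')(inputs)
x = Conv1D(1, kernel_size=5, padding='same', activation='tanh')(x)
functional = Model(inputs, GlobalMaxPooling1D()(x))

# Sequential variant
sequential = Sequential([
    Conv1D(1, kernel_size=5, padding='valid', activation='relu',
           input_shape=(50, 3)),
    Conv1D(1, kernel_size=5, padding='same', activation='tanh'),
    GlobalMaxPooling1D(),
])

assert functional.predict(data).shape == sequential.predict(data).shape == (8, 1)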
Example #2
    def _build_model(self,
                     input_size,
                     output_size,
                     hidden_size0=200,
                     hidden_size1=10):
        if self.dueling_dqn:
            inputs = Input(shape=(input_size, ))
            x = Dense(hidden_size0, activation='relu')(inputs)
            x = Dense(hidden_size1, activation='relu')(x)

            # Dueling streams: state value V(s) and advantages A(s, a). The
            # hard-coded 3 assumes a three-action environment and should match
            # output_size; the standard formulation uses Dense(1) for V.
            value = Dense(3, activation='linear')(x)
            a = Dense(3, activation='linear')(x)
            mean = Lambda(lambda x: K.mean(x, axis=1, keepdims=True))(a)
            advantage = Subtract()([a, mean])

            q = Add()([value, advantage])

            model = Model(inputs=inputs, outputs=q)
            model.compile(loss='mse', optimizer=Adam(self.lr))
        else:
            model = Sequential()
            model.add(
                Dense(hidden_size0,
                      input_shape=(input_size, ),
                      activation='relu'))
            model.add(Dense(hidden_size1, activation='relu'))
            model.add(Dense(output_size, activation='linear'))
            model.compile(loss='mse', optimizer=Adam(lr=self.lr))
        return model
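The dueling branch above implements Q(s, a) = V(s) + (A(s, a) - mean_a A(s, a)); a quick numpy check of that identity with illustrative values:

import numpy as np

v = np.array([[1.0, 1.0, 1.0]])   # value stream output, shape (batch, 3)
a = np.array([[0.5, 1.5, 2.5]])   # advantage stream output, shape (batch, 3)
q = v + (a - a.mean(axis=1, keepdims=True))
print(q)  # [[0. 1. 2.]] -- zero-mean advantages offset by the value estimate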
Example #3
class FCN():
    def __init__(self, method=1):
        self.method = method
        self._build()

    def _build(self):

        if self.method == 1:

            self.model = Sequential([
                Flatten(input_shape=(32, 32, 3)),
                Dense(200, activation='relu'),
                Dense(150, activation='relu'),
                Dense(10, activation='softmax')  # 10: the number of classes
            ])

        elif self.method == 2:

            self.model = Sequential()
            self.model.add(Flatten(input_shape=(32, 32, 3)))
            self.model.add(Dense(200, activation='relu'))
            self.model.add(Dense(150, activation='relu'))
            self.model.add(Dense(10, activation='softmax'))

        elif self.method == 3:

            input_layer = Input(shape=(32, 32, 3))
            x = input_layer
            x = Flatten()(x)
            x = Dense(200, activation='relu')(x)
            x = Dense(150, activation='relu')(x)
            output_layer = Dense(10, activation='softmax')(x)
            self.model = Model(input_layer, output_layer)

        show_structure(self.model, __file__)

    def compile(self, learning_rate):
        opt = Adam(lr=learning_rate)
        self.model.compile(loss='categorical_crossentropy',
                           optimizer=opt,
                           metrics=['accuracy'])

    def train(self, x_train, y_train, batch_size, epochs):
        self.model.fit(x_train,
                       y_train,
                       batch_size=batch_size,
                       epochs=epochs,
                       shuffle=True)

    def evaluate(self, x_test, y_test, batch_size):
        self.model.evaluate(x_test, y_test, batch_size=batch_size)

    def predict(self, x):
        return self.model.predict(x)
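A hypothetical smoke test for the FCN class, using random CIFAR-10-shaped data (assuming the external show_structure helper and the layer imports are in scope):

import numpy as np

x_train = np.random.rand(16, 32, 32, 3).astype('float32')
y_train = np.eye(10)[np.random.randint(0, 10, size=16)]  # one-hot labels

fcn = FCN(method=3)               # functional-API variant
fcn.compile(learning_rate=0.001)
fcn.train(x_train, y_train, batch_size=8, epochs=1)
print(fcn.predict(x_train[:2]).shape)   # (2, 10)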
Example #4
def create_model(optimiser, learning_rate, c, transfer_learning, deepen_model,
                 base_model):
    # transfer learning condition
    model = copy_model(base_model, optimiser, transfer_learning)
    model.pop()
    if deepen_model:
        if transfer_learning:
            model.layers[0].trainable = False
            start = Dense(120, activation='relu',
                          activity_regularizer=l2(c))(model.layers[-1].output)
            end = Dense(250, activation='relu',
                        activity_regularizer=l2(c))(start)
            out_r = Dense(1,
                          activation="sigmoid",
                          activity_regularizer=l2(c),
                          name='r_prop')(end)
            final_model = Model(model.input, out_r)
        else:
            final_model = Sequential()
            final_model.add(
                Dense(120,
                      input_dim=51,
                      activation="relu",
                      activity_regularizer=l2(c)))
            final_model.add(
                Dense(250, activation='relu', activity_regularizer=l2(c)))
            final_model.add(
                Dense(1,
                      activation="sigmoid",
                      activity_regularizer=l2(c),
                      name='r_prop'))
    else:
        out_r = Dense(1,
                      activation="sigmoid",
                      activity_regularizer=l2(c),
                      name='r_prop')(model.layers[-1].output)
        final_model = Model(model.input, out_r)

    # Compile model
    if optimiser == "nag":
        opt = SGD(learning_rate=learning_rate, nesterov=True)
    elif optimiser == "adam":
        opt = Adam(learning_rate=learning_rate)
    else:
        # guard: opt would otherwise be undefined at compile time
        raise ValueError(f"Unsupported optimiser: {optimiser}")
    final_model.compile(loss=cross_entropy, optimizer=opt, metrics=["acc"])

    return final_model
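A small sketch of the freezing idea used above (hypothetical two-layer model): setting layer.trainable = False before compiling moves that layer's kernel and bias out of the trainable set:

from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential

m = Sequential([Dense(4, input_dim=3), Dense(1)])
m.layers[0].trainable = False
m.compile(optimizer='adam', loss='mse')
print(len(m.trainable_weights), len(m.non_trainable_weights))  # 2 2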
Example #5
Out = TimeDistributed(Lambda(part))(mer)

layer_1 = -main_input

Out = TimeDistributed(Lambda(part2))([main_input, layer_1])

model = Model(inputs=[main_input, aux_input], outputs=Out)
model.compile(loss='mean_squared_error', optimizer='RMSprop')
model.predict([DATA, DATA2])
#%%
from tensorflow.keras.utils import plot_model
plot_model(model)

model = Sequential()
model.add(InputLayer(input_shape=(3, 2)))
model.add(TimeDistributed(Lambda(lambda x: x[:, 0])))
model.compile(loss='mean_squared_error', optimizer='RMSprop')
out = model.predict(DATA)
#    model.add(BatchNormalization())
for i in range(n_layer):
    if i == n_layer - 1:
        model.add(
            LSTM(n_hidden,
                 dropout=dropout,
                 kernel_initializer=init,
                 return_sequences=False))
    else:
        model.add(
            LSTM(n_hidden,
                 dropout=dropout,
                 kernel_initializer=init,
                 return_sequences=True))
Example #6
    # Commented-out functional-API variant of the model below:
    # reture_value = LSTM(units=16, return_sequences=False)(counsellor_msg)
    # # reture_value = Dense(units=8)(counsellor_msg)
    # drop = Dropout(0.5)(reture_value)
    # out = Dense(1, activation='sigmoid')(drop)
    # optimizer = Adam(lr=0.001)
    # model = Model(sequence_input, outputs=out)

    model = Sequential()
    model.add(
        Embedding(num_words,
                  embedding_dim,
                  weights=[embedding_matrix],
                  input_length=max_tokens,
                  trainable=False))

    model.add(Bidirectional(LSTM(units=32, return_sequences=True)))
    model.add(LSTM(units=16, return_sequences=False))
    model.add(Dropout(0.1))
    model.add(Dense(1, activation='sigmoid'))
    optimizer = Adam(lr=0.001)

print(model.summary())

model.compile(loss=[focal_loss(alpha=.75, gamma=2.)],
              optimizer=optimizer,
              metrics=['accuracy'])
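The focal_loss factory used above comes from elsewhere in the source project. A common binary formulation (an assumption, not necessarily the original implementation) scales cross-entropy by (1 - p_t)^gamma so easy examples contribute less:

import tensorflow.keras.backend as K

def focal_loss(alpha=0.75, gamma=2.0):
    def loss(y_true, y_pred):
        y_pred = K.clip(y_pred, K.epsilon(), 1.0 - K.epsilon())
        p_t = y_true * y_pred + (1 - y_true) * (1 - y_pred)
        alpha_t = y_true * alpha + (1 - y_true) * (1 - alpha)
        return -K.mean(alpha_t * K.pow(1.0 - p_t, gamma) * K.log(p_t))
    return loss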
Example #7
def michael_model_2(input_shape=(600, 800, 3)):
    base_model = ResNet50(weights='imagenet', include_top=False,
                          input_shape=input_shape)
    # freeze the pretrained backbone before attaching the new head
    for layer in base_model.layers:
        layer.trainable = False
    # the head is attached functionally; Model instances built with the
    # functional API do not support Sequential-style .add() calls
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(1024, activation='relu')(x)
    x = BatchNormalization()(x)
    x = Dense(256, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = BatchNormalization()(x)
    x = Dense(128, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = BatchNormalization()(x)
    x = Dense(64, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = BatchNormalization()(x)
    predictions = Dense(2, activation='softmax')(x)  # softmax for the 2-class output
    model = Model(inputs=base_model.input, outputs=predictions)
    return model
Example #8
#2. Modeling
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Conv1D, MaxPooling1D, Dense, Flatten, Dropout, Input
input1 = Input(shape=(x_train.shape[1], 1))
conv1 = Conv1D(100, 2, padding='SAME')(input1)
maxp = MaxPooling1D(2)(conv1)
conv1 = Conv1D(100, 2, padding='SAME')(maxp)
drop = Dropout(0.2)(conv1)   # apply the Dropout layer to the tensor
flat = Flatten()(drop)       # flatten so the Dense head yields 2-D output
dense1 = Dense(50, activation='relu')(flat)
dense1 = Dense(50, activation='relu')(dense1)
dense1 = Dense(42, activation='relu')(dense1)
output1 = Dense(3, activation='relu')(dense1)
model = Model(inputs=input1, outputs=output1)

# Functional models do not support Sequential-style .add() calls:
# model.add(Dropout(0.2))
# model.add(Dense(1, activation='relu'))

#3. Compile, train
model.compile(loss='mse', optimizer='adam', metrics=['mae'])
'''
EarlyStopping
'''
from tensorflow.keras.callbacks import EarlyStopping
early_stopping = EarlyStopping(monitor='loss', patience=20, mode='auto')
model.fit(x_train,
          y_train,
          epochs=1000,
          batch_size=7,
          validation_data=(x_val, y_val),
          callbacks=[early_stopping])
Example #9
    def build_model(self):
        if self.pretrained_embeddings is not None:  # ndarray truth value is ambiguous
            self.vocab_size, self.emb_size = self.pretrained_embeddings.shape
            emb_layer = Embedding(input_dim=self.vocab_size, output_dim=self.emb_size,
                            weights=[self.pretrained_embeddings],
                            input_length=self.max_seq_len, trainable=self.finetune_embeddings)
        else:
            emb_layer = Embedding(input_dim=self.vocab_size, output_dim=self.emb_size, input_length=self.max_seq_len)
        if self.labels_num == 2:
            output_layer = Dense(1, activation="sigmoid")
        else:
            output_layer = Dense(self.labels_num, activation="softmax")

        # Functional API
        if self.model_api == 'functional':
            INPUT = Input(shape=(self.max_seq_len,))
            EMB = emb_layer(INPUT)
            x = Bidirectional(LSTM_LAYER(128, return_sequences=True))(EMB)
            x = Bidirectional(LSTM_LAYER(64, return_sequences=True))(x)
            x = AttentionWithContext()(x)
            x = Dense(64, activation="relu")(x)
            OUTPUT = output_layer(x)
            model = Model(inputs=INPUT, outputs=OUTPUT)

        # Sequential API
        elif self.model_api == 'sequential':
            model = Sequential()
            model.add(emb_layer)
            model.add(Bidirectional(LSTM_LAYER(128, return_sequences=True)))
            model.add(Bidirectional(LSTM_LAYER(64, return_sequences=True)))
            model.add(AttentionWithContext())  # instantiate the layer
            model.add(Dense(64, activation="relu"))
            model.add(output_layer)
        else:
            logger.error(f"Model-api type `{self.model_api}` is not supported. Please choose either `functional` or `sequential`")
            raise IOError(f"Model-api type `{self.model_api}` is not supported. Please choose either `functional` or `sequential`")

        if self.labels_num == 2:
            model.compile(loss='binary_crossentropy', optimizer='adam', metrics=[f1_m, 'accuracy'])
        else:
            model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=[f1_m, 'accuracy'])

        return model
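The f1_m metric passed to compile above is defined elsewhere in the source project; a common batch-wise sketch (an assumption, mirroring the f1 method in Example #11 further down) is:

import tensorflow.keras.backend as K

def f1_m(y_true, y_pred):
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    precision = tp / (K.sum(K.round(K.clip(y_pred, 0, 1))) + K.epsilon())
    recall = tp / (K.sum(K.round(K.clip(y_true, 0, 1))) + K.epsilon())
    return 2 * precision * recall / (precision + recall + K.epsilon())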
Example #10
test_generator = data_generator('./dataset/test/')

if __name__ == '__main__':
    os.makedirs('./models/', exist_ok=True)  # mkdir would fail on re-runs

    print("Extracting features with ResNet50")
    base_model = ResNet50(include_top=False, input_shape=(128, 128, 3))
    output = base_model.layers[38].output
    model = Model(inputs=base_model.input, outputs=output)
    pretrain(model, 'train')
    pretrain(model, 'test')
    print()

    print("Building CNN")
    model = Sequential()
    model.add(Conv2D(256, (1, 1), input_shape=(32, 32, 256),
                     activation='relu'))
    model.add(Conv2D(256, (3, 3), activation='relu'))
    model.add(MaxPooling2D())
    model.add(Conv2D(512, (2, 2), activation='relu'))
    model.add(MaxPooling2D())
    model.add(Flatten())
    model.add(Dropout(0.2))
    model.add(Dense(196))
    model.compile('adam', loss='mse', metrics=['accuracy'])
    model.summary()
    plot_model(model, to_file='./models/model.png', show_shapes=True)
    print()

    history = model.fit_generator(train_generator,
                                  steps_per_epoch=STEPS_PER_EPOCH,
                                  validation_data=test_generator,
Example #11
class Models:

    embedding_dim = 500
    input_length = 100
    lstm_units = 75
    lstm_dropout = 0.4
    recurrent_dropout = 0.4
    spatial_dropout = 0.3
    filters = 32
    kernel_size = 3
    max_sequence_length = 75

    def build_Base_model(self, embedding_matrix):
        self.sequence_input = Input(shape=(self.max_sequence_length, ))
        embedding = Embedding(
            input_dim=embedding_matrix.shape[0],
            output_dim=embedding_matrix.shape[1],
            weights=[embedding_matrix],
            input_length=self.max_sequence_length,
            trainable=False,
        )(self.sequence_input)
        base = SpatialDropout1D(self.spatial_dropout)(embedding)
        return base

    def build_GRU_model(self, base):
        base = GRU(self.lstm_units,
                   dropout=self.lstm_dropout,
                   recurrent_dropout=self.recurrent_dropout,
                   return_sequences=True)(base)
        base = GRU(self.lstm_units, return_sequences=True)(base)
        base = GRU(self.lstm_units, return_sequences=True)(base)
        base = GRU(self.lstm_units, return_sequences=True)(base)
        base = GRU(self.lstm_units, return_sequences=True)(base)
        base = GRU(self.lstm_units,
                   dropout=self.lstm_dropout,
                   recurrent_dropout=self.recurrent_dropout)(base)
        # base = BatchNormalization(name="batchy3")(base)
        return base

    def build_LSTM_model(self, embedding_matrix):
        self.model = Sequential()
        self.model.add(
            Embedding(input_dim=embedding_matrix.shape[0],
                      output_dim=embedding_matrix.shape[1],
                      weights=[embedding_matrix],
                      input_length=75,
                      trainable=False))
        self.model.add(SpatialDropout1D(0.5))
        self.model.add(
            Conv1D(self.filters,
                   kernel_size=self.kernel_size,
                   kernel_regularizer=regularizers.l2(0.00001),
                   padding='same'))
        self.model.add(LeakyReLU(alpha=0.2))
        self.model.add(MaxPooling1D(pool_size=2))
        self.model.add(
            Bidirectional(
                LSTM(self.lstm_units,
                     dropout=0.5,
                     recurrent_dropout=0.5,
                     return_sequences=True)))
        self.model.add(SpatialDropout1D(0.5))
        self.model.add(
            Conv1D(self.filters,
                   kernel_size=self.kernel_size,
                   kernel_regularizer=regularizers.l2(0.00001),
                   padding='same'))
        self.model.add(LeakyReLU(alpha=0.2))
        self.model.add(MaxPooling1D(pool_size=2))
        self.model.add(
            Bidirectional(
                LSTM(self.lstm_units,
                     dropout=0.5,
                     recurrent_dropout=0.5,
                     return_sequences=True)))
        self.model.add(SpatialDropout1D(0.5))
        self.model.add(
            Conv1D(self.filters,
                   kernel_size=self.kernel_size,
                   kernel_regularizer=regularizers.l2(0.00001),
                   padding='same'))
        self.model.add(LeakyReLU(alpha=0.2))
        self.model.add(MaxPooling1D(pool_size=2))
        self.model.add(
            Bidirectional(
                LSTM(self.lstm_units, dropout=0.5, recurrent_dropout=0.5)))
        self.model.add(Dense(5, activation='softmax'))
        self.model.compile(optimizer='adam',
                           loss='categorical_crossentropy',
                           metrics=['accuracy'])

    def build_BLSTM_model(self, embedding_matrix):
        self.model = Sequential()
        self.model.add(
            Embedding(input_dim=embedding_matrix.shape[0],
                      output_dim=embedding_matrix.shape[1],
                      weights=[embedding_matrix],
                      input_length=self.max_sequence_length,
                      trainable=False))
        self.model.add(SpatialDropout1D(0.5))
        self.model.add(
            Bidirectional(LSTM(self.lstm_units, return_sequences=True)))
        self.model.add(
            Bidirectional(LSTM(self.lstm_units, return_sequences=True)))
        self.model.add(
            Bidirectional(
                LSTM(self.lstm_units, dropout=0.5, recurrent_dropout=0.5)))
        self.model.add(Dense(5, activation='softmax'))
        self.model.compile(optimizer='adam',
                           loss='categorical_crossentropy',
                           metrics=['accuracy'])

    def build_BLTSM_model(self, base):
        base = Bidirectional(
            LSTM(self.lstm_units,
                 return_sequences=True,
                 dropout=self.lstm_dropout,
                 recurrent_dropout=self.recurrent_dropout))(base)
        base = Bidirectional(
            LSTM(self.lstm_units,
                 dropout=self.lstm_dropout,
                 return_sequences=True,
                 recurrent_dropout=self.recurrent_dropout))(base)
        base = Bidirectional(
            LSTM(self.lstm_units,
                 recurrent_dropout=self.recurrent_dropout,
                 return_sequences=True))(base)
        base = Bidirectional(
            LSTM(self.lstm_units,
                 recurrent_dropout=self.recurrent_dropout,
                 return_sequences=True))(base)
        base = Bidirectional(
            LSTM(self.lstm_units,
                 recurrent_dropout=self.recurrent_dropout,
                 return_sequences=True))(base)
        base = Bidirectional(
            LSTM(self.lstm_units,
                 recurrent_dropout=self.recurrent_dropout,
                 return_sequences=True))(base)
        base = Bidirectional(
            LSTM(self.lstm_units,
                 dropout=self.lstm_dropout,
                 recurrent_dropout=self.recurrent_dropout))(base)
        # max = GlobalMaxPooling1D()(base)
        # concat = concatenate([avg,max])
        # base = BatchNormalization(name="batch_blstm")(base)
        return base

    def build_BGRU_model(self, base):
        base = Bidirectional(
            GRU(self.lstm_units,
                return_sequences=True,
                dropout=self.lstm_dropout,
                recurrent_dropout=self.recurrent_dropout))(base)
        base = Bidirectional(
            GRU(self.lstm_units,
                dropout=self.lstm_dropout,
                return_sequences=True,
                recurrent_dropout=self.recurrent_dropout))(base)
        base = Bidirectional(
            GRU(self.lstm_units,
                recurrent_dropout=self.recurrent_dropout,
                return_sequences=True))(base)
        base = Bidirectional(
            GRU(self.lstm_units,
                recurrent_dropout=self.recurrent_dropout,
                return_sequences=True))(base)
        base = Bidirectional(
            GRU(self.lstm_units,
                recurrent_dropout=self.recurrent_dropout,
                return_sequences=True))(base)
        base = Bidirectional(
            GRU(self.lstm_units,
                recurrent_dropout=self.recurrent_dropout,
                return_sequences=True))(base)
        base = Bidirectional(
            GRU(self.lstm_units,
                dropout=self.lstm_dropout,
                recurrent_dropout=self.recurrent_dropout))(base)
        # max = GlobalMaxPooling1D()(base)
        # concat = concatenate([avg,max])
        # base = BatchNormalization(name="batch_blstm")(base)
        return base

    def build_LTSM_model(self, base):
        base = LSTM(self.lstm_units,
                    return_sequences=True,
                    dropout=self.lstm_dropout,
                    recurrent_dropout=self.recurrent_dropout)(base)
        base = LSTM(self.lstm_units,
                    dropout=self.lstm_dropout,
                    return_sequences=True,
                    recurrent_dropout=self.recurrent_dropout)(base)
        base = LSTM(self.lstm_units,
                    recurrent_dropout=self.recurrent_dropout,
                    return_sequences=True)(base)
        base = LSTM(self.lstm_units,
                    recurrent_dropout=self.recurrent_dropout,
                    return_sequences=True)(base)
        base = LSTM(self.lstm_units,
                    recurrent_dropout=self.recurrent_dropout,
                    return_sequences=True)(base)
        base = LSTM(self.lstm_units,
                    recurrent_dropout=self.recurrent_dropout,
                    return_sequences=True)(base)
        base = LSTM(self.lstm_units,
                    dropout=self.lstm_dropout,
                    recurrent_dropout=self.recurrent_dropout)(base)
        # max = GlobalMaxPooling1D()(base)
        # concat = concatenate([avg,max])
        # base = BatchNormalization(name="batch_blstm")(base)
        return base

    def build_CNN_model(self, base):
        base = Conv1D(self.filters,
                      kernel_size=self.kernel_size,
                      kernel_regularizer=regularizers.l2(0.0005),
                      padding='same')(base)
        base = LeakyReLU(alpha=0.2)(base)
        base = Conv1D(self.filters,
                      kernel_size=self.kernel_size,
                      kernel_regularizer=regularizers.l2(0.0005),
                      padding='same')(base)
        base = LeakyReLU(alpha=0.2)(base)
        base = Conv1D(self.filters,
                      kernel_size=self.kernel_size,
                      kernel_regularizer=regularizers.l2(0.0005),
                      padding='same')(base)
        base = LeakyReLU(alpha=0.2)(base)
        base = Conv1D(self.filters,
                      kernel_size=self.kernel_size,
                      kernel_regularizer=regularizers.l2(0.0005),
                      padding='same')(base)
        base = LeakyReLU(alpha=0.2)(base)
        base = Conv1D(self.filters,
                      kernel_size=self.kernel_size,
                      kernel_regularizer=regularizers.l2(0.0005),
                      padding='same')(base)
        base = LeakyReLU(alpha=0.2)(base)
        avg = GlobalAveragePooling1D()(base)
        max_pool = GlobalMaxPooling1D()(base)  # avoid shadowing the builtin max
        concat = concatenate([avg, max_pool])
        base = BatchNormalization()(concat)
        return base

    def f1(self, y_true, y_pred):
        def recall(y_true, y_pred):
            """Recall metric.
            Only computes a batch-wise average of recall.
            Computes the recall, a metric for multi-label classification of
            how many relevant items are selected.
            """
            true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
            possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
            recall = true_positives / (possible_positives + K.epsilon())
            return recall

        def precision(y_true, y_pred):
            """Precision metric.
            Only computes a batch-wise average of precision.
            Computes the precision, a metric for multi-label classification of
            how many selected items are relevant.
            """
            true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
            predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
            precision = true_positives / (predicted_positives + K.epsilon())
            return precision

        precision = precision(y_true, y_pred)
        recall = recall(y_true, y_pred)
        return 2 * ((precision * recall) / (precision + recall + K.epsilon()))
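    # Usage sketch (assumption): f1 follows the Keras metric signature
    # (y_true, y_pred) -> scalar, so it can be passed straight to compile:
    #     self.model.compile(optimizer='adam',
    #                        loss='categorical_crossentropy',
    #                        metrics=['accuracy', self.f1])
    # Note it is a batch-wise approximation, not an exact epoch-level F1.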

    def build_myModel(self, text_embedding, model):
        for layer in model.layers[2:-2]:
            layer.trainable = False
        # print("layer ",model.layers[2].name)
        concat_cnn = model.layers[-2].output
        base = model.layers[2].output
        # base = self.build_Base_model(text_embedding)
        # model.layers[3] = model.layers[3](base)
        print("***** concat_cnn ", concat_cnn.shape)
        concat_blstm = self.build_BLTSM_model(base)
        # concat_lstm = self.build_LTSM_model(base)
        print("***** concat_blstm ", concat_blstm.shape)
        # concat_cnn = self.build_CNN_model(base)
        concat_gru = self.build_GRU_model(base)
        print("***** concat_gru ", concat_gru.shape)
        concat_out = concatenate([concat_cnn, concat_blstm], name="concat1")
        concat_out = concatenate([concat_out, concat_gru], name="concat2")
        # concat_out = concatenate([concat_out, concat_lstm],name = "concat3")
        # avg = GlobalAveragePooling1D()(base)
        # out = BatchNormalization(name = "batchyend")(concat_out)
        # out = Dense(300, activation='softmax',kernel_regularizer=regularizers.l2(0.00005))(concat_out)
        pred = Dense(5, activation='softmax')(concat_out)
        self.model = Model(model.input, pred)
        weights = np.ones((5, ))
        op = SGD(lr=0.0001)  # unused; the compile call below uses 'adam'
        self.model.compile(
            optimizer='adam',
            loss=self.weighted_categorical_crossentropy(weights),
            metrics=['acc'])

    def compile(self, model):
        op = SGD(lr=4e-4, momentum=0.9)
        model.compile(optimizer=op,
                      loss='categorical_crossentropy',
                      metrics=['acc'])
        return model

    def buil_pre_model(self, text_embedding):
        base = self.build_Base_model(text_embedding)
        # concat_cnn = self.build_CNN_model(base)
        # concat_blstm = self.build_BLTSM_model(base)
        # concat_gru = self.build_GRU_model(base)
        # concat_lstm = self.build_LTSM_model(base)
        concat_bgru = self.build_BGRU_model(base)
        # out = BatchNormalization()(concat_cnn)
        # out = Dense(75, activation='softmax',kernel_regularizer=regularizers.l2(0.00005))(out)
        pred = Dense(5, activation='softmax')(concat_bgru)
        self.model = Model(self.sequence_input, pred)

        op = SGD(lr=0.0001)  # unused; the compile call below uses 'adam'
        self.model.compile(optimizer='adam',
                           loss='categorical_crossentropy',
                           metrics=['acc'])

    def weighted_categorical_crossentropy(self, weights):

        # A weighted version of keras.objectives.categorical_crossentropy

        # Variables:
        #     weights: numpy array of shape (C,) where C is the number of classes

        # Usage:
        #     weights = np.array([0.5,2,10]) # Class one at 0.5, class 2 twice the normal weights, class 3 10x.
        #     loss = weighted_categorical_crossentropy(weights)
        #     model.compile(loss=loss,optimizer='adam')

        weights = K.variable(weights)

        def loss(y_true, y_pred):
            # scale predictions so that the class probas of each sample sum to 1
            y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
            # clip to prevent NaN's and Inf's
            y_pred = K.clip(y_pred, K.epsilon(), 1 - K.epsilon())
            # calc
            loss = y_true * K.log(y_pred) * weights
            loss = -K.sum(loss, -1)
            return loss

        return loss
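A quick numeric check of weighted_categorical_crossentropy (hypothetical values): doubling the weight of class 1 doubles its log-loss contribution:

import numpy as np
import tensorflow as tf

m = Models()
loss_fn = m.weighted_categorical_crossentropy(np.array([1., 2., 1., 1., 1.]))
y_true = tf.constant([[0., 1., 0., 0., 0.]])
y_pred = tf.constant([[0.1, 0.6, 0.1, 0.1, 0.1]])
print(loss_fn(y_true, y_pred).numpy())  # [1.0217...] == 2 * -log(0.6)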
Example #12
'''
dense (Dense)                (None, 5)                 10
_________________________________________________________________
dense_1 (Dense)              (None, 3)                 18
_________________________________________________________________
dense_2 (Dense)              (None, 4)                 16
_________________________________________________________________
dense_3 (Dense)              (None, 1)                 5
=================================================================
Total params: 49
Trainable params: 49
Non-trainable params: 0
_________________________________________________________________
'''
#2.2 Sequential model
model= Sequential()
model.add(Dense(10, input_dim=1))  # input layer (columns) = 1, first hidden layer
model.add(Dense(5))
model.add(Dense(3))
model.add(Dense(4))
model.add(Dense(1))
model.summary()
'''
Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #
=================================================================
dense (Dense)                (None, 10)                20
_________________________________________________________________
dense_2 (Dense)              (None, 3)                 18
_________________________________________________________________
'''
Example #13
## add more if you want
x = Flatten()(mobilnet.output)

prediction = Dense(len(folders), activation='softmax')(x)

# create a model object
model = Model(inputs=mobilnet.input, outputs=prediction)

model.summary()

from tensorflow.keras.layers import MaxPooling2D

### Create Model from scratch using CNN
model = Sequential()
model.add(Conv2D(filters=16, kernel_size=2, padding="same",
                 activation="relu", input_shape=(224, 224, 3)))
model.add(MaxPooling2D(pool_size=2))
model.add(Conv2D(filters=32, kernel_size=2, padding="same", activation="relu"))
model.add(MaxPooling2D(pool_size=2))
model.add(Conv2D(filters=64, kernel_size=2, padding="same", activation="relu"))
model.add(MaxPooling2D(pool_size=2))
model.add(Flatten())
model.add(Dense(500, activation="relu"))
model.add(Dense(2, activation="softmax"))
model.summary()

model.compile(
  loss='categorical_crossentropy',
  optimizer='adam',
  metrics=['accuracy']
)
Example #14
headModel = Flatten(name="flatten")(headModel)
headModel = Dense(256, activation="relu")(headModel)
headModel = Dropout(0.5)(headModel)
headModel = Dense(5, activation="softmax")(headModel)

# place the head FC model on top of the base model (this will become
# the actual model we will train)
model = Model(inputs=baseModel.input, outputs=headModel)
# loop over all layers in the base model and freeze them so they will
# *not* be updated during the training process
for layer in baseModel.layers:
    layer.trainable = False

# Construction of model
model = Sequential()
model.add(Conv2D(32, (5, 5), activation='relu', input_shape=(128, 128, 3))) 
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu')) 
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2)))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(5, activation='softmax'))

# Configures the model for training
model.compile(optimizer='adam', # Optimization routine, which tells the computer how to adjust the parameter values to minimize the loss function.
              loss='sparse_categorical_crossentropy', # Loss function, which tells us how bad our predictions are.
              metrics=['accuracy']) # List of metrics to be evaluated by the model during training and testing.

# Trains the model for a given number of epochs (iterations on a dataset) and validates it.
Example #15
def cnn_model(X_train, X_test, y_train, y_test):

    base_model = applications.VGG16(include_top=False, input_shape=X_train.shape[1:], weights='imagenet',classes=CLASSES)

    # Freezing VGG16 layers
    for layer in base_model.layers:
        layer.trainable=False
    
    # Truncate VGG16 at its final pooling layer
    last_layer = 'block5_pool'
    base_model = Model(base_model.input, base_model.get_layer(last_layer).output)

    model = Sequential()

    model.add(base_model)      # Stack the truncated VGG16

    # The input shape is inferred from the stacked base model; VGG16 outputs
    # are channels_last, so no data_format override is needed here
    model.add(Conv2D(128, (3, 3), activation="relu"))
    model.add(MaxPooling2D(2, 2))

    # model.add(Conv2D(128,(3,3),activation="relu"))
    # model.add(MaxPooling2D(2,2))

    model.add(Flatten())        # Flatten the output

    model.add(Dense(64, activation='relu'))
    model.add(Dropout(0.2))

    # Output layer: softmax matches the sparse categorical cross-entropy below
    model.add(Dense(CLASSES, activation="softmax"))

    model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
    model.fit(X_train, y_train, batch_size=bs, epochs=ep, validation_data = (X_test, y_test),  callbacks=[tensorboard])

    # Save model
    model.save('cnn.model')


    model.summary()
Example #16
def get_model_architecture(dataset, is_dropout=False):
    assert dataset.name in DATASETS, "dataset must be one of: mnist, mnist_fashion, cifar10, cifar100"

    num_classes = dataset.num_classes
    img_shape = dataset.img_shape
    img_input = Input(shape=img_shape)

    if dataset.name == 'mnist' or dataset.name == 'mnist_fashion':

        # architecture from: https://keras.io/examples/mnist_cnn/
        x = Conv2D(32, kernel_size=(3, 3))(img_input)
        x = Activation('relu')(x)
        x = Conv2D(64, (3, 3))(x)
        x = Activation('relu')(x)
        x = MaxPooling2D(pool_size=(2, 2), name='pool1')(x)
        if is_dropout:
            x = Dropout(0.25)(x)
        x = Flatten()(x)
        x = Dense(128)(x)
        x = Activation('relu')(x)
        if is_dropout:
            x = Dropout(0.5)(x)
        x = Dense(num_classes, name='features')(x)
        x = Activation('softmax')(x)
        # Create model
        model = Model(img_input, x)

    elif dataset.name == 'cifar100' or dataset.name == 'cifar10':
        # taken from: https://github.com/geifmany/cifar-vgg/tree/e7d4bd4807d15631177a2fafabb5497d0e4be3ba
        model = Sequential()
        weight_decay = 0.0005

        model.add(
            Conv2D(64, (3, 3),
                   padding='same',
                   input_shape=img_shape,
                   kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        if is_dropout:
            model.add(Dropout(0.3))

        model.add(
            Conv2D(64, (3, 3),
                   padding='same',
                   kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization())

        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(
            Conv2D(128, (3, 3),
                   padding='same',
                   kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        if is_dropout:
            model.add(Dropout(0.4))

        model.add(
            Conv2D(128, (3, 3),
                   padding='same',
                   kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization())

        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(
            Conv2D(256, (3, 3),
                   padding='same',
                   kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        if is_dropout:
            model.add(Dropout(0.4))

        model.add(
            Conv2D(256, (3, 3),
                   padding='same',
                   kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        if is_dropout:
            model.add(Dropout(0.4))

        model.add(
            Conv2D(256, (3, 3),
                   padding='same',
                   kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization())

        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(
            Conv2D(512, (3, 3),
                   padding='same',
                   kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        if is_dropout:
            model.add(Dropout(0.4))

        model.add(
            Conv2D(512, (3, 3),
                   padding='same',
                   kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        if is_dropout:
            model.add(Dropout(0.4))

        model.add(
            Conv2D(512, (3, 3),
                   padding='same',
                   kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization())

        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(
            Conv2D(512, (3, 3),
                   padding='same',
                   kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        if is_dropout:
            model.add(Dropout(0.4))

        model.add(
            Conv2D(512, (3, 3),
                   padding='same',
                   kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        if is_dropout:
            model.add(Dropout(0.4))

        model.add(
            Conv2D(512, (3, 3),
                   padding='same',
                   kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization())

        model.add(MaxPooling2D(pool_size=(2, 2)))
        if is_dropout:
            model.add(Dropout(0.5))

        model.add(Flatten())
        model.add(Dense(512, kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization())

        if is_dropout:
            model.add(Dropout(0.5))
        model.add(Dense(num_classes, name='features'))
        model.add(Activation('softmax'))
        '''
        x = Conv2D(32, (3, 3), padding='same')(img_input)
        x = Activation('relu')(x)

        x = Conv2D(32, (3, 3))(x)
        x = Activation('relu')(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)
        if is_dropout:
            x = Dropout(0.25)(x)
        x = Conv2D(64, (3, 3), padding='same')(x)
        x = Activation('relu')(x)
        x = Conv2D(64, (3, 3))(x)
        x = Activation('relu')(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)
        if is_dropout:
            x = Dropout(0.25)(x)
        x = Flatten()(x)
        x = Dense(512)(x)
        x = Activation('relu')(x)
        if is_dropout:
            x = Dropout(0.5)(x)
        x = Dense(num_classes, name='features')(x)
        x = Activation('softmax')(x)
        # Create model
        model = Model(img_input, x) 
        '''

    #model.summary()
    return model
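# Hypothetical invocation: `dataset` only needs the three attributes read
# above (name, num_classes, img_shape), so a namedtuple stand-in is enough,
# assuming DATASETS includes 'mnist':
from collections import namedtuple

FakeDataset = namedtuple('FakeDataset', ['name', 'num_classes', 'img_shape'])
mnist_like = FakeDataset(name='mnist', num_classes=10, img_shape=(28, 28, 1))
demo_model = get_model_architecture(mnist_like, is_dropout=True)
demo_model.summary()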
Example #17
def GenModel(data_objects, model_params):
    # Clear the TF graph
    K.clear_session()

    # Load the data to check dimensionality
    x_train, y_train = data_objects["x_train"], data_objects["y_train"]
    print("Input data shape:", x_train.shape)

    # Model-controlled parameters:
    F_modeltype = data_objects["F_modeltype"]

    #######################################################################

    # Number of block layers
    BLOCK_LAYERS = model_params["BLOCK_LAYERS"]

    # Alternative block type setup:
    BLOCK1_TYPE = model_params["BLOCK1_TYPE"]
    BLOCK2_TYPE = model_params["BLOCK2_TYPE"]
    BLOCK3_TYPE = model_params["BLOCK3_TYPE"]
    BLOCK4_TYPE = model_params["BLOCK4_TYPE"]

    FC_BLOCK1 = model_params["FC_BLOCK1"]
    FC_BLOCK2 = model_params["FC_BLOCK2"]
    FC_BLOCK3 = model_params["FC_BLOCK3"]
    FC_BLOCK4 = model_params["FC_BLOCK4"]

    #CNN related params
    DROPOUT_RATE = model_params["DROPOUT_RATE"]

    FULLY_CONNECTED = model_params["FULLY_CONNECTED"]

    NUM_FILTERS = model_params["NUM_FILTERS"]
    KERNEL_SIZE = model_params["KERNEL_SIZE"]
    KERNEL_STRIDE = model_params["KERNEL_STRIDE"]
    POOL_STRIDE = model_params["POOL_STRIDE"]
    POOL_SIZE = model_params["POOL_SIZE"]

    PADDING = 0  #used for analysis only

    input_dim = (x_train.shape[1], x_train.shape[2])

    # Print the configuration to be trained:

    print("Hyper params:")
    print("================================" * 3)
    print("================================" * 3)
    print("\nModel:        ", F_modeltype)
    print("\nTraining:")
    print("- batch_size:   ", (model_params["batch_size"]))
    print("- optimizer:    ", (model_params["optimizer"]))
    print("- learningrate: ", (model_params["learningrate"]))
    print("\nArchitecture::")
    print("Blocks:         ", model_params["BLOCK_LAYERS"])
    print("")
    print("Block types:    ", model_params["BLOCK1_TYPE"],
          model_params["BLOCK2_TYPE"], model_params["BLOCK3_TYPE"],
          model_params["BLOCK4_TYPE"])
    print("Hidden units:   ", model_params["FC_BLOCK1"],
          model_params["FC_BLOCK2"], model_params["FC_BLOCK3"],
          model_params["FC_BLOCK4"])
    print("Dropout:        ", (model_params["DROPOUT_RATE"]))
    print("\n")
    print("NUM_FILTERS:    ", NUM_FILTERS)
    print("KERNEL_SIZE:    ", KERNEL_SIZE)
    print("KERNEL_STRIDE:  ", KERNEL_STRIDE)
    print("POOL_SIZE:      ", POOL_SIZE)
    print("")

    print("================================" * 3)
    print("================================" * 3)

    #######################################################################

    #################### Architecture #####################

    #######################################################################
    if F_modeltype == "LSTM":
        K.clear_session()

        # X matrix inputs
        inputs = Input(shape=(input_dim))

        # Network:
        x = LSTM(FULLY_CONNECTED,
                 recurrent_dropout=DROPOUT_RATE,
                 return_sequences=False)(inputs)
        x2 = Dense(64, activation='relu')(x)
        predictions = Dense(1, activation='linear')(x2)
        model = Model(inputs=inputs, outputs=predictions)

        blocks_available = 1

    if F_modeltype == "LSTM-0":
        model = Sequential()  ####### input_shape=input_dim

        model.add(
            LSTM(FULLY_CONNECTED,
                 implementation=2,
                 input_shape=input_dim,
                 recurrent_dropout=DROPOUT_RATE,
                 return_sequences=True))

        model.add(BatchNormalization())

        model.add(Dense(
            1))  #, dtype='float32' # only the softmax is advised to be float32

        blocks_available = 1

    #################### TESTING #########################
    """
    # Print model overview
    print(model.summary())
    
    
    
    #compiling the model, creating the callbacks
    model.compile(loss='mae', 
          optimizer='Nadam',
          metrics=['mae'])
    
    
    trainhist = model.fit(x_train, 
                       y_train, 
                       validation_split=0.2,
                       epochs=10, 
                       batch_size=(batch_size))
    
    scores = model.evaluate(x_test, y_test, verbose=1, batch_size=512)
    
    mae_test = scores[1]#/(24.0*3600)
    
    mae_test = mae_test/(24.0*3600)
    mae_test
    """

    return model, blocks_available
Example #18
def make_model(classes, lr_rate, height, width, model_size, rand_seed):
    size = len(classes)
    #check_file = os.path.join(output_dir, 'tmp.h5')

    if model_size == 'L':
        if height != 224 or width != 224:
            Top = False
            weights = None
            layer_cut = -1
        else:
            Top = True
            weights = 'imagenet'
            layer_cut = -6
        # mobile = keras.applications.mobilenet_v2.MobileNetV2(input_shape=input_shape)
        #keras.applications.mobilenet.MobileNet(input_shape=None, alpha=1.0, depth_multiplier=1,
        #dropout=1e-3, include_top=True, weights='imagenet', input_tensor=None, pooling=None, classes=1000)
        mobile = tf.keras.applications.mobilenet.MobileNet(include_top=Top,
                                                           input_shape=(height,
                                                                        width,
                                                                        3),
                                                           pooling='avg',
                                                           weights=weights,
                                                           alpha=1,
                                                           depth_multiplier=1)

        x = mobile.layers[layer_cut].output
        x = Dense(128,
                  kernel_regularizer=regularizers.l2(l=0.015),
                  activation='relu')(x)
        x = Dropout(rate=.4, seed=rand_seed)(x)
        predictions = Dense(size, activation='softmax')(x)
        model = Model(inputs=mobile.input, outputs=predictions)

        for layer in model.layers:
            layer.trainable = True
        model.compile(Adam(lr=lr_rate),
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])

    else:
        if model_size == 'M':
            fm = 2
        else:
            fm = 1
        model = Sequential()
        model.add(
            Conv2D(filters=4 * fm,
                   kernel_size=(3, 3),
                   activation='relu',
                   padding='same',
                   name='L11',
                   kernel_regularizer=regularizers.l2(l=0.015),
                   input_shape=(height, width, 3)))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='L12'))
        model.add(BatchNormalization(name='L13'))
        model.add(
            Conv2D(filters=8 * fm,
                   kernel_size=(3, 3),
                   activation='relu',
                   kernel_regularizer=regularizers.l2(l=0.015),
                   padding='same',
                   name='L21'))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='L22'))
        model.add(BatchNormalization(name='L23'))
        model.add(
            Conv2D(filters=16 * fm,
                   kernel_size=(3, 3),
                   activation='relu',
                   kernel_regularizer=regularizers.l2(l=0.015),
                   padding='same',
                   name='L31'))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='L32'))
        model.add(BatchNormalization(name='L33'))
        if fm == 2:
            model.add(
                Conv2D(filters=32 * fm,
                       kernel_size=(3, 3),
                       activation='relu',
                       kernel_regularizer=regularizers.l2(l=0.015),
                       padding='same',
                       name='L41'))
            model.add(
                MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='L42'))
            model.add(BatchNormalization(name='L43'))
            model.add(
                Conv2D(filters=64 * fm,
                       kernel_size=(3, 3),
                       activation='relu',
                       kernel_regularizer=regularizers.l2(l=0.015),
                       padding='same',
                       name='L51'))
            model.add(
                MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='L52'))
            model.add(BatchNormalization(name='L53'))

        model.add(Flatten())
        model.add(
            Dense(256 * fm,
                  kernel_regularizer=regularizers.l2(l=0.015),
                  activation='relu',
                  name='Dn1'))
        model.add(Dropout(rate=.5))
        model.add(Dense(size, activation='softmax', name='predict'))
        model.compile(Adam(lr=lr_rate),
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])
    #early_stop = EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=10, mode='min', verbose=1)
    #checkpoint = ModelCheckpoint(check_file, monitor='val_loss', verbose=1, save_best_only=True, mode='min', period=1)
    #lrck=keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=.8, patience=1,
    # verbose=1, mode='min', min_delta=0.000001, cooldown=1, min_lr=1.0e-08)
    #callbacks=[checkpoint,lrck, early_stop, ]
    return model
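A hypothetical call matching the signature above; model_size='S' selects the small from-scratch CNN branch:

model = make_model(classes=['cat', 'dog'], lr_rate=0.001, height=128,
                   width=128, model_size='S', rand_seed=42)
model.summary()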
Example #19
class MixedModel:
    def __init__(self,
                 mode,
                 model_params,
                 input_shapes,
                 output_shape,
                 output_folder,
                 neurons=None,
                 auto_build=False):
        self.model = None
        self.mode = mode
        self.is_trained = False
        self.output_folder = output_folder
        self.input_shapes = input_shapes
        self.output_shape = output_shape

        self.batch_size = model_params['MINIBATCH_SIZE']
        self.epochs = model_params['EPOCHS']
        self.loss_func = model_params['LOSS']
        self.optimizer = model_params['OPTIMIZER']
        self.patience = model_params['PATIENCE']
        self.verbose = model_params['VERBOSE_OUTPUT']
        self.validation_percentage = model_params['VALIDATION_PERCENTAGE']

        self.neurons = neurons

        if auto_build:
            self.build_model()  # mode is read from self.mode inside build_model

    def build_model(self, neurons=None, cnn_layers=None, auto_compile=True):
        #if self.mode != "mixed" and len(self.input_shapes) > 1:
        #    raise ValueError("Cannot accept more than one input shape if mode is not mixed")

        if self.mode == "mlp":
            inputs, x = self.build_default_mlp(self.input_shapes[0])
            x = Dense(self.output_shape[0][0], activation="softmax")(x)
            self.model = Model(inputs, x)
        elif self.mode == "cnn":
            inputs, x = self.build_default_cnn(self.input_shapes[0])
            x = Dense(self.output_shape[0][0], activation="softmax")(x)
            self.model = Model(inputs, x)
        elif self.mode == "mixed":
            inputs1, x = self.build_default_cnn(self.input_shapes[0])
            inputs2, y = self.build_default_mlp(self.input_shapes[1])
            z = concatenate([x, y])
            z = Dense(self.output_shape[0][0], activation="softmax")(z)
            self.model = Model([inputs1, inputs2], z)
        elif self.mode == "mlp-custom":
            inputs, x = self.build_custom_mlp(self.input_shapes[0], neurons)
            x = Dense(self.output_shape[0][0], activation="softmax")(x)
            self.model = Model(inputs, x)
        elif self.mode == "cnn-custom":
            #input_size = self.input_shapes[0] + (1,)
            self.model = tf.keras.Sequential()
            self.model.add(Input(shape=self.input_shapes[0]))
            self.build_custom_cnn(cnn_layers)
            self.model.add(Flatten())
            self.model.add(Dense(6, activation="relu"))
            self.model.add(Dense(self.output_shape[0][0],
                                 activation="softmax"))
        elif self.mode == "mixed-searched":

            # build model for image
            inputs1 = Input(shape=self.input_shapes[0])
            a = Conv2D(filters=16,
                       kernel_size=(3, 3),
                       padding="same",
                       activation="relu")(inputs1)
            a = BatchNormalization(axis=-1)(a)
            a = Dropout(0.25)(a)
            a = Conv2D(filters=16,
                       kernel_size=(5, 5),
                       padding="same",
                       activation="relu")(a)
            a = BatchNormalization(axis=-1)(a)
            a = MaxPooling2D(pool_size=(2, 2))(a)
            a = Flatten()(a)
            a = Dense(6, activation="relu")(a)

            # build model for mlp
            inputs2 = Input(shape=self.input_shapes[1])
            b = Dense(10, activation="relu")(inputs2)
            b = Dropout(0.25)(b)  # removing this yielded about 58% accuracy
            b = Dense(4, activation="relu")(b)
            b = Dense(10, activation="relu")(b)

            # concatenate
            z = concatenate([a, b])

            # softmax activation to find output
            z = Dense(self.output_shape[0][0], activation="softmax")(z)

            # create model
            self.model = Model([inputs1, inputs2], z)

        elif self.mode == "full-mixed":

            # build model for img1
            inputs1 = Input(shape=self.input_shapes[0])
            a = Conv2D(filters=16,
                       kernel_size=(3, 3),
                       padding="same",
                       activation="relu")(inputs1)
            a = BatchNormalization(axis=-1)(a)
            a = Dropout(0.25)(a)
            a = Conv2D(filters=16,
                       kernel_size=(5, 5),
                       padding="same",
                       activation="relu")(a)
            a = BatchNormalization(axis=-1)(a)
            a = MaxPooling2D(pool_size=(2, 2))(a)
            a = Flatten()(a)
            a = Dense(6, activation="relu")(a)

            # build model for img2
            inputs2 = Input(shape=self.input_shapes[1])
            b = Conv2D(filters=16,
                       kernel_size=(3, 3),
                       padding="same",
                       activation="relu")(inputs2)
            b = BatchNormalization(axis=-1)(b)
            b = Dropout(0.25)(b)
            b = Conv2D(filters=16,
                       kernel_size=(5, 5),
                       padding="same",
                       activation="relu")(b)
            b = BatchNormalization(axis=-1)(b)
            b = MaxPooling2D(pool_size=(2, 2))(b)
            b = Flatten()(b)
            b = Dense(6, activation="relu")(b)

            # build model for img3
            inputs3 = Input(shape=self.input_shapes[2])
            c = Conv2D(filters=16,
                       kernel_size=(3, 3),
                       padding="same",
                       activation="relu")(inputs3)
            c = BatchNormalization(axis=-1)(c)
            c = Dropout(0.25)(c)
            c = Conv2D(filters=16,
                       kernel_size=(5, 5),
                       padding="same",
                       activation="relu")(c)
            c = BatchNormalization(axis=-1)(c)
            c = MaxPooling2D(pool_size=(2, 2))(c)
            c = Flatten()(c)
            c = Dense(6, activation="relu")(c)

            # build model for img4
            inputs4 = Input(shape=self.input_shapes[3])
            d = Conv2D(filters=16,
                       kernel_size=(3, 3),
                       padding="same",
                       activation="relu")(inputs4)
            d = BatchNormalization(axis=-1)(d)
            d = Dropout(0.25)(d)
            d = Conv2D(filters=16,
                       kernel_size=(5, 5),
                       padding="same",
                       activation="relu")(d)
            d = BatchNormalization(axis=-1)(d)
            d = MaxPooling2D(pool_size=(2, 2))(d)
            d = Flatten()(d)
            d = Dense(6, activation="relu")(d)

            # build model for mlp
            inputs5 = Input(shape=self.input_shapes[4])
            e = Dense(25, activation="relu")(inputs5)
            #e = Dropout(0.25)(e) # removing this yielded about 58% accuracy
            #e = Dense(4, activation="relu")(e)
            #e = Dense(10, activation="relu")(e)

            # concatenate
            z = concatenate([a, b, c, d, e])

            # softmax activation to find output
            z = Dense(self.output_shape[0][0], activation="softmax")(z)

            # create model
            self.model = Model([inputs1, inputs2, inputs3, inputs4, inputs5],
                               z)

        else:
            raise ValueError("Unrecognized mode for model generation")

        if auto_compile:
            self.compile_model()

    def build_default_mlp(self, input_size):
        inputs = Input(shape=input_size[0])
        x = Dense(8, activation="relu")(inputs)
        x = Dense(4, activation="relu")(x)
        x = Dense(10, activation="relu")(x)
        #x = Dense(self.output_shape[0][0], activation="softmax")(x)
        return inputs, x

    def build_default_cnn(self, input_size):
        # our images are grayscale (single-channel), so append the channel axis
        input_size = input_size + (1,)
        inputs = Input(shape=input_size)
        x = Conv2D(16, (3, 3), padding="same", activation="relu")(inputs)
        x = BatchNormalization(axis=-1)(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)
        x = Dropout(0.5)(x)

        x = Conv2D(32, (3, 3), padding="same", activation="relu")(x)
        x = BatchNormalization(axis=-1)(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)
        x = Dropout(0.25)(x)

        x = Flatten()(x)
        x = Dense(6, activation="relu")(x)
        x = BatchNormalization(axis=-1)(x)
        x = Dropout(0.25)(x)

        return inputs, x

    def build_custom_mlp(self, input_size, neurons):
        inputs = Input(shape=input_size[0])
        x = Dense(8, activation="relu")(inputs)
        for n in neurons:
            x = Dense(n, activation="relu")(x)
        #x = Dense(self.output_shape[0][0], activation="softmax")(x)

        return inputs, x

    def build_custom_cnn(self, layer_defs):
        # This only appends layers; the input/output layers are defined in
        # the build_model function.
        for l in layer_defs:
            self.model.add(self.decode_cnn_layer(l))
            self.model.add(BatchNormalization(axis=-1))

    def decode_cnn_layer(self, op):
        k = op.keys()
        if "conv" in k:
            # convolutional layer: 'conv' is the kernel size, 'filters' the filter count
            f = op['filters']
            s = op['conv']
            return Conv2D(f, (s, s), padding='same', activation="relu")

        if "pooling" in k:
            # pooling layer: 'pooling' is the pool size
            s = op['pooling']
            return MaxPooling2D((s, s))

        raise ValueError(f"Unrecognized layer definition: {op}")
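    # Example layer_defs consumed by build_custom_cnn / decode_cnn_layer
    # (hypothetical values; 'conv' / 'pooling' give kernel / pool size):
    #   [{'conv': 3, 'filters': 16}, {'pooling': 2}, {'conv': 5, 'filters': 32}]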

    def compile_model(self):
        if self.model is not None:
            self.model.compile(loss=self.loss_func,
                               optimizer=self.optimizer,
                               metrics=['accuracy'])
            return True
        return False

    def train(self,
              x,
              y,
              output_model_file,
              save_at_checkpoints=True,
              early_stopping=True,
              use_existing=True):
        if self.model is not None:

            # TODO: Implement loading from existing model

            callbacks = []
            if early_stopping:
                callbacks.append(
                    tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                                     patience=self.patience,
                                                     verbose=self.verbose))

            # TODO: Implement saving at checkpoints
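            # A possible checkpoint callback for the TODO above (a sketch;
            # it assumes output_model_file is the intended checkpoint path):
            #   if save_at_checkpoints:
            #       callbacks.append(tf.keras.callbacks.ModelCheckpoint(
            #           output_model_file, save_best_only=True))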

            print(f"Training {self.mode} model...")
            history = self.model.fit(
                x,
                y,
                validation_split=self.validation_percentage,
                epochs=self.epochs,
                shuffle=True,
                verbose=self.verbose,
                callbacks=callbacks)
            self.is_trained = True

            return history

    def print_model(self):
        if self.model is not None:
            self.model.summary()  # summary() prints itself and returns None
        else:
            raise ValueError("Model is undefined, cannot print")

    def save_model(self):
        pass

    def load_model(self):
        pass
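    # A possible implementation of the two stubs above using the standard
    # tf.keras persistence API (the `path` parameter is a hypothetical
    # addition, not part of the original interface):
    #
    #   def save_model(self, path):
    #       if self.model is None:
    #           raise ValueError("Model is undefined, cannot save")
    #       self.model.save(path)  # architecture + weights + optimizer state
    #
    #   def load_model(self, path):
    #       self.model = tf.keras.models.load_model(path)
    #       self.is_trained = True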

    def test(self, x, y):
        if not self.is_trained:
            raise ValueError("Model is not yet trained")

        results = self.model.evaluate(x, y, return_dict=True)
        return results
Пример #20
0
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Input
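
# (hypothetical) stand-in for the training data built in the elided
# "#1. data" section of this example: 5 input features, 2 targets
import numpy as np
x_train = np.random.rand(100, 5)
y_train = np.random.rand(100, 2)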

input1 = Input(shape=(5,))
dense1 = Dense(5, activation='relu')(input1)
dense2 = Dense(3)(dense1)
dense3 = Dense(4)(dense2)
outputs = Dense(2)(dense3)
# each layer is called on the previous layer's output in turn;
# the Input and the final output are tied together in Model() at the end
model = Model(inputs=input1, outputs=outputs)
model.summary()
# match the input and output dimensions
# this is the functional-API version of the model

'''
model = Sequential()
#model.add(Dense(10, input_dim=5))  # the row count is ignored; only the column count matters
model.add(Dense(5, activation='relu', input_shape=(5,)))
# input_shape=(5,) means the data has 5 columns
model.add(Dense(3))
model.add(Dense(4))
model.add(Dense(2))
model.summary()
# y has 2 columns, so the final layer is Dense(2)
'''

#3. compile and train
model.compile(loss='mse', optimizer='adam', metrics=['mae'])
model.fit(x_train, y_train, batch_size=1, epochs=50,
          validation_split=0.2)  # hold out 20% of the samples for validation
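
# A quick sanity check after training (a sketch, assuming the hypothetical
# stand-in data defined above):
loss, mae = model.evaluate(x_train, y_train, batch_size=1, verbose=0)
print(f"train loss={loss:.4f}, mae={mae:.4f}")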
Пример #21
0
# Imports added so this snippet runs standalone (not in the original excerpt).
import numpy as np
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import (Activation, BatchNormalization,
                                     Concatenate, Conv2D, Conv2DTranspose,
                                     Dense, Dropout, Flatten, Input,
                                     MaxPooling2D, Reshape, UpSampling2D)
from tensorflow.keras.regularizers import l2


def build_ATN(architecture=1, input_shape=[28, 28, 1], num_classes=10):
    if architecture == 0:
        image = Input(shape=input_shape)
        target = Input(shape=(num_classes, ))
        #target_int = Lambda(lambda x:K.argmax(x,axis=-1))(target)
        x1 = Flatten()(image)
        #x2 = Embedding(10,20,input_length=1)(target_int)
        #x2 = Lambda(lambda x: K.squeeze(x, -2))(x2)
        x = Concatenate(axis=-1)([x1, target])
        x = Dense(2048,
                  kernel_initializer='glorot_normal',
                  bias_initializer='zeros')(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = Dense(np.prod(input_shape),
                  activation='sigmoid',
                  bias_initializer='zeros')(x)
        x = Reshape(input_shape)(x)
        cnn = Model(inputs=[image, target], outputs=x)
    elif architecture == 1:
        image = Input(shape=input_shape)
        target = Input(shape=(num_classes, ))
        x1 = Flatten()(image)
        x = Concatenate(axis=-1)([x1, target])
        x = Dense(1024,
                  kernel_initializer='glorot_normal',
                  bias_initializer='zeros')(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = Dense(1024,
                  kernel_initializer='glorot_normal',
                  bias_initializer='zeros')(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = Dense(np.prod(input_shape),
                  activation='sigmoid',
                  bias_initializer='zeros')(x)
        x = Reshape(input_shape)(x)
        cnn = Model(inputs=[image, target], outputs=x)
    elif architecture == -1:
        cnn = Sequential()
        cnn.add(Flatten(input_shape=input_shape))
        cnn.add(
            Dense(2048,
                  activation='relu',
                  kernel_initializer='glorot_normal',
                  bias_initializer='zeros'))
        cnn.add(Dropout(0.25))
        cnn.add(
            Dense(np.prod(input_shape),
                  activation='sigmoid',
                  kernel_initializer='glorot_normal',
                  bias_initializer='zeros'))
        cnn.add(Reshape(input_shape))
    elif architecture == -2:
        cnn = Sequential()
        cnn.add(
            Conv2D(
                64,
                kernel_size=(3, 3),
                activation='relu',
                kernel_initializer='glorot_normal',
                bias_initializer='zeros',  #Constant(-0.5),
                kernel_regularizer=l2(0.005),
                input_shape=input_shape,
                padding='same'))
        cnn.add(
            Conv2D(128,
                   kernel_size=(3, 3),
                   activation='relu',
                   kernel_initializer='glorot_normal',
                   bias_initializer='zeros',
                   kernel_regularizer=l2(0.005),
                   padding='same'))
        cnn.add(MaxPooling2D(pool_size=(2, 2)))

        cnn.add(
            Conv2D(256,
                   kernel_size=(3, 3),
                   activation='relu',
                   kernel_initializer='glorot_normal',
                   bias_initializer='zeros',
                   kernel_regularizer=l2(0.005),
                   padding='same'))
        cnn.add(MaxPooling2D(pool_size=(2, 2)))
        cnn.add(Flatten())
        cnn.add(
            Dense(2048,
                  activation='relu',
                  kernel_initializer='glorot_normal',
                  bias_initializer='zeros',
                  kernel_regularizer=l2(0.05)))
        cnn.add(Dropout(0.25))
        cnn.add(
            Dense(np.prod(input_shape),
                  activation='sigmoid',
                  kernel_initializer='glorot_normal',
                  bias_initializer='zeros'))
        cnn.add(Reshape(input_shape))
    elif architecture == 2:
        cnn = Sequential()
        cnn.add(
            Conv2D(256,
                   kernel_size=(3, 3),
                   activation='relu',
                   input_shape=input_shape,
                   padding='same',
                   use_bias=True,
                   kernel_initializer='glorot_normal',
                   bias_initializer='zeros'))
        cnn.add(MaxPooling2D(pool_size=(2, 2)))
        cnn.add(Dropout(0.5))
        cnn.add(
            Conv2D(512,
                   kernel_size=(3, 3),
                   activation='relu',
                   padding='same',
                   use_bias=True,
                   kernel_initializer='glorot_normal',
                   bias_initializer='zeros'))
        #cnn.add(MaxPooling2D(pool_size=(2, 2)))
        #cnn.add(Dropout(0.5))
        #cnn.add(Conv2D(512, kernel_size=(3, 3),activation='relu',padding='same',
        #          use_bias=True, kernel_initializer='glorot_normal', bias_initializer='zeros'))
        #cnn.add(UpSampling2D(data_format='channels_last'))
        #cnn.add(Dropout(0.5))
        #cnn.add(Conv2DTranspose(256, kernel_size=(3,3), padding='same', activation='relu',
        #          use_bias=True, kernel_initializer='glorot_normal', bias_initializer='zeros'))
        cnn.add(UpSampling2D(data_format='channels_last'))
        cnn.add(Dropout(0.5))
        cnn.add(
            Conv2DTranspose(256,
                            kernel_size=(3, 3),
                            padding='same',
                            activation='relu',
                            use_bias=True,
                            kernel_initializer='glorot_normal',
                            bias_initializer='zeros'))
        cnn.add(Dropout(0.5))
        cnn.add(
            Conv2DTranspose(1,
                            kernel_size=(3, 3),
                            padding='same',
                            activation='sigmoid',
                            use_bias=True,
                            kernel_initializer='glorot_normal',
                            bias_initializer='zeros'))
    else:
        raise ValueError(f"Unrecognized architecture: {architecture}")
    return cnn
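
# A minimal smoke test of the builder above (a sketch, not from the original);
# architectures 0 and 1 expect an [image, one-hot target] input pair.
atn = build_ATN(architecture=1)
images = np.random.rand(4, 28, 28, 1).astype("float32")
targets = np.eye(10)[np.random.randint(0, 10, size=4)]
perturbed = atn.predict([images, targets])
print(perturbed.shape)  # (4, 28, 28, 1)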