Example #1
import re
import string

import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras.layers import (Dense, Embedding, GlobalAveragePooling1D,
                                     TextVectorization)


# The opening of this helper was truncated in the excerpt; reconstructed from
# the body: lowercase the text, strip "<br />" tags, then remove punctuation.
def custom_standardization(input_data):
    lowercase = tf.strings.lower(input_data)
    stripped_html = tf.strings.regex_replace(lowercase, '<br />', ' ')
    return tf.strings.regex_replace(stripped_html,
                                    '[%s]' % re.escape(string.punctuation), '')


vectorize_layer = TextVectorization(standardize=custom_standardization,
                                    max_tokens=vocab_size,
                                    output_mode='int',
                                    output_sequence_length=sequence_length)

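# Make a text-only dataset (drop the labels) and call adapt() to build the vocabulary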
text_ds = train_ds.map(lambda x, y: x)
vectorize_layer.adapt(text_ds)

model = Sequential([
    vectorize_layer,
    Embedding(vocab_size, Embedding_dim, name="embedding"),
    GlobalAveragePooling1D(),
    Dense(16, activation='relu'),
    Dense(1)
])

tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir="logs")

model.compile(optimizer='adam',
              loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
              metrics=['accuracy'])

model.fit(train_ds,
          validation_data=val_ds,
          epochs=15,
          callbacks=[tensorboard_callback])
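
A typical follow-up (assumed; the excerpt stops here) is to pull the trained weights out of the layer named "embedding", along with the vocabulary learned by the TextVectorization layer:

weights = model.get_layer('embedding').get_weights()[0]  # shape: (vocab_size, embedding_dim)
vocab = vectorize_layer.get_vocabulary()                 # index -> token mapping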
Example #2
from tensorflow.keras.layers import Embedding, Dropout, Conv1D, GlobalAveragePooling1D, Dense
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.keras.models import load_model

embedding_size = 256
batch_size = 256

# Model design
"""
Apply a 1D convolution with 256 kernels of size 3, then build the hidden and
output layers as two Dense layers.
"""
model = Sequential()
model.add(Embedding(vocab_size, embedding_size))
model.add(Dropout(0.3))
model.add(Conv1D(256, 3, padding='valid', activation='relu'))
model.add(GlobalAveragePooling1D())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))

es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=3)
mc = ModelCheckpoint('CNN_MODEL.h5',
                     monitor='val_acc',
                     mode='max',
                     verbose=1,
                     save_best_only=True)

model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
# The arguments after y_train were truncated; the epochs, validation split and
# callbacks below are assumed
history = model.fit(X_train,
                    y_train,
                    epochs=15,
                    batch_size=batch_size,
                    validation_split=0.2,
                    callbacks=[es, mc])
Example #3
# The opening of this snippet was truncated; input_layer and the Embedding's
# input_dim (a vocabulary size assumed to be defined earlier) are reconstructed
input_layer = Input(shape=(input_length, ))
output_layer = Embedding(input_dim=vocab_size,
                         output_dim=embedding_dim,
                         input_shape=(input_length, ))(input_layer)

output_layer = SpatialDropout1D(spatial_dropout)(output_layer)

output_layer = Bidirectional(
    LSTM(lstm_units,
         return_sequences=True,
         dropout=lstm_dropout,
         recurrent_dropout=recurrent_dropout))(output_layer)
output_layer = Conv1D(filters,
                      kernel_size=kernel_size,
                      padding='valid',
                      kernel_initializer='glorot_uniform')(output_layer)

avg_pool = GlobalAveragePooling1D()(output_layer)
max_pool = GlobalMaxPooling1D()(output_layer)
output_layer = concatenate([avg_pool, max_pool])

output_layer = Dense(num_classes, activation='softmax')(output_layer)

model = Model(input_layer, output_layer)

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.summary()

train_sequences = [text.split() for text in train.text]
validation_sequences = [text.split() for text in validation.text]
list_tokenized_train = tokenizer.texts_to_sequences(train_sequences)
Example #4
    def create_model(self,
                     filters_number,
                     network_depth=6,
                     use_residual=True,
                     use_bottleneck=True,
                     max_kernel_size=20,
                     learning_rate=0.01,
                     regularization_rate=0.0):
        """
        Generate an InceptionTime model. See Fawaz et al. 2019.

        The compiled Keras model is returned.

        The input shape (num_samples, num_timesteps, num_channels) and the
        number of classes are taken from self.x_shape and
        self.number_of_classes rather than passed in as arguments.

        Parameters
        ----------
        filters_number : int
            Number of filters for each convolutional layer.
        network_depth : int
            Depth of network, i.e. number of Inception modules to stack.
        use_residual : bool
            If True, residual connections are used. Default is True.
        use_bottleneck : bool
            If True, a bottleneck layer is used at the entry of each Inception
            module. Default is True.
        max_kernel_size : int
            Maximum kernel size for convolutions within the Inception module.
        learning_rate : float
            learning rate
        regularization_rate: float
            regularization rate

        Returns
        -------
        model : Keras model
            The compiled Keras model
        """
        dim_length = self.x_shape[1]  # number of samples in a time series
        dim_channels = self.x_shape[2]  # number of channels
        weightinit = 'lecun_uniform'  # weight initialization
        bottleneck_size = 32

        def inception_module(input_tensor, stride=1, activation='linear'):

            if use_bottleneck and int(input_tensor.shape[-1]) > 1:
                input_inception = Conv1D(filters=bottleneck_size,
                                         kernel_size=1,
                                         padding='same',
                                         activation=activation,
                                         kernel_initializer=weightinit,
                                         use_bias=False)(input_tensor)
            else:
                input_inception = input_tensor

            kernel_sizes = [max_kernel_size // (2**i) for i in range(3)]
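            # e.g. max_kernel_size=20 yields kernel sizes [20, 10, 5]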
            conv_list = []

            for kernel_size in kernel_sizes:
                conv_list.append(
                    Conv1D(filters=filters_number,
                           kernel_size=kernel_size,
                           strides=stride,
                           padding='same',
                           activation=activation,
                           kernel_initializer=weightinit,
                           use_bias=False)(input_inception))

            max_pool_1 = MaxPool1D(pool_size=3, strides=stride,
                                   padding='same')(input_tensor)

            conv_last = Conv1D(filters=filters_number,
                               kernel_size=1,
                               padding='same',
                               activation=activation,
                               kernel_initializer=weightinit,
                               use_bias=False)(max_pool_1)

            conv_list.append(conv_last)

            x = Concatenate(axis=2)(conv_list)
            x = BatchNormalization()(x)
            x = Activation(activation='relu')(x)
            return x

        def shortcut_layer(input_tensor, out_tensor):
            shortcut_y = Conv1D(filters=int(out_tensor.shape[-1]),
                                kernel_size=1,
                                padding='same',
                                kernel_initializer=weightinit,
                                use_bias=False)(input_tensor)
            shortcut_y = BatchNormalization()(shortcut_y)

            x = Add()([shortcut_y, out_tensor])
            x = Activation('relu')(x)
            return x

        # Build the actual model:
        input_layer = Input((dim_length, dim_channels))
        x = BatchNormalization()(
            input_layer)  # Added batchnorm (not in original paper)
        input_res = x

        for depth in range(network_depth):
            x = inception_module(x)

            if use_residual and depth % 3 == 2:
                x = shortcut_layer(input_res, x)
                input_res = x

        gap_layer = GlobalAveragePooling1D()(x)

        # Final classification layer
        output_layer = Dense(self.number_of_classes,
                             activation='softmax')(gap_layer)

        # Create model and compile
        model = Model(inputs=input_layer, outputs=output_layer)

        model.compile(loss='categorical_crossentropy',
                      optimizer=Adam(learning_rate=learning_rate),
                      metrics=self.metrics)

        return model
Example #5
    def __init__(self, num_actions):
        super().__init__('mlp_policy')
        embeddings = []
        embeddings_shape = []

        # Generations
        embedding_size = POKEMON_EXTRA_SMALL_EMBEDDINGS_DIM
        generations_embedding = Embedding(POKEMON_EXTRA_SMALL_VOCAB_SIZE,
                                          embedding_size,
                                          input_length=1)
        embeddings.append(generations_embedding)
        embeddings_shape.append(Reshape(target_shape=(embedding_size, )))

        # Game Types
        embedding_size = POKEMON_EXTRA_SMALL_EMBEDDINGS_DIM
        gametypes_embedding = Embedding(POKEMON_EXTRA_SMALL_VOCAB_SIZE,
                                        embedding_size,
                                        input_length=1)
        embeddings.append(gametypes_embedding)
        embeddings_shape.append(Reshape(target_shape=(embedding_size, )))

        # Tiers
        embedding_size = POKEMON_EXTRA_SMALL_EMBEDDINGS_DIM
        tiers_embedding = Embedding(POKEMON_EXTRA_SMALL_VOCAB_SIZE,
                                    embedding_size,
                                    input_length=1)
        embeddings.append(tiers_embedding)
        embeddings_shape.append(Reshape(target_shape=(embedding_size, )))

        # Weather
        embedding_size = POKEMON_EXTRA_SMALL_EMBEDDINGS_DIM
        weather_embedding = Embedding(POKEMON_EXTRA_SMALL_VOCAB_SIZE,
                                      embedding_size,
                                      input_length=1)
        embeddings.append(weather_embedding)
        embeddings_shape.append(Reshape(target_shape=(embedding_size, )))

        # Terrain
        embedding_size = POKEMON_EXTRA_SMALL_EMBEDDINGS_DIM
        terrain_embedding = Embedding(POKEMON_EXTRA_SMALL_VOCAB_SIZE,
                                      embedding_size,
                                      input_length=1)
        embeddings.append(terrain_embedding)
        embeddings_shape.append(Reshape(target_shape=(embedding_size, )))

        # Room
        embedding_size = POKEMON_EXTRA_SMALL_EMBEDDINGS_DIM
        room_embedding = Embedding(POKEMON_EXTRA_SMALL_VOCAB_SIZE,
                                   embedding_size,
                                   input_length=1)
        embeddings.append(room_embedding)
        embeddings_shape.append(Reshape(target_shape=(embedding_size, )))

        # Effective p1 a
        embedding_size = POKEMON_EXTRA_SMALL_EMBEDDINGS_DIM
        effective_p1_a_embedding = Embedding(POKEMON_EXTRA_SMALL_VOCAB_SIZE,
                                             embedding_size,
                                             input_length=1)
        embeddings.append(effective_p1_a_embedding)
        embeddings_shape.append(Reshape(target_shape=(embedding_size, )))

        # Effective p2 a
        embedding_size = POKEMON_EXTRA_SMALL_EMBEDDINGS_DIM
        effective_p2_a_embedding = Embedding(POKEMON_EXTRA_SMALL_VOCAB_SIZE,
                                             embedding_size,
                                             input_length=1)
        embeddings.append(effective_p2_a_embedding)
        embeddings_shape.append(Reshape(target_shape=(embedding_size, )))

        # p1 Pending Attacks A
        embedding_size = POKEMON_LARGE_EMBEDDINGS_DIM
        seen_attacks_a_embedding = Embedding(POKEMON_MAX_VOCAB_SIZE,
                                             embedding_size,
                                             input_length=1)
        embeddings.append(seen_attacks_a_embedding)
        embeddings_shape.append(Reshape(target_shape=(embedding_size, )))

        # p2 Pending Attacks A
        embedding_size = POKEMON_LARGE_EMBEDDINGS_DIM
        seen_attacks_b_embedding = Embedding(POKEMON_MAX_VOCAB_SIZE,
                                             embedding_size,
                                             input_length=1)
        embeddings.append(seen_attacks_b_embedding)
        embeddings_shape.append(Reshape(target_shape=(embedding_size, )))

        # For each of the player's six Pokémon
        for i in range(6):

            embedding_size = POKEMON_LARGE_EMBEDDINGS_DIM
            player_pokemon_name_embedding = Embedding(POKEMON_MAX_VOCAB_SIZE,
                                                      embedding_size,
                                                      input_length=1)
            embeddings.append(player_pokemon_name_embedding)
            embeddings_shape.append(Reshape(target_shape=(embedding_size, )))

            embedding_size = POKEMON_EXTRA_SMALL_EMBEDDINGS_DIM
            player_pokemon_status_embedding = Embedding(
                POKEMON_EXTRA_SMALL_VOCAB_SIZE, embedding_size, input_length=1)
            embeddings.append(player_pokemon_status_embedding)
            embeddings_shape.append(Reshape(target_shape=(embedding_size, )))

            embedding_size = POKEMON_SMALL_EMBEDDINGS_DIM
            player_pokemon_first_element_embedding = Embedding(
                POKEMON_MAX_VOCAB_SIZE, embedding_size, input_length=1)
            embeddings.append(player_pokemon_first_element_embedding)
            embeddings_shape.append(Reshape(target_shape=(embedding_size, )))

            embedding_size = POKEMON_SMALL_EMBEDDINGS_DIM
            player_pokemon_second_element_embedding = Embedding(
                POKEMON_MAX_VOCAB_SIZE, embedding_size, input_length=1)
            embeddings.append(player_pokemon_second_element_embedding)
            embeddings_shape.append(Reshape(target_shape=(embedding_size, )))

            embedding_size = POKEMON_LARGE_EMBEDDINGS_DIM
            player_pokemon_abilities_embedding = Embedding(
                POKEMON_MEDIUM_VOCAB_SIZE, embedding_size, input_length=1)
            embeddings.append(player_pokemon_abilities_embedding)
            embeddings_shape.append(Reshape(target_shape=(embedding_size, )))

            embedding_size = POKEMON_LARGE_EMBEDDINGS_DIM
            player_pokemon_items_embedding = Embedding(POKEMON_MAX_VOCAB_SIZE,
                                                       embedding_size,
                                                       input_length=1)
            embeddings.append(player_pokemon_items_embedding)
            embeddings_shape.append(Reshape(target_shape=(embedding_size, )))

            embedding_size = POKEMON_EXTRA_SMALL_EMBEDDINGS_DIM
            player_pokemon_genders_embedding = Embedding(
                POKEMON_EXTRA_SMALL_VOCAB_SIZE, embedding_size, input_length=1)
            embeddings.append(player_pokemon_genders_embedding)
            embeddings_shape.append(Reshape(target_shape=(embedding_size, )))

            # 4 attack slots
            for j in range(4):
                embedding_size = POKEMON_LARGE_EMBEDDINGS_DIM
                player_attack_slot_1_embedding = Embedding(
                    POKEMON_MAX_VOCAB_SIZE, embedding_size, input_length=1)
                embeddings.append(player_attack_slot_1_embedding)
                embeddings_shape.append(
                    Reshape(target_shape=(embedding_size, )))

                embedding_size = POKEMON_SMALL_EMBEDDINGS_DIM
                player_attack_slot_1_element_embedding = Embedding(
                    POKEMON_SMALL_VOCAB_SIZE, embedding_size, input_length=1)
                embeddings.append(player_attack_slot_1_element_embedding)
                embeddings_shape.append(
                    Reshape(target_shape=(embedding_size, )))

                embedding_size = 1
                player_attack_slot_1_category_embedding = Embedding(
                    POKEMON_EXTRA_SMALL_VOCAB_SIZE,
                    embedding_size,
                    input_length=1)
                embeddings.append(player_attack_slot_1_category_embedding)
                embeddings_shape.append(
                    Reshape(target_shape=(embedding_size, )))

        # For each of the agent's six Pokémon
        for i in range(6):

            embedding_size = POKEMON_LARGE_EMBEDDINGS_DIM
            agent_pokemon_name_embedding = Embedding(POKEMON_MAX_VOCAB_SIZE,
                                                     embedding_size,
                                                     input_length=1)
            embeddings.append(agent_pokemon_name_embedding)
            embeddings_shape.append(Reshape(target_shape=(embedding_size, )))

            embedding_size = POKEMON_EXTRA_SMALL_EMBEDDINGS_DIM
            agent_pokemon_status_embedding = Embedding(
                POKEMON_EXTRA_SMALL_VOCAB_SIZE, embedding_size, input_length=1)
            embeddings.append(agent_pokemon_status_embedding)
            embeddings_shape.append(Reshape(target_shape=(embedding_size, )))

            embedding_size = POKEMON_SMALL_EMBEDDINGS_DIM
            agent_pokemon_first_element_embedding = Embedding(
                POKEMON_MAX_VOCAB_SIZE, embedding_size, input_length=1)
            embeddings.append(agent_pokemon_first_element_embedding)
            embeddings_shape.append(Reshape(target_shape=(embedding_size, )))

            embedding_size = POKEMON_SMALL_EMBEDDINGS_DIM
            agent_pokemon_second_element_embedding = Embedding(
                POKEMON_MAX_VOCAB_SIZE, embedding_size, input_length=1)
            embeddings.append(agent_pokemon_second_element_embedding)
            embeddings_shape.append(Reshape(target_shape=(embedding_size, )))

            embedding_size = POKEMON_LARGE_EMBEDDINGS_DIM
            agent_pokemon_abilities_embedding = Embedding(
                POKEMON_MEDIUM_VOCAB_SIZE, embedding_size, input_length=1)
            embeddings.append(agent_pokemon_abilities_embedding)
            embeddings_shape.append(Reshape(target_shape=(embedding_size, )))

            embedding_size = POKEMON_LARGE_EMBEDDINGS_DIM
            agent_pokemon_items_embedding = Embedding(POKEMON_MAX_VOCAB_SIZE,
                                                      embedding_size,
                                                      input_length=1)
            embeddings.append(agent_pokemon_items_embedding)
            embeddings_shape.append(Reshape(target_shape=(embedding_size, )))

            embedding_size = POKEMON_EXTRA_SMALL_EMBEDDINGS_DIM
            agent_pokemon_genders_embedding = Embedding(
                POKEMON_EXTRA_SMALL_VOCAB_SIZE, embedding_size, input_length=1)
            embeddings.append(agent_pokemon_genders_embedding)
            embeddings_shape.append(Reshape(target_shape=(embedding_size, )))

            # 4 attack slots
            for j in range(4):
                embedding_size = POKEMON_LARGE_EMBEDDINGS_DIM
                agent_attack_slot_1_embedding = Embedding(
                    POKEMON_MAX_VOCAB_SIZE, embedding_size, input_length=1)
                embeddings.append(agent_attack_slot_1_embedding)
                embeddings_shape.append(
                    Reshape(target_shape=(embedding_size, )))

                embedding_size = POKEMON_SMALL_EMBEDDINGS_DIM
                agent_attack_slot_1_element_embedding = Embedding(
                    POKEMON_SMALL_VOCAB_SIZE, embedding_size, input_length=1)
                embeddings.append(agent_attack_slot_1_element_embedding)
                embeddings_shape.append(
                    Reshape(target_shape=(embedding_size, )))

                embedding_size = 1
                agent_attack_slot_1_category_embedding = Embedding(
                    POKEMON_EXTRA_SMALL_VOCAB_SIZE,
                    embedding_size,
                    input_length=1)
                embeddings.append(agent_attack_slot_1_category_embedding)
                embeddings_shape.append(
                    Reshape(target_shape=(embedding_size, )))

        merged = Concatenate(axis=-1)  #(embeddings)
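        # Layers are only constructed in __init__; in this Model-subclassing
        # style they are applied to tensors later, in call() (hence the
        # commented-out calls such as "#(embeddings)")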

        self.conv1_1 = Conv1D(256, 10, activation='relu')
        #    conv1 = Conv1D(100, 10, activation='relu', batch_input_shape=(None, ob_space.shape[1]))(field_inputs_)
        self.conv1_2 = Conv1D(256, 10, activation='relu')
        self.max_1 = MaxPooling1D(8)
        self.conv1_3 = Conv1D(128, 4, activation='relu')
        self.conv1_4 = Conv1D(128, 4, activation='relu')
        self.max_2 = MaxPooling1D(8)
        self.conv1_5 = Conv1D(256, 10, activation='relu')
        self.conv1_6 = Conv1D(256, 10, activation='relu')
        self.glob_1 = GlobalAveragePooling1D()
        self.drop = Dropout(0.3)

        # This returns a tensor
        non_category_data_input_keras = tf.keras.layers.Input(
            POKEMON_FIELD_REMAINDER, name="non_category_data_input")
        categorical_dense = tf.keras.layers.Dense(512,
                                                  activation='relu')  #(merged)
        #    categorical_dense = Reshape(target_shape=(512,))(categorical_dense)
        non_categorical_dense_1 = tf.keras.layers.Dense(
            512, activation='relu')  #(non_category_data_input_keras)
        non_categorical_dense_2 = tf.keras.layers.Dense(
            1024, activation='relu')  #(non_category_data_input_keras)
        non_categorical_dense_3 = tf.keras.layers.Dense(
            512, activation='relu')  #(non_category_data_input_keras)

        combined_fields = Concatenate(
            axis=-1)  #([non_categorical_dense, categorical_dense])

        self.combined_dense_1 = tf.keras.layers.Dense(256, activation='relu')
        self.combined_dense_2 = tf.keras.layers.Dense(512, activation='relu')
        self.combined_dense_3 = tf.keras.layers.Dense(256, activation='relu')

        self.embeddings = embeddings
        self.embeddings_shape = embeddings_shape
        self.merged = merged
        self.categorical_dense = categorical_dense
        self.non_categorical_dense_1 = non_categorical_dense_1
        self.non_categorical_dense_2 = non_categorical_dense_2
        self.non_categorical_dense_3 = non_categorical_dense_3
        self.non_category_data_input_keras = non_category_data_input_keras
        self.combined_fields = combined_fields

        # Note: no tf.get_variable(), just simple Keras API!
        self.hidden1 = kl.Dense(256, activation='relu')  #(combined_fields)
        self.hidden2 = kl.Dense(128, activation='relu')
        self.value = kl.Dense(1, name='value')
        # Logits are unnormalized log probabilities.
        self.logits = kl.Dense(num_actions, name='policy_logits')
        self.dist = ProbabilityDistribution()
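
ProbabilityDistribution is referenced above but not defined in this excerpt; a minimal sketch consistent with its use here (sampling an action from the policy logits) might be:

class ProbabilityDistribution(tf.keras.Model):
    def call(self, logits, **kwargs):
        # Draw one categorical sample per batch row from unnormalized log-probs
        return tf.squeeze(tf.random.categorical(logits, 1), axis=-1)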
Example #6
    def __init__(self, num_classes=2):
        super(Multi_Resnet_scalars, self).__init__()
        
        activ = relu
        activ_end = 'softmax'
        init = 'he_normal'
        drop_rate = 0.0
        nfeat = 16
        dil_rate = 1
        
        self.conv1 = Conv2D(nfeat, (3, 3), kernel_initializer=init,
            padding='same', name="conv1", 
            input_shape = (360, 360, 2))
        self.bn1 = BatchNormalization(name="bn1")
        self.activ1 = Activation(activ, name="activ1")
    
        self.drop1 = Dropout(drop_rate, name = "drop1")
        
        self.conv2 = Conv2D(nfeat * 2, (3, 3), kernel_initializer=init,
            strides = 2, padding='same', name="conv2")
        self.bn2 = BatchNormalization(name="bn2")
        self.activ2 = Activation(activ, name="activ2")

        self.conv3 = Conv2D(nfeat * 2, (3, 3), kernel_initializer=init,
            padding='same', name="conv3")
        self.bn3 = BatchNormalization(name="bn3")
    
        self.conv_b1 = Conv2D(nfeat*2, (1, 1), kernel_initializer=init, 
            strides = 2, padding='same', name="conv_b1")
    
        self.skip0 = Add(name="skip0")
        self.activ3 = Activation(activ, name = "activ3")
        self.drop2 = Dropout(drop_rate, name = "drop2")
    
        self.conv4 = Conv2D(nfeat * 4, (3, 3), kernel_initializer=init, 
            strides = 2, padding='same', name="conv4")
        self.bn4 = BatchNormalization(name="bn4")
        self.activ4 = Activation(activ, name="activ4")
        
        self.conv5 = Conv2D(nfeat * 4, (3, 3), kernel_initializer=init,
            padding='same', name="conv5")
        self.bn5 = BatchNormalization(name="bn5")
        
        self.conv_b2 = Conv2D(nfeat * 4, (1, 1), kernel_initializer=init, 
            strides = 2, padding='same', name="conv_b2")
    
        self.skip1 = Add(name="skip1")
        self.activ5 = Activation(activ, name="activ5")

        self.gap = GlobalAveragePooling2D(name = "gap")
        
        self.gap_scalars = GlobalAveragePooling3D(name = "gap_scalars")
        
        self.conc = Concatenate(axis = 0, name = "merge")
        self.conc_scalars = Concatenate(axis = 1, name = "conc_scalars")
        
        self.conv_final0 = Conv1D(64, 3, padding = "same",
            name = "conv_final0", input_shape = (None, 64))
        
        self.conv_final = Conv1D(64, 3, 
            padding = "valid", name= "conv_final", 
            input_shape = (None, 64))
            
        self.gap1d = GlobalAveragePooling1D(name = "gap1d")
        
        self.dense2 = Dense(50, activation=activ, name="dense2")
        self.activ7 = Activation(activ, name="activ7")
        
        self.drop3 = Dropout(drop_rate, name="drop3")
        
        self.out = Dense(num_classes, kernel_initializer=init,
                         activation=activ_end, name="out1")
Example #7
    def _init_model(self):
        # Check the given parameters to decide whether the data is '2D' or '3D'
        self.dimstr = check_dims(self.input_dim_3, self.is_2D, self.is_3D)

        if self.dimstr == '2D':
            inputs = Input(shape=(self.input_dim_1, self.input_dim_2),
                           name='inputs')
        elif self.dimstr == '3D':
            inputs = Input(shape=(self.input_dim_1, self.input_dim_2,
                                  self.input_dim_3),
                           name='inputs')

        # loop for convolution layers
        for i in range(self.n_conv_layers):
            if i == 0:
                res = self._basic_cnn(inputs, i)
            else:
                res = self._basic_cnn(res, i)

        # Whether to use a global average pooling layer
        if self.use_global:
            if self.dimstr == '2D':
                res = GlobalAveragePooling1D(name='global_1')(res)
            elif self.dimstr == '3D':
                res = GlobalAveragePooling2D(name='global_1')(res)
        else:  # otherwise flatten the convolutional output
            res = Flatten()(res)

        # Whether to use Dense layers; names are made unique per iteration,
        # since Keras requires unique layer names within a model
        if self.use_dnn:
            for i in range(self.n_dnn_layers):
                res = Dense(self.dnn_units, name='dense_%d' % (i + 1))(res)
                if self.use_batch:
                    res = BatchNormalization(
                        name='dense_batch_%d' % (i + 1))(res)
                res = Activation(self.activation)(res)
                if self.use_dropout:
                    res = Dropout(self.drop_ratio,
                                  name='dense_drop_%d' % (i + 1))(res)

        # Private helper: compile the model with the user-supplied loss when one is given
        def _check_loss(model, loss, metrics, optimizer):
            if loss is not None:
                model.compile(loss=loss,
                              metrics=[metrics],
                              optimizer=optimizer)
            return model

        if self.n_classes == 2:  # binary classification
            out = Dense(self.n_classes, activation='sigmoid')(res)
            model = Model(inputs, out)
            if self.loss is None:
                model.compile(loss='binary_crossentropy',
                              metrics=[self.metrics],
                              optimizer=self.optimizer)
            else:
                _check_loss(model, self.loss, self.metrics, self.optimizer)
        elif self.n_classes >= 2:  # multiclass classification
            out = Dense(self.n_classes, activation='softmax')(res)
            model = Model(inputs, out)
            if self.loss is None:
                model.compile(loss='categorical_crossentropy',
                              metrics=[self.metrics],
                              optimizer=self.optimizer)
            else:
                _check_loss(model, self.loss, self.metrics, self.optimizer)
        elif self.n_classes == -1:  # regression
            out = Dense(1)(res)
            model = Model(inputs, out)
            if self.loss is None:
                model.compile(loss='mse',
                              metrics=[self.metrics],
                              optimizer=self.optimizer)
            else:
                _check_loss(model, self.loss, self.metrics, self.optimizer)
        else:
            raise AttributeError(
                "Parameter 'n_classes' must be -1 or >= 2!")

        print('Model structure summary:')
        model.summary()

        return model
Example #8
from tensorflow.keras.preprocessing.sequence import pad_sequences

# The call producing padded_sequences was truncated; `sequences` and
# `max_length` are assumed from context. Padding type is "post".
padded_sequences = pad_sequences(sequences, maxlen=max_length, padding='post')


pdf = pd.DataFrame(padded_sequences)

from sklearn.model_selection import train_test_split

train_x, test_x, train_y, test_y = train_test_split(pdf, train_y, test_size = 0.1, random_state = 10, shuffle = True) 
                                                                       #Splitting the dataset into train_data and test_data
    
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Embedding, GlobalAveragePooling1D

model = Sequential([                                                   #Creating a sequential model that uses embeddings of dimension 16
    Embedding(num_words, embedding_dim, input_length = max_length),
    GlobalAveragePooling1D(),                                          #GlobalAveragePooling1D() instead of Flatten(): fewer trainable parameters (see the parameter-count sketch below)
    Dense(16, activation = 'relu'),
    Dense(1, activation = 'sigmoid')
])

model.compile(loss = 'binary_crossentropy', optimizer = 'adam', metrics = ['accuracy'])

history = model.fit(train_x, train_y, epochs = 50, validation_data = (test_x, test_y), verbose = 1)
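
A quick check of the parameter-count remark above, using illustrative values (max_length = 120 and embedding_dim = 16 are assumptions): Flatten() would feed max_length * embedding_dim values into the first Dense layer, while GlobalAveragePooling1D() feeds only embedding_dim.

dense_units = 16
params_with_flatten = (120 * 16 + 1) * dense_units  # 30,736 weights in the first Dense layer
params_with_gap = (16 + 1) * dense_units            # 272 weights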

test_file = "test_oJQbWVk.csv"
sub_file = "sample_submission_LnhVWA4.csv"

tdf = pd.read_csv(path + test_file).fillna(' ')
sdf = pd.read_csv(path + sub_file).fillna(' ')
tdf.head()
Example #9
    # The data-loading call was truncated; keras.datasets.imdb.load_data accepts
    # these keyword arguments, so a call along these lines is assumed:
    (x_train, y_train), (x_test, y_test) = imdb.load_data(
        maxlen=max_len, num_words=vocab_size)
    x_train = sequence.pad_sequences(x_train, maxlen=max_len)
    x_test = sequence.pad_sequences(x_test, maxlen=max_len)
    x_train_masks = tf.equal(x_train, 0)
    x_test_masks = tf.equal(x_test, 0)
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)

    print('Model building ... ')
    inputs = Input(shape=(max_len,), name="inputs")
    masks = Input(shape=(max_len,), name='masks')
    embeddings = Embedding(vocab_size, model_dim)(inputs)
    encodings = PositionEncoding(model_dim)(embeddings)
    encodings = Add()([embeddings, encodings])
    x = MultiHeadAttention(8, 64)([encodings, encodings, encodings, masks])
    x = GlobalAveragePooling1D()(x)
    x = Dropout(0.2)(x)
    x = Dense(10, activation='relu')(x)
    outputs = Dense(2, activation='softmax')(x)

    model = Model(inputs=[inputs, masks], outputs=outputs)
    model.compile(optimizer=Adam(beta_1=0.9, beta_2=0.98, epsilon=1e-9),
                  loss='categorical_crossentropy', metrics=['accuracy'])

    print("Model Training ... ")
    es = EarlyStopping(patience=5)
    model.fit([x_train, x_train_masks], y_train,
              batch_size=batch_size, epochs=epochs, validation_split=0.2, callbacks=[es])

    test_metrics = model.evaluate(
        [x_test, x_test_masks], y_test, batch_size=batch_size, verbose=0)
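
PositionEncoding (and MultiHeadAttention) above are custom layers not shown in this excerpt. A common sinusoidal implementation of the former, assumed rather than taken from the author's code:

import numpy as np
import tensorflow as tf

class PositionEncoding(tf.keras.layers.Layer):
    """Fixed sinusoidal position encodings, as in 'Attention Is All You Need'."""

    def __init__(self, model_dim, **kwargs):
        super().__init__(**kwargs)
        self.model_dim = model_dim

    def build(self, input_shape):
        seq_len = int(input_shape[1])
        pos = np.arange(seq_len)[:, None]        # (seq_len, 1)
        i = np.arange(self.model_dim)[None, :]   # (1, model_dim)
        angles = pos / np.power(10000.0, (2 * (i // 2)) / self.model_dim)
        enc = np.zeros((seq_len, self.model_dim))
        enc[:, 0::2] = np.sin(angles[:, 0::2])   # even dimensions: sine
        enc[:, 1::2] = np.cos(angles[:, 1::2])   # odd dimensions: cosine
        self.encoding = tf.constant(enc, dtype=tf.float32)
        super().build(input_shape)

    def call(self, inputs):
        # Same encodings for every batch element; the Add() above broadcasts
        return self.encoding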
Example #10
    def _init_model(self):
        # Check the given parameters to judge whether the data is '2D' or '3D'
        self.dimstr = check_dims(self.input_dim_3, self.is_2D, self.is_3D)

        if self.dimstr == '2D':
            inputs = Input(shape=(self.input_dim_1, self.input_dim_2))
        elif self.dimstr == '3D':
            inputs = Input(shape=(self.input_dim_1, self.input_dim_2,
                                  self.input_dim_3))

        # Optionally start with convolutional layers, each chained onto the
        # previous result
        res_conv = inputs
        if self.first_conv:
            for _ in range(self.n_first_convs):
                if self.dimstr == '2D':
                    res_conv = Conv1D(self.conv_units,
                                      self.kernel_size,
                                      self.strides,
                                      padding=self.padding)(res_conv)
                elif self.dimstr == '3D':
                    res_conv = Conv2D(self.conv_units,
                                      self.kernel_size,
                                      self.strides,
                                      padding=self.padding)(res_conv)

        # Wide & Deep model block
        for i in range(self.n_wide_layers):
            if i == 0:
                res = self._wide_deep_block(res_conv)
            else:
                res = self._wide_deep_block(res)

        # Whether to use global average pooling or just flatten the convolutional result
        if self.use_global:
            if self.dimstr == '2D':
                res = GlobalAveragePooling1D()(res)
            elif self.dimstr == '3D':
                res = GlobalAveragePooling2D()(res)
        else:
            res = Flatten()(res)

        # Whether to use Dense layers
        if self.use_dnn:
            for _ in range(self.n_dnn_layers):
                res = Dense(self.dnn_units)(res)
                if self.use_batch:
                    res = BatchNormalization()(res)
                res = Activation(self.activation)(res)
                if self.use_dropout:
                    res = Dropout(self.drop_ratio)(res)

        # Private helper: compile the model with the user-supplied loss when one is given
        def _check_loss(model, loss, metrics, optimizer):
            if loss is not None:
                model.compile(loss=loss,
                              metrics=[metrics],
                              optimizer=optimizer)
            return model

        if self.n_classes == 2:  # binary classification
            out = Dense(self.n_classes, activation='sigmoid')(res)
            model = Model(inputs, out)
            if self.loss is None:
                model.compile(loss='binary_crossentropy',
                              metrics=[self.metrics],
                              optimizer=self.optimizer)
            else:
                _check_loss(model, self.loss, self.metrics, self.optimizer)
        elif self.n_classes >= 2:  # multiclass classification
            out = Dense(self.n_classes, activation='softmax')(res)
            model = Model(inputs, out)
            if self.loss is None:
                model.compile(loss='categorical_crossentropy',
                              metrics=[self.metrics],
                              optimizer=self.optimizer)
            else:
                _check_loss(model, self.loss, self.metrics, self.optimizer)
        elif self.n_classes == -1:  # regression
            out = Dense(1)(res)
            model = Model(inputs, out)
            if self.loss is None:
                model.compile(loss='mse',
                              metrics=[self.metrics],
                              optimizer=self.optimizer)
            else:
                _check_loss(model, self.loss, self.metrics, self.optimizer)
        else:
            raise AttributeError(
                "Parameter 'n_classes' must be -1 or >= 2!")

        print('Model structure summary:')
        model.summary()

        return model
Example #11
# fit_on_texts expects an iterable of texts; joining them into one string would
# fit the tokenizer on individual characters rather than words
tokenizer.fit_on_texts(x_train)
x_train = tokenizer.texts_to_sequences(x_train)
x_train = keras.preprocessing.sequence.pad_sequences(x_train, maxlen=MAX_LEN)

y_train = a_list
y_train = tokenizer.texts_to_sequences(y_train)
y_train = keras.preprocessing.sequence.pad_sequences(y_train, maxlen=MAX_LEN)

q_inputs = Input(shape=(None, ), dtype='int32')
a_inputs = Input(shape=(None, ), dtype='int32')

q_embeddings = Embedding(NUM_WORDS, 128)(q_inputs)
a_embeddings = Embedding(NUM_WORDS, 128)(a_inputs)

# Attention over (queries, keys, values): the question embeddings attend over
# the answer embeddings
O_seq = Attention(16, 16)([q_embeddings, a_embeddings, a_embeddings])
O_seq = GlobalAveragePooling1D()(O_seq)
O_seq = Dropout(0.5)(O_seq)

outputs = Dense(1, activation='sigmoid')(O_seq)

model = Model(inputs=[q_inputs, a_inputs], outputs=outputs)
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.summary()

print('Train...')
# The model takes both padded sequences as inputs; `labels` stands in for the
# 0/1 match targets, which were not shown in this excerpt
hist = model.fit([x_train, y_train],
                 labels,
                 batch_size=batch_size,
                 epochs=5)