Example No. 1
 def feature_extractor_network(self):
     # input
     in_image = Input(shape = in_shape)
     # C1 Layer
     nett = Conv2D(32,(5,5))(in_image)		
     nett = BatchNormalization()(nett)
     nett = LeakyReLU(alpha = 0.2)(nett)
     # M2 Layer
     nett = MaxPooling2D(pool_size = (3,3))(nett)
     # C3 Layer
     nett = Conv2D(64,(3,3))(nett)
     nett = BatchNormalization()(nett)
     nett = LeakyReLU(alpha = 0.2)(nett)
     # L4 Layer
     nett = LocallyConnected2D(128,(3,3))(nett)
     # L5 Layer
     nett = LocallyConnected2D(256,(3,3))(nett)
     # F6 Layer
     nett = Dense(512,activation='relu')(nett)
     nett = Dropout(0.2)(nett)
     # F7 Layer 
     # NOTE: the original Dense call omitted the required units argument; 512 is assumed here
     out_features = Dense(512, activation = 'tanh')(nett)
     # output
     model = Model(inputs = in_image, outputs = out_features)
     return model
Example No. 2
def input_hidden(input_words, input_roles, n_word_vocab, n_role_vocab, emb_init, missing_word_id, 
    n_factors_emb=256, n_hidden=256, n_sample=1, mask_zero=True, using_dropout=False, dropout_rate=0.3, 
    activation='linear', a_target=False):
    """Input layer designed by Ottokar

        Embedding layers are initialized with glorot uniform.
        batch_size is None during compile time.
        input_length is length of input_words/input_roles

    # Arguments:
        input_words:        placeholder for input words, shape is (batch_size, input_length)
        input_roles:        placeholder for input roles, shape is (batch_size, input_length)
        n_word_vocab:       size of word vocabulary
        n_role_vocab:       size of role vocabulary
        emb_init:           initializer of embedding
        missing_word_id:    the id used as a placeholder for a role that has no word filling it
        n_factors_emb:      tensor factorization number
        n_hidden:           number of hidden units
        n_sample:           number of samples, useful when there are negative samples # QUESTION: what is number of samples/negative samples? (team1-change)
        mask_zero:          bool, zero out the weight of missing word
        using_dropout:      bool, using drop-out layer or not
        dropout_rate:       rate of drop-out layer
        activation:         activation function in fully connected layer
        a_target:           bool, True if this is a target embedding

    # if a_target:
    #     input_length = n_sample
    # else:
    #     input_length = n_role_vocab - 1
    """
    hidden = role_based_word_embedding(input_words, input_roles, n_word_vocab, n_role_vocab, emb_init, 
        missing_word_id, n_factors_emb, mask_zero, using_dropout, dropout_rate)

    if a_target: # QUESTION: What is a_target controlling? (team1-change) 
        # fully connected layer, output shape is (batch_size, n_sample, n_hidden)
        output = Dense(n_hidden, 
            activation=activation, 
            use_bias=False,
            input_shape=(n_sample, n_factors_emb,),
            name='target_role_based_embedding')(hidden)

    else:
        # sum on input_length direction;
        # obtaining context embedding layer, shape is (batch_size, n_factors_emb)
        context_hidden = Lambda(lambda x: K.sum(x, axis=1), 
            name='context_hidden',
            output_shape=(n_factors_emb,))(hidden)

        # fully connected layer, output shape is (batch_size, n_hidden)
        output = Dense(n_hidden, 
            activation=activation, 
            use_bias=True,
            input_shape=(n_factors_emb,), 
            name='role_based_embedding')(context_hidden)

    # if using_dropout:
    #     # Drop-out layer after fully connected layer
    #     output = Dropout(0.5)(output)

    return output
Example No. 3
    def _create_stacked_birnn(self, input_tensor, num_classes, dropout_rate):
        num_hidden_units = 128

        # Input shape = (samples, time-steps, features) = (None, None, VOCAB_SIZE).
        x = Bidirectional(
            LSTM(num_hidden_units,
                 dropout=dropout_rate,
                 recurrent_dropout=dropout_rate,
                 return_sequences=True))(input_tensor)
        # FIXME [check] >> I don't know why.
        # Output shape = (None, num_hidden_units * 2).
        #x = Bidirectional(LSTM(num_hidden_units, dropout=dropout_rate, recurrent_dropout=dropout_rate))(x)
        # Output shape = (None, None, num_hidden_units * 2).
        x = Bidirectional(
            LSTM(num_hidden_units,
                 dropout=dropout_rate,
                 recurrent_dropout=dropout_rate,
                 return_sequences=True))(x)
        if 1 == num_classes:
            x = Dense(1, activation='sigmoid')(x)
            #x = Dense(1, activation='sigmoid', activity_regularizer=keras.regularizers.activity_l2(0.0001))(x)
        elif num_classes >= 2:
            x = Dense(num_classes, activation='softmax')(x)
            #x = Dense(num_classes, activation='softmax', activity_regularizer=keras.regularizers.activity_l2(0.0001))(x)
        else:
            assert num_classes > 0, 'Invalid number of classes.'

        return x
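
A short standalone sketch of the shape behaviour behind the FIXME above (sizes are assumed): with return_sequences=True a Bidirectional LSTM returns one vector per time step, shape (batch, timesteps, 2 * units); with return_sequences=False it returns only the last step, shape (batch, 2 * units).

import numpy as np
from tensorflow.keras.layers import Input, LSTM, Bidirectional
from tensorflow.keras.models import Model

inp = Input(shape=(10, 8))                                   # (timesteps, features)
seq = Bidirectional(LSTM(128, return_sequences=True))(inp)   # shape (None, 10, 256)
last = Bidirectional(LSTM(128))(inp)                         # shape (None, 256)
probe = Model(inp, [seq, last])
a, b = probe.predict(np.zeros((4, 10, 8)))
print(a.shape, b.shape)                                      # (4, 10, 256) (4, 256)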
Example No. 4
    def _create_model_2(self, input_tensor, input_shape, num_classes,
                        dropout_rate):
        #input_tensor = Input(shape=input_shape)

        model = Sequential()
        model.add(
            Conv2D(32,
                   kernel_size=(5, 5),
                   padding='same',
                   activation='relu',
                   input_shape=input_shape[1:]))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

        model.add(
            Conv2D(64, kernel_size=(3, 3), padding='same', activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

        model.add(Flatten())

        model.add(Dense(1024, activation='relu'))
        model.add(Dropout(dropout_rate))

        if 1 == num_classes:
            model.add(Dense(1, activation='sigmoid'))
            #model.add(Dense(1, activation='sigmoid', activity_regularizer=keras.regularizers.activity_l2(0.0001)))
        elif num_classes >= 2:
            model.add(Dense(num_classes, activation='softmax'))
            #model.add(Dense(num_classes, activation='softmax', activity_regularizer=keras.regularizers.activity_l2(0.0001)))
        else:
            assert num_classes > 0, 'Invalid number of classes.'

        # Display the model summary.
        #model.summary()

        return model(input_tensor)
Example No. 5
def squeeze_excitation_block_3D(inputSE, ratio=16):
    '''
    Creates a squeeze and excitation block
    :param inputSE: input tensor
    :param ratio: reduction ratio r for bottleneck given by the two FC layers
    :return: keras tensor
    '''

    if backend.image_data_format() == 'channels_first':
        channels = 1
    else:
        channels = -1

    # number of input filters/channels
    inputSE_shape = backend.int_shape(inputSE)
    numChannels = inputSE_shape[channels]

    #squeeze operation
    output = GlobalAveragePooling3D(
        data_format=backend.image_data_format())(inputSE)

    #excitation operation
    output = Dense(numChannels // ratio,
                   activation='relu',
                   use_bias=True,
                   kernel_initializer='he_normal')(output)
    output = Dense(numChannels,
                   activation='sigmoid',
                   use_bias=True,
                   kernel_initializer='he_normal')(output)

    #scale operation
    output = multiply([inputSE, output])

    return output
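
A minimal usage sketch for the block above (input shape and filter count are assumed; the block's own dependencies such as backend, Dense, GlobalAveragePooling3D and multiply are assumed to be in scope):

from tensorflow.keras.layers import Input, Conv3D
from tensorflow.keras.models import Model

volume = Input(shape=(16, 64, 64, 1))                            # depth, height, width, channels
features = Conv3D(32, (3, 3, 3), padding='same', activation='relu')(volume)
recalibrated = squeeze_excitation_block_3D(features, ratio=8)    # channel-wise reweighting
se_model = Model(inputs=volume, outputs=recalibrated)
se_model.summary()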
Example No. 6
def main():
    # Load the data
    train_data, train_label, validation_data, validation_label, test_data, test_label = data_preparation(
    )
    num_features = train_data.shape[1]

    print('Training data shape = {}'.format(train_data.shape))
    print('Validation data shape = {}'.format(validation_data.shape))
    print('Test data shape = {}'.format(test_data.shape))

    # Compile model
    model = Sequential()
    # NOTE: the original hard-coded input_shape=(1, ); num_features is used here so the
    # first layer matches the loaded data
    model.add(BatchNormalization(input_shape=(num_features, )))
    model.add(Dense(10, use_bias=True))
    model.add(Activation('relu'))
    model.add(Dense(1, use_bias=True))
    learning_rates = [1e-4, 1e-3, 1e-2]
    adam_optimizer = Adam(lr=learning_rates[0])
    model.compile(loss='mean_absolute_error',
                  optimizer=adam_optimizer,
                  metrics=[metrics.mae])

    # Print out model architecture summary
    model.summary()

    # Train the model
    model.fit(x=train_data,
              y=train_label,
              validation_data=(validation_data, validation_label),
              epochs=100
              #,callbacks=[kp.plot_losses]
              )

    return model
Example No. 7
    def _create_model_1(self, input_tensor, num_classes, dropout_rate):
        x = Conv2D(32, kernel_size=(5, 5), padding='same',
                   activation='relu')(input_tensor)
        x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)

        x = Conv2D(64, kernel_size=(3, 3), padding='same',
                   activation='relu')(x)
        x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)

        x = Flatten()(x)

        x = Dense(1024, activation='relu')(x)
        x = Dropout(dropout_rate)(x)

        if 1 == num_classes:
            x = Dense(1, activation='sigmoid')(x)
            #x = Dense(1, activation='sigmoid', activity_regularizer=keras.regularizers.activity_l2(0.0001))(x)
        elif num_classes >= 2:
            x = Dense(num_classes, activation='softmax')(x)
            #x = Dense(num_classes, activation='softmax', activity_regularizer=keras.regularizers.activity_l2(0.0001))(x)
        else:
            assert num_classes > 0, 'Invalid number of classes.'

        #model = Model(inputs=input_tensor, outputs=x)

        return x
Example No. 8
 def create_model(self):
     model   = Sequential()
     state_shape  = self.env.observation_space.shape
     # NOTE: Conv1D takes input_shape, not input_dim; state_shape is already a shape tuple
     model.add(Conv1D(72, 3, input_shape=state_shape, activation="relu"))
     model.add(Dense(48, activation="relu"))
     model.add(Dense(24, activation="relu"))
     model.add(Dense(self.env.action_space.n))
     model.compile(loss="mean_squared_error",
         optimizer=Adam(lr=self.learning_rate))
     return model
Example No. 9
 def mlp_layer(self, lstm):
     fc = Dense(self.fc_args[0], activation='relu', kernel_initializer='truncated_normal')(lstm)
     fc = Dropout(self.dropout_args)(fc)
     fc = Dense(self.fc_args[1], activation='relu', kernel_initializer='truncated_normal')(fc)
     fc = Dropout(self.dropout_args)(fc)
     fc = Dense(self.fc_args[2], activation='relu', kernel_initializer='truncated_normal')(fc)
     fc = Dropout(self.dropout_args)(fc)
     fc = Dense(self.fc_args[3], activation='relu', kernel_initializer='truncated_normal')(fc)
     fc = Dropout(self.dropout_args)(fc)
     output = Dense(self.fc_args[4], activation='softmax', name='output')(fc)
     return output
Example No. 10
def main1():
    # Load the data
    train_data, train_label, validation_data, validation_label, test_data, test_label = data_preparation_moe(
    )
    num_features = train_data.shape[1]

    print('Training data shape = {}'.format(train_data.shape))
    print('Validation data shape = {}'.format(validation_data.shape))
    print('Test data shape = {}'.format(test_data.shape))

    #print('Training label shape = {}'.format(len(train_label)))

    # Set up the input layer
    input_layer = Input(shape=(num_features, ))

    # Set up MMoE layer
    mmoe_layers = MMoE(units=16, num_experts=8, num_tasks=2)(input_layer)

    output_layers = []

    output_info = ['y0', 'y1']

    # Build tower layer from MMoE layer
    for index, task_layer in enumerate(mmoe_layers):
        tower_layer = Dense(units=8,
                            activation='relu',
                            kernel_initializer=VarianceScaling())(task_layer)
        output_layer = Dense(units=1,
                             name=output_info[index],
                             activation='linear',
                             kernel_initializer=VarianceScaling())(tower_layer)
        output_layers.append(output_layer)

    # Compile model
    model = Model(inputs=[input_layer], outputs=output_layers)
    learning_rates = [1e-4, 1e-3, 1e-2]
    adam_optimizer = Adam(lr=learning_rates[0])
    model.compile(loss={
        'y0': 'mean_squared_error',
        'y1': 'mean_squared_error'
    },
                  optimizer=adam_optimizer,
                  metrics=[metrics.mae])

    # Print out model architecture summary
    model.summary()

    # Train the model
    model.fit(x=train_data,
              y=train_label,
              validation_data=(validation_data, validation_label),
              epochs=100)
    return model
Example No. 11
def vanilla_rnn(num_words,
                state,
                lra,
                dropout,
                num_outputs=2,
                emb_dim=50,
                input_length=500):
    model = Sequential()
    model.add(
        Embedding(input_dim=num_words + 1,
                  output_dim=emb_dim,
                  input_length=input_length,
                  trainable=False,
                  weights=[embed_matrix]))
    model.add(
        SimpleRNN(units=state,
                  return_sequences=False))  # input_shape removed: the RNN follows the Embedding layer
    model.add(Dropout(dropout))
    model.add(Dense(num_outputs, activation='sigmoid'))

    RMS = optimizers.RMSprop(lr=lra)
    model.compile(loss='binary_crossentropy',
                  optimizer=RMS,
                  metrics=['accuracy'])

    return model
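
vanilla_rnn reads embed_matrix from the enclosing scope; it is not defined in this snippet. A hedged sketch of how such a matrix is typically assembled from a word-to-index map and pretrained vectors (all names here are assumptions):

import numpy as np

def build_embed_matrix(word_index, word_vectors, num_words, emb_dim=50):
    # rows are word ids (row 0 reserved for padding), columns are embedding dimensions
    matrix = np.zeros((num_words + 1, emb_dim))
    for word, idx in word_index.items():
        if idx <= num_words and word in word_vectors:
            matrix[idx] = word_vectors[word]
    return matrix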
Example No. 12
def create_autoencoder(input_dim, encoding_dim):
    """
    Args:
        input_dim: dimension of one-hot encoded categorical features
        encoding_dim: dimension of the encoded data (hidden-layer representation)
    Return: 
        model
    """
    one_hot_in = Input(shape=(input_dim, ), name='input', sparse=True)
    X = Dense(HIDDEN_UNITS, activation='selu')(one_hot_in)
    encoding = Dense(encoding_dim, activation='selu', name='enco')(X)
    X = Dense(HIDDEN_UNITS, activation='selu')(encoding)
    output = Dense(input_dim, activation='sigmoid')(X)

    model = Model(inputs=one_hot_in, outputs=output)
    return model
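
A minimal usage sketch (data, optimizer choice and the module-level HIDDEN_UNITS constant are assumed): train the autoencoder on the one-hot matrix, then reuse the layer named 'enco' as the encoder.

from tensorflow.keras.models import Model

autoencoder = create_autoencoder(input_dim=1000, encoding_dim=32)
autoencoder.compile(optimizer='adam', loss='binary_crossentropy')
# autoencoder.fit(X_one_hot, X_one_hot, epochs=10, batch_size=256)

encoder = Model(inputs=autoencoder.input,
                outputs=autoencoder.get_layer('enco').output)
# codes = encoder.predict(X_one_hot)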
Example No. 13
    def build_discriminator(self):

        model = Sequential()

        model.add(Dense(78, activation="relu", input_dim=self.input_shape))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(56, activation="relu"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(32, activation="relu"))
        model.add(Dropout(rate=0.3))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(28, activation="relu"))
        model.add(Dropout(rate=0.3))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(10, activation="relu"))

        model.summary()

        img = Input(shape=self.img_shape)

        features = model(img)
        valid = Dense(1, activation="sigmoid")(features)
        label = Dense(self.num_classes + 1, activation="softmax")(features)

        return Model(img, [valid, label])
Example No. 14
    def build_generator(self):

        model = Sequential()

        model.add(Dense(78, activation="relu", input_dim=self.latent_dim))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(56, activation="relu"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(32, activation="relu"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(28, activation="tanh"))

        model.summary()

        noise = Input(shape=(self.latent_dim, ))
        img = model(noise)

        return Model(noise, img)
Example No. 15
    def __init__(self, num_actions):
        super(ReinforceAgent, self).__init__()
        self.num_actions = num_actions

        self.start_embedding_size = 50
        self.gru_embedding_size = 100
        self.hidden_dense_size = 100

        self.state_embedding = Embedding(63, self.start_embedding_size)
        self.hand_embedding = Embedding(252, self.start_embedding_size)
        self.gru = GRU(self.gru_embedding_size,
                       return_sequences=True,
                       return_state=True)
        self.hand_dense_layer = Dense(self.hidden_dense_size)
        self.concatted_dense_1 = Dense(self.hidden_dense_size)
        self.final_dense = Dense(61, activation='softmax')

        self.optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
Example No. 16
    def _create_rnn(self, input_tensor, num_classes, dropout_rate):
        num_hidden_units = 512

        # Input shape = (samples, time-steps, features) = (None, None, VOCAB_SIZE).
        x = LSTM(num_hidden_units,
                 dropout=dropout_rate,
                 recurrent_dropout=dropout_rate,
                 return_sequences=True)(input_tensor)
        # Output shape = (samples, time-steps, features) = (None, None, num_hidden_units).
        if 1 == num_classes:
            x = Dense(1, activation='sigmoid')(x)
            #x = Dense(1, activation='sigmoid', activity_regularizer=keras.regularizers.activity_l2(0.0001))(x)
        elif num_classes >= 2:
            x = Dense(num_classes, activation='softmax')(x)
            #x = Dense(num_classes, activation='softmax', activity_regularizer=keras.regularizers.activity_l2(0.0001))(x)
        else:
            assert num_classes > 0, 'Invalid number of classes.'

        return x
Example No. 17
def init_model():
    #K.clear_session()
    tf.reset_default_graph()

    model = Sequential()
    model.add(Conv2D(16, (3, 3), input_shape=(28, 28, 1), padding = "SAME", activation = "relu"))
    model.add(Conv2D(32, (3, 3), activation='relu'))
    model.add(MaxPool2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(10, activation='softmax'))

    for layer in model.layers:
        print(layer.output_shape)
        
    model.compile(loss='sparse_categorical_crossentropy',
              optimizer=keras.optimizers.Adadelta(lr=0.1),
              metrics=['accuracy'])
              
    return model
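
A quick smoke test for the model above (random stand-in data; real MNIST loading is omitted). Integer labels are used because the model is compiled with sparse_categorical_crossentropy.

import numpy as np

model = init_model()
x = np.random.rand(64, 28, 28, 1).astype('float32')   # stand-in for 28x28 grayscale images
y = np.random.randint(0, 10, size=(64,))               # integer class ids, not one-hot
model.fit(x, y, batch_size=32, epochs=1)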
Example No. 18
    def __init__(self, state_size, num_actions):
        """
        The ReinforceWithBaseline class that inherits from tf.keras.Model.

        The forward pass calculates the policy for the agent given a batch of states. During training,
        ReinforceWithBaseLine estimates the value of each state to be used as a baseline to compare the policy's
        performance with.

        :param state_size: number of parameters that define the state. You don't necessarily have to use this, 
                           but it can be used as the input size for your first dense layer.
        :param num_actions: number of actions in an environment
        """
        super(ReinforceWithBaseline, self).__init__()
        self.num_actions = num_actions
        self.event_size = 63
        self.hand_size = 252

        self.event_embedding_size = 100
        self.hand_embedding_size = 100

        self.event_hidden_size = 100
        self.hand_hidden_size = 100

        self.concat_hidden_size = 100

        # TODO: Define network parameters and optimizer
        self.optimizer = tf.keras.optimizers.Adam(learning_rate=0.002)

        self.event_embedding = Dense(self.event_embedding_size,
                                     activation='relu')
        self.hand_embedding = Dense(self.hand_embedding_size,
                                    activation='relu')

        self.actor_event_gru = GRU(self.event_hidden_size,
                                   return_sequences=True,
                                   return_state=True)
        self.actor_hand_dense = Dense(self.hand_hidden_size, activation='relu')
        self.actor_concat_dense = Dense(self.concat_hidden_size,
                                        activation='relu')
        self.actor_output_dense = Dense(self.num_actions, activation='softmax')

        self.critic_event_gru = GRU(self.event_hidden_size,
                                    return_sequences=True,
                                    return_state=True)
        self.critic_hand_dense = Dense(self.hand_hidden_size,
                                       activation='relu')
        self.critic_concat_dense = Dense(self.concat_hidden_size,
                                         activation='relu')
        self.critic_output_dense = Dense(1)
Example No. 19
class Classifier:
	def __init__(self, dense_classifier, embedding_dim, layers, dropout, batch_size, epochs, validation_split):
		# batch_size, epochs and validation_split were assigned below but missing from the
		# original signature; they are added here so the assignments are defined
		if dense_classifier:
			self.model = self.build_dense_classifier(embedding_dim, layers, dropout)
		else:
			self.model = self.build_recurrent_classifier(embedding_dim, layers, dropout)
		self.batch_size = batch_size
		self.epochs = epochs
		self.validation_split = validation_split

	def build_dense_classifier(self, embedding_dim, layers, dropout):
		input1 = Input(shape=(embedding_dim*2,))

		h1 = Dense(layers[0], activation='relu')(input1)
		if dropout is not None:
			h1 = Dropout(rate=dropout)(h1)

		for layer in layers[1:]:
			h1 = Dense(layer, activation='relu')(h1)
			if dropout is not None:
				h1 = Dropout(rate=dropout)(h1)

		out = Dense(2, activation='softmax')(h1)
		model = Model(inputs=input1, outputs=out)

		return model
Example No. 20
def target_word_hidden(inputs, target_role, n_word_vocab, n_role_vocab, emb_init, 
    n_factors_cls=512, n_hidden=256, using_dropout=False, dropout_rate=0.3):
    """Hidden layer of non-incremental model role-filler to predict target word given context 
    (input words, input roles, target role).

    # Args:
        inputs:         output of context embedding from the last layer, shape is (batch_size, input_length)
        target_role:    place holder for target roles, shape is (batch_size, 1)
        n_word_vocab:   size of word vocabulary
        n_role_vocab:   size of role vocabulary
        emb_init:       initializer of embedding
        n_hidden:       number of hidden units
        using_dropout:      bool, using drop-out layer or not
        dropout_rate:       rate of drop-out layer        
    # Return:
        (n_factors_cls, )
    """
    
    # target role embedding; shape is (batch_size, 1, n_factors_cls)
    target_role_embedding = Embedding(n_role_vocab, n_factors_cls, 
        embeddings_initializer=emb_init, 
        name='target_role_embedding')(target_role)

    if using_dropout:
        # Drop-out layer after embeddings
        target_role_embedding = Dropout(dropout_rate)(target_role_embedding)

    # reduce dimension of tensor from 3 to 2
    target_role_embedding = Lambda(lambda x: K.sum(x, axis=1),
        output_shape=(n_factors_cls,))(target_role_embedding)
    
    # context_emb after linear projection
    weighted_context_embedding = Dense(n_factors_cls, # QUESTION: what's the point of the linear transformation? (team1-change)
        activation='linear', 
        use_bias=False,
        input_shape=(n_hidden, ))(inputs)

    # if using_dropout:
    #     # Drop-out layer after fully connected layer
    #    weighted_context_embedding = Dropout(0.5)(weighted_context_embedding)

    # hidden units after combining 2 embeddings; shape is the same with embedding
    hidden = Multiply()([weighted_context_embedding, target_role_embedding])

    return hidden
Example No. 21
def target_role_hidden(inputs, target_word, n_word_vocab, n_role_vocab, emb_init, 
    n_factors_cls=512, n_hidden=256, using_dropout=False, dropout_rate=0.3):
    """Hidden layer of multi-task non-incremental model role-filler to predict target role given context 
    (input words, input roles, target word).

    # Args:
        inputs:         output of context embedding from the last layer, shape is (batch_size, input_length)
        target_word:    place holder for target word, shape is (batch_size, 1)
        n_word_vocab:   size of word vocabulary
        n_role_vocab:   size of role vocabulary
        emb_init:       initializer of embedding
        n_hidden:       number of hidden units
    # Return:
        (n_factors_cls, )
    """
    
    # target word embedding; shape is (batch_size, 1, n_factors_cls)
    target_word_embedding = Embedding(n_word_vocab, n_factors_cls, 
        embeddings_initializer=emb_init, 
        name='target_word_embedding')(target_word)

    if using_dropout:
        target_word_embedding = Dropout(dropout_rate)(target_word_embedding)

    # reduce dimension of tensor from 3 to 2
    target_word_embedding = Lambda(lambda x: K.sum(x, axis=1),
        output_shape=(n_factors_cls,))(target_word_embedding)
    
    # context_emb after linear projection
    weighted_context_embedding = Dense(n_factors_cls, 
        activation='linear', 
        use_bias=False,        
        input_shape=(n_hidden, ))(inputs)

    # if using_dropout:
    #     weighted_context_embedding = Dropout(0.5)(weighted_context_embedding)

    # hidden units after combining 2 embeddings; shape is the same with embedding
    hidden = Multiply()([weighted_context_embedding, target_word_embedding])

    return hidden
Example No. 22
 def generator_network(self):
     # input
     in_latents = Input(shape = (self.latent_dim,))
     #DC1
     nett = Conv2DTranspose(512,(3,3))(in_latents)		
     nett = BatchNormalization()(nett)
     nett = LeakyReLU(alpha = 0.2)(nett)
     #DC2
     nett = Conv2DTranspose(128,(3,3))(nett)	
     nett = BatchNormalization()(nett)
     nett = LeakyReLU(alpha = 0.2)(nett)
     #DC3
     nett = Conv2DTranspose(64,(3,3))(nett)
     nett = BatchNormalization()(nett)
     nett = LeakyReLU(alpha = 0.2)(nett)
     #DC4
     nett = Conv2DTranspose(32,(5,5))(nett)		
     nett = BatchNormalization()(nett)
     # NOTE: the original 'Dense(alpha = 0.2)' is not a valid layer call; a tanh projection
     # to the image channel count is assumed here
     out_image = Dense(self.img_shape[-1], activation = 'tanh')(nett)
     #output
     model = Model(inputs = in_latents, outputs = out_image)
     return model
Example No. 23
 def discriminator_network(self):
     # input
     in_image = Input(shape=self.img_shape)
     # C1 layer
     nett = Conv2D(64,(5,5))(in_image)		
     nett = BatchNormalization()(nett)
     nett = LeakyReLU(alpha = 0.2)(nett)
     # C2 layer
     nett = Conv2D(128,(5,5))(nett)		
     nett = BatchNormalization()(nett)
     nett = LeakyReLU(alpha = 0.2)(nett)
     nett = Dropout(0.2)(nett)
     # C3 layer
     nett = Conv2D(256,(5,5))(nett)		
     nett = BatchNormalization()(nett)
     nett = LeakyReLU(alpha = 0.2)(nett)
     nett = Dropout(0.2)(nett)
     # F4 layer
     nett = Flatten()(nett)
     # NOTE: 'alpha = 0.2' is not a Dense argument; a sigmoid validity output is assumed
     validity = Dense(1, activation = 'sigmoid')(nett)
     #output
     model =  Model(inputs = in_image, outputs = validity)
     return model
Example No. 24
 def attention_layer(self, lstm):
     dense1 = Dense(self.attention_args[0], activation="softmax")(lstm)
     dense2 = Dense(self.attention_args[1], activation="softmax")(dense1)
     lstm = multiply([lstm, dense2])
     return lstm
Example No. 25
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import model_from_json
import numpy
import os
# fix random seed for reproducibility
numpy.random.seed(7)
# load pima indians dataset
dataset = numpy.loadtxt("pima-indians-diabetes.csv", delimiter=",")
# split into input (X) and output (Y) variables
X = dataset[:, 0:8]
Y = dataset[:, 8]
# create model
model = Sequential()
model.add(Dense(12, input_dim=8, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
# Compile model
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
# Fit the model
model.fit(X, Y, epochs=150, batch_size=10, verbose=0)
# evaluate the model
scores = model.evaluate(X, Y, verbose=0)
print("%s: %.2f%%" % (model.metrics_names[1], scores[1] * 100))

# serialize model to JSON
model_json = model.to_json()
with open("model.json", "w") as json_file:
    json_file.write(model_json)
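
The snippet is cut off here; a typical continuation (weights file name assumed) saves the weights and reloads the model with the model_from_json import above:

model.save_weights("model.h5")

# later: reload the architecture and weights
with open("model.json", "r") as json_file:
    loaded_model = model_from_json(json_file.read())
loaded_model.load_weights("model.h5")
loaded_model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])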
Example No. 26
# Define the encoder-decoder model
encoder_inputs = Input(shape=(None, ))
encoder_embedding = Embedding(vocab_size, 200, mask_zero=True)(encoder_inputs)
# Reference: Embedding layer <https://keras.io/zh/layers/embeddings/#embedding>
encoder_outputs, state_h, state_c = tf.keras.layers.LSTM(
    200, return_state=True)(encoder_embedding)
# Reference: https://keras.io/zh/layers/recurrent/#lstm
encoder_states = [state_h, state_c]

decoder_inputs = Input(shape=(None, ))
decoder_embedding = Embedding(vocab_size, 200, mask_zero=True)(decoder_inputs)
decoder_lstm = LSTM(200, return_state=True, return_sequences=True)
decoder_outputs, _, _ = decoder_lstm(decoder_embedding,
                                     initial_state=encoder_states)
decoder_dense = Dense(vocab_size, activation=tf.keras.activations.softmax)
output = decoder_dense(decoder_outputs)

model = Model([encoder_inputs, decoder_inputs], output)
model.compile(optimizer=optimizers.RMSprop(),
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# Reference: RMSprop <https://keras.io/zh/optimizers/#rmsprop>
# categorical_crossentropy <https://keras.io/zh/backend/#categorical_crossentropy>

model.summary()

# Train and save the model
model.fit([encoder_input_data, decoder_input_data],
          decoder_output_data,
          batch_size=50,
Example No. 27
# In[23]:
import tensorflow as tf

# (10) Bidirectional LSTM modeling
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import SimpleRNN, Input, Dense, LSTM
from tensorflow.keras.layers import Bidirectional, TimeDistributed

# Training
from tensorflow.keras.callbacks import EarlyStopping
# Define the early-stopping callback

xInput = Input(batch_shape=(None, right_idx3, 256))
xBiLstm = Bidirectional(LSTM(240, return_sequences=True),
                        merge_mode='concat')(xInput)
xOutput = TimeDistributed(Dense(1, activation='sigmoid'))(xBiLstm)
# The cost is emitted at each step, and the error is carried over to the next step.

model1 = Model(xInput, xOutput)
model1.compile(loss='binary_crossentropy',
               optimizer='rmsprop',
               metrics=['accuracy'])
model1.summary()

from keras.callbacks import EarlyStopping

early_stopping = EarlyStopping(monitor='val_loss', patience=3)  # define the early-stopping callback
# In[24]:

########## 3gram
# k-fold cross-validation
Example No. 28
def compiled_tcn(num_feat,  # type: int
                 num_classes,  # type: int
                 nb_filters,  # type: int
                 kernel_size,  # type: int
                 dilations,  # type: List[int]
                 nb_stacks,  # type: int
                 max_len,  # type: int
                 padding='causal',  # type: str
                 use_skip_connections=True,  # type: bool
                 return_sequences=True,
                 regression=False,  # type: bool
                 dropout_rate=0.05,  # type: float
                 name='tcn',  # type: str,
                 opt='adam',
                 lr=0.002):
    # type: (...) -> keras.Model
    """Creates a compiled TCN model for a given task (i.e. regression or classification).
    Classification uses a sparse categorical loss. Please input class ids and not one-hot encodings.

    Args:
        num_feat: The number of features of your input, i.e. the last dimension of: (batch_size, timesteps, input_dim).
        num_classes: The size of the final dense layer, how many classes we are predicting.
        nb_filters: The number of filters to use in the convolutional layers.
        kernel_size: The size of the kernel to use in each convolutional layer.
        dilations: The list of the dilations. Example is: [1, 2, 4, 8, 16, 32, 64].
        nb_stacks : The number of stacks of residual blocks to use.
        max_len: The maximum sequence length, use None if the sequence length is dynamic.
        padding: The padding to use in the convolutional layers.
        use_skip_connections: Boolean. If we want to add skip connections from input to each residual block.
        return_sequences: Boolean. Whether to return the last output in the output sequence, or the full sequence.
        regression: Whether the output should be continuous or discrete.
        dropout_rate: Float between 0 and 1. Fraction of the input units to drop.
        name: Name of the model. Useful when having multiple TCN.
        opt: Optimizer name.
        lr: Learning rate.
    Returns:
        A compiled keras TCN.
    """

    dilations = process_dilations(dilations)

    input_layer = Input(shape=(max_len, num_feat))

    x = TCN(nb_filters, kernel_size, nb_stacks, dilations, padding,
            use_skip_connections, dropout_rate, return_sequences, name)(input_layer)

    print('x.shape=', x.shape)

    def get_opt():
        # NOTE: the optimizer is hard-coded; the `opt` and `lr` arguments are not used here
        return tf.train.AdamOptimizer(learning_rate=1e-3)

    if not regression:
        # classification
        x = Dense(num_classes)(x)
        x = Activation('softmax')(x)
        output_layer = x
        model = Model(input_layer, output_layer)            
        model.compile(get_opt(), loss='sparse_categorical_crossentropy', metrics=[accuracy])
    else:
        # regression
        x = Dense(1)(x)
        x = Activation('linear')(x)
        output_layer = x
        model = Model(input_layer, output_layer)
        model.compile(get_opt(), loss='mean_squared_error')
    print(f'model.x = {input_layer.shape}')
    print(f'model.y = {output_layer.shape}')
    return model
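
A minimal call sketch (filter counts and lengths are assumed; TCN, process_dilations and the accuracy metric referenced above are assumed to come from the surrounding package, e.g. keras-tcn):

tcn_model = compiled_tcn(num_feat=1,
                         num_classes=10,
                         nb_filters=24,
                         kernel_size=8,
                         dilations=[1, 2, 4, 8],
                         nb_stacks=1,
                         max_len=128,
                         return_sequences=False)
# tcn_model.fit(x_train, y_train, epochs=10, batch_size=32)   # y_train holds integer class ids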
Example No. 29
    def __init__(self,
                 n_word_vocab=50001,
                 n_role_vocab=7,
                 n_factors_emb=256,
                 n_factors_cls=512,
                 n_hidden=256,
                 word_vocabulary={},
                 role_vocabulary={},
                 unk_word_id=50000,
                 unk_role_id=7,
                 missing_word_id=50001,
                 using_dropout=False,
                 dropout_rate=0.3,
                 optimizer='adagrad',
                 loss='sparse_categorical_crossentropy',
                 metrics=['accuracy']):
        super(NNRF, self).__init__(n_word_vocab, n_role_vocab, n_factors_emb,
                                   n_hidden, word_vocabulary, role_vocabulary,
                                   unk_word_id, unk_role_id, missing_word_id,
                                   using_dropout, dropout_rate, optimizer,
                                   loss, metrics)

        # minus 1 here because one of the roles is the target role
        self.input_length = n_role_vocab - 1

        # each input is a fixed window over the frame set; each word corresponds to one role
        input_words = Input(
            shape=(self.input_length, ), dtype=tf.uint32,
            name='input_words')  # Switched dtype to tf specific (team1-change)
        input_roles = Input(
            shape=(self.input_length, ), dtype=tf.uint32,
            name='input_roles')  # Switched dtype to tf specific (team1-change)
        target_role = Input(
            shape=(1, ), dtype=tf.uint32,
            name='target_role')  # Switched dtype to tf specific (team1-change)

        # role based embedding layer
        embedding_layer = role_based_word_embedding(
            input_words, input_roles, n_word_vocab, n_role_vocab,
            glorot_uniform(), missing_word_id, self.input_length,
            n_factors_emb, True, using_dropout, dropout_rate)

        # sum on input_length direction;
        # obtaining context embedding layer, shape is (batch_size, n_factors_emb)
        event_embedding = Lambda(
            lambda x: K.sum(x, axis=1),
            name='event_embedding',
            output_shape=(n_factors_emb, ))(embedding_layer)

        # fully connected layer, output shape is (batch_size, input_length, n_hidden)
        hidden = Dense(n_hidden,
                       activation='linear',
                       input_shape=(n_factors_emb, ),
                       name='projected_event_embedding')(event_embedding)

        # non-linear layer, using 1 to initialize
        non_linearity = PReLU(alpha_initializer='ones',
                              name='context_embedding')(hidden)

        # hidden layer
        hidden_layer2 = target_word_hidden(non_linearity,
                                           target_role,
                                           n_word_vocab,
                                           n_role_vocab,
                                           glorot_uniform(),
                                           n_factors_cls,
                                           n_hidden,
                                           using_dropout=using_dropout,
                                           dropout_rate=dropout_rate)

        # softmax output layer
        output_layer = Dense(n_word_vocab,
                             activation='softmax',
                             input_shape=(n_factors_cls, ),
                             name='softmax_word_output')(hidden_layer2)

        self.model = Model(inputs=[input_words, input_roles, target_role],
                           outputs=[output_layer])

        self.model.compile(optimizer, loss, metrics)
Example No. 30
X_train, y_train = np.array(X_train), np.array(y_train)

X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))

# Build the LSTM model
regressor = Sequential()
regressor.add(
    LSTM(units=50, return_sequences=True, input_shape=(X_train.shape[1], 1)))
regressor.add(Dropout(0.2))
regressor.add(LSTM(units=50, return_sequences=True))
regressor.add(Dropout(0.2))
regressor.add(LSTM(units=50, return_sequences=True))
regressor.add(Dropout(0.2))
regressor.add(LSTM(units=50))
regressor.add(Dropout(0.2))
regressor.add(Dense(units=1))
regressor.compile(optimizer='adam', loss='mean_squared_error')

# If a saved model file exists, load it
if path.exists("mymodel.h5"):
    regressor.load_weights("mymodel.h5")
else:
    # Otherwise, train the model
    regressor.fit(X_train, y_train, epochs=100, batch_size=32)
    regressor.save("mymodel.h5")

# Load data from 1/1/2019 to 2/10/2019
dataset_test = pd.read_csv('vcb_2019.csv')
real_stock_price = dataset_test.iloc[:, 1:2].values

# Perform the prediction