def create_mem_network():
    input_story = layers.Input(shape=(story_maxlen, ))
    input_m_encoded = layers.Embedding(input_dim=vocab_size,
                                       output_dim=64)(input_story)
    input_m_encoded = layers.Dropout(0.3)(input_m_encoded)

    input_c_encoded = layers.Embedding(input_dim=vocab_size,
                                       output_dim=query_maxlen)(input_story)
    input_c_encoded = layers.Dropout(0.3)(input_c_encoded)

    input_ques = layers.Input(shape=(query_maxlen, ))
    ques_encoded = layers.Embedding(input_dim=vocab_size,
                                    output_dim=64,
                                    input_length=query_maxlen)(input_ques)
    ques_encoded = layers.Dropout(0.3)(ques_encoded)
    # (samples, story_maxlen, query_maxlen)
    match = layers.dot([input_m_encoded, ques_encoded], axes=(2, 2))
    match = layers.Activation('softmax')(match)

    response = layers.add([match, input_c_encoded])  # (samples, story_maxlen, query_maxlen)
    response = layers.Permute((2, 1))(response)

    answer = layers.concatenate([response, ques_encoded])
    answer = layers.LSTM(32)(answer)
    answer = layers.Dropout(0.3)(answer)
    answer = layers.Dense(vocab_size, activation=None)(answer)

    return models.Model(inputs=[input_story, input_ques], outputs=answer)
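
A minimal training sketch for the memory network above. It assumes `vocab_size`, `story_maxlen`, and `query_maxlen` are defined by the surrounding script; the array names are illustrative. Because the final Dense layer has no activation, the loss must treat the outputs as logits:

model = create_mem_network()
model.compile(optimizer='rmsprop',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
# inputs_train: (n, story_maxlen), queries_train: (n, query_maxlen),
# answers_train: (n,) integer word ids
model.fit([inputs_train, queries_train], answers_train, batch_size=32, epochs=10)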
Example 2
def main():
    vocabulary_size = 10000
    maxlen = 24

    model = Sequential()
    model.add(layers.Embedding(vocabulary_size, 64, name="text"))
    model.add(
        layers.Conv1D(64, 4, padding='valid', activation='relu', strides=1))
    model.add(layers.MaxPooling1D(pool_size=3))
    model.add(layers.LSTM(64))
    model.add(layers.Dense(32, activation='relu'))
    model.add(layers.Dense(1, activation='sigmoid'))

    # If using standalone Keras rather than tf.keras:
    # model = tf.keras.models.Model(model)

    model.compile(optimizer='rmsprop',
                  loss='binary_crossentropy',
                  metrics=['acc'])

    estimator_model = tf.keras.estimator.model_to_estimator(keras_model=model)

    data, labels = mr_load_data(max_word_num=vocabulary_size)
    data = pad_sequences(data, padding="pre", maxlen=maxlen)
    labels = np.asarray(labels).reshape(-1, 1)
    print(labels.shape)

    x_train, y_train = data, labels
    input_dict = {"text_input": x_train}
    input_fn = train_input_fn(input_dict, y_train, batch_size=32)
    print(input_fn)

    estimator_model.train(input_fn=input_fn, steps=10000)
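
The helpers `mr_load_data` and `train_input_fn` are defined elsewhere in this project. As a rough sketch (not the project's actual code), a tf.data-based `train_input_fn` compatible with the Estimator call above could look like:

def train_input_fn(features, labels, batch_size=32):
    # returns a zero-argument callable, as estimator.train() expects
    def input_fn():
        dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))
        return dataset.shuffle(1000).repeat().batch(batch_size)
    return input_fn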
Example 3
def define_generator(latent_dim=50, nclasses=10):
    label = layers.Input(shape=(1, ))
    li = layers.Embedding(nclasses, 50)(label)
    li = layers.Dense(7 * 7 * 1, activation="relu")(li)
    li = layers.Reshape((7, 7, 1))(li)

    noise = layers.Input(shape=(latent_dim, ))
    n = layers.Dense(7 * 7 * 384, activation="relu")(noise)
    n = layers.Reshape((7, 7, 384))(n)

    merged = layers.concatenate([n, li], axis=-1)
    x = layers.Conv2DTranspose(filters=192,
                               kernel_size=5,
                               strides=2,
                               padding="same")(merged)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)

    x = layers.Conv2DTranspose(filters=1,
                               kernel_size=5,
                               strides=2,
                               padding="same",
                               activation="tanh")(x)

    model = tf.keras.Model([noise, label], x)

    return model
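
A quick sampling sketch for this conditional generator; the label branch is what conditions the output:

generator = define_generator(latent_dim=50, nclasses=10)
noise = tf.random.normal([16, 50])
labels = tf.random.uniform([16, 1], minval=0, maxval=10, dtype=tf.int32)
fake_images = generator([noise, labels])  # (16, 28, 28, 1), tanh range [-1, 1]

Example 4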
    def build_bigru_model(self, embedding_matrix) -> Tuple[Model, Model]:
        """
        build and return multi-headed BiGru model
        with 1) MLM output from first GRU layer
             2) standard toxicity classification output from second
        :param embedding_matrix:
        :return:
        """
        token_input = layers.Input(shape=(self.max_seq_len,))
        embedding_layer = layers.Embedding(self.vocab_size + 1,
                                           self.embedding_dims,
                                           weights=[embedding_matrix],
                                           trainable=False)
        embedded_input = embedding_layer(token_input)
        gru1_output = layers.Bidirectional(layers.CuDNNGRU(self.num_neurons,
                                                           return_sequences=True))(embedded_input)
        aux_output = layers.Dense(self.vocab_size + 1, 'softmax', name='aux_output')(gru1_output)
        gru2_output = layers.Bidirectional(layers.CuDNNGRU(self.num_neurons))(gru1_output)
        main_output = layers.Dense(6, activation='sigmoid', name='main_output')(gru2_output)

        training_model = Model(inputs=token_input, outputs=[main_output, aux_output])
        mlm_loss = MaskedPenalizedSparseCategoricalCrossentropy(CONFIDENCE_PENALTY)
        training_model.compile(optimizer=optimizers.Adam(),
                               loss={'main_output': MaskedBinaryCrossedentropy(),
                                     'aux_output': mlm_loss})

        inference_model = Model(inputs=token_input, outputs=main_output)

        print('generated bigru model...')
        print(training_model.summary())

        return training_model, inference_model
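
A hedged usage sketch for the two-headed model, assuming the custom masked losses are defined elsewhere in the project and that `builder` is an instance of the surrounding class (names are illustrative):

training_model, inference_model = builder.build_bigru_model(embedding_matrix)
# tokens: (n, max_seq_len); toxicity_labels: (n, 6); mlm_labels: per-position word ids
training_model.fit(tokens,
                   {'main_output': toxicity_labels, 'aux_output': mlm_labels},
                   batch_size=64, epochs=2)
predictions = inference_model.predict(tokens)  # shares the trained weights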
Example 5
    def __init__(self, units):
        super(MyRNN, self).__init__()

        # transform text to embedding representation
        self.embedding = layers.Embedding(hp.total_words,
                                          hp.embedding_len,
                                          input_length=hp.max_sentence_len)

        # two layer rnn
        # normal rnn
        self.rnn = Sequential([
            layers.SimpleRNN(units=units,
                             dropout=0.5,
                             return_sequences=True,
                             unroll=True),
            layers.SimpleRNN(units=units, dropout=0.5, unroll=True)
        ])

        # # lstm rnn
        # self.rnn = Sequential([
        #     layers.LSTM(units=units, dropout=0.5, return_sequences=True, unroll=True),
        #     layers.LSTM(units=units, dropout=0.5, unroll=True)
        # ])

        # # gru rnn
        # self.rnn = Sequential([
        #     layers.GRU(units=units, dropout=0.5, return_sequences=True, unroll=True),
        #     layers.GRU(units=units, dropout=0.5, unroll=True)
        # ])

        self.fc = layers.Dense(1, activation=tf.nn.sigmoid)
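
The forward pass is not shown; a minimal `call` consistent with this constructor might be (a sketch, assuming the usual Keras subclassing pattern):

    def call(self, inputs, training=None):
        x = self.embedding(inputs)          # (batch, max_sentence_len, embedding_len)
        x = self.rnn(x, training=training)  # (batch, units)
        return self.fc(x)                   # (batch, 1) sigmoid probability

Example 6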
def create_mem_network():
    sentence = layers.Input(shape=(story_maxlen,), dtype=tf.int32)
    encoded_sentence = layers.Embedding(input_dim=vocab_size, output_dim=50)(sentence)
    encoded_sentence = layers.Dropout(0.3)(encoded_sentence)

    question = layers.Input(shape=(query_maxlen,), dtype=tf.int32)
    encoded_ques = layers.Embedding(input_dim=vocab_size, output_dim=50)(question)
    encoded_ques = layers.Dropout(0.3)(encoded_ques)
    encoded_ques = layers.LSTM(50)(encoded_ques)
    encoded_ques = layers.RepeatVector(story_maxlen)(encoded_ques)

    merged = layers.add([encoded_sentence, encoded_ques])
    merged = layers.LSTM(50)(merged)
    merged = layers.Dropout(0.3)(merged)
    preds = layers.Dense(vocab_size, activation=None)(merged)
    return models.Model(inputs=[sentence, question], outputs=preds)
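
Unlike the first memory network above, this variant encodes the question with an LSTM, broadcasts the encoding across story positions with RepeatVector, and merges by element-wise addition instead of a dot-product attention match. Here too the final Dense has no activation, so pair it with a from_logits loss.

Example 7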
    def build_bigru_model(self, embedding_matrix) -> Model:
        """
        build and return BiGru model using standard optimizer and loss
        :param embedding_matrix:
        :return:
        """
        token_input = layers.Input(shape=(self.max_seq_len, ))
        embedding_layer = layers.Embedding(self.vocab_size + 1,
                                           self.embedding_dims,
                                           weights=[embedding_matrix],
                                           trainable=False)
        embedded_input = embedding_layer(token_input)
        gru_output = layers.Bidirectional(
            layers.CuDNNGRU(self.num_neurons,
                            return_sequences=True))(embedded_input)
        gru_output = layers.Bidirectional(layers.CuDNNGRU(
            self.num_neurons))(gru_output)
        dense_output = layers.Dense(6, activation='sigmoid')(gru_output)

        bigru_model = Model(token_input, dense_output)
        bigru_model.compile(optimizer=optimizers.Adam(),
                            loss=losses.binary_crossentropy)

        print('generated bigru model...')

        return bigru_model
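Example 8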
def build_cnn_model():
  # Instantiate a Keras tensor
  sequences = layers.Input(shape=(max_length, ))
  # Turn positive integer indexes into dense vectors of fixed size
  embedded = layers.Embedding(12000, 64)(sequences)
  # Convolve a kernel over the sequence to produce a tensor of outputs
  # (filters, kernel_size, activation)
  x = layers.Conv1D(64, 3, activation='relu')(embedded)
  # Normalize and scale the activations
  x = layers.BatchNormalization()(x)
  # Downsample by taking the maximum value over each window
  x = layers.MaxPool1D(3)(x)
  x = layers.Conv1D(64, 5, activation='relu')(x)
  x = layers.BatchNormalization()(x)
  x = layers.MaxPool1D(5)(x)
  x = layers.Conv1D(64, 5, activation='relu')(x)
  # Downsample by taking the maximum value over the time dimension
  x = layers.GlobalMaxPool1D()(x)
  x = layers.Flatten()(x)
  # The first argument is the dimensionality of the output space
  x = layers.Dense(100, activation='relu')(x)

  # Sigmoid squashes the output into (0, 1) for binary classification
  predictions = layers.Dense(1, activation='sigmoid')(x)

  model = models.Model(inputs=sequences, outputs=predictions)

  model.compile(
      optimizer='rmsprop',
      loss='binary_crossentropy',
      metrics=['binary_accuracy']
  )

  return model
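
A short end-to-end sketch, assuming `max_length` is defined and `sequences_train`/`y_train` come from the surrounding tokenization pipeline (illustrative names):

from tensorflow.keras.preprocessing.sequence import pad_sequences

model = build_cnn_model()
x_train = pad_sequences(sequences_train, maxlen=max_length)  # token ids < 12000
model.fit(x_train, y_train, batch_size=32, epochs=5, validation_split=0.1)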
Example 9
    def test_variable_not_casted_for_int_inputs(self, strategy_fn):
        x = constant_op.constant([[1]], dtype=dtypes.int32)
        with strategy_fn().scope():
            with policy.policy_scope('infer_float32_vars'):
                layer = layers.Embedding(input_dim=10, output_dim=32)
                y = layer(x)
                self.assertEqual(layer.embeddings.dtype, dtypes.float32)
                self.assertEqual(y.dtype, dtypes.float32)
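Example 10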
    def __init__(self):
        super(Model, self).__init__()
        self.main = tf.keras.Sequential([
            layers.Embedding(vocab_size, embedding_dim, input_length=maxlen),
            layers.GlobalAveragePooling1D(),
            layers.Dense(16, activation=tf.nn.relu),
            layers.Dense(1, activation=tf.nn.sigmoid)
        ])
Example 11
    def __init__(self, units, vocab_size, name):
        super(Embedding, self).__init__(name=name + 'embed')
        self.model_dim = units
        self.embdding = layers.Embedding(input_dim=vocab_size,
                                         output_dim=units,
                                         mask_zero=False)
        # self.lookup = self.add_weight(name='lookup', shape=[vocab_size, self.model_dim], initializer="random_normal")
        self.scale = math.sqrt(self.model_dim)
Example 12
def create_dialog_network(time_steps, classes_num):
    vocab_size = 60
    vec_size = 20
    model = Sequential()
    model.add(layers.Embedding(vocab_size, vec_size, input_length=time_steps))
    model.add(layers.LSTM(vec_size, dropout=0.2, recurrent_dropout=0.2))
    model.add(layers.Dense(classes_num, activation='softmax'))

    return model
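
Since the model ends in a softmax over `classes_num`, a typical compile step would be (illustrative argument values; use categorical_crossentropy instead for one-hot targets):

model = create_dialog_network(time_steps=30, classes_num=5)
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])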
Example 13
    def __init__(self, cfg=Args, vocab=40558, n_ctx=512):
        super(TransformerModel, self).__init__()

        self.vocab = vocab
        self.embed = layers.Embedding(vocab, cfg.n_embed)
        # build positional information for the input embedding
        self.embed.build([1])
        self.drop = layers.Dropout(cfg.embed_pdrop)
        self.h = [Block(n_ctx, cfg, scale=False) for _ in range(cfg.n_layer)]
Example 14
def create_lstm():
    model = tf.keras.Sequential()
    model.add(layers.Embedding(MAX_WORDS, 64, input_length=MAX_LEN))
    model.add(layers.Conv1D(32, 3, padding='same', activation='relu'))
    model.add(layers.MaxPooling1D(pool_size=4))
    model.add(layers.LSTM(64))
    model.add(layers.Dense(250, activation='relu'))
    model.add(layers.Dense(1, activation="sigmoid"))
    return model
Example 15
    def create_model(self):
        input_text = Input(shape=(self.max_sequence_length,))
        input_image = Input(shape=(self.img_height, self.img_width,
                                   self.num_channels))

        embedded_id = layers.Embedding(self.vocab_size,
                                       self.embedding_size)(input_text)
        embedded_id = layers.Flatten()(embedded_id)
        embedded_id = layers.Dense(units=input_image.shape[1] *
                                   input_image.shape[2])(embedded_id)
        embedded_id = layers.Reshape(target_shape=(input_image.shape[1],
                                                   input_image.shape[2],
                                                   1))(embedded_id)

        x = layers.Concatenate(axis=3)([input_image, embedded_id])

        x = layers.Conv2D(filters=64,
                          kernel_size=(3, 3),
                          strides=(2, 2),
                          padding='same')(x)
        x = layers.LeakyReLU()(x)
        x = layers.Dropout(0.3)(x)

        x = layers.Conv2D(filters=64,
                          kernel_size=(3, 3),
                          strides=(2, 2),
                          padding='same')(x)
        x = layers.LeakyReLU()(x)
        x = layers.Dropout(rate=0.3)(x)

        # x = layers.Conv2D(filters=64, kernel_size=(3, 3), strides=(2, 2), padding='same')(x)
        # x = layers.LeakyReLU()(x)
        # x = layers.Dropout(rate=0.3)(x)
        #
        # x = layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(2, 2), padding='same')(x)
        # x = layers.LeakyReLU()(x)
        # x = layers.Dropout(rate=0.3)(x)

        x = layers.Conv2D(filters=128,
                          kernel_size=(3, 3),
                          strides=(2, 2),
                          padding='same')(x)
        x = layers.LeakyReLU()(x)
        x = layers.Dropout(rate=0.3)(x)

        x = layers.Flatten()(x)
        x = layers.Dense(units=1000)(x)
        x = layers.LeakyReLU()(x)

        x = layers.Dense(units=1)(x)

        model = Model(name='discriminator',
                      inputs=[input_text, input_image],
                      outputs=x)

        return model
Example 16
    def __init__(self, vocab_size, embedding_size, hidden_size, batch_size,
                 intent_size):
        super(TestModel, self).__init__()
        self.embedding = layers.Embedding(input_dim=vocab_size,
                                          output_dim=embedding_size,
                                          embeddings_initializer='uniform',
                                          name='embedding')
        # initialize encoder and decoder
        self.encoder = Encoder(self.embedding, hidden_size, batch_size)
        self.decoder = Decoder(intent_size)
Example 17
def multi_input_model():
    text_vocabulary_size = 10000
    question_vocabulary_size = 10000
    answer_vocabulary_size = 500
    text_input = Input(shape=(None,), dtype='int32', name='text')
    embedded_text = layers.Embedding(
        text_vocabulary_size, 64)(text_input)
    encoded_text = layers.CuDNNLSTM(32)(embedded_text)
    question_input = Input(shape=(None,),
                           dtype='int32',
                           name='question')
    embedded_question = layers.Embedding(
        question_vocabulary_size, 32)(question_input)
    encoded_question = layers.CuDNNLSTM(16)(embedded_question)

    # note: the two inputs are merged here
    # with axis=i the operation runs along the i-th dimension; axis=-1 concatenates along the last one
    concatenated = layers.concatenate([encoded_text, encoded_question],
                                      axis=-1)
    answer = layers.Dense(answer_vocabulary_size,
                          activation='softmax')(concatenated)
    model = Model([text_input, question_input], answer)
    '''
    model.compile(optimizer='rmsprop',
                  loss='categorical_crossentropy',
                  metrics=['acc'])
    num_samples = 1000
    max_length = 100
    text = np.random.randint(1, text_vocabulary_size,
                             size=(num_samples, max_length))
    question = np.random.randint(1, question_vocabulary_size,
                                 size=(num_samples, max_length))
    answers = np.random.randint(answer_vocabulary_size, size=(num_samples))
    answers = to_categorical(answers, answer_vocabulary_size)
    model.fit([text, question], answers, epochs=10, batch_size=128)
    model.fit({'text': text, 'question': question}, answers,
              epochs=10, batch_size=128)
    '''
    return model
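Example 18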
def build_rnn_model():
    sequences = layers.Input(shape=(MAX_LENGTH, ))
    embedded = layers.Embedding(MAX_FEATURES, 64)(sequences)
    x = layers.LSTM(128, return_sequences=True)(embedded)
    x = layers.LSTM(128)(x)
    x = layers.Dense(32, activation='relu')(x)
    x = layers.Dense(100, activation='relu')(x)
    predictions = layers.Dense(1, activation='sigmoid')(x)
    model = models.Model(inputs=sequences, outputs=predictions)
    model.compile(optimizer='rmsprop',
                  loss='binary_crossentropy',
                  metrics=['binary_accuracy'])
    return model
Example 19
def multi_output_model():
    vocabulary_size = 50000
    num_income_groups = 10
    posts_input = Input(shape=(None,), dtype='int32', name='posts')
    embedded_posts = layers.Embedding(vocabulary_size, 256)(posts_input)  # input_dim first, then output_dim
    x = layers.Conv1D(128, 5, activation='relu')(embedded_posts)
    x = layers.MaxPooling1D(5)(x)
    x = layers.Conv1D(256, 5, activation='relu')(x)
    x = layers.Conv1D(256, 5, activation='relu')(x)
    x = layers.MaxPooling1D(5)(x)
    x = layers.Conv1D(256, 5, activation='relu')(x)
    x = layers.Conv1D(256, 5, activation='relu')(x)
    x = layers.GlobalMaxPooling1D()(x)
    x = layers.Dense(128, activation='relu')(x)

    # three output heads
    age_prediction = layers.Dense(1, name='age')(x)
    income_prediction = layers.Dense(num_income_groups,
                                     activation='softmax',
                                     name='income')(x)
    gender_prediction = layers.Dense(1, activation='sigmoid', name='gender')(x)
    model = Model(posts_input,
                  [age_prediction, income_prediction, gender_prediction])

    '''
    # choose among multiple loss specifications at compile time
    model.compile(optimizer='rmsprop',
        loss=['mse', 'categorical_crossentropy', 'binary_crossentropy'])
    model.compile(optimizer='rmsprop',
        loss={'age': 'mse',
        'income': 'categorical_crossentropy',
        'gender': 'binary_crossentropy'})
    model.compile(optimizer='rmsprop',
        loss=['mse', 'categorical_crossentropy', 'binary_crossentropy'],
        loss_weights=[0.25, 1., 10.])
    model.compile(optimizer='rmsprop',
        loss={'age': 'mse',
        'income': 'categorical_crossentropy',
        'gender': 'binary_crossentropy'},
        loss_weights={'age': 0.25,
        'income': 1.,
        'gender': 10.})
    model.fit(posts, [age_targets, income_targets, gender_targets],
        epochs=10, batch_size=64)   
    model.fit(posts, {'age': age_targets,
        'income': income_targets,
        'gender': gender_targets},
        epochs=10, batch_size=64)
    '''

    return model
Example 20
def make_generator_model(input_tensor=None,
                         input_shape=(noise_dim,)):
  """

  Returns:
    tf.keras.Model
  """
  if input_tensor is None:
    img_input = layers.Input(shape=input_shape)
  else:
    if not backend.is_keras_tensor(input_tensor):
      img_input = layers.Input(tensor=input_tensor, shape=input_shape)
    else:
      img_input = input_tensor

  # img_input carries the latent noise vector
  noise = img_input
  label = layers.Input(shape=(1,), dtype='int32')
  # embed the class label into the latent dimensionality so it can be
  # combined with the noise by element-wise multiplication
  label_embedding = layers.Flatten()(
      layers.Embedding(num_classes, noise_dim)(label))
  x = layers.multiply([noise, label_embedding])

  # 7 * 7 * 128 units so the output can be reshaped to (7, 7, 128) below
  x = layers.Dense(7 * 7 * 128,
                   activation=tf.nn.relu,
                   use_bias=False,
                   name='fc1')(x)

  x = layers.Reshape(target_shape=(7, 7, 128), name='reshape1')(x)

  x = layers.BatchNormalization(momentum=0.8, name='bn1')(x)
  x = layers.UpSampling2D(name='upsampling1')(x)

  x = layers.Conv2D(128, (3, 3),
                    activation=tf.nn.relu,
                    padding="same",
                    use_bias=False,
                    name='conv1')(x)
  x = layers.BatchNormalization(momentum=0.8, name='bn2')(x)
  x = layers.UpSampling2D(name='upsampling2')(x)

  x = layers.Conv2D(64, (3, 3),
                    activation=tf.nn.relu,
                    padding="same",
                    use_bias=False,
                    name='conv2')(x)
  x = layers.BatchNormalization(momentum=0.8, name='bn3')(x)

  x = layers.Conv2D(1, (3, 3),
                    activation=tf.nn.tanh,
                    padding="same",  # keep the 28x28 spatial size
                    use_bias=False,
                    name='conv3')(x)

  return models.Model([noise, label], x)
Example 21
    def __init__(self, vocab_size, embedding_size, max_sentence_len):
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        # create model
        self.model = models.Sequential()
        self.model.add(
            layers.Embedding(vocab_size, embedding_size,
                             input_length=max_sentence_len))
        self.model.add(layers.GlobalAveragePooling1D())
        self.model.add(layers.Dense(units=1, activation='sigmoid'))
        # choose loss and optimizer
        self.model.compile(loss='binary_crossentropy',
                           optimizer='adam',
                           metrics=['accuracy'])
        self.model.summary()
Example 22
File: DEM.py  Project: ifuding/TC
    def create_dem_bc_aug(self, kernel_initializer = 'he_normal', img_flat_len = 1024, only_emb = False):
        attr_input = layers.Input(shape = (self.attr_len,), name = 'attr')
        word_emb = layers.Input(shape = (self.wv_len,), name = 'wv')
        img_input = layers.Input(shape = (self.pixel, self.pixel, 3))
        label = layers.Input(shape = (1,), name = 'label')
        
        # img_flat_model = Model(inputs = self.img_model[0].inputs, outputs = self.img_model[0].get_layer(name = 'avg_pool').output)
        imag_classifier = self.img_flat_model(img_input)
        if self.attr_emb_transform == 'flat':
            attr_emb = layers.Embedding(294, self.attr_emb_len)(attr_input)
            attr_dense = layers.Flatten()(attr_emb) #layers.GlobalAveragePooling1D()(attr_emb)
        elif self.attr_emb_transform == 'dense':
            attr_dense = layers.Dense(self.attr_emb_len, use_bias = True, kernel_initializer=kernel_initializer, 
                        kernel_regularizer = l2(1e-4), name = 'attr_dense')(attr_input)
        if only_emb:
            attr_word_emb = word_emb
        else:
            attr_word_emb = layers.Concatenate(name = 'attr_word_emb')([word_emb, attr_dense])
        attr_word_emb_dense = self.full_connect_layer(attr_word_emb, hidden_dim = [
#                                                                             int(img_flat_len * 4),
                                                                            int(img_flat_len * 2),
                                                                            int(img_flat_len * 1.5), 
                                                                            int(img_flat_len * 1.25), 
#                                                                             int(img_flat_len * 1.125),
                                                                            int(img_flat_len)
                                                                            ], \
                                                activation = 'relu', resnet = False, drop_out_ratio = 0.2)
#         attr_word_emb_dense = self.full_connect_layer(attr_word_emb_dense, hidden_dim = [img_flat_len], 
#                                                 activation = 'relu')
        
        attr_x_img = layers.Lambda(lambda x: x[0] * x[1], name = 'attr_x_img')([attr_word_emb_dense, imag_classifier])
#         attr_x_img = layers.Concatenate(name = 'attr_x_img')([attr_word_emb_dense, imag_classifier])
    
        attr_img_input = layers.Input(shape = (img_flat_len,), name = 'attr_img_input')
#         attr_img_input = layers.Input(shape = (img_flat_len * 2,), name = 'attr_img_input')
        proba = self.full_connect_layer(attr_img_input, hidden_dim = [1], activation = 'sigmoid')
        attr_img_model = Model(inputs = attr_img_input, outputs = proba, name = 'attr_x_img_model')
        
        out = attr_img_model([attr_x_img])
        
#         dem_bc_model = self.create_dem_bc(kernel_initializer = 'he_normal', 
#                                            img_flat_len = img_flat_len, 
#                                            only_emb = only_emb)
#         attr_word_emb_dense, out = dem_bc_model([imag_classifier, attr_input, word_emb, label])
        
        bc_loss = K.mean(binary_crossentropy(label, out))
        model = Model([img_input, attr_input, word_emb, label], outputs = [attr_word_emb_dense, out, imag_classifier])
        model.add_loss(bc_loss)
        model.compile(optimizer=Adam(lr=1e-4), loss=None)
        return model
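Example 23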
    def test_generator_dynamic_shapes(self):

        x = [
            'I think juice is great',
            'unknown is the best language since slicedbread',
            'a a a a a a a',
            'matmul',
            'Yaks are also quite nice',
        ]
        y = [1, 0, 0, 1, 1]

        vocab = {
            word: i + 1
            for i, word in enumerate(
                sorted(set(itertools.chain(*[i.split() for i in x]))))
        }

        def data_gen(batch_size=2):
            np.random.seed(0)
            data = list(zip(x, y)) * 10
            np.random.shuffle(data)

            def pack_and_pad(queue):
                x = [[vocab[j] for j in i[0].split()] for i in queue]
                pad_len = max(len(i) for i in x)
                x = np.array([i + [0] * (pad_len - len(i)) for i in x])
                y = np.array([i[1] for i in queue])
                del queue[:]
                return x, y[:, np.newaxis]

            queue = []
            for i, element in enumerate(data):
                queue.append(element)
                if not (i + 1) % batch_size:
                    yield pack_and_pad(queue)

            if queue:
                # Last partial batch
                yield pack_and_pad(queue)

        model = testing_utils.get_model_from_layers(
            [
                layers_module.Embedding(input_dim=len(vocab) + 1, output_dim=4),
                layers_module.SimpleRNN(units=1),
                layers_module.Activation('sigmoid')
            ],
            input_shape=(None,))

        model.compile(loss=losses.binary_crossentropy, optimizer='sgd')
        model.fit(data_gen(), epochs=1, steps_per_epoch=5)
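
Note that each yielded batch is padded only to that batch's own longest sentence, so consecutive batches have different sequence lengths; declaring input_shape=(None,) is what lets the Embedding + SimpleRNN model accept those dynamic shapes.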
Example 24
    def __init__(self, hidden_len=64):
        super(MyLSTM, self).__init__()

        self.embedding = layers.Embedding(hps.total_words,
                                          hps.embedding_len,
                                          input_length=hps.max_sentence_len)

        self.state = [
            tf.zeros(shape=[hps.batch_size, hidden_len]),
            tf.zeros(shape=[hps.batch_size, hidden_len])
        ]
        self.lstm_cell = MyLSTMCell(hidden_len=hidden_len)

        self.drop = layers.Dropout(0.5)
        self.fc = layers.Dense(1, activation=tf.nn.sigmoid)
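
As with MyRNN above, the forward pass is omitted. A sketch of a `call` that steps the custom cell through time, assuming MyLSTMCell follows the Keras cell convention of returning (output, state):

    def call(self, inputs, training=None):
        x = self.embedding(inputs)  # (batch, max_sentence_len, embedding_len)
        state = self.state          # initial [h, c]
        for t in range(hps.max_sentence_len):
            out, state = self.lstm_cell(x[:, t, :], state)
        out = self.drop(out, training=training)
        return self.fc(out)         # (batch, 1)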
Example 25
def get_glove_model(vocab_size, glove_dimension, embed_matrix, max_length,
                    num_classes):
    keras.backend.clear_session()
    model = Sequential()
    embed = layers.Embedding(vocab_size,
                             glove_dimension,
                             weights=[embed_matrix],
                             input_length=max_length,
                             trainable=False)

    model.add(embed)
    model.add(layers.Flatten())
    model.add(layers.Dense(30, activation=tf.nn.relu))
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(num_classes, activation=tf.nn.softmax))

    return model
Example 26
    def define_model(self):
        input_img = Input(shape=[
            self.model_parameters.img_height,
            self.model_parameters.img_width,
            self.model_parameters.num_channels,
        ])
        class_id = Input(shape=[1])

        embedded_id = layers.Embedding(input_dim=10, output_dim=50)(class_id)
        embedded_id = layers.Dense(units=input_img.shape[1] *
                                   input_img.shape[2])(embedded_id)
        embedded_id = layers.Flatten()(embedded_id)

        x = layers.Conv2D(filters=128,
                          kernel_size=(3, 3),
                          strides=(1, 1),
                          padding='same')(input_img)
        x = layers.BatchNormalization(momentum=0.9)(x)
        x = layers.LeakyReLU(alpha=0.1)(x)

        x = layers.Conv2D(filters=128,
                          kernel_size=(4, 4),
                          strides=(2, 2),
                          padding='same')(x)
        x = layers.BatchNormalization(momentum=0.9)(x)
        x = layers.LeakyReLU(alpha=0.1)(x)

        x = layers.Conv2D(filters=128,
                          kernel_size=(4, 4),
                          strides=(2, 2),
                          padding='same')(x)
        x = layers.BatchNormalization(momentum=0.9)(x)
        x = layers.LeakyReLU(alpha=0.1)(x)
        x = layers.Flatten()(x)

        x = layers.Concatenate()([x, embedded_id])
        x = layers.Dense(units=512, activation='relu')(x)

        x = layers.Dense(units=1)(x)

        model = Model(name=self.model_name,
                      inputs=[input_img, class_id],
                      outputs=x)

        return model
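Example 27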
def build_model():
    sequences = layers.Input(shape=(MAX_LENGTH, ))
    embedded = layers.Embedding(MAX_FEATURES, 64)(sequences)
    x = layers.Conv1D(64, 3, activation='relu')(embedded)
    x = layers.BatchNormalization()(x)
    x = layers.MaxPool1D(3)(x)
    x = layers.Conv1D(64, 5, activation='relu')(x)
    x = layers.BatchNormalization()(x)
    x = layers.MaxPool1D(5)(x)
    x = layers.Conv1D(64, 5, activation='relu')(x)
    x = layers.GlobalMaxPool1D()(x)
    x = layers.Flatten()(x)
    x = layers.Dense(100, activation='relu')(x)
    predictions = layers.Dense(1, activation='sigmoid')(x)
    model = models.Model(inputs=sequences, outputs=predictions)
    model.compile(optimizer='rmsprop',
                  loss='binary_crossentropy',
                  metrics=['binary_accuracy'])
    return model
Example 28
def define_generator(latent_dim=50, nclasses=10):
    label = layers.Input(shape=(1, ))
    li = layers.Embedding(nclasses, 50)(label)
    li = layers.Dense(7*7)(li)
    li = layers.Reshape((7, 7, 1))(li)

    in_lat = layers.Input((latent_dim,))
    lat = layers.Dense(7*7*128)(in_lat)
    lat = layers.Reshape((7, 7, 128))(lat)

    x = layers.concatenate([li, lat], axis=-1)
    x = layers.Conv2DTranspose(filters=128, kernel_size=4, strides=2, padding="same")(x)
    x = layers.LeakyReLU(alpha=0.2)(x)

    x = layers.Conv2DTranspose(filters=128, kernel_size=4, strides=2, padding="same")(x)
    x = layers.LeakyReLU(alpha=0.2)(x)

    out = layers.Conv2D(filters=1, kernel_size=7, activation="tanh", padding="same")(x)
    model = tf.keras.Model([label, in_lat], out)
    return model
Example 29
def define_discriminator(in_shape=(28, 28, 1), nclasses=10):
    label = layers.Input(shape=(1, ))
    li = layers.Embedding(nclasses, 50)(label)
    li = layers.Dense(in_shape[0]*in_shape[1])(li)
    li = layers.Reshape(in_shape)(li)
    image = layers.Input(shape=in_shape)
    x = layers.concatenate([image, li], axis=-1)

    x = layers.Conv2D(filters=128, kernel_size=3, strides=2, padding="same")(x)
    x = layers.LeakyReLU(alpha=0.2)(x)

    x = layers.Conv2D(filters=128, kernel_size=3, strides=2, padding="same")(x)
    x = layers.LeakyReLU(alpha=0.2)(x)

    x = layers.Flatten()(x)
    x = layers.Dropout(0.4)(x)

    out = layers.Dense(1, activation="sigmoid")(x)
    model = tf.keras.Model([image, label], out)
    return model
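
These two models combine in the usual conditional-GAN training loop; one common wiring (a sketch, not code from this project) freezes the discriminator inside a stacked model used to train the generator:

g = define_generator(latent_dim=50, nclasses=10)
d = define_discriminator()
d.compile(optimizer=tf.keras.optimizers.Adam(2e-4), loss='binary_crossentropy')

d.trainable = False  # freeze D's weights while G trains through the stack
label = layers.Input(shape=(1,))
noise = layers.Input(shape=(50,))
gan = tf.keras.Model([label, noise], d([g([label, noise]), label]))
gan.compile(optimizer=tf.keras.optimizers.Adam(2e-4), loss='binary_crossentropy')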
Example 30
def build_lstm(num_filters, vocab_size, dropout, embedding_dim, maxlen,
               optimizer):
    sequential_lstm = Sequential()
    sequential_lstm.add(
        layers.Embedding(input_dim=vocab_size,
                         output_dim=embedding_dim,
                         input_length=maxlen))
    sequential_lstm.add(layers.SpatialDropout1D(dropout))
    sequential_lstm.add(
        layers.LSTM(num_filters,
                    dropout=dropout,
                    recurrent_dropout=dropout,
                    return_sequences=True))
    sequential_lstm.add(
        layers.LSTM(num_filters, dropout=dropout, recurrent_dropout=dropout))
    sequential_lstm.add(layers.Dense(6, activation='softmax'))
    sequential_lstm.compile(loss='categorical_crossentropy',
                            optimizer=optimizer,
                            metrics=['accuracy'])
    return sequential_lstm