示例#1
0
    def build_model(self, layers, compile=None):
        """Build ``self.model`` from a configuration.

        Args:
            layers ([dict]): Layer definitions, one dict per layer. Each
                definition must contain a ``type`` key (``dense``, ``lstm``,
                ``dropout`` or ``cudnnlstm``) selecting the layer class; the
                remaining keys are forwarded to that class's ``from_config``.
                NOTE: the ``type`` key is popped, so the caller's dicts are
                mutated in place.
            compile (dict): Keyword arguments forwarded to Keras
                ``Model.compile`` (https://keras.io/zh/models/model/#compile).
                Defaults to an empty dict.

        Raises:
            ValueError: If a layer definition has an unknown ``type``.
        """
        # Avoid the mutable-default-argument pitfall of ``compile={}``.
        if compile is None:
            compile = {}

        # Dispatch table: layer type name -> Keras layer class.
        layer_classes = {
            'dense': Dense,          # https://keras.io/zh/layers/core/#dense
            'lstm': LSTM,            # https://keras.io/zh/layers/recurrent/#lstm
            'dropout': Dropout,      # https://keras.io/zh/layers/core/#dropout
            'cudnnlstm': CuDNNLSTM,  # https://keras.io/zh/layers/recurrent/#cudnnlstm
        }

        for layer in layers:
            t = layer.pop('type')
            try:
                cls = layer_classes[t]
            except KeyError:
                # Previously an unknown type was silently skipped, which hid
                # misconfigured models; fail loudly instead.
                raise ValueError('unknown layer type: {!r}'.format(t))
            self.__model.add(cls.from_config(layer))

        # https://keras.io/zh/models/model/#compile
        self.__model.compile(**compile)
示例#2
0
    # json_string = model.to_json()
    # print(json_string)
    # import json
    # with open('s2s.json', 'w') as f:
    #     json.dump(json_string, f)
    # #sys.exit()

else:
    # Restore a previously trained seq2seq model and rebuild the
    # encoder/decoder graph from its layers for inference.
    print('load model')

    model = load_model(args.model_path)
    # Vocabulary sizes are recovered from the loaded layers' input shapes;
    # index 2 is the last axis of a (batch, timesteps, num_tokens) input.
    num_encoder_tokens = model.layers[0].input_shape[2]
    num_decoder_tokens = model.layers[1].input_shape[2]
    encoder_inputs = Input(shape=(None, num_encoder_tokens))
    # NOTE(review): `from_config` builds a layer with the same
    # hyper-parameters but freshly initialized weights — the trained weights
    # of the loaded layer are NOT copied here. Confirm the weights are
    # transferred elsewhere before this model is used for inference.
    encoder = LSTM.from_config(model.layers[2].get_config())
    encoder_outputs, state_h, state_c = encoder(encoder_inputs)
    # We discard `encoder_outputs` and only keep the states.
    encoder_states = [state_h, state_c]

    # Set up the decoder, using `encoder_states` as initial state.
    decoder_inputs = Input(shape=(None, num_decoder_tokens))
    # We set up our decoder to return full output sequences,
    # and to return internal states as well. We don't use the
    # return states in the training model, but we will use them in inference.
    decoder_lstm = LSTM.from_config(model.layers[3].get_config())
    decoder_outputs, _, _ = decoder_lstm(decoder_inputs,
                                         initial_state=encoder_states)
    # Final projection layer recreated from the loaded model's config.
    decoder_dense = Dense.from_config(model.layers[4].get_config())
    decoder_outputs = decoder_dense(decoder_outputs)
示例#3
0
    for layer in model.layers:
        print("{}: {} => {}".format(layer.name, layer.input_shape,
                                    layer.output_shape))
    print()


model = load_model("./models/kuzushiji_cnn_lstm_1565827392.348016.h5")
print_model(model, title="Original model:")

# Encoder: the loaded model truncated at the 'multiply_1' layer — it maps
# the original model input to the intermediate encoding fed to the LSTM.
encode_and_transform = Model(inputs=model.input,
                             outputs=model.get_layer('multiply_1').output)
print_model(encode_and_transform, title="Encoder:")
# Explicit inputs for the decoder LSTM's initial state (h and c), so the
# decoder can be stepped manually during inference.
decoder_input_h = Input(shape=(max_seq_length, ))
decoder_input_c = Input(shape=(max_seq_length, ))
decoder_internal_state = [decoder_input_h, decoder_input_c]
# NOTE(review): `from_config` recreates the layer with freshly initialized
# weights; the trained weights of 'lstm_1' are not copied here — verify the
# weights are transferred before running inference with `decoder_model`.
decoder_lstm = LSTM.from_config(model.get_layer('lstm_1').get_config())

# The decoder consumes the encoder's output tensor; its input shape is read
# from the 'multiply_1' layer (dropping the batch dimension).
encoder_output_to_decoder_input = Input(
    shape=model.get_layer('multiply_1').output_shape[1:])
lstm_outputs, state_h, state_c = decoder_lstm(
    encoder_output_to_decoder_input, initial_state=decoder_internal_state)
decoder_states = [state_h, state_c]
# Reuse the loaded model's time-distributed output layer directly (this one
# keeps its trained weights, unlike the `from_config` LSTM above).
decoder_outputs = model.get_layer('time_distributed_1')(lstm_outputs)
# Inference decoder: (encoder output, h, c) -> (predictions, new h, new c).
decoder_model = Model([encoder_output_to_decoder_input] +
                      decoder_internal_state,
                      [decoder_outputs] + decoder_states)
print_model(decoder_model, title="Modified decoder:")

# extract encoder & encode image
#image_tensor = encoder.predict(input_image)
# extract and setup decoder