def create_params(label2ids):
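    """Collect hyperparameters and dataset-derived size limits from label2ids into a Params object."""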
    params = Params()
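    # size limits taken from the dataset statistics in label2ids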
    params.max_sentence_length = label2ids['max_sentence_length']
    params.max_n_analyses = label2ids['max_n_analysis']
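    # training schedule: single-sentence batches, 40 sub-epochs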
    params.batch_size = 1
    params.n_subepochs = 40
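    # maximum lengths of surface forms, roots, and analyses, also from label2ids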
    params.max_surface_form_length = label2ids['max_surface_form_length']
    params.max_word_root_length = label2ids['max_word_root_length']
    params.max_analysis_length = label2ids['max_analysis_length']
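    # vocabulary sizes for characters and morphological tags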
    params.char_vocabulary_size = label2ids['character_unique_count']['value']
    params.tag_vocabulary_size = label2ids['morph_token_unique_count']['value']
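    # LSTM and embedding dimensions; the sentence-level LSTM is twice the character LSTM width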
    params.char_lstm_dim = 100
    params.char_embedding_dim = 100
    params.tag_lstm_dim = params.char_lstm_dim
    params.tag_embedding_dim = 100
    params.sentence_level_lstm_dim = 2 * params.char_lstm_dim
    return params
        train_and_test_sentences, label2ids = read_datafile(args.train_filepath, args.test_filepath)

        params = create_params(label2ids)

        # train_and_test_sentences, label2ids = read_datafile("test.merge.utf8", "test.merge.utf8")

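        # build the network from the collected parameters and compile it for training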
        model = build_model(params)

        model.compile(optimizer='rmsprop',
                      loss='categorical_crossentropy',