def create_params(label2ids):
    """Assemble the hyper-parameter set from the corpus statistics in label2ids."""
    params = Params()
    # Size limits measured from the data.
    params.max_sentence_length = label2ids['max_sentence_length']
    params.max_n_analyses = label2ids['max_n_analysis']
    # Training schedule.
    params.batch_size = 1
    params.n_subepochs = 40
    # Per-token length limits, also measured from the data.
    params.max_surface_form_length = label2ids['max_surface_form_length']
    params.max_word_root_length = label2ids['max_word_root_length']
    params.max_analysis_length = label2ids['max_analysis_length']
    # Vocabulary sizes.
    params.char_vocabulary_size = label2ids['character_unique_count']['value']
    params.tag_vocabulary_size = label2ids['morph_token_unique_count']['value']
    # Embedding and LSTM dimensions; the sentence-level LSTM is twice the
    # char LSTM width (consistent with consuming a concatenated encoding).
    params.char_lstm_dim = 100
    params.char_embedding_dim = 100
    params.tag_lstm_dim = params.char_lstm_dim
    params.tag_embedding_dim = 100
    params.sentence_level_lstm_dim = 2 * params.char_lstm_dim
    return params
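
# Example of the label2ids mapping create_params expects (illustrative
# values only; the real mapping is produced by read_datafile):
#   {
#       'max_sentence_length': 50,
#       'max_n_analysis': 10,
#       'max_surface_form_length': 30,
#       'max_word_root_length': 20,
#       'max_analysis_length': 15,
#       'character_unique_count': {'value': 120},
#       'morph_token_unique_count': {'value': 300},
#   }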
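
# A minimal sketch of the matching load path for a saved model, assuming the
# CLI exposes a model_path argument and that a build_model(params) factory is
# defined elsewhere in this module (build_model is a hypothetical name):
def load_trained_model(args, label2ids):
    params = create_params(label2ids)
    model = build_model(params)           # hypothetical model factory
    model.load_weights(args.model_path)   # Keras: restore saved weights
    return label2ids, params, model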

if __name__ == "__main__":
    parser = create_parser()
    args = parser.parse_args()

    if args.command == "train":
        # Read the training and test corpora and collect the statistics used below.
        train_and_test_sentences, label2ids = read_datafile(args.train_filepath, args.test_filepath)
        params = create_params(label2ids)