Code example #1
0
def load_parameters(parameters_filepath, arguments=None, verbose=True):
    '''
    Load parameters from the ini file if specified, take into account any command line argument, and ensure that each parameter is cast to the correct type.
    Command line arguments take precedence over parameters specified in the parameter file.

    Args:
        parameters_filepath: path to the .ini parameter file; an empty string skips file loading.
        arguments: optional dict of command line arguments; when non-empty it must contain
            the key 'argument_default_value' (entries equal to it are ignored).
        verbose: if True, pretty-print the resolved parameters.

    Returns:
        (parameters, conf_parameters): the flattened, type-cast parameter dict and the raw
        ConfigParser it was built from, or None when no parameter file was given.
    '''

    parameters = {}
    # BUG FIX: conf_parameters was previously unbound when parameters_filepath was
    # empty, raising UnboundLocalError at the return statement; initialize it up front.
    conf_parameters = None
    # BUG FIX: the default arguments=None crashed on .items(); treat None as "no arguments".
    if arguments is None:
        arguments = {}
    # If a parameter file is specified, load it
    if len(parameters_filepath) > 0:
        conf_parameters = configparser.ConfigParser()
        conf_parameters.read(parameters_filepath)
        nested_parameters = utils.convert_configparser_to_dictionary(conf_parameters)
        # Flatten the section -> {option: value} nesting into a single flat dict.
        for k,v in nested_parameters.items():
            parameters.update(v)
    # Ensure that any arguments specified in the command line overwrite parameters specified in the parameter file
    for k,v in arguments.items():
        if arguments[k] != arguments['argument_default_value']:
            parameters[k] = v
    for k,v in parameters.items():
        # If the value is a list delimited with a comma, choose one element at random.
        if ',' in v:
            v = random.choice(v.split(','))
            parameters[k] = v
        # Ensure that each parameter is cast to the correct type
        if k in ['character_embedding_dimension','character_lstm_hidden_state_dimension','token_embedding_dimension',
                 'token_lstm_hidden_state_dimension','patience','maximum_number_of_epochs','maximum_training_time','number_of_cpu_threads','number_of_gpus']:
            parameters[k] = int(v)
        elif k in ['dropout_rate', 'learning_rate', 'gradient_clipping_value']:
            parameters[k] = float(v)
        elif k in ['remap_unknown_tokens_to_unk', 'use_character_lstm', 'use_crf', 'train_model', 'use_pretrained_model', 'debug', 'verbose',
                 'reload_character_embeddings', 'reload_character_lstm', 'reload_token_embeddings', 'reload_token_lstm', 'reload_feedforward', 'reload_crf',
                 'check_for_lowercase', 'check_for_digits_replaced_with_zeros', 'freeze_token_embeddings', 'load_only_pretrained_token_embeddings']:
            parameters[k] = distutils.util.strtobool(v)
    if verbose: pprint(parameters)
    return parameters, conf_parameters
Code example #2
0
File: run_experiments.py  Project: braemy/NeuroNER
def parse_parameters(conf_parameters):
    """
    Flatten a ConfigParser into a single {option: value} dict and cast each
    known option from its raw string form to the correct type.

    Args:
        conf_parameters: a configparser.ConfigParser that has already been read.

    Returns:
        (parameters, conf_parameters): the flattened, type-cast parameter dict
        and the ConfigParser it was built from.
    """
    nested_parameters = utils.convert_configparser_to_dictionary(conf_parameters)
    parameters = {}
    for k, v in nested_parameters.items():
        # Merge every section's {option: value} dict into one flat dict.
        parameters.update(v)
    for k, v in parameters.items():
        # If the value is a list delimited with a comma, choose one element at random.
        if ',' in v:
            v = random.choice(v.split(','))
            parameters[k] = v
        # Ensure that each parameter is cast to the correct type.
        if k in ['character_embedding_dimension', 'character_lstm_hidden_state_dimension', 'token_embedding_dimension',
                 'token_lstm_hidden_state_dimension', 'patience', 'maximum_number_of_epochs', 'maximum_training_time',
                 'number_of_cpu_threads', 'number_of_gpus',
                 'character_hidden_layer', 'token_hidden_layer', 'embedding_dimension', 'batch_size',
                 'cross_validation', 'data_to_use']:
            # NOTE: a redundant `if k in parameters` guard was removed here --
            # k always comes from parameters.items(), so it was always true.
            parameters[k] = int(v)
        elif k in ['dropout_rate', 'learning_rate', 'gradient_clipping_value']:
            parameters[k] = float(v)
        elif k in ['remap_unknown_tokens_to_unk', 'use_character_lstm', 'use_crf', 'train_model',
                   'use_pretrained_model', 'debug', 'verbose',
                   'reload_character_embeddings', 'reload_character_lstm', 'reload_token_embeddings',
                   'reload_token_lstm', 'reload_feedforward', 'reload_crf',
                   'check_for_lowercase', 'check_for_digits_replaced_with_zeros', 'freeze_token_embeddings',
                   'load_only_pretrained_token_embeddings',
                   'gru_neuron']:
            parameters[k] = distutils.util.strtobool(v)
    return parameters, conf_parameters
Code example #3
0
def load_parameters(
        parameters_filepath=os.path.join('.', 'parameters.ini'), verbose=True):
    '''
    Load parameters from the ini file, and ensure that each parameter is cast to the correct type
    '''
    conf_parameters = configparser.ConfigParser()
    conf_parameters.read(parameters_filepath)
    nested_parameters = utils.convert_configparser_to_dictionary(
        conf_parameters)
    # Flatten the per-section dicts into a single {option: value} dict.
    parameters = {}
    for section_options in nested_parameters.values():
        parameters.update(section_options)

    int_options = (
        'character_embedding_dimension',
        'character_lstm_hidden_state_dimension',
        'token_embedding_dimension',
        'token_lstm_hidden_state_dimension', 'patience',
        'maximum_number_of_epochs', 'maximum_training_time',
        'number_of_cpu_threads', 'number_of_gpus')
    float_options = ('dropout_rate', 'learning_rate')
    bool_options = (
        'remap_unknown_tokens_to_unk', 'use_character_lstm', 'use_crf',
        'train_model', 'use_pretrained_model', 'debug', 'verbose')
    # Cast each known option from its raw string form to the correct type.
    for option, raw_value in parameters.items():
        if option in int_options:
            parameters[option] = int(raw_value)
        if option in float_options:
            parameters[option] = float(raw_value)
        if option in bool_options:
            parameters[option] = distutils.util.strtobool(raw_value)
    if verbose:
        pprint(parameters)
    return parameters, conf_parameters
Code example #4
0
def _get_config_param(param_filepath=None):
    """
    Get the parameters from the config file.
    """
    param = {}

    # If a parameter file is specified, load it
    if param_filepath:
        param_file_txt = configparser.ConfigParser()
        param_file_txt.read(param_filepath, encoding="UTF-8")
        nested_parameters = utils.convert_configparser_to_dictionary(
            param_file_txt)

        for k, v in nested_parameters.items():
            param.update(v)

    return param, param_file_txt
Code example #5
0
File: main.py  Project: revirevy/SelfSent
def load_parameters(
        parameters_filepath=os.path.join('.', 'parameters.ini'), verbose=True):
    '''
    Load parameters from the ini file, and ensure that each parameter is cast to the correct type
    '''
    conf_parameters = configparser.ConfigParser()
    conf_parameters.read(parameters_filepath)
    nested_parameters = utils.convert_configparser_to_dictionary(
        conf_parameters)
    # Merge every section's options into one flat {option: value} dict.
    parameters = {}
    for section_options in nested_parameters.values():
        parameters.update(section_options)

    int_options = (
        'gpu_device', 'max_length_sentence', 'maximum_number_of_epochs',
        'patience', 'seed', 'train_size', 'valid_size', 'test_size',
        'remap_to_unk_count_threshold', 'token_embedding_dimension',
        'number_of_cpu_threads', 'number_of_gpus',
        'lstm_hidden_state_dimension', 'batch_size', 'da', 'r',
        'mlp_hidden_layer_1_units')
    float_options = (
        'attention_visualization_conf', 'beta_penalized', 'beta_l2',
        'learning_rate', 'gradient_clipping_value', 'dropout_rate')
    bool_options = (
        'train_model', 'freeze_token_embeddings', 'do_split',
        'remap_unknown_tokens_to_unk', 'verbose', 'debug',
        'use_pretrained_model', 'load_only_pretrained_token_embeddings',
        'check_for_lowercase', 'check_for_digits_replaced_with_zeros')
    # Cast each known option from its raw string form to the correct type.
    for option, raw_value in parameters.items():
        if option in int_options:
            parameters[option] = int(raw_value)
        elif option in float_options:
            parameters[option] = float(raw_value)
        elif option in bool_options:
            parameters[option] = distutils.util.strtobool(raw_value)

    if verbose:
        pprint.pprint(parameters)

    return parameters, conf_parameters
Code example #6
0
File: main.py  Project: Por123/NeuroNER
def load_parameters():
    '''
    Load parameters from the ini file, and ensure that each parameter is cast to the correct type
    '''
    conf_parameters = configparser.ConfigParser()
    conf_parameters.read(os.path.join('.', 'parameters.ini'))
    nested_parameters = utils.convert_configparser_to_dictionary(
        conf_parameters)
    # Flatten the per-section dicts into one flat {option: value} dict.
    parameters = {}
    for section_options in nested_parameters.values():
        parameters.update(section_options)

    int_options = (
        'remove_unknown_tokens', 'character_embedding_dimension',
        'character_lstm_hidden_state_dimension',
        'token_embedding_dimension',
        'token_lstm_hidden_state_dimension', 'patience',
        'maximum_number_of_epochs', 'maximum_training_time',
        'number_of_cpu_threads', 'number_of_gpus')
    float_options = ('dropout_rate', 'learning_rate')
    bool_options = (
        'use_character_lstm', 'is_character_lstm_bidirect',
        'is_token_lstm_bidirect', 'use_crf')
    # Cast each known option from its raw string form to the correct type.
    for option, raw_value in parameters.items():
        if option in int_options:
            parameters[option] = int(raw_value)
        if option in float_options:
            parameters[option] = float(raw_value)
        if option in bool_options:
            parameters[option] = distutils.util.strtobool(raw_value)
    pprint(parameters)

    # Build the train/valid/test file paths from the dataset folder.
    data_folder = parameters['dataset_text_folder']
    dataset_filepaths = {}
    for split in ('train', 'valid', 'test'):
        dataset_filepaths[split] = os.path.join(data_folder, split + '.txt')

    return parameters, dataset_filepaths
Code example #7
0
def load_parameters(parameters_filepath, arguments=None, verbose=True):
    """
  Load parameters from the ini file if specified, take into account any
  command line argument, and ensure that each parameter is cast to the
  correct type.
  Command line arguments take precedence over parameters specified in the
  parameter file.

  Args:
      parameters_filepath: path to the .ini file; an empty string skips loading.
      arguments: optional dict of command line arguments; when non-empty it
          must contain 'argument_default_value' (entries equal to it are ignored).
      verbose: if True, pretty-print the resolved parameters.

  Returns:
      (parameters, conf_parameters): the resolved parameter dict and the raw
      ConfigParser, or None when no parameter file was given.
  """
    # Hard-coded defaults; any value loaded from file or command line overrides these.
    parameters = {
        'pretrained_model_folder': '../trained_models/conll_2003_en',
        'dataset_text_folder': '../data/conll2003/en',
        'character_embedding_dimension': 25,
        'character_lstm_hidden_state_dimension': 25,
        'check_for_digits_replaced_with_zeros': True,
        'check_for_lowercase': True,
        'debug': False,
        'dropout_rate': 0.5,
        'experiment_name': 'experiment',
        'freeze_token_embeddings': False,
        'gradient_clipping_value': 5.0,
        'learning_rate': 0.005,
        'load_only_pretrained_token_embeddings': False,
        'main_evaluation_mode': 'conll',
        'maximum_number_of_epochs': 100,
        'number_of_cpu_threads': 8,
        'number_of_gpus': 0,
        'optimizer': 'sgd',
        'output_folder': '../output',
        'patience': 10,
        'plot_format': 'pdf',
        'reload_character_embeddings': True,
        'reload_character_lstm': True,
        'reload_crf': True,
        'reload_feedforward': True,
        'reload_token_embeddings': True,
        'reload_token_lstm': True,
        'remap_unknown_tokens_to_unk': True,
        'spacylanguage': 'en',
        'tagging_format': 'bioes',
        'token_embedding_dimension': 100,
        'token_lstm_hidden_state_dimension': 100,
        'token_pretrained_embedding_filepath':
        '../data/word_vectors/glove.6B.100d.txt',
        'tokenizer': 'spacy',
        'train_model': True,
        'use_character_lstm': True,
        'use_crf': True,
        'use_pretrained_model': False,
        'verbose': False
    }

    # BUG FIX: conf_parameters was previously unbound when parameters_filepath
    # was empty, raising UnboundLocalError at the return; initialize it up front.
    conf_parameters = None
    # BUG FIX: replaced the mutable default argument `arguments={}` with a None
    # sentinel (same observable behavior, avoids the shared-dict pitfall).
    if arguments is None:
        arguments = {}
    # If a parameter file is specified, load it
    if len(parameters_filepath) > 0:
        conf_parameters = configparser.ConfigParser()
        conf_parameters.read(parameters_filepath, encoding="UTF-8")
        nested_parameters = utils.convert_configparser_to_dictionary(
            conf_parameters)
        # Flatten the section -> {option: value} nesting into a single dict.
        for k, v in nested_parameters.items():
            parameters.update(v)
    # Ensure that any arguments specified in the command line overwrite
    # parameters specified in the parameter file
    for k, v in arguments.items():
        if arguments[k] != arguments['argument_default_value']:
            parameters[k] = v
    for k, v in parameters.items():
        # Values may come from the defaults (already typed); normalize to str
        # so the comma check and the casts below work uniformly.
        v = str(v)
        # If the value is a list delimited with a comma, choose one element at
        # random.
        if ',' in v:
            v = random.choice(v.split(','))
            parameters[k] = v
        # Ensure that each parameter is cast to the correct type
        if k in [
                'character_embedding_dimension',
                'character_lstm_hidden_state_dimension',
                'token_embedding_dimension',
                'token_lstm_hidden_state_dimension', 'patience',
                'maximum_number_of_epochs', 'maximum_training_time',
                'number_of_cpu_threads', 'number_of_gpus'
        ]:
            parameters[k] = int(v)
        elif k in ['dropout_rate', 'learning_rate', 'gradient_clipping_value']:
            parameters[k] = float(v)
        elif k in [
                'remap_unknown_tokens_to_unk', 'use_character_lstm', 'use_crf',
                'train_model', 'use_pretrained_model', 'debug', 'verbose',
                'reload_character_embeddings', 'reload_character_lstm',
                'reload_token_embeddings', 'reload_token_lstm',
                'reload_feedforward', 'reload_crf', 'check_for_lowercase',
                'check_for_digits_replaced_with_zeros',
                'freeze_token_embeddings',
                'load_only_pretrained_token_embeddings'
        ]:
            parameters[k] = strtobool(v)
    # If loading pretrained model, set the model hyperparameters according to
    # the pretraining parameters
    if parameters['use_pretrained_model']:
        # Recursive call: read the pretrained model's own parameters.ini and
        # force architecture-defining options to match it.
        pretraining_parameters = load_parameters(
            parameters_filepath=os.path.join(
                parameters['pretrained_model_folder'], 'parameters.ini'),
            verbose=False)[0]
        for name in [
                'use_character_lstm', 'character_embedding_dimension',
                'character_lstm_hidden_state_dimension',
                'token_embedding_dimension',
                'token_lstm_hidden_state_dimension', 'use_crf'
        ]:
            if parameters[name] != pretraining_parameters[name]:
                print(
                    'WARNING: parameter {0} was overwritten from {1} to {2} to be '
                    'consistent with the pretrained model'.format(
                        name, parameters[name], pretraining_parameters[name]))
                parameters[name] = pretraining_parameters[name]
    if verbose: pprint(parameters)
    # TODO: update conf_parameters to reflect the overriding
    return parameters, conf_parameters
Code example #8
0
def main():
    """
    Train and evaluate an EntityLSTM sequence-labelling model end to end.

    Reads parameters from ./parameters.ini, loads the train/valid/test splits,
    builds the TensorFlow graph, trains one sequence at a time with shuffling,
    evaluates after every epoch (including the CoNLL perl script), and stops on
    early stopping (patience) or when the epoch limit is exceeded.
    """


    #### Parameters - start
    conf_parameters = configparser.ConfigParser()
    conf_parameters.read(os.path.join('.','parameters.ini'))
    nested_parameters = utils.convert_configparser_to_dictionary(conf_parameters)
    parameters = {}
    # Flatten the section -> {option: value} nesting into a single flat dict.
    for k,v in nested_parameters.items():
        parameters.update(v)
    # Cast each known option from its raw string form to the correct type.
    for k,v in parameters.items():
        if k in ['remove_unknown_tokens','character_embedding_dimension','character_lstm_hidden_state_dimension','token_embedding_dimension','token_lstm_hidden_state_dimension',
                 'patience','maximum_number_of_epochs','maximum_training_time','number_of_cpu_threads','number_of_gpus']:
            parameters[k] = int(v)
        if k in ['dropout_rate']:
            parameters[k] = float(v)
        if k in ['use_character_lstm','is_character_lstm_bidirect','is_token_lstm_bidirect','use_crf']:
            parameters[k] = distutils.util.strtobool(v)
    pprint(parameters)

    # Load dataset
    dataset_filepaths = {}
    dataset_filepaths['train'] = os.path.join(parameters['dataset_text_folder'], 'train.txt')
    dataset_filepaths['valid'] = os.path.join(parameters['dataset_text_folder'], 'valid.txt')
    dataset_filepaths['test']  = os.path.join(parameters['dataset_text_folder'], 'test.txt')
    dataset = ds.Dataset()
    dataset.load_dataset(dataset_filepaths, parameters)


    with tf.Graph().as_default():
        session_conf = tf.ConfigProto(
          device_count={'CPU': 1, 'GPU': 1},
          allow_soft_placement=True, #  automatically choose an existing and supported device to run the operations in case the specified one doesn't exist
          log_device_placement=False
          )

        sess = tf.Session(config=session_conf)

        with sess.as_default():
            model = EntityLSTM(dataset, parameters)

            # Define training procedure
            global_step = tf.Variable(0, name="global_step", trainable=False)
            if parameters['optimizer'] == 'adam':
                optimizer = tf.train.AdamOptimizer(1e-3)
            elif parameters['optimizer'] == 'sgd':
                optimizer = tf.train.GradientDescentOptimizer(0.005)
            else:
                raise ValueError("The lr_method parameter must be either adam or sgd.")

            # https://github.com/google/prettytensor/issues/6
            # https://www.tensorflow.org/api_docs/python/framework/graph_collections

            #print('tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) : {0}'.format(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) ))
            #print('tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) : {0}'.format(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) ))
            #print('tf.get_collection(tf.GraphKeys.MODEL_VARIABLES) : {0}'.format(tf.get_collection(tf.GraphKeys.MODEL_VARIABLES) ))

            # https://github.com/blei-lab/edward/issues/286#ref-pullrequest-181330211 : utility function to get all tensorflow variables a node depends on


            grads_and_vars = optimizer.compute_gradients(model.loss)

            # By defining a global_step variable and passing it to the optimizer we allow TensorFlow handle the counting of training steps for us.
            # The global step will be automatically incremented by one every time you execute train_op.
            train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)


            # Initialize all variables
            sess.run(tf.global_variables_initializer())

            # Load pretrained token embeddings
            if not parameters['token_pretrained_embedding_filepath'] == '':
                load_token_embeddings(sess, model.W, dataset, parameters)


            estop = False  # early stop
            start_time = time.time()
            experiment_timestamp = utils.get_current_time_in_miliseconds()
            results = {}
            #results['model_options'] = copy.copy(model_options)
            #results['model_options'].pop('optimizer', None)
            results['epoch'] = {}
            # save/initialize execution details
            results['execution_details'] = {}
            results['execution_details']['train_start'] = start_time
            results['execution_details']['time_stamp'] = experiment_timestamp
            results['execution_details']['early_stop'] = False
            results['execution_details']['keyboard_interrupt'] = False
            results['execution_details']['num_epochs'] = 0
            results['model_options'] = copy.copy(parameters)

            dataset_name = utils.get_basename_without_extension(parameters['dataset_text_folder']) #opts.train.replace('/', '_').split('.')[0] # 'conll2003en'
            model_name = '{0}_{1}'.format(dataset_name, results['execution_details']['time_stamp'])

            # Output folders are keyed by dataset name + experiment timestamp.
            output_folder=os.path.join('..', 'output')
            stats_graph_folder=os.path.join(output_folder, model_name) # Folder where to save graphs
            utils.create_folder_if_not_exists(output_folder)
            print('stats_graph_folder: {0}'.format(stats_graph_folder))
            utils.create_folder_if_not_exists(stats_graph_folder)
            model_folder = os.path.join(stats_graph_folder, 'model')
            utils.create_folder_if_not_exists(model_folder)

            step = 0
            bad_counter = 0
            previous_best_valid_f1_score = 0
            # NOTE(review): CRF transition scores appear to be re-estimated by
            # each train_step call (project helper) -- confirm against train_step.
            transition_params_trained = np.random.rand(len(dataset.unique_labels),len(dataset.unique_labels))
            try:
                while True:
                    # One "epoch" = one full pass over the training sequences
                    # (step counts individual sequences, see the inner loop).
                    epoch_number = math.floor(step / len(dataset.token_indices['train']))
                    print('epoch_number: {0}'.format(epoch_number))

                    epoch_start_time = time.time()

                    #print('step: {0}'.format(step))

                    # Train model: loop over all sequences of training set with shuffling
                    sequence_numbers=list(range(len(dataset.token_indices['train'])))
                    random.shuffle(sequence_numbers)
                    for sequence_number in sequence_numbers:
                        transition_params_trained = train_step(sess, dataset, sequence_number, train_op, global_step, model, transition_params_trained, parameters)
                        step += 1
                        if sequence_number % 100 == 0:
                            print('.',end='', flush=True)
                            #break

                    # Evaluate model
                    print('step: {0}'.format(step))
                    all_predictions = {}
                    all_y_true  = {}
                    output_filepaths = {}
                    for dataset_type in ['train', 'valid', 'test']:
                        print('dataset_type:     {0}'.format(dataset_type))
                        all_predictions[dataset_type], all_y_true[dataset_type], output_filepaths[dataset_type] = evaluate_model(sess, dataset, dataset_type, model, transition_params_trained, step, stats_graph_folder, epoch_number, parameters)
                        model_options = None

                    # Save and plot results
                    # TODO: remove uidx
                    uidx = 0
                    results['epoch'][epoch_number] = []
                    results['execution_details']['num_epochs'] = epoch_number

                    epoch_elapsed_training_time = time.time() - epoch_start_time
                    print('epoch_elapsed_training_time: {0:02f} seconds'.format(epoch_elapsed_training_time))

                    assess_model.assess_and_save(results, dataset, model_options, all_predictions, all_y_true, stats_graph_folder, epoch_number, uidx, epoch_start_time)
                    assess_model.plot_f1_vs_epoch(results, stats_graph_folder, 'f1_score')
                    assess_model.plot_f1_vs_epoch(results, stats_graph_folder, 'accuracy_score')

                    # CoNLL evaluation script
                    # Runs the official perl conlleval script over each split's
                    # prediction file and folds the parsed scores into results.
                    for dataset_type in ['train', 'valid', 'test']:
                        conll_evaluation_script = os.path.join('.', 'conlleval')
                        conll_output_filepath = '{0}_conll_evaluation.txt'.format(output_filepaths[dataset_type])
                        shell_command = 'perl {0} < {1} > {2}'.format(conll_evaluation_script, output_filepaths[dataset_type], conll_output_filepath)
                        print('shell_command: {0}'.format(shell_command))
                        #subprocess.call([shell_command])
                        os.system(shell_command)
                        conll_parsed_output = utils_nlp.get_parsed_conll_output(conll_output_filepath)
                        print('conll_parsed_output: {0}'.format(conll_parsed_output))
                        results['epoch'][epoch_number][0][dataset_type]['conll'] = conll_parsed_output
                        results['epoch'][epoch_number][0][dataset_type]['f1_conll'] = {}
                        results['epoch'][epoch_number][0][dataset_type]['f1_conll']['micro'] = results['epoch'][epoch_number][0][dataset_type]['conll']['all']['f1']
                    assess_model.plot_f1_vs_epoch(results, stats_graph_folder, 'f1_conll', from_json=False)

                    #end_time = time.time()
                    #results['execution_details']['train_duration'] = end_time - start_time
                    #results['execution_details']['train_end'] = end_time

                    # Early stop
                    # bad_counter tracks consecutive epochs without a new best
                    # validation F1; training stops once it exceeds patience.
                    valid_f1_score = results['epoch'][epoch_number][0]['valid']['f1_score']['micro']
                    if  valid_f1_score > previous_best_valid_f1_score:
                        bad_counter = 0
                        previous_best_valid_f1_score = valid_f1_score
                    else:
                        bad_counter += 1


                    if bad_counter > parameters['patience']:
                        print('Early Stop!')
                        results['execution_details']['early_stop'] = True
                        break

                    if epoch_number > parameters['maximum_number_of_epochs']: break

            except KeyboardInterrupt:
                results['execution_details']['keyboard_interrupt'] = True
        #         assess_model.save_results(results, stats_graph_folder)
                print('Training interrupted')

            # Record final timing and persist results regardless of how the
            # training loop ended (normal, early stop, or interrupt).
            print('Finishing the experiment')
            end_time = time.time()
            results['execution_details']['train_duration'] = end_time - start_time
            results['execution_details']['train_end'] = end_time
            assess_model.save_results(results, stats_graph_folder)

    sess.close() # release the session's resources
Code example #9
0
File: main.py  Project: vasswhite/NeuroNER
def main():

    #### Parameters - start
    conf_parameters = configparser.ConfigParser()
    conf_parameters.read(os.path.join('.', 'parameters.ini'))
    nested_parameters = utils.convert_configparser_to_dictionary(
        conf_parameters)
    parameters = {}
    for k, v in nested_parameters.items():
        parameters.update(v)
    for k, v in parameters.items():
        if k in [
                'remove_unknown_tokens', 'character_embedding_dimension',
                'character_lstm_hidden_state_dimension',
                'token_embedding_dimension',
                'token_lstm_hidden_state_dimension', 'patience',
                'maximum_number_of_epochs', 'maximum_training_time',
                'number_of_cpu_threads', 'number_of_gpus'
        ]:
            parameters[k] = int(v)
        if k in ['dropout_rate']:
            parameters[k] = float(v)
        if k in [
                'use_character_lstm', 'is_character_lstm_bidirect',
                'is_token_lstm_bidirect', 'use_crf'
        ]:
            parameters[k] = distutils.util.strtobool(v)
    pprint(parameters)

    # Load dataset
    dataset_filepaths = {}
    dataset_filepaths['train'] = os.path.join(
        parameters['dataset_text_folder'], 'train.txt')
    dataset_filepaths['valid'] = os.path.join(
        parameters['dataset_text_folder'], 'valid.txt')
    dataset_filepaths['test'] = os.path.join(parameters['dataset_text_folder'],
                                             'test.txt')
    dataset = ds.Dataset()
    dataset.load_dataset(dataset_filepaths, parameters)

    with tf.Graph().as_default():
        session_conf = tf.ConfigProto(
            device_count={
                'CPU': 1,
                'GPU': 1
            },
            allow_soft_placement=
            True,  #  automatically choose an existing and supported device to run the operations in case the specified one doesn't exist
            log_device_placement=False)

        sess = tf.Session(config=session_conf)

        with sess.as_default():
            # Instantiate model
            model = EntityLSTM(dataset, parameters)
            sess.run(tf.global_variables_initializer())
            model.load_pretrained_token_embeddings(sess, dataset, parameters)

            # Initialize and save execution details
            start_time = time.time()
            experiment_timestamp = utils.get_current_time_in_miliseconds()
            results = {}
            #results['model_options'] = copy.copy(model_options)
            #results['model_options'].pop('optimizer', None)
            results['epoch'] = {}
            results['execution_details'] = {}
            results['execution_details']['train_start'] = start_time
            results['execution_details']['time_stamp'] = experiment_timestamp
            results['execution_details']['early_stop'] = False
            results['execution_details']['keyboard_interrupt'] = False
            results['execution_details']['num_epochs'] = 0
            results['model_options'] = copy.copy(parameters)

            dataset_name = utils.get_basename_without_extension(
                parameters['dataset_text_folder']
            )  #opts.train.replace('/', '_').split('.')[0] # 'conll2003en'
            model_name = '{0}_{1}'.format(
                dataset_name, results['execution_details']['time_stamp'])

            output_folder = os.path.join('..', 'output')
            utils.create_folder_if_not_exists(output_folder)
            stats_graph_folder = os.path.join(
                output_folder, model_name)  # Folder where to save graphs
            #print('stats_graph_folder: {0}'.format(stats_graph_folder))
            utils.create_folder_if_not_exists(stats_graph_folder)
            #             model_folder = os.path.join(stats_graph_folder, 'model')
            #             utils.create_folder_if_not_exists(model_folder)

            step = 0
            bad_counter = 0
            previous_best_valid_f1_score = 0
            transition_params_trained = np.random.rand(
                len(dataset.unique_labels), len(dataset.unique_labels))
            try:
                while True:
                    epoch_number = math.floor(
                        step / len(dataset.token_indices['train']))
                    print('\nStarting epoch {0}'.format(epoch_number), end='')

                    epoch_start_time = time.time()
                    #print('step: {0}'.format(step))

                    # Train model: loop over all sequences of training set with shuffling
                    sequence_numbers = list(
                        range(len(dataset.token_indices['train'])))
                    random.shuffle(sequence_numbers)
                    for sequence_number in sequence_numbers:
                        transition_params_trained = train.train_step(
                            sess, dataset, sequence_number, model,
                            transition_params_trained, parameters)
                        step += 1
                        if step % 100 == 0:
                            print('.', end='', flush=True)
                            #break
                    print('.', flush=True)
                    #print('step: {0}'.format(step))

                    # Predict labels using trained model
                    all_predictions = {}
                    all_y_true = {}
                    output_filepaths = {}
                    for dataset_type in ['train', 'valid', 'test']:
                        #print('dataset_type:     {0}'.format(dataset_type))
                        prediction_output = train.prediction_step(
                            sess, dataset, dataset_type, model,
                            transition_params_trained, step,
                            stats_graph_folder, epoch_number, parameters)
                        all_predictions[dataset_type], all_y_true[
                            dataset_type], output_filepaths[
                                dataset_type] = prediction_output
#                         model_options = None

                    epoch_elapsed_training_time = time.time(
                    ) - epoch_start_time
                    print(
                        'epoch_elapsed_training_time: {0:.2f} seconds'.format(
                            epoch_elapsed_training_time))

                    results['execution_details']['num_epochs'] = epoch_number

                    # Evaluate model: save and plot results
                    evaluate.evaluate_model(results, dataset, all_predictions,
                                            all_y_true, stats_graph_folder,
                                            epoch_number, epoch_start_time,
                                            output_filepaths)

                    # Early stop
                    valid_f1_score = results['epoch'][epoch_number][0][
                        'valid']['f1_score']['micro']
                    if valid_f1_score > previous_best_valid_f1_score:
                        bad_counter = 0
                        previous_best_valid_f1_score = valid_f1_score
                    else:
                        bad_counter += 1

                    if bad_counter > parameters['patience']:
                        print('Early Stop!')
                        results['execution_details']['early_stop'] = True
                        break

                    if epoch_number > parameters['maximum_number_of_epochs']:
                        break


#                     break # debugging

            except KeyboardInterrupt:
                results['execution_details']['keyboard_interrupt'] = True
                #         assess_model.save_results(results, stats_graph_folder)
                print('Training interrupted')

            print('Finishing the experiment')
            end_time = time.time()
            results['execution_details'][
                'train_duration'] = end_time - start_time
            results['execution_details']['train_end'] = end_time
            evaluate.save_results(results, stats_graph_folder)

    sess.close()  # release the session's resources