Code Example #1
def soft_train(network, args):
    device = torch.device("cuda" if args.gpu_flag else "cpu")
    optimizer, scheduler = get_optimizer(network, args)

    train_data_set = get_data_set(args, train_flag=True)
    test_data_set = get_data_set(args, train_flag=False)
    train_data_loader = torch.utils.data.DataLoader(train_data_set,
                                                    batch_size=args.batch_size,
                                                    shuffle=True)
    test_data_loader = torch.utils.data.DataLoader(test_data_set,
                                                   batch_size=args.batch_size,
                                                   shuffle=False)

    print("-*-" * 10 + "\n\t\tTrain network\n" + "-*-" * 10)
    for epoch in range(0, args.epoch):
        network = network.cpu()
        if args.network == "vgg":
            network = soft_prune_vgg_step(network, args.prune_rate[0])
        elif args.network == 'resnet':
            network = soft_prune_resnet_step(network, args.prune_rate)
        network = network.to(device)
        train_step(network, train_data_loader, test_data_loader, optimizer,
                   device, epoch)
        if scheduler is not None:
            scheduler.step()

    return network
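The loop above delegates optimizer and scheduler construction to a get_optimizer helper. A minimal sketch of what such a helper might look like (hypothetical; the original project may configure it differently), assuming plain SGD with a step-decay schedule:

import torch

def get_optimizer(network, args):
    # SGD with momentum; args.epoch is used in the loop above, args.lr is assumed
    optimizer = torch.optim.SGD(network.parameters(), lr=args.lr,
                                momentum=0.9, weight_decay=5e-4)
    # decay the learning rate by 10x halfway through training
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=args.epoch // 2,
                                                gamma=0.1)
    return optimizer, scheduler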
Code Example #2
File: main.py  Project: rickyota/CSML
def Cell_Segmentation():

    print("Start Cell Segmentation.")
    print(datetime.now().strftime("%Y/%m/%d %H:%M:%S"))

    args = argument()

    if not args.flaginfer:
        print("Train and infer.")
        try:
            start = time.time()
            train_step(fname_train=os.path.join("data", args.train),
                       fname_label=os.path.join("data", args.label),
                       fname_model=os.path.join("data", args.model),
                       N_test=args.ntest, N_train=args.ntrain, N_epoch=args.nepoch, batchsize=args.nbatch,
                       hgh=args.height, wid=args.width,
                       mode=args.mode)
            print("train time", time.time() - start)
        except Exception as e:
            raise Exception("Got an error in training step.") from e
        try:
            start = time.time()
            infer_step(fname_infer=os.path.join("data", args.infer),
                       fname_save=os.path.join("result", args.output),
                       fname_model=os.path.join("data", args.model),
                       thre_discard=args.discard, wid_dilate=args.close,
                       fstats=args.nostats)
            print("infer time", time.time() - start)
        except Exception as e:
            raise Exception("Got an error in inference step.") from e

        print("All done.")

    else:
        print("Only Infer.")
        try:
            start = time.time()
            infer_step(fname_infer=os.path.join("data", args.infer),
                       fname_save=os.path.join("result", args.output),
                       fname_model=os.path.join("data", args.model),
                       thre_discard=args.discard, wid_dilate=args.close,
                       fstats=args.nostats)
            print("infer time", time.time() - start)
        except Exception as e:
            raise Exception("Got an error in inference step.") from e

        print("All done.")
Code Example #3
    def test_train_one_step(self):
        batch = train.get_batch(128)

        optimizer = create_test_optimizer()
        key = random.PRNGKey(0)
        _, train_metrics = train.train_step(optimizer, batch, key)

        self.assertLessEqual(train_metrics['loss'], 5)
        self.assertGreaterEqual(train_metrics['accuracy'], 0)
Code Example #4
File: train_test.py  Project: mfuntowicz/flax
    def test_train_one_step(self):
        batch = train.get_batch(128)
        rng = random.PRNGKey(0)

        model = train.create_model(rng)
        optimizer = train.create_optimizer(model, 0.003)
        optimizer, train_metrics = train.train_step(optimizer, batch)

        self.assertLessEqual(train_metrics['loss'], 5)
        self.assertGreaterEqual(train_metrics['accuracy'], 0)
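These tests exercise a train_step(optimizer, batch) built on the pre-Linen flax.optim API. A sketch of what such a step typically looks like, modeled on the Flax MNIST example (the inlined loss and metric code here are simplified stand-ins, not the project's exact implementation):

import jax
import jax.numpy as jnp

@jax.jit
def train_step(optimizer, batch):
    def loss_fn(model):
        logits = model(batch['image'])
        # mean one-hot cross-entropy
        one_hot = jax.nn.one_hot(batch['label'], logits.shape[-1])
        loss = -jnp.mean(jnp.sum(one_hot * jax.nn.log_softmax(logits), axis=-1))
        return loss, logits
    (loss, logits), grad = jax.value_and_grad(loss_fn, has_aux=True)(optimizer.target)
    optimizer = optimizer.apply_gradient(grad)  # flax.optim update
    metrics = {'loss': loss,
               'accuracy': jnp.mean(jnp.argmax(logits, -1) == batch['label'])}
    return optimizer, metrics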
Code Example #5
File: train_test.py  Project: vballoli/flax
    def test_train_one_step(self):
        batch = train.get_batch(128)
        rng = random.PRNGKey(0)

        with nn.stochastic(rng):
            model = train.create_model(nn.make_rng())
            optimizer = train.create_optimizer(model, 0.003)
            optimizer, train_metrics = train.train_step(
                optimizer, batch, nn.make_rng())

        self.assertLessEqual(train_metrics['loss'], 5)
        self.assertGreaterEqual(train_metrics['accuracy'], 0)
Code Example #6
    def train(self, x_train_unlabeled, x_train_labeled, x_val_unlabeled, lr,
              drop, patience, num_epochs):
        # create handler for early stopping and learning rate scheduling
        self.lh = LearningHandler(lr=lr,
                                  drop=drop,
                                  lr_tensor=self.learning_rate,
                                  patience=patience)

        losses = np.empty((num_epochs, ))
        val_losses = np.empty((num_epochs, ))

        # begin spectralnet training loop
        self.lh.on_train_begin()
        for i in range(num_epochs):
            # train spectralnet
            losses[i] = train.train_step(return_var=[self.loss],
                                         updates=self.net.updates +
                                         [self.train_step],
                                         x_unlabeled=x_train_unlabeled,
                                         inputs=self.inputs,
                                         y_true=self.y_true,
                                         batch_sizes=self.batch_sizes,
                                         x_labeled=x_train_labeled,
                                         y_labeled=self.y_train_labeled_onehot,
                                         batches_per_epoch=100)[0]

            # get validation loss
            val_losses[i] = train.predict_sum(
                self.loss,
                x_unlabeled=x_val_unlabeled,
                inputs=self.inputs,
                y_true=self.y_true,
                x_labeled=x_train_unlabeled[0:0],
                y_labeled=self.y_train_labeled_onehot,
                batch_sizes=self.batch_sizes)

            # do early stopping if necessary
            if self.lh.on_epoch_end(i, val_losses[i]):
                print('STOPPING EARLY')
                break

            # print training status
            print("Epoch: {}, loss={:2f}, val_loss={:2f}".format(
                i, losses[i], val_losses[i]))

        return losses[:i], val_losses[:i]
Code Example #7
File: train_test.py  Project: yanndupis/flax
    def test_single_train_step(self):
        prng = random.PRNGKey(0)
        #       0 (0)
        #      / \
        # (0) 1 - 2 (1)
        edge_list = [(0, 0), (1, 2), (2, 0)]
        node_labels = [0, 0, 1]
        node_feats, node_labels, sources, targets = train.create_graph_data(
            edge_list=edge_list, node_labels=node_labels)
        _, initial_params = GNN.init(prng, node_x=node_feats, edge_x=None,
                                     sources=sources, targets=targets)
        model = nn.Model(GNN, initial_params)
        optimizer = optim.Adam(learning_rate=0.01).create(model)
        _, loss = train.train_step(optimizer=optimizer, node_feats=node_feats,
                                   sources=sources, targets=targets)

        self.assertGreater(loss, 0.0)
Code Example #8
File: train_test.py  Project: yanndupis/flax
    def test_single_train_step(self):
        train_ds, test_ds = train.get_datasets()
        batch_size = 32
        model = train.create_model(random.PRNGKey(0))
        optimizer = train.create_optimizer(model, 0.1, 0.9)

        # test single train step.
        optimizer, train_metrics = train.train_step(
            optimizer=optimizer,
            batch={k: v[:batch_size]
                   for k, v in train_ds.items()})
        self.assertLessEqual(train_metrics['loss'], 2.302)
        self.assertGreaterEqual(train_metrics['accuracy'], 0.0625)

        # Run eval model.
        loss, accuracy = train.eval_model(optimizer.target, test_ds)
        self.assertLess(loss, 2.252)
        self.assertGreater(accuracy, 0.2597)
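The create_optimizer(model, 0.1, 0.9) call pairs a learning rate with a momentum coefficient. A sketch of the helper as it appears in the Flax MNIST example (assumed; the project under test may differ):

from flax import optim

def create_optimizer(model, learning_rate, beta):
    # Momentum optimizer from the pre-Linen flax.optim API
    optimizer_def = optim.Momentum(learning_rate=learning_rate, beta=beta)
    return optimizer_def.create(model)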
Code Example #9
def translate(args):
    if os.path.exists("checkpoint/encoder.h5") and \
            os.path.exists("checkpoint/decoder.h5"):
        hidden = [tf.zeros((1, 256))]

        # runs forward propagation through the models to build the model
        _ = train.train_step(encoder, decoder, tf.ones((1, 2)),
                             tf.ones((1, 2)), hidden)

        encoder.load_weights("checkpoint/encoder.h5")
        decoder.load_weights("checkpoint/decoder.h5")
        print("models loaded....")

    result = ''
    attention_plot = np.zeros((args.max_length, args.max_length))
    sentence = Utils.normalizeString(args.sentence)
    sentence = Utils.sentenceToIndexes(sentence, train.input_lang)
    sentence = tf.keras.preprocessing.sequence.pad_sequences(
        [sentence], padding='post', maxlen=args.max_length, truncating='post')

    hidden = [tf.zeros((1, 256))]

    enc_out, enc_hidden = encoder(sentence, hidden)

    dec_hidden = enc_hidden
    SOS_tensor = np.array([SOS_token])
    dec_input = tf.squeeze(tf.expand_dims([SOS_tensor], 1), -1)

    for tx in range(args.max_length):
        dec_out, dec_hidden, attn_weights = decoder(dec_input, enc_out,
                                                    dec_hidden)
        attn_weights = tf.reshape(attn_weights, (-1, ))
        attention_plot[tx] = attn_weights.numpy()
        pred = tf.argmax(dec_out, axis=1).numpy()
        result += train.output_lang.index2word[pred[0]] + " "
        if train.output_lang.index2word[pred[0]] == "EOS":
            break
        dec_input = tf.expand_dims(pred, axis=1)
    return result, attention_plot
Code Example #10
File: main.py  Project: justeuer/sopro-nlpwithnn
        variables = encoder.trainable_variables + decoder.trainable_variables
        gradients = tape.gradient(loss, variables)
        optimizer.apply_gradients(zip(gradients, variables))

        return batch_loss
# train
EPOCHS = 10

for epoch in range(EPOCHS):
  start = time.time()

  encoded_hidden = encoder.initialize_hidden_state()
  total_loss = 0

  for (batch, (inp, targ)) in enumerate(dataset.take(steps_per_epoch)):
      # avoid shadowing the built-in `input`
      batch_loss = train_step(inp, targ, encoded_hidden)
      total_loss += batch_loss

      if batch % 100 == 0:
          print('Epoch {} Batch {} Loss {:.4f}'.format(
              epoch + 1, batch, batch_loss.numpy()))
  # saving (checkpoint) the model every 2 epochs
  if (epoch + 1) % 2 == 0:
    checkpoint.save(file_prefix=checkpoint_prefix)

  print('Epoch {} Loss {:.4f}'.format(epoch + 1,
                                      total_loss / steps_per_epoch))
  print('Time taken for 1 epoch {} sec\n'.format(time.time() - start))

def evaluate(sentence):
Code Example #11
File: main.py  Project: lunnada/transferNER
def main(argv=sys.argv):
    ''' NeuroNER main method

    Args:
        parameters_filepath the path to the parameters file
        output_folder the path to the output folder
    '''
    arguments = parse_arguments(argv[1:])
    parameters, conf_parameters = load_parameters(
        arguments['parameters_filepath'], arguments=arguments)
    dataset_filepaths, dataset_brat_folders = get_valid_dataset_filepaths(
        parameters)
    check_parameter_compatiblity(parameters, dataset_filepaths)

    # Load dataset
    dataset = ds.Dataset(verbose=parameters['verbose'],
                         debug=parameters['debug'])
    dataset.load_dataset(dataset_filepaths, parameters)

    # Create graph and session
    with tf.device('/gpu:0'):
        with tf.Graph().as_default():
            session_conf = tf.ConfigProto(
                intra_op_parallelism_threads=parameters[
                    'number_of_cpu_threads'],
                inter_op_parallelism_threads=parameters[
                    'number_of_cpu_threads'],
                device_count={
                    'CPU': 1,
                    'GPU': parameters['number_of_gpus']
                },
                allow_soft_placement=True,
                # automatically choose an existing and supported device to run the operations in case the specified one doesn't exist
                log_device_placement=False)

            sess = tf.Session(config=session_conf)

            with sess.as_default():

                start_time = time.time()
                experiment_timestamp = utils.get_current_time_in_miliseconds()
                results = {}
                results['epoch'] = {}
                results['execution_details'] = {}
                results['execution_details']['train_start'] = start_time
                results['execution_details'][
                    'time_stamp'] = experiment_timestamp
                results['execution_details']['early_stop'] = False
                results['execution_details']['keyboard_interrupt'] = False
                results['execution_details']['num_epochs'] = 0
                results['model_options'] = copy.copy(parameters)

                dataset_name = utils.get_basename_without_extension(
                    parameters['dataset_text_folder'])
                model_name = dataset_name
                utils.create_folder_if_not_exists(parameters['output_folder'])
                stats_graph_folder = os.path.join(
                    parameters['output_folder'],
                    model_name)  # Folder where to save graphs
                final_weights_folder = os.path.join(
                    parameters['output_folder'], 'weights')
                utils.create_folder_if_not_exists(stats_graph_folder)
                utils.create_folder_if_not_exists(final_weights_folder)
                model_folder = os.path.join(stats_graph_folder, 'model')
                utils.create_folder_if_not_exists(model_folder)
                # saving the parameter setting to the output model dir. For later resuming training
                with open(os.path.join(model_folder, 'parameters.ini'),
                          'w') as parameters_file:
                    conf_parameters.write(parameters_file)
                tensorboard_log_folder = os.path.join(stats_graph_folder,
                                                      'tensorboard_logs')
                utils.create_folder_if_not_exists(tensorboard_log_folder)
                tensorboard_log_folders = {}
                for dataset_type in dataset_filepaths.keys():
                    tensorboard_log_folders[dataset_type] = os.path.join(
                        stats_graph_folder, 'tensorboard_logs', dataset_type)
                    utils.create_folder_if_not_exists(
                        tensorboard_log_folders[dataset_type])
                pickle.dump(
                    dataset,
                    open(os.path.join(model_folder, 'dataset.pickle'), 'wb'))

                # Instantiate the model
                # graph initialization should be before FileWriter, otherwise the graph will not appear in TensorBoard
                model = EntityLSTM(dataset, parameters)

                # Instantiate the writers for TensorBoard
                writers = {}
                for dataset_type in dataset_filepaths.keys():
                    writers[dataset_type] = tf.summary.FileWriter(
                        tensorboard_log_folders[dataset_type],
                        graph=sess.graph)
                # embedding_writer has to write in model_folder, otherwise TensorBoard won't be able to view embeddings
                embedding_writer = tf.summary.FileWriter(model_folder)

                embeddings_projector_config = projector.ProjectorConfig()
                tensorboard_token_embeddings = embeddings_projector_config.embeddings.add()
                tensorboard_token_embeddings.tensor_name = model.token_embedding_weights.name
                token_list_file_path = os.path.join(
                    model_folder, 'tensorboard_metadata_tokens.tsv')
                tensorboard_token_embeddings.metadata_path = os.path.relpath(
                    token_list_file_path, '..')

                tensorboard_character_embeddings = embeddings_projector_config.embeddings.add()
                tensorboard_character_embeddings.tensor_name = model.character_embedding_weights.name
                character_list_file_path = os.path.join(
                    model_folder, 'tensorboard_metadata_characters.tsv')
                tensorboard_character_embeddings.metadata_path = os.path.relpath(
                    character_list_file_path, '..')

                projector.visualize_embeddings(embedding_writer,
                                               embeddings_projector_config)

                # Write metadata for TensorBoard embeddings
                token_list_file = codecs.open(token_list_file_path, 'w',
                                              'latin-1')
                for token_index in range(dataset.vocabulary_size):
                    token_list_file.write('{0}\n'.format(
                        dataset.index_to_token[token_index]))
                token_list_file.close()

                character_list_file = codecs.open(character_list_file_path,
                                                  'w', 'latin-1')
                for character_index in range(dataset.alphabet_size):
                    if character_index == dataset.PADDING_CHARACTER_INDEX:
                        character_list_file.write('PADDING\n')
                    else:
                        character_list_file.write('{0}\n'.format(
                            dataset.index_to_character[character_index]))
                character_list_file.close()

                # Initialize the model
                sess.run(tf.global_variables_initializer())
                if not parameters['use_pretrained_model']:
                    model.load_pretrained_token_embeddings(
                        sess, dataset, parameters)

                # Start training + evaluation loop. Each iteration corresponds to 1 epoch.
                patience_counter = 0
                f1_score_best = 0
                f1_scores = {'train-F1': [], 'valid-F1': [], 'test-F1': []}
                f1_scores_conll = {
                    'train-F1': [],
                    'valid-F1': [],
                    'test-F1': []
                }
                transition_params_trained = np.random.rand(
                    len(dataset.unique_labels) + 2,
                    len(dataset.unique_labels) + 2)
                model_saver = tf.train.Saver(
                    max_to_keep=parameters['num_of_model_to_keep'])
                epoch_number = -1
                try:
                    while True:
                        step = 0
                        epoch_number += 1
                        print('\nStarting epoch {0}'.format(epoch_number))

                        epoch_start_time = time.time()

                        # use pre-trained model and epoch_number = 0
                        if parameters['use_pretrained_model'] and epoch_number == 0:

                            if parameters['use_adapter']:
                                parameters['use_adapter'] = False
                                transition_params_trained = train.restore_pretrained_model(
                                    parameters, dataset, sess, model,
                                    model_saver)
                                print(
                                    'Getting the 3-label predictions from the step1 model.'
                                )
                                all_pred_labels, y_pred_for_adapter, y_true_for_adapter, \
                                output_filepaths = train.predict_labels(sess, model,
                                                                        transition_params_trained,
                                                                        parameters, dataset,
                                                                        epoch_number,
                                                                        stats_graph_folder,
                                                                        dataset_filepaths,
                                                                        for_adapter=True)
                                # use the label2idx mapping (for adapter) in the dataset to transform all_pred_labels
                                all_pred_indices = {}
                                for dataset_type in dataset_filepaths.keys():
                                    all_pred_indices[dataset_type] = []
                                    for i in range(len(all_pred_labels[dataset_type])):
                                        indices = [
                                            dataset.label_adapter_to_index[label]
                                            for label in all_pred_labels[dataset_type][i]
                                        ]
                                        all_pred_indices[dataset_type].append(indices)

                                # and use binarizer to transform to ndarray
                                label_binarizer_adapter = sklearn.preprocessing.LabelBinarizer()
                                label_binarizer_adapter.fit(
                                    range(max(dataset.index_to_label_adapter.keys()) + 1))
                                predicted_label_adapter_vector_indices = {}
                                for dataset_type in dataset_filepaths.keys():
                                    predicted_label_adapter_vector_indices[dataset_type] = []
                                    for label_indices_sequence in all_pred_indices[dataset_type]:
                                        predicted_label_adapter_vector_indices[dataset_type].append(
                                            label_binarizer_adapter.transform(label_indices_sequence))
                                parameters['use_adapter'] = True

                            if parameters['train_model'] and parameters[
                                    'add_class']:
                                transition_params_trained, model, glo_step = \
                                    train.restore_model_parameters_from_pretrained_model(parameters, dataset, sess,
                                                                                         model, model_saver)
                                init_new_vars_op = tf.initialize_variables(
                                    [glo_step])
                                sess.run(init_new_vars_op)
                            else:
                                transition_params_trained = \
                                    train.restore_pretrained_model(parameters, dataset, sess, model, model_saver)

                            for dataset_type in dataset_filepaths.keys():
                                writers[dataset_type] = tf.summary.FileWriter(
                                    tensorboard_log_folders[dataset_type],
                                    graph=sess.graph)
                                # embedding_writer has to write in model_folder, otherwise TensorBoard won't be able to view embeddings
                                embedding_writer = tf.summary.FileWriter(
                                    model_folder)

                        # epoch_number != 0, no matter use or not use pre-trained model
                        elif epoch_number != 0:
                            # Train model: loop over all sequences of training set with shuffling
                            sequence_numbers = list(
                                range(len(dataset.token_indices['train'])))
                            random.shuffle(sequence_numbers)
                            for sequence_number in sequence_numbers:
                                transition_params_trained, W_before_crf = train.train_step(
                                    sess, dataset, sequence_number, model,
                                    transition_params_trained, parameters)
                                step += 1
                        epoch_elapsed_training_time = time.time() - epoch_start_time
                        print('Training completed in {0:.2f} seconds'.format(
                            epoch_elapsed_training_time),
                              flush=False)

                        if parameters['use_adapter']:  # model evaluation, using adapter
                            # pass the pred_for_adapter as label_indices vector
                            original_label_adapter_vector_indices = dataset.label_adapter_vector_indices
                            dataset.label_adapter_vector_indices = predicted_label_adapter_vector_indices
                            y_pred, y_true, output_filepaths = train.predict_labels(
                                sess, model, transition_params_trained,
                                parameters, dataset, epoch_number,
                                stats_graph_folder, dataset_filepaths)

                            evaluate.evaluate_model(results, dataset, y_pred,
                                                    y_true, stats_graph_folder,
                                                    epoch_number,
                                                    epoch_start_time,
                                                    output_filepaths,
                                                    parameters)
                            dataset.label_adapter_vector_indices = original_label_adapter_vector_indices

                        else:  # model evaluation, not using adapter
                            y_pred, y_true, output_filepaths = train.predict_labels(
                                sess, model, transition_params_trained,
                                parameters, dataset, epoch_number,
                                stats_graph_folder, dataset_filepaths)

                            # Evaluate model: save and plot results
                            evaluate.evaluate_model(results, dataset, y_pred,
                                                    y_true, stats_graph_folder,
                                                    epoch_number,
                                                    epoch_start_time,
                                                    output_filepaths,
                                                    parameters)

                        summary = sess.run(model.summary_op, feed_dict=None)
                        writers['train'].add_summary(summary, epoch_number)
                        writers['train'].flush()
                        utils.copytree(writers['train'].get_logdir(),
                                       model_folder)

                        # Early stopping
                        train_f1_score = results['epoch'][epoch_number][0][
                            'train']['f1_score']['weighted']
                        valid_f1_score = results['epoch'][epoch_number][0][
                            'valid']['f1_score']['weighted']
                        test_f1_score = results['epoch'][epoch_number][0][
                            'test']['f1_score']['weighted']
                        f1_scores['train-F1'].append(train_f1_score)
                        f1_scores['valid-F1'].append(valid_f1_score)
                        f1_scores['test-F1'].append(test_f1_score)

                        train_f1_score_conll = results['epoch'][epoch_number][
                            0]['train']['f1_conll']['micro']
                        valid_f1_score_conll = results['epoch'][epoch_number][
                            0]['valid']['f1_conll']['micro']
                        test_f1_score_conll = results['epoch'][epoch_number][
                            0]['test']['f1_conll']['micro']
                        f1_scores_conll['train-F1'].append(
                            train_f1_score_conll)
                        f1_scores_conll['valid-F1'].append(
                            valid_f1_score_conll)
                        f1_scores_conll['test-F1'].append(test_f1_score_conll)

                        if valid_f1_score > f1_score_best:
                            patience_counter = 0
                            f1_score_best = valid_f1_score
                            # Save the best model
                            model_saver.save(
                                sess,
                                os.path.join(model_folder, 'best_model.ckpt'))
                            print(
                                'updated model to current epoch : epoch {:d}'.
                                format(epoch_number))
                            print('the model is saved in: {:s}'.format(
                                model_folder))
                        else:
                            patience_counter += 1
                        print("In epoch {:d}, the valid F1 is : {:f}".format(
                            epoch_number, valid_f1_score))
                        print(
                            "The last {0} epochs have not shown improvements on the validation set."
                            .format(patience_counter))

                        if patience_counter >= parameters['patience']:
                            print('Early Stop!')
                            results['execution_details']['early_stop'] = True
                            # save last model
                            model_saver.save(
                                sess,
                                os.path.join(model_folder, 'last_model.ckpt'))
                            print('the last model is saved in: {:s}'.format(
                                model_folder))

                            break

                        if (epoch_number >= parameters['maximum_number_of_epochs']
                                and not parameters['refine_with_crf']):
                            break
                    if not parameters['use_pretrained_model']:
                        plot_name = 'F1-summary-step1.svg'
                    else:
                        plot_name = 'F1-summary-step2.svg'

                    print('Sklearn result:')
                    for k, l in f1_scores.items():
                        print(k, l)

                    print('Conll result:')
                    for k, l in f1_scores_conll.items():
                        print(k, l)
                    utils_plots.plot_f1(
                        f1_scores,
                        os.path.join(stats_graph_folder, '..', plot_name),
                        'F1 score summary')

                    # TODO: in step 1, for task a, add the best deploy data to step 2 train set, and call script
                    print('(sklearn micro) test F1:')
                    micro_f1 = ','.join([
                        str(results['epoch'][ep][0]['test']['f1_score']
                            ['micro']) for ep in range(epoch_number + 1)
                    ])
                    print(micro_f1)
                    print('(sklearn macro) test F1:')
                    macro_f1 = ','.join([
                        str(results['epoch'][ep][0]['test']['f1_score']
                            ['macro']) for ep in range(epoch_number + 1)
                    ])
                    print(macro_f1)

                except KeyboardInterrupt:
                    results['execution_details']['keyboard_interrupt'] = True
                    print('Training interrupted')

                print('Finishing the experiment')
                end_time = time.time()
                results['execution_details'][
                    'train_duration'] = end_time - start_time
                results['execution_details']['train_end'] = end_time
                evaluate.save_results(results, stats_graph_folder)
                for dataset_type in dataset_filepaths.keys():
                    writers[dataset_type].close()

    sess.close()  # release the session's resources
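Since main() accepts an argv-style list, it can be driven programmatically as well as from the shell. A hypothetical invocation (sketch; the exact flag name accepted by parse_arguments is an assumption):

# parse_arguments must yield at least a 'parameters_filepath' entry
main(['main.py', '--parameters_filepath', './parameters.ini'])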
Code Example #12
model = MobileNetv2().to(device).float()
optimizer = optim.Adam(model.parameters(), lr=0.001)
scheduler = ReduceLROnPlateau(optimizer,
                              mode='max',
                              factor=0.1,
                              patience=2,
                              verbose=True)
n_epochs = 50
stats = {'epoch': [], 'train_loss': [], 'val_loss': [], 'acc': []}
val_loss_min = np.Inf
for epoch in range(n_epochs):
    batch_train_loss = []
    epoch_loss = 0
    stats['epoch'].append(epoch)
    for images, labels in train_loader:
        loss = train_step(images, labels, model, criterion, optimizer)
        epoch_loss += loss.item()
        batch_train_loss.append(loss.item())
    stats['train_loss'].append(batch_train_loss)
    print(
        f'Epoch {epoch+0:03}: | Train Loss: {epoch_loss/len(train_loader):.5f}'
    )

    model.eval()
    with torch.no_grad():
        batch_val_loss = []
        batch_acc = []
        epoch_val_loss = 0
        epoch_acc = 0
        for images, labels in val_loader:
            images, labels = images.to(device).float(), labels.to(device)
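The loop relies on a train_step(images, labels, model, criterion, optimizer) helper that performs one optimization step. A minimal sketch of such a helper (hypothetical; device and criterion are assumed to be defined elsewhere in the script):

def train_step(images, labels, model, criterion, optimizer):
    model.train()
    images, labels = images.to(device).float(), labels.to(device)
    optimizer.zero_grad()           # clear gradients from the previous step
    outputs = model(images)         # forward pass
    loss = criterion(outputs, labels)
    loss.backward()                 # backpropagate
    optimizer.step()                # update weights
    return loss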
Code Example #13
                    for epoch in range(args.epochs):
                        #print(exp_name_)
                        #print(torch.cuda.get_device_name(torch.cuda.current_device()))
                        for idx, (batch, label) in enumerate(train_loader, 0):
                            # print("batch {}".format(batch.size()))
                            # print("label grad{}".format(label.requires_grad))
                            # print("batch{}".format(idx))
                            # new_label = label.clone().detach()
                            # new_label = new_label.to(device)
                            # print('label size {}'.format(label.size()))
                            if args.count_shuffled_unshuffled:
                                new_label = restore_labels(label)
                                #new_label, ratio = shuffle_batch_label(label, shuffle_ratio, device)
                                #new_label = new_label.detach()
                                loss = train_step(model, batch, new_label,
                                                  device, model_optimizer,
                                                  loss_func)
                                #del new_label
                            else:
                                loss = train_step(model, batch, label, device,
                                                  model_optimizer, loss_func)
                            #del label
                            #del batch
                            #print("ratio: {}".format(ratio))
                            loss = loss.detach()  #important!
                            total_loss += loss.item()
                            #del batch
                            del loss
                            #del label, new_label

                        #print(epoch)
Code Example #14
def main():
    torch.manual_seed(233)
    torch.cuda.set_device(0)
    args = get_args()
    print("generating config")
    config = Config(
        state_dim=args.hidden,
        input_dim=args.input_dim,
        hidden=args.hidden,
        output_dim=args.num_classes,
        epsilon=args.epsilon
    )
    gamma = args.gamma
    reward_amplify = args.reward_amplify
    passive_drive = args.passive_drive
    memory = models.Memory(args.capacity)
    m = args.batch_size
    print("initializing networks")
    E = models.Shared_Encoder(config)
    Q = models.Simple_Q_Net(config)    # 2-dim x-or problem
    Q_t = models.Simple_Q_Net(config)
    Q_t.load_state_dict(Q.state_dict())     # let Q and Q_t be identical initially

    C = models.SimpleNNClassifier(config)

    episode_length = args.episode_length
    episode_number = args.episode_number

    print("initializing optimizers")
    optimizer_E = torch.optim.Adam(E.parameters(), lr=args.lr, betas=(0., 0.999))
    optimizer_C = torch.optim.Adam(C.parameters(), lr=args.lr, betas=(0., 0.999))
    optimizer_Q = torch.optim.Adam(Q.parameters(), lr=args.lr, betas=(0., 0.999))

    # enable gpu
    E.cuda()
    C.cuda()
    Q.cuda()
    Q_t.cuda()

    #test_C(C, E)
    loss_last = Variable(torch.tensor([0.])).cuda()
    X_eval, Y_eval = xor_data_generate(args.eval_set_size)
    X_eval = X_eval.cuda()
    Y_eval = Y_eval.cuda()
    for i in range(episode_number):
        #X_eval, Y_eval = xor_data_generate(args.eval_set_size)
        #X_eval = X_eval.cuda()
        #Y_eval = Y_eval.cuda()
        X, Y = xor_data_generate(m)
        X = X.cuda()
        Y = Y.cuda()
        for t in range(episode_length):
            try:
                X, Y, loss_last, reward = train_step(E=E,
                                                     C=C,
                                                     Q=Q,
                                                     Q_t=Q_t,
                                                     X=X,
                                                     Y=Y,
                                                     eval_X=X_eval,
                                                     eval_Y=Y_eval,
                                                     gamma=gamma,
                                                     loss_last=loss_last,
                                                     memory=memory,
                                                     optimizer_C=optimizer_C,
                                                     optimizer_E=optimizer_E,
                                                     optimizer_Q=optimizer_Q,
                                                     reward_amplify=reward_amplify,
                                                     passive_drive=passive_drive)
                print("Episode %i step %i, loss=%f, reward=%f" % (
                    i, t, loss_last.detach().cpu().numpy(), reward.detach().cpu().numpy()))
            except Exception as e:
                print("Cannot train the model on this step, error:", e)

        # sync target network weights; `Q_t = Q` would alias the two networks
        Q_t.load_state_dict(Q.state_dict())
        if i % 20 == 0:
            test_C(C, E)
            state = {
                'E_state_dict': E.state_dict(),
                'E_optimizer': optimizer_E.state_dict(),
                'C_state_dict': C.state_dict(),
                'C_optimizer': optimizer_C.state_dict(),
                'Q_state_dict': Q.state_dict(),
                'Q_optimizer': optimizer_Q.state_dict(),

            }
            model_name = "cog396test_main_episode_" + str(i) + ".tr"
            torch.save(state, model_name)
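Each checkpoint stores both model and optimizer state dicts, so a run can be resumed later. A hypothetical restore (sketch), mirroring the keys saved above:

# e.g. resume from the episode-20 checkpoint
state = torch.load("cog396test_main_episode_20.tr")
E.load_state_dict(state['E_state_dict'])
C.load_state_dict(state['C_state_dict'])
Q.load_state_dict(state['Q_state_dict'])
optimizer_E.load_state_dict(state['E_optimizer'])
optimizer_C.load_state_dict(state['C_optimizer'])
optimizer_Q.load_state_dict(state['Q_optimizer'])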
Code Example #15
    for batch in range(args.num_batches):
        start = batch * args.batch_size
        end = (batch + 1) * args.batch_size

        train_data = (advantages, rewards_to_go, values, actions, observations,
                      policy_probs)
        # slice out batches from train_data
        batch_data = [x[start:end] for x in train_data]
        batch_data = [torch.tensor(x).to(device) for x in batch_data]

        # flatten (batch_size, num_steps, ...) into (batch_size*num_steps, ...)
        batch_data = [x.reshape((-1, ) + x.shape[2:]) for x in batch_data]

        # Step batch
        for epoch in range(args.ppo_epochs):
            train_step(model, optim, batch_data, args, i, tracker)

    tracker.log_iteration_time(args.num_workers * args.num_steps, i)
    if i % 5 == 0:
        tracker.add_histogram("episode/episode_length",
                              game_player.episode_length, i)
        tracker.add_histogram("episode/episode_rewards",
                              game_player.episode_rewards, i)
    if i % 25 == 0:
        tracker.add_histogram("training/raw_advantages",
                              raw_advantages, i)
        tracker.add_histogram("training/rewards",
                              rewards, i)
        tracker.add_histogram("training/observations",
                              observations, i)
        tracker.add_histogram("training/reward_std",
Code Example #16
import tensorflow as tf
from keras.utils import Progbar
from get_data import *
from train import train_step
import numpy as np
from config import *
epochs = train_config.epochs

for epoch in range(epochs):
  tf.print("{}/{} epoch".format(epoch+1, epochs))
  pbar = Progbar(target=60000, unit_name="WGAN_GP")
  prev_dis_loss, prev_gen_loss = 0, 0
  for x in train_x_batched:
    dis_loss, gen_loss = train_step(x, prev_dis_loss, prev_gen_loss)
    prev_dis_loss = dis_loss
    prev_gen_loss = gen_loss
    values = [("Critic Loss", np.round(dis_loss.numpy(), 4)),
              ("Generator Loss", np.round(gen_loss.numpy(), 4))]
    pbar.add(x.shape[0], values=values)
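The train_step here implements WGAN-GP, whose defining ingredient is the gradient penalty on the critic. A sketch of that penalty term (assumptions: critic is the discriminator network, and real/fake are image batches of the same shape):

import tensorflow as tf

def gradient_penalty(critic, real, fake):
    # interpolate between real and generated samples
    eps = tf.random.uniform([tf.shape(real)[0], 1, 1, 1], 0.0, 1.0)
    interp = eps * real + (1.0 - eps) * fake
    with tf.GradientTape() as tape:
        tape.watch(interp)
        score = critic(interp, training=True)
    grads = tape.gradient(score, interp)
    # penalize deviation of the gradient norm from 1
    norm = tf.sqrt(tf.reduce_sum(tf.square(grads), axis=[1, 2, 3]) + 1e-12)
    return tf.reduce_mean((norm - 1.0) ** 2)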
Code Example #17
File: main.py  Project: revirevy/SelfSent
def main():
    file_params = 'parameters_yelp_50k.ini'
    if len(sys.argv) > 1 and '.ini' in sys.argv[1]:
        file_params = sys.argv[1]

    # Load config
    parameters, conf_parameters = load_parameters(
        parameters_filepath=os.path.join('.', file_params))
    dataset_filepaths = get_valid_dataset_filepaths(parameters)
    #check_parameter_compatiblity(parameters, dataset_filepaths)

    if parameters['seed'] != -1:
        random.seed(parameters['seed'])

    # Create annotator
    annotator = stanford_corenlp_pywrapper.CoreNLP(
        configdict={
            'annotators': 'tokenize, ssplit',
            'ssplit.eolonly': True
        },
        corenlp_jars=[parameters['stanford_folder'] + '/*'])
    # Load dataset
    dataset = ds.Dataset(verbose=parameters['verbose'],
                         debug=parameters['debug'])
    dataset.load_dataset(dataset_filepaths, parameters, annotator)

    # Adapt train/valid/test to be multiple of batch_size
    for size in ['train_size', 'valid_size', 'test_size']:
        if parameters[size] % parameters['batch_size'] != 0:
            parameters[size] = int(
                parameters[size] /
                parameters['batch_size']) * parameters['batch_size']
            print('Changed {}'.format(size))

    # Set GPU device if more GPUs are specified
    if parameters['number_of_gpus'] > 1 and parameters['gpu_device'] != -1:
        os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
        os.environ["CUDA_VISIBLE_DEVICES"] = parameters['gpu_device']

    # GPUs
    print(device_lib.list_local_devices())
    # Create graph and session
    with tf.Graph().as_default():
        session_conf = tf.ConfigProto(
            intra_op_parallelism_threads=parameters['number_of_cpu_threads'],
            inter_op_parallelism_threads=parameters['number_of_cpu_threads'],
            device_count={
                'CPU': 1,
                'GPU': parameters['number_of_gpus']
            },
            allow_soft_placement=True,  # automatically choose an existing and supported device to run the operations in case the specified one doesn't exist
            log_device_placement=False)

        sess = tf.Session(config=session_conf)

        with sess.as_default():
            if parameters['seed'] != -1:
                tf.set_random_seed(parameters['seed'])

                # Initialize and save execution details
                start_time = time.time()
                experiment_timestamp = utils.get_current_time_in_miliseconds()

                results = {}
                results['epoch'] = {}
                results['execution_details'] = {}
                results['execution_details']['train_start'] = start_time
                results['execution_details'][
                    'time_stamp'] = experiment_timestamp
                results['execution_details']['early_stop'] = False
                results['execution_details']['keyboard_interrupt'] = False
                results['execution_details']['num_epochs'] = 0
                results['model_options'] = copy.copy(parameters)

                dataset_name = utils.get_basename_without_extension(
                    parameters['dataset_folder'])
                model_name = '{0}_{1}'.format(
                    dataset_name, results['execution_details']['time_stamp'])

                output_folder = os.path.join('..', 'output')
                utils.create_folder_if_not_exists(output_folder)

                stats_graph_folder = os.path.join(
                    output_folder, model_name)  # Folder where to save graphs
                utils.create_folder_if_not_exists(stats_graph_folder)
                model_folder = os.path.join(stats_graph_folder, 'model')
                utils.create_folder_if_not_exists(model_folder)

                with open(os.path.join(model_folder, file_params),
                          'w') as parameters_file:
                    conf_parameters.write(parameters_file)

                pickle.dump(
                    dataset,
                    open(os.path.join(model_folder, 'dataset.pickle'), 'wb'))

                # Instantiate the model
                # graph initialization should be before FileWriter, otherwise the graph will not appear in TensorBoard
                model = SelfSent(dataset, parameters)

                # Initialize the model
                sess.run(tf.global_variables_initializer())
                if not parameters['use_pretrained_model']:
                    model.load_pretrained_token_embeddings(
                        sess, dataset, parameters)

                # Start training + evaluation loop. Each iteration corresponds to 1 epoch.
                bad_counter = 0  # number of epochs with no improvement on the validation test
                previous_best_valid_accuracy = 0
                previous_best_test_accuracy = 0
                model_saver = tf.train.Saver(
                    max_to_keep=parameters['maximum_number_of_epochs']
                )  # defaults to saving all variables
                epoch_number = -1
                try:
                    while True:
                        epoch_number += 1
                        print('\nStarting epoch {0}'.format(epoch_number))

                        epoch_start_time = time.time()

                        if parameters['use_pretrained_model'] and epoch_number == 0:
                            # Restore pretrained model parameters
                            dataset = train.restore_model_parameters_from_pretrained_model(
                                parameters, dataset, sess, model_saver)
                            dataset.load_deploy(
                                os.path.join(parameters['dataset_folder'],
                                             '{0}.json'.format('deploy')),
                                parameters, annotator)
                            y_pred, y_true, output_filepaths, attentions = train.predict_labels(
                                sess,
                                model,
                                parameters,
                                dataset,
                                epoch_number,
                                stats_graph_folder,
                                dataset_filepaths,
                                only_deploy=True)
                            y_pred = y_pred['deploy']

                            # directory containing the deploy output files
                            deploy_dir = output_filepaths['deploy'][
                                :output_filepaths['deploy'].rfind('/') + 1]
                            with open(deploy_dir + 'attention.txt', 'w',
                                      encoding='utf-8') as fp:
                                # Compute attention
                                tokens_with_attentions = []
                                for sample_id in range(len(y_pred)):
                                    attention = attentions[
                                        sample_id // parameters['batch_size']][
                                        sample_id % parameters['batch_size']]
                                    # Remove padded dimension
                                    attention = attention[
                                        :dataset.token_lengths['deploy'][sample_id]]

                                    # Save current attention
                                    fp.write("{}\t{:05.2f}\t".format(
                                        y_pred[sample_id][0],
                                        y_pred[sample_id][1]))
                                    fp.write(' '.join(
                                        dataset.tokens['deploy'][sample_id]) + '\t')
                                    fp.write(' '.join(
                                        str(a) for a in attention.flatten()) + '\n')

                                    # Sum over columns (we combine all the annotation vectors)
                                    attention = np.sum(attention, axis=1)
                                    # Normalize to sum at 1
                                    attention = attention / np.linalg.norm(
                                        attention)

                                    # Keep only high confidence
                                    if y_pred[sample_id][1] >= parameters['attention_visualization_conf']:
                                        tokens_with_attentions.append(
                                            (y_pred[sample_id][0],
                                             y_pred[sample_id][1],
                                             dataset.tokens['deploy'][sample_id],
                                             attention))

                            # Plot attention
                            utils_plots.visualize_attention(
                                tokens_with_attentions, dataset.unique_labels,
                                deploy_dir,
                                parameters['attention_visualization_conf'])
                            break
                        elif epoch_number != 0:
                            total_loss, total_accuracy = train.train_step(
                                sess, dataset, model, parameters)
                            print('Mean loss: {:.2f}\tMean accuracy: {:.2f}'.
                                  format(np.mean(total_loss),
                                         100.0 * np.mean(total_accuracy)),
                                  flush=True)

                        epoch_elapsed_training_time = time.time() - epoch_start_time
                        print('Training completed in {0:.2f} seconds'.format(
                            epoch_elapsed_training_time),
                              flush=True)

                        y_pred, y_true, output_filepaths, _ = train.predict_labels(
                            sess, model, parameters, dataset, epoch_number,
                            stats_graph_folder, dataset_filepaths)

                        # Evaluate model: save and plot results
                        evaluate.evaluate_model(results, dataset, y_pred,
                                                y_true, stats_graph_folder,
                                                epoch_number, epoch_start_time,
                                                output_filepaths, parameters)

                        # Save model
                        model_saver.save(
                            sess,
                            os.path.join(
                                model_folder,
                                'model_{0:05d}.ckpt'.format(epoch_number)))

                        # Early stop
                        valid_accuracy = results['epoch'][epoch_number][0][
                            'valid']['accuracy_score']
                        if valid_accuracy > previous_best_valid_accuracy:
                            bad_counter = 0
                            previous_best_valid_accuracy = valid_accuracy
                            previous_best_test_accuracy = results['epoch'][
                                epoch_number][0]['test']['accuracy_score']
                        else:
                            bad_counter += 1
                        print(
                            "The last {0} epochs have not shown improvements on the validation set."
                            .format(bad_counter))
                        print("Best valid with test performances in epoch " +
                              str(epoch_number - bad_counter) +
                              ": {:05.2f}%\t{:05.2f}%".format(
                                  previous_best_valid_accuracy,
                                  previous_best_test_accuracy))
                        if bad_counter >= parameters['patience']:
                            print('Early Stop!')
                            results['execution_details']['early_stop'] = True
                            break

                        if epoch_number >= parameters[
                                'maximum_number_of_epochs']:
                            break

                except KeyboardInterrupt:
                    results['execution_details']['keyboard_interrupt'] = True
                    print('Training interrupted')

                print('Finishing the experiment')
                end_time = time.time()
                results['execution_details'][
                    'train_duration'] = end_time - start_time
                results['execution_details']['train_end'] = end_time
                evaluate.save_results(results, stats_graph_folder)

            sess.close()  # release the session's resources
Code Example #18
File: main.py  Project: mns0/CS547-CycleGAN
    optimizer_G_x2y = torch.optim.Adam(G_x2y.parameters(),
                                       lr=0.0001,
                                       betas=(0, 0.9))
    optimizer_G_y2x = torch.optim.Adam(G_y2x.parameters(),
                                       lr=0.0001,
                                       betas=(0, 0.9))
    optimizer_D_x = torch.optim.Adam(D_x.parameters(),
                                     lr=0.0001,
                                     betas=(0, 0.9))
    optimizer_D_y = torch.optim.Adam(D_y.parameters(),
                                     lr=0.0001,
                                     betas=(0, 0.9))

    criterion_image = nn.L1Loss()
    criterion_type = nn.L1Loss()
    criterion_identity = nn.L1Loss()

    x_fake_sample = Sample_from_Pool()
    y_fake_sample = Sample_from_Pool()

    for epoch in range(num_epochs):
        train_step(epoch, trainloader, G_x2y, G_y2x, D_x, D_y,
                   optimizer_G_x2y, optimizer_G_y2x, optimizer_D_x, optimizer_D_y,
                   criterion_image, criterion_type, criterion_identity, batch_size,
                   x_fake_sample, y_fake_sample, lambda_identity_loss,
                   lambda_idt_x, lambda_idt_y, wgan_lambda, save_dir, save_log_file, fh)

    fh.close()

else:
    #Potential validation code
    pass
Code Example #19
File: neuroner.py  Project: hmchuong/NeuroNER
    def fit(self):
        '''
        Train the model on the dataset
        '''
        parameters = self.parameters
        conf_parameters = self.conf_parameters
        dataset_filepaths = self.dataset_filepaths
        dataset = self.dataset
        dataset_brat_folders = self.dataset_brat_folders
        sess = self.sess
        model = self.model
        transition_params_trained = self.transition_params_trained
        stats_graph_folder, experiment_timestamp = self._create_stats_graph_folder(parameters)

        # Initialize and save the execution details of this run
        start_time = time.time()
        results = {}
        results['epoch'] = {}
        '''
        In machine learning, an epoch is one complete pass of the learning algorithm over the entire training set.
        For example, the MNIST training set contains 55000 samples; once the algorithm has processed all 55000 samples, one epoch has passed.
        '''
        results['execution_details'] = {}
        results['execution_details']['train_start'] = start_time                # Training start time
        results['execution_details']['time_stamp'] = experiment_timestamp       # Experiment timestamp
        results['execution_details']['early_stop'] = False                      # Whether training was stopped early
        results['execution_details']['keyboard_interrupt'] = False              # Whether training was interrupted from the keyboard
        results['execution_details']['num_epochs'] = 0                          # Number of epochs run
        results['model_options'] = copy.copy(parameters)                        # Copy of the parameters

        model_folder = os.path.join(stats_graph_folder, 'model')                # output/en.../model
        utils.create_folder_if_not_exists(model_folder)
        # Save the parameter values to parameters.ini
        with open(os.path.join(model_folder, 'parameters.ini'), 'w') as parameters_file:
            conf_parameters.write(parameters_file)                                          # Log the parameters to a file
        pickle.dump(dataset, open(os.path.join(model_folder, 'dataset.pickle'), 'wb'))      # Dump the dataset to a pickle file so later runs can reuse it

        # Create the TensorBoard logs folder, used later for plotting
        tensorboard_log_folder = os.path.join(stats_graph_folder, 'tensorboard_logs')       # folder holding the TensorBoard log files, used for plotting later
        utils.create_folder_if_not_exists(tensorboard_log_folder)
        tensorboard_log_folders = {}
        for dataset_type in dataset_filepaths.keys():
            tensorboard_log_folders[dataset_type] = os.path.join(stats_graph_folder, 'tensorboard_logs', dataset_type)
            utils.create_folder_if_not_exists(tensorboard_log_folders[dataset_type])

        # Instantiate the writers for TensorBoard
        writers = {} # At most 4 writers: train, test, valid, deploy
        for dataset_type in dataset_filepaths.keys():
            writers[dataset_type] = tf.summary.FileWriter(tensorboard_log_folders[dataset_type], graph=sess.graph)
        embedding_writer = tf.summary.FileWriter(model_folder) # embedding_writer has to write in model_folder, otherwise TensorBoard won't be able to view embeddings

        # Used for visualizing the embeddings with TensorBoard
        embeddings_projector_config = projector.ProjectorConfig()
        tensorboard_token_embeddings = embeddings_projector_config.embeddings.add()
        tensorboard_token_embeddings.tensor_name = model.token_embedding_weights.name
        token_list_file_path = os.path.join(model_folder, 'tensorboard_metadata_tokens.tsv')
        tensorboard_token_embeddings.metadata_path = 'tensorboard_metadata_tokens.tsv'  # os.path.relpath(token_list_file_path, '..')

        tensorboard_character_embeddings = embeddings_projector_config.embeddings.add()
        tensorboard_character_embeddings.tensor_name = model.character_embedding_weights.name
        character_list_file_path = os.path.join(model_folder, 'tensorboard_metadata_characters.tsv')
        tensorboard_character_embeddings.metadata_path = 'tensorboard_metadata_characters.tsv'  # os.path.relpath(character_list_file_path, '..')

        # Saves a configuration file that TensorBoard will read during startup.
        projector.visualize_embeddings(embedding_writer, embeddings_projector_config)

        # Write the tokens to a TSV file used as embedding metadata
        token_list_file = codecs.open(token_list_file_path,'w', 'UTF-8')
        for token_index in range(dataset.vocabulary_size):
            token_list_file.write('{0}\n'.format(dataset.index_to_token[token_index]))
        token_list_file.close()

        # Write the characters to a TSV file used as embedding metadata
        character_list_file = codecs.open(character_list_file_path,'w', 'UTF-8')
        for character_index in range(dataset.alphabet_size):
            if character_index == dataset.PADDING_CHARACTER_INDEX:
                character_list_file.write('PADDING\n')
            else:
                character_list_file.write('{0}\n'.format(dataset.index_to_character[character_index]))
        character_list_file.close()


        # Start training + evaluation loop. Each iteration corresponds to 1 epoch.
        bad_counter = 0 # number of epochs with no improvement on the validation set in terms of F1-score
        previous_best_valid_f1_score = 0 # best F1-score obtained in previous epochs
        epoch_number = -1
        try:
            while True:
                step = 0
                epoch_number += 1
                print('\nStarting epoch {0}'.format(epoch_number))

                epoch_start_time = time.time()

                if epoch_number != 0:
                    # Train model: loop over all sequences of training set with shuffling
                    sequence_numbers=list(range(len(dataset.token_indices['train'])))
                    print("----****____")
                    print(dataset.token_indices['train'][:10])
                    random.shuffle(sequence_numbers)
                    # Perform training
                    for sequence_number in sequence_numbers:
                        transition_params_trained = train.train_step(sess, dataset, sequence_number, model, parameters)
                        step += 1
                        if step % 10 == 0:
                            print('Training {0:.2f}% done'.format(step/len(sequence_numbers)*100), end='\r', flush=True)

                # Compute the time taken by one epoch
                epoch_elapsed_training_time = time.time() - epoch_start_time
                print('Training completed in {0:.2f} seconds'.format(epoch_elapsed_training_time), flush=True)

                y_pred, y_true, output_filepaths = train.predict_labels(sess, model, transition_params_trained, parameters, dataset, epoch_number, stats_graph_folder, dataset_filepaths)

                # Evaluate model: save and plot results
                evaluate.evaluate_model(results, dataset, y_pred, y_true, stats_graph_folder, epoch_number, epoch_start_time, output_filepaths, parameters)

                if parameters['use_pretrained_model'] and not parameters['train_model']:
                    conll_to_brat.output_brat(output_filepaths, dataset_brat_folders, stats_graph_folder)
                    break

                # Save model
                model.saver.save(sess, os.path.join(model_folder, 'model_{0:05d}.ckpt'.format(epoch_number)))

                # Save TensorBoard logs
                summary = sess.run(model.summary_op, feed_dict=None)
                writers['train'].add_summary(summary, epoch_number)
                writers['train'].flush()
                utils.copytree(writers['train'].get_logdir(), model_folder)


                # Early stop
                valid_f1_score = results['epoch'][epoch_number][0]['valid']['f1_score']['micro']
                # If this epoch's score beats the previous best
                if  valid_f1_score > previous_best_valid_f1_score:
                    bad_counter = 0
                    previous_best_valid_f1_score = valid_f1_score
                    conll_to_brat.output_brat(output_filepaths, dataset_brat_folders, stats_graph_folder, overwrite=True)
                    self.transition_params_trained = transition_params_trained
                else:
                    bad_counter += 1
                print("The last {0} epochs have not shown improvements on the validation set.".format(bad_counter))

                # If bad_counter reaches the limit parameters['patience'] (initialized to 10), finish training
                if bad_counter >= parameters['patience']:
                    print('Early Stop!')
                    results['execution_details']['early_stop'] = True
                    break

                # If the number of epochs reaches the configured maximum, finish training
                if epoch_number >= parameters['maximum_number_of_epochs']: break


        except KeyboardInterrupt:
            results['execution_details']['keyboard_interrupt'] = True
            print('Training interrupted')

        # Training finished: save the timing and the results
        print('Finishing the experiment')
        end_time = time.time()
        results['execution_details']['train_duration'] = end_time - start_time
        results['execution_details']['train_end'] = end_time
        evaluate.save_results(results, stats_graph_folder)
        for dataset_type in dataset_filepaths.keys():
            writers[dataset_type].close()
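The TensorBoard embedding-projector setup above (a ProjectorConfig, one metadata TSV per embedding, then visualize_embeddings) reappears almost verbatim in the examples that follow. A hedged refactoring into a single helper might look like this; the function name and signature are illustrative, not part of NeuroNER.

import codecs
import os

from tensorflow.contrib.tensorboard.plugins import projector  # TensorFlow 1.x

def register_embedding(config, tensor_name, metadata_filename, labels, model_folder):
    '''Attach one embedding tensor to a ProjectorConfig and write its metadata TSV.'''
    embedding = config.embeddings.add()
    embedding.tensor_name = tensor_name
    embedding.metadata_path = metadata_filename  # resolved relative to model_folder
    with codecs.open(os.path.join(model_folder, metadata_filename), 'w', 'UTF-8') as f:
        for label in labels:
            f.write('{0}\n'.format(label))

# Usage mirroring the snippet above:
# config = projector.ProjectorConfig()
# register_embedding(config, model.token_embedding_weights.name,
#                    'tensorboard_metadata_tokens.tsv',
#                    (dataset.index_to_token[i] for i in range(dataset.vocabulary_size)),
#                    model_folder)
# projector.visualize_embeddings(embedding_writer, config)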
コード例 #20
0
ファイル: run_experiments.py プロジェクト: braemy/NeuroNER
def main(languages):
    #embeddings_type = ['polyglot', 'fasttext']
    #embeddings_type = ['fasttext', 'fasttext_noOOV']
    embeddings_type = ['fasttext_noOOV']
    character_lstm = [True]
    embedding_language = ['target', 'source']
    combination = product(languages, embeddings_type, embedding_language, character_lstm)
    create_folder_if_not_exists(os.path.join("..", "log"))
    experiment_timestamp = utils.get_current_time_in_miliseconds()
    log_file = os.path.join("..", "log", "experiment-{}.log".format(experiment_timestamp))

    for language, emb_type, emb_language, char_lstm in combination:
        conf_parameters = load_parameters()
        conf_parameters = set_datasets(conf_parameters, language)
        conf_parameters.set('ann','use_character_lstm', str(char_lstm))
        conf_parameters.set('ann','embedding_type', emb_type)
        conf_parameters.set('ann','embedding_language', emb_language)
        if emb_type == 'polyglot':
            conf_parameters.set('ann', 'embedding_dimension', str(64))
        elif 'fasttext' in emb_type:
            conf_parameters.set('ann', 'embedding_dimension', str(300))
        else:
            raise("Uknown embedding type")
        if emb_language == 'source':
            conf_parameters.set('dataset', 'language', constants.MAPPING_LANGUAGE[language])
        else:
            conf_parameters.set('dataset', 'language', language)
        parameters, conf_parameters = parse_parameters(conf_parameters)

        start_time = time.time()
        experiment_timestamp = utils.get_current_time_in_miliseconds()

        results = {}
        results['epoch'] = {}
        results['execution_details'] = {}
        results['execution_details']['train_start'] = start_time
        results['execution_details']['time_stamp'] = experiment_timestamp
        results['execution_details']['early_stop'] = False
        results['execution_details']['keyboard_interrupt'] = False
        results['execution_details']['num_epochs'] = 0
        results['model_options'] = copy.copy(parameters)

        dataset_name = utils.get_basename_without_extension(parameters['dataset_train'])
        model_name = '{0}_{1}_{2}_{3}_{4}'.format(language, emb_type, char_lstm, emb_language,
                                                  results['execution_details']['time_stamp'])

        sys.stdout = open(os.path.join("..", "log", model_name), "w")
        print(language, emb_type, char_lstm, emb_language)

        with open(log_file, "a") as file:
            file.write("Experiment: {}\n".format(model_name))
            file.write("Start time:{}\n".format(experiment_timestamp))
            file.write("-------------------------------------\n\n")
        pprint(parameters)
        dataset_filepaths = get_valid_dataset_filepaths(parameters)
        check_parameter_compatiblity(parameters, dataset_filepaths)
        previous_best_valid_epoch = -1

        # Load dataset
        dataset = ds.Dataset(verbose=parameters['verbose'], debug=parameters['debug'])
        dataset.load_vocab_word_embeddings(parameters)
        dataset.load_dataset(dataset_filepaths, parameters)

        # Create graph and session
        with tf.Graph().as_default():
            session_conf = tf.ConfigProto(
                intra_op_parallelism_threads=parameters['number_of_cpu_threads'],
                inter_op_parallelism_threads=parameters['number_of_cpu_threads'],
                device_count={'CPU': 1, 'GPU': parameters['number_of_gpus']},
                allow_soft_placement=True,
                # automatically choose an existing and supported device to run the operations in case the specified one doesn't exist
                log_device_placement=False
            )

            session_conf.gpu_options.allow_growth = True

            sess = tf.Session(config=session_conf)

            with sess.as_default():
                # Initialize and save execution details

                print(model_name)
                output_folder = os.path.join('..', 'output')
                utils.create_folder_if_not_exists(output_folder)
                stats_graph_folder = os.path.join(output_folder, model_name)  # Folder where to save graphs
                utils.create_folder_if_not_exists(stats_graph_folder)
                model_folder = os.path.join(stats_graph_folder, 'model')
                utils.create_folder_if_not_exists(model_folder)
                with open(os.path.join(model_folder, 'parameters.ini'), 'w') as parameters_file:
                    conf_parameters.write(parameters_file)
                tensorboard_log_folder = os.path.join(stats_graph_folder, 'tensorboard_logs')
                utils.create_folder_if_not_exists(tensorboard_log_folder)
                tensorboard_log_folders = {}
                for dataset_type in dataset_filepaths.keys():
                    tensorboard_log_folders[dataset_type] = os.path.join(stats_graph_folder, 'tensorboard_logs',
                                                                         dataset_type)
                    utils.create_folder_if_not_exists(tensorboard_log_folders[dataset_type])
                # del dataset.embeddings_matrix
                if not parameters['use_pretrained_model']:
                    pickle.dump(dataset, open(os.path.join(model_folder, 'dataset.pickle'), 'wb'))
                # dataset.load_pretrained_word_embeddings(parameters)
                # Instantiate the model
                # graph initialization should be before FileWriter, otherwise the graph will not appear in TensorBoard
                model = EntityLSTM(dataset, parameters)

                # Instantiate the writers for TensorBoard
                writers = {}
                for dataset_type in dataset_filepaths.keys():
                    writers[dataset_type] = tf.summary.FileWriter(tensorboard_log_folders[dataset_type],
                                                                  graph=sess.graph)
                embedding_writer = tf.summary.FileWriter(
                    model_folder)  # embedding_writer has to write in model_folder, otherwise TensorBoard won't be able to view embeddings

                embeddings_projector_config = projector.ProjectorConfig()
                tensorboard_token_embeddings = embeddings_projector_config.embeddings.add()
                tensorboard_token_embeddings.tensor_name = model.token_embedding_weights.name
                token_list_file_path = os.path.join(model_folder, 'tensorboard_metadata_tokens.tsv')
                tensorboard_token_embeddings.metadata_path = os.path.relpath(token_list_file_path, '..')

                if parameters['use_character_lstm']:
                    tensorboard_character_embeddings = embeddings_projector_config.embeddings.add()
                    tensorboard_character_embeddings.tensor_name = model.character_embedding_weights.name
                    character_list_file_path = os.path.join(model_folder, 'tensorboard_metadata_characters.tsv')
                    tensorboard_character_embeddings.metadata_path = os.path.relpath(character_list_file_path, '..')

                projector.visualize_embeddings(embedding_writer, embeddings_projector_config)

                # Write metadata for TensorBoard embeddings
                token_list_file = codecs.open(token_list_file_path, 'w', 'UTF-8')
                for token_index in range(len(dataset.index_to_token)):
                    token_list_file.write('{0}\n'.format(dataset.index_to_token[token_index]))
                token_list_file.close()

                if parameters['use_character_lstm']:
                    character_list_file = codecs.open(character_list_file_path, 'w', 'UTF-8')
                    for character_index in range(dataset.alphabet_size):
                        if character_index == dataset.PADDING_CHARACTER_INDEX:
                            character_list_file.write('PADDING\n')
                        else:
                            character_list_file.write('{0}\n'.format(dataset.index_to_character[character_index]))
                    character_list_file.close()

                try:
                    # Initialize the model
                    sess.run(tf.global_variables_initializer())
                    if not parameters['use_pretrained_model']:
                        model.load_pretrained_token_embeddings(sess, dataset, parameters)

                    # Start training + evaluation loop. Each iteration corresponds to 1 epoch.
                    bad_counter = 0  # number of epochs with no improvement on the validation set in terms of F1-score
                    previous_best_valid_f1_score = -1
                    transition_params_trained = np.random.rand(len(dataset.unique_labels), len(
                        dataset.unique_labels))  # TODO np.random.rand(len(dataset.unique_labels)+2,len(dataset.unique_labels)+2)
                    model_saver = tf.train.Saver(
                        max_to_keep=None)  # parameters['maximum_number_of_epochs'])  # defaults to saving all variables
                    epoch_number = 0

                    while True:
                        step = 0
                        epoch_number += 1
                        print('\nStarting epoch {0}'.format(epoch_number))

                        epoch_start_time = time.time()

                        if parameters['use_pretrained_model'] and epoch_number == 1:
                            # Restore pretrained model parameters
                            transition_params_trained = train.restore_model_parameters_from_pretrained_model(parameters,
                                                                                                             dataset,
                                                                                                             sess,
                                                                                                             model,
                                                                                                             model_saver)
                        elif epoch_number != 0:
                            # Train model: loop over all sequences of training set with shuffling
                            sequence_numbers = list(range(len(dataset.token_indices['train'])))
                            random.shuffle(sequence_numbers)
                            data_counter = 0
                            sub_id = 0
                            for i in tqdm(range(0, len(sequence_numbers), parameters['batch_size']), "Training epoch {}".format(epoch_number),
                                          mininterval=1):
                                data_counter += parameters['batch_size']
                                if data_counter >= 20000:
                                    data_counter = 0
                                    sub_id += 0.001
                                    print("Intermediate evaluation number: ", sub_id)
                                    epoch_elapsed_training_time = time.time() - epoch_start_time
                                    print('Training completed in {0:.2f} seconds'.format(epoch_elapsed_training_time),
                                          flush=True)

                                    y_pred, y_true, output_filepaths = train.predict_labels(sess, model,
                                                                                            transition_params_trained,
                                                                                            parameters, dataset,
                                                                                            epoch_number + sub_id,
                                                                                            stats_graph_folder,
                                                                                            dataset_filepaths)
                                    # Evaluate model: save and plot results
                                    evaluate.evaluate_model(results, dataset, y_pred, y_true, stats_graph_folder,
                                                            epoch_number, epoch_start_time, output_filepaths,
                                                            parameters)
                                    # Save model
                                    model_saver.save(sess, os.path.join(model_folder,
                                                                        'model_{0:07.3f}.ckpt'.format(
                                                                            epoch_number + sub_id)))
                                    # Save TensorBoard logs
                                    summary = sess.run(model.summary_op, feed_dict=None)
                                    writers['train'].add_summary(summary, epoch_number)
                                    writers['train'].flush()
                                    utils.copytree(writers['train'].get_logdir(), model_folder)
                                    # Early stop
                                    valid_f1_score = results['epoch'][epoch_number][0]['valid']['f1_score']['micro']
                                    if valid_f1_score > previous_best_valid_f1_score:
                                        bad_counter = 0
                                        previous_best_valid_f1_score = valid_f1_score
                                    else:
                                        bad_counter += 1

                                sequence_number = sequence_numbers[i: i + parameters['batch_size']]
                                transition_params_trained, loss = train.train_step(sess, dataset, sequence_number,
                                                                                   model, transition_params_trained,
                                                                                   parameters)
                        epoch_elapsed_training_time = time.time() - epoch_start_time
                        print('Training completed in {0:.2f} seconds'.format(epoch_elapsed_training_time), flush=True)

                        y_pred, y_true, output_filepaths = train.predict_labels(sess, model, transition_params_trained,
                                                                                parameters, dataset, epoch_number,
                                                                                stats_graph_folder, dataset_filepaths)

                        # Evaluate model: save and plot results
                        evaluate.evaluate_model(results, dataset, y_pred, y_true, stats_graph_folder, epoch_number,
                                                epoch_start_time, output_filepaths, parameters)

                        # Save model
                        model_saver.save(sess, os.path.join(model_folder, 'model_{0:05d}.ckpt'.format(epoch_number)))

                        # Save TensorBoard logs
                        summary = sess.run(model.summary_op, feed_dict=None)
                        writers['train'].add_summary(summary, epoch_number)
                        writers['train'].flush()
                        utils.copytree(writers['train'].get_logdir(), model_folder)

                        # Early stop
                        valid_f1_score = results['epoch'][epoch_number][0]['valid']['f1_score']['micro']
                        if valid_f1_score > previous_best_valid_f1_score:
                            bad_counter = 0
                            previous_best_valid_f1_score = valid_f1_score
                            previous_best_valid_epoch = epoch_number
                        else:
                            bad_counter += 1
                        print("The last {0} epochs have not shown improvements on the validation set.".format(
                            bad_counter))

                        if bad_counter >= parameters['patience']:
                            print('Early Stop!')
                            results['execution_details']['early_stop'] = True
                            break

                        if epoch_number >= parameters['maximum_number_of_epochs']: break

                    keep_only_best_model(model_folder, previous_best_valid_epoch, parameters['maximum_number_of_epochs'] + 1)

                except KeyboardInterrupt:
                    results['execution_details']['keyboard_interrupt'] = True
                    print('Training interrupted')
                    # remove the experiment
                    remove_experiment = input("Do you want to remove the experiment? (yes/y/Yes)")
                    if remove_experiment in ["Yes", "yes", "y"]:
                        shutil.rmtree(stats_graph_folder)
                        print("Folder removed")
                    else:
                        print('Finishing the experiment')
                        end_time = time.time()
                        results['execution_details']['train_duration'] = end_time - start_time
                        results['execution_details']['train_end'] = end_time
                        evaluate.save_results(results, stats_graph_folder)
                    sys.stdout.close()
                except Exception:
                    logging.exception("")
                    remove_experiment = input("Do you want to remove the experiment? (yes/y/Yes)")
                    if remove_experiment in ["Yes", "yes", "y"]:
                        shutil.rmtree(stats_graph_folder)
                        print("Folder removed")
                    sys.stdout.close()

            sess.close()  # release the session's resources
            sys.stdout.close()
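keep_only_best_model is called after the training loop above but is not defined in this excerpt. A minimal sketch consistent with how it is used — assuming the integer-epoch checkpoints are named model_{epoch:05d}.ckpt* as in the save calls above — could be:

import glob
import os

def keep_only_best_model(model_folder, best_epoch, max_epochs):
    '''Delete every saved checkpoint except the one from best_epoch (hypothetical).'''
    for epoch in range(max_epochs):
        if epoch == best_epoch:
            continue  # keep all files belonging to the best epoch's checkpoint
        pattern = os.path.join(model_folder, 'model_{0:05d}.ckpt*'.format(epoch))
        for path in glob.glob(pattern):
            os.remove(path)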
コード例 #21
0
    def train(self, max_number_of_epoch, model_folder, dropout_rate=0.5):
        # stats_graph_folder, experiment_timestamp = utils.create_stats_graph_folder(parameters)

        # Initialize and save execution details
        start_time = time.time()

        utils.create_folder_if_not_exists(model_folder)

        pickle.dump(self.dataset,
                    open(os.path.join(model_folder, 'dataset.pickle'), 'wb'))
        pickle.dump(
            self.parameters,
            open(os.path.join(model_folder, 'parameters.pickle'), 'wb'))

        bad_counter = 0  # number of epochs with no improvement on the validation set in terms of F1-score
        previous_best_valid_f1_score = -100
        epoch_number = -1

        while True:

            step = 0
            epoch_number += 1
            print('\nStarting epoch {0}'.format(epoch_number))

            epoch_start_time = time.time()

            if epoch_number != 0:
                # Train model: loop over all sequences of training set with shuffling
                sequence_numbers = list(
                    range(len(self.dataset.token_indices['train'])))
                random.shuffle(sequence_numbers)
                for sequence_number in sequence_numbers:
                    self.transition_params_trained = train.train_step(
                        self.sess, self.dataset, sequence_number, self.model,
                        dropout_rate)
                    step += 1
                    if step % 10 == 0:
                        print('Training {0:.2f}% done'.format(
                            step / len(sequence_numbers) * 100),
                              end='\r',
                              flush=True)

            epoch_elapsed_training_time = time.time() - epoch_start_time
            print('Training completed in {0:.2f} seconds'.format(
                epoch_elapsed_training_time),
                  flush=True)
            f1_score = {}
            for data_type in ['train', 'valid', 'test']:
                if data_type not in self.dataset.label_indices.keys():
                    continue
                _, _, f1_score[data_type] = train.evaluate_step(
                    sess=self.sess,
                    dataset_type=data_type,
                    dataset=self.dataset,
                    model=self.model,
                    transition_params_trained=self.transition_params_trained,
                    tagging_format=self.tagging_format)
            #     if epoch_number % 3 ==0:
            self.model.saver.save(self.sess,
                                  os.path.join(model_folder, 'model.ckpt'))
            if abs(f1_score['valid'][-2] - previous_best_valid_f1_score) < 0.1:
                bad_counter += 1
            else:
                bad_counter = 0
            if bad_counter > 10:
                break
            previous_best_valid_f1_score = f1_score['valid'][-2]
            if epoch_number > max_number_of_epoch:
                break
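Note that the stopping rule above differs from the patience logic elsewhere in this document: bad_counter grows whenever the validation F1 moves by less than 0.1 in either direction, so training stops on a plateau rather than on a lack of improvement, and previous_best_valid_f1_score is overwritten every epoch. Distilled into a sketch:

def plateau_step(valid_f1, previous_f1, bad_counter, tolerance=0.1):
    '''Return the updated bad_counter under the plateau rule used above.'''
    return bad_counter + 1 if abs(valid_f1 - previous_f1) < tolerance else 0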
コード例 #22
0
ファイル: main.py プロジェクト: liah-chan/progressiveNER
def main(argv=sys.argv):

    arguments = parse_arguments(argv[1:])

    parameters, conf_parameters = load_parameters(
        arguments['parameters_filepath'], arguments=arguments)
    dataset_filepaths, dataset_brat_folders = get_valid_dataset_filepaths(
        parameters)
    check_parameter_compatiblity(parameters, dataset_filepaths)

    # Load dataset
    dataset = ds.Dataset(verbose=parameters['verbose'],
                         debug=parameters['debug'])
    dataset.load_dataset(dataset_filepaths, parameters)

    # Create graph and session
    with tf.device('/gpu:0'):
        with tf.Graph().as_default():
            session_conf = tf.ConfigProto(
                intra_op_parallelism_threads=parameters[
                    'number_of_cpu_threads'],
                inter_op_parallelism_threads=parameters[
                    'number_of_cpu_threads'],
                device_count={
                    'CPU': 1,
                    'GPU': parameters['number_of_gpus']
                },
                allow_soft_placement=True,
                log_device_placement=False)

            sess = tf.Session(config=session_conf)

            with sess.as_default():
                start_time = time.time()
                experiment_timestamp = utils.get_current_time_in_miliseconds()
                results = {}
                results['epoch'] = {}
                results['execution_details'] = {}
                results['execution_details']['train_start'] = start_time
                results['execution_details']['time_stamp'] = experiment_timestamp
                results['execution_details']['early_stop'] = False
                results['execution_details']['keyboard_interrupt'] = False
                results['execution_details']['num_epochs'] = 0
                results['model_options'] = copy.copy(parameters)

                dataset_name = utils.get_basename_without_extension(
                    parameters['dataset_text_folder'])
                model_name = dataset_name
                utils.create_folder_if_not_exists(parameters['output_folder'])
                stats_graph_folder = os.path.join(
                    parameters['output_folder'],
                    model_name)  # Folder where to save graphs
                final_weights_folder = os.path.join(
                    parameters['output_folder'], 'weights')
                utils.create_folder_if_not_exists(stats_graph_folder)
                utils.create_folder_if_not_exists(final_weights_folder)
                model_folder = os.path.join(stats_graph_folder, 'model')
                utils.create_folder_if_not_exists(model_folder)
                with open(os.path.join(model_folder, 'parameters.ini'),
                          'w') as parameters_file:
                    conf_parameters.write(parameters_file)
                tensorboard_log_folder = os.path.join(stats_graph_folder,
                                                      'tensorboard_logs')
                utils.create_folder_if_not_exists(tensorboard_log_folder)
                tensorboard_log_folders = {}
                for dataset_type in dataset_filepaths.keys():
                    tensorboard_log_folders[dataset_type] = os.path.join(
                        stats_graph_folder, 'tensorboard_logs', dataset_type)
                    utils.create_folder_if_not_exists(
                        tensorboard_log_folders[dataset_type])
                pickle.dump(
                    dataset,
                    open(os.path.join(model_folder, 'dataset.pickle'), 'wb'))

                model = EntityLSTM(dataset, parameters)

                writers = {}
                for dataset_type in dataset_filepaths.keys():
                    writers[dataset_type] = tf.summary.FileWriter(
                        tensorboard_log_folders[dataset_type],
                        graph=sess.graph)
                embedding_writer = tf.summary.FileWriter(model_folder)

                embeddings_projector_config = projector.ProjectorConfig()
                tensorboard_token_embeddings = embeddings_projector_config.embeddings.add()
                tensorboard_token_embeddings.tensor_name = model.token_embedding_weights.name
                token_list_file_path = os.path.join(
                    model_folder, 'tensorboard_metadata_tokens.tsv')
                tensorboard_token_embeddings.metadata_path = os.path.relpath(
                    token_list_file_path, '..')

                tensorboard_character_embeddings = embeddings_projector_config.embeddings.add()
                tensorboard_character_embeddings.tensor_name = model.character_embedding_weights.name
                character_list_file_path = os.path.join(
                    model_folder, 'tensorboard_metadata_characters.tsv')
                tensorboard_character_embeddings.metadata_path = os.path.relpath(
                    character_list_file_path, '..')

                projector.visualize_embeddings(embedding_writer,
                                               embeddings_projector_config)

                token_list_file = codecs.open(token_list_file_path, 'w',
                                              'latin-1')
                for token_index in range(dataset.vocabulary_size):
                    token_list_file.write('{0}\n'.format(
                        dataset.index_to_token[token_index]))
                token_list_file.close()

                character_list_file = codecs.open(character_list_file_path,
                                                  'w', 'latin-1')
                for character_index in range(dataset.alphabet_size):
                    if character_index == dataset.PADDING_CHARACTER_INDEX:
                        character_list_file.write('PADDING\n')
                    else:
                        character_list_file.write('{0}\n'.format(
                            dataset.index_to_character[character_index]))
                character_list_file.close()

                # Initialize the model
                sess.run(tf.global_variables_initializer())
                if not parameters['use_pretrained_model']:
                    model.load_pretrained_token_embeddings(
                        sess, dataset, parameters)

                patience_counter = 0  # number of epochs with no improvement on the validation set in terms of F1-score
                f1_score_best = 0
                f1_scores = {'train-F1': [], 'valid-F1': [], 'test-F1': []}
                transition_params_trained = np.random.rand(
                    len(dataset.unique_labels) + 2,
                    len(dataset.unique_labels) + 2)
                model_saver = tf.train.Saver(
                    max_to_keep=parameters['num_of_model_to_keep']
                )  #, reshape= True)  # defaults to saving all variables
                epoch_number = -1
                try:
                    while True:
                        step = 0
                        epoch_number += 1
                        print('\nStarting epoch {0}'.format(epoch_number))

                        epoch_start_time = time.time()

                        if parameters['use_pretrained_model'] and epoch_number == 0:

                            if parameters['use_corrector']:
                                parameters['use_corrector'] = False
                                transition_params_trained = train.restore_pretrained_model(
                                    parameters, dataset, sess, model,
                                    model_saver)
                                print(
                                    'Getting the 3-label predictions from the step1 model.'
                                )
                                all_pred_labels, y_pred_for_corrector, y_true_for_corrector, \
                                output_filepaths = train.predict_labels(sess, model,
                                                                        transition_params_trained,
                                                                        parameters, dataset,
                                                                        epoch_number,
                                                                        stats_graph_folder,
                                                                        dataset_filepaths,
                                                                        for_corrector = True)
                                all_pred_indices = {}  #defaultdict(list)
                                for dataset_type in dataset_filepaths.keys():
                                    all_pred_indices[dataset_type] = []
                                    for i in range(len(all_pred_labels[dataset_type])):
                                        indices = [dataset.label_corrector_to_index[label]
                                                   for label in all_pred_labels[dataset_type][i]]
                                        all_pred_indices[dataset_type].append(indices)

                                label_binarizer_corrector = sklearn.preprocessing.LabelBinarizer()
                                label_binarizer_corrector.fit(
                                    range(max(dataset.index_to_label_corrector.keys()) + 1))
                                predicted_label_corrector_vector_indices = {}
                                for dataset_type in dataset_filepaths.keys():
                                    predicted_label_corrector_vector_indices[dataset_type] = []
                                    for label_indices_sequence in all_pred_indices[dataset_type]:
                                        predicted_label_corrector_vector_indices[dataset_type].append(
                                            label_binarizer_corrector.transform(label_indices_sequence))
                                parameters['use_corrector'] = True

                            transition_params_trained, model, glo_step = \
                                train.restore_model_parameters_from_pretrained_model(parameters, dataset, sess, model, model_saver)

                            for dataset_type in dataset_filepaths.keys():
                                writers[dataset_type] = tf.summary.FileWriter(
                                    tensorboard_log_folders[dataset_type],
                                    graph=sess.graph)
                                embedding_writer = tf.summary.FileWriter(
                                    model_folder)
                            init_new_vars_op = tf.initialize_variables(
                                [glo_step])
                            sess.run(init_new_vars_op)

                        elif epoch_number != 0:
                            sequence_numbers = list(
                                range(len(dataset.token_indices['train'])))
                            random.shuffle(sequence_numbers)
                            for sequence_number in sequence_numbers:
                                transition_params_trained, W_before_crf = train.train_step(
                                    sess, dataset, sequence_number, model,
                                    transition_params_trained, parameters)
                                step += 1

                        epoch_elapsed_training_time = time.time() - epoch_start_time
                        print('Training completed in {0:.2f} seconds'.format(
                            epoch_elapsed_training_time),
                              flush=False)
                        if parameters['use_corrector']:
                            original_label_corrector_vector_indices = dataset.label_corrector_vector_indices
                            dataset.label_corrector_vector_indices = predicted_label_corrector_vector_indices
                            y_pred, y_true, output_filepaths = train.predict_labels(
                                sess, model, transition_params_trained,
                                parameters, dataset, epoch_number,
                                stats_graph_folder, dataset_filepaths)

                            # Evaluate model: save and plot results
                            evaluate.evaluate_model(results, dataset, y_pred,
                                                    y_true, stats_graph_folder,
                                                    epoch_number,
                                                    epoch_start_time,
                                                    output_filepaths,
                                                    parameters)
                            dataset.label_corrector_vector_indices = original_label_corrector_vector_indices
                        else:
                            y_pred, y_true, output_filepaths = train.predict_labels(
                                sess, model, transition_params_trained,
                                parameters, dataset, epoch_number,
                                stats_graph_folder, dataset_filepaths)

                            # Evaluate model: save and plot results
                            evaluate.evaluate_model(results, dataset, y_pred,
                                                    y_true, stats_graph_folder,
                                                    epoch_number,
                                                    epoch_start_time,
                                                    output_filepaths,
                                                    parameters)

                        summary = sess.run(model.summary_op, feed_dict=None)
                        writers['train'].add_summary(summary, epoch_number)
                        writers['train'].flush()
                        utils.copytree(writers['train'].get_logdir(),
                                       model_folder)

                        # Early stopping
                        train_f1_score = results['epoch'][epoch_number][0][
                            'train']['f1_score']['micro']
                        valid_f1_score = results['epoch'][epoch_number][0][
                            'valid']['f1_score']['micro']
                        test_f1_score = results['epoch'][epoch_number][0][
                            'test']['f1_score']['micro']
                        f1_scores['train-F1'].append(train_f1_score)
                        f1_scores['valid-F1'].append(valid_f1_score)
                        f1_scores['test-F1'].append(test_f1_score)

                        if valid_f1_score > f1_score_best:
                            patience_counter = 0
                            f1_score_best = valid_f1_score
                            # Save the best model
                            model_saver.save(
                                sess,
                                os.path.join(model_folder, 'best_model.ckpt'))
                            print('Updated the best model at epoch {:d}'.format(epoch_number))
                            print('The model is saved in: {:s}'.format(model_folder))
                        else:
                            patience_counter += 1
                        print("In epoch {:d}, the valid F1 is : {:f}".format(
                            epoch_number, valid_f1_score))
                        print(
                            "The last {0} epochs have not shown improvements on the validation set."
                            .format(patience_counter))

                        if patience_counter >= parameters['patience']:
                            print('Early Stop!')
                            results['execution_details']['early_stop'] = True

                        if epoch_number >= parameters['maximum_number_of_epochs'] and parameters['refine_with_crf']:
                            model = train.refine_with_crf(
                                parameters, sess, model, model_saver)
                            print('refine model with CRF ...')

                            for additional_epoch in range(
                                    parameters['additional_epochs_with_crf']):
                                print('Additional {:d}th epoch'.format(
                                    additional_epoch))
                                sequence_numbers = list(
                                    range(len(dataset.token_indices['train'])))
                                random.shuffle(sequence_numbers)
                                for sequence_number in sequence_numbers:
                                    transition_params_trained, W_before_crf = train.train_step(
                                        sess, dataset, sequence_number, model,
                                        transition_params_trained, parameters)
                                    step += 1
                                epoch_elapsed_training_time = time.time() - epoch_start_time
                                print(
                                    'Additional training completed in {0:.2f} seconds'
                                    .format(epoch_elapsed_training_time),
                                    flush=False)

                                y_pred, y_true, output_filepaths = train.predict_labels(
                                    sess, model, transition_params_trained,
                                    parameters, dataset, epoch_number,
                                    stats_graph_folder, dataset_filepaths)

                                evaluate.evaluate_model(
                                    results, dataset, y_pred, y_true,
                                    stats_graph_folder, epoch_number,
                                    epoch_start_time, output_filepaths,
                                    parameters)

                                summary = sess.run(model.summary_op,
                                                   feed_dict=None)
                                writers['train'].add_summary(
                                    summary, epoch_number)
                                writers['train'].flush()
                                utils.copytree(writers['train'].get_logdir(),
                                               model_folder)

                        if epoch_number >= parameters['maximum_number_of_epochs'] and not parameters['refine_with_crf']:
                            break
                    if not parameters['use_pretrained_model']:
                        plot_name = 'F1-summary-step1.svg'
                    else:
                        plot_name = 'F1-summary-step2.svg'
                    for k, l in f1_scores.items():
                        print(k, l)
                    utils_plots.plot_f1(
                        f1_scores,
                        os.path.join(stats_graph_folder, '..', plot_name),
                        'F1 score summary')

                except KeyboardInterrupt:
                    results['execution_details']['keyboard_interrupt'] = True
                    print('Training interrupted')

                print('Finishing the experiment')
                end_time = time.time()
                results['execution_details']['train_duration'] = end_time - start_time
                results['execution_details']['train_end'] = end_time
                evaluate.save_results(results, stats_graph_folder)
                for dataset_type in dataset_filepaths.keys():
                    writers[dataset_type].close()

    sess.close()
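The corrector branch above one-hot encodes predicted label indices with scikit-learn's LabelBinarizer. In isolation, with toy data instead of the project's label set, the mechanism is:

import sklearn.preprocessing

label_binarizer = sklearn.preprocessing.LabelBinarizer()
label_binarizer.fit(range(5))                   # five label indices: 0..4
one_hot = label_binarizer.transform([0, 3, 1])  # shape (3, 5); row i one-hot encodes the i-th index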
コード例 #23
0
ファイル: main.py プロジェクト: Franck-Dernoncourt/testing
def main():

    parameters, conf_parameters = load_parameters()
    dataset_filepaths, dataset_brat_folders = get_valid_dataset_filepaths(parameters)
    check_parameter_compatiblity(parameters, dataset_filepaths)

    # Load dataset
    dataset = ds.Dataset(verbose=parameters['verbose'], debug=parameters['debug'])
    dataset.load_dataset(dataset_filepaths, parameters)

    # Create graph and session
    with tf.Graph().as_default():
        session_conf = tf.ConfigProto(
            intra_op_parallelism_threads=parameters['number_of_cpu_threads'],
            inter_op_parallelism_threads=parameters['number_of_cpu_threads'],
            device_count={'CPU': 1, 'GPU': parameters['number_of_gpus']},
            allow_soft_placement=True, # automatically choose an existing and supported device to run the operations in case the specified one doesn't exist
            log_device_placement=False
            )

        sess = tf.Session(config=session_conf)

        with sess.as_default():
            # Initialize and save execution details
            start_time = time.time()
            experiment_timestamp = utils.get_current_time_in_miliseconds()
            results = {}
            results['epoch'] = {}
            results['execution_details'] = {}
            results['execution_details']['train_start'] = start_time
            results['execution_details']['time_stamp'] = experiment_timestamp
            results['execution_details']['early_stop'] = False
            results['execution_details']['keyboard_interrupt'] = False
            results['execution_details']['num_epochs'] = 0
            results['model_options'] = copy.copy(parameters)

            dataset_name = utils.get_basename_without_extension(parameters['dataset_text_folder'])
            model_name = '{0}_{1}'.format(dataset_name, results['execution_details']['time_stamp'])

            output_folder=os.path.join('..', 'output')
            utils.create_folder_if_not_exists(output_folder)
            stats_graph_folder=os.path.join(output_folder, model_name) # Folder where to save graphs
            utils.create_folder_if_not_exists(stats_graph_folder)
            model_folder = os.path.join(stats_graph_folder, 'model')
            utils.create_folder_if_not_exists(model_folder)
            with open(os.path.join(model_folder, 'parameters.ini'), 'w') as parameters_file:
                conf_parameters.write(parameters_file)
            tensorboard_log_folder = os.path.join(stats_graph_folder, 'tensorboard_logs')
            utils.create_folder_if_not_exists(tensorboard_log_folder)
            tensorboard_log_folders = {}
            for dataset_type in dataset_filepaths.keys():
                tensorboard_log_folders[dataset_type] = os.path.join(stats_graph_folder, 'tensorboard_logs', dataset_type)
                utils.create_folder_if_not_exists(tensorboard_log_folders[dataset_type])
            pickle.dump(dataset, open(os.path.join(model_folder, 'dataset.pickle'), 'wb'))

            # Instantiate the model
            # graph initialization should be before FileWriter, otherwise the graph will not appear in TensorBoard
            model = EntityLSTM(dataset, parameters)

            # Instantiate the writers for TensorBoard
            writers = {}
            for dataset_type in dataset_filepaths.keys():
                writers[dataset_type] = tf.summary.FileWriter(tensorboard_log_folders[dataset_type], graph=sess.graph)
            embedding_writer = tf.summary.FileWriter(model_folder) # embedding_writer has to write in model_folder, otherwise TensorBoard won't be able to view embeddings

            embeddings_projector_config = projector.ProjectorConfig()
            tensorboard_token_embeddings = embeddings_projector_config.embeddings.add()
            tensorboard_token_embeddings.tensor_name = model.token_embedding_weights.name
            token_list_file_path = os.path.join(model_folder, 'tensorboard_metadata_tokens.tsv')
            tensorboard_token_embeddings.metadata_path = os.path.relpath(token_list_file_path, '..')

            tensorboard_character_embeddings = embeddings_projector_config.embeddings.add()
            tensorboard_character_embeddings.tensor_name = model.character_embedding_weights.name
            character_list_file_path = os.path.join(model_folder, 'tensorboard_metadata_characters.tsv')
            tensorboard_character_embeddings.metadata_path = os.path.relpath(character_list_file_path, '..')

            projector.visualize_embeddings(embedding_writer, embeddings_projector_config)

            # Write metadata for TensorBoard embeddings
            token_list_file = codecs.open(token_list_file_path,'w', 'UTF-8')
            for token_index in range(dataset.vocabulary_size):
                token_list_file.write('{0}\n'.format(dataset.index_to_token[token_index]))
            token_list_file.close()

            character_list_file = codecs.open(character_list_file_path,'w', 'UTF-8')
            for character_index in range(dataset.alphabet_size):
                if character_index == dataset.PADDING_CHARACTER_INDEX:
                    character_list_file.write('PADDING\n')
                else:
                    character_list_file.write('{0}\n'.format(dataset.index_to_character[character_index]))
            character_list_file.close()


            # Initialize the model
            sess.run(tf.global_variables_initializer())
            if not parameters['use_pretrained_model']:
                model.load_pretrained_token_embeddings(sess, dataset, parameters)

            # Start training + evaluation loop. Each iteration corresponds to 1 epoch.
            bad_counter = 0 # number of epochs with no improvement on the validation set in terms of F1-score
            previous_best_valid_f1_score = 0
            transition_params_trained = np.random.rand(len(dataset.unique_labels)+2,len(dataset.unique_labels)+2)
            model_saver = tf.train.Saver(max_to_keep=parameters['maximum_number_of_epochs'])  # defaults to saving all variables
            epoch_number = -1
            try:
                while True:
                    step = 0
                    epoch_number += 1
                    print('\nStarting epoch {0}'.format(epoch_number))

                    epoch_start_time = time.time()

                    if parameters['use_pretrained_model'] and epoch_number == 0:
                        # Restore pretrained model parameters
                        transition_params_trained = train.restore_model_parameters_from_pretrained_model(parameters, dataset, sess, model, model_saver)
                    elif epoch_number != 0:
                        # Train model: loop over all sequences of training set with shuffling
                        sequence_numbers=list(range(len(dataset.token_indices['train'])))
                        random.shuffle(sequence_numbers)
                        for sequence_number in sequence_numbers:
                            transition_params_trained = train.train_step(sess, dataset, sequence_number, model, transition_params_trained, parameters)
                            step += 1
                            if step % 10 == 0:
                                print('Training {0:.2f}% done'.format(step/len(sequence_numbers)*100), end='\r', flush=True)

                    epoch_elapsed_training_time = time.time() - epoch_start_time
                    print('Training completed in {0:.2f} seconds'.format(epoch_elapsed_training_time), flush=True)

                    y_pred, y_true, output_filepaths = train.predict_labels(sess, model, transition_params_trained, parameters, dataset, epoch_number, stats_graph_folder, dataset_filepaths)

                    # Evaluate model: save and plot results
                    evaluate.evaluate_model(results, dataset, y_pred, y_true, stats_graph_folder, epoch_number, epoch_start_time, output_filepaths, parameters)

                    if parameters['use_pretrained_model'] and not parameters['train_model']:
                        conll_to_brat.output_brat(output_filepaths, dataset_brat_folders, stats_graph_folder)
                        break

                    # Save model
                    model_saver.save(sess, os.path.join(model_folder, 'model_{0:05d}.ckpt'.format(epoch_number)))

                    # Save TensorBoard logs
                    summary = sess.run(model.summary_op, feed_dict=None)
                    writers['train'].add_summary(summary, epoch_number)
                    writers['train'].flush()
                    utils.copytree(writers['train'].get_logdir(), model_folder)


                    # Early stop
                    valid_f1_score = results['epoch'][epoch_number][0]['valid']['f1_score']['micro']
                    if  valid_f1_score > previous_best_valid_f1_score:
                        bad_counter = 0
                        previous_best_valid_f1_score = valid_f1_score
                        conll_to_brat.output_brat(output_filepaths, dataset_brat_folders, stats_graph_folder, overwrite=True)
                    else:
                        bad_counter += 1
                    print("The last {0} epochs have not shown improvements on the validation set.".format(bad_counter))

                    if bad_counter >= parameters['patience']:
                        print('Early Stop!')
                        results['execution_details']['early_stop'] = True
                        break

                    if epoch_number >= parameters['maximum_number_of_epochs']: break


            except KeyboardInterrupt:
                results['execution_details']['keyboard_interrupt'] = True
                print('Training interrupted')

            print('Finishing the experiment')
            end_time = time.time()
            results['execution_details']['train_duration'] = end_time - start_time
            results['execution_details']['train_end'] = end_time
            evaluate.save_results(results, stats_graph_folder)
        sess.close()  # release the session's resources
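
The loop above keeps a bad_counter of epochs without improvement in validation micro-F1 and stops once it reaches the patience threshold. A minimal, framework-free sketch of that pattern, assuming a hypothetical run_epoch callback that trains one epoch and returns the validation F1-score:

def train_with_patience(run_epoch, patience, max_epochs):
    best_f1 = 0.0
    bad_counter = 0  # epochs since the last improvement on the validation set
    for epoch_number in range(max_epochs + 1):
        valid_f1_score = run_epoch(epoch_number)
        if valid_f1_score > best_f1:
            best_f1 = valid_f1_score
            bad_counter = 0  # improvement: reset the counter
        else:
            bad_counter += 1
        if bad_counter >= patience:
            print('Early Stop!')
            break
    return best_f1
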
Code example #24
0
File: main.py Project: vasswhite/NeuroNER
def main():

    #### Parameters - start
    conf_parameters = configparser.ConfigParser()
    conf_parameters.read(os.path.join('.', 'parameters.ini'))
    nested_parameters = utils.convert_configparser_to_dictionary(
        conf_parameters)
    parameters = {}
    for k, v in nested_parameters.items():
        parameters.update(v)
    for k, v in parameters.items():
        if k in [
                'remove_unknown_tokens', 'character_embedding_dimension',
                'character_lstm_hidden_state_dimension',
                'token_embedding_dimension',
                'token_lstm_hidden_state_dimension', 'patience',
                'maximum_number_of_epochs', 'maximum_training_time',
                'number_of_cpu_threads', 'number_of_gpus'
        ]:
            parameters[k] = int(v)
        if k in ['dropout_rate']:
            parameters[k] = float(v)
        if k in [
                'use_character_lstm', 'is_character_lstm_bidirect',
                'is_token_lstm_bidirect', 'use_crf'
        ]:
            parameters[k] = distutils.util.strtobool(v)
    pprint(parameters)

    # Load dataset
    dataset_filepaths = {}
    dataset_filepaths['train'] = os.path.join(
        parameters['dataset_text_folder'], 'train.txt')
    dataset_filepaths['valid'] = os.path.join(
        parameters['dataset_text_folder'], 'valid.txt')
    dataset_filepaths['test'] = os.path.join(parameters['dataset_text_folder'],
                                             'test.txt')
    dataset = ds.Dataset()
    dataset.load_dataset(dataset_filepaths, parameters)

    with tf.Graph().as_default():
        session_conf = tf.ConfigProto(
            device_count={
                'CPU': 1,
                'GPU': 1
            },
            allow_soft_placement=True,  # automatically choose an existing and supported device to run the operations in case the specified one doesn't exist
            log_device_placement=False)

        sess = tf.Session(config=session_conf)

        with sess.as_default():
            # Instantiate model
            model = EntityLSTM(dataset, parameters)
            sess.run(tf.global_variables_initializer())
            model.load_pretrained_token_embeddings(sess, dataset, parameters)

            # Initialize and save execution details
            start_time = time.time()
            experiment_timestamp = utils.get_current_time_in_miliseconds()
            results = {}
            #results['model_options'] = copy.copy(model_options)
            #results['model_options'].pop('optimizer', None)
            results['epoch'] = {}
            results['execution_details'] = {}
            results['execution_details']['train_start'] = start_time
            results['execution_details']['time_stamp'] = experiment_timestamp
            results['execution_details']['early_stop'] = False
            results['execution_details']['keyboard_interrupt'] = False
            results['execution_details']['num_epochs'] = 0
            results['model_options'] = copy.copy(parameters)

            dataset_name = utils.get_basename_without_extension(
                parameters['dataset_text_folder']
            )  #opts.train.replace('/', '_').split('.')[0] # 'conll2003en'
            model_name = '{0}_{1}'.format(
                dataset_name, results['execution_details']['time_stamp'])

            output_folder = os.path.join('..', 'output')
            utils.create_folder_if_not_exists(output_folder)
            stats_graph_folder = os.path.join(
                output_folder, model_name)  # Folder where to save graphs
            #print('stats_graph_folder: {0}'.format(stats_graph_folder))
            utils.create_folder_if_not_exists(stats_graph_folder)
            #             model_folder = os.path.join(stats_graph_folder, 'model')
            #             utils.create_folder_if_not_exists(model_folder)

            step = 0
            bad_counter = 0
            previous_best_valid_f1_score = 0
            transition_params_trained = np.random.rand(
                len(dataset.unique_labels), len(dataset.unique_labels))
            try:
                while True:
                    epoch_number = math.floor(
                        step / len(dataset.token_indices['train']))
                    print('\nStarting epoch {0}'.format(epoch_number), end='')

                    epoch_start_time = time.time()
                    #print('step: {0}'.format(step))

                    # Train model: loop over all sequences of training set with shuffling
                    sequence_numbers = list(
                        range(len(dataset.token_indices['train'])))
                    random.shuffle(sequence_numbers)
                    for sequence_number in sequence_numbers:
                        transition_params_trained = train.train_step(
                            sess, dataset, sequence_number, model,
                            transition_params_trained, parameters)
                        step += 1
                        if step % 100 == 0:
                            print('.', end='', flush=True)
                            #break
                    print('.', flush=True)
                    #print('step: {0}'.format(step))

                    # Predict labels using trained model
                    all_predictions = {}
                    all_y_true = {}
                    output_filepaths = {}
                    for dataset_type in ['train', 'valid', 'test']:
                        #print('dataset_type:     {0}'.format(dataset_type))
                        prediction_output = train.prediction_step(
                            sess, dataset, dataset_type, model,
                            transition_params_trained, step,
                            stats_graph_folder, epoch_number, parameters)
                        all_predictions[dataset_type], all_y_true[
                            dataset_type], output_filepaths[
                                dataset_type] = prediction_output
#                         model_options = None

                    epoch_elapsed_training_time = time.time() - epoch_start_time
                    print(
                        'epoch_elapsed_training_time: {0:.2f} seconds'.format(
                            epoch_elapsed_training_time))

                    results['execution_details']['num_epochs'] = epoch_number

                    # Evaluate model: save and plot results
                    evaluate.evaluate_model(results, dataset, all_predictions,
                                            all_y_true, stats_graph_folder,
                                            epoch_number, epoch_start_time,
                                            output_filepaths)

                    # Early stop
                    valid_f1_score = results['epoch'][epoch_number][0][
                        'valid']['f1_score']['micro']
                    if valid_f1_score > previous_best_valid_f1_score:
                        bad_counter = 0
                        previous_best_valid_f1_score = valid_f1_score
                    else:
                        bad_counter += 1

                    if bad_counter > parameters['patience']:
                        print('Early Stop!')
                        results['execution_details']['early_stop'] = True
                        break

                    if epoch_number > parameters['maximum_number_of_epochs']:
                        break


#                     break # debugging

            except KeyboardInterrupt:
                results['execution_details']['keyboard_interrupt'] = True
                #         assess_model.save_results(results, stats_graph_folder)
                print('Training interrupted')

            print('Finishing the experiment')
            end_time = time.time()
            results['execution_details'][
                'train_duration'] = end_time - start_time
            results['execution_details']['train_end'] = end_time
            evaluate.save_results(results, stats_graph_folder)

    sess.close()  # release the session's resources
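
Several of the examples on this page repeat the same TensorBoard projector wiring almost verbatim. A condensed sketch of that setup, assuming the TF1-era contrib projector API used above, an existing tf.Variable embedding_var, and a metadata file already written inside log_dir:

import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector

def link_embeddings_for_tensorboard(embedding_var, log_dir, metadata_path):
    # The writer must point at the folder that also holds the model checkpoints,
    # otherwise TensorBoard cannot locate the embeddings.
    writer = tf.summary.FileWriter(log_dir)
    config = projector.ProjectorConfig()
    embedding = config.embeddings.add()
    embedding.tensor_name = embedding_var.name
    embedding.metadata_path = metadata_path  # one label per line, in index order
    projector.visualize_embeddings(writer, config)
    return writer
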
Code example #25
0
  
    loss_metric.reset_states()
  
    # Iterate over the batches of the dataset.
    for step, x_batch_train in enumerate(train_dataset):

      Rinnumpy = x_batch_train[0].numpy()
      Idx = comput_inter_list_2d(Rinnumpy, L,  radious, max_num_neighs)
      neigh_list = tf.Variable(Idx)

      # print(neigh_list.shape)
      # print(x_batch_train[0].shape)
      # print(x_batch_train[1].shape)

      loss = train_step(model, optimizer, loss_cross,
                        x_batch_train[0], neigh_list,
                        x_batch_train[1])
      loss_metric(loss)
  
      if step % 100 == 0:
        print('step %s: mean loss = %s' % (step, str(loss_metric.result().numpy())))

    # mean loss saved in the metric
    meanLossStr = str(loss_metric.result().numpy())
    # learning rate using the decay 
    lrStr = str(optimizer._decayed_lr('float32').numpy())
    print('epoch %s: mean loss = %s  learning rate = %s'%(epoch,
                                                          meanLossStr,
                                                          lrStr))

  print("saving the weights")
Code example #26
0
File: test_model.py Project: phamngoclinh96/ML
try:
    while True:

        step = 0
        epoch_number += 1
        print('\nStarting epoch {0}'.format(epoch_number))

        epoch_start_time = time.time()

        if epoch_number != 0:
            # Train model: loop over all sequences of training set with shuffling
            sequence_numbers = list(range(len(dataset.token_indices['train'])))
            random.shuffle(sequence_numbers)
            for sequence_number in sequence_numbers:
                transition_params_trained = train.train_step(
                    sess, dataset, sequence_number, model,
                    parameters['dropout_rate'])
                step += 1
                if step % 10 == 0:
                    print('Training {0:.2f}% done'.format(
                        step / len(sequence_numbers) * 100),
                          end='\r',
                          flush=True)

        epoch_elapsed_training_time = time.time() - epoch_start_time
        print('Training completed in {0:.2f} seconds'.format(
            epoch_elapsed_training_time),
              flush=True)

        y_pred, y_true, output_filepaths = train.predict_labels_lite(
            sess=sess,
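
This snippet is cut off mid-call in the source, but the training pass it shows is the one shared by all the NeuroNER variants here: shuffle the sequence indices, train on each in turn, and print periodic progress. A framework-free sketch, with a hypothetical train_one_sequence callback in place of train.train_step:

import random

def run_training_epoch(num_sequences, train_one_sequence):
    sequence_numbers = list(range(num_sequences))
    random.shuffle(sequence_numbers)  # visit the training sequences in random order
    for step, sequence_number in enumerate(sequence_numbers, start=1):
        train_one_sequence(sequence_number)
        if step % 10 == 0:
            print('Training {0:.2f}% done'.format(step / num_sequences * 100),
                  end='\r', flush=True)
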
Code example #27
0
File: neuroner.py Project: saikswaroop/NeuroNER
    def fit(self):
        parameters = self.parameters
        conf_parameters = self.conf_parameters
        dataset_filepaths = self.dataset_filepaths
        dataset = self.dataset
        dataset_brat_folders = self.dataset_brat_folders
        sess = self.sess
        model = self.model
        transition_params_trained = self.transition_params_trained
        stats_graph_folder, experiment_timestamp = self._create_stats_graph_folder(parameters)

        # Initialize and save execution details
        start_time = time.time()
        results = {}
        results['epoch'] = {}
        results['execution_details'] = {}
        results['execution_details']['train_start'] = start_time
        results['execution_details']['time_stamp'] = experiment_timestamp
        results['execution_details']['early_stop'] = False
        results['execution_details']['keyboard_interrupt'] = False
        results['execution_details']['num_epochs'] = 0
        results['model_options'] = copy.copy(parameters)

        model_folder = os.path.join(stats_graph_folder, 'model')
        utils.create_folder_if_not_exists(model_folder)
        with open(os.path.join(model_folder, 'parameters.ini'), 'w') as parameters_file:
            conf_parameters.write(parameters_file)
        pickle.dump(dataset, open(os.path.join(model_folder, 'dataset.pickle'), 'wb'))
            
        tensorboard_log_folder = os.path.join(stats_graph_folder, 'tensorboard_logs')
        utils.create_folder_if_not_exists(tensorboard_log_folder)
        tensorboard_log_folders = {}
        for dataset_type in dataset_filepaths.keys():
            tensorboard_log_folders[dataset_type] = os.path.join(stats_graph_folder, 'tensorboard_logs', dataset_type)
            utils.create_folder_if_not_exists(tensorboard_log_folders[dataset_type])
                
        # Instantiate the writers for TensorBoard
        writers = {}
        for dataset_type in dataset_filepaths.keys():
            writers[dataset_type] = tf.summary.FileWriter(tensorboard_log_folders[dataset_type], graph=sess.graph)
        embedding_writer = tf.summary.FileWriter(model_folder) # embedding_writer has to write in model_folder, otherwise TensorBoard won't be able to view embeddings

        embeddings_projector_config = projector.ProjectorConfig()
        tensorboard_token_embeddings = embeddings_projector_config.embeddings.add()
        tensorboard_token_embeddings.tensor_name = model.token_embedding_weights.name
        token_list_file_path = os.path.join(model_folder, 'tensorboard_metadata_tokens.tsv')
        tensorboard_token_embeddings.metadata_path = os.path.relpath(token_list_file_path, '..')

        tensorboard_character_embeddings = embeddings_projector_config.embeddings.add()
        tensorboard_character_embeddings.tensor_name = model.character_embedding_weights.name
        character_list_file_path = os.path.join(model_folder, 'tensorboard_metadata_characters.tsv')
        tensorboard_character_embeddings.metadata_path = os.path.relpath(character_list_file_path, '..')

        projector.visualize_embeddings(embedding_writer, embeddings_projector_config)

        # Write metadata for TensorBoard embeddings
        token_list_file = codecs.open(token_list_file_path,'w', 'UTF-8')
        for token_index in range(dataset.vocabulary_size):
            token_list_file.write('{0}\n'.format(dataset.index_to_token[token_index]))
        token_list_file.close()

        character_list_file = codecs.open(character_list_file_path,'w', 'UTF-8')
        for character_index in range(dataset.alphabet_size):
            if character_index == dataset.PADDING_CHARACTER_INDEX:
                character_list_file.write('PADDING\n')
            else:
                character_list_file.write('{0}\n'.format(dataset.index_to_character[character_index]))
        character_list_file.close()


        # Start training + evaluation loop. Each iteration corresponds to 1 epoch.
        bad_counter = 0 # number of epochs with no improvement on the validation set in terms of F1-score
        previous_best_valid_f1_score = 0
        epoch_number = -1
        try:
            while True:
                step = 0
                epoch_number += 1
                print('\nStarting epoch {0}'.format(epoch_number))

                epoch_start_time = time.time()

                if epoch_number != 0:
                    # Train model: loop over all sequences of training set with shuffling
                    sequence_numbers=list(range(len(dataset.token_indices['train'])))
                    random.shuffle(sequence_numbers)
                    for sequence_number in sequence_numbers:
                        transition_params_trained = train.train_step(sess, dataset, sequence_number, model, parameters)
                        step += 1
                        if step % 10 == 0:
                            print('Training {0:.2f}% done'.format(step/len(sequence_numbers)*100), end='\r', flush=True)

                epoch_elapsed_training_time = time.time() - epoch_start_time
                print('Training completed in {0:.2f} seconds'.format(epoch_elapsed_training_time), flush=True)

                y_pred, y_true, output_filepaths = train.predict_labels(sess, model, transition_params_trained, parameters, dataset, epoch_number, stats_graph_folder, dataset_filepaths)

                # Evaluate model: save and plot results
                evaluate.evaluate_model(results, dataset, y_pred, y_true, stats_graph_folder, epoch_number, epoch_start_time, output_filepaths, parameters)

                if parameters['use_pretrained_model'] and not parameters['train_model']:
                    conll_to_brat.output_brat(output_filepaths, dataset_brat_folders, stats_graph_folder)
                    break

                # Save model
                model.saver.save(sess, os.path.join(model_folder, 'model_{0:05d}.ckpt'.format(epoch_number)))

                # Save TensorBoard logs
                summary = sess.run(model.summary_op, feed_dict=None)
                writers['train'].add_summary(summary, epoch_number)
                writers['train'].flush()
                utils.copytree(writers['train'].get_logdir(), model_folder)


                # Early stop
                valid_f1_score = results['epoch'][epoch_number][0]['valid']['f1_score']['micro']
                if  valid_f1_score > previous_best_valid_f1_score:
                    bad_counter = 0
                    previous_best_valid_f1_score = valid_f1_score
                    conll_to_brat.output_brat(output_filepaths, dataset_brat_folders, stats_graph_folder, overwrite=True)
                    self.transition_params_trained = transition_params_trained
                else:
                    bad_counter += 1
                print("The last {0} epochs have not shown improvements on the validation set.".format(bad_counter))

                if bad_counter >= parameters['patience']:
                    print('Early Stop!')
                    results['execution_details']['early_stop'] = True
                    break

                if epoch_number >= parameters['maximum_number_of_epochs']: break


        except KeyboardInterrupt:
            results['execution_details']['keyboard_interrupt'] = True
            print('Training interrupted')

        print('Finishing the experiment')
        end_time = time.time()
        results['execution_details']['train_duration'] = end_time - start_time
        results['execution_details']['train_end'] = end_time
        evaluate.save_results(results, stats_graph_folder)
        for dataset_type in dataset_filepaths.keys():
            writers[dataset_type].close()
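
The token and character metadata files written in fit() share one shape: one label per line, in embedding-index order, with the padding index replaced by a readable marker. A small sketch of that writer (the function and parameter names are illustrative):

import codecs

def write_projector_metadata(path, index_to_item, size, padding_index=None):
    with codecs.open(path, 'w', 'UTF-8') as metadata_file:
        for index in range(size):
            if index == padding_index:
                metadata_file.write('PADDING\n')  # readable label for the padding slot
            else:
                metadata_file.write('{0}\n'.format(index_to_item[index]))
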
Code example #28
0
File: main.py Project: braemy/NeuroNER
def main():

    parameters, conf_parameters = load_parameters()
    pprint(parameters)
    dataset_filepaths = get_valid_dataset_filepaths(parameters)
    check_parameter_compatiblity(parameters, dataset_filepaths)

    cross_validation = parameters[
        'cross_validation'] if 'cross_validation' in parameters else 1
    valid_fscores = []
    valid_precisions = []
    valid_recalls = []
    for cv in range(0, cross_validation):
        if "als" in dataset_filepaths['train'] and cross_validation > 1:
            train_files = list(range(0, cv)) + list(
                range(cv + 1, cross_validation))
            test_file = cv
            file_train = "tmp_combined.train"
            file_valid = "tmp_combined.test"
            output = []
            for i in train_files:
                with open(dataset_filepaths['train'] + "_" + str(i),
                          "r",
                          encoding="utf-8") as file:
                    output.append(file.read())
            with open(file_train, "w", encoding="utf-8") as file:
                file.write("\n\n".join(output))
            output = []
            with open(dataset_filepaths['train'] + "_" + str(test_file),
                      "r",
                      encoding="utf-8") as file:
                output.append(file.read())
            with open(file_valid, "w", encoding="utf-8") as file:
                file.write("\n\n".join(output))
            dataset_filepaths['train'] = file_train
            dataset_filepaths['valid'] = file_valid
        # Load dataset
        dataset = ds.Dataset(verbose=parameters['verbose'],
                             debug=parameters['debug'])
        dataset.load_vocab_word_embeddings(parameters)
        dataset.load_dataset(dataset_filepaths, parameters)

        # Create graph and session
        with tf.Graph().as_default():
            session_conf = tf.ConfigProto(
                intra_op_parallelism_threads=parameters[
                    'number_of_cpu_threads'],
                inter_op_parallelism_threads=parameters[
                    'number_of_cpu_threads'],
                device_count={
                    'CPU': 1,
                    'GPU': parameters['number_of_gpus']
                },
                allow_soft_placement=True,  # automatically choose an existing and supported device to run the operations in case the specified one doesn't exist
                log_device_placement=False)

            session_conf.gpu_options.allow_growth = True

            sess = tf.Session(config=session_conf)

            with sess.as_default():
                # Initialize and save execution details
                start_time = time.time()
                experiment_timestamp = utils.get_current_time_in_miliseconds()
                results = {}
                results['epoch'] = {}
                results['execution_details'] = {}
                results['execution_details']['train_start'] = start_time
                results['execution_details'][
                    'time_stamp'] = experiment_timestamp
                results['execution_details']['early_stop'] = False
                results['execution_details']['keyboard_interrupt'] = False
                results['execution_details']['num_epochs'] = 0
                results['model_options'] = copy.copy(parameters)

                dataset_name = utils.get_basename_without_extension(
                    parameters['dataset_train'])
                if 'data_to_use' in parameters:
                    model_name = '{0}_{1}'.format(
                        parameters['language'] + "_" + dataset_name + "_small",
                        results['execution_details']['time_stamp'])
                else:
                    model_name = '{0}_{1}'.format(
                        parameters['language'] + "_" + dataset_name,
                        results['execution_details']['time_stamp'])

                output_folder = os.path.join('..', 'output')
                utils.create_folder_if_not_exists(output_folder)
                stats_graph_folder = os.path.join(
                    output_folder, model_name)  # Folder where to save graphs
                utils.create_folder_if_not_exists(stats_graph_folder)
                model_folder = os.path.join(stats_graph_folder, 'model')
                utils.create_folder_if_not_exists(model_folder)
                with open(os.path.join(model_folder, 'parameters.ini'),
                          'w') as parameters_file:
                    conf_parameters.write(parameters_file)
                tensorboard_log_folder = os.path.join(stats_graph_folder,
                                                      'tensorboard_logs')
                utils.create_folder_if_not_exists(tensorboard_log_folder)
                tensorboard_log_folders = {}
                for dataset_type in dataset_filepaths.keys():
                    tensorboard_log_folders[dataset_type] = os.path.join(
                        stats_graph_folder, 'tensorboard_logs', dataset_type)
                    utils.create_folder_if_not_exists(
                        tensorboard_log_folders[dataset_type])
                #del dataset.embeddings_matrix
                if not parameters['use_pretrained_model']:
                    pickle.dump(
                        dataset,
                        open(os.path.join(model_folder, 'dataset.pickle'),
                             'wb'))
                #dataset.load_pretrained_word_embeddings(parameters)
                # Instantiate the model
                # graph initialization should be before FileWriter, otherwise the graph will not appear in TensorBoard
                model = EntityLSTM(dataset, parameters)

                # Instantiate the writers for TensorBoard
                writers = {}
                for dataset_type in dataset_filepaths.keys():
                    writers[dataset_type] = tf.summary.FileWriter(
                        tensorboard_log_folders[dataset_type],
                        graph=sess.graph)
                embedding_writer = tf.summary.FileWriter(
                    model_folder
                )  # embedding_writer has to write in model_folder, otherwise TensorBoard won't be able to view embeddings

                embeddings_projector_config = projector.ProjectorConfig()
                tensorboard_token_embeddings = embeddings_projector_config.embeddings.add()
                tensorboard_token_embeddings.tensor_name = model.token_embedding_weights.name
                token_list_file_path = os.path.join(
                    model_folder, 'tensorboard_metadata_tokens.tsv')
                tensorboard_token_embeddings.metadata_path = os.path.relpath(
                    token_list_file_path, '..')

                if parameters['use_character_lstm']:
                    tensorboard_character_embeddings = embeddings_projector_config.embeddings.add()
                    tensorboard_character_embeddings.tensor_name = model.character_embedding_weights.name
                    character_list_file_path = os.path.join(
                        model_folder, 'tensorboard_metadata_characters.tsv')
                    tensorboard_character_embeddings.metadata_path = os.path.relpath(
                        character_list_file_path, '..')

                projector.visualize_embeddings(embedding_writer,
                                               embeddings_projector_config)

                # Write metadata for TensorBoard embeddings
                token_list_file = codecs.open(token_list_file_path, 'w',
                                              'UTF-8')
                for token_index in range(len(dataset.index_to_token)):
                    token_list_file.write('{0}\n'.format(
                        dataset.index_to_token[token_index]))
                token_list_file.close()

                if parameters['use_character_lstm']:
                    character_list_file = codecs.open(character_list_file_path,
                                                      'w', 'UTF-8')
                    for character_index in range(dataset.alphabet_size):
                        if character_index == dataset.PADDING_CHARACTER_INDEX:
                            character_list_file.write('PADDING\n')
                        else:
                            character_list_file.write('{0}\n'.format(
                                dataset.index_to_character[character_index]))
                    character_list_file.close()

                try:
                    # Initialize the model
                    sess.run(tf.global_variables_initializer())
                    if not parameters['use_pretrained_model']:
                        model.load_pretrained_token_embeddings(
                            sess, dataset, parameters)

                    # Start training + evaluation loop. Each iteration corresponds to 1 epoch.
                    bad_counter = 0  # number of epochs with no improvement on the validation set in terms of F1-score
                    previous_best_valid_f1_score = 0
                    transition_params_trained = np.random.rand(
                        len(dataset.unique_labels), len(dataset.unique_labels)
                    )  #TODO np.random.rand(len(dataset.unique_labels)+2,len(dataset.unique_labels)+2)
                    model_saver = tf.train.Saver(
                        max_to_keep=None
                    )  #parameters['maximum_number_of_epochs'])  # defaults to saving all variables
                    epoch_number = 0

                    while True:
                        epoch_number += 1
                        print('\nStarting epoch {0}'.format(epoch_number))

                        epoch_start_time = time.time()

                        if parameters[
                                'use_pretrained_model'] and epoch_number == 1:
                            # Restore pretrained model parameters
                            transition_params_trained = train.restore_model_parameters_from_pretrained_model(
                                parameters, dataset, sess, model, model_saver)
                        elif epoch_number != 0:
                            # Train model: loop over all sequences of training set with shuffling
                            sequence_numbers = list(
                                range(len(dataset.token_indices['train'])))
                            random.shuffle(sequence_numbers)
                            data_counter = 0
                            sub_id = 0
                            for i in tqdm(range(0, len(sequence_numbers),
                                                parameters['batch_size']),
                                          "Training",
                                          mininterval=1):
                                data_counter += parameters['batch_size']
                                if data_counter >= 20000:
                                    data_counter = 0
                                    sub_id += 0.001
                                    print("Intermediate evaluation number: ",
                                          sub_id)

                                    #model_saver.save(sess,
                                    #                 os.path.join(model_folder, 'model_{0:05d}_{1}.ckpt'.format(epoch_number, len(sequence_numbers)/4/len(sequence_numbers))))
                                    epoch_elapsed_training_time = time.time() - epoch_start_time
                                    print(
                                        'Training completed in {0:.2f} seconds'
                                        .format(epoch_elapsed_training_time),
                                        flush=True)

                                    y_pred, y_true, output_filepaths = train.predict_labels(
                                        sess, model, transition_params_trained,
                                        parameters, dataset,
                                        epoch_number + sub_id,
                                        stats_graph_folder, dataset_filepaths)

                                    # Evaluate model: save and plot results
                                    evaluate.evaluate_model(
                                        results, dataset, y_pred, y_true,
                                        stats_graph_folder, epoch_number,
                                        epoch_start_time, output_filepaths,
                                        parameters)

                                    # Save model
                                    model_saver.save(
                                        sess,
                                        os.path.join(
                                            model_folder,
                                            'model_{0:07.3f}.ckpt'.format(
                                                epoch_number + sub_id)))

                                    # Save TensorBoard logs
                                    summary = sess.run(model.summary_op,
                                                       feed_dict=None)
                                    writers['train'].add_summary(
                                        summary, epoch_number)
                                    writers['train'].flush()
                                    utils.copytree(
                                        writers['train'].get_logdir(),
                                        model_folder)

                                    # Early stop
                                    valid_f1_score = results['epoch'][
                                        epoch_number][0]['valid']['f1_score'][
                                            'micro']
                                    # valid_precision = results['epoch'][epoch_number][0]['valid']['precision']['micro']
                                    # valid_recall = results['epoch'][epoch_number][0]['valid']['recall']['micro']

                                    # valid_fscores.append(valid_f1_score)
                                    if valid_f1_score > previous_best_valid_f1_score:
                                        bad_counter = 0
                                        previous_best_valid_f1_score = valid_f1_score
                                        # previous_best_valid_precision = valid_precision
                                        # previous_best_valid_recall = valid_recall
                                    else:
                                        bad_counter += 1

                                sequence_number = sequence_numbers[
                                    i:i + parameters['batch_size']]
                                transition_params_trained, loss = train.train_step(
                                    sess, dataset, sequence_number, model,
                                    transition_params_trained, parameters)
                        epoch_elapsed_training_time = time.time() - epoch_start_time
                        print('Training completed in {0:.2f} seconds'.format(
                            epoch_elapsed_training_time),
                              flush=True)

                        y_pred, y_true, output_filepaths = train.predict_labels(
                            sess, model, transition_params_trained, parameters,
                            dataset, epoch_number, stats_graph_folder,
                            dataset_filepaths)

                        # Evaluate model: save and plot results
                        evaluate.evaluate_model(results, dataset, y_pred,
                                                y_true, stats_graph_folder,
                                                epoch_number, epoch_start_time,
                                                output_filepaths, parameters)

                        # Save model
                        model_saver.save(
                            sess,
                            os.path.join(
                                model_folder,
                                'model_{0:05d}.ckpt'.format(epoch_number)))

                        # Save TensorBoard logs
                        summary = sess.run(model.summary_op, feed_dict=None)
                        writers['train'].add_summary(summary, epoch_number)
                        writers['train'].flush()
                        utils.copytree(writers['train'].get_logdir(),
                                       model_folder)

                        # Early stop
                        valid_f1_score = results['epoch'][epoch_number][0][
                            'valid']['f1_score']['micro']
                        #valid_precision = results['epoch'][epoch_number][0]['valid']['precision']['micro']
                        #valid_recall = results['epoch'][epoch_number][0]['valid']['recall']['micro']

                        #valid_fscores.append(valid_f1_score)
                        if valid_f1_score > previous_best_valid_f1_score:
                            bad_counter = 0
                            previous_best_valid_f1_score = valid_f1_score
                            #previous_best_valid_precision = valid_precision
                            #previous_best_valid_recall = valid_recall
                        else:
                            bad_counter += 1
                        print(
                            "The last {0} epochs have not shown improvements on the validation set."
                            .format(bad_counter))

                        if bad_counter >= parameters['patience']:
                            print('Early Stop!')
                            results['execution_details']['early_stop'] = True
                            break

                        if epoch_number >= parameters[
                                'maximum_number_of_epochs']:
                            break

                except KeyboardInterrupt:
                    results['execution_details']['keyboard_interrupt'] = True
                    print('Training interrupted')
                    # remove the experiment
                    remove_experiment = input(
                        "Do you want to remove the experiment? (yes/y/Yes)")
                    if remove_experiment in ["Yes", "yes", "y"]:
                        shutil.rmtree(stats_graph_folder)
                        print("Folder removed")
                    else:
                        print('Finishing the experiment')
                        end_time = time.time()
                        results['execution_details'][
                            'train_duration'] = end_time - start_time
                        results['execution_details']['train_end'] = end_time
                        evaluate.save_results(results, stats_graph_folder)
                except Exception:
                    logging.exception("")
                    remove_experiment = input(
                        "Do you want to remove the experiment? (yes/y/Yes)")
                    if remove_experiment in ["Yes", "yes", "y"]:
                        shutil.rmtree(stats_graph_folder)
                        print("Folder removed")

        sess.close()  # release the session's resources
        if 'cross_validation' in parameters and parameters[
                'cross_validation'] > 1:
            valid_fscores.append(previous_best_valid_f1_score)
            #valid_precisions.append(previous_best_valid_precision)
            #valid_recalls.append(previous_best_valid_recall)
    if 'cross_validation' in parameters and parameters['cross_validation'] > 1:
        print("mean f1score:", np.mean(valid_fscores))
        #print("mean precision:", np.mean(valid_precisions))
        #print("mean recall:", np.mean(valid_recalls))
        with codecs.open(os.path.join(stats_graph_folder, "result_cv.txt"),
                         "w") as file:
            file.write("F1score " + ", ".join(map(str, valid_fscores)))
            # file.write("Precision " + valid_precisions)
            # file.write("Recall " + valid_recalls)
            file.write("Mean F1score " + str(np.mean(valid_fscores)))
Code example #29
0
File: main.py Project: PDEUXA/AIF_CYCLEGAN
    ckpt.restore(ckpt_manager.latest_checkpoint)
    print('Latest checkpoint restored!!')

# Draw a sample
sample_a = next(iter(train_a))
sample_b = next(iter(train_b))

# Train the models
seed = tf.random.normal([1, 256, 256, 3])
for epoch in range(10):
    start = time.time()
    n = 0
    for image_A, image_B in tf.data.Dataset.zip((train_a, train_b)):
        print("trainstep")
        train_step(image_A, image_B, gen_a, gen_b, disc_a, disc_b,
                   generator_a_optimizer, generator_b_optimizer,
                   discriminator_a_optimizer, discriminator_b_optimizer)
        if n % 10 == 0:
            print(n / 10, end=' ')
        n += 1

    # Using consistent images (sample_a, sample_b) so that the progress of the
    # model is clearly visible.
    generate_and_save_images(gen_a, epoch + 1, sample_a, 'A')
    generate_and_save_images(gen_b, epoch + 1, sample_b, 'B')

    if (epoch + 1) % 5 == 0:
        ckpt_save_path = ckpt_manager.save()
        print('Saving checkpoint for epoch {} at {}'.format(
            epoch + 1, ckpt_save_path))
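
The restore-then-save-every-5-epochs cadence above relies on tf.train.CheckpointManager. A reduced sketch of that pattern, tracking a single counter variable instead of the four CycleGAN models and optimizers:

import tensorflow as tf

step = tf.Variable(0)
ckpt = tf.train.Checkpoint(step=step)
ckpt_manager = tf.train.CheckpointManager(ckpt, './checkpoints', max_to_keep=5)

if ckpt_manager.latest_checkpoint:
    ckpt.restore(ckpt_manager.latest_checkpoint)  # resume from the newest checkpoint
    print('Latest checkpoint restored!!')

for epoch in range(10):
    step.assign_add(1)  # stand-in for one epoch of training
    if (epoch + 1) % 5 == 0:
        ckpt_save_path = ckpt_manager.save()
        print('Saving checkpoint for epoch {} at {}'.format(epoch + 1, ckpt_save_path))
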
Code example #30
0
File: main.py Project: Por123/NeuroNER
def main():

    parameters, dataset_filepaths = load_parameters()

    # Load dataset
    dataset = ds.Dataset()
    dataset.load_dataset(dataset_filepaths, parameters)

    # Create graph and session
    with tf.Graph().as_default():
        session_conf = tf.ConfigProto(
            device_count={
                'CPU': 1,
                'GPU': 1
            },
            allow_soft_placement=True,  # automatically choose an existing and supported device to run the operations in case the specified one doesn't exist
            log_device_placement=False)

        sess = tf.Session(config=session_conf)

        with sess.as_default():
            # Initialize and save execution details
            start_time = time.time()
            experiment_timestamp = utils.get_current_time_in_miliseconds()
            results = {}
            results['epoch'] = {}
            results['execution_details'] = {}
            results['execution_details']['train_start'] = start_time
            results['execution_details']['time_stamp'] = experiment_timestamp
            results['execution_details']['early_stop'] = False
            results['execution_details']['keyboard_interrupt'] = False
            results['execution_details']['num_epochs'] = 0
            results['model_options'] = copy.copy(parameters)

            dataset_name = utils.get_basename_without_extension(
                parameters['dataset_text_folder'])
            model_name = '{0}_{1}'.format(
                dataset_name, results['execution_details']['time_stamp'])

            output_folder = os.path.join('..', 'output')
            utils.create_folder_if_not_exists(output_folder)
            stats_graph_folder = os.path.join(
                output_folder, model_name)  # Folder where to save graphs
            utils.create_folder_if_not_exists(stats_graph_folder)
            model_folder = os.path.join(stats_graph_folder, 'model')
            utils.create_folder_if_not_exists(model_folder)
            tensorboard_log_folder = os.path.join(stats_graph_folder,
                                                  'tensorboard_logs')
            utils.create_folder_if_not_exists(tensorboard_log_folder)
            tensorboard_log_folders = {}
            for dataset_type in ['train', 'valid', 'test']:
                tensorboard_log_folders[dataset_type] = os.path.join(
                    stats_graph_folder, 'tensorboard_logs', dataset_type)
                utils.create_folder_if_not_exists(
                    tensorboard_log_folders[dataset_type])

            pickle.dump(
                dataset,
                open(os.path.join(stats_graph_folder, 'dataset.pickle'), 'wb'))

            # Instantiate the model
            # graph initialization should be before FileWriter, otherwise the graph will not appear in TensorBoard
            model = EntityLSTM(dataset, parameters)

            # Instantiate the writers for TensorBoard
            writers = {}
            for dataset_type in ['train', 'valid', 'test']:
                writers[dataset_type] = tf.summary.FileWriter(
                    tensorboard_log_folders[dataset_type], graph=sess.graph)
            embedding_writer = tf.summary.FileWriter(
                model_folder
            )  # embedding_writer has to write in model_folder, otherwise TensorBoard won't be able to view embeddings

            embeddings_projector_config = projector.ProjectorConfig()
            tensorboard_token_embeddings = embeddings_projector_config.embeddings.add()
            tensorboard_token_embeddings.tensor_name = model.token_embedding_weights.name
            token_list_file_path = os.path.join(
                model_folder, 'tensorboard_metadata_tokens.tsv')
            tensorboard_token_embeddings.metadata_path = os.path.relpath(
                token_list_file_path, '..')

            tensorboard_character_embeddings = embeddings_projector_config.embeddings.add()
            tensorboard_character_embeddings.tensor_name = model.character_embedding_weights.name
            character_list_file_path = os.path.join(
                model_folder,
                'tensorboard_metadata_characters.tsv')  #  'metadata.tsv'
            tensorboard_character_embeddings.metadata_path = os.path.relpath(
                character_list_file_path, '..')

            projector.visualize_embeddings(embedding_writer,
                                           embeddings_projector_config)

            # Write metadata for TensorBoard embeddings
            token_list_file = open(token_list_file_path, 'w')
            for token_index in range(dataset.vocabulary_size):
                token_list_file.write('{0}\n'.format(
                    dataset.index_to_token[token_index]))
            token_list_file.close()

            character_list_file = open(character_list_file_path, 'w')
            print('len(dataset.character_to_index): {0}'.format(
                len(dataset.character_to_index)))
            print('len(dataset.index_to_character): {0}'.format(
                len(dataset.index_to_character)))
            for character_index in range(dataset.alphabet_size):
                if character_index == dataset.PADDING_CHARACTER_INDEX:
                    character_list_file.write('PADDING\n')
                else:
                    character_list_file.write('{0}\n'.format(
                        dataset.index_to_character[character_index]))
            character_list_file.close()

            # Initialize the model
            sess.run(tf.global_variables_initializer())
            model.load_pretrained_token_embeddings(sess, dataset, parameters)

            # Start training + evaluation loop. Each iteration corresponds to 1 epoch.
            step = 0
            bad_counter = 0  # number of epochs with no improvement on the validation set in terms of F1-score
            previous_best_valid_f1_score = 0
            transition_params_trained = np.random.rand(
                len(dataset.unique_labels), len(dataset.unique_labels))
            model_saver = tf.train.Saver(
                max_to_keep=parameters['maximum_number_of_epochs']
            )  # defaults to saving all variables
            epoch_number = -1
            try:
                while True:
                    epoch_number += 1
                    #epoch_number = math.floor(step / len(dataset.token_indices['train']))
                    print('\nStarting epoch {0}'.format(epoch_number), end='')

                    epoch_start_time = time.time()
                    #print('step: {0}'.format(step))

                    # Train model: loop over all sequences of training set with shuffling
                    sequence_numbers = list(
                        range(len(dataset.token_indices['train'])))
                    random.shuffle(sequence_numbers)
                    for sequence_number in sequence_numbers:
                        transition_params_trained = train.train_step(
                            sess, dataset, sequence_number, model,
                            transition_params_trained, parameters)
                        step += 1
                        if step % 100 == 0:
                            print('.', end='', flush=True)
                            #break
                    print('.', flush=True)
                    #print('step: {0}'.format(step))

                    # Predict labels using trained model
                    y_pred = {}
                    y_true = {}
                    output_filepaths = {}
                    for dataset_type in ['train', 'valid', 'test']:
                        #print('dataset_type:     {0}'.format(dataset_type))
                        prediction_output = train.prediction_step(
                            sess, dataset, dataset_type, model,
                            transition_params_trained, step,
                            stats_graph_folder, epoch_number, parameters)
                        y_pred[dataset_type], y_true[
                            dataset_type], output_filepaths[
                                dataset_type] = prediction_output
#                         model_options = None

                    epoch_elapsed_training_time = time.time() - epoch_start_time
                    print(
                        'epoch_elapsed_training_time: {0:.2f} seconds'.format(
                            epoch_elapsed_training_time))

                    results['execution_details']['num_epochs'] = epoch_number

                    # Evaluate model: save and plot results
                    evaluate.evaluate_model(results, dataset, y_pred, y_true,
                                            stats_graph_folder, epoch_number,
                                            epoch_start_time, output_filepaths,
                                            parameters)

                    # Save model
                    model_saver.save(
                        sess,
                        os.path.join(model_folder,
                                     'model_{0:05d}.ckpt'.format(epoch_number))
                    )  #, global_step, latest_filename, meta_graph_suffix, write_meta_graph, write_state)

                    # Save TensorBoard logs
                    summary = sess.run(model.summary_op, feed_dict=None)
                    writers['train'].add_summary(summary, epoch_number)

                    # Early stop
                    valid_f1_score = results['epoch'][epoch_number][0][
                        'valid']['f1_score']['micro']
                    if valid_f1_score > previous_best_valid_f1_score:
                        bad_counter = 0
                        previous_best_valid_f1_score = valid_f1_score
                    else:
                        bad_counter += 1

                    if bad_counter > parameters['patience']:
                        print('Early Stop!')
                        results['execution_details']['early_stop'] = True
                        break

                    if epoch_number > parameters['maximum_number_of_epochs']:
                        break


#                     break # debugging

            except KeyboardInterrupt:
                results['execution_details']['keyboard_interrupt'] = True
                #         assess_model.save_results(results, stats_graph_folder)
                print('Training interrupted')

            print('Finishing the experiment')
            end_time = time.time()
            results['execution_details'][
                'train_duration'] = end_time - start_time
            results['execution_details']['train_end'] = end_time
            evaluate.save_results(results, stats_graph_folder)

    sess.close()  # release the session's resources
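
All of the NeuroNER variants above build their TF1 session the same way: allow_soft_placement=True falls back to a supported device when the requested one is unavailable, instead of raising an error. A minimal sketch of that configuration on an empty graph:

import tensorflow as tf

session_conf = tf.ConfigProto(
    device_count={'CPU': 1, 'GPU': 1},
    allow_soft_placement=True,   # fall back to an available device if needed
    log_device_placement=False)  # do not log which device runs each op

with tf.Graph().as_default(), tf.Session(config=session_conf) as sess:
    sess.run(tf.global_variables_initializer())  # no-op here: the graph has no variables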