Example #1
        # use 20% of the training set for validation
        train_X, val_X, train_y, val_y = train_test_split(train_X, train_y,
                                                          test_size=0.2, random_state=0)
        # model
        model = deepmoji_architecture(nb_classes=nb_classes,
                                      nb_tokens=nb_tokens,
                                      maxlen=MAX_LEN,
                                      embed_dropout_rate=0.25,
                                      final_dropout_rate=0.5,
                                      embed_l2=1E-6)
        model.summary()

        # load pretrained representation model
        load_specific_weights(model, model_path, nb_tokens, MAX_LEN,
                              exclude_names=["softmax"])
        
        # train model
        model, acc = finetune(model, [train_X, val_X, test_X], [train_y, val_y, test_y], nb_classes, 100,
                              method="chain-thaw", verbose=2)
        
        pred_y_prob = model.predict(test_X)

        if nb_classes == 2:
            pred_y = [0 if p < 0.5 else 1 for p in pred_y_prob]
        else:
            pred_y = np.argmax(pred_y_prob, axis=1)

        # evaluation
        print("*****************************************")
        print("Fold %d" % fold)
        accuracy = accuracy_score(test_y, pred_y)
        print("Accuracy: %.3f" % accuracy)

        precision = precision_score(test_y, pred_y, average=None)
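        # The excerpt ends here. A hedged sketch of how the remaining per-fold
        # metrics might be reported with scikit-learn; recall_score and
        # f1_score are assumptions, not part of the original snippet.
        from sklearn.metrics import recall_score, f1_score
        recall = recall_score(test_y, pred_y, average=None)
        f1 = f1_score(test_y, pred_y, average=None)
        print("Precision per class: %s" % precision)
        print("Recall per class:    %s" % recall)
        print("F1 per class:        %s" % f1)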
Example #2
def selftrain(model_name_or_path, train_file, infer_file, output_dir,
              **kwargs):
    """Self-training a pre-trained model on a downstream task.

  Args:
    model_name_or_path: Path to pretrained model or model identifier from
      huggingface.co/models.
    train_file: A csv or a json file containing the training data.
    infer_file: A csv or a json file containing the data to predict on.
    output_dir: The output directory where the model predictions and checkpoints
      will be written.
    **kwargs: Dictionary of key/value pairs with which to update the
      configuration object after loading. The values in kwargs of any keys which
      are configuration attributes will be used to override the loaded values.
  """
    # Initialize the accelerator. We will let the accelerator handle device
    # placement for us.
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        level=logging.INFO,
    )
    logger.info(accelerator.state)

    # Set up logging: we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process
    # per machine.
    logger.setLevel(
        logging.INFO if accelerator.is_local_main_process else logging.ERROR)

    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file, infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)
    args = argparse.Namespace()

    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)

    for key, value in kwargs.items():
        if hasattr(args, key):
            setattr(args, key, value)

    # Sanity checks
    data_files = {}
    args.data_file_extension = None

    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files['train'] = args.train_file
    data_files['infer'] = args.infer_file

    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files['eval'] = args.eval_file

    for key in data_files:
        extension = data_files[key].split('.')[-1]
        assert extension in ['csv', 'json'], (
            f'`{key}_file` should be a csv or a json file.')
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, (
                f'`{key}_file` should be a {args.data_file_extension} file.')

    assert (
        args.eval_metric in datasets.list_metrics()
    ), f'{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    logger.info('Creating the initial data directory for self-training...')
    data_dir_format = f'{args.output_dir}/self-train_iter-{{}}'.format
    initial_data_dir = data_dir_format(0)

    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
            os.makedirs(initial_data_dir, exist_ok=True)
    accelerator.wait_for_everyone()

    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations),
                        disable=not accelerator.is_local_main_process)

    # Self-train
    for iteration in range(0, int(args.max_selftrain_iterations)):
        current_data_dir = data_dir_format(iteration)
        assert os.path.exists(current_data_dir)

        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        current_output_dir = os.path.join(current_data_dir, 'stage-1')
        arguments_dict = {
            'accelerator': accelerator,
            'model_name_or_path': args.model_name_or_path,
            'cache_dir': args.cache_dir,
            'do_train': True,
            'train_file': (data_files['train'] if iteration == 0
                           else data_files['train_pseudo']),
            'do_eval': args.eval_file is not None,
            'eval_file': data_files['eval'],
            'do_predict': True,
            'infer_file': data_files['infer'],
            'task_name': args.task_name,
            'label_list': args.label_list,
            'output_dir': current_output_dir,
            'eval_metric': args.eval_metric,
            'evaluation_strategy': args.evaluation_strategy,
            'early_stopping_patience': args.early_stopping_patience,
            'early_stopping_threshold': args.early_stopping_threshold,
            'seed': args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args, key):
                arguments_dict.update({key: value})

        model_bin_file_path = os.path.join(current_output_dir,
                                           'best-checkpoint', MODEL_BIN_FILE)
        if os.path.exists(model_bin_file_path):
            logger.info(
                'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.',
                model_bin_file_path, iteration)
        else:
            logger.info(
                '***** Running self-training: iteration: %d, stage: 1 *****',
                iteration)
            finetune(**arguments_dict)
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path)
            logger.info(
                'Self-training job completed: iteration: %d, stage: 1.',
                iteration)

        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_output_dir, 'best-checkpoint')
            current_output_dir = os.path.join(current_data_dir, 'stage-2')
            # Update arguments_dict
            arguments_dict['model_name_or_path'] = model_path
            arguments_dict['train_file'] = data_files['train']
            arguments_dict['output_dir'] = current_output_dir

            model_bin_file_path = os.path.join(current_output_dir,
                                               'best-checkpoint',
                                               MODEL_BIN_FILE)
            if os.path.exists(model_bin_file_path):
                logger.info(
                    'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.',
                    model_bin_file_path, iteration)
            else:
                logger.info(
                    '***** Running self-training: iteration: %d, stage: 2 *****',
                    iteration)
                finetune(**arguments_dict)
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path)
                logger.info(
                    'Self-training job completed: iteration: %d, stage: 2.',
                    iteration)

        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1)

        config = AutoConfig.from_pretrained(
            os.path.join(current_output_dir, 'best-checkpoint'))
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir,
                                         'eval_results_best-checkpoint.json')
        test_results_file = os.path.join(current_output_dir,
                                         'test_results_best-checkpoint.json')
        assert os.path.exists(eval_results_file)

        with open(eval_results_file, 'r') as f:
            eval_result = float(json.load(f)[args.eval_metric])
        infer_output_file = os.path.join(current_output_dir,
                                         'infer_output_best-checkpoint.csv')
        assert os.path.exists(infer_output_file)
        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(
            args.data_file_extension,
            data_files={'data': data_files['infer']})['data']
        infer_output = load_dataset(
            'csv', data_files={'data': infer_output_file})['data']

        if accelerator.is_main_process:
            os.makedirs(next_data_dir, exist_ok=True)
            shutil.copy(
                eval_results_file,
                os.path.join(output_dir,
                             f'eval_results_iter-{iteration}.json'))
            if os.path.exists(test_results_file):
                shutil.copy(
                    test_results_file,
                    os.path.join(output_dir,
                                 f'test_results_iter-{iteration}.json'))
            create_pseudo_labeled_data(args, infer_input, infer_output,
                                       eval_result, id2label, next_data_dir)
        accelerator.wait_for_everyone()

        data_files['train_pseudo'] = os.path.join(
            next_data_dir, f'train_pseudo.{args.data_file_extension}')

        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result

            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1

                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True

        progress_bar.update(1)

        if should_training_stop:
            break

    if best_iteration is not None:
        # Save the best iteration
        logger.info('Best iteration: %d', best_iteration)
        logger.info('Best evaluation result: %s = %f', args.eval_metric,
                    best_eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir,
                             f'eval_results_iter-{best_iteration}.json'),
                os.path.join(output_dir, 'eval_results_best-iteration.json'))
    else:
        # Assume that the last iteration is the best
        logger.info('Best iteration: %d', args.max_selftrain_iterations - 1)
        logger.info('Best evaluation result: %s = %f', args.eval_metric,
                    eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(
                    output_dir,
                    f'eval_results_iter-{args.max_selftrain_iterations - 1}.json'
                ), os.path.join(output_dir,
                                'eval_results_best-iteration.json'))
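# A minimal usage sketch for selftrain (not from the original file). The model
# identifier, file paths, and the keyword overrides routed through **kwargs
# below are illustrative placeholders.
selftrain(
    model_name_or_path='bert-base-uncased',
    train_file='data/train.csv',
    infer_file='data/infer.csv',
    output_dir='outputs/self-training',
    eval_file='data/eval.csv',
    eval_metric='accuracy',
    evaluation_strategy='epoch',
    max_selftrain_iterations=3,
    seed=42,
)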
Example #3
        data.to_csv(DATASET_PATH, sep='\t', index=False)

    data = pd.read_csv(DATASET_PATH, sep='\t')
    data = data[:5000]

    vocab = load_vocab(data)

    print(len(vocab))

    # Load dataset
    data_tr_cv_ts = load_benchmark(data, vocab)

    # Set up the model and fine-tune
    model = deepmoji_transfer(
        nb_classes, data_tr_cv_ts['maxlen'])  # loads weights from PRETRAINED_PATH
    # Print layer summary
    model.summary()

    print(len(data_tr_cv_ts['texts'][0][0]))
    model, acc = finetune(model,
                          data_tr_cv_ts['texts'],
                          data_tr_cv_ts['labels'],
                          nb_classes,
                          data_tr_cv_ts['batch_size'],
                          method='last',
                          epoch_size=5000,
                          nb_epochs=1000,
                          verbose=5)
    print('Acc: {}'.format(acc))
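    # Follow-up sketch (not part of the original snippet): persist the
    # fine-tuned Keras model with save_weights; the file name is a placeholder.
    model.save_weights('deepmoji_finetuned_last.hdf5')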
Example #4
import os.path

import numpy as np

# Project-local modules from the DomAdapt code base (import paths assumed).
import preprocessing
import finetuning
import visualizations

#%% Load Data: Profound in and out.
datadir = "OneDrive\Dokumente\Sc_Master\Masterthesis\Project\DomAdapt"
X, Y = preprocessing.get_splits(sites=['hyytiala'],
                                years=[2001, 2002, 2003, 2004, 2005, 2006],
                                datadir=os.path.join(datadir, "data"),
                                dataset="profound",
                                simulations=None)

#%%
pretrained_model = visualizations.losses("mlp", 7, "")

running_losses, performance, y_tests, y_preds = finetuning.finetune(
    X, Y, epochs=100, model="mlp", pretrained_type=7)
#%%
visualizations.plot_running_losses(running_losses["mae_train"],
                                   running_losses["mae_val"], "", "mlp")
print(np.mean(np.array(performance), axis=0))

res_mlp = visualizations.losses("mlp", 0, "")

#%%
import setup.models as models
import torch
import torch.nn as nn

model = models.MLPmod(7, [64, 64, 16, 1], nn.ReLU)
model.load_state_dict(
    torch.load(os.path.join(datadir, "python", "outputs", "models", "mlp6",
                            "model0.pth")))
Example #5
                                      embed_dropout_rate=0.25,
                                      final_dropout_rate=0.5,
                                      embed_l2=1E-6)
        model.summary()

        # load pretrained representation model
        load_specific_weights(model,
                              model_path,
                              nb_tokens,
                              MAX_LEN,
                              exclude_names=['softmax'])

        # train model
        model, acc = finetune(model, [train_X, val_X, test_X],
                              [train_y, val_y, test_y],
                              nb_classes,
                              100,
                              method='chain-thaw')

        pred_y_prob = model.predict(test_X)

        if nb_classes == 2:
            pred_y = [0 if p < 0.5 else 1 for p in pred_y_prob]
        else:
            pred_y = np.argmax(pred_y_prob, axis=1)

        # evaluation
        print('*****************************************')
        print("Fold %d" % fold)
        accuracy = accuracy_score(test_y, pred_y)
        print("Accuracy: %.3f" % accuracy)