Example #1
def compute_energyusage(annotation_filepath, model_dir, model_file):
    try:
        tmplogdir = tempfile.mkdtemp()
        print(f'Logging energy evaluation in directory: {tmplogdir}')

        tracker = ImpactTracker(tmplogdir)
        nlc2cmd_dl = get_dataloader(annotation_filepath)

        tracker.launch_impact_monitor()
        grnd_truth, _, _ = get_predictions(nlc2cmd_dl, model_dir, model_file)
        n = len(grnd_truth)

        info = tracker.get_latest_info_and_check_for_errors()

        tracker.p.terminate()
        experiment_impact_tracker.data_utils.log_final_info(tracker.logdir)

        stats = compute_tracker.read_latest_stats(tmplogdir)
        energy_watts = stats.get('rapl_estimated_attributable_power_draw', 0.0)
        energy_mwatts = (energy_watts * 1000.0) / n

        result = {'status': 'success', 'energy_mwh': energy_mwatts}

    except Exception as err:
        result = {
            'status': 'error',
            'error_message': str(err),
            'energy_mwh': 0.0
        }

        print('Exception occurred in energy consumption computation')
        print(traceback.format_exc())

    finally:
        return result
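
For orientation, a minimal, hypothetical call to the function above; the annotation file and model paths below are placeholders, not paths from the original project.

# Hypothetical usage sketch; all paths and filenames are placeholders.
result = compute_energyusage(
    annotation_filepath='data/nl2cmd_annotations.json',
    model_dir='models/',
    model_file='model.pkl',
)
print(result['status'], result['energy_mwh'])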
Example #2
def test_ram_attribution():
    p = psutil.Process()
    fname = tempfile.mkdtemp()

    with ImpactTracker(fname) as tracker:
        # Allocate 512 MB and sleep for a bit
        x = bytearray(512000000)
        time.sleep(30)
        # Raise any errors in the main thread
        tracker.get_latest_info_and_check_for_errors()

    fname2 = tempfile.mkdtemp()
    del x

    with ImpactTracker(fname2) as tracker:
        # Allocate 1024 MB and sleep for a bit
        y = bytearray(1024000000)
        time.sleep(30)
        # Raise any errors in the main thread
        tracker.get_latest_info_and_check_for_errors()

    del y

    di = DataInterface(fname)
    di2 = DataInterface(fname2)

    np.testing.assert_almost_equal(di.total_power * 2,
                                   di2.total_power,
                                   decimal=4)
Example #3
def main(add_extra_parser_options, worker):
    argv = sys.argv[1:]
    conf_parser = argparse.ArgumentParser(description=__doc__,
            formatter_class=argparse.RawDescriptionHelpFormatter,
            add_help=False)
    conf_parser.add_argument('-c', '--conf-file', help='Specify config file')
    args, remaining_argv = conf_parser.parse_known_args(args=argv)

    defaults = {}

    if args.conf_file:
        config = configparser.ConfigParser()
        config.read(args.conf_file)
        defaults.update(dict(config.items('Defaults')))
        for key, value in [(k,v.lower()) for k,v in defaults.items()]:
            if value in ('yes', 'true'):
                defaults[key] = True
            elif value in ('no', 'false'):
                defaults[key] = False
            elif value in ('none',):
                defaults[key] = None

    parser = argparse.ArgumentParser(description='CuLE', parents=[conf_parser])
    parser = add_global_parser_options(parser)
    parser = add_extra_parser_options(parser)
    parser.set_defaults(**defaults)
    args = parser.parse_args(remaining_argv)

    if args.local_rank == 0:
        pprint(vars(args))

    from experiment_impact_tracker.compute_tracker import ImpactTracker, get_flop_count_tensorflow

    tracker = ImpactTracker(args.log_dir)

    tracker.launch_impact_monitor()

    maybe_restart(args, worker)
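
The loop above coerces config-file strings into Python booleans and None before they become argparse defaults. A self-contained sketch of that coercion, using an inline [Defaults] section whose option names are made up for illustration:

import configparser

# Hypothetical [Defaults] section; the option names are placeholders.
sample = '''
[Defaults]
verbose = yes
use_gpu = false
tensorboard_log = none
'''

config = configparser.ConfigParser()
config.read_string(sample)
defaults = dict(config.items('Defaults'))
for key, value in [(k, v.lower()) for k, v in defaults.items()]:
    if value in ('yes', 'true'):
        defaults[key] = True
    elif value in ('no', 'false'):
        defaults[key] = False
    elif value in ('none',):
        defaults[key] = None
print(defaults)  # {'verbose': True, 'use_gpu': False, 'tensorboard_log': None}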
Example #4
def test_relative_accuracy():
    """Test that one obviously more intensive job returns more power than another

    :return:
    """
    if not is_intel_compatible():
        # For now, we have a requirement that at least the CPU info be recorded
        # TODO: in the future we want to be able to only record GPU or whatever info is available
        return

    fname1 = tempfile.mkdtemp()

    with ImpactTracker(fname1):
        _helper_function(50)

    fname2 = tempfile.mkdtemp()

    with ImpactTracker(fname2):
        _helper_function(100)

    di = DataInterface([fname1])
    di2 = DataInterface([fname2])

    assert di2.total_power > di.total_power and di2.kg_carbon > di.kg_carbon
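
The _helper_function used above is not shown in this snippet; a plausible stand-in (an assumption, not the original helper) is a CPU-bound loop whose work scales with its argument, so _helper_function(100) does strictly more work than _helper_function(50):

def _helper_function(n):
    # Hypothetical CPU-bound workload; the real helper is defined elsewhere.
    total = 0
    for _ in range(n):
        for i in range(200_000):
            total += i * i
    return total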
Example #5
def do_preds(models, test_values, pred_items, runs=3, impact_run_name=False):
    # impact_run_name is either a run name (tracking enabled) or False (tracking disabled)
    run_times = []
    model_names = list(models.keys())

    if impact_run_name:
        from experiment_impact_tracker.compute_tracker import ImpactTracker

    # eval loop
    for i in range(runs):
        pred_times = {}
        print('Run', i+1, 'of', runs)

        # shuffle function list each loop to reduce ordering effects
        random.shuffle(model_names)

        for func_name in tqdm(model_names):
            model = models[func_name]
            pred_item_count = 0
            #print(func_name)
            if impact_run_name: # are we trying to track carbon impact?
                with ImpactTracker(impact_run_name + '_' + func_name + '_' + str(i)) as tracker:
                    tracker.launch_impact_monitor()
                    start = time.perf_counter()
                    # use capped test_values size to conserve memory;
                    # go around until target # predictions made
                    while pred_item_count < pred_items:
                        predictions = model(test_values)
                        pred_item_count += test_values.shape[0]
                    elapsed = time.perf_counter() - start

            else:
                start = time.perf_counter()
                # use capped test_values size to conserve memory;
                # go around until target # predictions made
                while pred_item_count < pred_items:
                    predictions = model(test_values)
                    pred_item_count += test_values.shape[0]
                elapsed = time.perf_counter() - start

            #print('elapsed:', elapsed)
            #models[func_name] = m
            pred_times[func_name] = elapsed

        run_times.append(pred_times)
    return run_times
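
A hypothetical invocation of do_preds with two toy callables standing in for real models; the array shape and the lambda "models" below are placeholders for illustration only.

import numpy as np

test_values = np.random.rand(256, 16)    # placeholder prediction inputs
models = {
    'identity': lambda x: x,             # toy stand-in for a real model
    'squared': lambda x: x ** 2,         # toy stand-in for a real model
}
# Pass a run name string instead of False to enable ImpactTracker logging.
run_times = do_preds(models, test_values, pred_items=1024, runs=2,
                     impact_run_name=False)
print(run_times)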
Example #6
def train(
    d: str = "cpu",
    log_dir: str = tempfile.mkdtemp(),
    epochs: int = 200,
    track_impact: bool = True,
    check_for_errors: bool = True,
):
    if track_impact:
        tracker = ImpactTracker(os.path.join(log_dir, ""))

        tracker.launch_impact_monitor()
    device = torch.device(d)

    # N is batch size; D_in is input dimension;
    # H is hidden dimension; D_out is output dimension.
    N, D_in, H, D_out = 1024, 10000, 1000, 100

    # Create random input and output data
    x = torch.randn(N, D_in, device=device)
    y = torch.randn(N, D_out, device=device)

    # Randomly initialize weights
    w1 = torch.randn(D_in, H, device=device)
    w2 = torch.randn(H, D_out, device=device)

    learning_rate = 1e-6
    for t in range(epochs):
        # Forward pass: compute predicted y
        h = x.mm(w1)
        h_relu = h.clamp(min=0)
        y_pred = h_relu.mm(w2)

        # Backprop to compute gradients of w1 and w2 with respect to loss
        grad_y_pred = 2.0 * (y_pred - y)
        grad_w2 = h_relu.t().mm(grad_y_pred)
        grad_h_relu = grad_y_pred.mm(w2.t())
        grad_h = grad_h_relu.clone()
        grad_h[h < 0] = 0
        grad_w1 = x.t().mm(grad_h)

        # Update weights using gradient descent
        w1 -= learning_rate * grad_w1
        w2 -= learning_rate * grad_w2
        if track_impact and check_for_errors:
            # Only poll the tracker when it was actually launched above.
            tracker.get_latest_info_and_check_for_errors()

    print("SUCCESS")
Example #7
def my_experiment() -> None:
    tmp_dir = tempfile.mkdtemp()
    # Init tracker with log path
    tracker = ImpactTracker(tmp_dir)
    # Start tracker in a separate process
    tracker.launch_impact_monitor()

    exp = Experiment()

    for t in range(100):
        if t % 10 == 9:
            print(f"Pass: {t}")
            # Optional: this makes the experiment stop and exit if the impact tracker raises an exception.
            tracker.get_latest_info_and_check_for_errors()
        exp.train()

    print(f"Please find your experiment logs in: {tmp_dir}")
Example #8
        print("Using MPI for multiprocessing with {} workers".format(MPI.COMM_WORLD.Get_size()))
        rank = MPI.COMM_WORLD.Get_rank()
        print("Worker rank: {}".format(rank))

        args.seed += rank
        if rank != 0:
            args.verbose = 0
            args.tensorboard_log = ''

    for env_id in env_ids:
        tensorboard_log = None if args.tensorboard_log == '' else os.path.join(args.tensorboard_log, env_id)
        os.environ["OPENAI_LOG_FORMAT"] = 'csv'
        os.environ["OPENAI_LOGDIR"] = os.path.abspath(tensorboard_log)
        logger.configure()

        tracker = ImpactTracker(tensorboard_log)

        tracker.launch_impact_monitor()

        is_atari = False
        if 'NoFrameskip' in env_id:
            is_atari = True

        print("=" * 10, env_id, "=" * 10)

        # Load hyperparameters from yaml file
        if args.hparam_file:
            hparam_file_name = args.hparam_file
        else:
            hparam_file_name = 'hyperparams/{}.yml'.format(args.algo)
        with open(hparam_file_name, 'r') as f:
Example #9
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help=
        "The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_type",
        default=None,
        type=str,
        required=True,
        help="Model type selected in the list: " +
        ", ".join(MODEL_CLASSES.keys()),
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pre-trained model or shortcut name selected in the list: "
        + ", ".join(ALL_MODELS),
    )
    parser.add_argument(
        "--task_name",
        default=None,
        type=str,
        required=True,
        help="The name of the task to train selected in the list: " +
        ", ".join(processors.keys()),
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help=
        "The output directory where the model predictions and checkpoints will be written.",
    )

    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help=
        "Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help=
        "Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default="",
        type=str,
        help=
        "Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset",
        type=int,
        default=-1,
        help="If > 0: limit the data to a subset of data_subset instances.")
    parser.add_argument("--overwrite_output_dir",
                        action="store_true",
                        help="Whether to overwrite data in output directory")
    parser.add_argument(
        "--overwrite_cache",
        action="store_true",
        help="Overwrite the cached training and evaluation sets")

    parser.add_argument("--dont_normalize_importance_by_layer",
                        action="store_true",
                        help="Don't normalize importance score by layers")
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument("--use_train_data",
                        action="store_true",
                        help="Use training set for computing masks")
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help=
        "masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount",
        default=0.1,
        type=float,
        help="Amount to heads to masking at each masking step.")
    parser.add_argument("--metric_name",
                        default=None,
                        type=str,
                        help="Metric to use for head masking.")

    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=
        "The maximum total input sequence length after WordPiece tokenization. \n"
        "Sequences longer than this will be truncated, sequences shorter padded.",
    )
    parser.add_argument("--batch_size",
                        default=1,
                        type=int,
                        help="Batch size.")

    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank",
                        type=int,
                        default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda",
                        action="store_true",
                        help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip",
                        type=str,
                        default="",
                        help="Can be used for distant debugging.")
    parser.add_argument("--server_port",
                        type=str,
                        default="",
                        help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port),
                            redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available()
                                   and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(
            backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(
        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(
        args.device, args.n_gpu, bool(args.local_rank != -1)))

    # Set seeds
    set_seed(args)

    tracker = ImpactTracker(args.output_dir)
    tracker.launch_impact_monitor()

    # Prepare GLUE task
    args.task_name = args.task_name.lower()
    if args.task_name not in processors:
        raise ValueError("Task not found: %s" % (args.task_name))

    if args.metric_name is None:
        args.metric_name = {
            "cola": "mcc",
            "mnli": "acc",
            "mnli-mm": "acc",
            "mrpc": "acc",
            "sst-2": "acc",
            "sts-b": "corr",
            "qqp": "acc",
            "qnli": "acc",
            "rte": "acc",
            "wnli": "acc",
            "hans": "acc",
            "mnli_two": "acc",
            "hans_mnli": "acc"
        }[args.task_name]

    processor = processors[args.task_name]()
    args.output_mode = output_modes[args.task_name]
    label_list = processor.get_labels()
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier(
        )  # Make sure only the first process in distributed training will download model & vocab

    MODEL_CLASSES["bert"] = (BertConfig, BertForSequenceClassification,
                             MODEL_CLASSES["bert"][2])

    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    if args.model_type != "bert":
        raise NotImplementedError("Not implemented for non-BERT model classes yet.")

    config = config_class.from_pretrained(
        args.config_name if args.config_name else args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=args.task_name,
        output_attentions=True,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    tokenizer = tokenizer_class.from_pretrained(
        args.tokenizer_name
        if args.tokenizer_name else args.model_name_or_path,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    model = model_class.from_pretrained(
        args.model_name_or_path,
        from_tf=bool(".ckpt" in args.model_name_or_path),
        config=config,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )

    if args.local_rank == 0:
        torch.distributed.barrier(
        )  # Make sure only the first process in distributed training will download model & vocab

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(
            model,
            device_ids=[args.local_rank],
            output_device=args.local_rank,
            find_unused_parameters=True)
    elif args.n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Print/save training arguments
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset for the GLUE task

    eval_data = load_and_cache_examples(args,
                                        args.task_name,
                                        tokenizer,
                                        evaluate=True)
    true_eval_len = len(eval_data)

    if args.use_train_data:
        train_data = load_and_cache_examples(args,
                                             args.task_name,
                                             tokenizer,
                                             evaluate=False)
        eval_data = random_split(
            train_data,
            [true_eval_len, len(train_data) - true_eval_len])[0]

    if args.data_subset > 0:
        eval_data = Subset(eval_data,
                           list(range(min(args.data_subset, len(eval_data)))))

    eval_sampler = SequentialSampler(
        eval_data) if args.local_rank == -1 else DistributedSampler(eval_data)
    eval_dataloader = DataLoader(eval_data,
                                 sampler=eval_sampler,
                                 batch_size=args.batch_size)

    _, masked_amount = mask_global(args, model, eval_dataloader)

    # Generating a random mask of equal size
    fresh_model = model_class.from_pretrained(
        args.model_name_or_path,
        from_tf=bool(".ckpt" in args.model_name_or_path),
        config=config,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    mask = prune_model(fresh_model, masked_amount, prune.RandomUnstructured)
    torch.save(mask, os.path.join(args.output_dir, "random_mask.p"))

    fresh_model = model_class.from_pretrained(
        args.model_name_or_path,
        from_tf=bool(".ckpt" in args.model_name_or_path),
        config=config,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    mask = prune_model(fresh_model, masked_amount, L1UnstructuredInvert)
    torch.save(mask, os.path.join(args.output_dir, "bad_mask.pt"))
Example #10
def main():
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--model_type",
        default=None,
        type=str,
        required=True,
        help="Model type selected in the list: " + ", ".join(MODEL_TYPES),
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help=
        "Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help=
        "The output directory where the model checkpoints and predictions will be written.",
    )

    # Other parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        help="The input data dir. Should contain the .json files for the task."
        +
        "If no data dir or train/predict files are specified, will run with tensorflow_datasets.",
    )
    parser.add_argument(
        "--train_file",
        default=None,
        type=str,
        help=
        "The input training file. If a data dir is specified, will look for the file there"
        +
        "If no data dir or train/predict files are specified, will run with tensorflow_datasets.",
    )
    parser.add_argument(
        "--predict_file",
        default=None,
        type=str,
        help=
        "The input evaluation file. If a data dir is specified, will look for the file there"
        +
        "If no data dir or train/predict files are specified, will run with tensorflow_datasets.",
    )
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name")
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument(
        "--cache_dir",
        default="",
        type=str,
        help=
        "Where do you want to store the pre-trained models downloaded from s3",
    )

    parser.add_argument(
        "--version_2_with_negative",
        action="store_true",
        help=
        "If true, the SQuAD examples contain some that do not have an answer.",
    )
    parser.add_argument(
        "--null_score_diff_threshold",
        type=float,
        default=0.0,
        help=
        "If null_score - best_non_null is greater than the threshold predict null.",
    )

    parser.add_argument(
        "--max_seq_length",
        default=384,
        type=int,
        help=
        "The maximum total input sequence length after WordPiece tokenization. Sequences "
        "longer than this will be truncated, and sequences shorter than this will be padded.",
    )
    parser.add_argument(
        "--doc_stride",
        default=128,
        type=int,
        help=
        "When splitting up a long document into chunks, how much stride to take between chunks.",
    )
    parser.add_argument(
        "--max_query_length",
        default=64,
        type=int,
        help=
        "The maximum number of tokens for the question. Questions longer than this will "
        "be truncated to this length.",
    )
    parser.add_argument("--do_train",
                        action="store_true",
                        help="Whether to run training.")
    parser.add_argument("--do_eval",
                        action="store_true",
                        help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--evaluate_during_training",
        action="store_true",
        help="Run evaluation during training at each logging step.")
    parser.add_argument(
        "--do_lower_case",
        action="store_true",
        help="Set this flag if you are using an uncased model.")

    parser.add_argument("--per_gpu_train_batch_size",
                        default=8,
                        type=int,
                        help="Batch size per GPU/CPU for training.")
    parser.add_argument("--per_gpu_eval_batch_size",
                        default=8,
                        type=int,
                        help="Batch size per GPU/CPU for evaluation.")
    parser.add_argument("--learning_rate",
                        default=5e-5,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help=
        "Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--weight_decay",
                        default=0.0,
                        type=float,
                        help="Weight decay if we apply some.")
    parser.add_argument("--adam_epsilon",
                        default=1e-8,
                        type=float,
                        help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm",
                        default=1.0,
                        type=float,
                        help="Max gradient norm.")
    parser.add_argument("--num_train_epochs",
                        default=3.0,
                        type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help=
        "If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument("--warmup_steps",
                        default=0,
                        type=int,
                        help="Linear warmup over warmup_steps.")
    parser.add_argument(
        "--n_best_size",
        default=20,
        type=int,
        help=
        "The total number of n-best predictions to generate in the nbest_predictions.json output file.",
    )
    parser.add_argument(
        "--max_answer_length",
        default=30,
        type=int,
        help=
        "The maximum length of an answer that can be generated. This is needed because the start "
        "and end predictions are not conditioned on one another.",
    )
    parser.add_argument(
        "--verbose_logging",
        action="store_true",
        help=
        "If true, all of the warnings related to data processing will be printed. "
        "A number of warnings are expected for a normal SQuAD evaluation.",
    )
    parser.add_argument(
        "--lang_id",
        default=0,
        type=int,
        help=
        "language id of input for language-specific xlm models (see tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)",
    )

    parser.add_argument("--logging_steps",
                        type=int,
                        default=500,
                        help="Log every X updates steps.")
    parser.add_argument("--save_steps",
                        type=int,
                        default=500,
                        help="Save checkpoint every X updates steps.")
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help=
        "Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument("--no_cuda",
                        action="store_true",
                        help="Whether not to use CUDA when available")
    parser.add_argument("--overwrite_output_dir",
                        action="store_true",
                        help="Overwrite the content of the output directory")
    parser.add_argument(
        "--overwrite_cache",
        action="store_true",
        help="Overwrite the cached training and evaluation sets")
    parser.add_argument("--seed",
                        type=int,
                        default=42,
                        help="random seed for initialization")

    parser.add_argument("--local_rank",
                        type=int,
                        default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument(
        "--fp16",
        action="store_true",
        help=
        "Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=
        "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
        "See details at https://nvidia.github.io/apex/amp.html",
    )
    parser.add_argument("--server_ip",
                        type=str,
                        default="",
                        help="Can be used for distant debugging.")
    parser.add_argument("--server_port",
                        type=str,
                        default="",
                        help="Can be used for distant debugging.")

    parser.add_argument(
        "--threads",
        type=int,
        default=1,
        help="multiple threads for converting example to features")
    parser.add_argument("--log_energy_consumption",
                        action="store_true",
                        help="Whether to track energy consumption")
    parser.add_argument(
        "--energy_output_dir",
        default=None,
        type=str,
        help=
        "The output directory where the model checkpoints and predictions will be written.",
    )
    args = parser.parse_args()

    if args.doc_stride >= args.max_seq_length - args.max_query_length:
        logger.warning(
            "WARNING - You've set a doc stride which may be superior to the document length in some "
            "examples. This could result in errors when building features from the examples. Please reduce the doc "
            "stride or increase the maximum length to ensure the features are correctly built."
        )

    if (os.path.exists(args.output_dir) and os.listdir(args.output_dir)
            and args.do_train and not args.overwrite_output_dir):
        raise ValueError(
            "Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome."
            .format(args.output_dir))

    # Setup distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port),
                            redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available()
                              and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend="nccl")
        args.n_gpu = 1
    args.device = device

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        args.local_rank,
        device,
        args.n_gpu,
        bool(args.local_rank != -1),
        args.fp16,
    )
    if args.log_energy_consumption:
        from experiment_impact_tracker.compute_tracker import ImpactTracker

        logger.info("Launching impact tracker...")
        tracker = ImpactTracker(args.energy_output_dir)
        tracker.launch_impact_monitor()
    # Set seed
    set_seed(args)

    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        # Make sure only the first process in distributed training will download model & vocab
        torch.distributed.barrier()

    args.model_type = args.model_type.lower()
    config = AutoConfig.from_pretrained(
        args.config_name if args.config_name else args.model_name_or_path,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        args.tokenizer_name
        if args.tokenizer_name else args.model_name_or_path,
        do_lower_case=args.do_lower_case,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    model = AutoModelForQuestionAnswering.from_pretrained(
        args.model_name_or_path,
        from_tf=bool(".ckpt" in args.model_name_or_path),
        config=config,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )

    if args.local_rank == 0:
        # Make sure only the first process in distributed training will download model & vocab
        torch.distributed.barrier()

    model.to(args.device)

    logger.info("Training/evaluation parameters %s", args)

    # Before we do anything with models, we want to ensure that we get fp16 execution of torch.einsum if args.fp16 is set.
    # Otherwise it'll default to "promote" mode, and we'll get fp32 operations. Note that running `--fp16_opt_level="O2"` will
    # remove the need for this code, but it is still valid.
    if args.fp16:
        try:
            import apex

            apex.amp.register_half_function(torch, "einsum")
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use fp16 training."
            )

    # Training
    if args.do_train:
        train_dataset = load_and_cache_examples(args,
                                                tokenizer,
                                                evaluate=False,
                                                output_examples=False)
        global_step, tr_loss = train(args, train_dataset, model, tokenizer)
        logger.info(" global_step = %s, average loss = %s", global_step,
                    tr_loss)

    # Save the trained model and the tokenizer
    if args.do_train and (args.local_rank == -1
                          or torch.distributed.get_rank() == 0):
        logger.info("Saving model checkpoint to %s", args.output_dir)
        # Save a trained model, configuration and tokenizer using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`
        # Take care of distributed/parallel training
        model_to_save = model.module if hasattr(model, "module") else model
        model_to_save.save_pretrained(args.output_dir)
        tokenizer.save_pretrained(args.output_dir)

        # Good practice: save your training arguments together with the trained model
        torch.save(args, os.path.join(args.output_dir, "training_args.bin"))

        # Load a trained model and vocabulary that you have fine-tuned
        model = AutoModelForQuestionAnswering.from_pretrained(
            args.output_dir)  # , force_download=True)
        tokenizer = AutoTokenizer.from_pretrained(
            args.output_dir, do_lower_case=args.do_lower_case)
        model.to(args.device)

    # Evaluation - we can ask to evaluate all the checkpoints (sub-directories) in a directory
    results = {}
    if args.do_eval and args.local_rank in [-1, 0]:
        if args.do_train:
            logger.info(
                "Loading checkpoints saved during training for evaluation")
            checkpoints = [args.output_dir]
            if args.eval_all_checkpoints:
                checkpoints = list(
                    os.path.dirname(c) for c in sorted(
                        glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME,
                                  recursive=True)))
                logging.getLogger("transformers.modeling_utils").setLevel(
                    logging.WARN)  # Reduce model loading logs
        else:
            logger.info("Loading checkpoint %s for evaluation",
                        args.model_name_or_path)
            checkpoints = [args.model_name_or_path]

        logger.info("Evaluate the following checkpoints: %s", checkpoints)

        for checkpoint in checkpoints:
            # Reload the model
            global_step = checkpoint.split(
                "-")[-1] if len(checkpoints) > 1 else ""
            model = AutoModelForQuestionAnswering.from_pretrained(
                checkpoint)  # , force_download=True)
            model.to(args.device)

            # Evaluate
            result = evaluate(args, model, tokenizer, prefix=global_step)

            result = dict(
                (k + ("_{}".format(global_step) if global_step else ""), v)
                for k, v in result.items())
            results.update(result)

    logger.info("Results: {}".format(results))

    return results
Example #11
def main():
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help=
        "The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the benchmarks would be put.",
    )
    parser.add_argument(
        "--experiment",
        type=str,
        default="baseline",
        required=True,
        help=
        "The randomization experiment to run. Default `baseline` does no randomization"
    )
    parser.add_argument(
        "--include_predictions",
        action="store_true",
        help=
        "Set this flag if you want to save the predictions for the experiment.",
    )
    parser.add_argument(
        "--models_dir",
        default=None,
        type=str,
        required=True,
        help=
        "The fine-tuned models directory where all the tasks with respective model seed checkpoints are stored.",
    )

    parser.add_argument(
        "--model_type",
        default=None,
        type=str,
        required=True,
        help="Model type",
    )
    # Other parameters
    parser.add_argument(
        "--global_masks_dir",
        default=None,
        type=str,
        required=False,
        help=
        "Global masks to be applied before running the experiment. (Used only for baseline experiment)",
    )
    parser.add_argument(
        "--global_mask_file_name",
        default=None,
        type=str,
        required=False,
        help=
        "Global masks to be applied before running the experiment. (Used only for baseline experiment)",
    )
    parser.add_argument(
        "--head_masks_dir",
        default=None,
        type=str,
        required=False,
        help=
        "Head masks to be applied before running the experiment. (Used only for baseline experiment)",
    )
    parser.add_argument(
        "--mlp_masks_dir",
        default=None,
        type=str,
        required=False,
        help=
        "MLP masks to be applied before running the experiment. (Used only for baseline experiment)",
    )
    parser.add_argument("--mask_mode",
                        choices=["use", "invert", "random", "bad"],
                        default="use",
                        help="use,invert,random")
    parser.add_argument(
        "--cache_dir",
        default="",
        type=str,
        help=
        "Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--overwrite_cache",
        action="store_true",
        help="Overwrite the cached training and evaluation sets",
    )
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=
        "The maximum total input sequence length after tokenization. Sequences longer "
        "than this will be truncated, sequences shorter will be padded.",
    )
    parser.add_argument(
        "--do_lower_case",
        action="store_true",
        help="Set this flag if you are using an uncased model.",
    )
    parser.add_argument(
        "--per_gpu_eval_batch_size",
        default=64,
        type=int,
        help="Batch size per GPU/CPU for evaluation.",
    )
    parser.add_argument("--no_cuda",
                        action="store_true",
                        help="Avoid using CUDA when available")

    parser.add_argument("--seed",
                        type=int,
                        default=42,
                        help="random seed for initialization")
    args = parser.parse_args()

    args.local_rank = -1
    # Setup CUDA, GPU & distributed training
    device = torch.device(
        "cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
    args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    args.device = device
    args.model_type = args.model_type.lower()
    args.experiment = args.experiment.lower()
    args.output_dir = f"{args.output_dir}/{args.experiment}"

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )

    # Set seed
    set_seed(args)

    tracker = ImpactTracker(args.output_dir)
    tracker.launch_impact_monitor()

    # Prepare GLUE task
    experiment_map = {
        # Only the `baseline` is used for the paper results.
        "baseline": experiment_baseline,
        # The rest can be ignored.
        "randomize_embeddings": experiment_randomize_embeddings,
        "randomize_qkv": experiment_randomize_qkv,
        "randomize_fc": experiment_randomize_fc,
        "randomize_qkv_together": experiment_randomize_qkv_together,
        "randomize_qkv_together_pairwise":
        experiment_randomize_qkv_together_pairwise,
        "zero_out_qkv": experiment_zero_out_qkv,
        "randomize_full_layerwise": experiment_randomize_full_layerwise,
        "randomize_components": experiment_randomize_components,
        "revert_embeddings": experiment_revert_embeddings,
        "revert_qkv": experiment_revert_qkv,
        "revert_fc": experiment_revert_fc,
        "revert_embeddings_rotate": experiment_revert_embeddings_rotate,
        "ablate_residuals": experiment_ablate_residuals,
        "ablate_pruning": experiment_prune
    }
    experiment_map[args.experiment](args)
Example #12
def main():
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help=
        "The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_type",
        default=None,
        type=str,
        required=True,
        help="Model type selected in the list: " +
        ", ".join(MODEL_CLASSES.keys()),
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pre-trained model or shortcut name selected in the list: "
        + ", ".join(ALL_MODELS),
    )
    parser.add_argument(
        "--task_name",
        default=None,
        type=str,
        required=True,
        help="The name of the task to train selected in the list: " +
        ", ".join(processors.keys()),
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help=
        "The output directory where the model predictions and checkpoints will be written.",
    )

    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument(
        "--cache_dir",
        default="",
        type=str,
        help=
        "Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=
        "The maximum total input sequence length after tokenization. Sequences longer "
        "than this will be truncated, sequences shorter will be padded.",
    )
    parser.add_argument("--do_train",
                        action="store_true",
                        help="Whether to run training.")
    parser.add_argument(
        "--train_mode",
        choices=["fine_tune", "random", "random_frozen", "frozen"],
        default="fine_tune")

    parser.add_argument("--do_eval",
                        action="store_true",
                        help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--evaluate_during_training",
        action="store_true",
        help="Run evaluation during training at each logging step.",
    )
    parser.add_argument(
        "--do_lower_case",
        action="store_true",
        help="Set this flag if you are using an uncased model.",
    )

    parser.add_argument(
        "--per_gpu_train_batch_size",
        default=8,
        type=int,
        help="Batch size per GPU/CPU for training.",
    )
    parser.add_argument(
        "--per_gpu_eval_batch_size",
        default=8,
        type=int,
        help="Batch size per GPU/CPU for evaluation.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help=
        "Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate",
                        default=5e-5,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--weight_decay",
                        default=0.0,
                        type=float,
                        help="Weight decay if we apply some.")
    parser.add_argument("--adam_epsilon",
                        default=1e-8,
                        type=float,
                        help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm",
                        default=1.0,
                        type=float,
                        help="Max gradient norm.")
    parser.add_argument(
        "--num_train_epochs",
        default=3.0,
        type=float,
        help="Total number of training epochs to perform.",
    )
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help=
        "If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument("--warmup_steps",
                        default=0,
                        type=int,
                        help="Linear warmup over warmup_steps.")

    parser.add_argument("--logging_steps",
                        type=int,
                        default=500,
                        help="Log every X updates steps.")
    parser.add_argument("--save_steps",
                        type=int,
                        default=500,
                        help="Save checkpoint every X updates steps.")
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help=
        "Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument("--no_cuda",
                        action="store_true",
                        help="Avoid using CUDA when available")
    parser.add_argument(
        "--overwrite_output_dir",
        action="store_true",
        help="Overwrite the content of the output directory",
    )
    parser.add_argument(
        "--overwrite_cache",
        action="store_true",
        help="Overwrite the cached training and evaluation sets",
    )
    parser.add_argument("--seed",
                        type=int,
                        default=42,
                        help="random seed for initialization")

    parser.add_argument(
        "--fp16",
        action="store_true",
        help=
        "Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=
        "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
        "See details at https://nvidia.github.io/apex/amp.html",
    )
    parser.add_argument(
        "--head_mask",
        type=str,
        default=None,
        help=
        "Path to head_mask used for pruning the heads of the pre-trained model, before fine-tuning."
    )
    parser.add_argument(
        "--mlp_mask",
        default=None,
        type=str,
        required=False,
        help="MLP mask to be applied before running the experiment.",
    )
    parser.add_argument("--mask_mode",
                        choices=["use", "invert", "random", "bad"],
                        default="use",
                        help="use,invert,random")
    parser.add_argument(
        "--global_mask",
        default=None,
        type=str,
        required=False,
        help="Global mask to be applied before running the experiment.",
    )
    parser.add_argument("--local_rank",
                        type=int,
                        default=-1,
                        help="For distributed training: local_rank")
    parser.add_argument("--server_ip",
                        type=str,
                        default="",
                        help="For distant debugging.")
    parser.add_argument("--server_port",
                        type=str,
                        default="",
                        help="For distant debugging.")
    args = parser.parse_args()

    if (os.path.exists(args.output_dir) and os.listdir(args.output_dir)
            and args.do_train and not args.overwrite_output_dir):
        raise ValueError(
            "Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome."
            .format(args.output_dir))

    # Setup distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port),
                            redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available()
                              and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend="nccl")
        args.n_gpu = 1
    args.device = device

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        args.local_rank,
        device,
        args.n_gpu,
        bool(args.local_rank != -1),
        args.fp16,
    )
    energy_logs_dir = args.output_dir + "/energy_logs/"
    if not os.path.exists(energy_logs_dir):
        os.makedirs(energy_logs_dir)
    tracker = ImpactTracker(energy_logs_dir)
    tracker.launch_impact_monitor()
    # Set seed
    set_seed(args)

    # Prepare GLUE task
    args.task_name = args.task_name.lower()
    if args.task_name not in processors:
        raise ValueError("Task not found: %s" % (args.task_name))
    processor = processors[args.task_name]()
    args.output_mode = output_modes[args.task_name]
    label_list = processor.get_labels()
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier(
        )  # Make sure only the first process in distributed training will download model & vocab

    args.model_type = args.model_type.lower()
    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    config = config_class.from_pretrained(
        args.config_name if args.config_name else args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=args.task_name,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    tokenizer = tokenizer_class.from_pretrained(
        args.tokenizer_name
        if args.tokenizer_name else args.model_name_or_path,
        do_lower_case=args.do_lower_case,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )

    if args.train_mode in ("random", "random_frozen"):
        model = model_class(config)
    else:
        model = model_class.from_pretrained(
            args.model_name_or_path,
            from_tf=bool(".ckpt" in args.model_name_or_path),
            config=config,
            cache_dir=args.cache_dir if args.cache_dir else None,
        )

    global_mask = None
    if args.global_mask:
        global_mask = torch.load(args.global_mask)
        add_masks(model, global_mask)

    if args.head_mask is not None:
        head_mask = np.load(args.head_mask)
        if args.mask_mode == "random":
            logger.info(
                f"Creating random head_mask with about {head_mask.sum()} elements"
            )
            p_unpruned = head_mask.sum() / head_mask.size
            head_mask = np.zeros_like(head_mask)
            uniform_random = np.random.rand(*head_mask.shape)
            head_mask[uniform_random < p_unpruned] = 1
        elif args.mask_mode == "invert":
            head_mask = 1 - head_mask
            for layer in range(head_mask.shape[0]):
                if head_mask[layer].sum() == 0:
                    head_to_unprune = np.random.choice(head_mask.shape[1])
                    logger.info(
                        f"Unpruning head {head_to_unprune} in layer {layer} because randomly we allocated zero heads."
                    )
                    head_mask[layer][head_to_unprune] = 1
            logger.info(
                f"Invert head_mask {head_mask} with {head_mask.sum()} elements"
            )
        elif args.mask_mode == "bad":
            total_good = int(head_mask.sum())
            total_bad = int((1 - head_mask).sum())
            if total_good > total_bad:
                bad_indices = np.argwhere(head_mask == 0).tolist()
                remaining_indices = random.sample(
                    np.argwhere(head_mask == 1).tolist(),
                    total_good - total_bad)
                bad_indices.extend(
                    remaining_indices
                )  # Remaining heads sampled from "good" heads.
            else:
                bad_indices = random.sample(
                    np.argwhere(head_mask == 0).tolist(), total_good)
            head_mask = np.zeros_like(head_mask)
            for idx in bad_indices:
                head_mask[idx[0], idx[1]] = 1
            assert int(head_mask.sum()) == total_good

        head_mask = torch.from_numpy(head_mask)
        heads_to_prune = {}
        for layer in range(len(head_mask)):
            heads_to_mask = [
                h[0] for h in (1 - head_mask[layer].long()).nonzero().tolist()
            ]
            heads_to_prune[layer] = heads_to_mask
        assert sum(len(h) for h in heads_to_prune.values()) == (
            1 - head_mask.long()).sum().item()
        logger.info(f"Pruning heads {heads_to_prune}")
        model.prune_heads(heads_to_prune)

    if args.mlp_mask is not None:
        mlp_mask = np.load(args.mlp_mask)
        if args.mask_mode == "random":
            p_unpruned = mlp_mask.sum() / mlp_mask.size
            mlp_mask = np.zeros_like(mlp_mask)
            uniform_random = np.random.rand(*mlp_mask.shape)
            mlp_mask[uniform_random < p_unpruned] = 1
        elif args.mask_mode == "invert":
            mlp_mask = 1 - mlp_mask
        elif args.mask_mode == "bad":
            total_good = int(mlp_mask.sum())
            total_bad = int((1 - mlp_mask).sum())
            if total_good > total_bad:
                bad_indices = np.argwhere(mlp_mask == 0).tolist()
                remaining_indices = random.sample(
                    np.argwhere(mlp_mask == 1).tolist(),
                    total_good - total_bad)
                bad_indices.extend(
                    remaining_indices
                )  # Remaining MLPs sampled from "good" MLPs.
            else:
                bad_indices = random.sample(
                    np.argwhere(mlp_mask == 0).tolist(), total_good)
            mlp_mask = np.zeros_like(mlp_mask)
            for idx in bad_indices:
                mlp_mask[idx[0]] = 1
            assert int(mlp_mask.sum()) == total_good

        mlps_to_prune = [
            h[0]
            for h in (1 -
                      torch.from_numpy(mlp_mask).long()).nonzero().tolist()
        ]
        logger.info(f"MLPS to prune - {mlps_to_prune}")
        model.prune_mlps(mlps_to_prune)

    if args.train_mode in ("frozen", "random_frozen"):
        logger.info("Freezing model parameters (the classifier head stays trainable)")
        for name, param in model.named_parameters():
            if 'classifier' not in name:  # keep the classifier head trainable
                param.requires_grad = False

    if args.local_rank == 0:
        # End of barrier: rank 0 has loaded the model & vocab, so the other processes can proceed
        torch.distributed.barrier()

    model.to(args.device)

    logger.info("Training/evaluation parameters %s", args)

    # Training
    if args.do_train:
        train_dataset = load_and_cache_examples(args,
                                                args.task_name,
                                                tokenizer,
                                                evaluate=False)
        global_step, tr_loss = train(args, train_dataset, model, tokenizer)
        logger.info(" global_step = %s, average loss = %s", global_step,
                    tr_loss)

    # Saving best-practices: if you use default names for the model, you can reload it using from_pretrained()
    if args.do_train and (args.local_rank == -1
                          or torch.distributed.get_rank() == 0):
        # Create output directory if needed
        if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(args.output_dir)

        logger.info("Saving model checkpoint to %s", args.output_dir)
        # Save a trained model, configuration and tokenizer using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`
        model_to_save = (
            model.module if hasattr(model, "module") else model
        )  # Take care of distributed/parallel training
        model_to_save.save_pretrained(args.output_dir)
        tokenizer.save_pretrained(args.output_dir)

        # Good practice: save your training arguments together with the trained model
        torch.save(args, os.path.join(args.output_dir, "training_args.bin"))

        # Load a trained model and vocabulary that you have fine-tuned
        model = load_trained_model(args.output_dir, model_class, config_class)
        tokenizer = tokenizer_class.from_pretrained(args.output_dir)
        model.to(args.device)

    # Evaluation
    results = {}
    if args.do_eval and args.local_rank in [-1, 0]:
        tokenizer = tokenizer_class.from_pretrained(
            args.output_dir, do_lower_case=args.do_lower_case)
        checkpoints = [args.output_dir]
        if args.eval_all_checkpoints:
            checkpoints = list(
                os.path.dirname(c) for c in sorted(
                    glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME,
                              recursive=True)))
            logging.getLogger("transformers.modeling_utils").setLevel(
                logging.WARN)  # Reduce logging
        logger.info("Evaluate the following checkpoints: %s", checkpoints)
        for checkpoint in checkpoints:
            global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
            prefix = checkpoint.split("/")[-1] if "checkpoint" in checkpoint else ""
            model = load_trained_model(checkpoint, model_class, config_class)
            model.to(args.device)
            result = evaluate(args, model, tokenizer, prefix=prefix)
            result = {f"{k}_{global_step}": v for k, v in result.items()}
            results.update(result)

    return results
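
A minimal, self-contained sketch of the mask-to-pruning conversion used above, assuming a binary head mask of shape (n_layers, n_heads) with 1 = keep and 0 = prune; the model name and mask values are placeholders, not taken from the script.

import numpy as np
import torch
from transformers import BertModel

# Hypothetical mask: keep everything except the first four heads of layer 0.
head_mask = np.ones((12, 12))
head_mask[0, :4] = 0

mask = torch.from_numpy(head_mask)
heads_to_prune = {
    layer: (1 - mask[layer].long()).nonzero().flatten().tolist()
    for layer in range(len(mask))
}

model = BertModel.from_pretrained("bert-base-uncased")
model.prune_heads(heads_to_prune)  # same call as in the example above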
Ejemplo n.º 13
0
args = parser.parse_args()
args.tied = not args.not_tied

if args.d_embed < 0:
    args.d_embed = args.d_model

assert args.ext_len >= 0, 'extended context length must be non-negative'
assert args.batch_size % args.batch_chunk == 0

args.work_dir = os.path.join(args.work_dir, time.strftime('%Y%m%d-%H%M%S'))
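# Note: create_exp_dir returns a logging *function* (the Transformer-XL helper), so the
# `logging(...)` call in the except block below invokes that function, not the stdlib module.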
logging = create_exp_dir(args.work_dir,
    scripts_to_save=['train.py', 'mem_transformer.py'], debug=args.debug)

try:
    from experiment_impact_tracker.compute_tracker import ImpactTracker
    tracker = ImpactTracker(args.work_dir)
    tracker.launch_impact_monitor()
except ImportError as e:
    logging(str(e))

# Set the random seed manually for reproducibility.
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
    if not args.cuda:
        print('WARNING: You have a CUDA device, so you should probably run with --cuda')
    else:
        torch.cuda.manual_seed_all(args.seed)

# Validate `--fp16` option
if args.fp16:
Ejemplo n.º 14
0
def main(args):
    torch.manual_seed(args.seed)
    if args.cuda:
        torch.cuda.manual_seed(args.seed)

    set_logger(args)

    tracker = ImpactTracker(args.save_path)
    tracker.launch_impact_monitor()

    with open(os.path.join(args.data_path, 'entities.dict')) as fin:
        entity2id = dict()
        for line in fin:
            eid, entity = line.strip().split('\t')
            entity2id[entity] = int(eid)

    with open(os.path.join(args.data_path, 'relations.dict')) as fin:
        relation2id = dict()
        for line in fin:
            rid, relation = line.strip().split('\t')
            relation2id[relation] = int(rid)

    nentity = len(entity2id)
    nrelation = len(relation2id)

    logging.info('Data Path: %s' % args.data_path)
    logging.info('#entity: %d' % nentity)
    logging.info('#relation: %d' % nrelation)

    rel_ent_dict = read_triple(os.path.join(args.data_path, 'train.txt'),
                               entity2id, relation2id)
    logging.info('#train: %d' % len(rel_ent_dict))

    model = ProcrustEs(rel_ent_dict, nentity, nrelation, args.total_dim,
                       args.sub_dim, args.cuda, args.save_path, args.eps)
    optimizer = torch.optim.Adam(
        model.parameters(),
        lr=args.learning_rate,
        eps=args.eps,
        weight_decay=args.reg,
    )
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=args.save_step,
                                                gamma=args.gamma,
                                                last_epoch=-1)

    old_loss = torch.tensor(float("Inf"))

    if args.cuda:
        model = model.cuda()
        old_loss = old_loss.cuda()

    # training loop
    for epoch in range(args.max_step):
        info = tracker.get_latest_info_and_check_for_errors()
        model.normalise()
        save_flag = not ((epoch + 1) % args.save_step)
        loss = model(save=save_flag)
        logging.info(loss.item())

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        scheduler.step()
        old_loss = loss
    model(save=True)
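
A brief sketch of reading back the energy logged by the tracker once the run above has finished; the log path is a placeholder and the DataInterface import path is assumed from the experiment_impact_tracker package layout.

from experiment_impact_tracker.data_interface import DataInterface

# Aggregate the logs that ImpactTracker wrote to args.save_path during training.
di = DataInterface("path/to/save_path")
print(di.total_power)  # total power attributed to this run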
Ejemplo n.º 15
0
def main():
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the benchmarks would be put.",
    )
    parser.add_argument(
        "--experiment",
        type=str,
        default="none",
        required=True,
        help="The randomization experiment to run.")
    parser.add_argument(
        "--include_predictions", action="store_true", help="Set this flag if you want to save the predictions for the experiment.",
    )
    parser.add_argument(
        "--models_dir",
        default=None,
        type=str,
        required=True,
        help="The fine-tuned models directory where all the tasks with respective model seed checkpoints are stored.",
    )
    parser.add_argument(
        "--masks_dir",
        default=None,
        type=str,
        required=True,
        help="The directory where final masks after pruning are stored.",
    )
    parser.add_argument(
        "--model_type",
        default=None,
        type=str,
        required=True,
        help="Model type",
    )
    # Other parameters
    parser.add_argument(
        "--cache_dir",
        default="",
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets",
    )
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help="The maximum total input sequence length after tokenization. Sequences longer "
        "than this will be truncated, sequences shorter will be padded.",
    )
    parser.add_argument(
        "--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model.",
    )
    parser.add_argument(
        "--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.",
    )
    parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")

    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    args = parser.parse_args()


    args.local_rank = -1
    # Setup CUDA, GPU & distributed training
    device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
    args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    args.device = device
    args.model_type = args.model_type.lower()
    args.experiment = args.experiment.lower()
    args.output_dir = f"{args.output_dir}/{args.experiment}"

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )

    # Set seed
    set_seed(args)

    tracker = ImpactTracker(args.output_dir)
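    # launch_impact_monitor starts a separate process that periodically logs power, CPU and
    # GPU usage to args.output_dir for the lifetime of this run.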
    tracker.launch_impact_monitor()

    # Prepare GLUE task
    results = evaluate_all_tasks(args)
    results_file_path = f"{args.output_dir}/results.json"
    write_results(results, results_file_path)
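
write_results is not shown in this example; a plausible minimal sketch, assuming it simply serializes the per-task results to JSON (the real helper is defined elsewhere in the repository).

import json
import os

def write_results(results, results_file_path):
    # Hypothetical implementation: dump the metrics dict as JSON.
    os.makedirs(os.path.dirname(results_file_path), exist_ok=True)
    with open(results_file_path, "w") as f:
        json.dump(results, f, indent=2)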
Ejemplo n.º 16
0
                                                args.batch_sizes):

        n_layer, d_model, batch_size = int(n_layer), int(d_model), int(batch_size)
        if args.reload:
            if results.get(str((n_layer, d_model, batch_size))) is not None:
                print(f"{(n_layer, d_model, batch_size)} already in results")
                continue

        corpus = get_lm_corpus(default_args.data, default_args.dataset)
        ntokens = len(corpus.vocab)
        default_args.n_token = ntokens

        if args.tracking:
            from experiment_impact_tracker.compute_tracker import ImpactTracker
            tracker = ImpactTracker(f"impact/{n_layer}_{d_model}_{batch_size}")
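            # A separate log directory per (n_layer, d_model, batch_size) configuration keeps
            # the energy measurements of the different runs comparable.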
            tracker.launch_impact_monitor()

        n_head, d_head = head_repartition_rule(d_model)
        d_inner = d_model

        model = MemTransformerLM(ntokens,
                                 n_layer,
                                 n_head,
                                 d_model,
                                 d_head,
                                 d_inner,
                                 default_args.dropout,
                                 default_args.dropatt,
                                 tie_weight=default_args.tied,
                                 d_embed=d_model,
# Code in file tensor/two_layer_net_tensor.py
import time

import torch

from experiment_impact_tracker.compute_tracker import ImpactTracker

tracker = ImpactTracker("./testlogs/")

tracker.launch_impact_monitor()
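# From here on, everything below runs while the monitor process is recording power draw
# to ./testlogs/.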
device = torch.device("cpu")

# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 1024, 10000, 1000, 100

# Create random input and output data
x = torch.randn(N, D_in, device=device)
y = torch.randn(N, D_out, device=device)

# Randomly initialize weights
w1 = torch.randn(D_in, H, device=device)
w2 = torch.randn(H, D_out, device=device)

learning_rate = 1e-6
for t in range(1000):
    # Forward pass: compute predicted y
    h = x.mm(w1)
    h_relu = h.clamp(min=0)
    y_pred = h_relu.mm(w2)