Example #1
from utils import DataFiles

dataFiles = DataFiles(__file__)

inputRaw = dataFiles.input
inputRawEX = dataFiles.inputEX


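# Score a deck: the bottom card is worth its value x 1, the next x 2, and so on.
# Whichever queue is non-empty is treated as the winning deck.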
def calc_score(q1, q2):
    ans = 0

    if q1:
        for i in range(1, len(q1) + 1):
            ans += i * q1[-i]
    else:
        for i in range(1, len(q2) + 1):
            ans += i * q2[-i]

    return ans


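# Play rounds until one deck is empty: both players draw their top card and the
# holder of the higher card puts both at the bottom of their deck, winner's card first.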
def part1(q1, q2):
    while q1 and q2:
        c1 = q1[0]
        c2 = q2[0]

        if c1 > c2:
            q1 = q1[1:] + [c1, c2]
            q2 = q2[1:]
        else:
            q2 = q2[1:] + [c2, c1]
            q1 = q1[1:]

    return calc_score(q1, q2)
Example #2
from utils import DataFiles

dataFiles = DataFiles(__file__)

input = dataFiles.get_input()  # inputRaw = dataFiles.input
inputEX = dataFiles.get_inputEX()  # inputRawEX = dataFiles.inputEX

numbers = [int(line) for line in input]
numbers.sort()

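# Count joltage gaps of 1 and 3 between consecutive adapters (diff_3 starts at 1
# to account for the final +3 step up to the device).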
diff_1 = 0
diff_3 = 1

prev = 0

for curr in numbers:
    diff = curr - prev
    if diff == 1:
        diff_1 += 1
    if diff == 3:
        diff_3 += 1
    prev = curr

print('Part 1:', diff_1 * diff_3)

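# Part 2: count the distinct adapter arrangements with dynamic programming.
# Prepend the outlet (0); dp[i] = number of arrangements ending at numbers[i].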
numbers.insert(0, 0)

dp = [1] + [0] * len(numbers)

for i in range(len(numbers)):
    for j in range(i):
        if numbers[i] - numbers[j] <= 3:
            dp[i] += dp[j]

print('Part 2:', dp[len(numbers) - 1])
Example #3
def main():
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help=
        "The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_type",
        default=None,
        type=str,
        required=True,
        help="Model type selected in the list: " +
        ", ".join(MODEL_CLASSES.keys()),
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pre-trained model or shortcut name selected in the list: "
        + ", ".join(ALL_MODELS),
    )
    parser.add_argument(
        "--task_name",
        default=None,
        type=str,
        required=True,
        help="The name of the task to train selected in the list: " +
        ", ".join(processors.keys()),
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help=
        "The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--bert_representation",
        default="pool",
        choices=["avg", "pool"],
        type=str,
        help="The BERT representation type",
    )
    parser.add_argument(
        "--similarity_function",
        default="pool",
        choices=["dot"],
        type=str,
        help="The similarity scoring function",
    )

    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument(
        "--cache_dir",
        default="",
        type=str,
        help=
        "Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=
        "The maximum total input sequence length after tokenization. Sequences longer "
        "than this will be truncated, sequences shorter will be padded.",
    )
    parser.add_argument("--do_train",
                        action="store_true",
                        help="Whether to run training.")
    parser.add_argument("--do_eval",
                        action="store_true",
                        help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--evaluate_during_training",
        action="store_true",
        help="Run evaluation during training at each logging step.",
    )
    parser.add_argument(
        "--do_lower_case",
        action="store_true",
        help="Set this flag if you are using an uncased model.",
    )

    parser.add_argument(
        "--per_gpu_train_batch_size",
        default=8,
        type=int,
        help="Batch size per GPU/CPU for training.",
    )
    parser.add_argument(
        "--per_gpu_eval_batch_size",
        default=8,
        type=int,
        help="Batch size per GPU/CPU for evaluation.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help=
        "Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate",
                        default=5e-5,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--weight_decay",
                        default=0.0,
                        type=float,
                        help="Weight decay if we apply some.")
    parser.add_argument("--adam_epsilon",
                        default=1e-8,
                        type=float,
                        help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm",
                        default=1.0,
                        type=float,
                        help="Max gradient norm.")
    parser.add_argument(
        "--num_train_epochs",
        default=3.0,
        type=float,
        help="Total number of training epochs to perform.",
    )
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help=
        "If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument("--warmup_steps",
                        default=0,
                        type=int,
                        help="Linear warmup over warmup_steps.")

    parser.add_argument("--logging_steps",
                        type=int,
                        default=500,
                        help="Log every X updates steps.")
    parser.add_argument("--save_steps",
                        type=int,
                        default=500,
                        help="Save checkpoint every X updates steps.")
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help=
        "Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument("--no_cuda",
                        action="store_true",
                        help="Avoid using CUDA when available")
    parser.add_argument(
        "--overwrite_output_dir",
        action="store_true",
        help="Overwrite the content of the output directory",
    )
    parser.add_argument(
        "--overwrite_cache",
        action="store_true",
        help="Overwrite the cached training and evaluation sets",
    )
    parser.add_argument("--seed",
                        type=int,
                        default=42,
                        help="random seed for initialization")

    parser.add_argument(
        "--fp16",
        action="store_true",
        help=
        "Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=
        "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
        "See details at https://nvidia.github.io/apex/amp.html",
    )
    parser.add_argument(
        "--save_total_limit",
        type=int,
        default=None,
        help=
        "Limit the total amount of checkpoints, delete the older checkpoints in the output_dir, does not delete by default",
    )
    parser.add_argument("--local_rank",
                        type=int,
                        default=-1,
                        help="For distributed training: local_rank")
    parser.add_argument("--server_ip",
                        type=str,
                        default="",
                        help="For distant debugging.")
    parser.add_argument("--server_port",
                        type=str,
                        default="",
                        help="For distant debugging.")
    args = parser.parse_args()

    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
    )
    if (os.path.exists(args.output_dir) and os.listdir(args.output_dir)
            and args.do_train and not args.overwrite_output_dir):
        # set to load the latest checkpoint for training
        args.model_name_or_path = args.output_dir
        all_model_checkpoints = [
            ckpt for ckpt in os.listdir(args.model_name_or_path)
            if os.path.isdir(os.path.join(args.model_name_or_path, ckpt))
        ]
        # Sort numerically by step number so the most recent checkpoint comes first.
        all_model_checkpoints = [(int(ckpt.split("-")[-1]) if "-" in ckpt else -1,
                                  ckpt) for ckpt in all_model_checkpoints]
        all_model_checkpoints.sort(reverse=True)
        args.model_name_or_path = os.path.join(args.model_name_or_path,
                                               all_model_checkpoints[0][1])
        logger.info("setting to load the model from %s",
                    args.model_name_or_path)

    # Setup distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port),
                            redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available()
                              and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend="nccl")
        args.n_gpu = 1
    args.device = device

    # Setup logging
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        args.local_rank,
        device,
        args.n_gpu,
        bool(args.local_rank != -1),
        args.fp16,
    )

    # Set seed
    set_seed(args)

    # Prepare GLUE task
    args.task_name = args.task_name.lower()
    if args.task_name not in processors:
        raise ValueError("Task not found: %s" % (args.task_name))
    processor = processors[args.task_name]()
    args.output_mode = output_modes[args.task_name]
    num_labels = 2

    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        # Make sure only the first process in distributed training will download model & vocab
        torch.distributed.barrier()

    args.model_type = args.model_type.lower()
    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    config = config_class.from_pretrained(
        args.config_name if args.config_name else args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=args.task_name,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    tokenizer = tokenizer_class.from_pretrained(
        args.tokenizer_name
        if args.tokenizer_name else args.model_name_or_path,
        do_lower_case=args.do_lower_case,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    model = model_class.from_pretrained(
        args.model_name_or_path,
        from_tf=bool(".ckpt" in args.model_name_or_path),
        config=config,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )

    if args.local_rank == 0:
        # Make sure only the first process in distributed training will download model & vocab
        torch.distributed.barrier()

    model.to(args.device)

    logger.info("Training/evaluation parameters %s", args)

    # Training
    if args.do_train:
        datafiles = DataFiles(args.data_dir)
        if os.path.isfile(
                os.path.join(args.model_name_or_path, "datafiles.txt")):
            datafiles.load(
                os.path.join(args.model_name_or_path, "datafiles.txt"))
        global_step = 0

        shard_count = 0
        if args.local_rank in [-1, 0]:
            tb_writer = SummaryWriter()

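        # Iterate over data shards: DataFiles.next() hands out the next unprocessed
        # shard and returns a falsy value once every shard has been consumed.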
        while True:
            todo_file = datafiles.next()
            if not todo_file:
                break
            if args.local_rank == 0:
                torch.distributed.barrier()

            train_dataset = load_and_cache_examples(args, args.task_name,
                                                    tokenizer, todo_file)

            args.train_batch_size = args.per_gpu_train_batch_size * max(
                1, args.n_gpu)
            train_sampler = RandomSampler(
                train_dataset
            ) if args.local_rank == -1 else DistributedSampler(train_dataset)
            train_dataloader = DataLoader(train_dataset,
                                          sampler=train_sampler,
                                          batch_size=args.train_batch_size)

            if shard_count == 0:  # if this is the first shard, create the optimizer or load from the previous checkpoint
                # Prepare optimizer and schedule (linear warmup and decay)
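                # Biases and LayerNorm weights are excluded from weight decay,
                # as is standard for BERT-style fine-tuning.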
                no_decay = ["bias", "LayerNorm.weight"]
                optimizer_grouped_parameters = [
                    {
                        "params": [
                            p for n, p in model.named_parameters()
                            if not any(nd in n for nd in no_decay)
                        ],
                        "weight_decay":
                        args.weight_decay,
                    },
                    {
                        "params": [
                            p for n, p in model.named_parameters()
                            if any(nd in n for nd in no_decay)
                        ],
                        "weight_decay":
                        0.0
                    },
                ]

                t_total = len(
                    train_dataloader
                ) // args.gradient_accumulation_steps * args.num_train_epochs * len(
                    datafiles.all_files)  # 280 shards of data files in total
                optimizer = AdamW(optimizer_grouped_parameters,
                                  lr=args.learning_rate,
                                  eps=args.adam_epsilon)
                scheduler = get_linear_schedule_with_warmup(
                    optimizer,
                    num_warmup_steps=args.warmup_steps,
                    num_training_steps=t_total)

                # Check if saved optimizer or scheduler states exist
                if os.path.isfile(
                        os.path.join(args.model_name_or_path,
                                     "optimizer.pt")) and os.path.isfile(
                                         os.path.join(args.model_name_or_path,
                                                      "scheduler.pt")):
                    logger.info("loading optimizer and scheduler from %s",
                                args.model_name_or_path)
                    # Load in optimizer and scheduler states
                    optimizer.load_state_dict(
                        torch.load(
                            os.path.join(args.model_name_or_path,
                                         "optimizer.pt")))
                    scheduler.load_state_dict(
                        torch.load(
                            os.path.join(args.model_name_or_path,
                                         "scheduler.pt")))

            if args.fp16:
                try:
                    from apex import amp
                except ImportError:
                    raise ImportError(
                        "Please install apex from https://www.github.com/nvidia/apex to use fp16 training."
                    )
                model, optimizer = amp.initialize(
                    model, optimizer, opt_level=args.fp16_opt_level)

            # multi-gpu training (should be after apex fp16 initialization)
            if args.n_gpu > 1:
                model = torch.nn.DataParallel(model)

            # Distributed training (should be after apex fp16 initialization)
            if args.local_rank != -1:
                model = torch.nn.parallel.DistributedDataParallel(
                    model,
                    device_ids=[args.local_rank],
                    output_device=args.local_rank,
                    find_unused_parameters=True,
                )

            # Train!
            logger.info("***** Running training *****")
            logger.info("  Num examples = %d", len(train_dataset))
            logger.info("  Num Epochs = %d", args.num_train_epochs)
            logger.info("  Instantaneous batch size per GPU = %d",
                        args.per_gpu_train_batch_size)
            logger.info(
                "  Total train batch size (w. parallel, distributed & accumulation) = %d",
                args.train_batch_size * args.gradient_accumulation_steps *
                (torch.distributed.get_world_size()
                 if args.local_rank != -1 else 1),
            )
            logger.info("  Gradient Accumulation steps = %d",
                        args.gradient_accumulation_steps)
            logger.info("  Total optimization steps = %d", t_total)

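            # On the first shard only, recover global_step from the checkpoint
            # directory name so training resumes where the loaded checkpoint left off.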
            if shard_count == 0:
                # Check if continuing training from a checkpoint
                if os.path.exists(args.model_name_or_path):
                    # set global_step to global_step of last saved checkpoint from model path
                    try:
                        global_step = int(
                            args.model_name_or_path.split("-")[-1].split("/")
                            [0])
                    except ValueError:
                        global_step = 0
                    epochs_trained = global_step // (
                        len(train_dataloader) //
                        args.gradient_accumulation_steps)

                    logger.info("  Continuing training from checkpoint %s",
                                args.model_name_or_path)
                    logger.info("  Continuing training from global step %d",
                                global_step)

            global_step, tr_loss, optimizer, scheduler = train(
                args,
                train_dataset,
                train_dataloader,
                model,
                tokenizer,
                optimizer,
                scheduler,
                tb_writer,
                global_step=global_step)
            logger.info(" global_step = %s, average loss = %s", global_step,
                        tr_loss)

            # Save model checkpoint
            output_dir = os.path.join(args.output_dir,
                                      "checkpoint-{}".format(global_step))
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)
            model_to_save = (
                model.module if hasattr(model, "module") else model
            )  # Take care of distributed/parallel training
            model_to_save.save_pretrained(output_dir)
            tokenizer.save_pretrained(output_dir)

            torch.save(args, os.path.join(output_dir, "training_args.bin"))
            logger.info("Saving model checkpoint to %s", output_dir)

            torch.save(optimizer.state_dict(),
                       os.path.join(output_dir, "optimizer.pt"))
            torch.save(scheduler.state_dict(),
                       os.path.join(output_dir, "scheduler.pt"))
            datafiles.save(os.path.join(output_dir, "datafiles.txt"))
            logger.info("Saving optimizer and scheduler states to %s",
                        output_dir)

            _rotate_checkpoints(args, "checkpoint")
            shard_count += 1

        if args.local_rank in [-1, 0]:
            tb_writer.close()

    # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
    if args.do_train and (args.local_rank == -1
                          or torch.distributed.get_rank() == 0):
        # Create output directory if needed
        if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(args.output_dir)

        logger.info("Saving model checkpoint to %s", args.output_dir)
        # Save a trained model, configuration and tokenizer using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`
        model_to_save = (model.module if hasattr(model, "module") else model
                         )  # Take care of distributed/parallel training
        model_to_save.save_pretrained(args.output_dir)
        tokenizer.save_pretrained(args.output_dir)

        # Good practice: save your training arguments together with the trained model
        torch.save(args, os.path.join(args.output_dir, "training_args.bin"))

        # Load a trained model and vocabulary that you have fine-tuned
        model = model_class.from_pretrained(args.output_dir)
        tokenizer = tokenizer_class.from_pretrained(args.output_dir)
        model.to(args.device)

    # Evaluation
    results = {}
    if args.do_eval and args.local_rank in [-1, 0]:
        tokenizer = tokenizer_class.from_pretrained(
            args.output_dir, do_lower_case=args.do_lower_case)
        checkpoints = [args.output_dir]
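        # Optionally evaluate every checkpoint directory found under output_dir.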
        if args.eval_all_checkpoints:
            checkpoints = list(
                os.path.dirname(c) for c in sorted(
                    glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME,
                              recursive=True)))
            logging.getLogger("transformers.modeling_utils").setLevel(
                logging.WARN)  # Reduce logging
        logger.info("Evaluate the following checkpoints: %s", checkpoints)
        for checkpoint in checkpoints:
            global_step = checkpoint.split(
                "-")[-1] if len(checkpoints) > 1 else ""
            prefix = checkpoint.split(
                "/")[-1] if checkpoint.find("checkpoint") != -1 else ""

            model = model_class.from_pretrained(checkpoint)
            model.to(args.device)
            result = evaluate(args, model, tokenizer, prefix=prefix)
            result = dict(
                (k + "_{}".format(global_step), v) for k, v in result.items())
            results.update(result)

    return results
Example #4
import itertools
import numpy as np
from utils import DataFiles

dataFiles = DataFiles(__file__)
input = dataFiles.get_input()


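# Find the first number that is not the sum of two different entries among the
# preceding `preamble` numbers (`preamble`, the look-back window size, is assumed
# to be defined elsewhere in the script).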
def part1_iterative(numbers):
    for i in range(preamble, len(numbers)):
        window = numbers[i - preamble: i]
        sums = set()

        # Only pairs of two *different* entries count, matching the numpy
        # version below, which masks out the diagonal.
        for j, nr_j in enumerate(window):
            for nr_k in window[j + 1:]:
                sums.add(nr_j + nr_k)

        if numbers[i] not in sums:
            return numbers[i]


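# Same check as above, but vectorised with numpy: build the pairwise-sum matrix
# of the window and mask the diagonal so a number is never paired with itself.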
def part1_numpy(numbers):
    for i in range(preamble, len(numbers)):
        partial_sum = np.array(numbers[i - preamble: i], dtype=np.int64)
        sums = partial_sum.reshape(1, -1) + partial_sum.reshape(-1, 1)

        for aux in range(0, preamble):
            sums[aux][aux] = -1

        if numbers[i] not in sums:
            return numbers[i]