Ejemplo n.º 1
0
def run():
    """Play one game in a Tk window, driving the board from a daemon thread."""
    root = Tk()

    # Hint to the window manager that this window is a dialog; the '-type'
    # attribute is not supported everywhere, so failures are ignored.
    try:
        root.attributes('-type', 'dialog')
    except TclError:
        pass

    args = parser.parse_args()
    player = UserPlayer(root) if args.manual else SelectedPlayer()

    adversary = RandomAdversary(DEFAULT_SEED)
    board = Board(BOARD_WIDTH, BOARD_HEIGHT)

    def runner():
        # Drive the game to completion; the moves themselves are not needed
        # here, the Visual observes the board directly.
        for _ in board.run(player, adversary):
            # Give the viewer a moment to follow automated play.
            if not args.manual:
                sleep(0.001)

    Visual(board)

    worker = Thread(target=runner)
    worker.daemon = True
    worker.start()

    root.mainloop()

    # Window closed: terminate the process (the daemon thread dies with it).
    raise SystemExit
Ejemplo n.º 2
0
def run():
    """Play one game in a pygame window, then idle until the user quits."""
    board = Board(BOARD_WIDTH, BOARD_HEIGHT)
    adversary = RandomAdversary(DEFAULT_SEED)

    args = parser.parse_args()
    player = UserPlayer() if args.manual else SelectedPlayer()

    pygame.init()

    window_size = [(BOARD_WIDTH + 6) * CELL_WIDTH,
                   BOARD_HEIGHT * CELL_HEIGHT]
    screen = pygame.display.set_mode(window_size)

    clock = pygame.time.Clock()

    # Without player input, a periodic timer event forces the block down.
    pygame.time.set_timer(EVENT_FORCE_DOWN, INTERVAL)

    for _ in board.run(player, adversary):
        render(screen, board)
        pygame.display.flip()

        # Automated play consumes no input events; drain the queue so the
        # window stays responsive and quit requests are honoured.
        if not args.manual:
            check_stop()

        clock.tick(FRAMES_PER_SECOND)

    # Game over: keep servicing events until the user closes the window.
    while True:
        check_stop()
Ejemplo n.º 3
0
def main():
    """Parse CLI arguments, load the YAML config and dispatch on ``args.mode``."""
    args = parser.parse_args()
    modify_arguments(args)

    # Seed every RNG in play so runs are reproducible. The original code
    # seeded only the CUDA RNG; the CPU torch RNG must be seeded too.
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)

    with open(args.config_file, 'r') as stream:
        # safe_load: yaml.load() without an explicit Loader is deprecated
        # (PyYAML >= 5.1) and can execute arbitrary Python from the file.
        config = yaml.safe_load(stream)
        args.config = Munch(modify_config(args, config))
    logger.info(args)

    # 'test' and 'generate' are accepted but intentionally do nothing here.
    if args.mode == 'train':
        train.train(args, device)
    elif args.mode == 'test':
        pass
    elif args.mode == 'analysis':
        analysis.analyze(args, device)
    elif args.mode == 'generate':
        pass
    elif args.mode == 'classify':
        analysis.classify(args, device)
    elif args.mode == 'classify_coqa':
        analysis.classify_coqa(args, device)
    elif args.mode == 'classify_final':
        analysis.classify_final(args, device)
Ejemplo n.º 4
0
def run(window):
    """Play one game in a curses window, then block on a final keypress."""
    board = Board(BOARD_WIDTH, BOARD_HEIGHT)
    adversary = RandomAdversary(DEFAULT_SEED)

    args = parser.parse_args()
    if args.manual:
        # Manual play: block up to INTERVAL ms waiting for each keypress.
        window.timeout(INTERVAL)
        player = UserPlayer(window)
    else:
        # Automated play: non-blocking input so the loop runs freely.
        window.timeout(0)
        player = SelectedPlayer()

    for _ in board.run(player, adversary):
        render(window, board)

        if not args.manual:
            # Drain queued keypresses; ESC aborts the whole program.
            while True:
                key = window.getch()
                if key == -1:
                    break
                if key == curses.ascii.ESC:
                    raise SystemExit
            sleep(0.1)

    # Game over: wait indefinitely for one more keypress before returning.
    window.timeout(-1)
    window.getch()
Ejemplo n.º 5
0
def run(window, seed):
    """Play one game in a curses window and record the final score.

    The result is published through the module-level ``score`` global; the
    ``finally`` clause guarantees it is set even when ESC raises SystemExit
    mid-game.
    """
    global score
    board = Board(BOARD_WIDTH, BOARD_HEIGHT)
    # Adversary is capped at BLOCK_LIMIT blocks for this variant.
    adversary = RandomAdversary(seed, BLOCK_LIMIT)

    args = parser.parse_args()
    if args.manual:
        # Manual play: block up to INTERVAL ms waiting for each keypress.
        window.timeout(INTERVAL)
        player = UserPlayer(window)
    else:
        # Automated play: non-blocking input so the loop runs freely.
        window.timeout(0)
        player = SelectedPlayer()

    try:
        for move in board.run(player, adversary):
            render(window, board)

            if not args.manual:
                # Drain queued keypresses; ESC aborts the whole program.
                while True:
                    key = window.getch()
                    if key == -1:
                        break
                    elif key == curses.ascii.ESC:
                        raise SystemExit
            #sleep(0.1)
    finally:
        # Always capture the final score, even on SystemExit/exception.
        score = board.score
Ejemplo n.º 6
0
def main():
    """Compare two configuration files and print the differences as a table."""
    # Parse arguments (kept global: other parts of the module read them).
    global args
    args = parser.parse_args()

    # Positional arguments arrive as single-element lists.
    filename_left = args.CONFIG_LEFT[0]
    filename_right = args.CONFIG_RIGHT[0]

    # Load and parse both configuration files.
    comparer = KComparer(filename_left, filename_right)
    comparer.load_files()
    comparer.parse_files()

    # Compare, then narrow the result down with the user's filter and regex.
    filtered_compare_result = comparer.filter_results(
        results=comparer.start_compare(),
        filter=args.filter[0],
        regex_conf_name=args.regex[0]
    )

    # Render the filtered result as a table.
    exporter = TableExporter(filtered_compare_result, filename_left, filename_right)
    exporter.print_table()

    # Emit warnings for duplicated configuration items.
    comparer.warn_duplicated_items()

    # Close stderr so a closed downstream pipe does not print
    # BrokenPipeError noise at interpreter shutdown.
    sys.stderr.close()
Ejemplo n.º 7
0
def main():
    """Parse arguments, load the YAML config, prepare output dirs and dispatch on mode."""
    args = parser.parse_args()
    with open(args.config_file, 'r') as stream:
        # safe_load: yaml.load() without an explicit Loader is deprecated
        # (PyYAML >= 5.1) and can execute arbitrary code from the file.
        args.config = munchify(yaml.safe_load(stream))
    # Output directories are namespaced per job id.
    args.save_dir = os.path.join(args.save_dir, args.job_id)
    args.best_dir = os.path.join(args.best_dir, args.job_id)
    # exist_ok avoids the check-then-create race of exists()+makedirs().
    os.makedirs(args.save_dir, exist_ok=True)
    os.makedirs(args.best_dir, exist_ok=True)
    logger.info(args)
    if args.mode == 'train':
        train(args)
    elif args.mode in ('test', 'valid'):
        test(args)
    elif args.mode == 'generate':
        generate(args)
Ejemplo n.º 8
0
def run(window):
    """Play one game in a curses window and append the final score to scores.txt."""
    board = Board(BOARD_WIDTH, BOARD_HEIGHT)
    adversary = RandomAdversary(DEFAULT_SEED)

    args = parser.parse_args()
    if args.manual:
        # Manual play: block up to INTERVAL ms waiting for each keypress.
        window.timeout(INTERVAL)
        player = UserPlayer(window)
    else:
        # Automated play: non-blocking input so the loop runs freely.
        window.timeout(0)
        player = SelectedPlayer()

    for move in board.run(player, adversary):
        render(window, board)

        if not args.manual:
            # Drain queued keypresses; ESC aborts the whole program.
            while True:
                key = window.getch()
                if key == -1:
                    break
                elif key == curses.ascii.ESC:
                    raise SystemExit
            sleep(0.1)

    window.getch()

    # Append the final score. Append mode creates the file when missing and
    # produces the same bytes as the original read-all/append/rewrite cycle,
    # while closing the handle reliably (the original leaked it on error).
    with open('scores.txt', 'a') as scores_file:
        scores_file.write(f'{board.score}\n')
Ejemplo n.º 9
0
def main():
    """Entry point: build, validate and execute the command chosen on the CLI."""
    cli_args = parser.parse_args()
    cmd = parse_command(cli_args)
    cmd.validate()
    cmd.execute()
Ejemplo n.º 10
0
def train():
    """Fine-tune a GPT / GPT-2 language model with an ignite training loop.

    Parses CLI arguments, optionally initializes NCCL distributed training
    and apex fp16, builds tokenizer/model/optimizer, then trains with an
    ignite ``Engine``. Evaluation runs at the end of each epoch (and
    optionally before training starts); progress bars, TensorBoard logging
    and checkpointing happen on the main process only.
    """
    args = parser.parse_args()

    # logging is set to INFO (resp. WARN) for main (resp. auxiliary) process. logger.info => log main process only, logger.warning => log all processes
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.warning("Running process %d", args.local_rank)  # This is a logger.warning: it will be printed by all distributed processes
    logger.info("Arguments: %s", pformat(args))

    # Initialize distributed training if needed
    # local_rank == -1 is the launcher's convention for "not distributed".
    args.distributed = (args.local_rank != -1)
    if args.distributed:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend='nccl', init_method='env://')

    logger.info("Prepare tokenizer, pretrained model and optimizer - add special tokens for fine-tuning")
    tokenizer_class = GPT2Tokenizer if "gpt2" in args.model_checkpoint else OpenAIGPTTokenizer
    tokenizer = tokenizer_class.from_pretrained(args.model_checkpoint)

    model_class = GPT2LMHeadModel if "gpt2" in args.model_checkpoint else OpenAIGPTLMHeadModel
    model = model_class.from_pretrained(args.model_checkpoint)
    tokenizer.set_special_tokens(SPECIAL_TOKENS)
    model.set_num_special_tokens(len(SPECIAL_TOKENS))
    model.to(args.device)
    optimizer = OpenAIAdam(model.parameters(), lr=args.lr)

    # Prepare model for FP16 and distributed training if needed (order is important, distributed should be the last)
    if args.fp16:
        from apex import amp  # Apex is only required if we use fp16 training
        # NOTE(review): args.fp16 doubles as the amp opt_level here —
        # presumably a string such as "O1"/"O2"; confirm against the parser.
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16)
    if args.distributed:
        model = DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank)

    logger.info("Prepare datasets")
    train_loader, val_loader, train_sampler, valid_sampler = get_data_loaders(args, tokenizer)

    # Training function and trainer
    def update(engine, batch):
        model.train()
        batch = tuple(input_tensor.to(args.device) for input_tensor in batch)
        # NOTE(review): the model is assumed to return the LM loss directly
        # when called with the full batch — confirm against the model class.
        lm_loss = model(*batch)
        # Scale the loss so gradients accumulate to the full-batch average.
        loss = lm_loss / args.gradient_accumulation_steps
        if args.fp16:
            with amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
            torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_norm)
        else:
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_norm)
        # Step/zero only every N iterations (gradient accumulation).
        if engine.state.iteration % args.gradient_accumulation_steps == 0:
            optimizer.step()
            optimizer.zero_grad()
        return loss.item()
    trainer = Engine(update)

    # Evaluation function and evaluator (evaluator output is the input of the metrics)
    def inference(engine, batch):
        model.eval()
        with torch.no_grad():
            batch = tuple(input_tensor.to(args.device) for input_tensor in batch)
            input_ids, lm_labels, token_type_ids = batch

            # logger.info(tokenizer.decode(input_ids[0, :].tolist()))
            model_outputs = model(input_ids, token_type_ids=token_type_ids)
            lm_logits = model_outputs[0]

            # Shift so that position i's logits are scored against token i+1.
            lm_logits_flat_shifted = lm_logits[..., :-1, :].contiguous().view(-1, lm_logits.size(-1))
            lm_labels_flat_shifted = lm_labels[..., 1:].contiguous().view(-1)

            return lm_logits_flat_shifted, lm_labels_flat_shifted
    evaluator = Engine(inference)

    # Attach evaluation to trainer: we evaluate when we start the training and at the end of each epoch
    trainer.add_event_handler(Events.EPOCH_COMPLETED, lambda _: evaluator.run(val_loader))
    if args.n_epochs < 1:
        trainer.add_event_handler(Events.COMPLETED, lambda _: evaluator.run(val_loader))
    if args.eval_before_start:
        trainer.add_event_handler(Events.STARTED, lambda _: evaluator.run(val_loader))

    # Make sure distributed data samplers split the dataset nicely between the distributed processes
    if args.distributed:
        trainer.add_event_handler(Events.EPOCH_STARTED, lambda engine: train_sampler.set_epoch(engine.state.epoch))
        evaluator.add_event_handler(Events.EPOCH_STARTED, lambda engine: valid_sampler.set_epoch(engine.state.epoch))

    # Linearly decrease the learning rate from lr to zero
    scheduler = PiecewiseLinear(optimizer, "lr", [(0, args.lr), (args.n_epochs * len(train_loader), 0.0)])
    trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)

    # Prepare metrics - note how we compute distributed metrics
    RunningAverage(output_transform=lambda x: x).attach(trainer, "loss")
    metrics = {
        "nll": Loss(torch.nn.CrossEntropyLoss(ignore_index=-1))
    }
    metrics.update({
        "average_nll": MetricsLambda(average_distributed_scalar, metrics["nll"], args)
    })
    metrics["average_ppl"] = MetricsLambda(math.exp, metrics["average_nll"])
    for name, metric in metrics.items():
        metric.attach(evaluator, name)

    # On the main process: add progress bar, tensorboard, checkpoints and save model, configuration and tokenizer before we start to train
    if args.local_rank in [-1, 0]:
        pbar = ProgressBar(persist=True)
        pbar.attach(trainer, metric_names=["loss"])
        evaluator.add_event_handler(Events.COMPLETED, lambda _: pbar.log_message("Validation: %s" % pformat(evaluator.state.metrics)))

        tb_logger = TensorboardLogger(log_dir=args.output_dir)
        tb_logger.attach(trainer, log_handler=OutputHandler(tag="training", metric_names=["loss"]), event_name=Events.ITERATION_COMPLETED)
        tb_logger.attach(trainer, log_handler=OptimizerParamsHandler(optimizer), event_name=Events.ITERATION_STARTED)
        tb_logger.attach(evaluator, log_handler=OutputHandler(tag="validation", metric_names=list(metrics.keys()), another_engine=trainer), event_name=Events.EPOCH_COMPLETED)

        checkpoint_handler = ModelCheckpoint(tb_logger.writer.log_dir, 'checkpoint', save_interval=1, n_saved=3)
        trainer.add_event_handler(Events.EPOCH_COMPLETED, checkpoint_handler, {'mymodel': getattr(model, 'module', model)})  # "getattr" take care of distributed encapsulation

        torch.save(args, tb_logger.writer.log_dir + '/model_training_args.bin')
        getattr(model, 'module', model).config.to_json_file(os.path.join(tb_logger.writer.log_dir, CONFIG_NAME))
        tokenizer.save_vocabulary(tb_logger.writer.log_dir)

    # Run the training
    trainer.run(train_loader, max_epochs=args.n_epochs)

    # On the main process: close tensorboard logger and rename the last checkpoint (for easy re-loading with OpenAIGPTModel.from_pretrained method)
    if args.local_rank in [-1, 0] and args.n_epochs > 0:
        os.rename(checkpoint_handler._saved[-1][1][-1], os.path.join(tb_logger.writer.log_dir, WEIGHTS_NAME))  # TODO: PR in ignite to have better access to saved file paths (cleaner)
        tb_logger.close()
Ejemplo n.º 11
0
        if not historic
    ]

    if not dry:
        calendar = get_calendar_service()

        create_remote_appointments(appointments_to_be_created, calendar)
        delete_remote_appointments(appointments_to_be_deleted, calendar)

        appointments_to_be_cached = [
            appointment for appointment in schedule_appointments
            if not appointment.is_historic and appointment.remote_event_id
        ]
        save_remote_appointments_to_cache(appointments_to_be_cached)

        save_appointment_meta(appointment_meta)
    else:
        logging.info(
            f"Would create {len(appointments_to_be_created)} event(s) in total."
        )
        logging.info(
            f"Would delete {len(appointments_to_be_deleted)} event(s) in total."
        )


if __name__ == '__main__':
    # Parse CLI options, configure logging at the requested level, then run.
    options = parser.parse_args()
    logging.basicConfig(level=logging.getLevelName(options.log_level))

    # --dry makes the sync report what it would do instead of doing it.
    main(options.dry)
Ejemplo n.º 12
0
import csv
import logging
import sys
import asyncio

import download_media
from arguments import parser
import twint
import os

# Script-style module: arguments are parsed at import time.
args = parser.parse_args()

# Tweets are scraped into <destination>/<user>/data.csv.
csv_file_path = f"{args.destination_directory.rstrip(os.sep)}{os.sep}{args.user}{os.sep}data.csv"
if os.path.isfile(csv_file_path):
    # Refuse to overwrite an existing scrape.
    logging.info(f"{csv_file_path} already exists; exiting.")
    sys.exit(-1)

destination_directory = f"{args.destination_directory.rstrip(os.sep)}{os.sep}{args.user}"
os.makedirs(destination_directory, exist_ok=True)

# Configure twint to dump the user's full profile as CSV.
twint_config = twint.Config()
twint_config.Username = args.user
twint_config.Store_csv = True

twint_config.Output = csv_file_path
twint_config.Profile_full = True


async def main():
    with open(csv_file_path) as csv_vile:
        for row in csv.DictReader(csv_vile, delimiter=","):
Ejemplo n.º 13
0
import threading, datetime
from pprint import pprint

sys.path.append('.')

import scons
import SCons.Script
import workers
from arguments import parser
from qt_meta import QTFile
from context_managers import filedescriptor, restore_cwd, eyecandy
from utils import Eyecandy
import conf

# C:/Users/kosi/AppData/Local/Amazon/Kindle/application/Kindle.exe 
# NOTE(review): parse_args is given a hard-coded argv (the Kindle executable
# path), so real command-line arguments are ignored — confirm this is intended.
args = parser.parse_args([r'C:\Users\kosii\AppData\Local\Amazon\Kindle\application\Kindle.exe'])
conf.debug = bool(args.debug)

if args.do_not_regenerate:
	with filedescriptor(args.input_file, os.O_RDONLY) as fd:
		with contextlib.closing(mmap.mmap(fd, length=0, access=mmap.ACCESS_READ)) as mmapped_file:
		    with open('injector/injected.cpp', 'w') as injected_dll_source:
		    	print 'Identifying Qt classes in {input_file} ...'.format(input_file=args.input_file)
		    	with eyecandy():
			    	injected_dll_source.write(QTFile(mmapped_file, n=args.n).render())

if args.do_not_recompile:
	with restore_cwd():
		os.environ['SCONSFLAGS'] = "-C injector -Q -s"
		try:
			print "Compiling injected dll ..."