Example #1
def main():
    args = parse_args()

    args.model_type = args.model_type.lower()
    model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    tokenizer = tokenizer_class.from_pretrained(
        os.path.dirname(args.model_name_or_path))

    if args.version_2_with_negative:
        raw_dataset = load_dataset('squad_v2', split='validation')
    else:
        raw_dataset = load_dataset('squad', split='validation')
    column_names = raw_dataset.column_names
    dataset = raw_dataset.map(partial(prepare_validation_features,
                                      tokenizer=tokenizer,
                                      args=args),
                              batched=True,
                              remove_columns=column_names,
                              num_proc=4)

    batchify_fn = lambda samples, fn=Dict(
        {
            "input_ids": Pad(axis=0, pad_val=tokenizer.pad_token_id),
            "token_type_ids": Pad(axis=0, pad_val=tokenizer.pad_token_type_id)
        }): fn(samples)
    predictor = Predictor.create_predictor(args)
    predictor.predict(dataset, raw_dataset, args=args, collate_fn=batchify_fn)
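
Note: the batchify_fn above uses paddlenlp.data's Dict/Pad collators, which pad each named field to the longest sequence in the batch. A minimal standalone sketch of the same idea in plain Python (the helper names are illustrative assumptions, not part of the paddlenlp API):

def pad_batch(seqs, pad_val=0):
    # Pad every sequence to the length of the longest one in the batch.
    max_len = max(len(s) for s in seqs)
    return [s + [pad_val] * (max_len - len(s)) for s in seqs]

def batchify(samples, pad_val=0):
    # Collect each field across samples, then pad field-by-field, which is
    # what Dict({"input_ids": Pad(...), ...}) does per key.
    return {key: pad_batch([s[key] for s in samples], pad_val)
            for key in samples[0]}

# Two tokenized samples of unequal length pad out to the same shape:
print(batchify([{"input_ids": [1, 2, 3]}, {"input_ids": [4, 5]}]))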
Example #2
    def process_args(self, **args):
        if len(args) == 0:
            args = vars(parse_args())
        for key, val in args.items():
            if val is None and hasattr(self, key): continue
            setattr(self, key, val)

        # Use +1 here just so we have some cushion
        trial_zeros = str(
            1 + int(math.ceil(math.log(self.exp_trials) / math.log(10))))
        trial_fmt = '%0' + trial_zeros + 'i'
        trial_str = trial_fmt % self.exp_trial
        self.output_file = results.ResultFile.filename_from_keys(
            agent=self.agent_type,
            size=self.domain_size,
            seed=self.domain_seed,
            complexity=self.domain_complexity,
            handicap=self.domain_handicap,
            trial=self.exp_trial)
        self.description = '{agent_type} agent, seed {seed}, handicap {handicap}, trial {trial}'.format(
            agent_type=self.agent_type,
            seed='%02i' % self.domain_seed,
            handicap='%02i' % self.domain_handicap,
            trial=trial_str)
        self.output_path = os.path.join(self.exp_directory, self.output_file)
        self._convert_handicap()
        self._process_gui_setting()
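
Note: the padding-width arithmetic in process_args is easiest to see with concrete numbers. A standalone sketch (the values are illustrative):

import math

# With exp_trials = 500: ceil(log10(500)) = 3, plus 1 of cushion = width 4,
# so trial 7 formats as '0007'.
exp_trials, exp_trial = 500, 7
width = 1 + int(math.ceil(math.log(exp_trials) / math.log(10)))
trial_fmt = '%0' + str(width) + 'i'
print(trial_fmt % exp_trial)  # -> '0007'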
Example #3
def main():
    args = parse_args()

    env = SimpleEnv(n_bars=2)
    agent = Agent(
        env,
        in_dim=len(env.get_state()),
        out_dim=env.action_space.n,
        learning_rate=LR,
        gamma=GAMMA)

    print("Optimizer", agent.optimizer)
    print("")
    print("Network", agent.model)
    print("")

    if args.mode == "train":
        train(
            env,
            agent,
            n_episodes=N_EPISODES,
            max_steps=MAX_STEPS,
            batch_size=BATCH_SIZE,
            eps_decay=EPS_DECAY,
            log=LOG,
            log_interval=LOG_INTERVAL if LOG_INTERVAL <= N_EPISODES else N_EPISODES,
            save_model_to=SAVE_MODEL_TO,
            load_model_from=args.checkpoint
        )

    elif args.mode == "test":
        test(env, agent, checkpoint_path=args.checkpoint)
Example #4
File: anma.py Project: zuh0/anma
def main():
    """Run the program.

    * Parse command-line arguments for output file name, config name etc.
    * Get colors scheme for each note from config file.
    * Initialize PyGame library.
    * Dump config if verbose is active.
    * Create an instance of the AnmaHandle class to handle everything.
    * Loop and get notes until user exits.
    * Write the resulting MIDI file.
    * Cleanup.
    """
    arg = args.parse_args()
    colors = config.parse_colors(arg.conf)
    screen, player = config.init_pygame(arg.midi_output, arg.background)
    led_hndl = config.init_gpio()

    if arg.verbose:
        args.dump_config(arg)

    hndl = handle.AnmaHandle(arg, colors, screen, player, led_hndl)
    for i in range(60, 76):
        if '#' not in hndl.notes[i].name:    
            hndl.notes[i].update_screen(hndl, (255, 255, 255))

    for i in range(60, 76):
        if '#' in hndl.notes[i].name:    
            hndl.notes[i].update_screen(hndl, (0, 0, 0))

    if arg.verbose:
        for note in hndl.notes:
            print("{} {}".format(note.name, note.pitch))

    while hndl.loop():
        pass
Example #5
def main():
    args = parse_args()

    images = ["ci.jpg", "ci2.jpg", "ci3.jpg", "ci4.jpg"]

    calc = Calculator(args)
    print(calc.distance_matrix(images))
Example #6
def main():
    args, unknown = parse_args()
    signal.signal(signal.SIGINT, signal_handler)
    global launcher
    launcher = TmuxLauncher(args, unknown)
    launcher.run()
    signal.pause()
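
Note: signal.pause() (Unix-only) blocks the main thread until a signal such as SIGINT arrives; without the parentheses the line is a no-op attribute access. A minimal self-contained sketch of the pattern:

import signal
import sys

def handler(signum, frame):
    # Runs when SIGINT (Ctrl-C) is delivered.
    print("caught SIGINT, shutting down")
    sys.exit(0)

signal.signal(signal.SIGINT, handler)
signal.pause()  # returns only after a signal is delivered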
Example #7
def main():
    args = parse_args()
    data_train, data_test, target_train, target_test = get_train_test_data(
        preprocessor=args.preprocessor, test_size=args.test_size)

    classifier = get_algorithm(args)
    model = classifier.fit(data_train, target_train)
    targets_predicted = model.predict(data_test)
    correct = 1 - np.mean(target_test != targets_predicted)
    print("Accuracy: {:.2%}".format(correct))

    lib_classifier = get_library_version(args)
    lib_model = lib_classifier.fit(data_train, target_train)
    lib_targets_predicted = lib_model.predict(data_test)
    lib_correct = 1 - np.mean(target_test != lib_targets_predicted)
    print("Library Accuracy: {:.2%}".format(lib_correct))

    print("Difference from Library version: {:+.2%}".format(correct -
                                                            lib_correct))

    # K-Fold Cross Validation
    data = get_data(args.preprocessor)
    k_fold = KFold(n_splits=len(data.data), shuffle=True, random_state=7)
    k_fold_accuracy = cross_val_score(lib_classifier,
                                      data.data,
                                      data.target,
                                      cv=k_fold,
                                      n_jobs=1,
                                      scoring='accuracy').mean()
    print("K-Fold Accuracy: {:.2%}".format(k_fold_accuracy))
Example #8
def main():

    args = parse_args()

    source_trn = open("/scratch/rjh347/input/source_trn.txt", "w")
    target_trn = open("/scratch/rjh347/input/target_trn.txt", "w")

    # create train data -- simply split file1 into target / source
    with open(args.saveto + 'file1_s.txt', 'r') as train:
        for line in train:
            src, tgt = line.split(',')
            source_trn.write(src)
            target_trn.write(tgt)
    source_trn.close()
    target_trn.close()

    source_val = open("/scratch/rjh347/input/source_val.txt", "w")
    target_val = open("/scratch/rjh347/input/target_val.txt", "w")

    # create evaluation data -- split file2 into target / source, keeping each line with 0.2 probability
    with open(args.saveto + 'file2_s.txt', 'r') as val:
        for line in val:
            src, tgt = line.split(',')
            thresh = random.randint(0, 4)
            if thresh > 3:
                source_val.write(src)
                target_val.write(tgt)
    source_val.close()
    target_val.close()
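
Note: random.randint(0, 4) > 3 succeeds on exactly one of five equally likely draws, so each line survives with probability 0.2; random.random() expresses that more directly. A sketch with the file handling elided (the sample lines are illustrative):

import random

lines = ['a,b\n', 'c,d\n', 'e,f\n', 'g,h\n', 'i,j\n']
# Keep each line with probability 0.2, same as the randint test above.
kept = [line for line in lines if random.random() < 0.2]
print(kept)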
Example #9
def infer():
    args = parse_args()
    print(args)

    if args.use_gpu == 1:
        place = fluid.CUDAPlace(0)
    else:
        place = fluid.CPUPlace()
    inference_scope = fluid.Scope()

    test_files = [
        os.path.join(args.test_data_dir, x)
        for x in os.listdir(args.test_data_dir)
    ]
    criteo_dataset = CriteoDataset()
    test_reader = paddle.batch(criteo_dataset.test(test_files),
                               batch_size=args.batch_size)

    startup_program = fluid.framework.Program()
    test_program = fluid.framework.Program()
    cur_model_path = os.path.join(args.model_output_dir,
                                  'epoch_' + args.test_epoch)

    with fluid.scope_guard(inference_scope):
        with fluid.framework.program_guard(test_program, startup_program):
            loss, auc, data_list, auc_states = eval('network_conf.' +
                                                    args.model_name)(
                                                        args.embedding_size,
                                                        args.num_field,
                                                        args.num_feat,
                                                        args.layer_sizes_dnn,
                                                        args.act, args.reg,
                                                        args.layer_sizes_cin)

            exe = fluid.Executor(place)
            feeder = fluid.DataFeeder(feed_list=data_list, place=place)
            fluid.io.load_persistables(
                executor=exe,
                dirname=cur_model_path,
                main_program=fluid.default_main_program())

            for var in auc_states:  # reset auc states
                set_zero(var.name, scope=inference_scope, place=place)

            loss_all = 0
            num_ins = 0
            for batch_id, data_test in enumerate(test_reader()):
                loss_val, auc_val = exe.run(test_program,
                                            feed=feeder.feed(data_test),
                                            fetch_list=[loss.name, auc.name])

                num_ins += len(data_test)
                loss_all += loss_val * len(data_test)
                logger.info('TEST --> batch: {} loss: {} auc_val: {}'.format(
                    batch_id + 1, loss_all / num_ins, auc_val))

            print(
                'The last log info is the total Logloss and AUC for all test data. '
            )
Example #10
def main(argv=None):
    ''' Main entry point '''
    args = parse_args(argv)
    print(f'Running torch {torch.version.__version__}')

    profile_cuda_memory = args.config.cuda.profile_cuda_memory
    pin_memory = 'cuda' in args.device.type and not profile_cuda_memory
    dataloader = get_dataloader(args.config.data,
                                args.seed_fn,
                                pin_memory,
                                args.num_devices,
                                shuffle=args.shuffle)
    print(dataloader.dataset.stats)

    model = args.model(args.config.model, dataloader.dataset)
    action = args.action(args.action_config, model, dataloader, args.device)
    if args.action_type == 'train' and args.action_config.early_stopping:
        args.config.data.split = 'valid'
        args.config.data.max_examples = 0
        action.validation_dataloader = get_dataloader(args.config.data,
                                                      args.seed_fn,
                                                      pin_memory,
                                                      args.num_devices,
                                                      shuffle=args.shuffle)

    if args.config.cuda.profile_cuda_memory:
        print('Profiling CUDA memory')
        memory_profiler = profile.CUDAMemoryProfiler(
            action.modules.values(), filename=profile_cuda_memory)

        sys.settrace(memory_profiler)
        threading.settrace(memory_profiler)

    step = 0
    epoch = 0
    if args.restore:
        restore_modules = {
            module_name: module
            for module_name, module in action.modules.items()
            if module_name not in args.reset_parameters
        }

        epoch, step = restore(args.restore,
                              restore_modules,
                              num_checkpoints=args.average_checkpoints,
                              map_location=args.device.type,
                              strict=not args.reset_parameters)

        model.reset_named_parameters(args.reset_parameters)
        if 'step' in args.reset_parameters:
            step = 0
            epoch = 0

    args.experiment.set_step(step)

    with ExitStack() as stack:
        stack.enter_context(profiler.emit_nvtx(args.config.cuda.profile_cuda))
        stack.enter_context(set_detect_anomaly(args.detect_anomalies))
        action(epoch, args.experiment, args.verbose)
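
Note: the ExitStack at the end of main lets one with statement manage a set of context managers assembled at runtime, which is handy when the contexts depend on flags like profile_cuda. A minimal standard-library sketch (the file names are illustrative):

from contextlib import ExitStack

paths = ['a.txt', 'b.txt']
with ExitStack() as stack:
    # Enter any number of context managers; the stack unwinds them all.
    files = [stack.enter_context(open(p, 'w')) for p in paths]
    for f in files:
        f.write('hello\n')
# Every file is closed here, even if a write raised.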
Example #11
def main():
  args = parse_args()
  classifier = get_algorithm(args.algorithm)
  data_train, data_test, target_train, target_test = get_train_test_data(filename=args.file, test_size=args.test_size)
  model = classifier.fit(data_train, target_train)
  targets_predicted = model.predict(data_test)
  correct = 1 - np.mean(target_test != targets_predicted)
  print("Accuracy: {:.2%}".format(correct))
Example #12
def main():
    args = parse_args(sys.argv[1:])
    create_config_dir()

    if args.load:
        walls = wallpaper.get_wallpapers(args.load)
        if walls:
            wallpaper.set_wallpapers(walls)
Example #13
def main():
    args = parse_args()
    if args.debug:
        t1 = time.time()
    if args.reduce_sci:
        reduce_science(args)
    if args.debug:
        t2 = time.time()
        print("Total Time taken: %0.2f s" % (t2 - t1))
Example #14
def main():
    args = parse_args()

    # Generate base path: ".../$(args.output_dir)/run-$(date)-$(tag)"
    base_dir = generate_run_base_dir(args.model, args.tag, args.output_dir)
    log_file = os.path.join(base_dir, "log.txt")

    # Setup logging in base_dir/log.txt
    setup_logging(level=args.log_level, filename=log_file)

    # Wrap experiment in a try/except block to send the error via email if one occurs
    try:
        # Run experiment
        experiment = Experiment(args, base_dir=base_dir)
        experiment.train()
        experiment.test()

        test_res_str = results_to_csv_str(experiment.test_result,
                                          experiment.test_result_pp)

        logger.info("Test results: ")
        logger.info(test_res_str)

        logger.info("Training took {}".format(experiment.training_time))

        # Notify the user via e-mail and send the log file
        if args.notify is not None:
            subject = f"[MONOLAB {args.tag}] Training Finished!"
            message = (
                f"The experiment in {base_dir} has finished training and "
                f"took {experiment.training_time}. Best loss: {experiment.best_val_loss}\n"
                f"Test results:\n"
                f"{test_res_str}")

            notify_mail(address=args.notify,
                        subject=subject,
                        message=message,
                        filename=log_file)
    except Exception as e:
        # Log error message
        tbstr = "".join(traceback.extract_tb(e.__traceback__).format())
        errormsg = f"Traceback:\n{tbstr}\nError: {e}"
        logger.error(errormsg)

        # Notify exception
        if args.notify:
            subject = f"[MONOLAB {args.tag}] Training Error!"
            message = (
                f"The experiment in {base_dir} has failed. An error occurred "
                f"during training:\n\n{errormsg}")
            notify_mail(address=args.notify,
                        subject=subject,
                        message=message,
                        filename=log_file)

        raise e
Example #15
def train():
    args = parse_args()
    # add ce
    if args.enable_ce:
        SEED = 102
        fluid.default_main_program().random_seed = SEED
        fluid.default_startup_program().random_seed = SEED

    print('---------- Configuration Arguments ----------')
    for key, value in args.__dict__.items():
        print(key + ':' + str(value))

    if not os.path.isdir(args.model_output_dir):
        os.mkdir(args.model_output_dir)

    loss, auc, data_list, auc_states = ctr_deepfm_model(
        args.embedding_size, args.num_field, args.num_feat, args.layer_sizes,
        args.act, args.reg)
    optimizer = fluid.optimizer.SGD(
        learning_rate=args.lr,
        regularization=fluid.regularizer.L2DecayRegularizer(args.reg))
    optimizer.minimize(loss)

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())

    dataset = fluid.DatasetFactory().create_dataset()
    dataset.set_use_var(data_list)
    pipe_command = 'python criteo_reader.py {}'.format(args.feat_dict)
    dataset.set_pipe_command(pipe_command)
    dataset.set_batch_size(args.batch_size)
    dataset.set_thread(args.num_thread)
    train_filelist = [
        os.path.join(args.train_data_dir, x)
        for x in os.listdir(args.train_data_dir)
    ]

    print('---------------------------------------------')
    for epoch_id in range(args.num_epoch):
        start = time.time()
        dataset.set_filelist(train_filelist)
        exe.train_from_dataset(
            program=fluid.default_main_program(),
            dataset=dataset,
            fetch_list=[loss, auc],
            fetch_info=['epoch %d batch loss' % (epoch_id + 1), "auc"],
            print_period=1000,
            debug=False)
        model_dir = os.path.join(args.model_output_dir,
                                 'epoch_' + str(epoch_id + 1))
        sys.stderr.write('epoch%d is finished and takes %f s\n' %
                         ((epoch_id + 1), time.time() - start))
        fluid.io.save_persistables(executor=exe,
                                   dirname=model_dir,
                                   main_program=fluid.default_main_program())
Example #16
def main():

    args = parse_args()
    batch = load_data(args)

    count = 0
    for item in batch:
        print(item)
        count += 1
        if count > 10:
            break
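
Note: the count-and-break loop above prints eleven items, since it breaks only once count exceeds 10; itertools.islice is the idiomatic way to take a fixed-size head of any iterable. A sketch (the range stands in for load_data):

from itertools import islice

batch = range(100)  # stand-in for load_data(args)
# islice(batch, 11) reproduces the loop above; use 10 for exactly ten items.
for item in islice(batch, 11):
    print(item)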
Example #17
def main():

    # get command line parameters and lists of file specs
    arg_values = args.parse_args(sys.argv[1:])
    cache_arg_files = args.glob_list_of_arg_values(arg_values.cache)
    no_cache_arg_files = args.glob_list_of_arg_values(arg_values.nocache)
    output_manifest_file_name = arg_values.output
    doc_root = None if not arg_values.doc_root else arg_values.doc_root[0]
    url_prefix = None if not arg_values.url_prefix else arg_values.url_prefix[0]
    force = arg_values.force

    # filter out files in cached list told to not cache
    filenames_to_cache = [
        fn for fn in cache_arg_files if fn not in no_cache_arg_files
    ]

    # get a temporary file
    (fd, temp_file_name) = tempfile.mkstemp(prefix="temp_manifestgen_")
    os.close(fd)

    #convert file names on disk to urls
    cached_urls = convert_filenames_to_urls(filenames_to_cache, doc_root,
                                            url_prefix)

    # digest value of contents of all cached files
    content_md5_digest = digest_for_content(filenames_to_cache)

    # write temp manifest that reflects the current state
    write_cache_manifest(open(temp_file_name, "w"), cached_urls,
                         arg_values.network, arg_values.fallback)
    write_name_value_comment(open(temp_file_name, "a"), "contentmd5",
                             content_md5_digest)

    if force:
        write_name_value_comment(open(temp_file_name, "a"), "timestamp",
                                 str(datetime.datetime.now()))

    open(temp_file_name, "a").write("\n")  # terminate last line

    #compare the newly created with the old and copy if it has changed
    digest_of_new_manifest = digest_for_cache_manifest(temp_file_name)
    digest_of_old_manifest = digest_for_cache_manifest(
        output_manifest_file_name)
    if digest_of_old_manifest != digest_of_new_manifest:
        print("digests differ, copying %s to %s" %
              (temp_file_name, output_manifest_file_name))
        open(output_manifest_file_name,
             "w").write(open(temp_file_name, "r").read())
    else:
        print("cache manifest contents have not changed")

    # remove the temporary file
    if os.path.exists(temp_file_name):
        os.unlink(temp_file_name)
Example #18
def main():
    args = parse_args()

    predictor = Predictor.create_predictor(args)
    test_loader, src_vocab_size, tgt_vocab_size, bos_id, eos_id = create_infer_loader(
        args)
    _, vocab = IWSLT15.get_vocab()
    trg_idx2word = vocab.idx_to_token

    predictor.predict(test_loader, args.infer_output_file, trg_idx2word,
                      bos_id, eos_id)
Example #19
def main():
    opts = args.parse_args()
    db   = get_db(opts)

    importer = Importer(db, opts)
    if (not opts.skip_rankings and not opts.only_uservisits):
        importer.import_data_set('rankings')
    if (not opts.skip_uservisits or opts.only_uservisits):
        importer.import_data_set('uservisits')
    if (not opts.skip_crawls and not opts.only_uservisits):
        importer.import_data_set('crawl')
Example #20
def main():
    args = parse_args()

    predictor = Predictor.create_predictor(args)
    test_loader, src_vocab_size, tgt_vocab_size, bos_id, eos_id = create_infer_loader(
        args)
    tgt_vocab = Vocab.load_vocabulary(**test_loader.dataset.vocab_info['vi'])
    trg_idx2word = tgt_vocab.idx_to_token

    predictor.predict(test_loader, args.infer_output_file, trg_idx2word,
                      bos_id, eos_id)
Example #21
def main():
    args = parse_args()

    testing_suite = BenchmarkSuite(args.capsule_dir, args.parallelism)
    results = testing_suite.test(args.num_samples)

    df = pd.DataFrame.from_records(results,
                                   columns=BenchmarkSuite.Result._fields)
    df.sort_values(by=list(df.columns), inplace=True, ignore_index=True)

    output.generate_output(output=df,
                           csv_path=args.output_csv,
                           graph_path=args.output_graph)
Example #22
def train():
    args = parse_args()
    # add ce
    if args.enable_ce:
        SEED = 102
        fluid.default_main_program().random_seed = SEED
        fluid.default_startup_program().random_seed = SEED

    print(args)
    if not os.path.isdir(args.model_output_dir):
        os.mkdir(args.model_output_dir)

    loss, auc, data_list, auc_states = eval('network_conf.' + args.model_name)(
        args.embedding_size, args.num_field, args.num_feat,
        args.layer_sizes_dnn, args.act, args.reg, args.layer_sizes_cin)
    optimizer = fluid.optimizer.SGD(
        learning_rate=args.lr,
        regularization=fluid.regularizer.L2DecayRegularizer(args.reg))
    optimizer.minimize(loss)

    dataset = fluid.DatasetFactory().create_dataset()
    dataset.set_use_var(data_list)
    dataset.set_pipe_command('python criteo_reader.py')
    dataset.set_batch_size(args.batch_size)
    dataset.set_filelist([
        os.path.join(args.train_data_dir, x)
        for x in os.listdir(args.train_data_dir)
    ])

    if args.use_gpu == 1:
        exe = fluid.Executor(fluid.CUDAPlace(0))
        dataset.set_thread(1)
    else:
        exe = fluid.Executor(fluid.CPUPlace())
        dataset.set_thread(args.num_thread)
    exe.run(fluid.default_startup_program())

    for epoch_id in range(args.num_epoch):
        start = time.time()
        sys.stderr.write('\nepoch%d start ...\n' % (epoch_id + 1))
        exe.train_from_dataset(program=fluid.default_main_program(),
                               dataset=dataset,
                               fetch_list=[loss, auc],
                               fetch_info=['loss', 'auc'],
                               debug=False,
                               print_period=args.print_steps)
        model_dir = os.path.join(args.model_output_dir,
                                 'epoch_' + str(epoch_id + 1), "checkpoint")
        sys.stderr.write('epoch%d is finished and takes %f s\n' %
                         ((epoch_id + 1), time.time() - start))
        fluid.io.save_persistables(executor=exe,
                                   dirname=model_dir,
                                   main_program=fluid.default_main_program())
Example #23
def train():
    args = parse_args()
    if not os.path.isdir(args.model_output_dir):
        os.mkdir(args.model_output_dir)

    dense_input = fluid.layers.data(name="dense_input",
                                    shape=[dense_feature_dim],
                                    dtype='float32')
    sparse_input_ids = [
        fluid.layers.data(name="C" + str(i),
                          shape=[1],
                          lod_level=1,
                          dtype="int64") for i in range(1, 27)
    ]
    label = fluid.layers.data(name='label', shape=[1], dtype='int64')

    loss, auc_var, batch_auc_var = ctr_dnn_model_dataset(
        dense_input, sparse_input_ids, label, args.embedding_size,
        args.sparse_feature_dim)

    optimizer = fluid.optimizer.SGD(learning_rate=1e-4)
    optimizer.minimize(loss)

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())
    dataset = fluid.DatasetFactory().create_dataset()
    dataset.set_use_var([dense_input] + sparse_input_ids + [label])
    pipe_command = "python criteo_reader.py %d" % args.sparse_feature_dim
    dataset.set_pipe_command(pipe_command)
    dataset.set_batch_size(100)
    thread_num = 10
    dataset.set_thread(thread_num)
    whole_filelist = [
        "raw_data/part-%d" % x for x in range(len(os.listdir("raw_data")))
    ]

    epochs = 20
    for i in range(epochs):
        dataset.set_filelist(whole_filelist[:int(0.8 * len(whole_filelist))])
        exe.train_from_dataset(program=fluid.default_main_program(),
                               dataset=dataset,
                               fetch_list=[auc_var],
                               fetch_info=["auc"],
                               debug=False)
        model_dir = args.model_output_dir + '/epoch' + str(i + 1) + ".model"
        sys.stderr.write("epoch%d finished" % (i + 1))
        fluid.io.save_inference_model(
            model_dir,
            [dense_input.name] + [x.name
                                  for x in sparse_input_ids] + [label.name],
            [loss, auc_var], exe)
Example #24
def run_cmdline_args(CONFIG_PATH):
    "Parse the command line arguments"
    global options
    options = parse_args(CONFIG_PATH)

    # Copy a default file if none exists
    if not path.isfile(options.filename):
        default = path.join(path.dirname(__file__), '../data/default_config.cfg')
        other = pkg_resources.resource_filename('poezio', 'default_config.cfg')
        if path.isfile(default):
            copy2(default, options.filename)
        elif path.isfile(other):
            copy2(other, options.filename)
        global firstrun
        firstrun = True
Example #25
def test_game():

    global game

    args = parse_args()

    game = Game(args)
    play_game(game.clone(), True)

    # timeit
    t = timeit.timeit("play_game(game.clone(), False)",
                      globals=globals(),
                      number=args.timeit_count)
    print("timeit : %s ms  [%d] [total %s s]" % (round(
        (t / args.timeit_count) * 1000, 2), args.timeit_count, round(t, 2)))
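
Note: timeit.timeit returns the total seconds for `number` runs, hence the division above to report per-call milliseconds. A self-contained sketch with a toy statement:

import timeit

n = 1000
t = timeit.timeit("sum(range(100))", number=n)
# Total time divided by run count gives the per-call cost.
print("%.4f ms per call  [%d] [total %.2f s]" % (t / n * 1000, n, t))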
Example #26
async def test_mcts():

    args = parse_args()

    game = Game(args)
    model = Model(game.state_shape(), game.action_size(), args)

    mcts = MCTS(model, args)

    experiences = await mcts.self_play(game)
    for result in experiences:
        print(result[2])

    print("Game Score:", game.game_score())
    for move in game.moves():
        print(move)
Example #27
def run_cmdline_args(CONFIG_PATH):
    "Parse the command line arguments"
    global options
    options = parse_args(CONFIG_PATH)

    # Copy a default file if none exists
    if not path.isfile(options.filename):
        default = path.join(path.dirname(__file__),
                            '../data/default_config.cfg')
        other = path.join(path.dirname(__file__), 'default_config.cfg')
        if path.isfile(default):
            copy2(default, options.filename)
        elif path.isfile(other):
            copy2(other, options.filename)
        global firstrun
        firstrun = True
Example #28
def main():

    # get command line parameters and lists of file specs
    arg_values = args.parse_args(sys.argv[1:])
    cache_arg_files = args.glob_list_of_arg_values(arg_values.cache)
    no_cache_arg_files = args.glob_list_of_arg_values(arg_values.nocache)
    output_manifest_file_name = arg_values.output
    doc_root = None if not arg_values.doc_root else arg_values.doc_root[0]
    url_prefix = None if not arg_values.url_prefix else arg_values.url_prefix[0]
    force = arg_values.force

    # filter out files in cached list told to not cache
    filenames_to_cache = [ fn for fn in cache_arg_files if fn not in no_cache_arg_files]

    # get a temporary file 
    (fd, temp_file_name) = tempfile.mkstemp(prefix="temp_manifestgen_")
    os.close(fd)

    #convert file names on disk to urls
    cached_urls = convert_filenames_to_urls(filenames_to_cache, doc_root, url_prefix)

    # digest value of contents of all cached files
    content_md5_digest = digest_for_content(filenames_to_cache)

    # write temp manifest that reflects the current state
    write_cache_manifest(open(temp_file_name, "w"), cached_urls, arg_values.network, arg_values.fallback)
    write_name_value_comment(open(temp_file_name, "a"), "contentmd5", content_md5_digest)

    if force:
        write_name_value_comment(open(temp_file_name, "a"), "timestamp", str(datetime.datetime.now()))

    open(temp_file_name, "a").write("\n") # terminate last line


    #compare the newly created with the old and copy if it has changed
    digest_of_new_manifest = digest_for_cache_manifest(temp_file_name)
    digest_of_old_manifest = digest_for_cache_manifest(output_manifest_file_name)
    if digest_of_old_manifest != digest_of_new_manifest:
        print("digests differ, copying %s to %s" % (temp_file_name, output_manifest_file_name))
        open(output_manifest_file_name, "w").write(open(temp_file_name, "r").read())
    else:
        print("cache manifest contents have not changed")
    

    # remove the temporary file
    if os.path.exists(temp_file_name):
        os.unlink(temp_file_name)
Example #29
def main():
    from args import parse_args
    args = parse_args()
    print(args)
    dataset = args.trainset + args.testset + args.devset
    print(dataset)
    for data_path in dataset:
        create_header_file(data_path)
        reader = MmapFile(data_path)

        # test sample
        line = reader.getvalue('10')
        try:
            sample = bytes.decode(line.strip())
            print(sample)
        except UnicodeDecodeError:
            print(line)
Example #30
def main():
    args = parse_args()
    loaded_data = load_data()
    (X_train, y_train, X_test) = process(loaded_data)
    X = pd.concat([X_train, X_test])

    trained_models = []
    for target in TARGET_COLUMNS:
        model = make_simple_model(target)
        model = model.fit(X_train, y_train[target])
        trained_models.append(model)

    y_fits = []
    y_preds = []
    for m in trained_models:
        result = m.predict(X)
        result_df = pd.DataFrame(result, index=X.index)

        y_fitted = result_df.loc[:"2020-04-30 23:30:00", :]
        y_pred = result_df.loc["2020-05-01 00:00:00":"2020-07-22 23:30:00", :]

        y_fits.append(y_fitted)
        y_preds.append(y_pred)

    y_fits_df = pd.concat(y_fits, axis=1)
    y_fits_df.columns = [f"{c}_fitted" for c in TARGET_COLUMNS]

    fits = pd.concat([y_fits_df, y_train.loc[:"2020-04-30 23:30:00", :]],
                     axis=1)

    sub = pd.concat(y_preds, axis=1)
    sub.columns = TARGET_COLUMNS
    sub.to_csv('submission.csv')

    if args.verbose:
        show_results(trained_models, fits)
        for trained_model in trained_models:
            print(
                pd.DataFrame(trained_model.cv_results_)[[
                    'mean_train_score', 'std_train_score', 'mean_test_score',
                    'std_test_score'
                ]])
        fits.plot()
        sub.plot()
        plt.show()
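
Note: the .loc slices above are label-based and inclusive of both endpoints when the index is a DatetimeIndex, which is why the fitted and predicted ranges can share exact boundary timestamps. A toy illustration:

import pandas as pd

idx = pd.date_range("2020-04-30 23:00:00", periods=4, freq="30min")
df = pd.DataFrame({"y": range(4)}, index=idx)
print(df.loc[:"2020-04-30 23:30:00", :])   # rows up to and including 23:30
print(df.loc["2020-05-01 00:00:00":, :])   # rows from midnight onward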
Example #31
async def on_message(message):
    if message.content == '!stop' and message.author.guild_permissions.administrator:
        FileLogger.info('User requested shutdown')
        execute()
        await client.logout()
        return

    # we do not want the bot to reply to itself
    if message.author.bot:
        return

    if message.content.startswith('!'):
        setup_guild_channel_list(message.author.guild)
        setup_guild_member_list(message.author.guild)

        user_auth = {
            'guild_id': message.author.guild.id,
            'user_id': message.author.id,
            'user_admin': message.author.guild_permissions.administrator,
            'channel_id': message.channel.id
        }

        content = message.content[1:]
        if message.attachments:
            content += f' {message.attachments[0].url}'
        msg = parse_args(user_auth, content)
        if msg:
            if isinstance(msg, Mapping):
                # it's a dict
                embed = get_embed(message.author, msg)
                await message.channel.send(embed=embed)
            elif isinstance(msg, list):
                # it's a list
                embed = embed_template(message.author)
                for i in range(len(msg)):
                    embed.add_field(name=i, value=msg[i], inline=False)
                await message.channel.send(embed=embed)
            elif is_url(msg):
                # it's an url
                embed = Embed(color=0xffa200)
                embed.set_image(url=msg)
                await message.channel.send(embed=embed)
            else:
                await message.channel.send(msg)
Example #32
def main():
    args = parse_args()
    data_train, data_test, target_train, target_test = get_train_test_data(
        filename=args.file, test_size=args.test_size)

    classifier = get_algorithm(args)
    model = classifier.fit(data_train, target_train)
    targets_predicted = model.predict(data_test)
    correct = 1 - np.mean(target_test != targets_predicted)
    print("Accuracy: {:.2%}".format(correct))

    lib_classifier = get_library_version(args)
    lib_model = lib_classifier.fit(data_train, target_train)
    lib_targets_predicted = lib_model.predict(data_test)
    lib_correct = 1 - np.mean(target_test != lib_targets_predicted)
    print("Library Accuracy: {:.2%}".format(lib_correct))

    print("Difference from Library version: {:+.2%}".format(correct -
                                                            lib_correct))
Example #33
def main():
    args = parse_args()
    if args.debug:
        t1 = time.time()
    if args.make_masterbias:
        make_masterbias(args)
    if args.make_masterdark:
        make_masterdark(args)
    if args.custom:
        args.reduce_twi = False
        args.reduce_sci = False
        custom(args)
    if args.reduce_twi:
        reduce_twighlight(args)
    if args.reduce_sci:
        reduce_science(args)
    if args.debug:
        t2 = time.time()
        print("Total Time taken: %0.2f s" % (t2 - t1))
Example #34
def run_cmdline_args(CONFIG_PATH):
    "Parse the command line arguments"
    global options
    options = parse_args(CONFIG_PATH)

    # Copy a default file if none exists
    if not path.isfile(options.filename):
        default = path.join(path.dirname(__file__), '../data/default_config.cfg')
        other = pkg_resources.resource_filename('poezio', 'default_config.cfg')
        if path.isfile(default):
            copy2(default, options.filename)
        elif path.isfile(other):
            copy2(other, options.filename)

        # Inside the nixstore and possibly other distributions, the reference
        # file is readonly, so is the copy.
        # Make it writable by the user who just created it.
        if os.path.exists(options.filename):
            os.chmod(options.filename,
                     os.stat(options.filename).st_mode | stat.S_IWUSR)

        global firstrun
        firstrun = True
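
Note: the chmod pattern above ORs the file's current mode with stat.S_IWUSR so the owner gains write permission without disturbing any other mode bits. A self-contained sketch on a throwaway temp file:

import os
import stat
import tempfile

fd, path = tempfile.mkstemp()
os.close(fd)
# Add owner-write while preserving the rest of the mode.
os.chmod(path, os.stat(path).st_mode | stat.S_IWUSR)
os.unlink(path)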
Example #35
from args import parse_args
import server

if __name__ == "__main__":
    args = parse_args()
    print "Starting cortext beacon"
    print "  - Listening on port: " + str(args.port)
    print " ".join(["  - Using", "_mocked_" if args.mock else "_real_", "data"])

    if args.port < 1024:
        print "You selected a port < 1024, this requires special priveleges"  

    print "Press Ctrl-C to shut down..."
    server.start_beacon_server(args.port, args.mock)
Example #36
        """
        self.already_run.append((sync, None))

    def can_sync_run(self, sync, args=None):
        """
        Return False if a sync should not be run, True otherwise
        """
        if sync.name in self.exclude:
            logger.debug('sync "%s" has been excluded', sync.name)
            return False
        if not self.allow_rerun and (sync, args) in self.already_run:
            logger.debug('sync "%s" (%s) has already been run', sync.name, args)
            return False
        return True


# run! run! run!
args = args.parse_args()
if args.get('verbosity'):
    logging.basicConfig(level=logging.DEBUG)

synctrex = Synctrex(**args)
config = args.get('config')
for config in utils.as_list(config):
    synctrex.load(config)

synctrex.prepare()
synctrex.run()


Example #37
        """
        Set the value of an option temporarily
        """
        try:
            RawConfigParser.set(self, section, option, value)
        except NoSectionError:
            pass


firstrun = False

# creates the configuration directory if it doesn't exist
# and copy the default config in it
CONFIG_HOME = environ.get("XDG_CONFIG_HOME")
if not CONFIG_HOME:
    CONFIG_HOME = path.join(environ.get('HOME'), '.config')
CONFIG_PATH = path.join(CONFIG_HOME, 'poezio')
try:
    makedirs(CONFIG_PATH)
except OSError:
    pass

if not path.isfile(path.join(CONFIG_PATH, 'poezio.cfg')):
    copy2(path.join(path.dirname(__file__), '../data/default_config.cfg'), path.join(CONFIG_PATH, 'poezio.cfg'))
    firstrun = True

options = parse_args(CONFIG_PATH)
config = Config(options.filename)
if firstrun:
    config.set('firstrun', True)
Example #38
  # Append any remaining files in the reindex queue to the rename queue
  # These are going on the end so we can let new_indexes have as much
  # room as it wants
  if reindex_queue:
    rename_queue.extend(new_indexes(last_index, None, reindex_queue))
    
  process_rename_queue(syncdir, toread, rename_queue)

def main():
  'Read the toread list'
  calibredb = CalibreDB()
  toread = ToRead(ARGS.toread, validator=calibredb.issue)
  syncdir = ExportDirectory(ARGS.syncdir)

  # Grab the ids of the first count entries
  wanted = list(toread.keys())[:ARGS.count]

  # Remove any files not in the list
  syncdir.keep_files(wanted)

  # Export any files not already present
  calibredb.export_files(wanted, syncdir)

  # Rename files so they sort in reading list order
  rename_files(syncdir, toread)

if __name__ == '__main__':
  args.parse_args()
  logs.set_logging()
  main()