Example #1
    def take_item(self, item):
        if item.weight > self.capacity:
            print(
                f"{item.name} is too heavy for you... you'll have to drop some stuff if you really want this"
            )
            will_drop = yes_or_no(
                "Would you like to drop something in your inventory? "
                "\n(Y) to choose what to drop (N) to forget this item.")
            if will_drop:
                # Build one line per carried item; joining a single f-string
                # would join its characters. (self.inventory is assumed here.)
                name_weight_list = "\n\t".join(
                    f"Item name: {owned.name} --- weight: {owned.weight}"
                    for owned in self.inventory)
                print(f"\t{name_weight_list}")

        if isinstance(item, items.Weapon):
            equip_prompt = f"""
Looks like you've picked up a weapon! 
Would you like to equip {item.name} as your new weapon?

Current weapon: {self.equipped_weapon}
New weapon: {item}
"""
            will_equip = yes_or_no(equip_prompt)
            if will_equip:
                print(
                    f"Swapping out {self.equipped_weapon.name} for {item.name}"
                )
                self.equipped_weapon = item
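
All of the examples on this page lean on a small yes_or_no helper. In #1, #3, #4, and #6 it is an interactive prompt that blocks for input and returns a boolean. A minimal sketch under that assumption (each project ships its own variant):

def yes_or_no(prompt: str = "") -> bool:
    # Keep asking until the reply is recognizably yes or no.
    while True:
        answer = input(prompt + " ").strip().lower()
        if answer in ("y", "yes"):
            return True
        if answer in ("n", "no"):
            return False
        print("Please answer (Y)es or (N)o.")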
Example #2
async def reroll_all(ctx, value: int = None):
    if await bot.is_owner(ctx.author):
        desc = f"Reroll **all** totems  ?\n\nType **yes** to proceed, or **no** to cancel."
        embed = discord.Embed(description=desc, color=Color.red())
        embed.set_footer(text=f"Requested by {ctx.author}")
        await ctx.send(embed=embed)
        def check(m):
            # Accept only yes/no replies from the requester in this channel.
            return (m.author == ctx.author and m.channel == ctx.channel
                    and utils.yes_or_no(m.content))
        try:
            reply = await bot.wait_for("message", check=check, timeout=5)
        except asyncio.TimeoutError:
            pass
        else:
            if reply.content.lower() == "yes":
                settings = db.get_settings()
                if value is not None:
                    count = value
                elif settings:
                    count = settings['global_rc'] + 1
                else:
                    count = 101
                db.update_settings(global_rc=count, last_update=utils.now())
    else:
        m = await ctx.send("**Nice try**")
        await asyncio.sleep(2)
        await ctx.channel.delete_messages((ctx.message, m))
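
Examples #2 and #5 use utils.yes_or_no differently: as a predicate over a message string inside the wait_for check, not as an interactive prompt. That variant is presumably closer to:

def yes_or_no(text: str) -> bool:
    # True if the message reads as a yes/no answer at all; the caller
    # then inspects the content to see which one it actually was.
    return text.strip().lower() in ("yes", "y", "no", "n")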
Example #3
def build_vocabulary():
    pathlib.Path(params.data_path).parent.mkdir(parents=True, exist_ok=True)
    pathlib.Path(params.save_path).parent.mkdir(parents=True, exist_ok=True)

    model = None
    if os.path.isfile(params.save_path):
        if not yes_or_no("Vocabulary already exists. Do you want to overwrite it?"):
            model = Word2Vec.load(params.save_path)

    if model is None:
        with open(params.data_path, 'rb') as f:
            items = pickle.load(f)
        normalized_sentences = get_sentences(items)
        del items

        stopwords = load_stopwords()
        sentence_iterator = DictionaryGenerator(
            normalized_sentences,
            stopwords=stopwords)
        del normalized_sentences
        print("\nTotal of sentences: %d" % len(sentence_iterator.sentences))

        model = Word2Vec(sentence_iterator, seed=3695, min_count=1, sg=params.alg, size=params.word_embedding_size,
                         window=params.window_size, sample=params.sample_rate, negative=params.n_negative,
                         workers=max(1, multiprocessing.cpu_count()), iter=params.n_epochs)

        sentence_iterator.save_special_words()
        del sentence_iterator

        print("Saving dictionary at " + params.save_path)
        model.save(params.save_path)

    word_vectors = model.wv
    del model
    print("Done. Vocabulary size is: %d" % len(word_vectors.vocab))
Example #4
def get_zerofree_status(args: argparse.Namespace) -> bool:
    if utils.is_virtual():
        if args.zerofree:
            print(
                "Virtual machine detected. Enabling zeroing of free space as requested."
            )
            return True
        print(
            "This seems to be a virtual machine. Do you want to zero free space?"
        )
        print("THIS IS FOR VIRTUALBOX GUESTS ONLY")
        return utils.yes_or_no()
    if args.zerofree:
        print(
            "This does not seem to be a virtual machine. Are you sure you want to zero free space regardless?"
        )
        print("THIS IS FOR VIRTUALBOX GUESTS ONLY")
        return utils.yes_or_no()
    return False
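
How get_zerofree_status might be wired up; a hypothetical sketch, since the real parser and the follow-up step live elsewhere in that project:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--zerofree", action="store_true",
                    help="zero free space without prompting (VirtualBox guests only)")
args = parser.parse_args()

if get_zerofree_status(args):
    run_zerofree()  # hypothetical follow-up step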
Example #5
async def fusion(ctx, head="?", body="?", color="0"):
    guild = db.find_guild(ctx.guild)
    if guild:
        lang = pokedex.Language(guild['lang'])
    else:
        lang = pokedex.Language.DEFAULT
        db.update_guild(ctx.guild, lang=lang.value, name=ctx.guild.name)

    head_result = dex.resolve(head, lang)
    body_result = dex.resolve(body, lang)
    color_result = dex.resolve(color, lang) if color != "0" else (color, "")
    if None in (head_result, body_result, color_result):
        head_guess = dex.guess(head, lang)[0] if head_result is None else head
        body_guess = dex.guess(body, lang)[0] if body_result is None else body
        color_guess = dex.guess(color, lang)[0] if color_result is None else color
        body_tmp = body_guess
        color_tmp = color_guess if color_guess != "0" else ""
        if body_tmp in pokedex.Pokedex.RANDOM_QUERIES and not color_tmp:
            body_tmp = ""
        cmd = utils.strict_whitespace(
            f"**{bot.command_prefix}f {head_guess} {body_tmp} {color_tmp}**")
        desc = f"Did you mean   {cmd}   ?\n\nType **yes** to proceed, or **no** to cancel."
        embed = discord.Embed(description=desc, color=Color.light_grey())
        embed.set_thumbnail(url="https://i.imgur.com/Rcys72H.png")
        embed.set_footer(text=f"Requested by {ctx.author}")
        await ctx.send(embed=embed)
        def check(m):
            # Accept only yes/no replies from the requester in this channel.
            return (m.author == ctx.author and m.channel == ctx.channel
                    and utils.yes_or_no(m.content))
        try:
            reply = await bot.wait_for("message", check=check, timeout=60)
        except asyncio.TimeoutError:
            pass
        else:
            if reply.content.lower() == "yes":
                await ctx.invoke(fusion,
                                 head=head_guess,
                                 body=body_guess,
                                 color=color_guess)
    else:
        h_id, h = head_result
        b_id, b = body_result
        c_id, c = color_result
        last_queries[ctx.message.channel] = h_id, b_id, c_id

        file = api.PokeFusion.get_fusion_as_file(head_id=h_id,
                                                 body_id=b_id,
                                                 color_id=c_id)
        if file:
            if c_id == "0":
                filename = f"fusion_{h_id}{h}_{b_id}{b}.png"
            else:
                filename = f"fusion_{h_id}{h}_{b_id}{b}_{c_id}{c}.png"
            f = discord.File(fp=file, filename=filename)
            color = Color.from_rgb(*utils.get_dominant_color(file))
            share_url = f"http://pokefusion.japeal.com/{b_id}/{h_id}/{c_id}"
            embed = discord.Embed(title="PokéFusion",
                                  url=share_url,
                                  color=color)
            embed.add_field(name="Head",
                            value=f"{h.title()} #{h_id}",
                            inline=True)
            embed.add_field(name="Body",
                            value=f"{b.title()} #{b_id}",
                            inline=True)
            if c_id != "0":
                embed.add_field(name="Colors", value=f"{c.title()} #{c_id}")
            embed.set_image(
                url=f"attachment://{filename.replace('(', '').replace(')', '')}"
            )
            embed.set_footer(text=f"Requested by {ctx.author}")
            await ctx.send(embed=embed, file=f)
Example #6
def main(args):
    # Setup logging
    logger = setup_logging(args)

    # Read params of model
    params = fetch_model_params(args.model)

    # Fetch appropriate input functions
    input_fn = generic_text
    pred_input_fn = pred_input
    handle_pred_output_fn = handle_pred_output

    if params["mlm_training"]:
        mlm_sample_text_fn = partial(mlm_sample_text, params)
        input_fn = partial(generic_text, sample_text_fn=mlm_sample_text_fn)

    # Fetch encoder per params
    encoder = fetch_encoder(params)

    pred_input_fn = partial(pred_input_fn,
                            path_to_prompt=args.prompt,
                            logger=logger,
                            enc=encoder)

    # Sample from Dataset if check dataset flag is on
    if args.check_dataset:
        check_dataset(input_fn)

    # Confirm deletion of checkpoint files if --new flag is set
    if args.new:
        if yes_or_no(
                f"Are you sure you want to remove '{params['model_path']}' to start afresh?"
        ):
            remove_gs_or_filepath(params["model_path"])
        else:
            exit()

    # Save config to logdir for experiment management
    save_config(params, params["model_path"])

    # Add to params: auto_layout, auto_layout_and_mesh_shape, use_tpu, num_cores
    mesh_shape = mtf.convert_to_shape(params["mesh_shape"])
    params["num_cores"] = mesh_shape.size
    params["auto_layout"] = args.auto_layout
    params["auto_layout_and_mesh_shape"] = args.auto_layout_and_mesh_shape
    params["use_tpu"] = True if not args.tpu is None else False
    params["gpu_ids"] = args.gpu_ids
    params["steps_per_checkpoint"] = args.steps_per_checkpoint
    # Expand attention types param
    params["attention_types"] = expand_attention_types_params(
        params["attention_types"])
    assert len(params["attention_types"]) == params[
        "n_layer"]  # Assert that the length of expanded list = num layers
    params["predict_batch_size"] = params.get("predict_batch_size",
                                              1)  # Default to 1
    params["predict"] = args.predict
    params['model'] = params.get("model", "GPT")  # Default to GPT, currently the only option

    # Sample quality of MoE models suffers when using the faster sampling method, so default to slow_sampling if
    # moe layers are present
    params["slow_sampling"] = params["moe_layers"] is not None

    logger.info(f"params = {params}")

    # Get eval tasks from params
    eval_tasks = params.get("eval_tasks", [])
    has_predict_or_eval_steps_or_eval_tasks = (params["predict_steps"] > 0
                                               or params["eval_steps"] > 0
                                               or len(eval_tasks) > 0)

    for t in eval_tasks:
        assert t in task_descriptors, f"Eval task '{t}' is not known"
        task_descriptors[t]["init_fn"](params)

    # Set up TPUs and Estimator
    if args.tpu == "colab":
        tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
        ) if params["use_tpu"] else None
    else:
        tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
            args.tpu) if params["use_tpu"] else None

    config = tpu_config.RunConfig(
        cluster=tpu_cluster_resolver,
        model_dir=params["model_path"],
        save_checkpoints_steps=None,  # Disable the default saver
        save_checkpoints_secs=None,  # Disable the default saver
        log_step_count_steps=params["iterations"],
        save_summary_steps=params["iterations"],
        tpu_config=tpu_config.TPUConfig(
            num_shards=mesh_shape.size,
            iterations_per_loop=params["iterations"],
            num_cores_per_replica=1,
            per_host_input_for_training=tpu_config.InputPipelineConfig.BROADCAST))

    estimator = tpu_estimator.TPUEstimator(
        use_tpu=params["use_tpu"],
        model_fn=model_fn,
        config=config,
        train_batch_size=params["train_batch_size"],
        eval_batch_size=params["train_batch_size"],
        predict_batch_size=params["predict_batch_size"],
        params=params)

    def _make_task_estimator(task):
        task_params = params.copy()
        task_params["eval_task"] = task
        return tpu_estimator.TPUEstimator(
            use_tpu=params["use_tpu"],
            model_fn=model_fn,
            config=config,
            train_batch_size=params["train_batch_size"],
            eval_batch_size=params["train_batch_size"],
            predict_batch_size=params["predict_batch_size"],
            params=task_params)

    eval_task_estimators = {
        task: _make_task_estimator(task)
        for task in eval_tasks
    }

    current_step = int(
        estimator_lib._load_global_step_from_checkpoint_dir(
            params["model_path"]))
    logger.info(f"Current step {current_step}")

    if args.predict:
        # Predict
        predictions = estimator.predict(input_fn=pred_input_fn)
        logger.info("Predictions generated")
        enc = fetch_encoder(params)
        handle_pred_output_fn(predictions,
                              logger,
                              enc,
                              params,
                              out_name=f"predictions_{current_step}")
        return

    elif has_predict_or_eval_steps_or_eval_tasks:
        # Eval and train - stop and predict and/or eval every checkpoint
        while current_step < params["train_steps"]:
            next_checkpoint = min(current_step + args.steps_per_checkpoint,
                                  params["train_steps"])

            estimator.train(input_fn=partial(input_fn, eval=False),
                            max_steps=next_checkpoint)
            current_step = next_checkpoint

            if params["predict_steps"] > 0:
                logger.info("Running prediction...")
                predictions = estimator.predict(input_fn=pred_input_fn)
                enc = fetch_encoder(params)
                handle_pred_output_fn(predictions,
                                      logger,
                                      enc,
                                      params,
                                      out_name=f"predictions_{current_step}")

            if params["eval_steps"] > 0:
                logger.info("Running evaluation...")
                eval_results = estimator.evaluate(input_fn=partial(input_fn,
                                                                   eval=True),
                                                  steps=params["eval_steps"])
                logger.info(f"Eval results: {eval_results}")

            for task in eval_tasks:
                logger.info(f"Starting evaluation task '{task}'")
                task_info = task_descriptors[task]["get_task_info_fn"](params)
                task_estimator = eval_task_estimators[task]
                task_input_fn = task_descriptors[task]["input_fn"]
                eval_results = task_estimator.evaluate(
                    input_fn=task_input_fn,
                    steps=task_info["n_steps"],
                    name=task)
                logger.info(f"Eval task '{task}' results: {eval_results}")
        return
    else:
        # Else, just train straight through without checkpoint stops
        while current_step < params["train_steps"]:
            estimator.train(input_fn=partial(input_fn, eval=False),
                            max_steps=params["train_steps"])
            # Refresh the step counter so the loop terminates once
            # training reaches train_steps
            current_step = int(
                estimator_lib._load_global_step_from_checkpoint_dir(
                    params["model_path"]))
Example #7
### imports
from datetime import datetime
import pandas as pd
import os
from utils import am_or_pm, yes_or_no
from steps import steps

### establish circumstances

# time (hours 0-11 count as AM; noon and later as PM)
AM = datetime.now().hour < 12
PM = not AM

# prompts
OILY = yes_or_no('Is your skin feeling > oily < right now? (y/-)')

DRY = False
if not OILY:
    DRY = yes_or_no('Is your skin feeling > dry < right now? (y/-)')

skin_feel = 'oily' if OILY else ('dry' if DRY else 'normal')

# whether a product has been applied that disrupts peptides
LOWPH = False

print(f'Recommendation for > {skin_feel} < skin at > {am_or_pm()} <:')

recommendation = []

if PM:
    recommendation.append('mar')
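
The am_or_pm helper imported above is not shown; a plausible sketch consistent with the AM/PM flags (noon and later counts as PM):

def am_or_pm() -> str:
    # Mirrors the module-level AM/PM flags.
    return 'AM' if datetime.now().hour < 12 else 'PM'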