Example #1
from pgpy import PGPKey, PGPMessage, PGPUID
from pgpy.constants import PubKeyAlgorithm, KeyFlags, HashAlgorithm, SymmetricKeyAlgorithm, CompressionAlgorithm
from typer import run, secho, colors


def generate(username: str):
    primary_key = PGPKey.new(PubKeyAlgorithm.RSAEncryptOrSign, 4096)
    primary_key.add_uid(PGPUID.new(username),
                        usage={KeyFlags.EncryptCommunications},
                        hashes=[HashAlgorithm.SHA512],
                        ciphers=[SymmetricKeyAlgorithm.AES256],
                        compression=[CompressionAlgorithm.ZIP])

    secho("Primary key, to be saved in server", fg=colors.BLUE)
    secho(primary_key.fingerprint, fg=colors.RED)
    secho(str(primary_key), fg=colors.YELLOW)

    secho("Public key, to be saved in server", fg=colors.BLUE)
    secho(primary_key.fingerprint, fg=colors.RED)
    secho(str(primary_key.pubkey), fg=colors.GREEN)


if __name__ == '__main__':
    run(generate)
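Note: str(primary_key) above exports the private key unencrypted. A minimal sketch of passphrase-protecting it with pgpy before printing (the passphrase value is a placeholder):

primary_key.protect('change-me', SymmetricKeyAlgorithm.AES256, HashAlgorithm.SHA256)
secho(str(primary_key), fg=colors.YELLOW)  # secret key material is now encrypted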
Example #2
import typer
from jinja2 import Template, Environment, FileSystemLoader

def run(api_endpoint_dns_name: str = ""):

    template_path = './code/templates'
    output_path = './code/'
    templates = ['streamlit_app.py.tmpl']

    loader = FileSystemLoader(template_path)
    env = Environment(loader=loader) #, variable_start_string='@@=', variable_end_string='=@@')

    for template_file_name in templates:

        template = env.get_template(template_file_name)
        final_file_name = template_file_name[0:template_file_name.rindex('.')]
        output_from_parsed_template = template.render(api_endpoint_dns_name=api_endpoint_dns_name)

        with open(output_path + final_file_name, "w") as fh:
            fh.write(output_from_parsed_template)

if __name__ == "__main__":
    typer.run(run)
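The loop strips the last extension, so streamlit_app.py.tmpl is written out as streamlit_app.py. A minimal sketch of what such a template could contain (hypothetical content; Jinja2's default {{ ... }} delimiters apply because the custom delimiters are commented out):

# streamlit_app.py.tmpl (hypothetical)
API_ENDPOINT = "https://{{ api_endpoint_dns_name }}"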
Example #3
from typing import List

import typer

valid_completion_items = [
    ("Camila", "The reader of books."),
    ("Carlos", "The writer of scripts."),
    ("Sebastian", "The type hints guy."),
]


def complete_name(ctx: typer.Context, incomplete: str):
    names = ctx.params.get("name") or []
    for name, help_text in valid_completion_items:
        if name.startswith(incomplete) and name not in names:
            yield (name, help_text)


def main(name: List[str] = typer.Option(["World"],
                                        help="The name to say hi to.",
                                        autocompletion=complete_name)):
    for n in name:
        print(f"Hello {n}")


if __name__ == "__main__":
    typer.run(main)
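Usage note: because complete_name reads ctx.params, values that were already passed are not suggested again; for example, with shell completion installed, `python main.py --name Camila --name <TAB>` offers only Carlos and Sebastian.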
Example #4
    targetname = f"{outputname}_{start}_to_{stop}.mp4".replace(':', '')

    if not os.path.exists(f"{outputname}.mp4"):
        subprocess.call(f"yes Y | you-get -O {outputname} {videourl}",
                        shell=True)

    try:
        start = sum(x * int(t)
                    for x, t in zip([1, 60, 3600], reversed(start.split(':'))))
        stop = sum(x * int(t)
                   for x, t in zip([1, 60, 3600], reversed(stop.split(':'))))
    except ValueError:
        # leave start/stop unchanged if they are not valid H:M:S timestamps
        pass

    if None not in (start, stop):
        ffmpeg_extract_subclip(f"{outputname}.mp4",
                               start,
                               stop,
                               targetname=targetname)

    #TODO Add ability to Accept multiple Clip entries.
    #TODO Add ability to combine all Spliced Clips into single Video.


if __name__ == "__main__":
    typer.run(splicevideo)

# https://pyscenedetect.readthedocs.io/en/latest/examples/usage-example/
# https://pypi.org/project/youtube-transcript-api/
# https://github.com/senko/python-video-converter
Example #5
        columns=["DescriptorName", "DescriptorUI", "TreeNumberList"])

    for mesh in tqdm(mesh_tree.getroot()):
        try:
            # TreeNumberList e.g. A11.118.637.555.567.550.500.100
            tree_number = mesh[-2][0].text
            # DescriptorUI e.g. M000616943
            mesh_code = mesh[0].text
            # DescriptorName e.g. Mucosal-Associated Invariant T Cells
            mesh_name = mesh[1][0].text
        except IndexError:
            # TODO: Add logger
            # print("ERROR", file=sys.stderr)
            continue  # skip malformed records instead of reusing stale values
        if (tree_number.startswith("C") and not tree_number.startswith("C22")
                or tree_number.startswith("F03")):
            mesh_Df = mesh_Df.append(
                {
                    "DescriptorName": mesh_name,
                    "DescriptorUI": mesh_code,
                    "TreeNumberList": tree_number,
                },
                ignore_index=True,
            )
    mesh_Df.to_csv(filtered_mesh_tags_path)
    return mesh_Df


if __name__ == "__main__":
    typer.run(filter_mesh_tags)
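Note: DataFrame.append is deprecated and was removed in pandas 2.0. A sketch of the same accumulation using a plain list of dicts instead (assuming pandas is imported as pd, as elsewhere in this example):

rows = []
# inside the loop, instead of mesh_Df.append(...):
rows.append({
    "DescriptorName": mesh_name,
    "DescriptorUI": mesh_code,
    "TreeNumberList": tree_number,
})
# after the loop:
mesh_Df = pd.DataFrame(rows, columns=["DescriptorName", "DescriptorUI", "TreeNumberList"])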
Example #6
    nlp = spacy.load(model_name)

    # create a new config as a copy of the loaded pipeline's config
    config = nlp.config.copy()

    # revert most training settings to the current defaults
    default_config = spacy.blank(nlp.lang).config
    config["corpora"] = default_config["corpora"]
    config["training"] = default_config["training"]
    config["paths"]["train"] = train_path
    config["paths"]["dev"] = dev_path

    # set the vectors if the loaded pipeline has vectors
    if len(nlp.vocab.vectors) > 0:
        config["paths"]["vectors"] = model_name

    # source all components from the loaded pipeline and freeze all except the
    # component to update
    config["training"]["frozen_components"] = []
    for pipe_name in nlp.component_names:
        config["components"][pipe_name] = {"source": model_name}
        if pipe_name != component_to_update:
            config["training"]["frozen_components"].append(pipe_name)

    # save the config
    config.to_disk(output_path)


if __name__ == "__main__":
    typer.run(create_config)
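The config written by create_config is meant for spaCy's training CLI; assuming it was saved as config.cfg, a typical invocation would be:

python -m spacy train config.cfg --output ./output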
Example #7
            "Log10(MSE)",
            "MAE",
            "Log10(MAE)",
            "SNR",
            "PSNR",
        ]
        cols_out = [
            "model",
            "name",
            "weights",
            "mse",
            "log_mse",
            "mae",
            "log_mae",
            "snr",
            "psnr",
        ]
    df["log_mse"] = np.log10(df["mse"])
    df["log_mae"] = np.log10(df["mae"])

    out = pd.DataFrame()
    out[cols_out_aliases] = df[cols_out]
    out["Model"] = out["Model"].apply(lambda x: ALIASES[x])
    out["Rodzaj wag"] = out["Rodzaj wag"].apply(lambda x: WEIGHT_ALIASES[x])
    out = out.sort_values(by=["Model", "Rodzaj wag"])
    return out


if __name__ == "__main__":
    typer.run(generate_table)
Example #8
def run(func):
    wrapped_func = wrap_func(func)
    typer.run(wrapped_func)
Example #9
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
This script is a patch to order deterministically model-best/vocab/strings.json.

See https://github.com/explosion/spaCy/pull/7603 for details.
"""

from pathlib import Path

import srsly
import typer
from spacy.strings import StringStore


def sort(path: Path):
    """Sort the strings from the vocabulary of a spaCy model.

    For the original code of StringStore.to_disk(), see https://github.com/explosion/spaCy/blob/53a3b967ac704ff0a67a7102ede6d916e2a4545a/spacy/strings.pyx#L219-L227.
    """
    st = StringStore().from_disk(path)
    strings = sorted(st)
    srsly.write_json(path, strings)


if __name__ == "__main__":
    typer.run(sort)
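Usage note: point the script at the vocabulary file named in the docstring, e.g. `python sort_strings.py model-best/vocab/strings.json` (the script filename here is hypothetical).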
Example #10
            logger.info('best_mb_model ->' + str(mb_best_model))


            to_predict_dict_long = {k: to_predict_dict_long[k] for k in [ab_best_model,mb_best_model] if k in to_predict_dict_long} 
            logger.debug('updated predict dict with best models from record')


    ## PREDICTION ##

    logger.debug('running module: iter_predict -> predict / long prediction =' + str(long))
    prediction = iter_predict.predict(to_predict_dict_short, to_predict_dict_long, long=long)
    logger.debug('module end')
    #return prediction

    if monitoring:

        logger.debug('running module: monitoring -> evaluation')
        monitoring_report.evaluation(data_prep=data_prep, path_to_dir='predicciones/prediccion_corta/entregable/', output_path='predicciones/monitoring/')
        logger.debug('module end')


    exit()



if __name__ == "__main__":
    typer.run(principal)
    logger.info('program finished')
Example #11
def start():
    """Wrapper around typer entrypoint"""
    load_dotenv()

    typer.run(main)
Example #12
            raw_submitted_df=submission_df.loc[epsilon_mask, :],
            processes=processes,
        )

        logger.info(f"starting calculation for epsilon={epsilon}")
        epsilon_score = metric.overall_score()

        # save out some records from this run if the user would like to output a report
        if report_path is not None:
            report["per_epsilon"].append(metric.report)

        logger.success(f"score for epsilon {epsilon}: {epsilon_score}")
        scores_per_epsilon.append(epsilon_score)

    score_dict = dict(zip(epsilons, scores_per_epsilon))
    mean_score = np.mean(scores_per_epsilon)
    logger.success(
        f"finished scoring all epsilons: OVERALL SCORE = {mean_score} (per epsilon: {score_dict})"
    )

    if report_path is not None:
        with report_path.open("w") as fp:
            logger.info(f"writing out run report to {report_path}")
            json.dump(report, fp)
            logger.success(f"wrote out run report to {report_path}")
    return mean_score


if __name__ == "__main__":
    typer.run(score_submission)
Example #13
def main():
    """Dispatch to typer"""
    parse_cli.__doc__ = __doc__.format(version=api.__version__)
    typer.run(parse_cli)
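Example #13 injects a version number into the command's --help output by rewriting the function docstring, which Typer uses as the help text. A self-contained sketch of the pattern (all names besides typer.run are assumptions):

"""Hypothetical tool. Version {version}."""
import typer

__version__ = "1.2.3"


def parse_cli():
    pass


def main():
    parse_cli.__doc__ = __doc__.format(version=__version__)
    typer.run(parse_cli)


if __name__ == "__main__":
    main()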
Example #14
            logger.info(f'Reading CSV into a pandas dataframe '
                        f'(lines={num_lines}, file_size={csv_size})...')
            dfs = pd.read_csv(
                csv_file,
                encoding=encoding,
                error_bad_lines=False,
                chunksize=CHUNKSIZE,
            )
            for chunk_number, df in enumerate(dfs, start=1):
                logger.info(f'Processing chunk {chunk_number}/{chunks}...')
                df.to_json(file, orient='records', lines=True)
                file.write('\n')

    logger.info('Initializing Elasticsearch...')

    es = Elasticsearch()

    actions = ({
        "_index": f'{dataset_name}_index',
        "_type": f'{dataset_name}_index_doctype',
        "_source": record,
    } for record in open_input_json_file(json_file))
    helpers.bulk(es, actions)

    logger.info('Finished.')


if __name__ == '__main__':
    typer.run(populate)
Example #15
        mu_bar = None

    print('Discretize ...')
    if fv:
        m, data = discretize_stationary_fv(problem, diameter=1. / n)
    else:
        m, data = discretize_stationary_cg(problem, diameter=1. / n, mu_energy_product=mu_bar)
    print(data['grid'])
    print()

    print('Solve ...')
    U = m.solution_space.empty()
    for mu in problem.parameter_space.sample_uniformly(10):
        U.append(m.solve(mu))
    if mu_bar is not None:
        # use the given energy product
        norm_squared = U[-1].norm(m.products['energy'])[0]
        print('Energy norm of the last snapshot: ', np.sqrt(norm_squared))
    if not fv:
        if norm == 'h1':
            norm_squared = U[-1].norm(m.products['h1_0_semi'])[0]
            print('H^1_0 semi norm of the last snapshot: ', np.sqrt(norm_squared))
        if norm == 'l2':
            norm_squared = U[-1].norm(m.products['l2_0'])[0]
            print('L^2_0 norm of the last snapshot: ', np.sqrt(norm_squared))
    m.visualize(U, title='Solution for mu in [0.1, 1]')


if __name__ == '__main__':
    run(main)
Example #16
def cli():
    typer.run(main)
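Thin wrappers like cli() exist so packaging metadata can point at a zero-argument callable. A hedged setup.py sketch (package and script names are assumptions):

# setup.py (hypothetical)
from setuptools import setup

setup(
    name="mypackage",
    entry_points={"console_scripts": ["mytool = mypackage.cli:cli"]},
)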
Example #17
def main():
    typer.run(server)
Example #18
            entity = entity_ids[i]
            if not kb.contains_entity(entity):
                kb.add_entity(entity, freqs[i], embeddings[i])

        for a in aliases:
            ents = [e for e in a["entities"] if kb.contains_entity(e)]
            n_ents = len(ents)
            if n_ents > 0:
                prior_prob = [1.0 / n_ents] * n_ents
                kb.add_alias(alias=a["alias"], entities=ents, probabilities=prior_prob)

        msg.good("Done adding entities and aliases to kb")

    msg.divider("Create ANN Index")

    cg = CandidateGenerator().fit(kb.get_alias_strings(), verbose=True)

    ann_linker = nlp.create_pipe("ann_linker")
    ann_linker.set_kb(kb)
    ann_linker.set_cg(cg)

    nlp.add_pipe(ann_linker, last=True)

    nlp.meta["name"] = new_model_name
    nlp.to_disk(output_dir)
    nlp.from_disk(output_dir)


if __name__ == "__main__":
    typer.run(create_index)
Example #19
            }, {
                "days": 150
            }],
            "test_begin": to_timestamp(search_end),
            "test_end": to_timestamp(last)
        }
    }


def create(dirname: str):
    datasets, targets = get_datasets()
    # batch by pipeline
    res = {}
    for pipeline, target, d in itertools.product(PIPELINE_LIST, TARGET_LIST,
                                                 datasets):
        if d['name'] in SKIP_DATASET:
            continue
        tag = "{}-{}-{}".format(d['ticker'], d['name'], target)
        if tag not in res:
            res[tag] = []
        res[tag].append(get_request(pipeline, target, d))
    os.makedirs(dirname, exist_ok=True)
    for name, items in res.items():
        with open('{}/{}.json'.format(dirname, name), 'w') as f:
            json.dump(items, f)


if __name__ == "__main__":
    random.seed(datetime.utcnow().timestamp())
    typer.run(create)
Example #20
import cv2
import imageio as io
import typer
from pathlib import Path


def splitone(path: str = typer.Option(
    ...,
    '--path',
    '-p',
    help='Pass path to the directory containing the gif files.'),
             num: int = typer.Option(0,
                                     '--num',
                                     '-n',
                                     help='Pass the frame number.')):
    '''Creates a poster image for all gif files in the path.'''
    f = Path(path)

    for gif in f.glob('*.gif'):
        for (i, frame) in enumerate(io.get_reader(gif)):
            if i == num:
                frame = cv2.cvtColor(frame, cv2.COLOR_RGBA2RGB)
                io.imwrite(f'{f}/{gif.stem}.png', frame)


if __name__ == '__main__':
    typer.run(splitone)
Example #21
    else:
        proposals, voteplan_proposals, challenges = asyncio.run(
            get_proposals_voteplans_and_challenges_from_api(vit_station_url))

    try:
        sanity_check_data(proposals, voteplan_proposals)
    except SanityException as e:
        print(f"{e}")
        sys.exit(1)

    for challenge in challenges.values():
        challenge_proposals, challenge_voteplan_proposals = filter_data_by_challenge(
            challenge.id, proposals, voteplan_proposals)
        results = calc_results(
            challenge_proposals,
            challenge_voteplan_proposals,
            challenge.proposers_rewards,
            conversion_factor,
            approval_threshold,
        )
        out_stream = (output_json(results)
                      if output_format == OutputFormat.JSON
                      else output_csv(results))
        challenge_output_file_path = build_path_for_challenge(
            output_file, challenge.title.replace(" ", "_"))
        with open(challenge_output_file_path, "w", encoding="utf-8") as out_file:
            dump_to_file(out_stream, out_file)


if __name__ == "__main__":
    typer.run(calculate_rewards)
Example #22
def _run():
    typer.run(main)
Example #23
            if best_mean_reward is None or best_mean_reward < mean_reward:
                if save_name is None:
                    save_name = environment + "-best.pt"
                torch.save(net.state_dict(), outdir / save_name)
                if best_mean_reward is not None:
                    typer.echo(f"Best mean reward updated " +
                               f"{best_mean_reward:.3f} -> " +
                               f"{mean_reward:.3f}, model saved")
                best_mean_reward = mean_reward

            if mean_reward > reward_bound:
                typer.echo(f"Solved in {frame_idx} frames!")
                break

        if len(buffer) < replay_start_size:
            continue

        if frame_idx % sync_target_frames == 0:
            tgt_net.load_state_dict(net.state_dict())

        optimizer.zero_grad()
        batch = buffer.sample(batch_size)
        loss_t = calc_loss(batch, net, tgt_net, discount_factor, device=device)
        loss_t.backward()
        optimizer.step()


if __name__ == "__main__":
    typer.run(train_DQL_ATARI)
Example #24
def main_wrapper():
    typer.run(main)
Example #25
import hashlib
import os
import time

import bs4
import requests
import typer

_mydir = os.path.realpath(os.path.dirname(__file__))


def run_loop(url, delay_seconds=1):
    hash_ = hashlib.sha1(url.encode()).hexdigest()
    filename = os.path.join(_mydir, hash_)
    filename_new = filename + '_new'
    print(f'Will save content to {filename}')
    old = None
    if os.path.exists(filename):
        with open(filename) as fh:
            old = fh.read()
    while True:
        res = requests.get(url)
        text = res.text
        if old is None:
            with open(filename, 'w') as fh:
                fh.write(text)
            old = text
        elif old != text:
            with open(filename_new, 'w') as fh:
                fh.write(text)
            # just leave the file until you delete it
            while True:
                os.system('spd-say "your program has finished"')
                time.sleep(1)
        time.sleep(delay_seconds)


if __name__ == '__main__':
    typer.run(run_loop)
Example #26
        agent = DqnAgent(state_size=state_size,
                         action_size=action_size,
                         device=DEVICE,
                         dueling_dqn=dueling_dqn)
        if agent_parameters_path:
            agent.load(agent_parameters_path)
        logging.info(f'Loaded {agent} agent.')

    env_info = env.reset(train_mode=False)[brain_name]
    state = env_info.vector_observations[0]
    score = 0
    while True:
        action = agent.act(state)
        env_info = env.step(action)[brain_name]
        next_state = env_info.vector_observations[0]
        reward = env_info.rewards[0]
        done = env_info.local_done[0]
        score += reward
        state = next_state
        sleep(0.04)
        if done:
            break

    print("Score: {}".format(score))

    env.close()


if __name__ == '__main__':
    typer.run(run_environment)
Example #27
    new_nodes = convert_constant_nodes_to_int32(graph.node)

    graph_name = f"{graph.name}-int32"
    log.info("Creating new graph...")
    # * create a new graph with converted params and new nodes.
    # print(graph.output)
    graph_int32 = h.make_graph(
        new_nodes,
        graph_name,
        graph.input,
        graph.output,
        initializer=converted_params,
    )
    log.info("Creating new int32 model...")
    model_int32 = h.make_model(graph_int32, producer_name="onnx-typecast")
    model_int32.opset_import[0].version = opset_version
    ch.check_model(model_int32)
    log.info(f"Saving converted model as: {out_path}")
    onnx.save_model(model_int32, out_path)
    log.info(f"Done Done London. 🎉")
    return


if __name__ == "__main__":
    '''
    usage
    python onnx_lib/convert.py /Strawberry/yolov5/output/train/exp_893aug_887_m/weights/best_ap05.onnx /Strawberry/yolov5/output/train/exp_893aug_887_m/weights/best_ap05_32.onnx
    https://github.com/aadhithya/onnx-typecast
    '''
    typer.run(convert_model_to_int32)
Example #28
            correct.append(out == Y_test[n:n + batch_size])

        acc = np.concatenate(correct).mean()
        print("Accuracy:", acc)

    if not outdir:
        return

    weights = {
        "w1": model.layer1.weight.data,
        "b1": model.layer1.bias.data,
        "w2": model.layer2.weight.data,
        "b2": model.layer2.bias.data
    }
    Path(outdir).mkdir(exist_ok=True)
    for name, val in weights.items():
        np.save(Path(outdir) / f"{name}.npy", val.astype(np.float32))

    # select one example of each class
    indices = np.array([np.argmax(Y_test == i) for i in range(10)])
    x = X_test[indices].reshape((10, -1)).astype(np.float32) / 255

    y_pred = model(Tensor(x)).softmax().data
    print("Model predictions:", y_pred.argmax(axis=1))

    np.save(Path(outdir) / "samples.npy", x)


if __name__ == "__main__":
    typer.run(train)
Example #29
    print('Generating latent vectors...')
    shape = [num_frames, np.prod(grid_size)] + Gs.input_shape[1:] # [frame, image, channel, component]
    all_latents = random_state.randn(*shape).astype(np.float32)
    all_latents = scipy.ndimage.gaussian_filter(all_latents, [smoothing_sec * mp4_fps] + [0] * len(Gs.input_shape), mode='wrap')
    all_latents /= np.sqrt(np.mean(np.square(all_latents)))

    # Frame generation func for moviepy.
    def make_frame(t):
        frame_idx = int(np.clip(np.round(t * mp4_fps), 0, num_frames - 1))
        latents = all_latents[frame_idx]
        labels = np.zeros([latents.shape[0], 0], np.float32)
        images = Gs.run(latents, None, truncation_psi=truncation_psi, randomize_noise=False, output_transform=fmt, minibatch_size=minibatch_size)

        images = images.transpose(0, 3, 1, 2)  # NHWC -> NCHW
        grid = create_image_grid(images, grid_size).transpose(1, 2, 0)  # HWC
        if grid.shape[2] == 1:
            grid = grid.repeat(3, 2) # grayscale => RGB
        return grid

    # Generate video.
    import moviepy.editor # pip install moviepy
    c = moviepy.editor.VideoClip(make_frame, duration=duration_sec)
    if output_width:
        c = c.resize(width=output_width)
    c.write_videofile(str(mp4), fps=mp4_fps, codec=mp4_codec)
    return c

if __name__ == "__main__":
    typer.run(generate_interpolation_video)
Example #30
    def start(self):
        typer.run(self.launch)
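Example #30 passes a bound method to typer.run, so self is already bound and only the remaining parameters become CLI parameters. A minimal self-contained sketch of such a class (everything except the start/launch pattern is an assumption):

import typer


class App:
    def launch(self, name: str = "World"):
        typer.echo(f"Hello {name}")

    def start(self):
        typer.run(self.launch)


if __name__ == "__main__":
    App().start()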