Example #1
def nbdev_clean_nbs(fname: Param("A notebook name or glob to clean",
                                 str) = None,
                    clear_all: Param("Clean all metadata and outputs",
                                     bool) = False,
                    disp: Param("Print the cleaned outputs", bool) = False,
                    read_input_stream: Param(
                        "Read input stream and not nb folder") = False):
    "Clean all notebooks in `fname` to avoid merge conflicts"
    # Git hooks pass the notebooks via stdin
    if read_input_stream and sys.stdin:
        input_stream = io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8')
        nb = json.load(input_stream)
        clean_nb(nb, clear_all=clear_all)
        _print_output(nb)
        return
    files = Config().nbs_path.glob(
        '**/*.ipynb') if fname is None else glob.glob(fname)
    for f in files:
        if not str(f).endswith('.ipynb'): continue
        with open(f, 'r', encoding='utf-8') as nbf:
            nb = json.load(nbf)
        clean_nb(nb, clear_all=clear_all)
        if disp: _print_output(nb)
        else:
            x = json.dumps(nb, sort_keys=True, indent=1, ensure_ascii=False)
            with io.open(f, 'w', encoding='utf-8') as out:  # don't shadow the loop variable
                out.write(x)
                out.write("\n")
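A hedged usage sketch (the notebook path is illustrative): in nbdev these entry points are exposed as console scripts via @call_parse (fastcore.script / fastscript), so the same function can be driven from the shell or called directly.

# Shell form, the flag mirroring the Param annotation:
#   nbdev_clean_nbs --fname nbs/00_core.ipynb
# Direct call, cleaning one notebook in place:
nbdev_clean_nbs(fname='nbs/00_core.ipynb')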
Example #2
def nbdev_clean_nbs(fname: Param("A notebook name or glob to clean",
                                 str) = None,
                    clear_all: Param("Clean all metadata and outputs",
                                     bool) = False,
                    disp: Param("Print the cleaned outputs", bool) = False,
                    read_input_stream: Param(
                        "Read input stream and not nb folder") = False):
    "Clean all notebooks in `fname` to avoid merge conflicts"
    # Git hooks pass the notebooks via stdin
    if read_input_stream and sys.stdin:
        input_stream = io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8')
        nb = json.load(input_stream)
        clean_nb(nb, clear_all=clear_all)
        _print_output(nb)
        return
    files = Config().nbs_path.glob(
        '**/*.ipynb') if fname is None else glob.glob(fname)
    for f in files:
        if not str(f).endswith('.ipynb'): continue
        nb = read_nb(f)
        clean_nb(nb, clear_all=clear_all)
        if disp: _print_output(nb)
        else:
            NotebookNotary().sign(nb)
            nbformat.write(nb, str(f), version=4)
Example #3
def nbdev_nb2md(fname:Param("A notebook file name to convert", str),
                dest:Param("The destination folder", str)='.',
                img_path:Param("Folder to export images to")="",
                jekyll:Param("Whether to use jekyll metadata for your markdown file", bool_arg)=False):
    "Convert the notebook in `fname` to a markdown file"
    nb_detach_cells(fname, dest=img_path)
    convert_md(fname, dest, jekyll=jekyll, img_path=img_path)
Example #4
def nbdev_new(name: Param("A directory to create the project in", str),
              template_git_repo: Param("url to template repo", str)=_template_git_repo):
    "Create a new nbdev project with a given name."

    path = Path(f"./{name}").absolute()

    if path.is_dir():
        print(f"Directory {path} already exists. Aborting.")
        return

    print(f"Creating a new nbdev project {name}.")

    def rmtree_onerror(func, path, exc_info):
        "Use with `shutil.rmtree` when you need to delete files/folders that might be read-only."
        os.chmod(path, stat.S_IWRITE)
        func(path)

    try:
        subprocess.run(['git', 'clone', f'{template_git_repo}', f'{path}'], check=True, timeout=5000)
        # Note: on windows, .git is created with a read-only flag
        shutil.rmtree(path/".git", onerror=rmtree_onerror)
        subprocess.run("git init".split(), cwd=path, check=True)
        subprocess.run("git add .".split(), cwd=path, check=True)
        subprocess.run(['git', 'commit', '-am', 'Initial'], cwd=path, check=True)  # list form avoids literal quotes in the message

        print(f"Created a new repo for project {name}. Please edit settings.ini and run nbdev_build_lib to get started.")
    except Exception as e:
        print("An error occurred while copying the nbdev project template:")
        print(e)
        if os.path.isdir(path):
            try:
                shutil.rmtree(path, onerror=rmtree_onerror)
            except Exception as e2:
                print(f"An error occurred while cleaning up. Failed to delete {path}:")
                print(e2)
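A minimal usage sketch; the project name is illustrative and the clone step needs network access to the template repository.

# Clone the template into ./my_lib and re-initialize it as a fresh git repo:
nbdev_new('my_lib')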
Example #5
def nbdev_conda_package(
        path: Param("Path where package will be created", str) = 'conda',
        do_build: Param("Run `conda build` step", bool) = True,
        build_args: Param("Additional args (as str) to send to `conda build`",
                          str) = '',
        do_upload: Param("Run `anaconda upload` step", bool) = True,
        upload_user: Param("Optional user to upload package to") = None):
    "Create a `meta.yaml` file ready to be built into a package, and optionally build and upload it"
    write_conda_meta(path)
    cfg = Config()
    name = cfg.get('lib_name')
    out = f"Done. Next steps:\n```\ncd {path}\n"
    out_upl = f"anaconda upload $CONDA_PREFIX/conda-bld/noarch/{name}-{cfg.get('version')}-py_0.tar.bz2"
    if not do_build:
        print(f"{out}conda build {name}\n{out_upl}\n```")
        return

    os.chdir(path)
    try:
        res = check_output(f"conda build {build_args} {name}".split()).decode()
    except subprocess.CalledProcessError as e:
        print(f"{e.output}\n\nBuild failed.")
        return
    if 'anaconda upload' not in res:
        print(f"{res}\n\nBuild failed.")
        return

    upload_str = re.findall('(anaconda upload .*)', res)[0]
    if upload_user:
        upload_str = upload_str.replace('anaconda upload ',
                                        f'anaconda upload -u {upload_user} ')
    try:
        res = check_output(upload_str.split(), stderr=STDOUT).decode()
    except subprocess.CalledProcessError as e:
        print(f"{e.output}\n\nUpload failed.")
        return
    if 'Upload complete' not in res: print(f"{res}\n\nUpload failed.")
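A hedged usage sketch: with do_build=False the function only prints the conda build and anaconda upload commands instead of running them, which makes for a safe dry run.

nbdev_conda_package(path='conda', do_build=False)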
Example #6
def nbdev_test_nbs(fname: Param("A notebook name or glob to test",
                                str) = None,
                   flags: Param("Space separated list of flags", str) = None,
                   n_workers: Param("Number of workers to use", int) = None,
                   verbose: Param("Print errors along the way", bool) = True,
                   timing: Param(
                       "Time each notebook to see which ones are slow",
                       bool) = False):
    "Test in parallel the notebooks matching `fname`, passing along `flags`"
    if flags is not None: flags = flags.split(' ')
    if fname is None:
        files = [
            f for f in Config().nbs_path.glob('*.ipynb')
            if not f.name.startswith('_')
        ]
    else:
        files = glob.glob(fname)
    files = [Path(f).absolute() for f in sorted(files)]
    if len(files) == 1 and n_workers is None: n_workers = 0
    # make sure we are inside the notebook folder of the project
    os.chdir(Config().nbs_path)
    results = parallel(_test_one,
                       files,
                       flags=flags,
                       verbose=verbose,
                       n_workers=n_workers)
    passed, times = [r[0] for r in results], [r[1] for r in results]
    if all(passed): print("All tests are passing!")
    else:
        msg = "The following notebooks failed:\n"
        raise Exception(
            msg + '\n'.join([f.name for p, f in zip(passed, files) if not p]))
    if timing:
        for i, t in sorted(enumerate(times), key=lambda o: o[1], reverse=True):
            print(f"Notebook {files[i].name} took {int(t)} seconds")
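A usage sketch with an illustrative notebook name; `flags` is a space-separated string and is assumed to match the test flags defined for the project (e.g. the conventional `slow` flag in nbdev projects).

# A single matching file with n_workers unset runs serially, and timing reports its duration:
nbdev_test_nbs(fname='nbs/00_core.ipynb', flags='slow', timing=True)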
Example #7
def nbdev_nb2md(
    fname: Param("A notebook file name to convert", str),
    dest: Param("The destination folder", str) = '.',
    jekyll: Param("Whether to use jekyll metadata for your markdown file",
                  bool) = True,
):
    "Convert the notebook in `fname` to a markdown file"
    convert_md(fname, dest, jekyll=jekyll)
Example #8
def nbdev_upgrade(migrate2magic:Param("Migrate all notebooks in `nbs_path` to use magic flags", bool_arg)=True,
                  add_collapse_css:Param("Add css for \"#collapse\" components", bool_arg)=True):
    "Update an existing nbdev project to use the latest features"
    if migrate2magic:
        for fname in Config().nbs_path.glob('*.ipynb'):
            print('Migrating', fname)
            nbformat.write(_migrate2magic(read_nb(fname)), str(fname), version=4)
    if add_collapse_css: _add_collapse_css()
Example #9
def nbdev_build_docs(fname:Param("A notebook name or glob to convert", str)=None,
                     force_all:Param("Rebuild even notebooks that haven't changed", bool)=False,
                     mk_readme:Param("Also convert the index notebook to README", bool)=True,
                     n_workers:Param("Number of workers to use", int)=None):
    "Build the documentation by converting notebooks matching `fname` to html"
    notebook2html(fname=fname, force_all=force_all, n_workers=n_workers)
    if fname is None: make_sidebar()
    if mk_readme: make_readme()
Example #10
def nbdev_fix_merge(
    fname: Param("A notebook filename to fix", str),
    fast: Param(
        "Fast fix: automatically fix the merge conflicts in outputs or metadata",
        bool) = True,
    trust_us: Param("Use local outputs/metadata when fast merging",
                    bool) = True):
    "Fix merge conflicts in notebook `fname`"
    fix_conflicts(fname, fast=fast, trust_us=trust_us)
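A usage sketch with an illustrative filename; with the defaults, conflicts in outputs and metadata are fixed automatically, keeping the local side.

nbdev_fix_merge('nbs/00_core.ipynb', fast=True, trust_us=True)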
Example #11
def nbdev_build_docs(fname:Param("A notebook name or glob to convert", str)=None,
                     force_all:Param("Rebuild even notebooks that haven't changed", bool_arg)=False,
                     mk_readme:Param("Also convert the index notebook to README", bool_arg)=True,
                     n_workers:Param("Number of workers to use", int)=None,
                     pause:Param("Pause time (in secs) between notebooks to avoid race conditions", float)=0.5):
    "Build the documentation by converting notebooks matching `fname` to html"
    notebook2html(fname=fname, force_all=force_all, n_workers=n_workers, pause=pause)
    if fname is None: make_sidebar()
    if mk_readme: make_readme()
Example #12
def banet_viirs750_download(region:Param("Region name", str),
    tstart:Param("Start of search window yyyy-mm-dd HH:MM:SS", str),
    tend:Param("End of search window yyyy-mm-dd HH:MM:SS", str),
    path_save:Param("Path to save the outputs of the request", str),
    regions_path:Param("Path for region json files", str)):
    "Download VIIRS750 data for `region` between `tstart` and `tend`"
    Path(path_save).mkdir(exist_ok=True)
    region = Region.load(f'{regions_path}/R_{region}.json')
    viirs_downloader = VIIRS750_download(region, tstart, tend)
    viirs_downloader_list = viirs_downloader.split_times()
    print(f'Splitting request into {len(viirs_downloader_list)} orders.')
    run_all(viirs_downloader_list, path_save)
Example #13
def nbdev_trust_nbs(fname:Param("A notebook name or glob to trust", str)=None,
                    force_all:Param("Trust even notebooks that haven't changed", bool)=False):
    "Trust notebooks matching `fname`"
    check_fname = Config().nbs_path/".last_checked"
    last_checked = os.path.getmtime(check_fname) if check_fname.exists() else None
    files = Config().nbs_path.glob('**/*.ipynb') if fname is None else glob.glob(fname)
    for fn in files:
        if last_checked and not force_all:
            last_changed = os.path.getmtime(fn)
            if last_changed < last_checked: continue
        nb = read_nb(fn)
        if not NotebookNotary().check_signature(nb): NotebookNotary().sign(nb)
    check_fname.touch(exist_ok=True)
Example #14
def deep_animate(source: Param('Path to the source image.', str),
                 driving: Param('Path to the driving video.', str),
                 config: Param('Path to configuration file.', str),
                 checkpoint: Param('Path to model.', str),
                 device: Param('cpu or gpu acceleration', str) = 'cpu',
                 dest: Param('Path to save the generated video.',
                             str) = 'generated_video.mp4',
                 relative: Param('Relative.', bool) = True,
                 adapt_movement_scale: Param('Adapt movement scale.',
                                             bool) = True):

    "Animate the `source` image following the motion of the `driving` video"
    source_image = imageio.imread(source)
    driving_video = imageio.mimread(driving)

    # resize image and video to 256x256
    source_image = resize(source_image, (256, 256))[..., :3]
    driving_video = [
        resize(frame, (256, 256))[..., :3] for frame in driving_video
    ]

    generator, kp_detector = load_checkpoints(config_path=config,
                                              checkpoint_path=checkpoint)

    predictions = animate(source_image,
                          driving_video,
                          generator,
                          kp_detector,
                          relative=relative,
                          adapt_movement_scale=adapt_movement_scale)

    imageio.mimsave(dest, [img_as_ubyte(frame) for frame in predictions])
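A usage sketch with illustrative paths; the config and checkpoint files are assumed to match whatever `load_checkpoints` expects for this model.

deep_animate(source='imgs/source.png',
             driving='vids/driving.mp4',
             config='config/model.yaml',
             checkpoint='weights/model.pth',
             dest='generated_video.mp4')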
Example #15
def nbdev_new(name: Param("A directory to create the project in", str)):
    "Create a new nbdev project with a given name."

    path = Path(f"./{name}").absolute()

    if path.is_dir():
        print(f"Directory {path} already exists. Aborting.")
        return

    print(f"Creating a new nbdev project {name}.")

    try:
        subprocess.run(f"git clone {_template_git_repo} {path}".split(),
                       check=True,
                       timeout=5000)
        shutil.rmtree(path / ".git")
        subprocess.run("git init".split(), cwd=path, check=True)
        subprocess.run("git add .".split(), cwd=path, check=True)
        # list form avoids literal quotes ending up in the commit message
        subprocess.run(['git', 'commit', '-am', 'Initial'],
                       cwd=path,
                       check=True)

        print(
            f"Created a new repo for project {name}. Please edit settings.ini and run nbdev_build_lib to get started."
        )
    except Exception as e:
        print("An error occurred while copying the nbdev project template:")
        print(e)
        if os.path.isdir(path): shutil.rmtree(path)
Example #16
def banet_predict_monthly(region:Param("Region name", str),
                    input_path:Param("Input path for dataset", str),
                    output_path:Param("Output path for tiles dataset", str),
                    year:Param("Set to process a single year instead of all available", int),
                    weight_files:Param("List of pth weight files", list)=_weight_files):

    "Generate monthly prediction maps for `region` for the given `year`"
    iop = InOutPath(input_path, f'{output_path}/{region}')
    times = pd.DatetimeIndex([pd.Timestamp(o.stem.split('_')[-1])
                              for o in (iop/region).src.ls(include=['.mat'])])
    times = times[times.year == year]
    tstart, tend = times.min(), times.max()
    month_start = (tstart + pd.Timedelta(days=31)).month
    for m in range(month_start, tend.month):
        print(f'Generating maps for {calendar.month_name[m]} {year}:')
        t = pd.Timestamp(f'{year}-{m}-01')
        predict_month(iop, t, weight_files, region)
Example #17
def nbdev_bump_version(part: Param("Part of version to bump", int) = 2):
    "Increment version in `settings.ini` by one"
    cfg = Config()
    print(f'Old version: {cfg.version}')
    cfg.d['version'] = bump_version(Config().version, part)
    cfg.save()
    print(f'New version: {cfg.version}')
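A usage sketch: `part` indexes the version component to bump, so for a hypothetical version 0.1.5 the default part=2 gives 0.1.6, while part=1 bumps the minor component (assuming later components reset to zero, as version bumpers typically do).

nbdev_bump_version()        # e.g. 0.1.5 -> 0.1.6
nbdev_bump_version(part=1)  # e.g. 0.1.6 -> 0.2.0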
Example #18
def nbdev_read_nbs(fname:Param("A notebook name or glob to check", str)=None):
    "Check that all notebooks matching `fname` can be opened"
    files = Config().nbs_path.glob('**/*.ipynb') if fname is None else glob.glob(fname)
    for nb in files:
        try: _ = read_nb(nb)
        except Exception as e:
            print(f"{nb} is corrupted and can't be opened.")
            raise e
Example #19
def banet_predict_times(
        region: Param("Region name", str),
        tstart: Param("Start of search window yyyy-mm-dd HH:MM:SS", str),
        tend: Param("End of search window yyyy-mm-dd HH:MM:SS", str),
        input_path: Param("Input path for dataset", str),
        output_path: Param("Output path for tiles dataset", str),
        regions_path: Param("Path for region json files", str),
        product: Param("Name of product (default VIIRS750)", str) = "VIIRS750",
        output: Param("Name of file to save results", str) = "data",
        weight_files: Param("List of pth weight files", list) = _weight_files):

    "Run predictions for `region` for each day between `tstart` and `tend`"
    iop = InOutPath(input_path, f'{output_path}')
    times = pd.date_range(tstart, tend, freq='D')
    R = Region.load(f'{regions_path}/R_{region}.json')
    predict_time(iop, times, weight_files, R, product=product, output=output)
Example #20
def banet_predict_times(region:Param("Region name", str),
                    tstart:Param("Start of search window yyyy-mm-dd HH:MM:SS", str),
                    tend:Param("End of search window yyyy-mm-dd HH:MM:SS", str),
                    input_path:Param("Input path for dataset", str),
                    output_path:Param("Output path for tiles dataset", str),
                    regions_path:Param("Path for region json files", str),
                    weight_files:Param("List of pth weight files", list)=_weight_files):

    "Run predictions for `region` for each day between `tstart` and `tend`"
    iop = InOutPath(input_path, f'{output_path}')
    times = pd.date_range(tstart, tend, freq='D')
    R = Region.load(f'{regions_path}/R_{region}.json')
    predict_time(iop, times, weight_files, R)
Example #21
def banet_create_dataset(
    region: Param("Region name", str),
    viirs_path: Param("Input path for VIIRS raw data", str),
    fires_path: Param("Input path for Active Fires csv", str),
    save_path: Param("Path to save outputs", str),
    regions_path: Param("Path where region definition files are stored", str),
    mcd64_path: Param("Input path for MCD64 raw data", str) = None,
    cci51_path: Param("Input path for FireCCI51 raw data", str) = None,
    bands: Param("List of bands to use as inputs for VIIRS raw data",
                 str) = _bands,
    year: Param("Set to process a single year instead of all available",
                int) = None):

    "Create the dataset for `region` from VIIRS750 data, optionally adding MCD64A1C6 and FireCCI51"
    paths = InOutPath(f'{viirs_path}', f'{save_path}')
    R = Region.load(f'{regions_path}/R_{region}.json')

    # VIIRS750
    print('\nCreating dataset for VIIRS750')
    viirs = Viirs750Dataset(paths, R, bands=bands)
    viirs.filter_times(year)
    merge_tiles = MergeTiles('SatelliteZenithAngle')
    mir_calc = MirCalc('SolarZenithAngle', 'Radiance_M12', 'Radiance_M15')
    rename = BandsRename(['Reflectance_M5', 'Reflectance_M7'], ['Red', 'NIR'])
    bfilter = BandsFilter(['Red', 'NIR', 'MIR'])
    act_fires = ActiveFires(f'{fires_path}/hotspots{R.name}.csv')
    viirs.process_all(
        proc_funcs=[merge_tiles, mir_calc, rename, bfilter, act_fires])

    # MCD64A1C6
    if mcd64_path is not None:
        print('\nCreating dataset for MCD64A1C6')
        paths.input_path = Path(mcd64_path)
        mcd = MCD64Dataset(paths, R)
        mcd.match_times(viirs)
        mcd.process_all()

    # FireCCI51
    if cci51_path is not None:
        print('\nCreating dataset for FireCCI51')
        paths.input_path = Path(cci51_path)
        cci51 = FireCCI51Dataset(paths, R)
        cci51.match_times(viirs)
        cci51.process_all()
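A usage sketch with an illustrative region name and paths; the MCD64A1C6 and FireCCI51 steps only run when their respective paths are given.

banet_create_dataset(region='PI', viirs_path='data/viirs',
                     fires_path='data/hotspots', save_path='data/dataset',
                     regions_path='data/regions', year=2017)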
Example #22
def banet_viirs750_download(region:Param("Region name", str),
    tstart:Param("Start of search window yyyy-mm-dd HH:MM:SS", str),
    tend:Param("End of search window yyyy-mm-dd HH:MM:SS", str),
    email:Param("ladsweb user email", str),
    auth:Param("ladsweb user authentication key (go to Profile>App Keys)", str),
    path_save:Param("Path to save the outputs of the request", str),
    regions_path:Param("Path for region json files", str)=_regions_path):

    "Request and download VIIRS750 data for `region` between `tstart` and `tend`"
    region = Region.load(f'{regions_path}/R_{region}.json')
    viirs_downloader = VIIRS750_download(region, tstart, tend)
    viirs_downloader_list = viirs_downloader.split_times()
    print(f'Splitting request into {len(viirs_downloader_list)} orders.')
    run_parallel(viirs_downloader_list, path_save, email, auth)
Example #23
def banet_dataset2tiles(region:Param("Region name", str),
                  input_path:Param("Input path for dataset", str),
                  output_path:Param("Output path for tiles dataset", str),
                  size:Param("Tiles size", int)=128,
                  step:Param("Step size of moving window to create tiles", int)=100,
                  year:Param("Set to process a single year instead of all available", int)=None):

    "Cut the `region` dataset into tiles of `size` using a moving window of `step`"
    iop = InOutPath(input_path, output_path)
    r2t = Region2Tiles(iop, 'VIIRS750', 'MCD64A1C6', regions=[region],
                       bands=[['Red', 'NIR', 'MIR', 'FRP'], ['bafrac']],
                       size=size, step=step)
    if year is None: r2t.process_all()
    else: r2t.process_all(include=[f'_{year}'])
Example #24
def fire_split_run(input_path: Param("Directory with tif files", str),
                   output_path: Param("Directory to save outputs", str),
                   interval_days: Param("Number of days to keep as same event",
                                        int) = 16,
                   interval_pixels: Param("Number of n x n buffer pixels",
                                          int) = 8,
                   min_size_pixels: Param("Ignore fires smaller than this",
                                          int) = 1,
                   save_tif: Param("Save output as tif", bool) = True,
                   save_shape: Param("Save output as shapefile", bool) = True):
    "Split individual fire events in the tif files of `input_path` and save the results"
    path = Path(input_path)
    out = Path(output_path)
    run_all(path,
            out,
            interval_days=interval_days,
            interval_pixels=interval_pixels,
            min_size_pixels=min_size_pixels,
            save_tif=save_tif,
            save_shape=save_shape)
Example #25
def tango(
    q_path:Param("Path to the query video", str),
    cor_path:Param("Path to the corpus", str),
    cb_path:Param("Path to the codebook", str),
    vis_path:Param("Path to SimCLR checkpoint", str),
    fps:Param("FPS to set the videos to", int) = 30,
    approach:Param("Approach to use: vis, txt, comb", str) = 'vis'
):
    "Rank the videos in `cor_path` by similarity to the query video in `q_path`"
    q_path = Path(q_path)
    cor_path = Path(cor_path)
    cb_path = Path(cb_path)
    vis_path = Path(vis_path)

    q_vid = Video(q_path, fps)
    with open(cb_path, 'rb') as f:
        codebook = pickle.load(f)
    simclr = SimCLRModel.load_from_checkpoint(checkpoint_path = str(vis_path)).eval()
    model = SimCLRExtractor(simclr)

    vid_ds = VideoDataset.from_path(cor_path).label_from_paths()
    sorted_rankings = compute_sims(q_vid, vid_ds, model, codebook, 1_000, fps, 5)
    pp = pprint.PrettyPrinter(indent=4)
    pp.pprint(sorted_rankings)
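A usage sketch with illustrative paths; the corpus layout is assumed to match what `VideoDataset.from_path` expects, and the codebook and SimCLR checkpoint must be the ones the model was built with.

tango(q_path='queries/bug_report.mp4', cor_path='corpus/videos',
      cb_path='models/codebook.pkl', vis_path='models/simclr.ckpt',
      fps=30, approach='vis')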
Example #26
def nbdev_update_lib(fname: Param("A notebook name or glob to convert",
                                  str) = None):
    "Propagates any change in the modules matching `fname` to the notebooks that created them"
    script2notebook(fname=fname)
Example #27
def nbdev_build_lib(fname: Param("A notebook name or glob to convert",
                                 str) = None):
    "Export notebooks matching `fname` to python modules"
    write_tmpls()
    notebook2script(fname=fname)
Example #28
def nbdev_detach(path_nb: Param("Path to notebook"),
                 dest: Param("Destination folder", str) = "",
                 use_img: Param("Convert markdown images to img tags",
                                bool_arg) = False):
    "Export cell attachments to `dest` and update references"
    nb_detach_cells(path_nb, dest=dest, use_img=use_img)
Example #29
def nbdev_detach(path_nb: Param("Path to notebook"),
                 dest: Param("Destination folder", str) = ""):
    "Export cell attachments to `dest` and update references"
    nb_detach_cells(path_nb, dest=dest)
Example #30
def banet_nrt_run(region: Param("Region name", str),
                  left: Param("Left limit of the bounding box.", float),
                  bottom: Param("Bottom limit of the bounding box.", float),
                  right: Param("Right limit of the bounding box.", float),
                  top: Param("Top limit of the bounding box.", float),
                  project_path: Param("Root directory of the project", str),
                  hotspots_region: Param("Hotspots region name", str),
                  time: Param("Day for the run",
                              str,
                              choices=["today", "yesterday"]) = "today",
                  threshold: Param("Threshold to apply to output of the model",
                                   float) = 0.5,
                  skip_hotspots: Param("Skip download of hotspots data",
                                       bool) = False,
                  skip_ladsweb: Param("Skip download of ladsweb data",
                                      bool) = False,
                  skip_preprocess: Param("Skip preprocessing of the dataset",
                                         bool) = False,
                  skip_getpreds: Param("Skip computing the predictions",
                                       bool) = False):
    "Run the near-real-time banet pipeline for `region`"
    paths = ProjectPath(project_path)
    weight_files = [
        'banetv0.20-val2017-fold0.pth', 'banetv0.20-val2017-fold1.pth',
        'banetv0.20-val2017-fold2.pth'
    ]
    manager = RunManager(paths, region, time=time)
    R = {
        'name': region,
        'bbox': [left, bottom, right, top],
        'pixel_size': 0.01
    }
    dict2json(R, paths.config / f'R_{region}.json')
    if not skip_hotspots: manager.update_hotspots(hotspots_region)
    if not skip_ladsweb: manager.download_viirs()
    if not skip_preprocess: manager.preprocess_dataset()
    if not skip_getpreds: manager.get_preds(weight_files, threshold=threshold)
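A usage sketch with an illustrative region and bounding box; the skip_* flags make it possible to rerun individual stages of the pipeline without repeating the others.

banet_nrt_run(region='PT', left=-10.0, bottom=36.0, right=-6.0, top=44.0,
              project_path='runs/pt', hotspots_region='Europe',
              time='today', threshold=0.5)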