remote_commit = get_last_commit(repo_url, branch)
    if remote_commit == last_commit:
        print('Already up to date')
    else:
        print('Will need to update', remote_commit, last_commit)
        os.system('git clone -b {0} --depth=1 {1} {2}'.format(
            branch, repo_url, root))
        walk_upload(ftp_dir, root, upload_root)

        with open(last_commit_file, 'w') as file:
            file.write(remote_commit)

    os.system('rm -rf ' + root)


def run_update(host, user, password, port=21, ftp_dir='/htdocs/testing'):
    global ftp_command
    ftp_command = ftp_command.format_map(
        LeaveMissing(user=user, password=password, host=host, port=port))

    update_branch(
        'master', ftp_dir, '/',
        'https://github.com/covid19cubadata/covid19cubadata.github.io.git')
    update_branch(
        'gh-pages', ftp_dir, '/api',
        'https://github.com/covid19cuba/covid19cubadata.github.io.git')


if __name__ == "__main__":
    fire.Fire(run_update)
Example #2
        "/knn-entity-search",
        resource_class_kwargs={'ent_dict_name': knn.ent_dict_name})
    api.add_resource(RelationSearch,
                     "/knn-relation-search",
                     resource_class_kwargs={'rel_dict_uri': knn.rel_dict_uri})
    CORS(app)
    app.run(host="0.0.0.0", port="5006")


def launch_api_multi(ent_paths, rel_path, entity_name_file, relation_name_file,
                     port):
    app = Flask(__name__)
    api = Api(app)
    knn = load_multi_knn(ent_paths, rel_path, entity_name_file,
                         relation_name_file)
    api.add_resource(Knn, "/knn", resource_class_kwargs={'knn': knn})
    api.add_resource(IndexedEntitySearch,
                     "/knn-entity-search",
                     resource_class_kwargs={'entity_index': knn.entity_index})
    api.add_resource(RelationSearch,
                     "/knn-relation-search",
                     resource_class_kwargs={
                         'rel_dict_uri': knn.relation_index.uri_to_entity
                     })
    CORS(app)
    app.run(host="0.0.0.0", port=port)


if __name__ == "__main__":
    fire.Fire(launch_api_multi)
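# A hypothetical command-line invocation via Fire (the script name and all
# argument values below are placeholders, not taken from the original project):
#   python knn_api.py --ent_paths '["ent_a.npy", "ent_b.npy"]' --rel_path rel.npy \
#       --entity_name_file entities.txt --relation_name_file relations.txt \
#       --port 5006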
        self.df_result = self.test_df.merge(self.train_df,
                                            on=['userid', 'movieid'])
        # in order to get the original ids we just need to add 1
        self.df_result['userid'] = self.df_result['userid'] + 1
        self.df_result['movieid'] = self.df_result['movieid'] + 1
        if self.user_info_file is not None:
            self.df_result = self.df_result.merge(self.user_info_df,
                                                  on=['userid'])
        if self.movie_info_file is not None:
            self.df_result = self.df_result.merge(self.movie_info_df,
                                                  on=['movieid'])
        self.df_result.to_csv(self.outdir + 'test_results.csv', index=False)

        print(f'output written to {self.outdir}test_results.csv')
        test_rmse = (np.mean(
            (self.df_result['rating'].values -
             self.df_result['predicted_rating'].values)**2))**0.5
        print(f'test RMSE : {test_rmse}')

    def main_process(self):
        self.read_data()

        if self.mode == 'train':
            self._train()
        else:
            self.inference()

if __name__ == '__main__':
    with ElapsedTimer('process RBM'):
        fire.Fire(recommender)
        elif total_dev_samples == 0 and trainall is True:
            classification_model.fit_generator(generator=train_gen,
                                               steps_per_epoch=self.steps_per_epoch,
                                               epochs=opt.num_epochs,
                                               shuffle=True)

        elif total_dev_samples != 0 and trainall is True:
            dev_gen = self.get_sample_generator(dev, batch_size=opt.batch_size)
            self.validation_steps = int(np.ceil(total_dev_samples / float(opt.batch_size)))

            for epoch in range(opt.num_epochs):
                self.logger.info('epoch: %d' % epoch)
                classification_model.fit_generator(generator=train_gen,
                                                   steps_per_epoch=self.steps_per_epoch,
                                                   epochs=1,
                                                   shuffle=True)
                classification_model.fit_generator(generator=dev_gen,
                                                   steps_per_epoch=self.validation_steps,
                                                   epochs=1,
                                                   shuffle=True)

        with open(self.model_fname + '.json', 'w') as json_file:
            json_file.write(classification_model.to_json())
        classification_model.save(self.model_fname + '.h5')


if __name__ == '__main__':
    clsf = Classifier()
    fire.Fire({'train': clsf.train,
               'predict': clsf.predict})
Example #5
                  'only_last': False,
                  'gradSteps': 40,
                  'noRestarts': 1,
                  'lr_pgd': 1e-2,
                  'training_type': 'FGSM',
                  'slope': 1.00,
                  'eig_start': tau}

        counter = 0
        for reg_strength in regularizers_strengths:
            kwargs['alpha_spectra'] = reg_strength
            models = [ModelFactory(**kwargs) for j in range(realizations)]
            print('no vibes')
            for j in range(realizations):
                for epoch in tqdm(range(num_epochs)):
                    models[j].train_epoch(train_loader, X_full)


            model_params = []
            for idx in range(len(models)):
                model_params.append((kwargs, models[idx].state_dict()))

            save_name = (f"{save_dir}{dataset}/tau={tau}_arch={arch}"
                         f"_activation={activation}_epochs={num_epochs}"
                         f"_alpha=1_beta={reg_strength}")
            torch.save(model_params, save_name)
            counter += 1
            print(str(len(regularizers_strengths) - counter) + " combos left")


if __name__ == '__main__':
    fire.Fire(train_network)
Example #6
    model_path (string): path to a pretrained model in .h5 format.
    input_path (string): path to the input file in CoNLL format of words to be syllabified.
    """
    words = read_conll_single(
        input_path
    )  # words: list [ { tokens: [ raw_tokens, ... ] } ... ]

    model = BiLSTM.load_model(model_path, config_path)
    data_matrix = create_data_matrix(words, model.mappings)
    tags = model.tagWords(data_matrix)["english"]

    print("\nTagged Words: ")
    for i, word in enumerate(words):
        joined = []
        for j, ch in enumerate(word["tokens"]):
            # pad tags with 0 to length of word.
            if len(tags[i]) < len(word["tokens"]):
                tags[i] += [0] * (len(word["tokens"]) - len(tags[i]))
            joined.append((ch, tags[i][j]))

        for tup in joined:
            print(tup[0], end="")
            if tup[1] == 1:
                print("-", end="")

        print("")


if __name__ == "__main__":
    fire.Fire(run)
Example #7
File: main.py  Project: agroszer/skyscan
    def process(self, fname, outfname='/tmp/output.csv'):
        reader = CSVSource()
        rows = reader.load(fname)

        ota = self.ota_class()

        results = []
        for row in rows:
            res = ota.query(row['DateFrom'], row['DateTo'], row['from'],
                            row['to'], row['persons'])
            results.append(res)

        writer = CSVResult(outfname)
        writer.write(results)

    def test(self):
        self.ota_class = SkyScanner
        self.process('./input.csv')

        self.ota_class = SkyPicker
        self.process('./input.csv')


def main():
    fire.Fire(SkyScan)


if __name__ == '__main__':
    fire.Fire(SkyScan)
Example #8
            sample_idx += len(data)
        for t in ['train', 'dev']:
            if chunk[t]['num'] > 0:
                self.copy_chunk(dataset[t], chunk[t], num_samples[t], True)
                num_samples[t] += chunk[t]['num']

        for div in ['train', 'dev']:
            ds = dataset[div]
            size = num_samples[div]
            shape = (size, opt.max_len)
            ds['uni'].resize(shape)
            ds['w_uni'].resize(shape)
            ds['img_feat'].resize((size, opt.img_size))
            ds['cate'].resize((size, len(self.y_vocab)))

        data_fout.close()
        meta = {'y_vocab': self.y_vocab}
        meta_fout.write(cPickle.dumps(meta, 2))
        meta_fout.close()

        self.logger.info('# of classes: %s' % len(meta['y_vocab']))
        self.logger.info('# of samples on train: %s' % num_samples['train'])
        self.logger.info('# of samples on dev: %s' % num_samples['dev'])
        self.logger.info('data: %s' % os.path.join(output_dir, 'data.h5py'))
        self.logger.info('meta: %s' % os.path.join(output_dir, 'meta'))


if __name__ == '__main__':
    data = Data()
    fire.Fire({'make_db': data.make_db, 'build_y_vocab': data.build_y_vocab})
Example #9
            print('(%d %d%%) %.4f' %
                  (iter, iter / n_iters * 100, print_loss_avg))

        if iter % plot_every == 0:
            plot_loss_avg = plot_loss_total / plot_every
            plot_losses.append(plot_loss_avg)
            plot_loss_total = 0

    # Save the encoder and decoder parameters
    info_encoder = {
        'state_dict': encoder.state_dict(),
        'in_size': input_lang.n_words,
        'hidd_size': hidd_size,
        'in_lang': input_lang,
        'out_lang': output_lang
    }
    info_decoder = {
        'state_dict': decoder.state_dict(),
        'out_size': output_lang.n_words,
        'hidd_size': hidd_size
    }
    torch.save(info_encoder, 'encoder.pkl')
    torch.save(info_decoder, 'decoder.pkl')

    return plot_losses


if __name__ == '__main__':
    # Only expose the train function to the command line
    fire.Fire(train)
Example #10
                        handler.x -= 0.4
                else:
                    for j in range(Nv):
                        for k in range(Ny):
                            swap(handler, k * Ng + j, k * Ng + j + 1)
                        handler.x += 0.3

                    handler.x += 0.2
                    for k in range(Ng):
                        for j in range(0, Ny):
                            swap(handler, j * Ng + k, (j + 1) % Ny * Ng + k)
                            handler.x += 0.4
                        handler.x -= 0.4
                handler.x += 0.7
                b[0].text(r"$\times d$", "top")

                handler.x += 0.3
                for k in range(Ny):
                    if i != Nx - Nv - 1:
                        m = measure_reset(handler, k * Ng, basis)
                handler.x += 1.0

            handler.x -= 0.7
            for i in range(num_bit):
                measure(handler, i, basis)

            box >> (slice(8.5, 21.5), slice(-5, -1.5))


fire.Fire(PLT())
Example #11
            takeover = Takeover(subdomains)
            takeover.run()

    def run(self):
        print(banner)
        dt = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        print(f'[*] Starting OneForAll @ {dt}\n')
        logger.log('DEBUG', 'Python ' + utils.python_version())
        logger.log('DEBUG', 'OneForAll ' + config.oneforall_version)
        logger.log('INFOR', 'Start running OneForAll')
        self.domains = utils.get_domains(self.target)
        if self.domains:
            for self.domain in self.domains:
                self.main()
            if len(self.domains) >= 2:
                utils.export_all(self.format, self.datas)
        else:
            logger.log('FATAL', 'Failed to get domains')
        logger.log('INFOR', 'OneForAll run finished')

    @staticmethod
    def version():
        print(banner)
        exit(0)


if __name__ == '__main__':
    fire.Fire(OneForAll)
    # OneForAll('example.com').run()
    # OneForAll('./domains.txt').run()
Example #12
'''
simple maritime CLI using fire
'''
import fire


class Ships():
    # command groups with subcommands can be defined using classes
    def sail(self):
        ship_name = 'Your ship'
        print(f'{ship_name} is setting sail!')

    def list(self):
        ships = ['Going Merry', 'Thousand Sunny', 'Victory Hunter', 'Baratie']
        print(f"Ships: {', '.join(ships)}")


def sailors(greeting, name):
    print(f'{greeting} {name}')


class Cli():
    # top level group containing command (sailors) and command groups (ships)
    def __init__(self):
        self.sailors = sailors
        self.ships = Ships()


if __name__ == '__main__':
    fire.Fire(Cli)
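# Hypothetical invocations, assuming this file is saved as cli.py; Fire exposes
# Cli's attributes as command groups and their methods as subcommands:
#   python cli.py ships list
#   python cli.py ships sail
#   python cli.py sailors Ahoy Luffy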
Example #13
File: aiobrute.py  Project: zsdlove/bayonet
            logger.log(
                'DEBUG', f'{self.source} module found subdomains of {self.domain}:\n'
                f'{self.subdomains}')
            if not self.path:
                name = f'{self.domain}_brute_result.{self.format}'
                self.path = Oneforall.result_save_path.joinpath(name)
            # export from the database
            if self.export:
                dbexport.export(self.domain,
                                valid=self.valid,
                                path=self.path,
                                format=self.format,
                                show=self.show)


def do(domain, result):  # unified entry-point name, convenient for multi-threaded calls
    """
    Unified invocation entry point for the class

    :param str domain: domain name
    :param result: result queue
    """
    brute = AIOBrute(domain)
    brute.run(result)


if __name__ == '__main__':
    fire.Fire(AIOBrute)
    # result_queue = queue.Queue()
    # do('example.com', result_queue)
Example #14
class PandasData(object):
    def __init__(self, columns):
        self.columns = columns

    def to_df(self, *iterators):
        mapped = zip(*iterators)
        df = pd.DataFrame(list(mapped), columns=self.columns)
        return df


"""
procs-pandas-and-collections.ipynb
"""


class PandasHelper(object):
    def tests(self):
        columns = ['name', 'roll', 'marks']
        data = PandasData(columns)
        # initializing lists
        name = ["Manjeet", "Nikhil", "Shambhavi", "Astha"]
        roll_no = [4, 1, 3, 2]
        marks = [40, 50, 60, 70]
        df = data.to_df(name, roll_no, marks)
        print(df)


if __name__ == '__main__':
    import fire
    fire.Fire(PandasHelper)
Example #15
File: utils.py  Project: bityangke/WSSS2020
        # print("cpl_arr.shape ", cpl_arr.shape)
        # print("ppl_arr.shape ", ppl_arr.shape)
        # print("final.shape ", final.shape)
        scipy.misc.toimage(final, cmin=0, cmax=255, pal=colors_map,
                           mode="P").save(
                               os.path.join(save_path, label_name + '.png'))
    # === evaluation ===
    evaluate_dataset_IoU(
        file_list=args.path4train_aug_images,
        predicted_folder=save_path,
        path4GT=args.path4VOC_class_aug,
        descript="OurCPLmIoU=59.77@OurPPLmIoU=77.05@train_aug@afterCRF")


""" 2020.6.25 """


def get_least_modify_file(folder, show_sorted_data=False):
    dir_list = os.listdir(folder)
    dir_list = sorted(dir_list,
                      key=lambda x: os.path.getmtime(os.path.join(folder, x)))
    if show_sorted_data:
        for it in dir_list:
            print("dir_list: {:<80}  time: {:}".format(
                it, os.path.getmtime(os.path.join(args.path4GCN_logit, it))))
    return dir_list[-1]


if __name__ == "__main__":
    fire.Fire()
Example #16
            if use_title:
                if sn in summaries_splits:
                    web_page_summaries = summaries_splits[sn]
                    entitys += [' '.join(s) for s in web_page_summaries]
            anno['partition_description'].append(entitys)

    out_path = anno_json.replace('.json', '.entity.json')
    with open(out_path, 'w') as f:
        for anno_line in meme_anno:
            seri_line = json.dumps(anno_line)
            f.write(f"{seri_line}\n")


if __name__ == "__main__":
    from loguru import logger
    """
    create_img_list --> detect_dataset --> create_description --> titles_cleanup 
    --> titles_summary  --> insert_anno_jsonl
    """

    with logger.catch(reraise=True):
        fire.Fire({
            'detect_web': detect_web,
            'detect_image': detect_image,
            'detect_dataset': detect_dataset,
            'create_img_list': create_img_list,
            'create_description': create_description,
            'titles_cleanup': titles_cleanup,
            'titles_summary': titles_summary,
            'insert_anno_jsonl': insert_anno_jsonl,
        })
Example #17
        embeddingBlock=EmbeddingBlock(
            blockArgument1='',
            blockArgument2='',
        ),
        lstmBlock=LstmBlock(
            blockArgument1='',
            blockArgument2='',
        ),
        outputBlock=OutputBlock(
            blockArgument1='',
            blockArgument2='',
        ),
    )
    modelManager = TensorflowModelManager(
        model=myTestModel,
        optimizer=tf.keras.optimizers.Adam(lr=1),
        batchProcessor=batchProcessor,
        project=project,
        restoreWeightsPath=None
    )
    modelManager.run(
        stepCount=100,
        evaluationSteps=list(range(0, 100, 10)),
        tracingSteps=list(range(0, 100, 5))
    )


if __name__ == '__main__':
    tf.get_logger().setLevel('ERROR')
    fire.Fire(train_my_test_model)
Example #18
    assert dir_path.exists(), f'Error: {dir_path} does not exist.'
    df_trn = pd.read_csv(dir_path / 'datatrain.csv',
                         header=None,
                         chunksize=chunksize)
    df_val = pd.read_csv(dir_path / 'datavalid.csv',
                         header=None,
                         chunksize=chunksize)

    # df_trn = pd.read_csv(dir_path / 'train.csv', header=None, chunksize=chunksize)
    # df_val = pd.read_csv(dir_path / 'val.csv', header=None, chunksize=chunksize)

    # df_trn = pd.read_csv( 'data/wiki/sw/datatrain.csv', header=None, chunksize=24000 )
    # df_val = pd.read_csv( 'data/wiki/sw/datavalid.csv', header=None, chunksize=24000 )

    tmp_path = dir_path / 'tmp'
    tmp_path.mkdir(exist_ok=True)
    tok_trn, trn_labels = get_all(df_trn, n_lbls, lang=lang)
    tok_val, val_labels = get_all(df_val, n_lbls, lang=lang)

    np.save(tmp_path / 'tok_trn.npy', tok_trn)
    np.save(tmp_path / 'tok_val.npy', tok_val)
    np.save(tmp_path / 'lbl_trn.npy', trn_labels)
    np.save(tmp_path / 'lbl_val.npy', val_labels)

    trn_joined = [' '.join(o) for o in tok_trn]
    with open(tmp_path / 'joined.txt', 'w', encoding='utf-8') as joined_file:
        joined_file.writelines(trn_joined)


if __name__ == '__main__':
    fire.Fire(create_toks)
Example #19
    if os.path.exists(output):
        print(f"Output exists.")
        return
    else:
        os.makedirs(output)

    with LmdbDataset(input) as db:
        count = db.read_count()

        if num == -1 or num > count:
            convert_count = count
        else:
            convert_count = num

        print(f"Total count: {count}, will convert: {convert_count}")
        for i in tqdm(range(convert_count)):
            num = "{:09d}".format(i)
            ret = db.read(num)
            label = ret["label"]
            image = ret["image"]
            cv2.imwrite(os.path.join(output, num + ".jpg"), image)
            labels.append((num, label))

    with open(os.path.join(output, "label.txt"), "w", encoding="utf-8") as f:
        for num, label in labels:
            f.write(f"{num} {label}\n")


if __name__ == "__main__":
    fire.Fire(lmdb2img)
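# Hypothetical invocation (paths are placeholders); passing num=-1 converts
# every record in the LMDB, per the check above:
#   python lmdb2img.py --input ./train_lmdb --output ./images --num=-1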
            x=traindf[COLUMNS],
            y=traindf[LABEL] / scale,  # scale down
            num_epochs=None,
            batch_size=batch_size,
            shuffle=True,
        ),
        max_steps=num_training_steps)

    eval_spec = tf.estimator.EvalSpec(
        input_fn=tf.estimator.inputs.pandas_input_fn(
            x=evaldf[COLUMNS],
            y=evaldf[LABEL] / scale,  # scale down
            num_epochs=1,
            batch_size=len(evaldf),
            shuffle=False),
        steps=None,
        start_delay_secs=1,
        throttle_secs=10,
    )

    estimator = make_keras_estimator(hidden_units.split(','))
    #estimator = tf.contrib.estimator.add_metrics(estimator, scaled_rmse(scale))  # for now this doesn't work okay

    metrics, _ = tf.estimator.train_and_evaluate(estimator, train_spec,
                                                 eval_spec)
    return metrics


if __name__ == '__main__':
    fire.Fire(task)
Example #21
File: main.py  Project: agroszer/skyscan
def main():
    fire.Fire(SkyScan)
Example #22
def main():
    """ Convert to CLI """
    fire.Fire(plan_apply_dag)
Example #23
            else:
                raw_text = input_text
            print(raw_text)
            context_tokens = enc.encode(raw_text)
            generated = 0
            for _ in range(nsamples // batch_size):
                out = sess.run(output,
                               feed_dict={
                                   context:
                                   [context_tokens for _ in range(batch_size)]
                               })[:, len(context_tokens):]
                for i in range(batch_size):
                    generated += 1
                    text = enc.decode(out[i])
                    print("=" * 40 + " SAMPLE " + str(generated) + " " +
                          "=" * 40)
                    print(text)
                    # Save the output in a text file as well
                    file1 = open("GPT-2_output.txt", "w")
                    file1.writelines(raw_text)
                    file1.writelines(text)
                    file1.close()  # to change file access modes
                    output = text
                    return output
            print("=" * 80)
            return output


if __name__ == '__main__':
    fire.Fire(interact_model)
        df = df.sample(2, random_state=0)
        dataset_name = f"{dataset_name}_test"

    # create the dataset
    ds = Dataset(
        dataset=df,
        name=dataset_name,
        package_owner=package_owner,
        readme_path=
        "/allen/aics/gene-editing/FISH/2019/chaos/data/normalized_2D_tiffs/5500000322_500000323/README.md",
    )

    # set data path cols, metadata cols, and extra files
    ds.set_metadata_columns(["fov_id", "original_fov_location"])
    ds.set_path_columns(
        ["rescaled_2D_fov_tiff_path", "rescaled_2D_single_cell_tiff_path"])
    ds.set_extra_files([
        "/allen/aics/gene-editing/FISH/2019/chaos/data/normalized_2D_tiffs/5500000322_500000323/channel_defs.json",
        "/allen/aics/gene-editing/FISH/2019/chaos/data/normalized_2D_tiffs/5500000322_500000323/parameters.json",
    ])

    # tag with commit hash
    label = (subprocess.check_output(["git", "rev-parse",
                                      "HEAD"]).strip().decode("utf-8"))
    ds.distribute(s3_bucket,
                  message=f"git commit hash of fish_morphology_code = {label}")


if __name__ == "__main__":
    fire.Fire(distribute_autocontrasted_dataset)
Example #25
import fire

from .core import Main


if __name__ == "__main__":
    fire.Fire(Main)
Example #26
import fire
import requests

base_url = "https://api.spotifam.com"

class SpotifamAPI(object):

    def empty(self):
        result = requests.get(base_url)
        print(result.text)

    def getqueue(self, room_code):
        request_url = base_url + "/getqueue/?room_code=" + str(room_code)
        result = requests.get(request_url)
        print(result.text)

    def createroom(self, api_auth_token):
        request_url = base_url + "/createroom?auth_token=" + api_auth_token
        result = requests.get(request_url)
        print(result.text)

    def addsong(self, room_code, song_uid):
        request_url = base_url + "/addsong/"
        print("THIS IS NOT BUILT YET")


# RUN --------------------------------------------------------------------------

if __name__ == '__main__':
    fire.Fire(SpotifamAPI)
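# Hypothetical invocations, assuming this file is saved as spotifam_cli.py
# (the room code and auth token below are made up):
#   python spotifam_cli.py empty
#   python spotifam_cli.py getqueue --room_code ABCD
#   python spotifam_cli.py createroom --api_auth_token 1234abcd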
Example #27
def render_metadata(metadata, filename, render_path_prefix):
    fmt = f"""## [{metadata.title}]({render_path_prefix}/{filename})
📅 {metadata.date}
🏷 {metadata.tag}

{metadata.description}
<hr />
    """

    return fmt


def main(doc_dir, render_path_prefix):
    sorted_path = sorted(
        [p for p in pathlib.Path(doc_dir).iterdir() if p.is_file()],
        key=lambda x: x.name,
        reverse=True)
    for path in sorted_path:
        with open(path) as f:
            markdown_str = f.read()
            metadata = get_metadata(markdown_str)
            fmt = render_metadata(metadata, path.name, render_path_prefix)
            print(fmt)


if __name__ == "__main__":
    import fire

    fire.Fire(main)
Example #28
        time.sleep(3)
        self.driver.find_element_by_id('J_username').send_keys(self.username)
        self.driver.find_element_by_id('J_pwd').send_keys(self.passwd)
        while self.driver.current_url != 'https://www.hupu.com/':
            time.sleep(3)


class DouBanCookieGetter(CookieGetter):
    """douban.com"""
    def login(self):
        self.driver.get('https://www.douban.com/accounts/login')
        time.sleep(3)
        self.driver.find_element_by_id('email').send_keys(self.username)
        self.driver.find_element_by_id('password').send_keys(self.passwd)
        self.driver.find_element_by_id('remember').click()
        self.driver.find_element_by_class_name('btn-submit').click()
        while self.driver.current_url != 'https://www.douban.com/':
            time.sleep(3)


def start(name, username, password):
    getter = {'douban': DouBanCookieGetter, 'hupu': HuPuCookieGetter}.get(name)
    if not getter:
        print('name is invalid!')
        return
    getter(name, username, password)


if __name__ == '__main__':
    fire.Fire(start)
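# Hypothetical invocation (the site name must be 'douban' or 'hupu'; the
# credentials below are placeholders):
#   python cookie_getter.py douban user@example.com secret123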
Example #29
    input data type due to interpolation) but cast to float32.

    Args:
        in_filepath, out_filepath: str
            Paths to input and output images
        dtype: str
            Image data type conversion. Accepted formats are bool, uint8,
            int16, uint16, float32, float64.
    """
    # load image
    img = io.imread(in_filepath)

    # clip
    if "float" in dtype:
        info = np.finfo(dtype)
        a_max, a_min = info.max, info.min
    elif "int" in dtype:
        info = np.iinfo(dtype)
        a_max, a_min = info.max, info.min
    else:
        a_max, a_min = True, False

    img = np.clip(img, a_min=a_min, a_max=a_max).astype(dtype)

    # save to disk
    io.imsave(out_filepath, img)


if __name__ == "__main__":
    fire.Fire(se_postprocessing)
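# Hypothetical invocation (file names are placeholders); dtype must be one of
# the types listed in the docstring above:
#   python postprocess.py --in_filepath raw.tif --out_filepath clean.tif --dtype uint16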
Example #30
def main():
    """initilaze"""
    fire.Fire(Kutt)