Code example #1
def all_last_saved(max_elapsed=60):
    "Check that every exported tsai module was saved within `max_elapsed` seconds."
    import os
    import time
    from pathlib import Path
    from time import strftime
    from fastai.data.transforms import get_files
    print('\n')
    lib_path = Path(os.getcwd()).parent
    folder = lib_path / 'tsai'
    print('Checking folder:', folder)
    counter = 0
    elapsed = 0
    current_time = time.time()
    for fp in get_files(folder):
        fp = str(fp)
        fn = fp.split('/')[-1]
        if not fn.endswith(".py") or fn.startswith("_") or fn.startswith(
                ".") or fn in ['imports.py', 'all.py', 'basics.py'
                               ]:  # add files without a notebook
            continue
        elapsed_time = current_time - os.path.getmtime(fp)
        if elapsed_time > max_elapsed:
            print(f"{fn:30} saved {elapsed_time:10.0f} s ago ***")
            counter += 1
        elapsed += elapsed_time
    if counter == 0:
        print('Correct conversion! 😃')
        output = 1
    else:
        print('Incorrect conversion! 😔')
        output = 0
    print(f'Total time elapsed {elapsed:.3f} s')
    print(strftime("%A %d/%m/%y %T %Z"))
    return output
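
A minimal usage sketch of the helper above, assuming it is run from a notebook directory that sits next to the tsai package folder; the threshold value is illustrative:

# Hypothetical call: flag any tsai module not re-saved in the last 2 minutes.
ok = all_last_saved(max_elapsed=120)
assert ok == 1, 'Some modules were not regenerated recently'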
Code example #2
    def load_ensemble(self, path=None):
        "Collect the `.pth` model files found in `path` (defaults to `self.ensemble_dir`)."
        path = path or self.ensemble_dir
        models = get_files(path, extensions='.pth', recurse=False)
        assert len(models) > 0, f'No models found in {path}'
        self.models = {}
        for m in models:
            # the last character of the file stem encodes the model index
            model_id = int(m.stem[-1])
            self.models[model_id] = m
        print(f'Found {len(self.models)} models in folder {path}')
        print(self.models)
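
The model_id lookup above assumes the last character of each file stem is a digit; a self-contained illustration with a hypothetical file name:

from pathlib import Path

# 'model_3.pth' -> stem 'model_3' -> last character '3' -> id 3
m = Path('model_3.pth')
model_id = int(m.stem[-1])
print(model_id)  # prints 3

Note that this convention only supports single-digit model indices.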
Code example #3
def tokenize_folder(path,
                    extensions=None,
                    folders=None,
                    output_dir=None,
                    skip_if_exists=True,
                    **kwargs):
    "Tokenize text files in `path` in parallel using `n_workers`"
    path, extensions = Path(path), ifnone(extensions, ['.txt'])
    files = get_files(path,
                      extensions=extensions,
                      recurse=True,
                      folders=folders)

    def _f(i, output_dir):
        return output_dir / files[i].relative_to(path)

    return _tokenize_files(_f,
                           files,
                           path,
                           skip_if_exists=skip_if_exists,
                           **kwargs)
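
A hedged usage sketch for tokenize_folder; the corpus path and folder names are hypothetical, and extra keyword arguments such as n_workers are simply forwarded to _tokenize_files via **kwargs:

# Tokenize every .txt file under a hypothetical corpus directory,
# recursing only into the 'train' and 'test' subfolders.
tokenize_folder('path/to/corpus', extensions=['.txt'], folders=['train', 'test'])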
Code example #4
File: learner.py  Project: matjesg/deepflash2
    def load_ensemble(self, path=None):
        path = path or self.ensemble_dir
        models = sorted(get_files(path, extensions='.pth', recurse=False))
        self.models = {}

        for i, m in enumerate(models, 1):
            # the first model sets the class count; every other model must match it
            if i == 1: self.n_classes = int(m.name.split('_')[2][0])
            else:
                assert self.n_classes == int(
                    m.name.split('_')[2][0]
                ), 'Check models: they were trained on different numbers of classes.'
            self.models[i] = m

        if len(self.models) > 0:
            self.set_n(len(self.models))
            print(f'Found {len(self.models)} models in folder {path}:')
            print([m.name for m in self.models.values()])

            # Reset stats
            print(f'Loading stats from {self.models[1].name}')
            _, self.stats = load_smp_model(self.models[1])
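
The class-count check above assumes file names whose third underscore-separated field starts with the number of classes; a self-contained sketch with a purely hypothetical file name:

from pathlib import Path

# Hypothetical ensemble file name; the '2' in '2classes' is what gets parsed.
m = Path('unet_fold1_2classes_1.pth')
n_classes = int(m.name.split('_')[2][0])
print(n_classes)  # prints 2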
Code example #5
def get_audio_files(path, recurse=True, folders=None):
    "Get audio files in `path` recursively, only in `folders`, if specified."
    return get_files(
        path, extensions=audio_extensions, recurse=recurse, folders=folders
    )
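
A usage sketch, assuming audio_extensions and get_files are already in scope from the audio library; the directory and folder names are hypothetical:

# Collect audio files from the 'train' and 'valid' subfolders only.
files = get_audio_files('path/to/audio', recurse=True, folders=['train', 'valid'])
print(len(files), 'audio files found')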