def test_find_with_extension():
    root = tempfile.mkdtemp()

    files = [[root, 'file1.txt'],
             [root, 'sub1', 'file2.txt'],
             [root, 'sub1', 'sub2', 'file3.txt'],
             [root, 'sub1', 'sub2', 'sub3', 'file4.txt']]
    files = [os.sep.join(_) for _ in files]
    badfiles = [_.replace('.txt', '.csv') for _ in files]

    # Create all the necessary directories
    util.smkdirs(os.path.dirname(files[-1]))

    # Create the dummy files
    for fname in files + badfiles:
        with open(fname, 'w'):
            pass

    def __test(level, sort):
        results = util.find_with_extension(root, 'txt', depth=level, sort=sort)
        eq_(sorted(results), sorted(files[:level]))

    for level in [1, 2, 3, 4]:
        for sort in [False, True]:
            yield __test, level, sort

    # Cleanup
    for fname, badfname in zip(files[::-1], badfiles[::-1]):
        os.remove(fname)
        os.remove(badfname)
        os.rmdir(os.path.dirname(fname))
def root_and_files():
    root = tempfile.mkdtemp()

    files = [[root, 'file1.txt'],
             [root, 'sub1', 'file2.txt'],
             [root, 'sub1', 'sub2', 'file3.txt'],
             [root, 'sub1', 'sub2', 'sub3', 'file4.txt']]
    files = [os.sep.join(_) for _ in files]
    badfiles = [_.replace('.txt', '.csv') for _ in files]

    # Create all the necessary directories
    util.smkdirs(os.path.dirname(files[-1]))

    # Create the dummy files
    for fname in files + badfiles:
        with open(fname, 'w'):
            pass

    yield root, files

    for fname, badfname in zip(files[::-1], badfiles[::-1]):
        os.remove(fname)
        os.remove(badfname)
        os.rmdir(os.path.dirname(fname))
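# A minimal usage sketch, not from the source: it assumes root_and_files
# is exposed as a pytest fixture (the wrapper below is hypothetical).
# pytest runs everything after the generator's `yield` as teardown once
# the requesting test finishes.
import pytest

@pytest.fixture(name='tree')
def tree_fixture():
    # Delegate to the generator above; its post-yield cleanup code
    # becomes the fixture teardown.
    yield from root_and_files()

def test_find_with_extension_pytest(tree):
    root, files = tree
    results = util.find_with_extension(root, 'txt', depth=4, sort=True)
    assert sorted(results) == sorted(files)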
def make_muda(presets):
    '''Construct a MUDA dynamic range compressor'''
    drc = muda.deformers.DynamicRangeCompression(preset=presets)

    smkdirs(OUTPUT_PATH)
    with open(os.path.join(OUTPUT_PATH, 'muda_drc.pkl'), 'wb') as fd:
        pickle.dump(drc, fd)

    return drc
def make_muda(stretch, n_stretch):
    '''Construct a MUDA time stretcher'''
    shifter = muda.deformers.LogspaceTimeStretch(n_samples=n_stretch,
                                                 lower=-stretch,
                                                 upper=stretch)

    smkdirs(OUTPUT_PATH)
    with open(os.path.join(OUTPUT_PATH, 'muda.pkl'), 'wb') as fd:
        pickle.dump(shifter, fd)

    return shifter
def make_muda(n_semitones):
    '''Construct a MUDA pitch shifter'''
    # Shift up and down by 1, 2, ..., n_semitones semitones
    tones = []
    for n in range(1, n_semitones + 1):
        tones.extend([-n, n])

    shifter = muda.deformers.PitchShift(n_semitones=tones)

    smkdirs(OUTPUT_PATH)
    with open(os.path.join(OUTPUT_PATH, 'muda.pkl'), 'wb') as fd:
        pickle.dump(shifter, fd)

    return shifter
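# Usage sketch, not from the source: applying one of the deformers above
# with muda's standard load/transform/save pipeline. The file names are
# placeholders.
import muda

shifter = make_muda(2)  # shifts of -2, -1, +1, +2 semitones
jam = muda.load_jam_audio('example.jams', 'example.ogg')
for i, jam_out in enumerate(shifter.transform(jam)):
    muda.save('example_shift{}.ogg'.format(i),
              'example_shift{}.jams'.format(i),
              jam_out)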
def test_smkdirs():
    root = tempfile.mkdtemp()
    my_dirs = [root, 'level1', 'level2', 'level3']

    try:
        target = os.sep.join(my_dirs)
        util.smkdirs(target)

        # Verify that every level of the hierarchy was created,
        # including the deepest directory
        for i in range(1, len(my_dirs) + 1):
            tmpdir = os.sep.join(my_dirs[:i])
            assert os.path.exists(tmpdir)
            assert os.path.isdir(tmpdir)
    finally:
        # Remove directories from deepest to shallowest
        for i in range(len(my_dirs), 0, -1):
            tmpdir = os.sep.join(my_dirs[:i])
            os.rmdir(tmpdir)
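# For reference, a minimal sketch of what smkdirs is expected to do,
# inferred from the test above; this is an assumption, not the actual
# implementation: create the full path, succeeding silently if it exists.
def smkdirs_sketch(dpath, mode=0o755):
    '''Recursively create dpath; do nothing if it already exists.'''
    os.makedirs(dpath, mode=mode, exist_ok=True)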
                      use_tqdm=True, weak_from_strong=True)

    # Save results to disk
    results_file = os.path.join(OUTPUT_PATH, modelid,
                                '{:02d}'.format(fold), 'results.json')
    with open(results_file, 'w') as fp:
        json.dump(results, fp, indent=2)

    print('Done!')


if __name__ == '__main__':
    params = process_arguments(sys.argv[1:])
    smkdirs(OUTPUT_PATH)

    # Get current directory
    cwd = os.getcwd()
    # Get directory where git repo lives
    curfilePath = os.path.relpath(milsed.__file__)
    curDir = os.path.abspath(os.path.join(curfilePath, os.pardir))
    parentDir = os.path.abspath(os.path.join(curDir, os.pardir))
    # Change to the repo directory
    os.chdir(parentDir)
    # Get GIT version
    version = milsed.utils.increment_version(
        os.path.join(OUTPUT_PATH, 'version.txt'))
    # Return to working dir
    os.chdir(cwd)
                      duration, modelid,
                      use_orig_duration=True)

    # Save results to disk
    results_file = os.path.join(OUTPUT_PATH, modelid, 'results.json')
    with open(results_file, 'w') as fp:
        json.dump(results, fp, indent=2)

    print('Done!')


if __name__ == '__main__':
    params = process_arguments(sys.argv[1:])
    smkdirs(OUTPUT_PATH)

    # Get current directory
    cwd = os.getcwd()
    # Get directory where git repo lives
    curfilePath = os.path.relpath(milsed.__file__)
    curDir = os.path.abspath(os.path.join(curfilePath, os.pardir))
    parentDir = os.path.abspath(os.path.join(curDir, os.pardir))
    # Change to the repo directory
    os.chdir(parentDir)
    # Get GIT version
    version = milsed.utils.increment_version(
        os.path.join(OUTPUT_PATH, 'version.txt'))
    # Return to working dir
    os.chdir(cwd)
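# Sketch only: milsed.utils.increment_version is not shown in these
# snippets. Based on how it is used above (called from inside the git
# repo, persisting state in version.txt), a plausible shape is a run
# counter tied to the current git revision. This is an assumption, not
# the actual implementation.
import subprocess

def increment_version_sketch(filename):
    sha = subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode().strip()
    counter = 0
    try:
        with open(filename, 'r') as fd:
            old_sha, old_counter = fd.read().strip().rsplit('.', 1)
            if old_sha == sha:
                counter = int(old_counter) + 1
    except (IOError, ValueError):
        pass

    version = '{}.{}'.format(sha, counter)
    with open(filename, 'w') as fd:
        fd.write(version)
    return version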
    with open(os.path.join(OUTPUT_PATH, 'pump.pkl'), 'wb') as fd:
        pickle.dump(pump, fd)

    return pump


def convert(aud, jam, pump, outdir):
    data = pump.transform(aud, jam)
    fname = os.path.extsep.join([os.path.join(outdir, crema.utils.base(aud)),
                                 'h5'])
    crema.utils.save_h5(fname, **data)


if __name__ == '__main__':
    params = process_arguments(sys.argv[1:])
    smkdirs(OUTPUT_PATH)
    smkdirs(params.output_path)

    print('{}: pre-processing'.format(__doc__))
    print(params)

    pump = make_pump(params.sr, params.hop_length, params.n_octaves)

    stream = tqdm(crema.utils.get_ann_audio(params.input_path),
                  desc='Converting training data')
    Parallel(n_jobs=params.n_jobs)(delayed(convert)(aud, ann, pump,
                                                    params.output_path)
                                   for aud, ann in stream)

    if params.augment_path:
        stream = tqdm(crema.utils.get_ann_audio(params.augment_path),
                      desc='Converting augmented data')
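# Usage sketch, not from the source: reading one converted file back at
# training time. crema.utils provides load_h5 as the inverse of save_h5;
# the h5 path below is a placeholder.
import pickle
import crema.utils

with open(os.path.join(OUTPUT_PATH, 'pump.pkl'), 'rb') as fd:
    pump = pickle.load(fd)

# data is a dict of feature arrays keyed by the pump's field names
data = crema.utils.load_h5('/path/to/working/track_id.h5')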
    cb.append(K.callbacks.EarlyStopping(patience=early_stopping,
                                        verbose=1,
                                        monitor=monitor))

    # Fit the model
    model.fit_generator(gen_train, epoch_size, epochs,
                        validation_data=gen_val,
                        validation_steps=validation_size,
                        callbacks=cb)


if __name__ == '__main__':
    params = process_arguments(sys.argv[1:])
    smkdirs(OUTPUT_PATH)

    version = crema.utils.increment_version(os.path.join(OUTPUT_PATH,
                                                         'version.txt'))

    print('{}: training'.format(__doc__))
    print('Model version: {}'.format(version))
    print(params)

    train(params.working, params.max_samples, params.duration,
          params.rate, params.batch_size, params.epochs,
          params.epoch_size, params.validation_size,
          params.early_stopping,
    with open(os.path.join(OUTPUT_PATH, 'pump.pkl'), 'wb') as fd:
        pickle.dump(pump, fd)

    return pump


def convert(aud, jam, pump, outdir):
    data = pump.transform(aud, jam)
    fname = os.path.extsep.join([os.path.join(outdir, crema.utils.base(aud)),
                                 'h5'])
    crema.utils.save_h5(fname, **data)


if __name__ == '__main__':
    params = process_arguments(sys.argv[1:])
    smkdirs(OUTPUT_PATH)
    smkdirs(params.output_path)

    print('{}: pre-processing'.format(__doc__))
    print(params)

    pump = make_pump(params.sr, params.hop_length, params.n_octaves)

    stream = tqdm(crema.utils.get_ann_audio(params.input_path),
                  desc='Converting training data')
    Parallel(n_jobs=params.n_jobs)(delayed(convert)(aud, ann, pump,
                                                    params.output_path)
                                   for aud, ann in stream)

    if params.augment_path:
        stream = tqdm(crema.utils.get_ann_audio(params.augment_path),
    cb.append(K.callbacks.EarlyStopping(patience=early_stopping,
                                        verbose=1,
                                        monitor=monitor))

    # Fit the model
    model.fit_generator(gen_train, epoch_size, epochs,
                        validation_data=gen_val,
                        validation_steps=validation_size,
                        callbacks=cb)


if __name__ == '__main__':
    params = process_arguments(sys.argv[1:])
    smkdirs(OUTPUT_PATH)

    version = crema.utils.increment_version(
        os.path.join(OUTPUT_PATH, 'version.txt'))

    print('{}: training'.format(__doc__))
    print('Model version: {}'.format(version))
    print(params)

    train(params.working, params.max_samples, params.duration,
          params.rate, params.batch_size, params.epochs,
          params.epoch_size, params.early_stopping,
          params.reduce_lr, params.seed)
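# Note, not from the source: fit_generator is deprecated in recent
# Keras/TensorFlow releases; model.fit accepts generators directly.
# Under that assumption, the fit_generator call above would become:
model.fit(gen_train,
          steps_per_epoch=epoch_size,
          epochs=epochs,
          validation_data=gen_val,
          validation_steps=validation_size,
          callbacks=cb)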