def create_shortcut(r, what, where=''):
    # TODO Move to windows helpers
    print('\n\n+---- Creating Shortcut ----')
    print('What = %s\n Where=%s' % (what, where))
    run_in = ''
    what_args = ''
    if isinstance(what, tuple):
        tup = what
        what = tup[0]
        what_args = tup[1]
        run_in = tup[2]
        if run_in == ' ':
            run_in = ''
        if what_args == ' ':
            what_args = ''
    if where == '':
        target = what + '.lnk'
    else:
        import ubelt as ub
        ub.ensuredir(where)
        base_what = os.path.basename(what)
        if len(base_what) > 0:
            if base_what[-1] in ['"', "'"]:
                base_what = base_what[0:-1]
        target = where + '/' + base_what + '.lnk'
    helpers_vbs = r.f.create_shortcut_vbs
    cmd = 'cscript "%s" "%s" "%s" "%s" "%s"' % (helpers_vbs, target, what,
                                                what_args, run_in)
    print(cmd)
    call(cmd)

def __init__(self, inputs, task, colorspace='RGB'):
    self.inputs = inputs
    self.task = task
    self.colorspace = colorspace
    self.loader = im_loaders.np_loader
    self.rng = np.random.RandomState(432432)

    inputs_base = ub.ensuredir((task.workdir, 'inputs'))
    inputs.base_dpath = inputs_base
    if len(inputs):
        inputs.prepare_images(force=True)
        inputs.prepare_input()
        self.input_id = inputs.input_id
        self.with_gt = self.inputs.gt_paths
    else:
        self.input_id = ''

    self.augment = None
    self.im_augment = torchvision.transforms.Compose([
        RandomGamma(rng=self.rng),
        RandomBlur(rng=self.rng),
    ])
    self.rand_aff = RandomWarpAffine(self.rng)

    if self.inputs.aux_paths:
        self.aux_keys = sorted(self.inputs.aux_paths.keys())
    else:
        self.aux_keys = []

    self.center_inputs = None
    self.use_aux_diff = ub.argflag('--use_aux_diff')
    self.use_dual_gt = ub.argval('--arch', default='unet')

def save(self, data, cfgstr=None):
    """
    Writes data to path specified by `self.fpath(cfgstr)`.

    Metadata containing information about the cache will also be appended
    to an adjacent file with the `.meta` suffix.

    Example:
        >>> from ubelt.util_cache import *  # NOQA
        >>> # Normal functioning
        >>> cfgstr = 'long-cfg' * 32
        >>> cacher = Cacher('test_enabled_save', cfgstr)
        >>> cacher.save('data')
        >>> assert exists(cacher.get_fpath()), 'should be enabled'
        >>> assert exists(cacher.get_fpath() + '.meta'), 'missing metadata'
        >>> # Setting the cacher as enabled=False turns it off
        >>> cacher2 = Cacher('test_disabled_save', 'params', enabled=False)
        >>> cacher2.save('data')
        >>> assert not exists(cacher2.get_fpath()), 'should be disabled'
    """
    import ubelt as ub
    if not self.enabled:
        return
    if self.verbose > 0:
        self.log('[cacher] ... {} cache save'.format(self.fname))

    cfgstr = self._rectify_cfgstr(cfgstr)
    condensed = self._condense_cfgstr(cfgstr)

    # Make sure the cache directory exists
    ub.ensuredir(self.dpath)

    data_fpath = self.get_fpath(cfgstr=cfgstr)
    meta_fpath = data_fpath + '.meta'

    # Also save metadata file to reconstruct hashing
    with open(meta_fpath, 'a') as file_:
        # TODO: maybe append this in json format?
        file_.write('\n\nsaving {}\n'.format(ub.timestamp()))
        file_.write(self.fname + '\n')
        file_.write(condensed + '\n')
        file_.write(cfgstr + '\n')
        file_.write(str(self.meta) + '\n')

    with open(data_fpath, 'wb') as file_:
        # Use protocol 2 to support python2 and 3
        pickle.dump(data, file_, protocol=self.protocol)

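# Hedged usage sketch (not part of the class above): the load-or-compute
# pattern that `save` participates in, assuming ubelt's public Cacher API
# with the cfgstr-style interface shown above.
import ubelt as ub
cacher = ub.Cacher('my_process', cfgstr='param1')
data = cacher.tryload()
if data is None:
    data = 'expensive result'  # stand-in for the real computation
    cacher.save(data)
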
def server_setup():
    """Fixture to set up the server and remove artifacts associated with
    the server after the test."""
    data_dir = Path(f"{os.path.dirname(__file__)}/data")
    result_dir = Path(
        f"{os.path.dirname(__file__)}/server_results_{time.time()}")
    ub.ensuredir(data_dir)
    ub.ensuredir(result_dir)
    url = URL
    server.set_provider(FileProvider(data_dir, result_dir))
    api_process = multiprocessing.Process(target=server.init,
                                          args=("localhost", 3307))
    api_process.start()
    yield url, result_dir
    api_process.terminate()
    api_process.join()
    shutil.rmtree(result_dir)

def demo(Repo, ensure=True):
    repo = Repo(
        remote='https://github.com/Erotemic/ubelt.git',
        code_dpath=ub.ensuredir(ub.expandpath('~/tmp/demo-repos')),
    )
    if ensure:
        repo.ensure()
    return repo

def dump_batch_item(harn, batch, outputs, postout):
    fig = harn.visualize_prediction(batch, outputs, postout, idx=0,
                                    thresh=0.2)
    img = nh.util.mplutil.render_figure_to_image(fig)
    dump_dpath = ub.ensuredir((harn.train_dpath, 'dump'))
    dump_fname = 'pred_{:08d}.png'.format(harn.epoch)
    fpath = os.path.join(dump_dpath, dump_fname)
    nh.util.imwrite(fpath, img)

def ensure_voc_data(dpath=None, force=False, years=[2007, 2012]):
    """
    Download the Pascal VOC data if it does not already exist.

    Example:
        >>> # xdoctest: +REQUIRES(--download)
        >>> devkit_dpath = ensure_voc_data()
    """
    if dpath is None:
        dpath = ub.expandpath('~/data/VOC')
    devkit_dpath = join(dpath, 'VOCdevkit')
    # if force or not exists(devkit_dpath):
    ub.ensuredir(dpath)

    def extract_tarfile(fpath, dpath='.'):
        # Old way
        # ub.cmd('tar xvf "{}" -C "{}"'.format(fpath, dpath), verbout=1)
        import tarfile
        tar = tarfile.open(fpath)
        try:
            tar.extractall(dpath)
        finally:
            tar.close()

    fpath1 = ub.grabdata('http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCdevkit_08-Jun-2007.tar', dpath=dpath)
    if force or not exists(join(dpath, 'VOCdevkit', 'VOCcode')):
        extract_tarfile(fpath1, dpath)

    if 2007 in years:
        # VOC 2007 train+validation data
        fpath2 = ub.grabdata('http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtrainval_06-Nov-2007.tar', dpath=dpath)
        if force or not exists(join(dpath, 'VOCdevkit', 'VOC2007', 'ImageSets', 'Main', 'bird_trainval.txt')):
            extract_tarfile(fpath2, dpath)

        # VOC 2007 test data
        fpath3 = ub.grabdata('http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtest_06-Nov-2007.tar', dpath=dpath)
        if force or not exists(join(dpath, 'VOCdevkit', 'VOC2007', 'ImageSets', 'Main', 'bird_test.txt')):
            extract_tarfile(fpath3, dpath)

    if 2012 in years:
        # VOC 2012 train+validation data
        fpath4 = ub.grabdata('https://pjreddie.com/media/files/VOCtrainval_11-May-2012.tar', dpath=dpath)
        if force or not exists(join(dpath, 'VOCdevkit', 'VOC2012', 'ImageSets', 'Main', 'bird_trainval.txt')):
            extract_tarfile(fpath4, dpath)
    return devkit_dpath

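# Minimal usage sketch for ensure_voc_data (illustrative only; note that a
# real call triggers the large downloads that the doctest above gates
# behind the --download flag):
import ubelt as ub
devkit_dpath = ensure_voc_data(dpath=ub.expandpath('~/data/VOC'),
                               years=[2007])
print('VOC2007 devkit at', devkit_dpath)
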
def _create_test_filesystem():
    dpath = ub.ensure_app_cache_dir('xdev/test_search_replace')
    text1 = ub.paragraph(
        '''
        Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do
        eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim
        ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut
        aliquip ex ea commodo consequat. Duis aute irure dolor in
        reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla
        pariatur. Excepteur sint occaecat cupidatat non proident, sunt in
        culpa qui officia deserunt mollit anim id est laborum.
        ''')

    text2 = ub.codeblock(
        '''
        def fib(n):
            a, b = 0, 1
            while a < n:
                print(a, end=' ')
                a, b = b, a+b
            print()
        fib(1000)
        ''')

    text3 = ub.codeblock(
        '''
        This file contains Lorem and fib
        Newlines
        fib
        lorem
        fib
        ''')

    text4 = ''

    fpath1 = join(dpath, 'lorium.txt')
    fpath2 = join(dpath, 'fib.py')
    fpath3 = join(dpath, 'foo.txt')
    fpath4 = join(ub.ensuredir((dpath, 'subdir')), 'foo.txt')
    with open(fpath1, 'w') as file:
        file.write(text1)
    with open(fpath2, 'w') as file:
        file.write(text2)
    with open(fpath3, 'w') as file:
        file.write(text3)
    with open(fpath4, 'w') as file:
        file.write(text4)

    info = {
        'root': dpath,
        'contents': [fpath1, fpath2, fpath3],
    }
    return info

def setup_dpath(self, train_dpath, short=True, hashed=True):
    train_info = self.train_info(train_dpath, short, hashed)
    train_dpath = ub.ensuredir(train_info['train_dpath'])

    # backwards compatibility code, can eventually remove after a major
    # version change
    if True:
        # backwards compatibility code
        if os.path.exists(train_info['old_train_dpath']) and not os.path.islink(train_info['old_train_dpath']):
            ub.delete(train_info['train_dpath'])
            ub.symlink(train_info['old_train_dpath'],
                       train_info['train_dpath'],
                       overwrite=True, verbose=3)

    # setup symlinks
    # ub.ensuredir(dirname(train_info['link_dpath']))
    # ub.symlink(train_info['train_dpath'], train_info['link_dpath'],
    #            overwrite=True, verbose=3)

    if train_info['nice_dpath']:
        ub.ensuredir(dirname(train_info['nice_dpath']))
        ub.symlink(train_info['train_dpath'], train_info['nice_dpath'],
                   overwrite=True, verbose=3)

    verbose = 0
    if verbose:
        print('+=========')
        # print('hyper_strid = {!r}'.format(params.hyper_id()))
        # print('train_init_id = {!r}'.format(train_info['input_id']))
        # print('arch = {!r}'.format(train_info['arch_id']))
        # print('train_hyper_hashid = {!r}'.format(train_info['train_hyper_hashid']))
        print('hyper = {}'.format(ub.repr2(train_info['hyper'], nl=3)))
        print('train_hyper_id_brief = {!r}'.format(
            train_info['train_hyper_id_brief']))
        print('train_id = {!r}'.format(train_info['train_id']))
        print('+=========')
    return train_info

def test_modname_to_modpath_namespace():
    """
    Ignore:
        import sys
        sys.path.append('/home/joncrall/code/xdoctest/testing')
        from test_static import *
        temp = ub.TempDir()
        temp.__enter__()
        sys.path.append(temp.dpath)
        temp.__exit__(None, None, None)

        %timeit _syspath_modname_to_modpath('xdoctest.static_analysis')
        %timeit _pkgutil_modname_to_modpath('xdoctest.static_analysis')
    """
    with ub.TempDir() as temp:
        dpath = temp.dpath

        # Some "bad" non-module directories
        tmpbad = ub.ensuredir((dpath, '_tmpbad'))

        # Make a submodule of a bad directory that looks good.
        sub_bad = ub.ensuredir((tmpbad, 'sub_bad'))
        ub.touch(join(tmpbad, '_inbad.py'))
        subbad = ub.touch(join(sub_bad, '__init__.py'))  # NOQA
        b0 = ub.touch(join(sub_bad, 'b0.py'))  # NOQA

        with PythonPathContext(dpath):
            assert _static_modname_to_modpath('_tmpbad') is None

            # Tricky case: these modules look good outside of _tmpbad.
            # WOW, you can actually import this and it works, but
            # pkgloader still returns None so we should too.
            assert _static_modname_to_modpath('_tmpbad.sub_bad') is None
            assert _static_modname_to_modpath('_tmpbad.sub_bad.b0') is None

            # We should be able to statically find all of the good module
            # directories.

            # this should all be static
            import sys
            assert '_tmpsingle' not in sys.modules
            assert '_tmpbad' not in sys.modules

def test_overwrite_symlink():
    """
    CommandLine:
        python -m ubelt.tests.test_links test_overwrite_symlink
    """
    # TODO: test that we handle broken links
    dpath = ub.ensure_app_cache_dir('ubelt', 'test_overwrite_symlink')
    ub.delete(dpath, verbose=2)
    ub.ensuredir(dpath, verbose=2)

    happy_fpath = join(dpath, 'happy_fpath.txt')
    other_fpath = join(dpath, 'other_fpath.txt')
    happy_flink = join(dpath, 'happy_flink.txt')

    for verbose in [2, 1, 0]:
        print('=======')
        print('verbose = {!r}'.format(verbose))
        ub.delete(dpath, verbose=verbose)
        ub.ensuredir(dpath, verbose=verbose)
        ub.touch(happy_fpath, verbose=verbose)
        ub.touch(other_fpath, verbose=verbose)

        util_links._dirstats(dpath)
        ub.symlink(happy_fpath, happy_flink, verbose=verbose)

        # Creating a duplicate link
        # import six
        # import sys
        # if not six.PY2 and sys.platform.startswith('win32'):
        util_links._dirstats(dpath)
        ub.symlink(happy_fpath, happy_flink, verbose=verbose)

        util_links._dirstats(dpath)
        with pytest.raises(Exception):  # file exists error
            ub.symlink(other_fpath, happy_flink, verbose=verbose)

        ub.symlink(other_fpath, happy_flink, verbose=verbose,
                   overwrite=True)

        ub.delete(other_fpath, verbose=verbose)
        with pytest.raises(Exception):  # file exists error
            ub.symlink(happy_fpath, happy_flink, verbose=verbose)

        ub.symlink(happy_fpath, happy_flink, verbose=verbose,
                   overwrite=True)

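# Condensed sketch of the symlink semantics exercised by the test above:
# relinking the same target is a no-op, while repointing an existing link
# requires overwrite=True (paths here are illustrative).
import ubelt as ub
from os.path import join
dpath = ub.ensure_app_cache_dir('ubelt', 'symlink_sketch')
real1 = ub.touch(join(dpath, 'real1.txt'))
real2 = ub.touch(join(dpath, 'real2.txt'))
link = join(dpath, 'link.txt')
ub.symlink(real1, link)                  # creates the link
ub.symlink(real1, link)                  # ok: same target, nothing to do
ub.symlink(real2, link, overwrite=True)  # repoint the link explicitly
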
def localize_model(train_dpath, epoch, local_path):
    import shutil
    local_snap_path = ub.ensuredir((local_path, 'torch_snapshots'))
    orig_load_path = fit_harn2.get_snapshot(train_dpath, epoch=epoch)
    orig_info_fpath = join(train_dpath, 'train_info.json')
    local_info_fpath = join(local_path, 'train_info.json')
    local_load_path = join(local_snap_path, basename(orig_load_path))
    shutil.copy2(orig_info_fpath, local_info_fpath)
    shutil.copy2(orig_load_path, local_load_path)

def post(self, *args, **kwargs):
    upload_file_dir = "upload"
    download_file_dir = "static"
    ubelt.ensuredir(upload_file_dir)
    ubelt.ensuredir(download_file_dir)
    file_metas = self.request.files["csv"]
    # print("file_metas:", file_metas)
    # one file at a time
    for meta in file_metas:
        # print("meta:", meta)
        random_name = uuid.uuid4().hex
        file_name = "%s/%s.csv" % (upload_file_dir, random_name)
        with open(file_name, "wb") as wf:
            wf.write(meta["body"])
    self.set_status(200)
    self.set_header("Content-Type", "application/json; charset=UTF-8")
    self.finish("ok")

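# Hypothetical client-side sketch for exercising the handler above with the
# requests library; the URL, port, and route are assumptions about how the
# tornado application is mounted.
import requests
with open('example.csv', 'rb') as f:
    resp = requests.post('http://localhost:8888/upload',
                         files={'csv': ('example.csv', f, 'text/csv')})
print(resp.status_code, resp.text)  # expect: 200 ok
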
def __init__(dset, inputs, task, workdir, output_colorspace='RGB'):
    dset.inputs = inputs
    dset.task = task
    dset.output_colorspace = output_colorspace
    dset.rng = np.random.RandomState(432432)

    inputs_base = ub.ensuredir((workdir, 'inputs'))
    inputs.base_dpath = inputs_base
    if len(inputs):
        inputs.prepare_id()
        dset.input_id = inputs.input_id
        dset.with_gt = dset.inputs.gt is not None
    else:
        dset.input_id = ''

    # TODO: only use horizontal flipping and translation by 4 pixels to
    # match results from other papers
    # https://arxiv.org/pdf/1603.09382.pdf page 8
    dset.augment = None
    # dset.im_augment = torchvision.transforms.Compose([
    #     RandomGamma(rng=dset.rng),
    #     RandomBlur(rng=dset.rng),
    # ])
    # dset.rand_aff = RandomWarpAffine(dset.rng)

    augmentors = [
        # iaa.Sometimes(.8, iaa.ContrastNormalization((0.2, 1.8))),
        iaa.Fliplr(p=.5),
        iaa.Affine(translate_px={'x': (-1, 1), 'y': (-1, 1)}),
        # CropTo((30, 30)),
        # iaa.Crop(px=(1, 1, 1, 1)),
        # imgaug.Brightness(63),
        # imgaug.RandomCrop((30, 30)),
        # imgaug.MeanVarianceNormalize(all_channel=True)
    ]
    dset.augmenter = iaa.Sequential(augmentors)
    # iaa.Sequential([
    #     iaa.Affine(translate_px={"x": -40}),
    #     iaa.AdditiveGaussianNoise(scale=0.1*255)
    # ])
    # dset.rand_aff = RandomWarpAffine(
    #     dset.rng, tx_pdf=(-2, 2), ty_pdf=(-2, 2), flip_lr_prob=.5,
    #     zoom_pdf=None, shear_pdf=None, flip_ud_prob=None,
    #     enable_stretch=None, default_distribution='uniform')

    dset.center_inputs = None

def download_content():
    with open("/data/homekoo/text_homekoo_mamacn_result.log", "r") as rf:
        data_list = list(rf.readlines())
    total = len(data_list)
    print("total lines is:", total)
    ubelt.ensuredir("/data/homekoo/text_mamacn")
    for i, target_url in enumerate(data_list):
        if i % 10 == 9:
            print("crawling: %s/%s" % (i, total))
        # TODO: process one url at a time
        if re.match("http://www.gzmama.com/", target_url, re.I):
            tid = 0
            if re.match(r"http://www.gzmama.com/thread-(\d+)-\d+-\d+.html",
                        target_url, re.I):
                tid = re.match(
                    r"http://www.gzmama.com/thread-(\d+)-\d+-\d+.html",
                    target_url, re.I).groups()[0]
            elif re.match(r"http://www.gzmama.com/forum.php\?.*&tid=(\d+)",
                          target_url, re.I):
                tid = re.match(
                    r"http://www.gzmama.com/forum.php\?.*&tid=(\d+)",
                    target_url, re.I).groups()[0]
            else:
                print("cannot get tid of %s" % target_url)
                continue
            print("target_url: %s - %s" % (tid, target_url))
            download_path = "/data/homekoo/text_mamacn/%s.html" % (tid)
            if exists(download_path):
                print("exists: %s" % download_path)
                continue
            try:
                get_all_content(target_url, download_path)
            except Exception:
                print(traceback.format_exc())

def __init__(self, fname, cfgstr=None, dpath=None, appname='ubelt',
             ext='.pkl', meta=None, verbose=None, enabled=True, log=None,
             protocol=2):
    import ubelt as ub
    if verbose is None:
        verbose = self.VERBOSE
    if dpath is None:  # pragma: no branch
        dpath = ub.ensure_app_cache_dir(appname)
    ub.ensuredir(dpath)
    self.dpath = dpath
    self.fname = fname
    self.cfgstr = cfgstr
    self.verbose = verbose
    self.ext = ext
    self.meta = meta
    self.enabled = enabled
    self.protocol = protocol
    self.log = print if log is None else log

    if len(self.ext) > 0 and self.ext[0] != '.':
        raise ValueError('Please be explicit and use a dot in ext')

def test_kwcoco_cli():
    import pytest
    pytest.skip('disable for now')

    import ubelt as ub
    dpath = ub.ensure_app_cache_dir('kwcoco/test/cli')
    ub.delete(dpath)
    ub.ensuredir(dpath)

    verbose = 3
    cmdkw = dict(verbose=verbose, check=True, cwd=dpath)

    info = ub.cmd('kwcoco --help', **cmdkw)

    info = ub.cmd('kwcoco toydata --dst foo.json', **cmdkw)
    assert exists(join(dpath, 'foo.json'))

    info = ub.cmd('kwcoco stats --src foo.json', **cmdkw)

    info = ub.cmd(
        'kwcoco split --src foo.json --dst1 foo1.json --dst2=foo2.json',
        **cmdkw)
    assert exists(join(dpath, 'foo1.json'))
    assert exists(join(dpath, 'foo2.json'))

    info = ub.cmd(
        'kwcoco split --src foo1.json --dst1 foo3.json --dst2=foo4.json',
        **cmdkw)
    assert exists(join(dpath, 'foo3.json'))
    assert exists(join(dpath, 'foo4.json'))

    info = ub.cmd('kwcoco union --src foo3.json foo4.json --dst bar.json',
                  **cmdkw)
    assert exists(join(dpath, 'bar.json'))

    info = ub.cmd('kwcoco show --src foo3.json --dst foo3.png', **cmdkw)  # NOQA
    assert exists(join(dpath, 'foo3.png'))

def unzip_file(zip_fpath, force_commonprefix=True, output_dir=None,
               prefix=None, dryrun=False, overwrite=None, verbose=1):
    import zipfile
    zip_file = zipfile.ZipFile(zip_fpath)
    if output_dir is None:
        output_dir = dirname(zip_fpath)
    archive_namelist = zip_file.namelist()

    # force extracted components into a subdirectory if force_commonprefix
    # is True
    if prefix is not None:
        output_dir = join(output_dir, prefix)
        ub.ensuredir(output_dir)

    archive_basename, ext = splitext(basename(zip_fpath))
    if force_commonprefix and commonprefix(archive_namelist) == '':
        # use the archive name as the default common prefix
        output_dir = join(output_dir, archive_basename)
        ub.ensuredir(output_dir)

    for member in archive_namelist:
        (dname, fname) = split(member)
        dpath = join(output_dir, dname)
        ub.ensuredir(dpath)
        if verbose:
            print('Unarchive ' + fname + ' in ' + dpath)
        if not dryrun:
            if overwrite is False:
                if exists(join(output_dir, member)):
                    continue
            zip_file.extract(member, path=output_dir)
    zip_file.close()
    # hack
    return join(output_dir, archive_basename)

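# Hedged usage sketch for unzip_file (the archive path and output directory
# are illustrative); the returned path reflects the common-prefix
# subdirectory behavior noted in the final "hack" comment above.
out_dpath = unzip_file('/tmp/example_archive.zip',
                       output_dir='/tmp/extracted', verbose=0)
print('extracted under', out_dpath)
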
def main():
    for userid in userid_list:
        print("working on:%s" % userid)
        albumId_list = get_albumId_list(userid)
        if not albumId_list:
            print("nothing is in %s" % userid)
            return
        info = {
            "anchorNickName": albumId_list[0]["anchorNickName"],
            "anchorUid": albumId_list[0]["anchorUid"],
        }
        print("album length is: %s" % len(albumId_list))
        for item in albumId_list:
            albumId = item["id"]
            result_list = get_album(albumId)
            print("album %s length is: %s" % (item["title"],
                                              len(result_list)))
            for track in result_list:
                print("%s + %-36s %s" % (track["albumName"],
                                         track["trackName"], track["src"]))
                download_dir = "download/%s - %s/%s - %s" % (
                    info["anchorUid"], info["anchorNickName"], albumId,
                    track["albumName"])
                ubelt.ensuredir(download_dir)
                download_path = "%s/%s.%s" % (
                    download_dir,
                    track["trackName"].replace("/", "_"),
                    track["src"].split(".")[-1] or "m4a")
                if exists(download_path):
                    print("exists: %s" % download_path)
                    continue
                try:
                    download_sound(track["src"], download_path)
                except Exception:
                    print(traceback.format_exc())
                    raise

def _mode_paths(prep, mode, input, clear=False):
    out_dpaths = {}
    if input.im_paths:
        out_dpaths['im'] = prep.subdir('im', mode)
    if input.gt_paths:
        out_dpaths['gt'] = prep.subdir('gt', mode)
    if input.aux_paths:
        out_dpaths['aux'] = {}
        for aux in input.aux_paths.keys():
            out_dpaths['aux'][aux] = prep.subdir('aux', aux, mode)

    if clear:
        # Start fresh. Remove existing files
        if 'im' in out_dpaths:
            ub.delete(out_dpaths['im'], verbose=False)
            ub.ensuredir(out_dpaths['im'])
        if 'gt' in out_dpaths:
            ub.delete(out_dpaths['gt'], verbose=False)
            ub.ensuredir(out_dpaths['gt'])
        if 'aux' in out_dpaths:
            for aux in out_dpaths['aux'].keys():
                ub.delete(out_dpaths['aux'][aux], verbose=False)
                ub.ensuredir(out_dpaths['aux'][aux])
    return out_dpaths

def deploy_trained_for_testing(harn, stride=1):
    """
    Makes the deployable/testable models and then deploys them to the test
    directory (via a link file).
    """
    harn.prepare_test_model(force=False)
    for test_weights_fpath in harn.make_testable_weights(stride):
        dname = splitext(basename(test_weights_fpath)[5:])[0]
        test_weights_dpath = ub.ensuredir((harn.test_dpath, dname))
        link_fpath = join(test_weights_dpath,
                          'test_weights.caffemodel.lnk')
        ub.writeto(link_fpath, test_weights_fpath)
        yield test_weights_fpath

def preprocess(task, force=False):
    task.prepare_fullres_inputs()
    datadir = ub.ensuredir((task.workdir, 'data'))
    prep = preprocess.Preprocessor(datadir)
    prep.part_config['overlap'] = .75
    prep.ignore_label = task.ignore_label
    clear = force
    fullres = task.fullres
    # task.input_modes['lowres'] = prep.make_lowres(fullres, clear=clear)
    task.input_modes['part-scale1'] = prep.make_parts(
        fullres, scale=1, clear=clear)

def main(cls, cmdline=True, **kw):
    """
    Example:
        >>> kw = {'src': ['special:shapes8', 'special:shapes1']}
        >>> cmdline = False
        >>> cls = CocoUnionCLI
        >>> cls.main(cmdline, **kw)
    """
    import kwcoco
    config = cls.CLIConfig(kw, cmdline=cmdline)
    print('config = {}'.format(ub.repr2(dict(config), nl=1)))

    if config['src'] is None:
        raise Exception('must specify sources: {}'.format(config['src']))

    if len(config['src']) == 0:
        raise ValueError('Must provide at least one input dataset')

    datasets = []
    for fpath in ub.ProgIter(config['src'], desc='reading datasets',
                             verbose=1):
        print('reading fpath = {!r}'.format(fpath))
        dset = kwcoco.CocoDataset.coerce(fpath)
        if config['absolute']:
            dset.reroot(absolute=True)
        datasets.append(dset)

    combo = kwcoco.CocoDataset.union(*datasets)

    out_fpath = config['dst']
    out_dpath = dirname(out_fpath)
    if out_dpath:
        ub.ensuredir(out_dpath)
    print('Writing to out_fpath = {!r}'.format(out_fpath))
    combo.fpath = out_fpath
    combo.dump(combo.fpath, newlines=True)

def test_ensuredir_recreate():
    base = ub.ensure_app_cache_dir('ubelt/tests')
    folder = join(base, 'foo')
    member = join(folder, 'bar')
    ub.ensuredir(folder, recreate=True)
    ub.ensuredir(member)
    assert exists(member)
    ub.ensuredir(folder, recreate=True)
    assert not exists(member)

def make_solver_file(input_fpath, arch='segnet_basic', dpath=None,
                     modelkw={}, params=None, gpu_num=0):
    assert input_fpath, 'must specify'
    model_fpath = make_model_file(input_fpath, arch=arch, mode='fit',
                                  dpath=dpath, modelkw=modelkw)

    solver_fname = '{}_solver.prototext'.format(arch)
    solver_fpath = join(dpath, solver_fname)
    snapshot_dpath = ub.ensuredir((dpath, 'snapshots'))
    snapshot_prefix = snapshot_dpath + '/'
    text = make_solver(model_fpath, snapshot_prefix=snapshot_prefix,
                       params=params, gpu_num=gpu_num)
    ub.writeto(solver_fpath, text)
    print('made solver_fpath = {!r}'.format(ub.compressuser(solver_fpath)))
    return solver_fpath

def prepare_input(self):
    """
    Prepare the text file containing inputs that can be passed to caffe.
    """
    self.prepare_images()
    if self.input_fpath is None:
        assert self.base_dpath is not None
        assert self.input_id is not None
        self.input_dpath = ub.ensuredir((self.base_dpath, self.input_id))
        # TODO: remove or refactor (holdover from caffe)
        self.input_fpath = make_input_file(self.im_paths, self.gt_paths,
                                           dpath=self.input_dpath)
        print('{} input_fpath = {!r}'.format(
            self.tag, ub.compressuser(self.input_fpath)))

def initialize(harn, reset=False):
    """
    Uses the hyper parameters to initialize the necessary resources and
    restart from previous snapshots when possible.
    """
    if harn.train_dpath is None:
        harn.setup_paths()
    else:
        ub.ensuredir(harn.train_dpath)

    if reset == 'delete':
        ub.delete(harn.train_dpath)
        ub.ensuredir(harn.train_dpath)

    harn.setup_loggers()
    harn.setup_modules()

    assert harn.model is not None, 'required module'
    assert harn.optimizer is not None, 'required module'
    assert harn.monitor is not None, 'required module'

    try:
        harn.resume_from_previous_snapshots()
    except CannotResume:
        harn.reset_weights()

    if harn.train_dpath:
        harn.log(' * harn.train_dpath = {!r}'.format(harn.train_dpath))
        harn.log(' * harn.nice_dpath = {!r}'.format(harn.nice_dpath))
        harn.log('Snapshots will save to harn.snapshot_dpath = {!r}'.format(
            harn.snapshot_dpath))
    else:
        harn.warn('harn.train_dpath is None, all computation is in memory')
    harn._initialized = True

def main():
    albumId_list = []
    analyze_url(from_url)
    result_list = []
    if info.get("albumId"):
        result_list = get_album(info["albumId"])
        # result_list = json.loads(open("zz.txt", "r").read())
        print(info["albumId"], " have result_list count:", len(result_list))
    else:
        pass
        # get user's albumIds
        # for albumId in albumIds:
        #     get_album(info["albumId"])

    for item in result_list:
        print("%s + %-36s %s" % (item["albumName"], item["trackName"],
                                 item["src"]))
        download_dir = "download/%s/%s" % (item["anchorId"],
                                           item["albumName"])
        ubelt.ensuredir(download_dir)
        download_path = "%s/%s.%s" % (download_dir, item["trackName"],
                                      item["src"].split(".")[-1] or "m4a")
        try:
            download_sound(item["src"], download_path)
        except Exception:
            print(traceback.format_exc())
            raise

def cmake_clean(dpath='.'):
    """
    Moves generated cmake build files into a timestamped backup directory,
    preserving CMakeCache.txt and previous backups.
    """
    dpath = ub.truepath(dpath)
    cmake_cache_fpath = join(dpath, 'CMakeCache.txt')
    assert exists(cmake_cache_fpath)
    fpath_set = set(glob.glob(join(dpath, '*'))) - {cmake_cache_fpath}
    for fpath in list(fpath_set):
        if basename(fpath).startswith('_cmake_build_backup_'):
            fpath_set.remove(fpath)
    backup_dpath = ub.ensuredir(
        join(dpath, '_cmake_build_backup_' + ub.timestamp()))
    for fpath in ub.ProgIter(fpath_set, 'moving files'):
        shutil.move(fpath, backup_dpath)

def test_ensuredir_verbosity():
    base = ub.ensure_app_cache_dir('ubelt/tests')

    with ub.CaptureStdout() as cap:
        ub.ensuredir(join(base, 'foo'), verbose=0)
    assert cap.text == ''

    # None defaults to verbose=0
    with ub.CaptureStdout() as cap:
        ub.ensuredir((base, 'foo'), verbose=None)
    assert cap.text == ''

    ub.delete(join(base, 'foo'))
    with ub.CaptureStdout() as cap:
        ub.ensuredir(join(base, 'foo'), verbose=1)
    assert 'creating' in cap.text

    with ub.CaptureStdout() as cap:
        ub.ensuredir(join(base, 'foo'), verbose=1)
    assert 'existing' in cap.text

def benchmark_pickle_protocols():
    data = Pickleable()

    dpaths = {
        'ssd': ub.ensure_app_cache_dir('pickle_benchmark'),
        'ra10': ub.ensuredir('/raid/cache/pickle_bench'),
    }
    protocols = [0, 1, 2, 3, 4]

    def benchmark_write(xxd, proto):
        dpath = dpaths[xxd]
        args = {'proto': proto, 'xxd': xxd}

        fpath = join(dpath, 'test_{}.pkl'.format(proto))
        for timer in ub.Timerit(10, label='save {}'.format(args)):
            ub.delete(fpath)
            ub.writeto(fpath, 'junkdata')
            ub.delete(fpath)
            with timer:
                with open(fpath, 'wb') as file:
                    pickle.dump(data, file, protocol=proto)
        result = args.copy()
        result['write_time'] = timer.ellapsed

        for timer in ub.Timerit(10, label='read {}'.format(args)):
            with timer:
                with open(fpath, 'rb') as file:
                    pickle.load(file)
        result['read_time'] = timer.ellapsed
        return result

    results = []
    for xxd in dpaths.keys():
        for proto in protocols:
            results.append(benchmark_write(xxd, proto))

    df = pd.DataFrame.from_dict(results)
    df = df.sort_values('write_time')
    print(df)
    print('\n')
    df = df.sort_values('read_time')
    print(df)

def _dump_monitor_tensorboard(harn):
    import ubelt as ub
    import netharn as nh
    from os.path import join
    from six.moves import cPickle as pickle
    harn.debug('Plotting tensorboard data')

    tb_data = nh.util.read_tensorboard_scalars(harn.train_dpath, cache=0,
                                               verbose=0)

    plot_keys = [key for key in tb_data
                 if ('train_epoch' in key or
                     'vali_epoch' in key or
                     'test_epoch' in key or
                     'epoch_lr' in key)]
    y01_measures = ['_acc', '_ap', '_mAP', '_auc', '_mcc', '_brier']
    y0_measures = ['error', 'loss']

    out_dpath = ub.ensuredir((harn.train_dpath, 'monitor', 'tensorboard'))

    tb_data_fpath = join(out_dpath, 'tb_data.pkl')
    with open(tb_data_fpath, 'wb') as file:
        pickle.dump(tb_data, file)

    nh.util.autompl()

    keys = set(tb_data.keys()).intersection(set(plot_keys))
    for key in keys:
        d = tb_data[key]
        kw = {}
        if any(m.lower() in key.lower() for m in y01_measures):
            kw['ymin'] = 0.0
            kw['ymax'] = 1.0
        elif any(m.lower() in key.lower() for m in y0_measures):
            kw['ymin'] = 0.0
        ax = nh.util.multi_plot(d['xdata'], d['ydata'], ylabel=key,
                                xlabel='epoch', title=key, fnum=1,
                                doclf=True, **kw)

        # png is slightly smaller than jpg for this kind of plot
        fpath = join(out_dpath, key + '.png')
        ax.figure.savefig(fpath)

def demodata_detections(dataset='haul83', target_step='detect',
                        target_frame_num=7):
    """
    Helper for doctests. Gets test data at different points in the
    pipeline.
    """
    # <ipython hacks>
    if 'target_step' not in vars():
        target_step = 'detect'
    if 'target_frame_num' not in vars():
        target_frame_num = 7
    # </ipython hacks>
    img_path1, img_path2, cal_fpath = demodata_input(dataset=dataset)

    stream = StereoFrameStream(img_path1, img_path2)
    stream.preload()

    cal = ctalgo.StereoCalibration.from_file(cal_fpath)

    detector1 = ctalgo.GMMForegroundObjectDetector()
    detector2 = ctalgo.GMMForegroundObjectDetector()

    for frame_num, (frame_id, img1, img2) in enumerate(stream):
        if frame_num == target_frame_num:
            if target_step == 'detect':
                return detector1, img1

        detections1 = detector1.detect(img1)
        detections2 = detector2.detect(img2)
        masks1 = detector1._masks
        masks2 = detector2._masks

        n_detect1, n_detect2 = len(detections1), len(detections2)
        logging.info('frame_num, (n_detect1, n_detect2) = {} ({}, {})'.format(
            frame_num, n_detect1, n_detect2))

        if frame_num == target_frame_num:
            stacked = DrawHelper.draw_stereo_detections(img1, detections1,
                                                        masks1, img2,
                                                        detections2, masks2)
            drawing_dpath = ub.ensuredir('out')
            cv2.imwrite(drawing_dpath + '/mask{}_draw.png'.format(frame_id),
                        stacked)
            break

    return detections1, detections2, cal

def demo(config=None):
    """
    Runs the algorithm end-to-end.
    """
    # dataset = 'test'
    # dataset = 'haul83'

    if config is None:
        import argparse
        parser = argparse.ArgumentParser(
            description='Standalone camtrawl demo')

        parser.add_argument('--cal', default='cal.npz',
                            help='path to matlab or numpy stereo calibration file')
        parser.add_argument('--left', default='left',
                            help='path to directory containing left images')
        parser.add_argument('--right', default='right',
                            help='path to directory containing right images')
        parser.add_argument('--out', default='./out',
                            help='output directory')
        parser.add_argument('-f', '--overwrite', action='store_true',
                            help='will delete any existing output')
        parser.add_argument('--draw', action='store_true',
                            help='draw visualization of algorithm steps')
        parser.add_argument('--dataset', default=None,
                            help='Developer convenience that assumes the '
                                 'demo data is downloaded and available; '
                                 'used if you do not specify the other args.')

        args = parser.parse_args()
        config = args.__dict__.copy()
        config = FrozenKeyDict(config)

    if config['dataset'] is not None:
        img_path1, img_path2, cal_fpath = demodata_input(
            dataset=config['dataset'])
        config['left'] = img_path1
        config['right'] = img_path2
        config['cal'] = cal_fpath

    img_path1, img_path2, cal_fpath = ub.take(config, [
        'left', 'right', 'cal'])
    out_dpath = config['out']
    logging.info('Demo Config = {!r}'.format(config))

    ub.ensuredir(out_dpath)

    # ----
    # Choose parameter configurations
    # ----

    # Use GMM based model
    gmm_params = {
    }
    triangulate_params = {
    }

    DRAWING = config['draw']

    # ----
    # Initialize algorithms
    # ----

    detector1 = ctalgo.GMMForegroundObjectDetector(**gmm_params)
    detector2 = ctalgo.GMMForegroundObjectDetector(**gmm_params)
    triangulator = ctalgo.FishStereoMeasurments(**triangulate_params)

    try:
        import pyfiglet
        print(pyfiglet.figlet_format('CAMTRAWL', font='cybermedium'))
    except ImportError:
        logging.debug('pyfiglet is not installed')
        print('========')
        print('CAMTRAWL')
        print('========')

    logging.info('Detector1 Config: ' + ub.repr2(detector1.config, nl=1))
    logging.info('Detector2 Config: ' + ub.repr2(detector2.config, nl=1))
    logging.info('Triangulate Config: ' + ub.repr2(triangulator.config, nl=1))
    logging.info('DRAWING = {!r}'.format(DRAWING))

    cal = ctalgo.StereoCalibration.from_file(cal_fpath)

    stream = StereoFrameStream(img_path1, img_path2)
    stream.preload()

    # HACK IN A BEGIN FRAME
    if len(stream) > 2200:
        stream.seek(2200)

    # ----
    # Run the algorithm
    # ----

    # n_frames = 2000
    # stream.aligned_frameids = stream.aligned_frameids[:stream.index]

    measure_fpath = join(out_dpath, 'measurements.csv')
    if exists(measure_fpath):
        if config['overwrite']:
            ub.delete(measure_fpath)
        else:
            raise IOError('Measurement path already exists')
    output_file = open(measure_fpath, 'a')

    if DRAWING:
        drawing_dpath = join(out_dpath, 'visual')
        if exists(drawing_dpath):
            if config['overwrite']:
                ub.delete(drawing_dpath)
            else:
                raise IOError('Output path already exists')
        ub.ensuredir(drawing_dpath)

    headers = ['current_frame', 'fishlen', 'range', 'error', 'dz',
               'box_pts1', 'box_pts2']
    output_file.write(','.join(headers) + '\n')
    output_file.flush()

    measurements = []

    logger.info('begin camtrawl iteration')

    import tqdm
    # prog = ub.ProgIter(iter(stream), total=len(stream),
    #                    desc='camtrawl demo', clearline=False, freq=1,
    #                    adjust=False)
    prog = tqdm.tqdm(iter(stream), total=len(stream), desc='camtrawl demo',
                     leave=True)

    def csv_repr(d):
        if isinstance(d, np.ndarray):
            d = d.tolist()
        s = repr(d)
        return s.replace('\n', '').replace(',', ';').replace(' ', '')

    for frame_num, (frame_id, img1, img2) in enumerate(prog):
        logger.debug('frame_num = {!r}'.format(frame_num))

        detections1 = list(detector1.detect(img1))
        detections2 = list(detector2.detect(img2))
        masks1 = detector1._masks
        masks2 = detector2._masks

        any_detected = len(detections1) > 0 or len(detections2) > 0

        if any_detected:
            assignment, assign_data, cand_errors = triangulator.find_matches(
                cal, detections1, detections2)
            # Append assignments to the measurements
            for data in assign_data:
                data['current_frame'] = int(frame_id)
                measurements.append(data)
                line = ','.join([csv_repr(d)
                                 for d in ub.take(data, headers)])
                output_file.write(line + '\n')
                output_file.flush()
        else:
            cand_errors = None
            assignment, assign_data = None, None

        if DRAWING >= 2 or (DRAWING and any_detected):
            DRAWING = 3
            stacked = DrawHelper.draw_stereo_detections(
                img1, detections1, masks1, img2, detections2, masks2,
                assignment, assign_data, cand_errors)
            if cv2.__version__.startswith('2'):
                cv2.putText(stacked,
                            text='frame #{}, id={}'.format(frame_num,
                                                           frame_id),
                            org=(10, 50),
                            fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                            fontScale=1, color=(255, 0, 0),
                            thickness=2, lineType=cv2.cv.CV_AA)
            else:
                stacked = cv2.putText(stacked,
                                      text='frame #{}, id={}'.format(
                                          frame_num, frame_id),
                                      org=(10, 50),
                                      fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                                      fontScale=1, color=(255, 0, 0),
                                      thickness=2, lineType=cv2.LINE_AA)
            cv2.imwrite(drawing_dpath + '/mask{}_draw.png'.format(frame_id),
                        stacked)

    output_file.close()

    n_total = len(measurements)
    logger.info('n_total = {!r}'.format(n_total))
    if n_total:
        all_errors = np.array([d['error'] for d in measurements])
        all_lengths = np.array([d['fishlen'] for d in measurements])
        logger.info('ave_error = {:.2f} +- {:.2f}'.format(
            all_errors.mean(), all_errors.std()))
        logger.info('ave_lengths = {:.2f} +- {:.2f} '.format(
            all_lengths.mean(), all_lengths.std()))
    return measurements

def write_default_ipython_profile():
    """
    CommandLine:
        python ~/local/init/init_ipython_config.py

        python -c "import xdev, ubelt; xdev.startfile(ubelt.truepath('~/.ipython/profile_default'))"
        python -c "import xdev, ubelt; xdev.editfile(ubelt.truepath('~/.ipython/profile_default/ipython_config.py'))"

    References:
        http://2sn.org/python/ipython_config.py
    """
    dpath = ub.expandpath('~/.ipython/profile_default')
    ub.ensuredir(dpath)
    ipy_config_fpath = join(dpath, 'ipython_config.py')
    ipy_config_text = ub.codeblock(
        r'''
        # STARTBLOCK
        import six
        c = get_config()  # NOQA

        c.InteractiveShellApp.exec_lines = []
        if six.PY2:
            future_line = (
                'from __future__ import absolute_import, division, '
                'print_function, with_statement, unicode_literals')
            c.InteractiveShellApp.exec_lines.append(future_line)

        # Fix sip versions
        try:
            import sip
            # http://stackoverflow.com/questions/21217399/pyqt4-qtcore-qvariant-object-instead-of-a-string
            sip.setapi('QVariant', 2)
            sip.setapi('QString', 2)
            sip.setapi('QTextStream', 2)
            sip.setapi('QTime', 2)
            sip.setapi('QUrl', 2)
            sip.setapi('QDate', 2)
            sip.setapi('QDateTime', 2)
            if hasattr(sip, 'setdestroyonexit'):
                sip.setdestroyonexit(False)  # This prevents a crash on windows
        except ImportError as ex:
            pass
        except ValueError as ex:
            print('Warning: Value Error: %s' % str(ex))
            pass

        c.InteractiveShellApp.exec_lines.append('%load_ext autoreload')
        c.InteractiveShellApp.exec_lines.append('%autoreload 2')
        # c.InteractiveShellApp.exec_lines.append('%pylab qt4')
        c.InteractiveShellApp.exec_lines.append('import numpy as np')
        c.InteractiveShellApp.exec_lines.append('import ubelt as ub')
        c.InteractiveShellApp.exec_lines.append('import xdev')
        c.InteractiveShellApp.exec_lines.append('import pandas as pd')
        c.InteractiveShellApp.exec_lines.append('pd.options.display.max_columns = 40')
        c.InteractiveShellApp.exec_lines.append('pd.options.display.width = 160')
        c.InteractiveShellApp.exec_lines.append('pd.options.display.max_rows = 20')
        c.InteractiveShellApp.exec_lines.append('pd.options.display.float_format = lambda x: \'%.4f\' % (x,)')
        c.InteractiveShellApp.exec_lines.append('import networkx as nx')
        c.InteractiveShellApp.exec_lines.append('from os.path import *')
        c.InteractiveShellApp.exec_lines.append('from six.moves import cPickle as pickle')
        # c.InteractiveShellApp.exec_lines.append('if \'verbose\' not in vars():\\n    verbose = True')

        import ubelt as ub
        c.InteractiveShellApp.exec_lines.append(ub.codeblock(
            """
            class classproperty(property):
                def __get__(self, cls, owner):
                    return classmethod(self.fget).__get__(None, owner)()

            class vim(object):
                @classproperty
                def focus(cls):
                    from vimtk.cplat_ctrl import Window
                    Window.find('GVIM').focus()

                @classproperty
                def copy(cls):
                    import time
                    from vimtk.cplat_ctrl import Window
                    gvim_window = Window.find('GVIM')
                    gvim_window.focus()
                    import vimtk
                    import IPython
                    ipy = IPython.get_ipython()
                    lastline = ipy.history_manager.input_hist_parsed[-2]
                    vimtk.cplat.copy_text_to_clipboard(lastline)
                    from vimtk import xctrl
                    xctrl.XCtrl.do(
                        ('focus', 'GVIM'),
                        ('key', 'ctrl+v'),
                        ('focus', 'x-terminal-emulator.X-terminal-emulator')
                    )
            """
        ))

        # c.InteractiveShell.autoindent = True
        # c.InteractiveShell.colors = 'LightBG'
        # c.InteractiveShell.confirm_exit = False
        # c.InteractiveShell.deep_reload = True
        c.InteractiveShell.editor = 'gvim'
        # c.InteractiveShell.xmode = 'Context'
        # ENDBLOCK
        ''')
    with open(ipy_config_fpath, 'w') as file:
        file.write(ipy_config_text + '\n')
