def vicvb_to_jbrowse(self,
        genome_name,
        annot_inp_fasta,
        annot_out,
        index_html,
        data_dir_out):
    annot_out = util.abspath(annot_out)
    index_html = util.abspath(index_html)
    data_dir_out = util.abspath(data_dir_out)
    if annot_inp_fasta:
        annot_inp_fasta = util.abspath(annot_inp_fasta)
    res_gb = self.vicvb_to_genbank(
        genome_name=genome_name,
        annot_out=annot_out,
        annot_inp_fasta=annot_inp_fasta
    )
    res_gff = self.genbank_to_gff(genbank_file=res_gb["genbank_file"])
    self.gff_to_jbrowse(
        gff_file=res_gff["gff_file"],
        index_html=index_html,
        data_dir_out=data_dir_out
    )
def install_jbrowse(install_dir, root_url, rename_to=None, conf_file=None):
    if not conf_file:
        conf_file = config.get_default_conf_file()
    install_dir = util.abspath(install_dir)
    pkg_data_dir = config.get_pkg_data_dir()
    jbrowse_zip = glob(pjoin(pkg_data_dir, "JBrowse*"))
    assert len(jbrowse_zip) == 1, "Expected a single JBrowse archive "+\
        "stored within the package data dir: %s" % (pkg_data_dir,)
    jbrowse_zip = jbrowse_zip[0]
    util.makedir(install_dir)
    #zipfile only supports the context manager protocol from Python 2.7 on
    f = zipfile.ZipFile(jbrowse_zip, 'r')
    try:
        install_name = os.path.dirname(f.namelist()[0])
        assert (install_name not in (".","..")) and \
            os.path.dirname(install_name) == "",\
            "Unsafe path detected in JBrowse archive: {}".format(f.namelist()[0])
        install_home = pjoin(install_dir, install_name)
        #The JBrowse setup script will not install Perl modules
        #if it is executed in a directory where it was run before,
        #even unsuccessfully.
        #Remove the existing directory:
        if os.path.exists(install_home):
            shutil.rmtree(install_home)
        #the zipfile module strips executable bits, so avoid
        #f.extractall(path=install_dir)  # also unsafe
        check_call(["unzip", "-q", "-o", jbrowse_zip], cwd=install_dir)
    finally:
        f.close()
    if rename_to:
        install_home_new = pjoin(install_dir, rename_to)
        if os.path.exists(install_home_new):
            shutil.rmtree(install_home_new)
        os.rename(install_home, install_home_new)
        install_home = install_home_new
    check_call(["./setup.sh"], cwd=install_home)
    for line in fileinput.input(pjoin(install_home, "index.html"), inplace=True):
        #The Galaxy Web server intercepts 'data' in URL params; use another name
        print line.replace('queryParams.data', 'queryParams.jbrowse_data'),
    conf = util.load_config_json(conf_file)
    conf["jbrowse_bin_dir"] = util.abspath(pjoin(install_home, "bin"))
    conf["jbrowse_url"] = util.urljoin_path(root_url, os.path.basename(install_home))
    util.save_config_json(conf, conf_file)
    return conf_file
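# Hypothetical invocation of install_jbrowse() above (the paths are examples,
# not from the original code): unpacks the bundled JBrowse archive under
# Galaxy's static dir, runs its setup.sh, and records jbrowse_bin_dir and
# jbrowse_url in the JSON config file.
#
#   conf_file = install_jbrowse("/srv/galaxy/static/vicvb", "/static/vicvb")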
def export_handler(command):
    def path_ok(path):
        return os.path.isdir(path) and not os.path.exists(
            os.path.join(path, csv_filename(use_id=True)))

    path = os.path.expanduser(request.args.get("path", get_export_path()))
    if command == "info":
        return flask.jsonify(
            filename=csv_filename(use_id=True),
            path=path,
            default_path=get_export_path(),
            ok=path_ok(path),
            stats=stats(callback=dict),
        )
    elif command == "check_path":
        ok = path_ok(path)
        if ok:
            conf.set("export_path", path)
            conf.save()
        return flask.jsonify(ok=ok)
    elif command == "do_export":
        if not path_ok(path):
            return flask.jsonify(ok=False, error="Invalid path: %s" % path)
        try:
            shutil.copyfile(csv_path(), os.path.join(path, csv_filename(use_id=True)))
            shutil.move(
                csv_path(),
                util.abspath("backups", "%s-%s" % (csv_filename(),
                                                   time.strftime("%d-%b-%Y-%H-%M-%S-%p")))
            )
            conf.set("export_id", conf.get("export_id", "1", type=int) + 1)
            return flask.jsonify(ok=True)
        except Exception as e:
            return flask.jsonify(ok=False, error=str(e))
    else:
        flask.abort(404)
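# Several snippets in this collection call a project-local util.abspath helper
# with one or more path segments (e.g. util.abspath("backups", name)). A
# minimal sketch consistent with those call sites follows; this is an
# assumption -- the real helper may resolve relative paths against the package
# directory rather than the current working directory.
import os

def abspath(*segments):
    # Join the given segments and normalize them into an absolute path.
    return os.path.abspath(os.path.join(*segments))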
def load_grammar(base_structures, tag_dicts):
    grammar_dir = util.abspath('grammar')

    # with Timer('Loading grammar'):
    with open(os.path.join(grammar_dir, 'rules.txt')) as f:
        # regex = '\(([^()]+)\)'
        for line in f:
            fields = line.split()
            # tags = tuple(re.findall(regex, fields[0]))  # extracts the tags
            tags = fields[0]
            # map grammar rule (tags) to probability
            base_structures[tags] = float(fields[1])

    tagdicts_dir = os.path.join(grammar_dir, 'seg_dist')

    # with Timer('Loading tag dictionaries'):
    for fname in os.listdir(tagdicts_dir):
        if not fname.endswith('.txt'):
            continue
        with open(os.path.join(tagdicts_dir, fname)) as f:
            tag = fname.replace('.txt', '')
            words = []
            for line in f:
                fields = line.split('\t')
                try:
                    words.append((fields[0], float(fields[1])))
                except (IndexError, ValueError):
                    sys.stderr.write("error inserting {} in the tag dictionary {}\n"
                                     .format(fields, tag))
            tag_dicts[tag] = words
def consistencyCheck(ref_csv, outputBshellFile=None, outPutResolutionFile=None):
    try:
        ref_imgs, _ = read_imgs_masks(ref_csv)
    except Exception:
        # the csv may contain a single dwi column without masks
        ref_imgs = read_imgs(ref_csv)

    if isfile(outputBshellFile) and isfile(outPutResolutionFile):
        ref_bvals = read_bvals(outputBshellFile)
        ref_res = np.load(outPutResolutionFile)
    else:
        ref_bshell_img = ref_imgs[0]
        print(f'Using {ref_bshell_img} to determine b-shells')

        inPrefix = abspath(ref_bshell_img).split('.nii')[0]
        ref_bvals = findBShells(inPrefix + '.bval', outputBshellFile)

        ref_res = load(ref_bshell_img).header['pixdim'][1:4]
        np.save(outPutResolutionFile, ref_res)

    print('b-shells are', ref_bvals)

    print('\nSite', ref_csv, '\n')

    print('Checking consistency of b-shells among subjects')
    check_bshells(ref_imgs, ref_bvals)

    print('spatial resolution is', ref_res)
    print('Checking consistency of spatial resolution among subjects')
    check_resolution(ref_imgs, ref_res)
def check_bshells(ref_imgs, ref_bvals):
    unmatched = []
    for imgPath in ref_imgs:
        imgPath = local.path(imgPath)
        if not imgPath.exists():
            raise FileNotFoundError(imgPath)

        inPrefix = abspath(imgPath).split('.nii')[0]
        bvals = findBShells(inPrefix + '.bval')

        if (bvals == ref_bvals).all():
            print('b-shells matched for', imgPath.name)
        else:
            print(f'\nUnmatched b-shells for {imgPath.name}')
            print(bvals)
            print(f'ref_bvals {ref_bvals}\n')
            unmatched.append(imgPath._path)

    print('')
    if len(unmatched):
        print('Unmatched cases:')
        print(unmatched)
        raise ValueError(
            'Leave out the unmatched cases or change the reference case '
            'for determining b-shells to run multi-shell-dMRIharmonization')
    else:
        print('All cases have the same b-shells. '
              'Data is good for running multi-shell-dMRIharmonization')
    print('')
def rename(folder, format):
    """
    Rename all images in the given directory according to the given
    format pattern.

    Format: use NUM as a placeholder for the image number.
    imgNUM -> img1, img2, ..., img100, ...
    """
    # Get the list of all files, then rename the images
    path = util.abspath(folder)
    names_lis = os.listdir(path)
    os.chdir(path)  # cd into the folder to work with OS commands

    # Scan for images and rename them
    for i in range(0, len(names_lis)):
        f = names_lis[i]
        extension = get_extension(f)
        location = "".join([path, "/", f])  # need the absolute location
        #print("checking location %s. Is file? %s " % (location, is_image(location)))
        if util.is_image(location):
            new_f = "".join([format.replace("NUM", str(i)), ".", extension])
            util.update_line("Renaming file %s to %s." % (f, new_f))
            os.rename(f, new_f)
    # Tada, done
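# Hypothetical usage of rename() above (the folder and pattern are examples,
# not from the original code): renames every image in ./shots to shot0.jpg,
# shot1.png, ... with each file keeping its own extension.
#
#   rename("shots", "shotNUM")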
def save_frame(capture, num, path, w=100, h=100):
    """
    Save the requested frame of the video capture as a grid of
    w x h image files.
    """
    # Video's dimensions
    width = int(capture.get(3))
    height = int(capture.get(4))

    # Number of images (rows*cols) cropped out of each frame
    cols = int(width/w)
    rows = int(height/h)

    # Read the specific frame
    capture.set(1, num)  # seek to the nth frame
    ret, frame = capture.read()

    # Split the frame into many w x h images
    for row in range(rows):
        for col in range(cols):
            # Crop out one window
            img = frame[h*row:(row+1)*h, w*col:(col+1)*w]
            name = "img%s-%s-%s.jpg" % (str(num), row, col)
            cv2.imwrite(util.abspath(path) + "/" + name, img)
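# Hypothetical usage of save_frame() above (the file name is an example):
# saves every non-overlapping 100x100 crop of frame 240 into ./negatives.
#
#   cap = cv2.VideoCapture("traffic.mp4")
#   save_frame(cap, 240, "negatives")
#   cap.release()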
def __init__(self, *args, **kwargs):
    super(ConfigForm, self).__init__(*args, **kwargs)
    event_names = [('', '')]
    for f in os.listdir(util.abspath('match_schedules')):
        name, ext = os.path.splitext(f)
        if ext == '.json':
            event_names.append((name, name))
    self.event_name.choices = event_names
def separateAllBshells(ref_csv, ref_bvals_file, ncpu=4, outPrefix=None):
    ref_bvals = read_bvals(ref_bvals_file)
    try:
        imgs, masks = read_imgs_masks(ref_csv)
    except Exception:
        imgs = read_imgs(ref_csv)
        masks = None

    pool = Pool(int(ncpu))
    for imgPath in imgs:
        pool.apply_async(separateBshells,
                         kwds={'imgPath': imgPath, 'ref_bvals': ref_bvals},
                         error_callback=RAISE)

    pool.close()
    pool.join()

    if outPrefix:
        outPrefix = abspath(outPrefix)
    else:
        return

    for bval in ref_bvals:
        f = open(f'{outPrefix}_b{int(bval)}.csv', 'w')
        if masks:
            for imgPath, maskPath in zip(imgs, masks):
                inPrefix = abspath(imgPath).split('.nii')[0]
                bPrefix = inPrefix + f'_b{int(bval)}'
                f.write(f'{bPrefix}.nii.gz,{maskPath}\n')
        else:
            for imgPath in imgs:
                inPrefix = abspath(imgPath).split('.nii')[0]
                bPrefix = inPrefix + f'_b{int(bval)}'
                f.write(f'{bPrefix}.nii.gz\n')
        f.close()
def joinBshells(imgPath, ref_bvals_file=None, ref_bvals=None, sep_prefix=None):
    if ref_bvals_file:
        print('Reading reference b-shell file ...')
        ref_bvals = read_bvals(ref_bvals_file)

    print('Joining b-shells for', imgPath)

    imgPath = local.path(imgPath)
    img = load(imgPath._path)
    dim = img.header['dim'][1:5]

    inPrefix = abspath(imgPath).split('.nii')[0]
    directory = dirname(inPrefix)
    prefix = basename(inPrefix)

    bvalFile = inPrefix + '.bval'
    bvecFile = inPrefix + '.bvec'

    if sep_prefix:
        harmPrefix = pjoin(directory, sep_prefix + prefix)
    else:
        harmPrefix = inPrefix

    if not isfile(harmPrefix + '.bval'):
        copyfile(bvalFile, harmPrefix + '.bval')
    if not isfile(harmPrefix + '.bvec'):
        copyfile(bvecFile, harmPrefix + '.bvec')

    bvals = np.array(read_bvals(inPrefix + '.bval'))

    joinedDwi = np.zeros((dim[0], dim[1], dim[2], dim[3]), dtype='float32')

    for bval in ref_bvals:
        # ind= np.where(bval==bvals)[0]
        ind = np.where(abs(bval - bvals) <= BSHELL_MIN_DIST)[0]

        if bval == 0.:
            b0Img = load(inPrefix + '_b0.nii.gz')
            b0 = b0Img.get_data()
            for i in ind:
                joinedDwi[:, :, :, i] = b0
        else:
            # the separated shell files carry a leading b0 volume; skip it
            b0_bshell = load(harmPrefix + f'_b{int(bval)}.nii.gz').get_data()
            joinedDwi[:, :, :, ind] = b0_bshell[:, :, :, 1:]

    if not isfile(harmPrefix + '.nii.gz'):
        # b0Img is defined above because ref_bvals always includes the b=0 shell
        save_nifti(harmPrefix + '.nii.gz', joinedDwi, b0Img.affine, b0Img.header)
    else:
        print(harmPrefix + '.nii.gz', 'already exists, not overwritten.')
def __init__(self,
        jbrowse_url,
        jbrowse_bin_dir,
        jbrowse_galaxy_index_html_tpl=None,
        jbrowse_data_subdir="data",
        tbl_to_asn_tpl=None,
        tbl_to_asn_exe=None):
    #the line below should be the first in order
    #to get all parameters into a dict
    self.opt = locals().copy()
    self.opt.pop("self")
    self.opt["jbrowse_bin_dir"] = util.abspath(self.opt["jbrowse_bin_dir"])
def render(theme, item, context=None):
    feed = item.feed
    reldir = 'themes/%s' % theme
    absdir = util.abspath(reldir)
    if feed.has_favicon:
        icon = util.abspath(feed.favicon_path)
    else:
        icon = util.abspath('icons/feed.png')
    context = context or {}
    context['item'] = item
    context['feed'] = feed
    context['icon'] = icon
    context['reldir'] = reldir
    context['absdir'] = absdir
    try:
        template = 'themes/%s/index.html' % theme
        template = env.get_template(template)
        return template.render(context)
    except Exception:
        if theme == DEFAULT_THEME:
            raise
        return render(DEFAULT_THEME, item, context)
def install_to_galaxy(galaxy_home, galaxy_root_url="/", conf_file=None):
    galaxy_home = util.abspath(galaxy_home)
    jb_rel_dir = pjoin("static", "vicvb")
    jb_install_dir = pjoin(galaxy_home, jb_rel_dir)
    jb_root_url = util.urljoin_path(galaxy_root_url, jb_rel_dir)
    #tool_install_dir = pjoin(galaxy_home,"tools","vicvb")
    #util.makedir(tool_install_dir)
    conf_file = install_jbrowse(
        jb_install_dir,
        jb_root_url,
        #rename_to="jbrowse",
        conf_file=conf_file)
    #shutil.copy(config.get_pkg_data_file("galaxy_vicvb_browser.xml"),
    #            pjoin(tool_install_dir,"vicvb_browser.xml"))
    return conf_file
def separateBshells(imgPath, ref_bvals_file=None, ref_bvals=None):
    if ref_bvals_file:
        print('Reading reference b-shell file ...')
        ref_bvals = read_bvals(ref_bvals_file)

    print('Separating b-shells for', imgPath)

    imgPath = local.path(imgPath)
    img = load(imgPath._path)
    dwi = img.get_data()
    inPrefix = abspath(imgPath).split('.nii')[0]
    bvals = np.array(read_bvals(inPrefix + '.bval'))
    bvecs = np.array(read_bvecs(inPrefix + '.bvec'))

    for bval in ref_bvals:
        # ind= np.where(bval==bvals)[0]
        ind = np.where(abs(bval - bvals) <= BSHELL_MIN_DIST)[0]
        N_b = len(ind)
        bPrefix = inPrefix + f'_b{int(bval)}'

        if bval == 0.:
            b0 = find_b0(dwi, ind)
        else:
            b0_bshell = np.zeros(
                (dwi.shape[0], dwi.shape[1], dwi.shape[2], N_b + 1),
                dtype='float32')
            b0_bshell[:, :, :, 0] = b0
            b0_bshell[:, :, :, 1:] = dwi[:, :, :, ind]

            b0_bvals = [0.] + [bval] * N_b

            b0_bvecs = np.zeros((N_b + 1, 3), dtype='float32')
            b0_bvecs[1:, ] = bvecs[ind, :]

            # compare against the shell files written earlier
            bshell_written = load(bPrefix + '.nii.gz').get_fdata()
            bvals_written = read_bvals(bPrefix + '.bval')
            bvecs_written = read_bvecs(bPrefix + '.bvec')

            print('bshell difference', abs(b0_bshell - bshell_written).sum())
            print('bvals difference',
                  (np.array(b0_bvals) - np.array(bvals_written)).sum())
            print('bvecs difference',
                  (np.array(b0_bvecs) - np.array(bvecs_written)).sum())
def separateBshells(imgPath, ref_bvals_file=None, ref_bvals=None):
    if ref_bvals_file:
        print('Reading reference b-shell file ...')
        ref_bvals = read_bvals(ref_bvals_file)

    print('Separating b-shells for', imgPath)

    imgPath = local.path(imgPath)
    img = load(imgPath._path)
    dwi = img.get_data()
    inPrefix = abspath(imgPath).split('.nii')[0]
    bvals = np.array(read_bvals(inPrefix + '.bval'))
    bvecs = np.array(read_bvecs(inPrefix + '.bvec'))

    for bval in ref_bvals:
        # ind= np.where(bval==bvals)[0]
        ind = np.where(abs(bval - bvals) <= BSHELL_MIN_DIST)[0]
        N_b = len(ind)
        bPrefix = inPrefix + f'_b{int(bval)}'

        if bval == 0.:
            b0 = find_b0(dwi, ind)

        if isfile(bPrefix + '.nii.gz'):
            continue

        if bval == 0.:
            save_nifti(bPrefix + '.nii.gz', b0, img.affine, img.header)
        else:
            b0_bshell = np.zeros(
                (dwi.shape[0], dwi.shape[1], dwi.shape[2], N_b + 1),
                dtype='float32')
            b0_bshell[:, :, :, 0] = b0
            b0_bshell[:, :, :, 1:] = dwi[:, :, :, ind]

            b0_bvals = [0.] + [bval] * N_b

            b0_bvecs = np.zeros((N_b + 1, 3), dtype='float32')
            b0_bvecs[1:, ] = bvecs[ind, :]

            save_nifti(bPrefix + '.nii.gz', b0_bshell, img.affine, img.header)
            write_bvals(bPrefix + '.bval', b0_bvals)
            write_bvecs(bPrefix + '.bvec', b0_bvecs)
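# Both separateBshells variants above assume a find_b0 helper that collapses
# the matched b=0 volumes into a single 3-D image. The sketch below (a plain
# mean over the b0 indices) is an assumption; the project's real helper may
# instead pick or align a single representative b0 volume.
import numpy as np

def find_b0(dwi, ind):
    # Average all volumes whose b-value matched the b=0 shell.
    return np.mean(dwi[:, :, :, ind], axis=3).astype('float32')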
def select_folder_sample(root, features, output, test=False, test_num=0, test_out=""):
    """
    Select samples from a directory-per-sample structure, i.e. a structure
    where each directory holds one sample under different conditions
    (for example, each directory contains one person's face).

    root - the path of the root folder.
    features - list of feature indices to pick from each folder.
    """
    # Holds the images
    data = []

    # Get all the directories (i.e. samples)
    dirs = util.get_directories(root)

    # Go through every directory and pick
    # the desired features from each folder
    for d in dirs:
        # use numpy to index the partial samples
        images = np.array(util.get_images(d))
        samples = images[features]
        data.extend(list(samples))  # add to the list
        util.update_line("Getting imgs in folder: %s." % d)

    # Split off the testing samples if chosen
    if test:
        select_test(data, test_num, test_out)

    # Save all remaining images to the output dir
    print(Fore.GREEN)  # change colour to green
    for i in range(len(data)):
        sample = data[i]
        percent = ((i+1)/len(data))*100
        util.update_line("Copying samples progress: %.1f%%" % percent)
        dts = "/".join([util.abspath(output), os.path.basename(sample)])
        util.copy_image(sample, dts)
    print(Style.RESET_ALL)
def match_data():
    if conf.get("station", "none") == "none":
        return flask.jsonify(error="No station specified")
    event_name = conf.get("event_name", "")
    if not event_name:
        return flask.jsonify(error="No match specified")
    path = util.abspath("match_schedules", event_name + ".json")
    try:
        with open(path) as f:
            raw_data = json.load(f)
    except (IOError, ValueError) as e:
        return flask.jsonify(error="Could not load match data: %s" % e)
    data = {}
    station = conf.get("station")
    for k, v in raw_data.items():
        if not isinstance(v, dict):
            return flask.jsonify(error="Bad match entry (%s)" % k)
        elif station not in raw_data[k]:
            return flask.jsonify(error="Match %s missing team ID for %s" % (k, station))
        else:
            data[k] = raw_data[k][station]
    return flask.jsonify(data)
def read(self, path):
    grammar_dir = util.abspath(path)

    with open(os.path.join(grammar_dir, 'rules.txt')) as f:
        for line in f:
            fields = line.split()
            tags = fields[0]
            # map grammar rule (tags) to probability
            self.base_structures[tags] = float(fields[1])

    tagdicts_dir = os.path.join(grammar_dir, 'nonterminals')

    for fname in os.listdir(tagdicts_dir):
        if not fname.endswith('.txt'):
            continue
        with open(os.path.join(tagdicts_dir, fname)) as f:
            tag = fname.replace('.txt', '')
            words = []
            for line in f:
                fields = line.split('\t')
                try:
                    word, prob = fields
                    words.append(word)
                    self.probabilities[(tag, word)] = float(prob)
                except (ValueError, IndexError):
                    sys.stderr.write("error inserting {} in the tag dictionary {}\n"
                                     .format(fields, tag))
            self.tag_dicts[tag] = words

    with open(os.path.join(grammar_dir, 'verb-treecut.pickle'), 'rb') as f:
        self.verb_treecut = pickle.load(f)
    with open(os.path.join(grammar_dir, 'noun-treecut.pickle'), 'rb') as f:
        self.noun_treecut = pickle.load(f)
    with open(os.path.join(grammar_dir, 'params.pickle'), 'rb') as f:
        opts = pickle.load(f)
        self.lowres = opts.lowres
        self.tagtype = opts.tags
def schedule_select():
    return flask.jsonify(file=subprocess.check_output(
        [sys.executable, util.abspath("filedialog.py")]).decode().strip())
def csv_path():
    return util.abspath('..', csv_filename())
def gff_to_jbrowse(self,
        gff_file,
        index_html,
        data_dir_out,
        jbrowse_url=None):
    opt = self.opt
    if jbrowse_url is None:
        jbrowse_url = opt["jbrowse_url"]
    env = None
    if self.opt.get("jbrowse_bin_dir", None):
        env = os.environ.copy()
        util.add_to_path(opt["jbrowse_bin_dir"],
                         prepend=True,
                         env=env)
    if not os.path.exists(data_dir_out):
        os.makedirs(data_dir_out)
    gff_file = util.abspath(gff_file)
    #fasta_file = util.abspath(fasta_file)
    jbrowse_out_dir = os.path.join(data_dir_out, opt["jbrowse_data_subdir"])
    #can use os.devnull to discard all output
    jbrowse_conv_log_base = os.path.join(os.getcwd(), "jbrowse_conv_log")
    with nested(open(jbrowse_conv_log_base+".out", "w"),
                open(jbrowse_conv_log_base+".err", "w")) as (stdout, stderr):
        check_call(["prepare-refseqs.pl", "--gff", gff_file, "--out", jbrowse_out_dir],
                   env=env, stdout=stdout, stderr=stderr)
        #@todo use biodb-to-json instead with flat file input, and accept config
        #file as a parameter (provide a default one too). See volvox.json config
        #in the distribution. Also add dropped_features param to load everything
        #unique in field 3 of GFF and check that only dropped_features are missing
        #from the config
        check_call(["flatfile-to-json.pl", "--gff", gff_file, "--out", jbrowse_out_dir,
                    "--trackLabel", "Genes",
                    "--cssClass", "feature5",
                    "--type", "gene",
                    "--autocomplete", "all",
                    "--getLabel",
                    "--getType"],
                   env=env, stdout=stdout, stderr=stderr)
        check_call(["flatfile-to-json.pl", "--gff", gff_file, "--out", jbrowse_out_dir,
                    "--trackLabel", "CDS",
                    "--cssClass", "generic_parent",
                    "--subfeatureClasses", '{ "exon" : "exon" }',
                    "--type", "CDS",
                    "--type", "exon",
                    "--autocomplete", "all",
                    "--getLabel",
                    "--getType",
                    "--getSubs",
                    "--getPhase"],
                   env=env, stdout=stdout, stderr=stderr)
        check_call(["flatfile-to-json.pl", "--gff", gff_file, "--out", jbrowse_out_dir,
                    "--trackLabel", "Peptides",
                    "--cssClass", "est",
                    "--subfeatureClasses", '{ "mat_peptide" : "transcript-CDS" }',
                    "--type", "mat_peptide",
                    "--autocomplete", "all",
                    "--getLabel",
                    "--getType",
                    "--getSubs",
                    "--getPhase"],
                   env=env, stdout=stdout, stderr=stderr)
        check_call(["flatfile-to-json.pl", "--gff", gff_file, "--out", jbrowse_out_dir,
                    "--trackLabel", "Misc",
                    "--cssClass", "feature3",
                    "--type", "misc_feature",
                    "--autocomplete", "all",
                    "--getLabel",
                    "--getType",
                    "--getSubs",
                    "--getPhase"],
                   env=env, stdout=stdout, stderr=stderr)
        check_call(["generate-names.pl", "--out", jbrowse_out_dir],
                   env=env, stdout=stdout, stderr=stderr)
    tracks_conf_file = os.path.join(jbrowse_out_dir, "trackList.json")
    tracks_conf = util.load_config_json(tracks_conf_file)
    tracks_conf["refSeqDropdown"] = True  # show pull-down even for very many sequences
    util.save_config_json(tracks_conf, tracks_conf_file)
    #create index.html that redirects to JBrowse index.html with correct data param etc
    _jbrowse_dataset_index_html = \
        config.get_data_string(self.opt["jbrowse_galaxy_index_html_tpl"],
                               "galaxy.index.html")
    jbrowse_url_params = util.to_url_params(dict(
        tracks=",".join(("DNA", "Genes", "CDS", "Peptides", "Misc")),
        tracklist=0
    ))
    with open(index_html, "w") as f:
        f.write(_jbrowse_dataset_index_html.format(
            jbrowse_url=jbrowse_url.rstrip("/"),
            jbrowse_data_subdir=opt["jbrowse_data_subdir"],
            jbrowse_url_params=jbrowse_url_params))
# Print welcome text
print(Fore.BLUE)
print("Negative Annotation... Settings passed:")
print("Background file path: %s" % bg_info_path)
print("Background images sample %s" % images_path)
print("Use relative path: %s" % arguments['--relative'])
print(Style.RESET_ALL)

# Open/create background file
open(bg_info_path, "w").close()
bg_file = open(bg_info_path, "a")
print("file opened successfully, retrieving images (might take a while)")

# Go through all images and save them
images = get_images(abspath(images_path))
print("Starting to save images")
print(Fore.GREEN)
for i in range(len(images)):
    # Print progress here
    percent = ((i + 1) / len(images)) * 100
    update_line("Progress: %.2f" % percent)
    # Save the info to the file
    save_bg_info(images[i], bg_file, func)
print(Style.RESET_ALL)

# Finally close the file
bg_file.close()
    low = int(arguments['LOW'])
    high = int(arguments['HIGH'])
    size = int(arguments['SIZE'])
    features = func(list(range(low, high)), size)
else:
    # Here use the default (random) selection function
    low = int(arguments['LOW'])
    high = int(arguments['HIGH'])
    size = int(arguments['SIZE'])
    features = get_random(list(range(low, high)), size)

# Get the mandatory arguments
root_path = arguments['ROOT']
out_path = arguments['OUTPUT']

# Print out info to the user
print(Fore.BLUE + "Setting passed: ")
print("Root directory..... %s." % root_path)
print("Output directory... %s." % out_path)
print("Features passed.... %s." % list(features))
print(Style.RESET_ALL)

# Finally save the data
if arguments['--test']:
    num = int(arguments['--test'])
    test_out = util.abspath(os.path.join(out_path, "test"))
    select_folder_sample(root_path, features, out_path, True, num, test_out)
else:
    select_folder_sample(root_path, features, out_path)
def get_path(self, item):
    return abspath(join(self.dir, item))
# Print welcome and settings:
print(Fore.BLUE)
print("Generating Negatives script... Settings are:\n")
print("Video path: %s" % vid_path)
print("Output folder path: %s" % output_path)
print("Starting frame: %s" % start)
print("Time interval: %s" % interval)
print("Number of frames: %s" % n_frames)
print("Width of samples: %s" % width)
print("Height of samples: %s" % height)
print(Style.RESET_ALL)  # back to normal color

# Open video here
video_cap = cv2.VideoCapture(util.abspath(vid_path))

# Calculate video properties
video_fps = video_cap.get(cv2.CAP_PROP_FPS)
video_frames = video_cap.get(cv2.CAP_PROP_FRAME_COUNT)
frame_gap = int(interval*video_fps)

# Make sure there are sufficient frames in the video
if (start + n_frames*frame_gap) > video_frames:
    print(Fore.RED, end='')
    print("Interval or #frames to save too large for video duration.")
    print(Style.RESET_ALL)
    sys.exit(1)  # exit

# Do the saving here
print(Fore.GREEN)  # change output to green
# Create the classifier object
face_cascade = cv2.CascadeClassifier(xml_file)

# Now open/create the file
if not os.path.isfile(positive_info_file):
    # Doesn't exist, so create the file beforehand
    open(positive_info_file, "w").close()

# Truncate, then open in append mode for writing the lines
open(positive_info_file, "w").close()
info_file = open(positive_info_file, "a")  # append mode

# Get relative path of img folders to info file
#os.chdir(os.path.dirname(positive_info_file))
#img_rel_path = get_relative_dir(positive_info_file, img_path)
img_path = abspath(img_path)

# Now save faces for every image
print(Fore.GREEN)  # change colour
for path in images:
    save_func(path, info_file, face_cascade)
    # Use ansi to update line (FIX FOR WINDOWS)
    update_line("Image processed: %s " % path)

# Set things back to normal (colors and style)
print(Style.RESET_ALL)
info_file.close()  # close, as the function doesn't do it
print(Fore.GREEN + "Finished" + Style.RESET_ALL)
if __name__ == '__main__':

    if len(sys.argv) == 1 or sys.argv[1] == '-h' or sys.argv[1] == '--help':
        print('''Check consistency of b-shells and spatial resolution among subjects

Usage:
consistencyCheck list.csv/txt ref_bshell_bvalues.txt ref_res_file.npy

Provide a csv/txt file with the first column for dwi and the 2nd column for mask:
dwi1,mask1\\ndwi2,mask2\\n...
or just one column for dwi:
dwi1\\ndwi2\\n...

In addition, provide ref_bshell_bvalues and ref_res_file.''')
        exit()

    ref_csv = abspath(sys.argv[1])
    outputBshellFile = abspath(sys.argv[2])
    outPutResolutionFile = abspath(sys.argv[3])

    if isfile(ref_csv):
        consistencyCheck(ref_csv, outputBshellFile, outPutResolutionFile)
    else:
        raise FileNotFoundError(f'{ref_csv} does not exist.')