Example #1
    def generate_pipeline_hash(self, inputs, dependencies=None, filenames=True):
        """
            Returns a hash representing this pipeline combined with its inputs
        """
        import os
        from hashlib import md5
        from .glob import glob

        hasher = md5()

        # flatten the per-pattern glob results into one list
        join = lambda it: (y for x in it for y in x)
        expanded_inputs = list(join(glob(x) for x in inputs))

        for inp in sorted(expanded_inputs):
            if filenames:
                # hash the modification time of each matched file
                hasher.update(str(os.path.getmtime(inp)).encode('utf-8'))
            else:
                # hash the path itself
                hasher.update(inp.encode('utf-8'))

        for dep in dependencies or []:
            # update hasher for every file in dependencies
            for f in sorted(glob(dep)) if not os.path.isfile(dep) else [dep]:
                hasher.update(str(os.path.getmtime(f)).encode('utf-8'))

        return hasher.hexdigest()
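A standalone sketch of the same fingerprinting idea (the helper name inputs_fingerprint is illustrative, and the stdlib glob stands in for the package-local .glob import):

import os
from glob import glob
from hashlib import md5

def inputs_fingerprint(patterns):
    # Hash the mtimes of every file matched by the patterns, in sorted order,
    # so the digest changes whenever any input file changes on disk.
    hasher = md5()
    for path in sorted(p for pat in patterns for p in glob(pat)):
        hasher.update(str(os.path.getmtime(path)).encode('utf-8'))
    return hasher.hexdigest()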
Example #2
def compileWithAsh(self, tests):
    start_time = datetime.today()
    #print("starting compile of %d tests at %s" % (len(tests), start_time))
    total = len(tests)
    if not pexpect:
        for test in tests:
            self.js_print('%d\tcompiling %s' % (total, test))
            self.compile_test(test)
            (testdir, ext) = splitext(test)
            if not exists(testdir + ".abc"):
                print("ERROR: abc file %s.abc not created" % testdir)
                self.ashErrors.append("abc file %s.abc not created" % testdir)
            total -= 1
    else:  # pexpect available
        child = pexpect.spawn("java -classpath %s macromedia.asc.embedding.Shell" % self.asc)
        child.logfile = None
        child.expect(r"\(ash\)")
        child.expect(r"\(ash\)")

        for test in tests:
            if not self.debug:
                print("Compiling %s" % test)

            if test.endswith(self.abcasmExt):
                self.compile_test(test)
            else:
                arglist = self.parseArgStringToList(self.ascargs)

                (dir, file) = split(test)
                # look for .asc_args files to specify dir / file level compile args
                arglist = self.loadAscArgs(arglist, dir, test)

                cmd = "asc -import %s " % self.builtinabc
                for arg in arglist:
                    cmd += ' %s' % arg

                for p in self.parents(dir):
                    shell = join(p, "shell.as")
                    if isfile(shell):
                        cmd += " -in " + shell
                        break
                (testdir, ext) = splitext(test)
                deps = glob(join(testdir, "*.as"))
                deps.sort()
                for util in deps + glob(join(dir, "*Util.as")):
                    cmd += " -in %s" % util  # no need to prepend \ to $ when using ash
                cmd += " %s" % test
                if self.debug:
                    # print the full command only once it has been built
                    print(cmd)

                if exists(testdir + ".abc"):
                    os.unlink(testdir + ".abc")
                child.sendline(cmd)
                child.expect(r"\(ash\)")
                if not exists(testdir + ".abc"):
                    print("ERROR: abc file %s.abc not created, cmd used to compile: %s" % (testdir, cmd))
                    self.ashErrors.append("abc file %s.abc not created, cmd used to compile: %s" % (testdir, cmd))
                total -= 1
                #print("%d remaining, %s" % (total, cmd))
    end_time = datetime.today()
Example #3
def guess_language_from_files(path):
    from glob import glob

    with cd(path) as _:
        if len(glob('*.[ch]')) > 0:
            return 'c'
        elif len(glob('*.cxx') + glob('*.hxx')) > 0:
            return 'cxx'
        else:
            return None
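The same check can be written without the cd() helper by joining the directory into each pattern; a minimal sketch (the function name is illustrative):

import os
from glob import glob

def guess_language_from_files_nocd(path):
    # Anchor the patterns to the directory instead of changing into it.
    if glob(os.path.join(path, '*.[ch]')):
        return 'c'
    if glob(os.path.join(path, '*.cxx')) + glob(os.path.join(path, '*.hxx')):
        return 'cxx'
    return None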
Example #5
def CR_bounds():
    """
    Cramer-Rao bounds for signal-to-noise of 100 spectra
    """
    filenames = glob("./gradient*npz") + glob("./Gradient_Spectra_elem/*npz")
    for fn in filenames:
        data = np.load(fn)
        gradient_value = data["gradient_value"]
        crb = 1.0 / np.sqrt(np.sum(gradient_value**2 * 1.e4))
        print(crb, fn)
Example #6
def build_incfiles(self, as_file):
    files = []
    (dir, file) = split(as_file)
    for p in self.parents(dir):
        shell = join(p, 'shell' + self.sourceExt)
        if isfile(shell):
            files.append(shell)
    (testdir, ext) = splitext(as_file)
    if not self.eval:
        for util in glob(join(testdir, '*' + self.sourceExt)) + glob(join(dir, '*Util' + self.sourceExt)):
            # escape $ so a later shell invocation does not expand it
            files.append(util.replace("$", "\\$"))
    return files
Example #7
def can_folderise(folder):
    """
    Check if corpus can be put into folders
    """
    import os
    from glob import glob
    if os.path.isfile(folder):
        return False
    fs = glob(os.path.join(folder, '*.txt'))
    if len(fs) > 1:
        if not any(os.path.isdir(x) for x in glob(os.path.join(folder, '*'))):
            return True
    return False
Example #8
def PDB_ITR():
    PDB_DIRS = sorted(os.listdir("pdb"))
    for pdb_dir in PDB_DIRS:
        os.system("mkdir -p tmscores/{}".format(pdb_dir))
        FILES = glob(os.path.join("pdb", pdb_dir, "*.pdb"))
        for f_pdb in sorted(FILES):
            yield f_pdb
Example #9
def step2b(mincol=15):

    fraclist = np.array([1,0.2,0.02,0.005,0.4,2.5])
    modellist = ['/d/monk/eigenbrot/WIYN/14B-0456/anal/models/bc03_{}_ChabIMF.fits'.format(i) for i in ['solarZ','004Z','0004Z','0001Z','008Z','05Z']]
    for p in range(6):
    
        basename = 'NGC_891_P{}'.format(p+1)
       
        for z in range(fraclist.size):
            name = glob('{}*_Z{:04}.dat'.format(basename,int(fraclist[z]*1000)))[0]
            print(name)
            try:
                tmp[z] = np.loadtxt(name)
            except UnboundLocalError:
                # first file: tmp does not exist yet, so allocate it now
                data = np.loadtxt(name)
                tmp = np.zeros((fraclist.size, data.shape[0], data.shape[1]))
                tmp[z] = data
        
        outfile = 'P{}_models.dat'.format(p+1)
        f = open(outfile,'w')

        bdx = np.argmin(tmp[:,:,mincol],axis=0)
        for i in range(tmp.shape[1]):
            f.write('{:5.3f} {:}\n'.format(fraclist[bdx[i]], modellist[bdx[i]]))
        f.close()
        del tmp

    return
Example #10
def step2():

    fraclist = np.array([1,0.2,0.02,0.005,0.4,2.5])
   
    for p in range(6):
    
        basename = 'NGC_891_P{}'.format(p+1)
       
        for z in range(fraclist.size):
            name = glob('{}*_Z{:04}.dat'.format(basename,int(fraclist[z]*1000)))[0]
            print(name)
            try:
                tmp[z] = np.loadtxt(name)
            except UnboundLocalError:
                # first file: tmp does not exist yet, so allocate it now
                data = np.loadtxt(name)
                tmp = np.zeros((fraclist.size, data.shape[0], data.shape[1]))
                tmp[z] = data

        bdx = np.argmin(tmp[:,:,15],axis=0)
        h = open(name,'r')
        head = h.readlines()[4]
        outname = '{}_fit.dat'.format(name.split('_Z')[0])
        f = open(outname,'w')
        f.write('# Generated on {}\n'.format(time.asctime()))
        f.write(head)
        for i in range(tmp.shape[1]):
            tmp[bdx[i],i,19] = fraclist[bdx[i]]
            f.write(str('{:11n}'+12*'{:13.3e}'+'{:7.2f}{:12.3f}'+4*'{:12.3e}'+'{:10.3f}'+'\n').format(*tmp[bdx[i],i,:]))
            
        f.close()
        h.close()
        del tmp

    return
Example #11
def get_clubs():
    data = []
    datae = []
    data = glob("Data/clubs/*.txt")
    for i in range(len(data)):
        a = []
        t = 0.0
        b = 0.0
        c = ""
        path = data[i].replace('\\', "/")
        infile = open(path, 'r')
        info = infile.read().replace(" ", "")
        info = info.replace('\n', ',').split(",")
        a.append(data[i].replace("Data/clubs\\", "").replace(".txt", ""))
        a.append(info[0])
        a.append(info[2] + " " + info[3])
        num = (len(info) - 1) // 4  # integer division: number of score entries
        for j in range(num):
            c = info[(j * 4) + 4]
            c = c.split("/")
            t += int(c[0])
            b += int(c[1])
        a.append(round((t / b) * 100.0, 2))
        a.append((info[4].split("/"))[1])

        datae.append(a)
        infile.close()
    #print(datae)
    return datae
Example #12
def _get_setup_modules(options,
                       isfile=os.path.isfile,
                       isdir=os.path.isdir,
                       path_join=os.path.join,
                       glob=glob.glob,
                       _setup_modules=_setup_modules):

    global _setup_path

    setup_path = options.setup_path.GetList()

    if setup_path == _setup_path:
        return _setup_modules

    setup_modules = []

    for p in setup_path:

        if isfile(p):
            if p not in setup_modules:
                setup_modules.append(p)

        elif isdir(p):
            for m in glob(path_join(p, '*.py')):
                if m not in setup_modules:
                    setup_modules.append(m)

    _setup_path = setup_path

    return setup_modules
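The default arguments above (isfile=os.path.isfile and so on) bind each lookup once, when the def statement executes, an old CPython micro-optimization for hot functions; a minimal illustration of the idiom:

def fast_len(items, _len=len):
    # _len was resolved once at definition time, so each call skips the
    # global/builtin lookup for len.
    return _len(items)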
Example #13
def read_red():
    data = []
    datae = []
    data = glob("Data/students/*.txt")
    #print(data)

    for i in range(len(data)):
        a = []
        t = 0.0
        b = 0.0
        c = ""
        path = data[i].replace('\\', "/")
        infile = open(path, 'r')
        info = infile.read().replace(" ", "")
        info = info.replace('\n', ',').split(",")
        a.append(data[i].replace("Data/students\\", "").replace(".txt", ""))
        a.append(info[0])
        a.append(info[1])
        a.append(info[2])
        num = (len(info) - 3) // 2  # integer division: number of score entries
        for j in range(num):
            c = info[(j * 2) + 4]
            c = c.split("/")
            t += int(c[0])
            b += int(c[1])
        perc = round((t / b) * 100)
        a.append(perc)
        if perc <= 70:
            datae.append(a)
        infile.close()
    return datae
Example #14
def backupFiles(taskFile, glob=glob.glob):
    root, ext = os.path.splitext(taskFile.filename())
    datePattern = '[0-9]' * 8
    timePattern = '[0-9]' * 6
    files = glob('%s.%s-%s.tsk.bak' % (root, datePattern, timePattern))
    files.sort()
    return files
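The '[0-9]' * 8 trick above just repeats a one-digit character class; a quick illustration (the file name is hypothetical):

from glob import glob

datePattern = '[0-9]' * 8  # '[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]'
timePattern = '[0-9]' * 6
# a backup named e.g. 'tasks.20240131-235959.tsk.bak' would match this pattern
print(glob('tasks.%s-%s.tsk.bak' % (datePattern, timePattern)))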
Example #15
def get_serials_info():
    """."""
    serials_info = {'ports': []}
    for port, desc, hwid in comports():
        if port:
            serials_info['ports'].append(port)
            info = {'port': port, 'description': desc, 'hwid': hwid}

            vid = ''
            pid = ''
            if hwid:
                hwid_infos = hwid.split()
                for hwid_info in hwid_infos:
                    if hwid_info.startswith('VID') and '=' in hwid_info:
                        vid_pid = hwid_info.split('=')[-1]
                        if ':' in vid_pid:
                            vid, pid = vid_pid.split(':')
                            vid = '0x' + vid.strip()
                            pid = '0x' + pid.strip()
                        break
            info['vid'] = vid
            info['pid'] = pid
            serials_info[port] = info

    os_name = sys_info.get_os_name()
    if not serials_info['ports'] and os_name == 'osx':
        for port in glob('/dev/tty.*'):
            serials_info['ports'].append(port)
            info = {'port': port, 'description': '', 'hwid': ''}
            info['vid'] = ''
            info['pid'] = ''
            serials_info[port] = info
    return serials_info
Example #16
def getFiles(basePath, app, fileName):
    if app == 'gw':
        Index, file = getLastIndexFile(basePath, fileName)
        Files = file
        return Index, file

    allAppDirs = [f for f in glob(os.path.join(basePath, app+'*')) if os.path.isdir(f)]
    folderRegEx = re.compile(r".*%s\d+" % app)
    indexs = []
    for i in range(0, len(allAppDirs)):
        if not re.match(folderRegEx, allAppDirs[i]):
            indexs.append(i)
    # pop from the end so that earlier indices stay valid
    for i in sorted(indexs, reverse=True):
        allAppDirs.pop(i)

    if len(allAppDirs) == 0:
        raise Exception("Can't find folders %s under %s." % (app, basePath))
    allAppDirs.sort(key=cmp_to_key(sortDirs))  # sortDirs is a cmp-style comparator; cmp_to_key from functools
    index = 1
    Files = ''
    if len(allAppDirs) == 1:
        Index, file = getLastIndexFile(allAppDirs[0]+'/conf/', fileName)
        Files = file
        return Index, Files
    elif len(allAppDirs) > 1:
        for index in range(0, len(allAppDirs) - 1):
            Index, file = getLastIndexFile(allAppDirs[index]+'/conf/', fileName)
            if index == 0:
                retIndex = Index
            Files += file + ', '
    Index, file = getLastIndexFile(allAppDirs[-1]+'/conf/', fileName)
    Files += file
    return retIndex, Files
Example #17
def import_all_from_folder(path, excludes=()):  # note: excludes is accepted but unused below
  import os
  import sys
  from importlib import import_module
  from glob import glob

  modules = {}
  base_path = os.path.dirname(path)

  if not os.path.exists(base_path):
    raise ImportError("Cannot import modules from %s. Path does not exist." % base_path)

  add_path = base_path not in sys.path
    
  if add_path:
    sys.path.append(base_path)

  for file_path in glob('%s/*.py' % base_path):
    dir_name, file_name = os.path.split(file_path)
    module_name = file_name[:-3]
    modules[module_name] = import_module(module_name)

  if add_path:
    sys.path.remove(base_path)

  return modules
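A usage sketch under an assumed layout (the plugins directory and module names are hypothetical):

# given ./plugins/foo.py and ./plugins/bar.py:
mods = import_all_from_folder('plugins/')  # os.path.dirname('plugins/') -> 'plugins'
print(sorted(mods))                        # ['bar', 'foo']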
Example #18
def process_images(self, docname, doctree):
    """
    Process and rewrite image URIs.
    """
    docdir = path.dirname(self.doc2path(docname, base=None))
    for node in doctree.traverse(nodes.image):
        # Map the mimetype to the corresponding image.  The writer may
        # choose the best image from these candidates.  The special key * is
        # set if there is only single candidate to be used by a writer.
        # The special key ? is set for nonlocal URIs.
        node['candidates'] = candidates = {}
        imguri = node['uri']
        if imguri.find('://') != -1:
            self.warn(docname, 'nonlocal image URI found: %s' % imguri,
                      node.line)
            candidates['?'] = imguri
            continue
        # imgpath is the image path *from srcdir*
        if imguri.startswith('/') or imguri.startswith(os.sep):
            # absolute path (= relative to srcdir)
            imgpath = path.normpath(imguri[1:])
        else:
            imgpath = path.normpath(path.join(docdir, imguri))
        # set imgpath as default URI
        node['uri'] = imgpath
        if imgpath.endswith(os.extsep + '*'):
            for filename in glob(path.join(self.srcdir, imgpath)):
                new_imgpath = relative_path(self.srcdir, filename)
                if filename.lower().endswith('.pdf'):
                    candidates['application/pdf'] = new_imgpath
                elif filename.lower().endswith('.svg'):
                    candidates['image/svg+xml'] = new_imgpath
                elif ".latex." in filename.lower():
                    candidates['latex'] = new_imgpath
                elif ".html." in filename.lower():
                    candidates['html'] = new_imgpath
                else:
                    try:
                        with open(filename, 'rb') as f:
                            imgtype = imghdr.what(f)
                    except OSError as err:
                        self.warn(docname, 'image file %s not '
                                  'readable: %s' % (filename, err),
                                  node.line)
                        continue
                    if imgtype:
                        candidates['image/' + imgtype] = new_imgpath
        else:
            candidates['*'] = imgpath
        # map image paths to unique image names (so that they can be put
        # into a single directory)
        for imgpath in candidates.values():
            self.dependencies.setdefault(docname, set()).add(imgpath)
            if not os.access(path.join(self.srcdir, imgpath), os.R_OK):
                self.warn(docname, 'image file not readable: %s' % imgpath,
                          node.line)
                continue
            self.images.add_file(docname, imgpath)
Example #19
def processDatadir(datadir, stu, bindir=None, dryrun=True):
	bestzip = None
	zips = glob(os.path.join(datadir, 'zips', '*tabblock*zip'))
	for zname in zips:
		if betterShapefileZip(zname, bestzip):
			bestzip = zname
	linksname = os.path.join(datadir, stu + '.links')
	mppb_name = os.path.join(datadir, stu + '.mppb')
	mask_name = os.path.join(datadir, stu + 'mask.png')
	mppbsm_name = os.path.join(datadir, stu + '_sm.mppb')
	masksm_name = os.path.join(datadir, stu + 'mask_sm.png')
	args1 = []
	if not os.path.exists(linksname):
		args1 += ['--links', linksname]
	if not os.path.exists(mppb_name):
		args1 += ['--rast', mppb_name, '--mask', mask_name,
			'--boundx', '1920', '--boundy', '1080']
	if args1:
		command = makeCommand(args1, bindir)
		if dryrun:
			print(' '.join(command))
		else:
			subprocess.Popen(command, shell=False, stdin=None)
	if not os.path.exists(mppbsm_name):
		args2 = ['--rast', mppbsm_name, '--mask', masksm_name,
			'--boundx', '640', '--boundy', '480', bestzip]
Example #20
def output_module_html(webpage_path):
    """Output an HTML page for each module"""

    icons_relpath = os.path.relpath(cellprofiler.icons.__path__[0])
    all_png_icons = glob(os.path.join(icons_relpath, "*.png"))
    icon_names = [os.path.basename(f)[:-4] for f in all_png_icons]

    help_text = """
<h2>Help for CellProfiler Modules</h2>
<ul>\n"""
    d = {}
    module_path = webpage_path
    if not (os.path.exists(module_path) and os.path.isdir(module_path)):
        try:
            os.mkdir(module_path)
        except OSError:
            raise ValueError("Could not create directory %s" % module_path)

    for module_name in sorted(cellprofiler.modules.get_module_names()):
        module = cellprofiler.modules.instantiate_module(module_name)
        location = os.path.split(
                module.create_settings.__func__.__code__.co_filename)[0]
        if location == cellprofiler.preferences.get_plugin_directory():
            continue
        if isinstance(module.category, str):
            module.category = [module.category]
        for category in module.category:
            if category not in d:
                d[category] = {}
            d[category][module_name] = module
        result = module.get_help()
        if result is None:
            continue
        result = result.replace('<body><h1>', '<body><h1>Module: ')

        # Replace refs to icons in memory with the relative path to the image dir (see above)
        result = re.sub("memory:", os.path.join("images", "").replace("\\", "\\\\"), result)

        # Check if a corresponding image exists for the module
        if module_name in icon_names:
            # Strip out end html tags so I can add more stuff
            result = result.replace('</body>', '').replace('</html>', '')

            # Include images specific to the module, relative to html files ('images' dir)
            LOCATION_MODULE_IMAGES = os.path.join('images', '%s.png' % module_name)
            result += '\n\n<div><p><img src="%s" width="50%%"></p></div>\n' % LOCATION_MODULE_IMAGES

            # Now end the help text
            result += '</body></html>'
        fd = open(os.path.join(module_path, "%s.html" % module_name), "w")
        fd.write(result)
        fd.close()
    for category in sorted(d.keys()):
        sub_d = d[category]
        help_text += "<li><b>%s</b><br><ul>\n" % category
        for module_name in sorted(sub_d.keys()):
            help_text += "<li><a href='%s.html'>%s</a></li>\n" % (module_name, module_name)
        help_text += "</ul></li>\n"
    help_text += "</ul>\n"
    return help_text
Example #21
def from_ganga(job_id):
    import os
    from glob import glob

    prefix = "$WORKDIR/gangadir/workspace/albarano/LocalXML/"
    pattern = prefix + str(job_id) + "/*/output/*.root"
    return glob(os.path.expandvars(pattern))
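Note the order of operations: os.path.expandvars substitutes the variables first, then glob matches against the expanded pattern. A minimal illustration with an assumed variable:

import os
from glob import glob

pattern = os.path.expandvars("$HOME/*.txt")  # e.g. '/home/me/*.txt'
print(glob(pattern))  # an unexpanded variable would simply match nothing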
Example #22
def get_index(pointing, data, datfile=None):

    if datfile is None:
        datfile = glob("{}/anal/indecies/NGC*P{}*dfkmid3*bands.dat".format(basepath, pointing))[0]
    print "Getting index info from {}".format(datfile)

    galdata = np.loadtxt(
        datfile,
        usecols=(0, 1, 5, 6),
        dtype={"names": ("aps", "bands", "index", "eqwidth"), "formats": ("S50", "S11", "f4", "f4")},
    )

    aps = np.unique(galdata["aps"])
    sar = [int(s.split("(")[-1].split(")")[0]) for s in aps]
    sidx = np.argsort(sar)
    aps = aps[sidx]
    sar = np.array(sar)[sidx]

    for i in range(aps.size):
        if sar[i] != data["ap"][i] + 1:
            print "WARNING: index ap {} does not match data ap {}".format(sar[i], data["ap"][i])
            raw_input("")

        idx = np.where(galdata["aps"] == aps[i])
        bands = bcc.eat_index(galdata["eqwidth"][idx])

        data["Hb"][i] = bands[0]
        data["HdA"][i] = bands[1]
        data["HgA"][i] = bands[2]
        data["HdF"][i] = bands[3]
        data["HgF"][i] = bands[4]
        data["Fe"][i] = bands[5]
        data["MgFe"][i] = bands[6]

    return data
Example #23
def get_library_location(package):
    # get abs path of a package in the library, rather than locally
    library_package_paths = glob(os.path.join(get_path('platlib'), '*'))
    sys.path = library_package_paths + sys.path
    package_path = get_loader(package).filename
    sys.path = sys.path[len(library_package_paths):]
    return package_path
Example #24
def clean_flist(raw_flist,s=False,lc=False):
    if not raw_flist:
        if (s or STRICT):
            raise Exception, "No file argument provided."
        else:
            perror("No file argument provided.")
            err("No file argument provided.")

    if not isinstance(raw_flist, list):
        raw_flist = [raw_flist]

    full_flist = []
    clean_list = []

    for f in raw_flist:
        full_flist.extend(glob(expand(f)))

    for entry in full_flist:
        valid = validate_path(entry)
        if exists(entry) and valid:
            clean_list.append(entry)
        elif not valid:
            warning("Invalid path: [%s] - skipping" % entry)
        else:
            warning("[%s] does not exist - skipping" % entry)

    if not clean_list:
        if (s or STRICT):
            raise Exception,"No such file or directory: %s" % raw_flist 
        else:
            warn("No such file or directory: [%s]" % raw_flist)

    return clean_list
Example #25
    def make_movie_file(self):
        """
        Create subdirectory based on casename, move all plot
        frame files to this directory, and generate
        an index.html for viewing the movie in a browser
        (as a sequence of PNG files).
        """
        # Make HTML movie in a subdirectory
        directory = self.casename
        if os.path.isdir(directory):
            shutil.rmtree(directory)   # rm -rf directory
        os.mkdir(directory)            # mkdir directory
        # mv frame_*.png directory
        for filename in glob('frame_*.png'):
            os.rename(filename, os.path.join(directory, filename))
        os.chdir(directory)        # cd directory
        self.plt.movie('frame_*.png', encoder='html',
                       output_file='index.html', fps=4)

        # Make other movie formats: Flash, Webm, Ogg, MP4
        codec2ext = dict(flv='flv', libx264='mp4', libvpx='webm',
                         libtheora='ogg')
        filespec = 'frame_%04d.png'
        movie_program = 'avconv'  # or 'ffmpeg'
        fps = 4  # referenced by the vars() substitution in cmd below
        for codec in codec2ext:
            ext = codec2ext[codec]
            cmd = '%(movie_program)s -r %(fps)d -i %(filespec)s '\
                  '-vcodec %(codec)s movie.%(ext)s' % vars()
            os.system(cmd)
        os.chdir(os.pardir)  # move back to parent directory
Example #26
def explosion(x, y):
    for file in glob("graphics/explosion_1/*.png"):
        img = image.load(file).convert_alpha()
        img = transform.scale(img, (200, 200))
        for _ in range(10):
            screen.blit(img, (x - 80, y - 80))
            display.update()
Example #27
def list_serial_ports():
    """."""
    serial_ports = [port for port, d, h in comports() if port]
    os_name = sys_info.get_os_name()
    if not serial_ports and os_name == "osx":
        for port in glob("/dev/tty.*"):
            serial_ports.append(port)
    return serial_ports
Example #28
def glob_tree(roots, patterns, exclude_patterns=None):
    """Recursive version of GLOB. Builds the glob of files while
    also searching in the subdirectories of the given roots. An
    optional set of exclusion patterns will filter out the
    matching entries from the result. The exclusions also apply
    to the subdirectory scanning, such that directories that
    match the exclusion patterns will not be searched."""

    if not exclude_patterns:
        exclude_patterns = []

    result = glob(roots, patterns, exclude_patterns)
    subdirs = [s for s in glob(roots, ["*"]) if s != "." and s != ".." and os.path.isdir(s)]
    if subdirs:
        result.extend(glob_tree(subdirs, patterns, exclude_patterns))
        
    return result
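glob here is the build system's own glob(roots, patterns, exclude_patterns) helper, not the standard library function. For comparison, the stdlib gets recursive matching (without exclusions) from its recursive flag; a sketch:

from glob import glob

# '**' matches any number of directory levels when recursive=True
cpp_sources = glob('**/*.cpp', recursive=True)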
Example #29
def processDir(srcDir, target):
	allFiles = glob(srcDir + sep + "*")
	debug("dir = " + srcDir + ", target = " + target)
	for aFile in allFiles:
		if isfile(aFile):
			debug("is file = " + aFile)
			if aFile.endswith(".jar") or aFile.endswith(".pom") or aFile.endswith(".asc") or aFile.endswith(".war"):
				copy2(aFile, target)
Example #30
def connect():
    global s
    patt = '/dev/ttyUSB*'
    for port in glob(patt):
        s = Serial(port, baudrate, timeout=timeout)
        break
    else:
        raise ValueError("mmM board not found: no serial port")
Example #31
def _read_data(data_path, channel):
    class_list = os.listdir(data_path)
    class_list.sort()
    n_classes = len(class_list)
    image_list = []
    image_holder = []
    label_list = []

    for i in range(n_classes):
        img_class = glob(os.path.join(data_path, class_list[i]) + '/*.*')
        image_list += img_class
        for j in range(len(img_class)):
            label_list += [i]

    if channel == "1":
        flags = 0
    else:
        flags = 1
    length_data = len(image_list)
    for j in range(length_data):
        img = cv2.imread(image_list[j], flags=flags)
        if channel == 1:
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            img = cv2.resize(img, (32, 32))
            img = img.astype(np.float32)
            img = np.reshape(img, [1, 32, 32, channel])
        else:
            img = img.resize(img, (32, 32))
            img = img.astype(np.float32)
        image_holder.append(img)

    image_holder = np.concatenate(image_holder, axis=0)
    label_holder = np.asarray(label_list, dtype=np.int32)

    idx = np.random.permutation(length_data)
    images = image_holder[idx]
    labels = label_holder[idx]

    ## preprocessing
    mean = np.mean(images, axis=(0, 1, 2))
    std = np.std(images, axis=(0, 1, 2))
    images = (images - mean) / std

    return images, labels
Example #32
    def update_hadoop_env(self):
        print("\nChecking hadoop-env.sh")
        try:
            if argv.hadoop_path:
                conf_base_path = argv.hadoop_path
            else:
                conf_base_path = glob(
                    '/opt/mapr/hadoop/hadoop-*.*.*/etc/hadoop/')[-1]
            hadoop_env_sh = os.path.join(conf_base_path, 'hadoop-env.sh')
            preunravel_hadoop_env = os.path.join(conf_base_path,
                                                 'hadoop-env.sh.preunravel')

            if not os.path.exists(preunravel_hadoop_env) and not argv.dry_test:
                print("Backup original hadoop-env.sh")
                copyfile(hadoop_env_sh, preunravel_hadoop_env)

            with open(hadoop_env_sh, 'r') as f:
                content = f.read()

            if self.do_hive and self.configs['hadoop-env'].split(
                    ':')[1] in content:
                print("{0} {1:>{width}}".format("HADOOP_CLASSPATH",
                                                "No change needed",
                                                width=80 -
                                                len('HADOOP_CLASSPATH')))
                if argv.verbose: print_verbose(self.configs['hadoop-env'])
            elif self.do_hive:
                print("{0} {1:>{width}}".format("HADOOP_CLASSPATH",
                                                "Missing value",
                                                width=80 -
                                                len('HADOOP_CLASSPATH')))
                if argv.verbose:
                    print_verbose('None', self.configs['hadoop-env'])
                if not argv.dry_test:
                    with open(hadoop_env_sh, 'a') as f:
                        print("appending HADOOP_CLASSPATH")
                        f.write('\n' + self.configs['hadoop-env'])
            else:
                print("Skip hadoop-env.sh")
        except Exception as e:
            print("Error: " + str(e))
            self.do_hive = False
Example #33
def par_crop(instance_size=511,
             num_threads=24,
             image_folder='data_folder',
             save_folder='siamrpn++_model'):
    dataDir = '.'
    crop_path = join(save_folder, 'crop{:d}'.format(instance_size))
    if not isdir(crop_path): makedirs(crop_path)
    videos = [
        x.split('/')[-2]
        for x in sorted(glob(join(image_folder, '*', '*.csv')))
    ]
    n_videos = len(videos)
    with futures.ProcessPoolExecutor(max_workers=num_threads) as executor:
        fs = [
            executor.submit(crop_video, video, image_folder, crop_path,
                            instance_size) for video in videos
        ]
        for i, f in enumerate(futures.as_completed(fs)):
            printProgress(i, n_videos, suffix='Done ', barLength=40)
Example #34
def add_posterior_predictive_data(resultdir, stats='hist'):
    """Generate posterior predictive data for all subjects in result and store
    in results file.
    """
    subjects = [
        int(os.path.basename(f)[1:3])
        for f in glob(os.path.join(resultdir, '*%s.log' % stats))
    ]

    for sub in subjects:
        print('\rProcessing subject %2d ...' % sub)

        # generate posterior predictive data
        try:
            choices_post, rts_post, data, model = (
                generate_posterior_predictive_data(resultdir, sub, stats,
                                                   'r+'))
        except IOError:
            warnings.warn("Skipping subject {} due to IOError!".format(sub))
Example #35
def event_list(startdate, enddate):
    event_list = []
    reapath = os.path.join(SEISAN_DATA, 'REA', DB)
    years = list(range(startdate.year, enddate.year + 1))
    for year in years:
        if year == enddate.year and year == startdate.year:
            months = list(range(startdate.month, enddate.month + 1))
        elif year == startdate.year:
            months = list(range(startdate.month, 13))
        elif year == enddate.year:
            months = list(range(1, enddate.month + 1))
        else:
            months = list(range(1, 13))
        for month in months:
            #print month
            dir = os.path.join(reapath, "%04d" % year, "%02d" % month)
            flist = glob(os.path.join(dir, "*L.S*"))
            event_list.extend(flist)
    return event_list
Example #36
def glob_tree(roots, patterns, exclude_patterns=None):
    """Recursive version of GLOB. Builds the glob of files while
    also searching in the subdirectories of the given roots. An
    optional set of exclusion patterns will filter out the
    matching entries from the result. The exclusions also apply
    to the subdirectory scanning, such that directories that
    match the exclusion patterns will not be searched."""

    if not exclude_patterns:
        exclude_patterns = []

    result = glob(roots, patterns, exclude_patterns)
    subdirs = [
        s for s in result if s != "." and s != ".." and os.path.isdir(s)
    ]
    if subdirs:
        result.extend(glob_tree(subdirs, patterns, exclude_patterns))

    return result
Example #37
def expand_globs(path_list, root_dir):
    files = []
    for path in path_list:
        if not os.path.isabs(path):
            path = os.path.join(root_dir, path)
        if os.path.islink(path):
            files.append(path.replace(root_dir + os.path.sep, ''))
        elif os.path.isdir(path):
            files.extend(os.path.join(root, f).replace(root_dir + os.path.sep, '')
                            for root, _, fs in os.walk(path) for f in fs)
        elif os.path.isfile(path):
            files.append(path.replace(root_dir + os.path.sep, ''))
        else:
            # Files are compared to the globs using / as separator, independently of the OS
            glob_files = [f.replace(root_dir + os.path.sep, '')
                          for f in glob(path)]
            files.extend(glob_files)
    files = [f.replace(os.path.sep, '/') for f in files]
    return files
Example #38
    def load_profile(self, widget):
        self.loadProfile_window = gtk.Window()
        self.loadProfile_window.set_border_width(1)
        self.loadProfile_window.set_default_size(330, 200)
        self.loadProfile_window.set_title("Load Profile")
        self.loadProfile_window.set_position(gtk.WIN_POS_CENTER)
        loadFixed = gtk.Fixed()
        self.loadProfile_window.add(loadFixed)

        # Scrollable treeview for displaying existing profiles
        self.loadList = gtk.ListStore(str, str)
        self.loadTreeview = gtk.TreeView(self.loadList)
        # Item Column
        column_item = gtk.TreeViewColumn()
        cell_icon = gtk.CellRendererPixbuf()
        cell_icon.set_property('cell-background', 'light blue')
        column_item.pack_start(cell_icon, False)
        column_item.set_attributes(cell_icon, stock_id=0)
        cell_item = gtk.CellRendererText()
        cell_item.set_property('cell-background', 'white')
        column_item.pack_start(cell_item, True)
        column_item.set_attributes(cell_item, text=1)
        #Set up treeview
        self.loadTreeview.append_column(column_item)
        self.loadTreeview.set_search_column(0)
        self.loadTreeview.set_size_request(300, 120)
        # Embed treeview in scrollable window
        sw = gtk.ScrolledWindow()
        sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
        sw.add(self.loadTreeview)
        loadFixed.put(sw, 15, 20)

        for f in glob("*.profile"):
            self.loadList.append([gtk.STOCK_FILE, str(f).split(".")[0]])

        confirmButton = gtk.Button("Confirm")
        confirmButton.connect("clicked", self.confirmLoad)
        loadFixed.put(confirmButton, 100, 160)
        cancelButton = gtk.Button("Cancel")
        cancelButton.connect("clicked", self.cancelLoad)
        loadFixed.put(cancelButton, 190, 160)

        self.loadProfile_window.show_all()
Example #39
def merge(path, blank_filename, output_filename):
    blank = PdfFileReader(open(blank_filename, "rb"))
    output = PdfFileWriter()

    for pdffile in glob('*.pdf'):
        if pdffile == output_filename:
            continue
        print("Parse '%s'" % pdffile)
        document = PdfFileReader(open(pdffile, 'rb'))
        for i in range(document.getNumPages()):
            output.addPage(document.getPage(i))

        if document.getNumPages() % 2 == 1:
            output.addPage(blank.getPage(0))
            print("Add blank page to '%s' (had %i pages)" % (pdffile, document.getNumPages()))
    print("Start writing '%s'" % output_filename)
    output_stream = open(output_filename, "wb")
    output.write(output_stream)
    output_stream.close()
Example #40
    def __init__(self,
                 dataset,
                 load_size,
                 channels,
                 augment_flag,
                 transforms=None):
        "Initialize variables"
        self.load_size = load_size
        self.channels = channels
        self.augment_flag = augment_flag
        if transforms:
            # note: this parameter shadows the (presumed) torchvision transforms
            # module, so the call works only if a module-like object is passed
            self.transforms = transforms.Compose(transforms)
        else:
            self.transforms = None

        #  Check if dataset exists
        check_exists(dataset)
        self.dataset_name = dataset
        self.train = glob("./dataset/{}/*.*".format(self.dataset_name))
Example #41
def write_sessions_tsv(bids_dir, sessions_dict):
    """Create <participant_id>_sessions.tsv files.

    Write the content of the function create scans dict in several TSV files
    following the BIDS specification.

    Args:
        bids_dir: path to the bids directory
        sessions_dict: output of the function create_sessions_dict
    """
    import os
    from glob import glob
    from os import path

    import pandas as pd

    bids_paths = glob(path.join(bids_dir, "sub-*"))

    for sp in bids_paths:
        bids_id = sp.split(os.sep)[-1]

        if bids_id in sessions_dict:
            session_df = pd.DataFrame(
                sessions_dict[bids_id]["M00"],
                index=[
                    "i",
                ],
            )
            cols = session_df.columns.tolist()
            cols = cols[-1:] + cols[:-1]
            session_df = session_df[cols]
        else:
            print(f"No session data available for {sp}")
            session_df = pd.DataFrame(columns=["session_id"])
            session_df["session_id"] = pd.Series("M00")

        session_df = session_df.fillna("n/a")
        session_df.to_csv(
            path.join(sp, bids_id + "_sessions.tsv"),
            sep="\t",
            index=False,
            encoding="utf8",
        )
Example #42
def teste():
    from glob import glob
    import os
    #import cv2
    #pngs = glob('./**/*.png', recursive=True)
    pngs = glob('/home/users/lucas/DataMining/Yolo/darknet/data/*.jpg')

    os.chdir("/home/users/lucas/DataMining/Yolo/darknet/")
    os.system("./darknet detect cfg/yolov3.cfg yolov3.weights")

    for j in pngs:
        #img = cv2.imread(j)
        #cv2.imwrite(j[:-3] + 'jpg', img)
        #os.remove(j)
        #os.system(j)
        #print j
        #os.system("chmod +x "+j)
        os.system(j)
        print "IMG OK"
Example #43
def execute():
    message, result = show_entry_fields()
    print(result)
    print(message)
    if result == True:
        vbasename = basename.get()
        vsequence = str(sequence.get())
        vdestname = destname.get()
        for i in vsequence.split(" "):
            if VERS == "NICOS":
                os.chdir(vdestname)
                filename = "%s%08d" % (vbasename, int(i))
                #                command="ln -s %s%08d.dat ." %(vbasename,int(i))
                #                os.system(command)
                #                print( "command:",command
                command = "ln -s ../../%s%08d.dat ." % (
                    vbasename[vbasename.rfind("/data/") + 1:], int(i))
                #                print( "command neu:",command
                os.system(command)
                globname = "%s*_%08d_*" % (vbasename, int(i))
                for item in glob(globname):
                    #command="ln -s %s ." %(item)
                    #os.system(command)
                    #print( "command glob:",command
                    command = "ln -s ../../%s ." % (item[item.rfind("/data/") +
                                                         1:])
                    os.system(command)


#                    print( "command glob neu:",command
            else:
                filename = "%s%s" % (vbasename, i)
                rsync = "rsync -av %s/ %s" % (filename, vdestname)
                print("execute", rsync)
                os.system(rsync)
        command = "cd %s;%s" % (vdestname, message)
        print("executing command:", command)
        print("executing system(%s)" % (message))
        os.system(command)
        print("finished")

    return
Example #44
def check_multi_nodule():
    pd_annotation = pd.read_csv(anno_path)
    count_image = 0

    seriesuid_temp = None
    slice_list = []
    multi_list = []
    for each_annotation in pd_annotation.iterrows():
        seriesuid = each_annotation[1].seriesuid
        coord_x = each_annotation[1].coordX
        coord_y = each_annotation[1].coordY
        coord_z = each_annotation[1].coordZ
        diameter_mm = each_annotation[1].diameter_mm

        mhd_name = '{}.mhd'.format(seriesuid)
        mhd_path = glob(os.path.join(working_path, '*', mhd_name),
                        recursive=True)[0]

        numpyImage, numpyOrigin, numpySpacing = load_itk_image(
            mhd_path)  # numpyImage.shape is (slice, w, h)

        # convert the nodule annotation from world coordinates to voxel coordinates
        worldCoord = np.asarray(
            [float(coord_x), float(coord_y),
             float(coord_z)])
        voxelCoord = worldToVoxelCoord(worldCoord, numpyOrigin, numpySpacing)

        slice = int(voxelCoord[2] + 0.5)

        if seriesuid_temp == seriesuid:
            if slice in slice_list:
                print('multi nodules in one image')
                multi_list.append((seriesuid_temp, slice))
            slice_list.append(slice)
        else:
            seriesuid_temp = seriesuid
            slice_list.clear()
            slice_list.append(slice)

        print(count_image)
        count_image += 1
    print(multi_list)
Example #45
def predict_separate(path,
                     test_path=config.TEST_PATH,
                     dat_file=config.SHAPE_PREDICTOR_DAT_PATH):
    print('Loading Model from: ', path)
    name_of_model = path.split('_')[1]
    model = load_model(path)
    images_processed = 0
    images_with_no_face = 0
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(dat_file)
    y_predProbs = np.empty((0, 7))
    y_true = []
    for label in config.CLASS_LABEL:
        files = sorted(glob(os.path.join(test_path, label.lower()) + '/*.jpg'))
        for images in files:
            img = cv2.resize(cv2.imread(images),
                             (config.TARGET_SIZE['CV2_LANDMARKS_RESIZE'][0],
                              config.TARGET_SIZE['CV2_LANDMARKS_RESIZE'][1]))
            img = cv2.cvtColor(src=img, code=cv2.COLOR_BGR2GRAY)
            faces = detector(img)
            if len(faces) == 0:
                images_with_no_face += 1
                y_predProbs = np.vstack(
                    (y_predProbs, np.zeros(config.NUM_CLASSES)))
            else:
                row = []
                for face in faces:
                    landmarks = predictor(image=img, box=face)
                    for n in range(0, 68):
                        row.append(landmarks.part(n).x)
                        row.append(landmarks.part(n).y)
                    break
                y_predProbs = np.vstack(
                    (y_predProbs,
                     model.predict(np.array(row).reshape(1, -1), verbose=1)))
            y_true.append(label)
            images_processed += 1
            print("No of Images Processed:", images_processed)
    print('\n\nTotal Images:', images_processed, '\nFace detected in: ',
          images_processed - images_with_no_face,
          'images\nFace not detected in: ', images_with_no_face, 'images')
    return y_predProbs, y_true
Example #46
    def task_progress(self):
        self.__progress_files = []
        self.__progress_users = []
        self.__progress_tasks = []
        self.__path = str(self.__user.get_org()).strip() + '//'

        self.__progress_window = Tk()
        self.__progress_window.title("Company Name Task Progress List")

        self.__progress_title = Label(self.__progress_window, text = "Task List",\
                             font = ("Arial Bold", 25))
        self.__progress_title.grid(column=2, row=0)

        count = 0
        count2 = 0
        for file in glob(path.join(self.__path, '*.txt')):
            self.__progress_files.append(FileHandler(file))
            self.__progress_users.append(
                self.__progress_files[count].openFile())
            count += 1
        for x in range(len(self.__progress_users)):
            for y in range(len(self.__progress_users[x].get_tasks())):
                self.__progress_task_text = (str(self.__progress_users[x].get_name()).strip() \
                                + '(' + str(self.__progress_users[x].get_team()).strip() + ')' \
                                + ': ' + str(self.__progress_users[x].get_task(y).get_objective()) \
                                + ' | ' + str(self.__progress_users[x].get_task(y).get_urgency()) \
                                + ' | ' + str(self.__progress_users[x].get_task(y).get_date_due()) \
                                + ' | ' + str(self.__progress_users[x].get_task(y).get_status()))
                if (str(self.__progress_users[x].get_task(
                        y).get_reported()).strip() == "True"):
                    self.__progress_task_text += ' |  Reported Completed'
                elif (str(self.__progress_users[x].get_task(
                        y).get_reported()).strip() == "False"):
                    self.__progress_task_text += ' |  Not Reported Completed'
                self.__progress_tasks.append(
                    Label(self.__progress_window,
                          text=self.__progress_task_text))
                self.__progress_tasks[count2].grid(column=x, row=y + 1)
                count2 += 1

        size = 300 * count
        self.__progress_window.geometry(str(size) + 'x300')
Example #47
def addOutputShouldBeToSolutions():
    from glob import glob
    import subprocess, re
    for tdir in solutionDirs:
        dirmsg = tdir + "\n" + '=' * len(tdir)
        with visitDir(os.path.join(ROOT_DIR, tdir)):
            for sfile in glob("Solution-*.scala"):
                msg = sfile
                with open(sfile) as f:
                    solution = f.read()
                if "OUTPUT_SHOULD" not in solution:
                    if dirmsg:
                        print(dirmsg)
                        dirmsg = None
                    if msg:
                        print(msg)
                        msg = None
                    with open(sfile, 'w') as f:
                        f.write(solution + output_should_be)
                    subprocess.call([SUBLIME, sfile])
Example #48
def readAllSeqLabelFigure(folderpath):
    file_paths = glob(folderpath + "*.tsv")
    all_texts = []
    all_labels = []
    all_figures = []
    all_figure_appearance = []
    str_seq_lens = []
    for file_name in file_paths:
        str_seqs, label_seqs, figure_spans, figure_appearances, sentenceNums = readSeqLabelFigure(
            file_name)
        str_seqs, label_seqs, figure_spans, figure_appearances = clause2sentence(
            str_seqs, label_seqs, figure_spans, figure_appearances,
            sentenceNums)
        all_texts.extend(str_seqs)
        all_labels.extend(label_seqs)
        all_figures.extend(figure_spans)
        all_figure_appearance.extend(figure_appearances)
        str_seq_lens.append(len(str_seqs))
        print(file_name, len(str_seqs), len(all_texts))
    return all_texts, all_labels, all_figures, all_figure_appearance, str_seq_lens
Example #49
def createFeatureDataset(nodules_files=None):
    if nodules_files is None:
        #noddir = "/Users/weichaozhou/Documents/Boston University/EC500/DSB3Tutorial/luna2016/"
        noddir = "../Luna_Data/full_data_mskextraction/"
        nodules_files = glob(noddir + "masks*.npy")
    truth_d = pickle.load(open("truthdict.pkl", 'rb'))
    num_of_features = 9
    features = np.zeros((len(nodules_files), num_of_features))
    truth = np.zeros((len(nodules_files)))
    for i, f in enumerate(nodules_files):
        patient_ID = f.replace(".", "_").split("_")[-2]
        try:
            truth[i] = truth_d[int(patient_ID)]
        except KeyError:
            truth[i] = 0
        print(f)
        features[i] = getRegionMetricRow(f)

    np.save("dataY.npy", truth)
    np.save("dataX.npy", features)
Example #50
def images_in_list(path):
    os.chdir(path)
    imglist = []

    # index into each glob result directly; indexing imglist here would hit
    # entries from earlier extensions once the list holds more than one type
    for pattern in ("*png", "*jpg", "*jpeg"):
        for name in glob(pattern):
            imglist.append(name)
            img_medium_color(name)
Example #51
def create_report(path='.'):
    paths = glob('*/stat*')  # note: the path argument is not used here

    stats = []
    for p in paths:
        stats.append(json.load(open(p, 'r')))
        stats[-1]['path'] = p

    stats = pd.DataFrame(stats)

    stats['kind'] = stats['path'].apply(
        lambda x: x.split('/')[1].split('_')[1])
    stats['it'] = stats['path'].apply(lambda x: x.split('/')[0].strip('it'))

    stats.loc[stats['kind'] == 'test', 'it'] = 999
    stats['it'] = stats['it'].astype(int)
    stats.loc[stats['kind'] == 'fit', 'it'] += 1
    stats = stats.sort_values(['it', 'kind'])
    stats.loc[stats['kind'] == 'test', 'it'] = -1
    stats = stats.reset_index(drop=True)
    stats = stats.drop(len(stats) - 2,
                       axis=0).drop('path', axis=1).drop('mean deviation',
                                                         axis=1)

    stats.columns = [{
        'it': 'Iteration',
        'kind': 'Kind'
    }.get(x,
          x.upper() + ' (eV)') for x in stats.columns]
    stats.loc[stats['Kind'] == 'test', 'Iteration'] = ''
    stats.index = [
        k + '_' + str(it) for k, it in zip(stats['Kind'], stats['Iteration'])
    ]
    stats.index = [{'test_': 'test'}.get(i, i) for i in stats.index]
    stats['Kind'] = stats['Kind'].apply(lambda x: {
        'sc': 'Self-consistent',
        'fit': 'Fitting',
        'test': 'Testing'
    }[x])

    return stats
Example #52
def load_data_by_id(id: int, video_df):
    """Given an frame ID, and a dataset description"""
    video = video_df[(video_df.start < id) & (video_df.end > id)]
    if len(video) == 0:
        return None, None, None, None

    paths = glob(
        "./data/*/{videoID}.npz".format(videoID=video.iloc[0].videoID))
    if len(paths) == 0:
        return None, None, None, None

    path = paths[0]
    frame_id = int(id - video.start)
    q_color_images, q_bounding_box, q_landmarks_2d, q_landmarks_3d = load_data(
        path)
    return (
        q_color_images[..., frame_id],
        q_bounding_box[..., frame_id],
        q_landmarks_2d[..., frame_id],
        q_landmarks_3d[..., frame_id],
    )
Example #53
def renameWidget(srcWidget, oldName, newName):
    if oldName == newName:
        return
    # first rename parent if possible
    newPath = os.path.dirname(srcWidget) + "/" + newName
    os.system("mv {} {}".format(srcWidget, newPath))

    # now we need to rename each of the files
    # and after renaming replace the instances of oldName in the data structures and python script with newName
    oldFiles = glob("{}/{}.*".format(newPath, oldName))
    for oldFile in oldFiles:
        basename, extension = os.path.splitext(oldFile)
        newFile = newPath + "/" + newName + extension
        os.rename(oldFile, newFile)
        if extension == ".py":
            replaceNamePy(newFile, oldName, newName)
        else:
            replaceName(newFile, "name", newName)
    oldPyPath = "{}/{}.py".format(srcWidget, oldName)
    newPyPath = "{}/{}.py".format(newPath, newName)
    renameWidgetInToolDock(oldPyPath, newPyPath)
Example #54
def createFeatureDataset(nodules_files=None):
    if nodules_files is None:
        noddir = "../data/tutorial4/"
        nodules_files = glob(noddir + "masks*.npy")
    truth_d = pickle.load(open("truthdict.pkl", 'rb'))  # pickle files must be opened in binary mode
    num_of_features = 9
    features = np.zeros((len(nodules_files), num_of_features))
    truth = np.zeros((len(nodules_files)))

    for i, f in enumerate(nodules_files):
        patient_ID = f.replace(".", "_").split("_")[2]
        try:
            truth[i] = truth_d[int(patient_ID)]
        except KeyError:
            truth[i] = 0
        print(f)
        features[i] = getRegionMetricRow(f)

    np.save("dataY.npy", truth)
    np.save("dataX.npy", features)
Example #55
    def __init__(
            self,
            casename='tmp',  # prefix in filenames
            umin=-1,
            umax=1,  # fixed range of y axis
            pause_between_frames=None,  # movie speed
            backend='matplotlib',  # or 'gnuplot'
            screen_movie=True,  # show movie on screen?
            every_frame=1):  # show every_frame frame
        self.casename = casename
        self.yaxis = [umin, umax]
        self.pause = pause_between_frames
        module = 'scitools.easyviz.' + backend + '_'
        # exec('import ...') does not create a local binding in Python 3;
        # import the module explicitly instead
        import importlib
        self.st = importlib.import_module(module)
        self.screen_movie = screen_movie
        self.every_frame = every_frame

        # Clean up old movie frames
        for filename in glob('frame_*.png'):
            os.remove(filename)
Example #56
def count_freq(limit=0):
    X = []
    Y = []
    max_words = word_dic["_MAX"]
    cat_names = []
    for cat in os.listdir(root_dir):
        cat_dir = root_dir + "/" + cat
        if not os.path.isdir(cat_dir): continue
        cat_idx = len(cat_names)
        cat_names.append(cat)
        files = glob(cat_dir+"/*.wakati")
        i = 0
        for path in files:
            print(path)
            cnt = count_file_freq(path)
            X.append(cnt)
            Y.append(cat_idx)
            if limit > 0:
                if i > limit: break
                i += 1
    return X,Y
Example #57
    def test_can_resume_job_twice(self):
        from glob import glob
        import time

        job_bundle_name, _ = self._submit_and_schedule_job()
        self._pause_job(job_bundle_name)

        time.sleep(self.wait_time)

        # First Request to resume
        self._resume_job(job_bundle_name)
        # Check if second request to resume produces response as expected
        response = self._resume_job(job_bundle_name)

        time.sleep(self.wait_time)

        runs_from_scheduled_job = glob(
            f'{self.archives_dir_path}/{job_bundle_name}_*')

        self.assertEqual(204, response.status_code)
        self.assertIn(len(runs_from_scheduled_job), [3, 4, 5])
Example #58
def main():
    parser = argparse.ArgumentParser(description='PyTorch Wikitext-2 RNN/LSTM Language Model')
    parser.add_argument('--gt_file', type=str, default='data/eltec_gt_splits/eng/test.txt',
                        help='path to the gold-standard file')
    parser.add_argument('--results_dir', type=str, default='data/results',
                        help='directory containing the result files')
    args = parser.parse_args()
    print(args)

    gold = [l.rstrip().split('\t') for l in open(args.gt_file)]
    _, gold = zip(*gold)
    
    for fn in glob(f'{args.results_dir}/*.txt'):
        print('=' * 64)
        print(f'-> results for {fn}')

        silver = [l.rstrip().split('\t') for l in open(fn)]
        _, silver = zip(*silver)

        report = classification_report(gold, silver)
        print(report)
Example #59
def packMap(dest, folder, mode=-1, keys=0):
    os.chdir(folder)
    assets = set()
    for file in glob("*.osu"):
        try:
            f = open(folder + file, 'r', encoding="utf-8")
            lines = f.readlines()
            f.close()
            if (-1 == mode or getMode(lines) == mode):
                checks = True
                if (3 == mode and 0 != keys and keys != getCS(lines)):
                    checks = False

                if checks:
                    assets.add(getAudio(lines))
                    assets.add(file)
                    addAll(assets, getDesignAssets(lines))
                    packAssets(dest, assets, folder)
        except Exception as e:
            errors.append("Error while reading \"" + folder + file + "\" (" +
                          str(e) + ")")
Example #60
def curatecores():
    for file in glob('*_unique.txt'):
        named = file.rsplit("_", 1)[0]
        outtie = open(named + '_unique-curated.txt', 'w')
        with open(file, 'r') as innie:
            for line in innie:
                if ">" in line:
                    outtie.write(line)
                elif "TRINITY" in line:
                    contigtag = line.split(" ")[0]
                    spacey = line.split(" ")[1:]
                    putrid = [tears for tears in spacey if "GO" in tears]
                    print "file: " + named
                    purrid = str(putrid)
                    print purrid
                    twosie = purrid.split(";")[1]
                    print "twosie: " + twosie
                    outtie.write(twosie)
                    outtie.write("\n")
                else:
                    continue