def _create_node_path(self):
        """Recursively creates the underlying path for our node registration.

        Note, if the path is multi-levels, does not apply an ACL to anything
        but the deepest level of the path. For example, on
        /foo/bar/baz/host:22, the ACL would only be applied to /foo/bar/baz,
        not /foo or /foo/bar.

        Note, no exceptions are caught here. This method is really meant to be
        called only by _create_node(), which handles the exceptions behavior.
        """
        # Strip the path down into a few components...
        (path, node) = split(self._path)

        # Count the number of levels in the path. If it's >1, then we split
        # the path into a 'root' path and a 'deep' path. We create the
        # 'root' path with no ACL at all (the default OPEN acl). The
        # final path that will hold our node registration will get created
        # with whatever ACL settings were used when creating the Kazoo
        # connection object.
        if len(filter(None, path.split('/'))) > 1:
            (root_path, deep_path) = split(path)
            self._zk.retry(
                self._zk.ensure_path, root_path,
                acl=security.OPEN_ACL_UNSAFE)

        # Create the final destination path folder that the node will be
        # registered in -- and allow Kazoo to use the ACL if appropriate.
        self._zk.retry(self._zk.ensure_path, path)
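# A minimal, hypothetical sketch (plain os.path, no ZooKeeper) of how the
# docstring's example path "/foo/bar/baz/host:22" is peeled apart above.
from os.path import split

full_path = "/foo/bar/baz/host:22"
path, node = split(full_path)                # ("/foo/bar/baz", "host:22")
if len([p for p in path.split("/") if p]) > 1:
    root_path, deep_path = split(path)       # ("/foo/bar", "baz")
    # root_path would get the OPEN ACL; `path` would get the session ACL
print(path, node)                            # /foo/bar/baz host:22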
Example #2
def calc_occurs(target, result, field_name, field_type, field_length):
    """
    Calculates frequencies on a field in target (table, feature, shp) and returns
    a table of results.
    """
    # build the (temporary) dictionary of occurrences/frequencies
    dico_freq = {}
    # create the result (output) table
    new_table(path.split(result)[0], path.split(result)[1])
    # cursors to read (r) and write (w) the data row by row
    curs_r = SearchCursor(target, "", "", field_name)
    curs_w = InsertCursor(result)
    # add the 2 fields to the result table
    add_field(result, "OCCUR", field_type, "", "", field_length)
    add_field(result, "FREQ", "SHORT")
    # compute counts and add them to the intermediate dictionary
    for obj in curs_r:
        value = obj.getValue(field_name)
        if dico_freq.has_key(value):
            dico_freq[value] = dico_freq.get(value)+1
        else:
            dico_freq[value] = 1
    del curs_r
    # update the result table
    for occur in dico_freq.keys():
        row = curs_w.newRow()
        row.OCCUR = occur
        row.FREQ = dico_freq.get(occur)
        curs_w.insertRow(row)
    # clean up the cursors (to avoid data locks = .lock files)
    del row, curs_w
    # end of function
    return dico_freq
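# A hedged, arcpy-free sketch of the same frequency count performed above,
# using made-up field values.
from collections import Counter

values = ["road", "river", "road", "lake", "road"]   # hypothetical field values
dico_freq = Counter(values)                          # Counter({'road': 3, 'river': 1, 'lake': 1})
for occur, freq in dico_freq.items():
    print(occur, freq)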
Example #3
 def hasChildren(self, index):
     QApplication.setOverrideCursor(Qt.WaitCursor)
     if index.isValid():
         item = self.itemFromIndex(index)
         path = item.path
         if item.hasChildren():
             children = True
         elif item.isClass or item.isFunction:
             children = False
         elif isdir(path):
             paths = [join(path, p) for p in listdir(path)]
             paths = [p for p in paths if isPackage(p) or isSource(p)]
             paths.sort()
             for key, subiter in groupby(paths, lambda x:splitext(x)[0]):
                 pth = sorted(subiter)[0]
                 item.appendRow(SysPathItem(pth, split(pth)[1]))
             children = bool(paths)
         elif splitext(path)[1] in pyexts:
             contents = readModule(self.dottedName(item), split(path)[0])
             for name, obj in sorted(contents.items()):
                 item.appendRow(SysPathItem(path, name, obj))
             children = bool(contents)
         else:
             children = False
     else:
         children = True
     QApplication.restoreOverrideCursor()
     return children
def lookForFiles() :
    directory, output, volume, Cgeo = getConfTxt()

    isThereACalibrationFile = False
    for path in glob.glob(directory+'*'):
        path = windowsFilenamesSuck(path)
        if path.find('jsc_') > 0 :
            isThereACalibrationFile = True

    CEFiles = 0
    CEsuns = []
    for path in glob.glob(directory+'*'):
        path = windowsFilenamesSuck(path)
        if path.find('CE_') > 0 :
            CEFiles += 1
            CEsuns.append(float(path.split("_")[-1][:-9]))
    # COUNT THE NUMBER OF TPC FILES PRESENT IN THE INPUT DIR
    TPCFiles = 0
    TPCsuns =  []
    for path in glob.glob(directory+'*'):
        path = windowsFilenamesSuck(path)
        if path.find('TPC_') > 0 :
            TPCFiles += 1
            TPCsuns.append(float(path.split("_")[-1][:-9]))
    # COUNT THE NUMBER OF TPV FILES PRESENT IN THE INPUT DIR
    TPVFiles = 0
    TPVsuns = []
    for path in glob.glob(directory+'*'):
        path = windowsFilenamesSuck(path)
        if path.find('TPV_') > 0 :
            TPVFiles += 1
            TPVsuns.append(float(path.split("_")[-1][:-9]))
    return isThereACalibrationFile, CEFiles, CEsuns, TPCFiles, TPCsuns, TPVFiles, TPVsuns
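# Note that `path` above is a plain string from glob, so path.split("_") is
# str.split, not os.path.split. A hedged sketch with a made-up filename whose
# shape matches the "[:-9]" slice (a fixed 9-character " suns.txt" suffix is
# assumed here):
fname = "/data/CE_0.5 suns.txt"
tail = fname.split("_")[-1]      # "0.5 suns.txt"
print(float(tail[:-9]))          # 0.5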
Example #5
def requirements(filename, module):
    '''
    '''
    head, tail = split(realpath(filename))
    
    while tail and tail != module:
        head, tail = split(head)
    
    content = open(filename).read()
    requirements, seen = set(), set()
    
    for match in modules(content):
        parts = match.group().split('.')
        
        for index in range(1, len(parts)):
            reqname = sep.join(parts[:index+1]) + '.js'
            reqpath = join(head, reqname)
            
            if reqpath in seen:
                continue
            
            seen.add(reqpath)
            
            if not exists(reqpath):
                continue
            
            if realpath(filename) == realpath(reqpath):
                continue
                
            requirements.add(reqname)

    return list(requirements)
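# A hedged illustration of the inner loop above: a hypothetical dotted
# reference "lib.ui.button" expands to the candidate .js files probed on disk.
from os import sep

parts = "lib.ui.button".split(".")
candidates = [sep.join(parts[:i + 1]) + ".js" for i in range(1, len(parts))]
print(candidates)   # ['lib/ui.js', 'lib/ui/button.js'] on POSIX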
Example #6
def make_meta_file(file, url, params = None, flag = Event(), 
                   progress = lambda x: None, progress_percent = 1, fileCallback = lambda x: None, gethash = None, filelist = None):
        
    if params is None:
        params = {}
    if 'piece_size_pow2' in params:
        piece_len_exp = params['piece_size_pow2']
    else:
        piece_len_exp = default_piece_len_exp
    if 'target' in params and params['target']:
        f = join(params['target'], split(normpath(file))[1] + '.torrent')
    else:
        a, b = split(file)
        if b == '':
            f = a + '.torrent'
        else:
            f = join(a, b + '.torrent')
            
    if piece_len_exp == 0:  # automatic
        size = calcsize(file, filelist)
        if   size > 8L*1024*1024*1024:   # > 8 gig =
            piece_len_exp = 21          #   2 meg pieces
        elif size > 2*1024*1024*1024:   # > 2 gig =
            piece_len_exp = 20          #   1 meg pieces
        elif size > 512*1024*1024:      # > 512M =
            piece_len_exp = 19          #   512K pieces
        elif size > 64*1024*1024:       # > 64M =
            piece_len_exp = 18          #   256K pieces
        elif size > 16*1024*1024:       # > 16M =
            piece_len_exp = 17          #   128K pieces
        elif size > 4*1024*1024:        # > 4M =
            piece_len_exp = 16          #   64K pieces
        else:                           # < 4M =
            piece_len_exp = 15          #   32K pieces
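# A hedged sketch of the .torrent output-name logic near the top of
# make_meta_file, with a made-up input path.
from os.path import join, normpath, split

file = "/downloads/isos/ubuntu.iso"
print(split(normpath(file))[1] + ".torrent")   # ubuntu.iso.torrent
a, b = split(file)
print(join(a, b + ".torrent"))                 # /downloads/isos/ubuntu.iso.torrent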
def main():
# Make a copy or duplicate of an existing file
	if path.exists("textfile.txt"):
		# Get the path of the file in the current directory
		src = path.realpath("textfile.txt")
		# Separate the path part from the filename
		head, tail = path.split(src)
		print "path: " + head
		print "file: " + tail

		# Now, make a backup file
		dst = src + ".bak"
		# Then use the shell to make a copy of the file
		shutil.copy(src,dst)
		# If you want to copy over perms, modification times, and other data
		shutil.copystat(src,dst)

		# Rename the original file
		os.rename("textfile.txt", "newfile.txt")

		# Put things into a ZIP archive
		root_dir,tail = path.split(src)
		shutil.make_archive("archive", "zip", root_dir)

		# More control over ZIP files
		with ZipFile("testzip.zip", "w") as newzip:
			newzip.write("newfile.txt")
			newzip.write("textfile.txt.bak")
def test(request):
    test_file = path.split(path.dirname(str(request.fspath)))[1] + ".py"
    test_dir = path.split(str(request.fspath))[0]
    test_cmd = ["mpirun", "-n", "2", "python", path.join(test_dir, test_file)]

    handle = subprocess.Popen(test_cmd, cwd=test_dir)
    assert handle.wait() == 0
def SemSave(rep):
    SEM=getcwd()
    nvdir=op.join(SEM,rep)
    if not path.isdir(rep):
        # Copy of the entire OUTPUT_FILES directory to the new directory
        shutil.copytree(op.join(SEM,'OUTPUT_FILES'),nvdir,symlinks=False)
        # Copy of Par_file file
        shutil.copyfile(op.join(SEM,'DATA','Par_file'),op.join(nvdir,'Par_file'))
        # Copy of SOURCE file
        shutil.copyfile(op.join(SEM,'DATA','SOURCE'),op.join(nvdir,'SOURCE'))
        # Par_file reading
        filename=SEM+'/DATA/Par_file'
        f = file(filename,'r')
        lignes= f.readlines()
        f.close()
        # Save stations if generated
        if GetValuePar('generate_STATIONS',lignes)=='.true.':
            shutil.copyfile(op.join(SEM,'DATA','STATIONS'),op.join(nvdir,'STATIONS'))
        # Save configuration files
        if GetValuePar('read_external_mesh',lignes)=='.true.':
            fic=GetValuePar('mesh_file',lignes)
            shutil.copyfile(fic,op.join(nvdir,op.split(fic)[1]))
            fic=GetValuePar('nodes_coords_file',lignes)
            shutil.copyfile(fic,op.join(nvdir,op.split(fic)[1]))
            fic=GetValuePar('materials_file',lignes)
            shutil.copyfile(fic,op.join(nvdir,op.split(fic)[1]))
            fic=GetValuePar('free_surface_file',lignes)
            shutil.copyfile(fic,op.join(nvdir,op.split(fic)[1]))
            fic=GetValuePar('absorbing_surface_file',lignes)
            shutil.copyfile(fic,op.join(nvdir,op.split(fic)[1]))
        else:
            fic=GetValuePar('interfacesfile',lignes)
            shutil.copyfile(op.join(SEM,'DATA',fic),op.join(nvdir,fic))
    else:
        print 'Unable to save, directory /'+rep+' already exists. Change name !'
Example #10
    def _get_sub_patterns(self, pattern):
        """Extract sub-patterns from the leading path of `pattern`.

        The right-most path component is successively peeled off until there
        are no patterns left.
        """
        if pattern in self._paths["sub_patterns"]:
            return self._paths["sub_patterns"][pattern]

        head, tail = op.split(pattern)
        if not tail:
            # Pattern ended with a separator. Take the first directory as the
            # base.
            head, tail = op.split(head)

        sub_patterns = []
        seen_magic = glob.has_magic(tail)
        while head:
            new_head, tail = op.split(head)
            if seen_magic and not glob.has_magic(head):
                break
            elif not seen_magic and glob.has_magic(tail):
                seen_magic = True

            if seen_magic:
                sub_patterns.append(head + op.sep)
            head = new_head
        self._paths["sub_patterns"][pattern] = sub_patterns
        return sub_patterns
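# A minimal illustration of the peeling loop above: op.split strips one
# trailing component per iteration. The pattern is a made-up example.
import os.path as op

head = "data/*/run*/results.json"
while head:
    head, tail = op.split(head)
    print(repr(head), repr(tail))
# 'data/*/run*' 'results.json'
# 'data/*' 'run*'
# 'data' '*'
# '' 'data'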
 def convertPath(self, path):
     """Converts the path to the platform-specific form (separator)."""
     import os, os.path
     if os.name == 'posix':
         return "/"+apply( os.path.join, tuple(path.split('/')))
     elif os.name == 'nt':
         return apply( os.path.join, tuple(path.split('/')))
Example #12
def load_local_pkg(fpath):
    """Generator producing all modules under fpath

    Parameters
    ----------
    fpath: string
        File path to the package to load.

    Returns
    -------
    generator
        `module` for each module found in the package.
    """
    package_fpath, package = path.split(fpath)
    package_fpath_len = len(package_fpath) + 1
    sys_path = copy(sys.path)
    sys.path.insert(0, package_fpath)
    try:
        for module_fpath in folder_traverse(
            fpath, basename_regex='[^_].+\.py$', path_exclude_regex='tests'
        ):
            folder_path, fname = path.split(module_fpath[package_fpath_len:])
            module_path = folder_path.split('/')
            module_path.append(path.splitext(fname)[0])
            module_path = '.'.join(module_path)
            try:
                module = import_module(module_path)
            except Exception:
                logging.debug('Cannot load module "{}"'.format(module_path))
            else:
                yield module
    except Exception as exception:
        logging.debug('Exception occurred: "{}'.format(exception))
    finally:
        sys.path = sys_path
Example #13
    def get_sqlmap(self):
        """
        Get the path to the sqlmap scanner.

        :returns: Sqlmap scanner file path.
        :rtype: str

        :raises RuntimeError: Sqlmap scanner or config file not found.
        """

        # Get the path to the Sqlmap scanner.
        sqlmap_script = Config.plugin_args["exec"]
        if sqlmap_script and exists(sqlmap_script):
            sqlmap_dir = split(sqlmap_script)[0]
            sqlmap_dir = abspath(sqlmap_dir)
        else:
            sqlmap_dir = split(__file__)[0]
            sqlmap_dir = join(sqlmap_dir, "sqlmap")
            sqlmap_dir = abspath(sqlmap_dir)
            sqlmap_script = join(sqlmap_dir, "sqlmap.py")
            if not sqlmap_script or not exists(sqlmap_script):
                sqlmap_script = "/usr/bin/sqlmap"
                if not exists(sqlmap_script):
                    sqlmap_script = Config.plugin_args["exec"]
                    msg = "Sqlmap not found in the PATH environment variable"
                    if sqlmap_script:
                        msg += ". File: %s" % sqlmap_script
                    Logger.log_error(msg)
                    raise RuntimeError(msg)

        return sqlmap_script
Example #14
def egg_info_for_url(url):
    scheme, server, path, parameters, query, fragment = urlparse.urlparse(url)
    base = urllib2.unquote(path.split('/')[-1])
    if server=='sourceforge.net' and base=='download':    # XXX Yuck
        base = urllib2.unquote(path.split('/')[-2])
    if '#' in base: base, fragment = base.split('#',1)
    return base,fragment
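# Hedged usage sketch of the helper above with a made-up URL: the last path
# component becomes the candidate filename and the fragment is kept separately.
print(egg_info_for_url("http://example.com/dist/Foo-1.0.tar.gz#md5=abcdef"))
# -> ('Foo-1.0.tar.gz', 'md5=abcdef')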
Example #15
    def build_mo(self):
        """Compile .mo files from available .po files"""
        import subprocess
        import gettext
        from translate.storage import factory

        print "Preparing localization files"
        for po_filename in glob.glob(path.join('po', 'pootle', '*', 'pootle.po')):
            lang = path.split(path.split(po_filename)[0])[1]
            lang_dir = path.join('mo', lang, 'LC_MESSAGES')
            mo_filename = path.join(lang_dir, 'django.mo')

            try:
                store = factory.getobject(po_filename)
                gettext.c2py(store.getheaderplural()[1])
            except Exception, e:
                print "skipping %s, probably invalid header: %s" % (lang, e)

            try:
                if not path.exists(lang_dir):
                    os.makedirs(lang_dir)
                print "compiling %s language" % lang
                subprocess.Popen(['msgfmt', '-c', '--strict', '-o', mo_filename, po_filename])
            except Exception, e:
                print "skipping %s, running msgfmt failed: %s" % (lang, e)
Example #16
    def run (self):
        install_data.run(self)
        data_file = []
        fileNames = map(lambda x: path.split(x[0])[1], data_file)
        listNames = map(lambda x: filter(lambda y: y, x[0].split("/")),
                        data_file)
        data_find = {}
        for i in range(len(fileNames)):
            listNames[i].reverse()
            data_find[fileNames[i]] =[listNames[i],data_file[i][1]]

        for path in self.get_outputs():
            nameFile = path.split(path)[1]
            if nameFile in data_find.keys():
                data = data_find[nameFile][0]
                mode = data_find[nameFile][1]
                flagFound = True
                iMax = len(data)
                pathFile = path
                for i in range(iMax):
                    if data[i] != path.split(pathFile)[1]:
                        flagFound = False
                        break
                    pathFile = path.split(pathFile)[0]
                if flagFound:
                    os.chmod(path, mode)
Example #17
def ipopt(alpha, beta, cost_prod, cost_stor, cons_prod,
    setup, constraint, time_hor, nb_obj, verbose):
    """
    This function solves a quadratic problem as defined
    in 'the profit maximizing capacitated lot-size problem'.
    It is based on the Ipopt solver (http://www.coin-or.org/Ipopt/).
    
    ipopt(alpha, beta, cost_prod, cost_stor,cons_prod,
    cost_setup, constraint, time_hor, nb_obj, verbose)
    """
    extra_code = open(join(split(__file__)[0],'LLBP','main.cpp')).read()
    results = np.zeros((3*nb_obj+1)*time_hor+1, float)
    if verbose > 1:
        print "\nCompute lower bound (IpOpt)..."
    code="""
    int status = ipopt(alpha,beta,cost_prod,
        cost_stor,cons_prod,setup,constraint,
        results,time_hor,nb_obj,verbose);
    """
    wv.inline(code,['time_hor', 'nb_obj','alpha', 'beta', 'cost_prod',
        'cost_stor', 'setup', 'results', 'cons_prod', 'constraint', 'verbose'],
        include_dirs=["LLBP","/usr/include/coin/"],
        support_code=extra_code,
        libraries=['ipopt','lapack','pthread'],
        sources =[join(split(__file__)[0],'LLBP','LbIpopt.cpp')],
        type_converters=converters.blitz)
        #force = 1)
    return results[0],results[1:time_hor+1],\
    results[time_hor+1:(nb_obj+1)*time_hor+1],\
    results[(nb_obj+1)*time_hor+1:(2*nb_obj+1)*time_hor+1],\
    results[(2*nb_obj+1)*time_hor+1:]
    def run(self):

        img = cv2.imread(self.file_names[0])
        self.on_frame(img)
        self.paused = True
        i = 0
        while i < len(self.file_names):
            file_name = self.file_names[i]
            if not self.paused:
                if self.annotated_img is not None:
                    _, name = path.split(file_name)
                    out_path = path.join(self.out_dir, "tracked_" + name)
                    cv2.imwrite(out_path, self.annotated_img)

                    # Log the corners of the region
                    _, frame_name = path.split(file_name)
                    for log, tracker in zip(self.log_files, self.trackers):
                        if tracker.is_initialized() and self.tracking:
                            log.write(frame_name + region_to_string(tracker.get_region()) + "\n")
                        else:
                            log.write("# Tracker was not running in " + frame_name + "\n")

                img = cv2.imread(file_name)
                i += 1
            self.on_frame(img)
 
        for log in self.log_files: log.close()
        self.cleanup()
Example #19
 def file(self, relative=False):
     """ Method returns formated photo path - derived from format.id and source Photo filename """
     if relative:
         source_file = path.split(self.photo.image.name)
     else:
         source_file = path.split(self.photo.image.path)
     return path.join(source_file[0],  str (self.format.id) + '-' + source_file[1])
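# A hedged sketch of the naming scheme above with a made-up photo path and
# format id.
from os.path import join, split

source_file = split("photos/2021/cat.jpg")                  # ('photos/2021', 'cat.jpg')
print(join(source_file[0], str(3) + '-' + source_file[1]))  # photos/2021/3-cat.jpg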
Example #20
 def command_patch_list_data(self):
   requested_version = (self.reported_version + 1)
   while requested_version in self.patch_catalog.getCatalog()[self.reported_client]:
     p_file = path.join(self.patch_catalog.getCatalog()['path'],
       self.patch_catalog.getCatalog()[self.reported_client][requested_version] + '.pat')
     p_head, p_tail = path.split(p_file)
     r_file = path.join(self.patch_catalog.getCatalog()['path'],
       self.patch_catalog.getCatalog()[self.reported_client][requested_version] + '.rtp')
     r_head, r_tail = path.split(r_file)
     file_listing = {'patname':p_tail.encode('ascii'),
                     'patnamelen':pack('>i', 
                       len(p_tail.encode('ascii'))),
                     'patlen':pack('>i', 
                       path.getsize(p_file)),
                     'rtpname':r_tail.encode('ascii'),
                     'rtpnamelen':pack('>i', 
                       len(r_tail.encode('ascii'))),
                     'rtplen':pack('>i', 
                       path.getsize(r_file))
                    }
     self.connection.send(file_listing['patnamelen'])
     self.connection.send(file_listing['patname'])
     self.connection.send(file_listing['patlen'])
     self.connection.send(file_listing['rtpnamelen'])
     self.connection.send(file_listing['rtpname'])
     self.connection.send(file_listing['rtplen'])
     print("\nSERVER << sent %s PatchListData entry %s\n" % (self.address, file_listing))
     requested_version = requested_version + 1
   self.connection.send(b'\x00\x00\x00\x00')
   return
def generate_index_page(index_links, index_fp, order=[_index_headers["run_summary"]]):
    """ generate the top-level index page """
    # get containing directory for index_fp
    top_level_dir = split(split(index_fp)[0])[1]
    index_page_header = get_index_page_header()
    index_lines = [index_page_header]
    d = {}
    for e in index_links:
        try:
            d[e[2]].append((e[0], e[1]))
        except KeyError:
            d[e[2]] = [(e[0], e[1])]
    index_lines.append("<table border=1>\n")

    # Determine the order the data should be presented in. This should be
    # the order that the user requested, followed by any categories that
    # the user didn't include in the order parameter.
    ordered_table_entries = order + [k for k in d if k not in order]
    for k in ordered_table_entries:
        v = d[k]
        index_lines.append("<tr colspan=2 align=center bgcolor=#e8e8e8><td colspan=2 align=center>%s</td></tr>\n" % k)
        for description, path in v:
            path = re.sub(".*%s" % top_level_dir, "./", path)
            index_lines.append("<tr>%s</tr>\n" % format_index_link(description, path))
    index_lines.append("</table>\n")

    index_page_footer = get_index_page_footer()
    index_lines.append(index_page_footer)

    open(index_fp, "w").write("".join(index_lines))
Example #22
def main():
# make a duplicate of an existing file
  if path.exists("textfile.txt"):
    # get the path to the file in the current directory
    src = path.realpath("textfile.txt");      
   # separate the path part from the filename
    head, tail = path.split(src)
    print "path: " + head
    print "file: " + tail 
    
  # let's make a backup copy by appending "bak" to the name
    dst = src + ".bak"
    # now use the shell to make a copy of the file
    shutil.copy(src,dst)
    
    # copy over the permissions, modification times, and other info
    shutil.copystat(src, dst)  
 
    # rename the original file
    os.rename("textfile.txt", "newfile.txt")
    
    # now put things into a ZIP archive
    root_dir,tail = path.split(src)
    shutil.make_archive("archive", "zip", root_dir)

    # more fine-grained control over ZIP files
    with ZipFile("testzip.zip","w") as newzip:
      newzip.write("newfile.txt")
      newzip.write("textfile.txt.bak")     
Example #23
def sendRound_1():
    global context, crypto, purple
    # generate nonce for session key
    context.k_i = crypto.getSomeNonce(c_int(context.len_sid_random))
    k_i_hashed = crypto.hash(c_char_p(context.k_i), c_int(len(context.k_i)))
    
    # Generate Long-term keys
    ### like this: *Well, this may not be exactly long-term key. Whatever.*
    context.myPrivKey = crypto.getSomeNonce(c_int(context.len_authNonce_random))
    context.myPubKey = crypto.exponent(c_char_p("2"), c_char_p(context.myPrivKey))
    
    if 1:
        # Read from file Ephemeral keys
        file = open(join(split(__file__)[0],"ephkey"+context.myUsername+".txt"), 'r')
        context.myEphKeys = file.read() # this is a keypair -- public and private keys
        file.close()
        file = open(join(split(__file__)[0],"ephPubkey"+context.myUsername+".txt"), 'r')
        context.myEphPubKey = file.read()
        file.close()
    else:
        # Generate Ephemeral keys
        print "start Key generation for ", context.myUsername
        context.myEphKeys = crypto.generateKeys()
        file = open(join(split(__file__)[0],"ephkey"+context.myUsername+".txt"), 'w')
        file.write(context.myEphKeys)
        file.close()
        context.myEphPubKey = crypto.getPubPrivKey(c_char_p(context.myEphKeys), c_char_p("public-key"))
        file = open(join(split(__file__)[0],"ephPubkey"+context.myUsername+".txt"), 'w')
        file.write(context.myEphPubKey)
        file.close()

    # Send message 
    purple.PurpleConvChatSend(context.chat, "mpOTR:A_R1:"+k_i_hashed+";"+ context.myPubKey+";"+context.myEphPubKey)
Example #24
def compile_fontello_fonts():
    from zipfile import ZipFile
    from StringIO import StringIO
    assert env.hosts == ['localhost'], "Meant to be run locally"
    try:
        import requests
    except ImportError:
        raise RuntimeError(
            "Please 'pip install requests' in your main environment")
    font_dir = join(
        env.projectpath, 'assembl', 'static', 'css', 'fonts')
    config_file = join(font_dir, 'config.json')
    id_file = join(font_dir, 'fontello.id')
    assert os.path.exists(id_file)
    with open(id_file) as f:
        fid = f.read()
    r = requests.get("http://fontello.com/%s/get" % fid)
    if not r.ok:
        raise RuntimeError("Could not get the data")
    with ZipFile(StringIO(r.content)) as data:
        for name in data.namelist():
            dirname, fname = split(name)
            dirname, subdir = split(dirname)
            if fname and (subdir == 'font' or fname == 'config.json'):
                with data.open(name) as fdata:
                    with open(join(font_dir, fname), 'wb') as ffile:
                        ffile.write(fdata.read())
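# A hedged sketch of the member-name handling above; zip member names use "/",
# which os.path.split also handles (made-up member name).
from os.path import split

name = "fontello-abc123/font/fontello.woff"
dirname, fname = split(name)       # ('fontello-abc123/font', 'fontello.woff')
dirname, subdir = split(dirname)   # ('fontello-abc123', 'font')
print(fname, subdir)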
Example #25
    def _check_file ( self, file_name ):
        """ Returns the 'checked' version of the specified file name. In the
            case of a traceback that has been pasted into the tool, that data
            may have originated on another system with a different pythonpath,
            so the file may not exist locally. This method attempts to adjust
            such a file, if possible. If the specified file cannot be found
            locally, the original file name is returned.
        """
        if not exists( file_name ):
            fn, base_name = split( normpath( file_name ) )
            paths         = [ '' ]
            while True:
                fn, path = split( fn )
                if path == '':
                    break

                paths.insert( 0, join( path, paths[0] ) )

            for sys_path in self.sys_path:
                for path in paths:
                    fn = join( sys_path, path, base_name )
                    if (isfile( fn ) and ((path == '') or
                        (isfile( join( sys_path, path, '__init__.py' ) )))):
                        return fn

        return file_name
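# A hedged sketch of the relative-path prefixes built above, for a made-up
# traceback path that does not exist locally (POSIX paths assumed).
from os.path import join, normpath, split

fn, base_name = split(normpath("/remote/project/pkg/mod.py"))
paths = ['']
while True:
    fn, part = split(fn)
    if part == '':
        break
    paths.insert(0, join(part, paths[0]))
print(paths)   # ['remote/project/pkg/', 'project/pkg/', 'pkg/', '']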
Example #26
 def _get_default_regkey(self, regpath=None, forwriting=False):
     """ Get the registry key handle for registry operations. provided the
         registry path. """ 
     if regpath:
         key = regpath
         subkey = ''
         while path.split(key)[0]:
             key, tmp = path.split(key)
             subkey = '\\'.join([tmp, subkey])              
         if key == 'HKEY_CLASSES_ROOT':
             key = _winreg.HKEY_CLASSES_ROOT
         elif key == 'HKEY_CURRENT_CONFIG':
             key = _winreg.HKEY_CURRENT_CONFIG
         elif key == 'HKEY_CURRENT_USER':
             key = _winreg.HKEY_CURRENT_USER
         elif key == 'HKEY_DYN_DATA':
             key = _winreg.HKEY_DYN_DATA
         elif key == 'HKEY_LOCAL_MACHINE':
             key = _winreg.HKEY_LOCAL_MACHINE
         elif key == 'HKEY_PERFORMANCE_DATA':
             key = _winreg.HKEY_PERFORMANCE_DATA
         elif key == 'HKEY_USERS':
             key = _winreg.HKEY_USERS
         else:
             raise TypeError('Invalid registry key (HKEY_)')
         try:
             if forwriting:
                 hkey = _winreg.CreateKey(key, subkey)
             else:
                 hkey = _winreg.OpenKey(key, subkey)
         except:
             raise WindowsError('Cannot open registry path')
         else:
             return hkey
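# A hedged sketch of the key/subkey peeling above; registry paths use
# backslashes, so ntpath is used explicitly here (made-up path).
import ntpath as path

regpath = r"HKEY_CURRENT_USER\Software\Example"
key, subkey = regpath, ''
while path.split(key)[0]:
    key, tmp = path.split(key)
    subkey = '\\'.join([tmp, subkey])
print(key, subkey)   # HKEY_CURRENT_USER Software\Example\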
Example #27
def gen_filter(path, root_dir, hf_list, cf_list, af_list, tools_ver):

  f1 = r'''<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="{0}" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
'''.format(tools_ver)

  f2 = r'''  <ItemGroup>
    <None Include="..\..\gmp-h.in" />
  </ItemGroup>
</Project>
'''

  relp = split(relpath(root_dir, path))[0] + '\\'
  try:
    makedirs(split(path)[0])
  except IOError as e:
    if e.errno != EEXIST:
      raise
    else:
      pass

  with open(path, 'w') as outf:

    outf.write(f1)
    filter_folders(cf_list, af_list, outf)
    if hf_list:
      filter_headers(hf_list, relp, outf)
    filter_csrc(cf_list, relp, outf)
    if af_list:
      filter_asrc(af_list, relp, outf)
    outf.write(f2)
Example #28
def readList():
    f = open(listName, mode='r', buffering=-1, encoding=None, errors=None, newline=None, closefd=True)
    data = json.loads(f.read())
    gallery = data['gallery']
    batchTag = data['batchTag']
    files = data ['files']

    successCnt = 0
    errorCnt = 0

    if batchTag is True:
        tags = data['tags']
        for file in files:
            filename = path.split(file)[-1]
            msg = handle_upload(file, gallery, tags)
            if msg =='Extension ERROR!':
                errorCnt += 1
            else:
                successCnt += 1
    else:
        for file in files:
            tags = files[file]
            filename = path.split(file)[-1]
            msg = handle_upload(file, gallery, tags)
            if msg =='Extension ERROR!':
                errorCnt += 1
            else:
                successCnt += 1
    print(successCnt)
    print(errorCnt)
    f.close()
Example #29
 def save(self, *args, **kwargs):
     image = PILImage.open(self.instance.image.file)
     temp_handle = StringIO()
     greyscale_image = image.convert("L")
     greyscale_image.save(temp_handle, 'png')
     temp_handle.seek(0)
     suf = SimpleUploadedFile(path.split(self.instance.image.name)[-1],
                              temp_handle.read(), content_type='image/png')
     name = suf.name
     if "." in name:
         name = name.split(".")[0]
     self.instance.preprocessed_image.save("%s_l.png" % name, suf,
                                           save=False)
     if self.cleaned_data["process"] == PROCESSES_DICT["HANDWRITTEN"]:
         factor = [5, 2, 0.25]
         temp_handle = StringIO()
         raw_mask = extract_handwritten_text(image, factor=factor)
         regions_mask = draw_regions(raw_mask, raw_mask,
                                     outline=["white", "white", None],
                                     fill=["white", "white", None])
         handwritten_mask = remove_noise(regions_mask).convert("1")
         handwritten_mask.save(temp_handle, 'png')
         temp_handle.seek(0)
         suf = SimpleUploadedFile(path.split(self.instance.image.name)[-1],
                                  temp_handle.read(),
                                  content_type='image/png')
         name = suf.name
         if "." in name:
             name = name.split(".")[0]
         self.instance.handwritten_mask.save("%s_h.png" % name, suf,
                                            save=False)
     return super(InitialImageForm, self).save(*args, **kwargs)
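# Indexing split()'s (head, tail) result with [-1] just takes the tail, i.e.
# the bare filename. A hedged sketch with a made-up storage name:
from os.path import split

print(split("uploads/2020/scan_001.png")[-1])   # scan_001.png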
def find_and_process_expected_tables(start_dir, biom_processor=abspath, filename_pattern="table*biom"):
    """ given a start_dir, return list of tuples describing the table and containing the processed table
    
         start_dir: top-level directory to use when starting the walk
         biom_processor: takes a relative path to a biom file and does
          something with it. default is call abspath on it to convert the
          relative path to an absolute path, but could also be 
          parse_biom_table, for example. Not sure if we'll want this, but 
          it's easy to hook up.
        filename_pattern: pattern to use when matching filenames, can contain 
         globbable (i.e., bash-style) wildcards (default: "table*biom")
        
        results = [(data-set-id, reference-id, biom_processor(table_fp)),
                   ...
                  ]
    """
    table_fps = glob(join(start_dir, "*", "*", "expected", filename_pattern))
    results = []
    for table_fp in table_fps:
        expected_dir, _ = split(table_fp)
        reference_dir, _ = split(expected_dir)
        dataset_dir, reference_id = split(reference_dir)
        _, dataset_id = split(dataset_dir)
        results.append((dataset_id, reference_id, biom_processor(table_fp)))
    return results
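# A hedged sketch of the successive splits above for a hypothetical
# expected-table path.
from os.path import split

table_fp = "start/dataset1/ref_a/expected/table.biom"
expected_dir, _ = split(table_fp)         # start/dataset1/ref_a/expected
reference_dir, _ = split(expected_dir)    # start/dataset1/ref_a
dataset_dir, reference_id = split(reference_dir)
_, dataset_id = split(dataset_dir)
print(dataset_id, reference_id)           # dataset1 ref_a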
def getConnection():
    this_dir = split(__file__)[0]
    fname = join(this_dir, 'sqlite-sakila.sq')
    conn = sqlite3.connect(fname)
    conn.row_factory = dictionary_factory  # note: no parentheses
    return conn
Example #32
def to_nativepath(path):
    return os.path.join(*path.split('/'))
Example #33
    from lib.core.main.Statistics import Send_Statistics
    from lib.core.Check_Supported_OS import Check_Supported

    # installers
    from lib.core.installers.installer_uninstaller import Install_G3nius, Uninstall_G3nius
    from lib.core.installers.check import Check_Installtion_G3nius

    # load GPL libs
    from lib.GPL.IO import gpl_input, gpl_sleep, gpl_confirm
    from lib.GPL.File_Workers import gpl_read_from_file
    from lib.GPL.String_Workers import gpl_fix_spases, gpl_fix_string_to_uri
    from lib.GPL.Page_Managers import gpl_clear_and_banner, gpl_set_banner_verion, gpl_clear
    from lib.GPL.Access_Managers import gpl_check_root_needed_with_error
except Exception as EX:
    exc_type, exc_obj, exc_tb = exc_info()
    FileName = split(exc_tb.tb_frame.f_code.co_filename)[1]
    Line_Number = exc_tb.tb_lineno
    print(colored('[!] Failed to load some local libs.', 'red'))
    print(
        colored("Crashed at:\t" + FileName + ' line ' + str(Line_Number),
                'yellow'))
    print(colored("Exception:\t" + str(EX), 'yellow'))
    print(colored("If you sure don't change local files, report it.", 'red'))
    print(colored('Email : [email protected]', 'blue'))
    exit(Exit_Codes.Crash)
"""		send statisctic usage		"""
# NOTE:
# This feature is optional and just for statistics.
# You can set False to don't send reports.
thread = Thread(target=Send_Statistics,
                args=(Main_Configs.Statistics_Reports, ))
Example #34
def build_image(
    dest,
    commands=None,
    source="",
    mounter="v3io",
    base_image=None,
    requirements=None,
    inline_code=None,
    inline_path=None,
    secret_name=None,
    namespace=None,
    with_mlrun=True,
    registry=None,
    interactive=True,
    name="",
    extra=None,
    verbose=False,
):

    if registry:
        dest = "{}/{}".format(registry, dest)
    elif dest.startswith("."):
        dest = dest[1:]
        if "DOCKER_REGISTRY_PORT" in environ:
            registry = urlparse(environ.get("DOCKER_REGISTRY_PORT")).netloc
        else:
            registry = environ.get("DEFAULT_DOCKER_REGISTRY")
            secret_name = secret_name or environ.get("DEFAULT_DOCKER_SECRET")
        if not registry:
            raise ValueError("local docker registry is not defined, set "
                             "DEFAULT_DOCKER_REGISTRY/SECRET env vars")
        dest = "{}/{}".format(registry, dest)

    if isinstance(requirements, list):
        requirements_list = requirements
        requirements_path = "requirements.txt"
        if source:
            raise ValueError("requirements list only works with inline code")
    else:
        requirements_list = None
        requirements_path = requirements

    base_image = base_image or config.default_image
    if with_mlrun:
        commands = commands or []
        commands.append("pip install {}".format(config.package_path))

    if not inline_code and not source and not commands:
        logger.info("skipping build, nothing to add")
        return "skipped"

    context = "/context"
    to_mount = False
    src_dir = "."
    v3io = (source.startswith("v3io://") or source.startswith("v3ios://")
            if source else None)

    if inline_code:
        context = "/empty"
    elif source and "://" in source and not v3io:
        context = source
    elif source:
        if v3io:
            source = urlparse(source).path
        to_mount = True
        if source.endswith(".tar.gz"):
            source, src_dir = path.split(source)
    else:
        src_dir = None

    dock = make_dockerfile(
        base_image,
        commands,
        src_dir=src_dir,
        requirements=requirements_path,
        extra=extra,
    )

    kpod = make_kaniko_pod(
        context,
        dest,
        dockertext=dock,
        inline_code=inline_code,
        inline_path=inline_path,
        requirements=requirements_list,
        secret_name=secret_name,
        name=name,
        verbose=verbose,
    )

    if to_mount:
        # todo: support different mounters
        kpod.mount_v3io(remote=source, mount_path="/context")

    k8s = get_k8s_helper()
    kpod.namespace = k8s.resolve_namespace(namespace)

    if interactive:
        return k8s.run_job(kpod)
    else:
        pod, ns = k8s.create_pod(kpod)
        logger.info(
            'started build, to watch build logs use "mlrun watch {} {}"'.
            format(pod, ns))
        return "build:{}".format(pod)
Example #35
def main():
    module = AnsibleModule(
        argument_spec=dict(name=dict(
            aliases=["pkg", "package", "formula"],
            required=False,
            type='list',
            elements='str',
        ),
                           path=dict(
                               default="/usr/local/bin",
                               required=False,
                               type='path',
                           ),
                           state=dict(
                               default="present",
                               choices=[
                                   "present",
                                   "installed",
                                   "latest",
                                   "upgraded",
                                   "head",
                                   "linked",
                                   "unlinked",
                                   "absent",
                                   "removed",
                                   "uninstalled",
                               ],
                           ),
                           update_homebrew=dict(
                               default=False,
                               aliases=["update-brew"],
                               type='bool',
                           ),
                           upgrade_all=dict(
                               default=False,
                               aliases=["upgrade"],
                               type='bool',
                           ),
                           install_options=dict(
                               default=None,
                               aliases=['options'],
                               type='list',
                               elements='str',
                           ),
                           upgrade_options=dict(
                               default=None,
                               type='list',
                               elements='str',
                           )),
        supports_check_mode=True,
    )

    module.run_command_environ_update = dict(LANG='C',
                                             LC_ALL='C',
                                             LC_MESSAGES='C',
                                             LC_CTYPE='C')

    p = module.params

    if p['name']:
        packages = p['name']
    else:
        packages = None

    path = p['path']
    if path:
        path = path.split(':')

    state = p['state']
    if state in ('present', 'installed'):
        state = 'installed'
    if state in ('head', ):
        state = 'head'
    if state in ('latest', 'upgraded'):
        state = 'upgraded'
    if state == 'linked':
        state = 'linked'
    if state == 'unlinked':
        state = 'unlinked'
    if state in ('absent', 'removed', 'uninstalled'):
        state = 'absent'

    update_homebrew = p['update_homebrew']
    if not update_homebrew:
        module.run_command_environ_update.update(
            dict(HOMEBREW_NO_AUTO_UPDATE="True"))
    upgrade_all = p['upgrade_all']
    p['install_options'] = p['install_options'] or []
    install_options = [
        '--{0}'.format(install_option)
        for install_option in p['install_options']
    ]

    p['upgrade_options'] = p['upgrade_options'] or []
    upgrade_options = [
        '--{0}'.format(upgrade_option)
        for upgrade_option in p['upgrade_options']
    ]
    brew = Homebrew(module=module,
                    path=path,
                    packages=packages,
                    state=state,
                    update_homebrew=update_homebrew,
                    upgrade_all=upgrade_all,
                    install_options=install_options,
                    upgrade_options=upgrade_options)
    (failed, changed, message) = brew.run()
    changed_pkgs = brew.changed_pkgs
    unchanged_pkgs = brew.unchanged_pkgs

    if failed:
        module.fail_json(msg=message)
    module.exit_json(changed=changed,
                     msg=message,
                     unchanged_pkgs=unchanged_pkgs,
                     changed_pkgs=changed_pkgs)
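# Note that `path` in main() above is the module's string parameter (e.g.
# "/usr/local/bin:/opt/homebrew/bin"), so .split(':') is str.split rather
# than os.path.split.
print("/usr/local/bin:/opt/homebrew/bin".split(':'))
# ['/usr/local/bin', '/opt/homebrew/bin']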
Example #36
def main():
    option_parser, opts, args = parse_command_line_parameters(**script_info)

    # Some code for error checking of input args:

    # Check if distance_matrix_file is valid:
    try:
        d_header, d_mat = parse_distmat(open(opts.distance_matrix_file, 'U'))
    except:
        option_parser.error(
            "This does not look like a valid distance matrix file.  Please supply a valid distance matrix file using the -d option.")

    if not is_symmetric_and_hollow(d_mat):
        option_parser.error("The distance matrix must be symmetric and "
                            "hollow.")

    # Check if map_fname is valid:
    try:
        mapping, m_header, m_comments = \
            parse_mapping_file(open(opts.map_fname, 'U'))
    except QiimeParseError:
        option_parser.error(
            "This does not look like a valid metadata mapping file.  Please supply a valid mapping file using the -m option.")

    # make sure background_color is valid
    if opts.background_color not in ['black', 'white']:
        option_parser.error(
            "'%s' is not a valid background color.  Please pass in either 'black' or 'white' using the -k option." %
            (opts.background_color))

    # make sure prefs file is valid if it exists
    if opts.prefs_path is not None:
        try:
            prefs_file = open(opts.prefs_path, 'U').read()
        except IOError:
            option_parser.error(
                "Provided prefs file, '%s', does not exist.  Please pass in a valid prefs file with the -p option." %
                (opts.prefs_path))

    if opts.prefs_path is not None:
        prefs = parse_prefs_file(prefs_file)
    else:
        prefs = None

    color_prefs, color_data, background_color, label_color, ball_scale,\
        arrow_colors = sample_color_prefs_and_map_data_from_options(opts)

    # list of labelname, groups, colors, data_colors, data_color_order
    groups_and_colors = list(iter_color_groups(mapping=color_data['map'],
                                               prefs=color_prefs))

    # dict mapping labelname to list of: [groups, colors, data_colors,
    # data_color_order]
    field_to_colors = {}
    for color_info in groups_and_colors:
        field_to_colors[color_info[0]] = color_info[1:]

    qiime_dir = get_qiime_project_dir() + '/qiime/support_files/'

    fields = opts.fields
    if fields is not None:
        fields = map(strip, fields.split(','))
        fields = [i.strip('"').strip("'") for i in fields]
    elif prefs is not None:
        fields = prefs.get('FIELDS', None)
    else:
        fields = get_interesting_mapping_fields(mapping, m_header)

    # Check that all provided fields are valid:
    if fields is not None:
        for f in fields:
            if f not in m_header:
                option_parser.error(
                    "The field, %s, is not in the provided mapping file.  Please supply correct fields (using the -f option or providing a 'FIELDS' list in the prefs file) corresponding to fields in mapping file." %
                    (f))

    within_distances, between_distances, dmat = \
        group_distances(mapping_file=opts.map_fname,
                        dmatrix_file=opts.distance_matrix_file,
                        fields=fields,
                        dir_prefix=get_random_directory_name(
                            output_dir=opts.dir_path,
                            prefix='distances'))

    if not opts.suppress_html_output:
        # histograms output path
        histograms_path = path.join(opts.dir_path, 'histograms')
        try:
            mkdir(histograms_path)
        except OSError:  # raised if dir exists
            pass

        # draw all histograms
        distances_dict, label_to_histogram_filename = \
            draw_all_histograms(single_field=within_distances,
                                paired_field=between_distances,
                                dmat=dmat,
                                histogram_dir=histograms_path,
                                field_to_color_prefs=field_to_colors,
                                background_color=background_color)

        # Get relative path to histogram files.
        label_to_histogram_filename_relative = \
            _make_relative_paths(label_to_histogram_filename, opts.dir_path)

        dm_fname = path.split(opts.distance_matrix_file)[-1]
        basename = path.splitext(dm_fname)[0]
        outfile_name = basename + '_distance_histograms.html'
        make_main_html(distances_dict=distances_dict,
                       label_to_histogram_filename=label_to_histogram_filename_relative,
                       root_outdir=opts.dir_path,
                       outfile_name=outfile_name,
                       title='Distance Histograms')

        # Handle saving web resources locally.
        # javascript file
        javascript_path = path.join(opts.dir_path, 'js')
        try:
            mkdir(javascript_path)
        except OSError:  # raised if dir exists
            pass
        js_out = open(javascript_path + '/histograms.js', 'w')
        js_out.write(open(qiime_dir + 'js/histograms.js').read())
        js_out.close()

    monte_carlo_iters = opts.monte_carlo_iters
    if monte_carlo_iters > 0:
        # Do Monte Carlo for all fields
        monte_carlo_group_distances(mapping_file=opts.map_fname,
                                    dmatrix_file=opts.distance_matrix_file,
                                    prefs=prefs,
                                    dir_prefix=opts.dir_path,
                                    fields=fields,
                                    default_iters=monte_carlo_iters)

        # Do Monte Carlo for within and between fields
        monte_carlo_group_distances_within_between(
            single_field=within_distances,
            paired_field=between_distances, dmat=dmat,
            dir_prefix=opts.dir_path,
            num_iters=monte_carlo_iters)
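# A hedged sketch of the histogram output-name derivation near the end of
# main() above, with a made-up distance matrix path.
from os import path

dm_fname = path.split("/data/unweighted_unifrac_dm.txt")[-1]
print(path.splitext(dm_fname)[0] + '_distance_histograms.html')
# unweighted_unifrac_dm_distance_histograms.html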
Example #37
 def save_path(self):
     folder, name = split(self.path)
     return f'{folder}/{PREFIX_SAVED_ROM}{name}'
Example #38
 def export_ica(self, fname):
     name, ext = splitext(split(fname)[-1])
     ext = ext if ext else ".fif"  # automatically add extension
     fname = join(split(fname)[0], name + ext)
     self.current["ica"].save(fname)
Example #39
# imports from acoular
import acoular
from acoular import L_p, Calib, MicGeom, PowerSpectra, Environment, \
RectGrid3D, BeamformerBase, BeamformerEig, BeamformerOrth, BeamformerCleansc, \
MaskedTimeSamples, FiltFiltOctave, BeamformerTimeSq, TimeAverage, \
TimeCache, BeamformerTime, TimePower, \
BeamformerCapon, BeamformerMusic, BeamformerDamas, SteeringVector
# other imports
from os import path
from pylab import figure, subplot, imshow, show, colorbar, title, tight_layout
from pickle import dump

# files
datafile = 'example_data.h5'
calibfile = 'example_calib.xml'
micgeofile = path.join( path.split(acoular.__file__)[0],'xml','array_56.xml')

#octave band of interest
cfreq = 4000

#===============================================================================
# first, we define the time samples using the MaskedTimeSamples class
# alternatively we could use the TimeSamples class that provides no masking
# of channels and samples
#===============================================================================
t1 = MaskedTimeSamples(name=datafile)
t1.start = 0 # first sample, default
t1.stop = 16000 # last valid sample = 15999
invalid = [1,7] # list of invalid channels (unwanted microphones etc.)
t1.invalid_channels = invalid 
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import os.path as osp
import logging
# add python path of PadleDetection to sys.path
parent_path = osp.abspath(osp.join(__file__, *(['..'] * 3)))
if parent_path not in sys.path:
    sys.path.append(parent_path)

from ppdet.utils.download import download_dataset

logging.basicConfig(level=logging.INFO)

download_path = osp.split(osp.realpath(sys.argv[0]))[0]
download_dataset(download_path, 'coco')
Example #41
def pagexmlcombine(ocrindex, gtindex, xmlfile, output):

    xmlfile = path.abspath(xmlfile)
    pagedir = path.split(xmlfile)[0] + '/Pages'    
    commentsdir = path.split(xmlfile)[0] + '/Comments'
    pagename = path.splitext(path.basename(xmlfile))[0]
    
    thispagedir = pagedir + '/' + pagename
    commentsfile = commentsdir + '/' + pagename + '.txt'
    
    # load xml
    root = etree.parse(xmlfile).getroot()
    ns = {"ns":root.nsmap[None]}
    
    #convert point notation (older PageXML versions)
    for c in root.xpath("//ns:Coords[not(@points)]", namespaces=ns):
        cc = []
        for point in c.xpath("./ns:Point", namespaces=ns):
            cx = point.attrib["x"]
            cy = point.attrib["y"]
            c.remove(point)
            cc.append(cx+","+cy)
        c.attrib["points"] = " ".join(cc)    
    
    # combine data in coordmap dict
    textregions = root.xpath('//ns:TextRegion', namespaces=ns)
    coordmap = {}
    for r in textregions:
        rid = r.attrib["id"]
        coordmap[rid] = {"type":r.attrib["type"]}
        
        # coordinates
        coordmap[rid]["coords"] = []
        for c in r.xpath("./ns:Coords", namespaces=ns) + r.xpath("./Coords"):
            coordstrings = [x.split(",") for x in c.attrib["points"].split()]
            coordmap[rid]["coords"] += [(int(x[0]), int(x[1])) for x in coordstrings ]
            
        # find region dir, offset and size
        for imgf in glob(thispagedir + "/*" + coordmap[rid]["type"] + ".png"):
            if not "offset" in coordmap[rid]:
                size = Image.open(imgf).size
                offsetp = path.splitext(imgf)[0] + ".offset"
                with open(offsetp) as f:
                    offset = tuple([int(x) for x in f.read().split(",")])
                # check if coordinates fit in region rectangle
                fit = all([offset[0]<=c[0]<=offset[0]+size[0] \
                         and  offset[1]<=c[1]<=offset[1]+size[1] \
                    for c in coordmap[rid]["coords"]])
                if fit:
                    coordmap[rid]["offset"] = offset
                    coordmap[rid]["size"] = size
                    coordmap[rid]["path"] = path.splitext(imgf)[0]
        if not "path" in coordmap[rid]:
            raise ValueError("Segment for region " + rid + " not found in pagedir "+thispagedir+"!")
            
        # angle
        if path.isfile(coordmap[rid]["path"] + ".angle"):
            with open(coordmap[rid]["path"] + ".angle") as f:
                coordmap[rid]["angle"] = float(f.read())
        else:
            coordmap[rid]["angle"] = 0.0
            
        
        # lines
        coordmap[rid]["lines"] = {}
        for n, l in enumerate(sorted(glob(coordmap[rid]["path"]+"/*.coords"))):
            lpath = path.splitext(l)[0]
            lid = '{}_{:03d}'.format(rid, n + 1)
            coordmap[rid]["lines"][lid] = {}
            with open(l) as f:
                b = f.read().split(",")
            b = [int(x) for x in b] 
            b = [b[1], b[0], b[3], b[2]] # boxes: [(x1, y1, x2, y2),...], cf. kraken segmenter
            coords = [(b[0],b[1]),(b[2],b[1]),(b[2],b[3]),(b[0],b[3])]
            
            # rotate line coordinates 
            if coordmap[rid]["angle"]:
                newcoords = []
                center = tuple([x/2 for x in coordmap[rid]["size"]])
                a = radians(- coordmap[rid]["angle"])
                for c in coords:
                    x = c[0] - center[0]
                    y = c[1] - center[1]
                    xr =  x*cos(a) + y*sin(a)
                    yr = -x*sin(a) + y*cos(a)
                    x = round(xr + center[0])
                    y = round(yr + center[1])
                    newcoords.append((x,y))
                coords = newcoords
            
            # relative to absolute coordinates
            coords = [(x[0]+coordmap[rid]["offset"][0], x[1]+coordmap[rid]["offset"][1]) for x in coords]
            coordmap[rid]["lines"][lid]["coords"] = coords
            
            # ocr text
            for fpath in glob(lpath + ".pred.txt"):
                with open(fpath, encoding='utf-8') as f:
                    coordmap[rid]["lines"][lid]["ocr"] =  f.read().strip()
                
            # gt text
            for fpath in glob(lpath + ".gt.txt"):
                with open(fpath, encoding='utf-8') as f:
                    coordmap[rid]["lines"][lid]["gt"] =  f.read().strip()
                    
    # start writing coordmap back to xml
    for rid in sorted(coordmap):
        textregion = root.find('.//ns:TextRegion[@id="'+rid+'"]', namespaces=ns)
        regiontext = []

        # angle
        if coordmap[rid]["angle"]:
            textregion.attrib["orientation"] = str(-1 * coordmap[rid]["angle"])
        # lines
        for lid in coordmap[rid]["lines"]:
            linexml = textregion.find('./ns:TextLine[@id="'+lid+'"]', namespaces=ns)
            if linexml is None:
                linexml = etree.SubElement(textregion, "TextLine", attrib={"id":lid})
            # coords
            coordsxml = linexml.find('./ns:Coords', namespaces=ns)
            if coordsxml is None:
                coordsxml = etree.SubElement(linexml, "Coords")
            coordsxml.attrib["points"] = " ".join(str(x[0])+","+str(x[1]) \
                            for x in coordmap[rid]["lines"][lid]["coords"])
            
            # text
            if "ocr" in coordmap[rid]["lines"][lid]:
                textequivxml = linexml.find('./ns:TextEquiv[@index="'+str(ocrindex)+'"]', namespaces=ns)
                if textequivxml is None:
                    textequivxml = etree.SubElement(linexml, "TextEquiv", attrib={"index":str(ocrindex)})
                unicodexml = textequivxml.find('./ns:Unicode', namespaces=ns)
                if unicodexml is None:
                    unicodexml = etree.SubElement(textequivxml, "Unicode")
                unicodexml.text = coordmap[rid]["lines"][lid]["ocr"]
            if "gt" in coordmap[rid]["lines"][lid]:
                textequivxml = linexml.find('./ns:TextEquiv[@index="'+str(gtindex)+'"]', namespaces=ns)
                if textequivxml is None:
                    textequivxml = etree.SubElement(linexml, "TextEquiv", attrib={"index":str(gtindex)})
                unicodexml = textequivxml.find('./ns:Unicode', namespaces=ns)
                if unicodexml is None:
                    unicodexml = etree.SubElement(textequivxml, "Unicode")
                unicodexml.text = coordmap[rid]["lines"][lid]["gt"]
            
        # region text collect
        for lid in coordmap[rid]["lines"]:
            if "gt" in coordmap[rid]["lines"][lid]:
                regiontext.append(coordmap[rid]["lines"][lid]["gt"])
            elif "ocr" in coordmap[rid]["lines"][lid]:
                regiontext.append(coordmap[rid]["lines"][lid]["ocr"])
            else:
                regiontext.append("")
                    
        # region text insert
        textequivxml = textregion.find('./ns:TextEquiv', namespaces=ns)
        if textequivxml is None:
            textequivxml = etree.SubElement(textregion, "TextEquiv")
        unicodexml = textequivxml.find('./ns:Unicode', namespaces=ns)
        if unicodexml is None:
            unicodexml = etree.SubElement(textequivxml, "Unicode")
        unicodexml.text = "\n".join(regiontext)
    
    # timestamp
    lastchange = root.find('.//ns:LastChange', namespaces=ns)
    lastchange.text = strftime("%Y-%m-%dT%H:%M:%S", gmtime())
    
    # comments
    if path.isfile(commentsfile):
        metadata = root.find('.//ns:Metadata', namespaces=ns)
        commentsxml = metadata.find('./ns:Comments', namespaces=ns)
        if commentsxml is None:
            commentsxml = etree.SubElement(metadata, "Comments")
        with open(commentsfile) as f:
            commentsxml.text = f.read()
    
    # update version
    xmlcontent = etree.tounicode(root.getroottree()).replace(
             "http://schema.primaresearch.org/PAGE/gts/pagecontent/2010-03-19",
             "http://schema.primaresearch.org/PAGE/gts/pagecontent/2017-07-15"
            ).replace(
             "http://schema.primaresearch.org/PAGE/gts/pagecontent/2013-07-15",
             "http://schema.primaresearch.org/PAGE/gts/pagecontent/2017-07-15"                    
            )
    xmlcontent = '<?xml version="1.0" encoding="UTF-8" standalone="no"?>' + xmlcontent
    
    # write file
    with open(path.abspath(output), "w", encoding='utf-8') as f:
        f.write(xmlcontent)
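# Hedged illustration (not part of the script above; all names hypothetical):
# the line-coordinate rotation in the loop is a plain rotation about the
# region centre. The script negates the angle to match its image-coordinate
# convention; the standalone helper below uses the usual counter-clockwise
# convention.
from math import cos, sin, radians

def rotate_point(pt, center, angle_deg):
    """Rotate a point about a centre by angle_deg (counter-clockwise)."""
    a = radians(angle_deg)
    x, y = pt[0] - center[0], pt[1] - center[1]
    xr = x * cos(a) - y * sin(a)
    yr = x * sin(a) + y * cos(a)
    return (round(xr + center[0]), round(yr + center[1]))

# e.g. rotate_point((10, 0), (0, 0), 90) -> (0, 10)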
Example #42
0
def main():

    parser = argparse.ArgumentParser(description="Create a surface model")
    parser.add_argument('config', type=str, metavar='INPUT')
    args = parser.parse_args()
    config = json_load_ascii(args.config, shell_replace=True)
    configdir, configfile = split(abspath(args.config))

    # Determine top level parameters
    for q in ['output_model_file', 'sources', 'normalize', 'wavelength_file']:
        if q not in config:
            raise ValueError('Missing parameter: %s' % q)

    wavelength_file = expand_path(configdir, config['wavelength_file'])
    normalize = config['normalize']
    reference_windows = config['reference_windows']
    outfile = expand_path(configdir, config['output_model_file'])

    # load wavelengths file
    q = s.loadtxt(wavelength_file)
    if q.shape[1] > 2:
        q = q[:, 1:]
    if q[0, 0] < 100:
        q = q * 1000.0
    wl = q[:, 0]
    nchan = len(wl)

    # build global reference windows
    refwl = []
    for wi, window in enumerate(reference_windows):
        active_wl = aand(wl >= window[0], wl < window[1])
        refwl.extend(wl[active_wl])
    normind = s.array([s.argmin(abs(wl-w)) for w in refwl])
    refwl = s.array(refwl, dtype=float)

    # create basic model template
    model = {'normalize': normalize, 'wl': wl,
             'means': [], 'covs': [], 'refwl': refwl}

    for si, source_config in enumerate(config['sources']):

        # Determine source parameters
        for q in ['input_spectrum_files', 'windows', 'n_components']:
            if q not in source_config:
                raise ValueError(
                    'Source %i is missing a parameter: %s' % (si, q))

        infiles = [expand_path(configdir, fi) for fi in
                   source_config['input_spectrum_files']]
        ncomp = int(source_config['n_components'])
        windows = source_config['windows']

        # load spectra
        spectra = []
        for infile in infiles:
            hdrfile = infile + '.hdr'
            rfl = envi.open(hdrfile, infile)
            nl, nb, ns = [int(rfl.metadata[n])
                          for n in ('lines', 'bands', 'samples')]
            swl = s.array([float(f) for f in rfl.metadata['wavelength']])
            rfl_mm = rfl.open_memmap(interleave='source', writable=True)
            if rfl.metadata['interleave'] == 'bip':
                x = s.array(rfl_mm[:, :, :])
            if rfl.metadata['interleave'] == 'bil':
                x = s.array(rfl_mm[:, :, :]).transpose((0, 2, 1))
            x = x.reshape(nl*ns, nb)
            swl = s.array([float(f) for f in rfl.metadata['wavelength']])

            # import spectra and resample
            spectra.extend(([interp1d(
                swl, x1, kind='linear', bounds_error=False, fill_value='extrapolate')(wl) for x1 in x]))

        spectra = s.array(spectra)
        use = s.all(s.isfinite(spectra), axis=1)
        spectra = spectra[use, :]

        # accumulate total list of window indices
        window_idx = -s.ones((nchan), dtype=int)
        for wi, win in enumerate(windows):
            active_wl = aand(wl >= win['interval'][0], wl < win['interval'][1])
            window_idx[active_wl] = wi

        # Two step model.  First step is k-means initialization
        kmeans = KMeans(init='k-means++', n_clusters=ncomp, n_init=10)
        kmeans.fit(spectra)
        Z = kmeans.predict(spectra)

        for ci in range(ncomp):

            m = s.mean(spectra[Z == ci, :], axis=0)
            C = s.cov(spectra[Z == ci, :], rowvar=False)

            for i in range(nchan):
                window = windows[window_idx[i]]
                if window['correlation'] == 'EM':
                    C[i, i] = C[i, i] + float(window['regularizer'])
                elif window['correlation'] == 'decorrelated':
                    # use a separate name so the component index ci is not
                    # clobbered by the channel variance
                    Cii = C[i, i]
                    C[:, i] = 0
                    C[i, :] = 0
                    C[i, i] = Cii + float(window['regularizer'])
                else:
                    raise ValueError(
                        'I do not recognize the source '+window['correlation'])

            # Normalize the component spectrum if desired
            if normalize == 'Euclidean':
                z = s.sqrt(s.sum(pow(m[normind], 2)))
            elif normalize == 'RMS':
                z = s.sqrt(s.mean(pow(m[normind], 2)))
            elif normalize == 'None':
                z = 1.0
            else:
                raise ValueError(
                    'Unrecognized normalization: %s\n' % normalize)
            m = m/z
            C = C/(z**2)

            model['means'].append(m)
            model['covs'].append(C)

    model['means'] = s.array(model['means'])
    model['covs'] = s.array(model['covs'])

    savemat(outfile, model)
    print('saving results to '+outfile)
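# Hedged sketch (illustrative only, not from the script above): the
# 'decorrelated' branch zeroes a channel's cross-covariances with all other
# channels and adds a regularizer to its variance. In isolation, with numpy:
import numpy as np

def decorrelate_channel(C, i, regularizer):
    """Remove channel i's cross-covariances and regularize its variance."""
    C = C.copy()
    Cii = C[i, i]
    C[:, i] = 0
    C[i, :] = 0
    C[i, i] = Cii + regularizer
    return C

# e.g. decorrelate_channel(np.eye(3) * 2.0, 1, 0.1)[1, 1] -> 2.1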
Example #43
0
def _main(
    inventory,
    operations,
    verbosity,
    user,
    port,
    key,
    key_password,
    password,
    winrm_username,
    winrm_password,
    winrm_port,
    shell_executable,
    sudo,
    sudo_user,
    su_user,
    parallel,
    fail_percent,
    dry,
    limit,
    no_wait,
    serial,
    quiet,
    debug,
    debug_data,
    debug_facts,
    debug_operations,
    facts=None,
    print_operations=None,
    support=None,
):
    if not debug and not sys.warnoptions:
        warnings.simplefilter('ignore')

    # Setup logging
    log_level = logging.INFO
    if debug:
        log_level = logging.DEBUG
    elif quiet:
        log_level = logging.WARNING

    setup_logging(log_level)

    # Bootstrap any virtualenv
    init_virtualenv()

    deploy_dir = getcwd()
    potential_deploy_dirs = []

    # This is the most common case: we have a deploy file, so use its
    # pathname - we only look at the first file as we can't have multiple
    # deploy directories.
    if operations[0].endswith('.py'):
        deploy_file_dir, _ = path.split(operations[0])
        above_deploy_file_dir, _ = path.split(deploy_file_dir)

        deploy_dir = deploy_file_dir

        potential_deploy_dirs.extend((
            deploy_file_dir,
            above_deploy_file_dir,
        ))

    # If we have a valid inventory, look in its path and its parent for
    # group_data or config.py to indicate deploy_dir (--fact, --run).
    if inventory.endswith('.py') and path.isfile(inventory):
        inventory_dir, _ = path.split(inventory)
        above_inventory_dir, _ = path.split(inventory_dir)

        potential_deploy_dirs.extend((
            inventory_dir,
            above_inventory_dir,
        ))

    for potential_deploy_dir in potential_deploy_dirs:
        logger.debug('Checking potential directory: {0}'.format(
            potential_deploy_dir, ))

        if any((
                path.isdir(path.join(potential_deploy_dir, 'group_data')),
                path.isfile(path.join(potential_deploy_dir, 'config.py')),
        )):
            logger.debug(
                'Setting directory to: {0}'.format(potential_deploy_dir))
            deploy_dir = potential_deploy_dir
            break

    # Create an empty/uninitialised state object
    state = State()
    pseudo_state.set(state)

    # Setup printing on the new state
    print_operation_io = verbosity > 0
    print_fact_io = verbosity > 1

    state.print_output = print_operation_io  # -v
    state.print_input = print_operation_io  # -v
    state.print_fact_info = print_operation_io  # -v

    state.print_fact_output = print_fact_io  # -vv
    state.print_fact_input = print_fact_io  # -vv

    if not quiet:
        click.echo('--> Loading config...', err=True)

    # Load up any config.py from the filesystem
    config = load_config(deploy_dir)

    # Debug (print) inventory + group data
    if operations[0] == 'debug-inventory':
        command = 'debug-inventory'

    # Get all non-arg facts
    elif operations[0] == 'all-facts':
        command = 'fact'
        fact_names = []

        for fact_name in get_fact_names():
            fact_class = get_fact_class(fact_name)
            if (not issubclass(fact_class, ShortFactBase)
                    and not callable(fact_class.command)):
                fact_names.append(fact_name)

        operations = [(name, None) for name in fact_names]

    # Get one or more facts
    elif operations[0] == 'fact':
        command = 'fact'

        fact_names = operations[1:]
        facts = []

        for name in fact_names:
            args = None

            if ':' in name:
                name, args = name.split(':', 1)
                args = args.split(',')

            if not is_fact(name):
                raise CliError('No fact: {0}'.format(name))

            facts.append((name, args))

        operations = facts

    # Execute a raw command with server.shell
    elif operations[0] == 'exec':
        command = 'exec'
        operations = operations[1:]

    # Execute one or more deploy files
    elif all(cmd.endswith('.py') for cmd in operations):
        command = 'deploy'
        operations = operations[0:]

        for file in operations:
            if not path.exists(file):
                raise CliError('No deploy file: {0}'.format(file))

    # Operation w/optional args (<module>.<op> ARG1 ARG2 ...)
    elif len(operations[0].split('.')) == 2:
        command = 'op'
        operations = get_operation_and_args(operations)

    else:
        raise CliError('''Invalid operations: {0}

    Operation usage:
    pyinfra INVENTORY deploy_web.py [deploy_db.py]...
    pyinfra INVENTORY server.user pyinfra home=/home/pyinfra
    pyinfra INVENTORY exec -- echo "hello world"
    pyinfra INVENTORY fact os [users]...'''.format(operations))

    # Load any hooks/config from the deploy file
    if command == 'deploy':
        load_deploy_config(operations[0], config)

    # Arg based config overrides
    if sudo:
        config.SUDO = True
        if sudo_user:
            config.SUDO_USER = sudo_user

    if su_user:
        config.SU_USER = su_user

    if parallel:
        config.PARALLEL = parallel

    if shell_executable:
        config.SHELL = shell_executable

    if fail_percent is not None:
        config.FAIL_PERCENT = fail_percent

    if not quiet:
        click.echo('--> Loading inventory...', err=True)

    # Load up the inventory from the filesystem
    inventory, inventory_group = make_inventory(
        inventory,
        deploy_dir=deploy_dir,
        ssh_port=port,
        ssh_user=user,
        ssh_key=key,
        ssh_key_password=key_password,
        ssh_password=password,
        winrm_username=winrm_username,
        winrm_password=winrm_password,
        winrm_port=winrm_port,
    )

    # Apply any --limit to the inventory
    limit_hosts = None

    if limit:
        try:
            limit_hosts = inventory.get_group(limit)
        except NoGroupError:
            limits = limit.split(',')

            limit_hosts = [
                host for host in inventory if any(
                    fnmatch(host.name, limit) for limit in limits)
            ]

    # Attach to pseudo inventory
    pseudo_inventory.set(inventory)

    # Initialise the state, passing any initial --limit
    state.init(inventory, config, initial_limit=limit_hosts)

    # If --debug-data dump & exit
    if command == 'debug-inventory' or debug_data:
        if debug_data:
            logger.warning(
                ('--debug-data is deprecated, '
                 'please use `pyinfra INVENTORY debug-inventory` instead.'))
        print_inventory(state)
        _exit()

    # Set the deploy directory
    state.deploy_dir = deploy_dir

    # Connect to all the servers
    if not quiet:
        click.echo(err=True)
        click.echo('--> Connecting to hosts...', err=True)
    connect_all(state)

    # Just getting a fact?
    #

    if command == 'fact':
        if not quiet:
            click.echo(err=True)
            click.echo('--> Gathering facts...', err=True)

        # Print facts as we get them
        state.print_fact_info = True

        # Print fact output with -v
        state.print_fact_output = print_operation_io
        state.print_fact_input = print_operation_io

        fact_data = {}

        for i, command in enumerate(operations):
            name, args = command
            try:
                fact_data[name] = get_facts(
                    state,
                    name,
                    args=args,
                    apply_failed_hosts=False,
                )
            except PyinfraError:
                pass

        print_facts(fact_data)
        _exit()

    # Prepare the deploy!
    #

    # Execute a raw command with server.shell
    if command == 'exec':
        # Print the output of the command
        state.print_output = True

        add_op(
            state,
            server.shell,
            ' '.join(operations),
            _allow_cli_mode=True,
        )

    # Deploy files(s)
    elif command == 'deploy':
        if not quiet:
            click.echo(err=True)
            click.echo('--> Preparing operations...', err=True)

        # Number of "steps" to make = number of files * number of hosts
        for i, filename in enumerate(operations):
            logger.info('Loading: {0}'.format(click.style(filename,
                                                          bold=True)))
            state.current_op_file = i
            load_deploy_file(state, filename)

    # Operation w/optional args
    elif command == 'op':
        if not quiet:
            click.echo(err=True)
            click.echo('--> Preparing operation...', err=True)

        op, args = operations
        args, kwargs = args
        kwargs['_allow_cli_mode'] = True

        add_op(state, op, *args, **kwargs)

    # Always show meta output
    if not quiet:
        click.echo(err=True)
        click.echo('--> Proposed changes:', err=True)
    print_meta(state)

    # If --debug-facts or --debug-operations, print and exit
    if debug_facts or debug_operations:
        if debug_facts:
            print_state_facts(state)

        if debug_operations:
            print_state_operations(state)

        _exit()

    # Run the operations we generated with the deploy file
    if dry:
        _exit()

    if not quiet:
        click.echo(err=True)

    if not quiet:
        click.echo('--> Beginning operation run...', err=True)
    run_ops(state, serial=serial, no_wait=no_wait)

    if not quiet:
        click.echo('--> Results:', err=True)
    print_results(state)

    _exit()
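# Hedged sketch (standalone, names hypothetical, not pyinfra API): the
# deploy-directory detection above boils down to "first candidate containing
# group_data/ or config.py wins, otherwise fall back to the cwd".
from os import getcwd, path

def pick_deploy_dir(candidates):
    for candidate in candidates:
        if (path.isdir(path.join(candidate, 'group_data'))
                or path.isfile(path.join(candidate, 'config.py'))):
            return candidate
    return getcwd()

# e.g. pick_deploy_dir(['deploys/web', 'deploys']) -> first match, else cwd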
Example #44
0
def parse_path(self, path):
    sectype, filename = split(path)
    section, atype = split(sectype)
    type = atype.split('-')[0]
    return section, type, filename
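# Hedged illustration (standalone, path is hypothetical): how the two splits
# compose - split() peels off the filename, then the section, and the type is
# whatever precedes the first '-'.
from os.path import split

def _parse_path_demo(p):
    sectype, filename = split(p)        # ('articles/howto-web', 'index.txt')
    section, atype = split(sectype)     # ('articles', 'howto-web')
    return section, atype.split('-')[0], filename

# _parse_path_demo('articles/howto-web/index.txt')
# -> ('articles', 'howto', 'index.txt')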
Example #45
0
<h2>
Technical Information for Boat and Swimmers
</h2>
<p>
<img src="/Boat_with_three_swimmers/static/images/High_resolution_picture.png" width=400>
</p>
""",
                 render_as_text=False,
                 width=400)

curdoc().add_root(
    column(
        row(description, Spacer(width=100),
            column(Spacer(height=100), area_image)),
        Spacer(height=50),
        scene,
        Spacer(height=50),
        row(
            column(
                numberPersonsSlider,
                play_button,
                #pause_button,
                jump_button,
                reset_button),
            Spacer(width=100),
            velocity_diagram,
        )))
# Get the parent directory and use only its name for the tab title,
# replacing underscores '_' and hyphens '-' with spaces ' '.
curdoc().title = split(dirname(__file__))[-1].replace('_',
                                                      ' ').replace('-', ' ')
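# Hedged illustration (hypothetical path): how the tab title is derived from
# the app directory name.
from os.path import dirname, split
example = '/srv/bokeh_apps/Boat_with_three_swimmers/main.py'
print(split(dirname(example))[-1].replace('_', ' ').replace('-', ' '))
# -> 'Boat with three swimmers'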
Example #46
0
def make_forward_solution(info, trans, src, bem, meg=True, eeg=True,
                          mindist=0.0, ignore_ref=False, n_jobs=1,
                          verbose=None):
    """Calculate a forward solution for a subject.

    Parameters
    ----------
    %(info_str)s
    %(trans)s
    src : str | instance of SourceSpaces
        If string, should be a source space filename. Can also be an
        instance of loaded or generated SourceSpaces.
    bem : dict | str
        Filename of the BEM (e.g., "sample-5120-5120-5120-bem-sol.fif") to
        use, or a loaded sphere model (dict).
    meg : bool
        If True (Default), include MEG computations.
    eeg : bool
        If True (Default), include EEG computations.
    mindist : float
        Minimum distance of sources from inner skull surface (in mm).
    ignore_ref : bool
        If True, do not include reference channels in compensation. This
        option should be True for KIT files, since forward computation
        with reference channels is not currently supported.
    %(n_jobs)s
    %(verbose)s

    Returns
    -------
    fwd : instance of Forward
        The forward solution.

    See Also
    --------
    convert_forward_solution

    Notes
    -----
    The ``--grad`` option from MNE-C (to compute gradients) is not implemented
    here.

    To create a fixed-orientation forward solution, use this function
    followed by :func:`mne.convert_forward_solution`.
    """
    # Currently not (sup)ported:
    # 1. --grad option (gradients of the field, not used much)
    # 2. --fixed option (can be computed post-hoc)
    # 3. --mricoord option (probably not necessary)

    # read the transformation from MRI to HEAD coordinates
    # (could also be HEAD to MRI)
    mri_head_t, trans = _get_trans(trans)
    if isinstance(bem, ConductorModel):
        bem_extra = 'instance of ConductorModel'
    else:
        bem_extra = bem
    if not isinstance(info, (Info, str)):
        raise TypeError('info should be an instance of Info or string')
    if isinstance(info, str):
        info_extra = op.split(info)[1]
        info = read_info(info, verbose=False)
    else:
        info_extra = 'instance of Info'
    n_jobs = check_n_jobs(n_jobs)

    # Report the setup
    logger.info('Source space          : %s' % src)
    logger.info('MRI -> head transform : %s' % trans)
    logger.info('Measurement data      : %s' % info_extra)
    if isinstance(bem, ConductorModel) and bem['is_sphere']:
        logger.info('Sphere model      : origin at %s mm'
                    % (bem['r0'],))
        logger.info('Standard field computations')
    else:
        logger.info('Conductor model   : %s' % bem_extra)
        logger.info('Accurate field computations')
    logger.info('Do computations in %s coordinates',
                _coord_frame_name(FIFF.FIFFV_COORD_HEAD))
    logger.info('Free source orientations')

    megcoils, meg_info, compcoils, megnames, eegels, eegnames, rr, info, \
        update_kwargs, bem = _prepare_for_forward(
            src, mri_head_t, info, bem, mindist, n_jobs, bem_extra, trans,
            info_extra, meg, eeg, ignore_ref)
    del (src, mri_head_t, trans, info_extra, bem_extra, mindist,
         meg, eeg, ignore_ref)

    # Time to do the heavy lifting: MEG first, then EEG
    coil_types = ['meg', 'eeg']
    coils = [megcoils, eegels]
    ccoils = [compcoils, None]
    infos = [meg_info, None]
    megfwd, eegfwd = _compute_forwards(rr, bem, coils, ccoils,
                                       infos, coil_types, n_jobs)

    # merge forwards
    fwd = _merge_meg_eeg_fwds(_to_forward_dict(megfwd, megnames),
                              _to_forward_dict(eegfwd, eegnames),
                              verbose=False)
    logger.info('')

    # Don't transform the source spaces back into MRI coordinates (which is
    # done in the C code) because mne-python assumes forward solution source
    # spaces are in head coords.
    fwd.update(**update_kwargs)
    logger.info('Finished.')
    return fwd
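# Hedged usage sketch (not from the source above): build a forward solution
# and then a fixed-orientation version, as the docstring suggests. The paths
# below assume the standard MNE "sample" dataset layout and are illustrative
# only; info is passed as a filename, which the function accepts.
import os.path as op
import mne
from mne.datasets import sample

data_path = str(sample.data_path())
raw_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw.fif')
trans_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw-trans.fif')
src_fname = op.join(data_path, 'subjects', 'sample', 'bem', 'sample-oct-6-src.fif')
bem_fname = op.join(data_path, 'subjects', 'sample', 'bem',
                    'sample-5120-5120-5120-bem-sol.fif')

fwd = mne.make_forward_solution(raw_fname, trans=trans_fname, src=src_fname,
                                bem=bem_fname, meg=True, eeg=True, mindist=5.0)
fwd_fixed = mne.convert_forward_solution(fwd, surf_ori=True, force_fixed=True)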
Example #47
0
def main (cache_file, out, store_dir, has_compartment, has_replicate, intrahost_info, pairwise_distance_info, short, ignore_replicate, minimum_coverage = 500):

    has_tropism = False

    processed    = {'columns': ['PID','Date','Gene','Region','Coverage','Reads (clones)','Diversity, %','S, %', 'NS, %', 'X4, %',''],
                    'annotations' : ['Patient ID', 'Sample Date', 'Reference Gene', 'Maximal contiguous region bracketed by positions with at least %d covering reads' % minimum_coverage, 'Median per base coverage', 'Total Mapped Reads (% unique, i.e. not an exact match to another read)',
                                     'Mean pairwise nucleotide diversity', 'Mean pairwise synonymous diversity (per nucleotide site)', 'Mean pairwise non-synonymous diversity (per nucleotide site)', 'Percent of reads predicted to be X4- or dual-tropic (IDEPI, env only)', ''],
                    'data' : [] }

    if intrahost_info is not None:
        processed ['intrahost'] = {}

    has_fst = "F_ST" in cache_file

    processed['settings'] = {'compartment' : has_compartment, 'replicate': has_replicate}

    if has_replicate:
        processed['columns'].insert (3, 'Replicate')
        processed['annotations'].insert (3, 'Replicate ID')
    if has_compartment:
        processed['columns'].insert (3, 'Compartment')
        processed['annotations'].insert (3, 'Source compartment for this sample')

    if has_fst:
        processed['F_ST'] = {}


    '''
    for root, dirs, files in os.walk(dir):
        for d in dirs:
            available_information [os.path.join (root, d)] = {'prot_coverage' : None, 'diversity' : None, 'tn93' : None, 'rates': None}
            optional_information [os.path.join (root, d)]  = {'fst' : None}

        for file in files:
            name, ext = splitext (file)
            for which_dict in (available_information, optional_information):
                if root in which_dict and name in which_dict[root]:
                    try:
                        with open (join (root, file)) as fh:
                            which_dict[root][name] = json.load (fh)
                    except:
                        pass
    '''

    required_keys = ['merged_json', 'region_merged_processed', 'tn93_json', 'json_rates']
    optional_keys = ['fst']

    id_to_pid_date       = {}
    earilest_date_by_pid = {}
    merged_msa_to_info   = {}

    print ("Loaded the pipeline cache file with %d records" % len (cache_file), file = sys.stderr)

    tried = 0

    for record, item in cache_file.items():
        try:
            if 'filtered_fastq' not in item or item['filtered_fastq'] is None:
                continue

            row = []
            pid  = item ["patient_id"]
            sample_date = item["sample_date"]
            date = parse_a_date (sample_date)


            if pid not in earilest_date_by_pid:
                earilest_date_by_pid[pid] = date
            else:
                earilest_date_by_pid[pid] = min (date, earilest_date_by_pid[pid])

            id_expansion = [pid, date]

            if has_compartment:
                compartment = item["compartment"]
                id_expansion.append (compartment)
            else:
                compartment = None

            if has_replicate:
                replicate = item["replicate"]
                id_expansion.append (replicate)
            else:
                replicate = None
            id_to_pid_date [item["id"]] = id_expansion


            print(item)

            for gene, data in item.items():
                try:
                    current_step = None
                    dir_name = [pid, sample_date, gene]

                    tried += 1

                    row = [pid,date,gene]
                    if has_compartment:
                        row.insert (3,compartment )
                        dir_name.append (compartment)

                    if has_replicate:
                        row.insert (4, replicate)
                        dir_name.append (replicate)

                    dir_name = '_'.join (dir_name)

                    merged_msa_to_info [data['merged_msa']] = [pid, date, gene, compartment, replicate]
                    current_step = "Checking for merged_json"

                    files_to_copy = [[data['merged_json'],None]]
                    # read coverage info

                    ci = None

                    current_step = "Opening merged_json"
                    with open (files_to_copy[0][0]) as fh:
                        current_step = "Loading %s" % files_to_copy[0][0]
                        ci = coverage_info(json.load (fh), minimum_coverage)

                    if ci[0] is not None:

                        cell_entry = None

                        if 'overall_region' in data:
                            files_to_copy.append ([data['overall_region'][1],dir_name])
                            spanned_region = "-".join([str (k) for k in ci[0]])


                            cell_entry = {'text' : spanned_region,
                                          'link' : join(dir_name,split(data['overall_region'][1])[1]),
                                          'target' : dir_name + "_" + spanned_region + ".fas",
                                          'consensus': None}

                            row.append (cell_entry)
                        else:
                            row.append ("-".join([str (k) for k in ci[0]]))

                        row.append ({'text' : '%d' % (ci[1]['median']),
                                     'pop-up' : {'mean' : '%.2f' % ci[1]['mean'],
                                                 'median' : ci[1]['median'],
                                                 'IQR' : '%.0f-%.0f' % (ci[1]['IQR'][0],ci[1]['IQR'][1]),
                                                 'range' : '%.0f-%.0f' % (ci[1]['min'],ci[1]['max'])}})

                        try:
                            row.append ({'text' : '%d (%d)' % (data['merged_counts']['total'], data['merged_counts']['unique']),
                                         'pop-up' : item['read_stats'],
                                         'unique_id': item ['id']})
                        except (KeyError, AttributeError, TypeError, ValueError) as e:
                            row.append (None)

                        files_to_copy.append ([data['tn93_json'], None])
                        with open (files_to_copy[-1][0]) as fh:
                            current_step = "Loading %s" % files_to_copy[-1][0]
                            tn93 = json.load (fh)

                        tn93_dist = tn93['Mean distance']
                        row.append (tn93_dist*100.)

                        files_to_copy.append ([data['region_merged_processed'], None])


                        with open (files_to_copy[-1][0]) as fh:
                            current_step = "Loading %s " %  files_to_copy[-1][0]
                            diversity = json.load (fh)


                        if len (diversity) == 0:
                            row.append (None)
                            row.append (None)
                        else:
                            row.append (diversity_info (diversity, 'S')['max']*100.)
                            row.append (diversity_info (diversity, 'NS')['max']*100.)

                        if 'tropism' in data and data['tropism']:
                            current_step = "Copying tropism data"
                            row.append (data['tropism']['X4'] * 100.)
                            #print (data['tropism']['X4'] * 100.)
                            has_tropism = True
                        else:
                            row.append ("No data")

                        files_to_copy.append ([data['json_rates'], None])
                        with open (files_to_copy[-1][0]) as fh:
                            current_step = "Loading json_rates %s" % files_to_copy[-1][0]
                            json_rates = json.load (fh)

                        json_rates ['consensus'] = consensus (json_rates, 0.25, False)
                        files_to_copy[-1][1] = json_rates
                        if cell_entry is not None:
                            cell_entry ['consensus'] = json_rates ['consensus']

                        #print (row)
                        if intrahost_info is not None:
                            try:
                                this_record = intrahost_info[pid][gene]
                                if has_compartment:
                                    this_record = this_record [compartment]

                                if has_replicate:
                                    this_record = this_record[replicate]


                                with open (this_record['rates.tsv'], 'r') as fh:
                                    intra_host = csv.reader (fh, delimiter = '\t')
                                    headers = next (intra_host)
                                    if len (headers) == 14:
                                        add_key_if_missing (processed ['intrahost'], pid, {})
                                        store_here = add_key_if_missing (processed ['intrahost'][pid], gene, {})

                                        if has_compartment:
                                            store_here = add_key_if_missing (store_here, compartment, {})
                                        if has_replicate and not ignore_replicate:
                                            store_here = add_key_if_missing (store_here, replicate, {})

                                        for r in intra_host:
                                            div_date = parse_a_date (r[0])
                                            store_here[div_date] = {}
                                            for c in range(1,len(r)):
                                                 store_here[div_date][headers[c]] = r[c]



                            except (KeyError, AttributeError, TypeError, FileNotFoundError, ValueError) as e:
                                print ("Failing intrahost_info key %s (%s)" % (gene, str (e)), file = sys.stderr)
                                pass
                    else:
                        row.extend ([None,ci[1]['median'],None,None,None,None,None])


                except (KeyError, AttributeError, TypeError, ValueError) as e:
                    if current_step:
                        row.extend ([None for k in range (len (row), len (processed['columns'])-1)])
                        print ("Failing %s" % current_step, row, e)
                    else:
                        continue

                result_path = os.path.join (store_dir,dir_name)
                if not os.path.exists (result_path):
                    os.makedirs (result_path)

                for file in files_to_copy:
                    if file[1] is not None:
                        if type (file[1]) is str:
                            with open (join(result_path,split(file[0])[1]), "w") as oh:
                                 with open (file[0], 'r') as fh:
                                    for record in SeqIO.parse (fh, "fasta"):
                                        print (">%s\n%s\n" % (re.sub(_clone_name_replacement, file[1], record.name), record.seq), file = oh)
                        elif type (file[1]) is dict:
                            copied_file = shutil.copy (file[0], result_path)
                            with open (copied_file, 'w') as fh:
                                json.dump (file[1], fh)


                    else:
                        shutil.copy (file[0], result_path)


                row.append (dir_name)
                #print (dir_name, row)
                processed['data'].append (row)

        except (KeyError,TypeError) as e:
            if record != 'F_ST':
                print ('Missing required record fields for %s' % (record), e, file = sys.stderr)

    if not has_tropism:
        for row in processed['data']:
            row.pop (-2)
        processed['columns'].pop (-2)

    print ("Compiled data on %d out of %d present individual NGS libraries" % (len (processed['data']), tried), file = sys.stderr)

    if intrahost_info is not None and pairwise_distance_info is not None:
        for d in processed['data']:

            try:
                store_dict = processed ['intrahost'][d[0]][d[2]]
                id = 2
                if has_compartment:
                    id += 1
                    store_dict = store_dict[d[id]]
                if has_replicate and not ignore_replicate:
                    id += 1
                    store_dict = store_dict[d[id]]


                tn93 = d[id + 5]

                if tn93 is None:
                    continue

                store_dict[d[1]]['tn93_diversity'] = tn93 * 0.01
            except (KeyError, AttributeError, TypeError) as e:
                #print (e, file = sys.stderr)
                continue

        for pair, distance_info in pairwise_distance_info.items():
            try:
                if short:
                    pair_info = pair.split ('-')
                    if len (pair_info) == 3:
                        tag1 = id_to_pid_date[int (pair_info[0])]
                        tag2 = id_to_pid_date[int (pair_info[1])]
                        gene = pair_info[2]
                        offset = 0
                    else:
                        continue
                else:
                    pair_info = pair.split ('|')
                    if len (pair_info) == 2:
                        tag1 = merged_msa_to_info [pair_info[0]]
                        tag2 = merged_msa_to_info [pair_info[1]]
                        gene = tag1 [2]
                        offset = 1
                    else:
                        continue
            except (KeyError) as e:
                continue


            pid = tag1[0]

            #print (tag1, tag2)

            try:
                store_dict = processed ['intrahost'][pid][gene]
                if has_compartment:
                    #store_dict = store_dict[tag1[3]]
                    if tag1[3] != tag2[3]:
                        continue
                '''
                if has_replicate:
                    store_dict = store_dict[tag1[4]]
                    if tag1[4] != tag2[4]:
                        continue
                '''

                #print (tag1, tag2)
                #store_dict [earilest_date_by_pid[pid]]['tn93_divergence'] = 0.0

                if pid == tag2 [0]:
                    baseline_date = earilest_date_by_pid[pid]
                    if baseline_date == tag1[1] or baseline_date == tag2[1]:

                        #add_key_if_missing (store_dict, tag1[1], {})
                        #add_key_if_missing (store_dict, tag2[1], {})
                        #store_dict [tag1[1]]['tn93_divergence'] = 0.
                        #store_dict [tag2[1]]['tn93_divergence'] = 0.

                        if earilest_date_by_pid[pid] == tag1[1]:
                            store_tag = tag2
                        else:
                            store_tag = tag1

                        store_here = store_tag [1]
                        if has_compartment:
                            store_dict = store_dict[store_tag[3]]
                        if has_replicate and not ignore_replicate:
                            store_dict = store_dict[store_tag[4]]


                        add_key_if_missing (store_dict, store_here, {})
                        store_dict [store_here]['tn93_divergence'] = distance_info['Mean']

                        if 'Histogram' in distance_info:
                           store_dict [store_here]['tn93_divergence_histogram'] =  distance_info['Histogram']

                        #26919/20010921/BP/2/env/
                        '''if '26919' in processed ['intrahost']:
                            if 'env' in processed ['intrahost']['26919']:
                                if 'BP' in processed ['intrahost']['26919']['env']:
                                    if '2' in processed ['intrahost']['26919']['env']['BP']:
                                        if '2001/09/21' in processed ['intrahost']['26919']['env']['BP']['2']:
                                            print ('BARF', pair, tag1, tag2)
                                            sys.exit (1)
                        '''

            except (KeyError, AttributeError, TypeError) as e:
                #print (pid, e)
                pass

    if has_fst:
        for f_pid, by_date in cache_file["F_ST"].items():
            for f_date, by_gene in by_date.items():
                for f_gene, by_pair in by_gene.items():
                    for pair_key, pair_data in by_pair.items():
                        try:
                            id1, id2 = pair_key.split ('|')
                            info1 = merged_msa_to_info[id1]
                            info2 = merged_msa_to_info[id2]
                            store_here = add_key_if_missing (processed["F_ST"], f_pid, {})
                            store_here = add_key_if_missing (store_here, parse_a_date(f_date), {})
                            store_here = add_key_if_missing (store_here, f_gene, [])
                            store_here.append ([info1[-1], info2[-1], pair_data])


                        except (KeyError) as e:
                            continue



    processed['data'].sort (key = lambda row : row[0: (3 + (1 if has_compartment else 0) + (1 if has_replicate else 0))])
    json.dump (processed, out, indent=1)



    return 0
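# The pipeline above leans on an add_key_if_missing(d, key, default) helper
# that is not shown here. A minimal sketch consistent with how it is called
# (returns the stored value, like dict.setdefault) might be:
def add_key_if_missing(d, key, default):
    if key not in d:
        d[key] = default
    return d[key]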
Example #48
0
def getTargetFilename(filePath):
    if OFFLINE_MODE:
        _, fileName = path.split(filePath)
        return fixPath(path.join(TARGET_CSV_DIRECTORY, fileName))
    else:
        return filePath
Example #49
0
    def __init__(self,
                 observation_space,
                 action_space,
                 m_dir=None,
                 log_name=None,
                 start_step=0,
                 start_round=0,
                 buf_full=False,
                 load_buffer=False,
                 buf_count=0):
        """
        model : The actual training model
        t_model : Fixed target model
        """
        print('Model directory : {}'.format(m_dir))
        print('Log name : {}'.format(log_name))
        print('Starting from step {}'.format(start_step))
        print('Starting from round {}'.format(start_round))
        print('Buffer full? {}'.format(buf_full))
        print('Load buffer? {}'.format(load_buffer))
        print('Current buffer count : {}'.format(buf_count))
        self.action_n = action_space.n
        #Inputs
        if m_dir is None:
            left_input = keras.Input(observation_space['Left'].shape,
                                     name='Left')
            right_input = keras.Input(observation_space['Right'].shape,
                                      name='Right')
            # Spare eye model for later use
            left_input_shape = observation_space['Left'].shape
            right_input_shape = observation_space['Right'].shape
            left_eye_model = self.eye_model(left_input_shape, 'Left')
            right_eye_model = self.eye_model(right_input_shape, 'Right')
            # Get outputs of the model
            left_encoded = left_eye_model(left_input)
            right_encoded = right_eye_model(right_input)
            # Concatenate both eye's inputs
            concat = layers.Concatenate()([left_encoded, right_encoded])
            outputs = self.brain_layers(concat)
            # Build models
            self.model = keras.Model(inputs=[left_input, right_input],
                                     outputs=outputs)
            self.model.compile(optimizer='Adam', loss='mse', metrics=['mse'])
        else:
            self.model = keras.models.load_model(m_dir)
        self.t_model = keras.models.clone_model(self.model)
        self.t_model.set_weights(self.model.get_weights())
        self.model.summary()

        # Buffers
        if load_buffer:
            print('loading buffers...')
            buffers = np.load(path.join(m_dir, 'buffer.npz'))
            self.right_buffer = buffers['Right']
            self.left_buffer = buffers['Left']
            self.target_buffer = buffers['Target']
            buffers.close()
            print('loaded')
        else:
            self.right_buffer = np.zeros(
                np.concatenate(
                    ([hp.Buffer_size], observation_space['Right'].shape)))
            self.left_buffer = np.zeros(
                np.concatenate(
                    ([hp.Buffer_size], observation_space['Left'].shape)))
            self.target_buffer = np.zeros((hp.Buffer_size, self.action_n))

        # File writer for tensorboard
        if log_name is None:
            self.log_name = datetime.now().strftime('%m_%d_%H_%M_%S')
        else:
            self.log_name = log_name
        self.file_writer = tf.summary.create_file_writer(
            path.join('log', self.log_name))
        self.file_writer.set_as_default()
        print('Writing logs at ' + self.log_name)

        # Scalars
        self.start_training = False
        self.buffer_full = buf_full
        self.total_steps = start_step
        self.current_steps = 1
        self.buffer_count = buf_count
        self.score = 0
        self.rounds = start_round
        self.cumreward = 0

        # Savefile folder directory
        if m_dir is None:
            self.save_dir = path.join(
                'savefiles',
                datetime.now().strftime('%m_%d_%H_%M_%S'))
            self.save_count = 0
        else:
            self.save_dir, self.save_count = path.split(m_dir)
            self.save_count = int(self.save_count)
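# Hedged sketch (not part of the class above; names hypothetical): the
# buffer.npz loaded in __init__ pairs naturally with a save step that uses
# the same key names.
import numpy as np
from os import path

def save_buffers(m_dir, right_buffer, left_buffer, target_buffer):
    """Persist replay buffers with the key names __init__ expects."""
    np.savez(path.join(m_dir, 'buffer.npz'),
             Right=right_buffer, Left=left_buffer, Target=target_buffer)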
Example #50
0
import argparse
import os.path as osp  # argparse/osp are used below but missing from the truncated snippet
from torchvision import models
import torch.nn as nn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from torch.autograd import Variable

import yaml
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt

here = osp.dirname(osp.abspath(__file__))  # output folder is located here
root_dir, _ = osp.split(here)
import sys
sys.path.append(root_dir)

import train
import models
import utils
from config import configurations


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-e',
                        '--exp_name',
                        default='resnet101_vggface_scratch')
    parser.add_argument('-c',
Example #51
0
def get_resource(self, path, site):
    request = DummyRequest(postpath=path.split('/'), prepath=[])
    return site.getResourceFor(request)
Example #52
0
import sys
import os.path as op
sys.path.append(op.split(op.split(op.split(op.realpath(__file__))[0])[0])[0])
from datetime import datetime

import numpy as np
import tensorflow as tf
tf.enable_eager_execution()
import matplotlib.pyplot as plt

from input_pipelines.open_images.input_subset_bboxes import train_input


class params(object):
    height_feature_extractor = 512
    width_feature_extractor = 512
    preserve_aspect_ratio = False
    Nb = 8
    plotting = False
    distribute = False


class runconfig(object):
    train_distribute = False


if params.plotting:
    fig = plt.figure(0)
    axs = [None] * 2
    axs[0] = fig.add_subplot(2, 1, 1)
    axs[0].set_axis_off()
Example #53
0
def dash_report(
    info=None,
    sessions=None,
    tags=None,
    signals=None,
    recreate_plots=None,
    video_only=None,
):
    """Create a web report dash app.

    Parameters
    ----------

    info : dict
        patient info
    sessions : list
        list of session dirs
    tags : list
        tags for dynamic gait trials
    signals : ProgressSignals
        instance of ProgressSignals, used to send progress updates across threads
    recreate_plots : bool
        force recreation of report
    video_only : bool
        Create a video-only report. C3D data will not be read.
    """

    if recreate_plots is None:
        recreate_plots = False

    if video_only is None:
        video_only = False

    # relative width of left panel (1-12)
    # 3-session comparison uses narrower video panel
    # LEFT_WIDTH = 8 if len(sessions) == 3 else 7
    LEFT_WIDTH = 8
    VIDS_TOTAL_HEIGHT = 88  # % of browser window height

    if len(sessions) < 1 or len(sessions) > 3:
        raise ValueError('Need a list of one to three sessions')
    is_comparison = len(sessions) > 1
    report_name = _report_name(sessions)
    info = info or sessionutils.default_info()

    # tags for dynamic trials
    # if doing a comparison, pick representative trials only
    dyn_tags = tags or (cfg.eclipse.repr_tags if is_comparison else cfg.eclipse.tags)
    # this tag will be shown in the menu for static trials
    static_tag = 'Static'

    # get the camera labels
    # reduce to a set, since there may be several labels for given id
    camera_labels = set(cfg.general.camera_labels.values())
    # add camera labels for overlay videos
    # XXX: may cause trouble if labels already contain the string 'overlay'
    camera_labels_overlay = [lbl + ' overlay' for lbl in camera_labels]
    camera_labels.update(camera_labels_overlay)
    # build dict of videos for given tag / camera label
    # videos will be listed in session order
    vid_urls = dict()
    all_tags = dyn_tags + [static_tag] + cfg.eclipse.video_tags
    for tag in all_tags:
        vid_urls[tag] = dict()
        for camera_label in camera_labels:
            vid_urls[tag][camera_label] = list()

    # collect all session enfs into dict
    enfs = {session: dict() for session in sessions}
    data_enfs = list()  # enfs that are used for data
    signals.progress.emit('Collecting trials...', 0)
    for session in sessions:
        if signals.canceled:
            return None
        enfs[session] = dict(dynamic=dict(), static=dict(), vid_only=dict())
        # collect dynamic trials for each tag
        for tag in dyn_tags:
            dyns = sessionutils.get_enfs(session, tags=tag, trial_type='dynamic')
            if len(dyns) > 1:
                logger.warning('multiple tagged trials (%s) for %s' % (tag, session))
            dyn_trial = dyns[-1:]
            enfs[session]['dynamic'][tag] = dyn_trial  # may be empty list
            if dyn_trial:
                data_enfs.extend(dyn_trial)
        # require at least one dynamic trial for each session
        if not any(enfs[session]['dynamic'][tag] for tag in dyn_tags):
            raise GaitDataError('No tagged dynamic trials found for %s' % (session))
        # collect static trial (at most 1 per session)
        # -prefer enfs that have a corresponding c3d file, even for a video-only report
        # (so that the same static gets used for both video-only and full reports)
        # -prefer the newest enf file
        sts = sessionutils.get_enfs(session, trial_type='static')
        for st in reversed(sts):  # newest first
            st_c3d = sessionutils.enf_to_trialfile(st, '.c3d')
            if op.isfile(st_c3d):
                static_trial = [st]
                break
        else:
            # no c3ds were found - just pick the latest static trial
            static_trial = sts[-1:]
        enfs[session]['static'][static_tag] = static_trial
        if static_trial:
            data_enfs.extend(static_trial)
        # collect video-only dynamic trials
        for tag in cfg.eclipse.video_tags:
            dyn_vids = sessionutils.get_enfs(session, tags=tag)
            if len(dyn_vids) > 1:
                logger.warning(
                    'multiple tagged video-only trials (%s) for %s' % (tag, session)
                )
            enfs[session]['vid_only'][tag] = dyn_vids[-1:]

    # collect all videos for given tag and camera, listed in session order
    signals.progress.emit('Finding videos...', 0)
    for session in sessions:
        for trial_type in enfs[session]:
            for tag, enfs_this in enfs[session][trial_type].items():
                if enfs_this:
                    enf = enfs_this[0]  # only one enf per tag and session
                    for camera_label in camera_labels:
                        overlay = 'overlay' in camera_label
                        real_camera_label = (
                            camera_label[: camera_label.find(' overlay')]
                            if overlay
                            else camera_label
                        )
                        c3d = enf_to_trialfile(enf, 'c3d')
                        vids_this = videos.get_trial_videos(
                            c3d,
                            camera_label=real_camera_label,
                            vid_ext='.ogv',
                            overlay=overlay,
                        )
                        if vids_this:
                            vid = vids_this[0]
                            url = '/static/%s' % op.split(vid)[1]
                            vid_urls[tag][camera_label].append(url)

    # build dcc.Dropdown options list for cameras and tags
    # list cameras which have videos for any tag
    opts_cameras = list()
    for camera_label in sorted(camera_labels):
        if any(vid_urls[tag][camera_label] for tag in all_tags):
            opts_cameras.append({'label': camera_label, 'value': camera_label})
    # list tags which have videos for any camera
    opts_tags = list()
    for tag in all_tags:
        if any(vid_urls[tag][camera_label] for camera_label in camera_labels):
            opts_tags.append({'label': '%s' % tag, 'value': tag})
    # add null entry in case we got no videos at all
    if not opts_tags:
        opts_tags.append({'label': 'No videos', 'value': 'no videos', 'disabled': True})

    # this whole section is only needed if we have c3d data
    if not video_only:
        # see whether we can load report figures from disk
        data_c3ds = [enf_to_trialfile(enffile, 'c3d') for enffile in data_enfs]
        digest = numutils.files_digest(data_c3ds)
        logger.debug('report data digest: %s' % digest)
        # data is always saved into alphabetically first session
        data_dir = sorted(sessions)[0]
        data_fn = op.join(data_dir, 'web_report_%s.dat' % digest)
        if op.isfile(data_fn) and not recreate_plots:
            logger.debug('loading saved report data from %s' % data_fn)
            signals.progress.emit('Loading saved report...', 0)
            with open(data_fn, 'rb') as f:
                saved_report_data = pickle.load(f)
        else:
            saved_report_data = dict()
            logger.debug('no saved data found or recreate forced')

        # make Trial instances for all dynamic and static trials
        # this is currently needed even if saved report is used
        trials_dyn = list()
        trials_static = list()
        _trials_avg = dict()
        for session in sessions:
            _trials_avg[session] = list()
            for tag in dyn_tags:
                if enfs[session]['dynamic'][tag]:
                    if signals.canceled:
                        return None
                    c3dfile = enf_to_trialfile(enfs[session]['dynamic'][tag][0], 'c3d')
                    tri = Trial(c3dfile)
                    trials_dyn.append(tri)
                    _trials_avg[session].append(tri)
            if enfs[session]['static'][static_tag]:
                c3dfile = enf_to_trialfile(enfs[session]['static']['Static'][0], 'c3d')
                tri = Trial(c3dfile)
                trials_static.append(tri)

        emg_layout = None
        tibial_torsion = dict()

        # stuff that's needed to (re)create the figures
        if not saved_report_data:
            age = None
            if info['hetu'] is not None:
                # compute subject age at session time
                session_dates = [
                    sessionutils.get_session_date(session) for session in sessions
                ]
                ages = [age_from_hetu(info['hetu'], d) for d in session_dates]
                age = max(ages)

            # create Markdown text for patient info
            patient_info_text = '##### %s ' % (
                info['fullname'] if info['fullname'] else 'Name unknown'
            )
            if info['hetu']:
                patient_info_text += '(%s)' % info['hetu']
            patient_info_text += '\n\n'
            # if age:
            #     patient_info_text += 'Age at measurement time: %d\n\n' % age
            if info['report_notes']:
                patient_info_text += info['report_notes']

            model_normaldata = dict()
            avg_trials = list()

            # load normal data for gait models
            signals.progress.emit('Loading normal data...', 0)
            for fn in cfg.general.normaldata_files:
                ndata = normaldata.read_normaldata(fn)
                model_normaldata.update(ndata)
            if age is not None:
                age_ndata_file = normaldata.normaldata_age(age)
                if age_ndata_file:
                    age_ndata = normaldata.read_normaldata(age_ndata_file)
                    model_normaldata.update(age_ndata)

            # make average trials for each session
            avg_trials = [
                AvgTrial.from_trials(_trials_avg[session], sessionpath=session)
                for session in sessions
            ]
            # read some extra data from trials and create supplementary data
            for tr in trials_dyn:
                # read tibial torsion for each trial and make supplementary traces
                # these will only be shown for KneeAnglesZ (knee rotation) variable
                tors = dict()
                tors['R'], tors['L'] = (
                    tr.subj_params['RTibialTorsion'],
                    tr.subj_params['LTibialTorsion'],
                )
                if tors['R'] is None or tors['L'] is None:
                    logger.warning(
                        'could not read tibial torsion values from %s' % tr.trialname
                    )
                    continue
                # include torsion info for all cycles; this is useful when plotting
                # isolated cycles
                max_cycles = cfg.plot.max_cycles['model']
                cycs = tr.get_cycles(cfg.plot.default_cycles['model'])[:max_cycles]

                for cyc in cycs:
                    tibial_torsion[cyc] = dict()
                    for ctxt in tors:
                        var_ = ctxt + 'KneeAnglesZ'
                        tibial_torsion[cyc][var_] = dict()
                        # x = % of gait cycle
                        tibial_torsion[cyc][var_]['t'] = np.arange(101)
                        # static tibial torsion value as function of x
                        # convert radians -> degrees
                        tibial_torsion[cyc][var_]['data'] = (
                            np.ones(101) * tors[ctxt] / np.pi * 180
                        )
                        tibial_torsion[cyc][var_]['label'] = 'Tib. tors. (%s) %s' % (
                            ctxt,
                            tr.trialname,
                        )

            # for the EMG layout, keep channels that are active in any of the trials
            signals.progress.emit('Reading EMG data', 0)
            try:
                emgs = [tr.emg for tr in trials_dyn]
                emg_layout = layouts.rm_dead_channels_multitrial(
                    emgs, cfg.layouts.std_emg
                )
                if not emg_layout:
                    emg_layout = 'disabled'
            except GaitDataError:
                emg_layout = 'disabled'

        # define layouts
        # FIXME: should be definable in config
        _layouts = OrderedDict(
            [
                ('Patient info', 'patient_info'),
                ('Kinematics', cfg.layouts.lb_kinematics),
                ('Kinematics average', 'kinematics_average'),
                ('Static kinematics', 'static_kinematics'),
                ('Static EMG', 'static_emg'),
                ('Kinematics + kinetics', cfg.layouts.lb_kin_web),
                ('Kinetics', cfg.layouts.lb_kinetics_web),
                ('EMG', emg_layout),
                ('Kinetics-EMG left', cfg.layouts.lb_kinetics_emg_l),
                ('Kinetics-EMG right', cfg.layouts.lb_kinetics_emg_r),
                ('Muscle length', cfg.layouts.musclelen),
                ('Torso kinematics', cfg.layouts.torso),
                ('Time-distance variables', 'time_dist'),
            ]
        )
        # pick desired single variables from model and append
        # Py2: dict merge below can be done more elegantly once Py2 is dropped
        pig_singlevars_ = models.pig_lowerbody.varlabels_noside.copy()
        pig_singlevars_.update(models.pig_lowerbody_kinetics.varlabels_noside)
        pig_singlevars = sorted(pig_singlevars_.items(), key=lambda item: item[1])
        singlevars = OrderedDict(
            [(varlabel, [[var]]) for var, varlabel in pig_singlevars]
        )
        _layouts.update(singlevars)

        # add supplementary data for normal layouts
        supplementary_default = dict()
        supplementary_default.update(tibial_torsion)

        dd_opts_multi_upper = list()
        dd_opts_multi_lower = list()

        # loop through the layouts, create or load figures
        report_data_new = dict()
        for k, (label, layout) in enumerate(_layouts.items()):
            signals.progress.emit('Creating plot: %s' % label, 100 * k / len(_layouts))
            if signals.canceled:
                return None
            # for comparison report, include session info in plot legends and
            # use session specific line style
            emg_mode = None
            if is_comparison:
                legend_type = cfg.web_report.comparison_legend_type
                style_by = cfg.web_report.comparison_style_by
                color_by = cfg.web_report.comparison_color_by
                if cfg.web_report.comparison_emg_rms:
                    emg_mode = 'rms'
            else:
                legend_type = cfg.web_report.legend_type
                style_by = cfg.web_report.style_by
                color_by = cfg.web_report.color_by

            try:
                if saved_report_data:
                    logger.debug('loading %s from saved report data' % label)
                    if label not in saved_report_data:
                        # will be caught, resulting in empty menu item
                        raise RuntimeError
                    else:
                        figdata = saved_report_data[label]
                else:
                    logger.debug('creating figure data for %s' % label)
                    if isinstance(layout, basestring):  # handle special layout codes
                        if layout == 'time_dist':
                            figdata = timedist.do_comparison_plot(
                                sessions, big_fonts=True, backend='plotly'
                            )
                        elif layout == 'patient_info':
                            figdata = patient_info_text
                        elif layout == 'static_kinematics':
                            layout_ = cfg.layouts.lb_kinematics
                            figdata = plot_trials(
                                trials_static,
                                layout_,
                                model_normaldata=False,
                                cycles='unnormalized',
                                legend_type='short_name_with_cyclename',
                                style_by=style_by,
                                color_by=color_by,
                                big_fonts=True,
                            )
                        elif layout == 'static_emg':
                            layout_ = cfg.layouts.std_emg
                            figdata = plot_trials(
                                trials_static,
                                layout_,
                                model_normaldata=False,
                                cycles='unnormalized',
                                legend_type='short_name_with_cyclename',
                                style_by=style_by,
                                color_by=color_by,
                                big_fonts=True,
                            )
                        elif layout == 'kinematics_average':
                            layout_ = cfg.layouts.lb_kinematics
                            figdata = plot_trials(
                                avg_trials,
                                layout_,
                                style_by=style_by,
                                color_by=color_by,
                                model_normaldata=model_normaldata,
                                big_fonts=True,
                            )
                        elif layout == 'disabled':
                            # will be caught, resulting in empty menu item
                            raise RuntimeError
                        else:  # unrecognized layout; this is not caught by us
                            raise Exception('Unrecognized layout: %s' % layout)

                    else:  # regular gaitutils layout
                        figdata = plot_trials(
                            trials_dyn,
                            layout,
                            model_normaldata=model_normaldata,
                            emg_mode=emg_mode,
                            legend_type=legend_type,
                            style_by=style_by,
                            color_by=color_by,
                            supplementary_data=supplementary_default,
                            big_fonts=True,
                        )
                # save newly created data
                if not saved_report_data:
                    if isinstance(figdata, go.Figure):
                        # serialize go.Figures before saving
                        # this makes them much faster for pickle to handle
                        # apparently dcc.Graph can eat the serialized json directly,
                        # so no need to do anything on load
                        figdata_ = figdata.to_plotly_json()
                    else:
                        figdata_ = figdata
                    report_data_new[label] = figdata_

                # make the upper and lower panel graphs from figdata, depending
                # on data type
                def _is_base64(s):
                    try:
                        return base64.b64encode(base64.b64decode(s)) == s
                    except Exception:
                        return False

                # this is for old style timedist figures that were in base64
                # encoded svg
                if layout == 'time_dist' and _is_base64(figdata):
                    graph_upper = html.Img(
                        src='data:image/svg+xml;base64,{}'.format(figdata),
                        id='gaitgraph%d' % k,
                        style={'height': '100%'},
                    )
                    graph_lower = html.Img(
                        src='data:image/svg+xml;base64,{}'.format(figdata),
                        id='gaitgraph%d' % (len(_layouts) + k),
                        style={'height': '100%'},
                    )
                elif layout == 'patient_info':
                    graph_upper = dcc.Markdown(figdata)
                    graph_lower = graph_upper
                else:
                    # plotly fig -> dcc.Graph
                    graph_upper = dcc.Graph(
                        figure=figdata, id='gaitgraph%d' % k, style={'height': '100%'}
                    )
                    graph_lower = dcc.Graph(
                        figure=figdata,
                        id='gaitgraph%d' % (len(_layouts) + k),
                        style={'height': '100%'},
                    )
                dd_opts_multi_upper.append({'label': label, 'value': graph_upper})
                dd_opts_multi_lower.append({'label': label, 'value': graph_lower})

            except (RuntimeError, GaitDataError) as e:  # could not create a figure
                logger.warning(u'failed to create figure for %s: %s' % (label, e))
                # insert the menu options but make them disabled
                dd_opts_multi_upper.append(
                    {'label': label, 'value': label, 'disabled': True}
                )
                dd_opts_multi_lower.append(
                    {'label': label, 'value': label, 'disabled': True}
                )
                continue

        opts_multi, mapper_multi_upper = _make_dropdown_lists(dd_opts_multi_upper)
        opts_multi, mapper_multi_lower = _make_dropdown_lists(dd_opts_multi_lower)

        # if plots were newly created, save them to disk
        if not saved_report_data:
            logger.debug('saving report data into %s' % data_fn)
            signals.progress.emit('Saving report data to disk...', 99)
            with open(data_fn, 'wb') as f:
                pickle.dump(report_data_new, f, protocol=-1)

    def make_left_panel(split=True, upper_value='Kinematics', lower_value='Kinematics'):
        """Helper to make the left graph panels. If split=True, make two stacked panels"""

        # the upper graph & dropdown
        items = [
            dcc.Dropdown(
                id='dd-vars-upper-multi',
                clearable=False,
                options=opts_multi,
                value=upper_value,
            ),
            html.Div(
                id='div-upper', style={'height': '50%'} if split else {'height': '100%'}
            ),
        ]

        if split:
            # add the lower one
            items.extend(
                [
                    dcc.Dropdown(
                        id='dd-vars-lower-multi',
                        clearable=False,
                        options=opts_multi,
                        value=lower_value,
                    ),
                    html.Div(id='div-lower', style={'height': '50%'}),
                ]
            )

        return html.Div(items, style={'height': '80vh'})

    # create the app
    app = dash.Dash('gaitutils')
    # use local packaged versions of JavaScript libs etc. (no internet needed)
    app.css.config.serve_locally = True
    app.scripts.config.serve_locally = True
    app.title = _report_name(sessions, long_name=False)

    # this is for generating the classnames in the CSS
    num2words = {
        1: 'one',
        2: 'two',
        3: 'three',
        4: 'four',
        5: 'five',
        6: 'six',
        7: 'seven',
        8: 'eight',
        9: 'nine',
        10: 'ten',
        11: 'eleven',
        12: 'twelve',
    }
    classname_left = '%s columns' % num2words[LEFT_WIDTH]
    classname_right = '%s columns' % num2words[12 - LEFT_WIDTH]

    if video_only:
        app.layout = html.Div(
            [  # row
                html.Div(
                    [  # single main div
                        dcc.Dropdown(
                            id='dd-camera',
                            clearable=False,
                            options=opts_cameras,
                            value='Front camera',
                        ),
                        dcc.Dropdown(
                            id='dd-video-tag',
                            clearable=False,
                            options=opts_tags,
                            value=opts_tags[0]['value'],
                        ),
                        html.Div(id='videos'),
                    ],
                    className='12 columns',
                ),
            ],
            className='row',
        )
    else:  # the two-panel layout with graphs and video
        app.layout = html.Div(
            [  # row
                html.Div(
                    [  # left main div
                        html.H6(report_name),
                        dcc.Checklist(
                            id='split-left',
                            options=[{'label': 'Two panels', 'value': 'split'}],
                            value=[],
                        ),
                        # need split=True so that both panels are in initial layout
                        html.Div(make_left_panel(split=True), id='div-left-main'),
                    ],
                    className=classname_left,
                ),
                html.Div(
                    [  # right main div
                        dcc.Dropdown(
                            id='dd-camera',
                            clearable=False,
                            options=opts_cameras,
                            value='Front camera',
                        ),
                        dcc.Dropdown(
                            id='dd-video-tag',
                            clearable=False,
                            options=opts_tags,
                            value=opts_tags[0]['value'],
                        ),
                        html.Div(id='videos'),
                    ],
                    className=classname_right,
                ),
            ],
            className='row',
        )

        @app.callback(
            Output('div-left-main', 'children'),
            [Input('split-left', 'value')],
            [State('dd-vars-upper-multi', 'value')],
        )
        def update_panel_layout(split_panels, upper_value):
            split = 'split' in split_panels
            return make_left_panel(split, upper_value=upper_value)

        @app.callback(
            Output('div-upper', 'children'), [Input('dd-vars-upper-multi', 'value')]
        )
        def update_contents_upper_multi(sel_var):
            return mapper_multi_upper[sel_var]

        @app.callback(
            Output('div-lower', 'children'), [Input('dd-vars-lower-multi', 'value')]
        )
        def update_contents_lower_multi(sel_var):
            return mapper_multi_lower[sel_var]

    def _video_elem(title, url, max_height):
        """Create a video element with title"""
        if not url:
            return 'No video found'
        vid_el = html.Video(
            src=url,
            controls=True,
            loop=True,
            preload='auto',
            title=title,
            style={'max-height': max_height, 'max-width': '100%'},
        )
        # return html.Div([title, vid_el])  # titles above videos
        return vid_el

    @app.callback(
        Output('videos', 'children'),
        [Input('dd-camera', 'value'), Input('dd-video-tag', 'value')],
    )
    def update_videos(camera_label, tag):
        """Create a list of video divs according to camera and tag selection"""
        if tag == 'no videos':
            return 'No videos found'
        vid_urls_ = vid_urls[tag][camera_label]
        if not vid_urls_:
            return 'No videos found'
        nvids = len(vid_urls_)
        max_height = str(int(VIDS_TOTAL_HEIGHT / nvids)) + 'vh'
        return [_video_elem('video', url, max_height) for url in vid_urls_]

    # add a static route to serve session data. be careful outside firewalls
    @app.server.route('/static/<resource>')
    def serve_file(resource):
        for session in sessions:
            filepath = op.join(session, resource)
            if op.isfile(filepath):
                return flask.send_from_directory(session, resource)
        return flask.abort(404)  # no session contained the requested file

    # add shutdown method - see http://flask.pocoo.org/snippets/67/
    @app.server.route('/shutdown')
    def shutdown():
        logger.debug('Received shutdown request...')
        _shutdown_server()
        return 'Server shutting down...'

    # inject some info of our own
    app._gaitutils_report_name = report_name

    # XXX: the Flask app ends up with a logger by the name of 'gaitutils', which has a default
    # stderr handler. since logger hierarchy corresponds to package hierarchy,
    # this creates a bug where all gaitutils package loggers propagate their messages into
    # the app logger and they get shown multiple times. as a dirty fix, we disable the
    # handlers for the app logger (they still get shown since they propagate to the root logger)
    app.logger.handlers = []

    return app
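# A minimal, self-contained sketch of the figure-caching pattern used above:
# go.Figure objects are serialized with to_plotly_json() before pickling, which
# is much faster to pickle, and dcc.Graph can consume the resulting dict
# directly on load. The function names below are illustrative, not part of
# gaitutils.
import pickle

import plotly.graph_objs as go


def save_report_figures(figures, path):
    """Pickle a dict of {label: figure}, serializing go.Figure objects first."""
    serializable = {
        label: fig.to_plotly_json() if isinstance(fig, go.Figure) else fig
        for label, fig in figures.items()
    }
    with open(path, 'wb') as f:
        pickle.dump(serializable, f, protocol=-1)


def load_report_figures(path):
    """Load previously pickled figure data (plain dicts, usable by dcc.Graph)."""
    with open(path, 'rb') as f:
        return pickle.load(f)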
Example #54
def _remove_query_params(path):
    return path.split('?')[0]
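# A couple of illustrative calls for the helper above: everything after the
# first '?' is dropped, and paths without a query string pass through unchanged.
assert _remove_query_params('/static/report.html?session=1&tag=E1') == '/static/report.html'
assert _remove_query_params('/static/report.html') == '/static/report.html'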
Example #55
def fit_dkimicro(data_files,
                 bval_files,
                 bvec_files,
                 mask=None,
                 min_kurtosis=-1,
                 max_kurtosis=3,
                 out_dir=None,
                 b0_threshold=0):
    """
    Fit the DKI model, save files with derived maps

    Parameters
    ----------
    data_files : str or list
        Files containing DWI data. If this is a str, that's the full path to a
        single file. If it's a list, each entry is a full path.
    bval_files : str or list
        Equivalent to `data_files`.
    bvec_files : str or list
        Equivalent to `data_files`.
    mask : ndarray, optional
        Binary mask, set to True or 1 in voxels to be processed.
        Default: Process all voxels.
    min_kurtosis : float, optional
        The minimal plausible value of kurtosis. Default: -1.
    max_kurtosis : float, optional
        The maximal plausible value of kurtosis. Default: 3.
    out_dir : str, optional
        A full path to a directory to store the maps that get computed.
        Default: maps get stored in a 'dki' subdirectory next to the first
        DWI file in `data_files`.
    b0_threshold : float, optional
        Threshold below which a volume's b-value is treated as b0.
        Default: 0.

    Returns
    -------
    file_paths : dict
        Maps the name of each derived map to the full path of the file
        containing it.

    Note
    ----
    Maps that are calculated: AWF, T (tortuosity), Da (axonal diffusivity),
    hAD, hRD, hMD (hindered AD/RD/MD), params

    """
    img, data, gtab, mask = ut.prepare_data(data_files,
                                            bval_files,
                                            bvec_files,
                                            mask=mask,
                                            b0_threshold=b0_threshold)

    dkimodel = dki_micro.KurtosisMicrostructureModel(gtab)
    dkifit = dkimodel.fit(data, mask=mask)

    AWF = dkifit.awf
    T = dkifit.tortuosity
    Da = dkifit.axonal_diffusivity
    hRD = dkifit.hindered_rd
    hAD = dkifit.hindered_ad
    evals = dkifit.hindered_evals
    hMD = (evals[..., 0] + evals[..., 1] + evals[..., 2]) / 3.0
    params = dkifit.model_params

    maps = [AWF, T, hAD, hRD, hMD, Da, params]
    names = ['AWF', 'T', 'hAD', 'hRD', 'hMD', 'Da', 'params']

    if out_dir is None:
        if isinstance(data_files, list):
            out_dir = op.join(op.split(data_files[0])[0], 'dki')
        else:
            out_dir = op.join(op.split(data_files)[0], 'dki')

    if not op.exists(out_dir):
        os.makedirs(out_dir)

    aff = img.affine
    file_paths = {}
    for m, n in zip(maps, names):
        file_paths[n] = op.join(out_dir, 'dkimicro_%s.nii.gz' % n)
        nib.save(nib.Nifti1Image(m, aff), file_paths[n])

    return file_paths
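# A hypothetical invocation of fit_dkimicro(); the file names below are made up
# for illustration. With out_dir given, the maps are written there instead of
# the default '<dwi dir>/dki' subdirectory.
paths = fit_dkimicro(
    data_files='sub-01_dwi.nii.gz',
    bval_files='sub-01_dwi.bval',
    bvec_files='sub-01_dwi.bvec',
    out_dir='derivatives/dkimicro',
)
print(paths['AWF'])  # e.g. 'derivatives/dkimicro/dkimicro_AWF.nii.gz'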
Example #56
    def rows(self, csv=False):
        """
        Returns each row based on the selected criteria.
        """

        # Store the index of each field against its ID for building each
        # entry row with columns in the correct order. Also store the IDs of
        # fields with a type of FileField or Date-like for special handling of
        # their values.
        field_indexes = {}
        file_field_ids = []
        date_field_ids = []
        for field in self.form_fields:
            if self.posted_data("field_%s_export" % field.id):
                field_indexes[field.id] = len(field_indexes)
                if field.is_a(fields.FILE):
                    file_field_ids.append(field.id)
                elif field.is_a(*fields.DATES):
                    date_field_ids.append(field.id)
        num_columns = len(field_indexes)
        include_entry_time = self.posted_data("field_0_export")
        if include_entry_time:
            num_columns += 1

        # Get the field entries for the given form and filter by entry_time
        # if specified.
        model = self.fieldentry_model
        field_entries = model.objects.filter(entry__form=self.form).order_by(
            "-entry__id").select_related("entry")
        if self.posted_data("field_0_filter") == FILTER_CHOICE_BETWEEN:
            time_from = self.posted_data("field_0_from")
            time_to = self.posted_data("field_0_to")
            if time_from and time_to:
                field_entries = field_entries.filter(
                    entry__entry_time__range=(time_from, time_to))

        # Loop through each field value ordered by entry, building up each
        # entry as a row. Use the ``valid_row`` flag for marking a row as
        # invalid if it fails one of the filtering criteria specified.
        current_entry = None
        current_row = None
        valid_row = True
        for field_entry in field_entries:
            if field_entry.entry_id != current_entry:
                # New entry, write out the current row and start a new one.
                if valid_row and current_row is not None:
                    if not csv:
                        current_row.insert(0, current_entry)
                    yield current_row
                current_entry = field_entry.entry_id
                current_row = [""] * num_columns
                valid_row = True
                if include_entry_time:
                    current_row[-1] = field_entry.entry.entry_time
            field_value = field_entry.value or ""
            # Check for filter.
            field_id = field_entry.field_id
            filter_type = self.posted_data("field_%s_filter" % field_id)
            filter_args = None
            if filter_type:
                if filter_type == FILTER_CHOICE_BETWEEN:
                    f, t = "field_%s_from" % field_id, "field_%s_to" % field_id
                    filter_args = [self.posted_data(f), self.posted_data(t)]
                else:
                    field_name = "field_%s_contains" % field_id
                    filter_args = self.posted_data(field_name)
                    if filter_args:
                        filter_args = [filter_args]
            if filter_args:
                # Convert dates before checking filter.
                if field_id in date_field_ids:
                    try:
                        y, m, d = field_value.split(" ")[0].split("-")
                    except ValueError:
                        filter_args.append(field_value)
                    else:
                        dte = date(int(y), int(m), int(d))
                        filter_args.append(dte)
                else:
                    filter_args.append(field_value)
                filter_func = FILTER_FUNCS[filter_type]
                if not filter_func(*filter_args):
                    valid_row = False
            # Create download URL for file fields.
            if field_entry.value and field_id in file_field_ids:
                url = reverse("admin:form_file", args=(field_entry.id, ))
                field_value = self.request.build_absolute_uri(url)
                if not csv:
                    parts = (field_value, split(field_entry.value)[1])
                    field_value = mark_safe("<a href=\"%s\">%s</a>" % parts)
            # Only use values for fields that were selected.
            try:
                current_row[field_indexes[field_id]] = field_value
            except KeyError:
                pass
        # Output the final row.
        if valid_row and current_row is not None:
            if not csv:
                current_row.insert(0, current_entry)
            yield current_row
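# A minimal stand-in showing the filter dispatch that rows() relies on. The real
# FILTER_FUNCS / FILTER_CHOICE_* constants come from the forms app and are not
# shown here; these only illustrate the calling convention visible above, where
# each function receives the filter argument(s) followed by the field value.
FILTER_CHOICE_CONTAINS_DEMO = "contains"
FILTER_CHOICE_BETWEEN_DEMO = "between"

FILTER_FUNCS_DEMO = {
    FILTER_CHOICE_CONTAINS_DEMO: lambda needle, value: needle.lower() in value.lower(),
    FILTER_CHOICE_BETWEEN_DEMO: lambda low, high, value: low <= value <= high,
}

assert FILTER_FUNCS_DEMO[FILTER_CHOICE_CONTAINS_DEMO]("bob", "Bob Smith")
assert FILTER_FUNCS_DEMO[FILTER_CHOICE_BETWEEN_DEMO](1, 10, 5)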
Example #57
                    parse_args=False,
                    auto_metric_logging=False)
            else:
                experiment = Experiment(api_key=args.api_key,
                                        project_name='comp767_project',
                                        parse_args=False,
                                        auto_metric_logging=False)
    else:
        raise Exception
else:
    if args.api_key:
        experiment = Experiment(api_key=args.api_key,
                                project_name='comp767_project',
                                parse_args=False,
                                auto_metric_logging=False)
        _, experiment_name = split(dirname(realpath(__file__)))
        experiment.log_other('experiment_name', experiment_name)
        experiment.log_parameters(HYPERPARAMETERS)
    num_steps = 0
    best_score = 0
    last_evaluation = 0
    epsilon = HYPERPARAMETERS['epsilon_start']
print('=> num_steps: {}, best_score: {}, epsilon: {}, last_evaluation: {}\n'.
      format(num_steps, best_score, epsilon, last_evaluation))

target_net = DQN(env.action_space.n).to(device)
target_net.load_state_dict(policy_net.state_dict())
target_net.eval()

start_time = time.time()
memory = ReplayMemory(HYPERPARAMETERS['memory_size'])
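# The snippet above uses a ReplayMemory class whose definition is not shown. A
# minimal sketch of such an experience buffer (an assumption, not the original
# implementation): a bounded deque with uniform random sampling.
import random
from collections import deque


class ReplayMemorySketch(object):
    def __init__(self, capacity):
        self.buffer = deque(maxlen=capacity)

    def push(self, transition):
        """Store a (state, action, reward, next_state, done) tuple."""
        self.buffer.append(transition)

    def sample(self, batch_size):
        """Return a uniformly sampled batch of stored transitions."""
        return random.sample(self.buffer, batch_size)

    def __len__(self):
        return len(self.buffer)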
Example #58
    def submit(self, copy):
        """
            Move or copy each item in the origin list to the path in the
            destination list. Supports no more than one destination directory
            where copy == False.

            Ask Overwrite and Rename Dupes will alter the way we handle 
            existing data standing in the way. By default, duplicates are 
            renamed with an index. A messagebox can complain to the user
            if shutil raises a PermissionError, and the operation is skipped.
        """

        if (self.list_box_dest.size() > 1) and not copy:
            messagebox.showwarning(
                "Invalid Operation",
                "Move operation only supports a single destination directory.")
            return

        sources = self.list_box_source.get(0, "end")
        destinations = self.list_box_dest.get(0, "end")

        self.skipped_err = []

        for j, destination in enumerate(destinations):

            if isfile(destination):
                self.skipped_err.append(f"Invalid destination: {destination}")
                continue

            for i, source in enumerate(sources):
                self.progress(i, j)

                (_, filename) = split(source)
                future_destination = join(destination + sep, filename)

                if exists(future_destination):
                    if not self.settings_ask_overwrite.get() \
                    and not self.settings_rename_dupes.get():

                        if not self._delete(future_destination):
                            continue

                    if self.settings_ask_overwrite.get():

                        if self.ask_overwrite(future_destination):
                            if not self._delete(future_destination):
                                continue

                        else:
                            continue

                    if self.settings_rename_dupes.get():
                        future_destination = self.name_dupe(future_destination)

                if copy:
                    if not self._copy(source, future_destination):
                        continue
                else:
                    if not self._move(source, future_destination):
                        continue

        self.list_box_source.delete(0, "end")
        self.list_box_dest.delete(0, "end")

        if self.skipped_err:
            messagebox.showerror(title="Error(s)",
                                 message="\n\n".join(self.skipped_err))
Example #59
#!/usr/bin/python
# -*- encoding: utf-8 -*-
import os.path as osp

import numpy as np
import cv2
from PIL import Image
import torch
import torchvision.transforms as transforms

from .model import BiSeNet

mapper_dict = [0, 1, 2, 3, 4, 5, 0, 11, 12, 0, 6, 8, 7, 9, 13, 0, 0, 10, 0]
mapper = np.frompyfunc(lambda x: mapper_dict[x], 1, 1)
n_classes = 19
save_pth = osp.split(osp.realpath(__file__))[0] + '/resnet.pth'

net = BiSeNet(n_classes=19)
net.load_state_dict(torch.load(save_pth, map_location=torch.device('cpu')))
net.eval()
to_tensor = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])


def mask(image: Image):
    assert image.size == (512, 512)
    with torch.no_grad():
        image = to_tensor(image)
        image = torch.unsqueeze(image, 0)
        # assumed completion: argmax over the BiSeNet class dimension, then
        # remap labels with the module-level mapper
        out = net(image)[0]
        parsing = out.squeeze(0).numpy().argmax(0)
    return mapper(parsing).astype(np.uint8)
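# Illustration of the label remapping above: np.frompyfunc applies mapper_dict
# element-wise, so several of the 19 BiSeNet classes (indices 6, 9, 15, 16, 18)
# collapse to 0 while the rest are renumbered.
demo = np.array([[0, 1, 6], [17, 18, 4]])
print(mapper(demo).astype(np.uint8))
# [[ 0  1  0]
#  [10  0  4]]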
Example #60
def fit_dki(data_files,
            bval_files,
            bvec_files,
            mask=None,
            min_kurtosis=-1,
            max_kurtosis=3,
            out_dir=None,
            b0_threshold=0):
    """
    Fit the DKI model, save files with derived maps

    Parameters
    ----------
    data_files : str or list
        Files containing DWI data. If this is a str, that's the full path to a
        single file. If it's a list, each entry is a full path.
    bval_files : str or list
        Equivalent to `data_files`.
    bvec_files : str or list
        Equivalent to `data_files`.
    mask : ndarray, optional
        Binary mask, set to True or 1 in voxels to be processed.
        Default: Process all voxels.
    min_kurtosis : float, optional
        The minimal plausible value of kurtosis. Default: -1.
    max_kurtosis : float, optional
        The maximal plausible value of kurtosis. Default: 3.
    out_dir : str, optional
        A full path to a directory to store the maps that get computed.
        Default: maps get stored in a 'dki' subdirectory next to the first
        DWI file in `data_files`.
    b0_threshold : float, optional
        Threshold below which a volume's b-value is treated as b0.
        Default: 0.

    Returns
    -------
    file_paths : dict
        Maps the name of each derived map to the full path of the file
        containing it.

    Note
    ----
    Maps that are calculated: FA, MD, AD, RD, MK, AK, RK

    """
    img, data, gtab, mask = ut.prepare_data(data_files,
                                            bval_files,
                                            bvec_files,
                                            mask=mask,
                                            b0_threshold=b0_threshold)

    dkimodel = dki.DiffusionKurtosisModel(gtab)
    dkifit = dkimodel.fit(data, mask=mask)

    FA = dkifit.fa
    MD = dkifit.md
    AD = dkifit.ad
    RD = dkifit.rd
    MK = dkifit.mk(min_kurtosis, max_kurtosis)
    AK = dkifit.ak(min_kurtosis, max_kurtosis)
    RK = dkifit.rk(min_kurtosis, max_kurtosis)
    params = dkifit.model_params

    maps = [FA, MD, AD, RD, MK, AK, RK, params]
    names = ['FA', 'MD', 'AD', 'RD', 'MK', 'AK', 'RK', 'params']

    if out_dir is None:
        if isinstance(data_files, list):
            out_dir = op.join(op.split(data_files[0])[0], 'dki')
        else:
            out_dir = op.join(op.split(data_files)[0], 'dki')

    if not op.exists(out_dir):
        os.makedirs(out_dir)

    aff = img.affine
    file_paths = {}
    for m, n in zip(maps, names):
        file_paths[n] = op.join(out_dir, 'dki_%s.nii.gz' % n)
        nib.save(nib.Nifti1Image(m, aff), file_paths[n])

    return file_paths
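# A hypothetical call of fit_dki() using the list form of the arguments; the
# file names are made up for illustration. With out_dir left as None, the maps
# are written to a 'dki' subdirectory next to the first DWI file, e.g.
# 'data/session1/dki/dki_FA.nii.gz'.
paths = fit_dki(
    data_files=['data/session1/dwi_run1.nii.gz', 'data/session1/dwi_run2.nii.gz'],
    bval_files=['data/session1/dwi_run1.bval', 'data/session1/dwi_run2.bval'],
    bvec_files=['data/session1/dwi_run1.bvec', 'data/session1/dwi_run2.bvec'],
    min_kurtosis=-1,
    max_kurtosis=3,
)
print(paths['FA'])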