Example #1
def make_hdf_magphase(datadir, database_fname, fftlength):

    HALFFFTLEN = (fftlength / 2) + 1
    for stream in ['mag', 'real', 'imag', 'f0']:
        assert os.path.isdir(os.path.join(datadir, stream))

    f = h5py.File(database_fname, "w")

    for magfile in sorted(glob.glob(os.path.join(datadir, 'mag/*.mag'))):
        base = basename(magfile)
        print base
        skip_file = False
        for stream in ['mag', 'real', 'imag', 'f0']:
            if not os.path.isfile(
                    os.path.join(datadir, stream, base + '.' + stream)):
                skip_file = True
        if skip_file:
            print '  ---> skip!'
            continue

        utt_group = f.create_group(base)
        for stream in ['mag', 'real', 'imag']:
            speech = get_speech(
                os.path.join(datadir, stream, base + '.' + stream), HALFFFTLEN)
            utt_group.create_dataset(stream, data=speech)
        f0 = get_speech(os.path.join(datadir, 'f0', base + '.f0'), 1)
        f0_interp, vuv = speech_manip.lin_interp_f0(f0)
        utt_group.create_dataset('f0_interp', data=f0_interp)
        utt_group.create_dataset('vuv', data=vuv)

    f.close()
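
A minimal invocation sketch (the feature directory, output path, and FFT length below are placeholders; h5py and the get_speech/speech_manip helpers from the surrounding project are assumed to be importable):

if __name__ == '__main__':
    # datadir must contain mag/, real/, imag/ and f0/ subdirectories,
    # as asserted at the top of make_hdf_magphase.
    make_hdf_magphase('/path/to/magphase_feats', '/path/to/magphase.h5', fftlength=2048)
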
Example #2
    def organize_directory(self):
        if (self.scenes):
            # Iterate Over Scenes List
            for curr, scene in enumerate(self.scenes, 1):
                # Create Scene Folder
                util.mkdir(
                    util.form_path([self.image_path,
                                    SCENE.format(curr)]))

                # Move Images To Scene Folder
                for image in scene:
                    try:
                        # Generate Source and Destination Paths
                        src = util.absolute(image)
                        dst = util.normalize(
                            util.form_path([
                                util.dirname(image),
                                SCENE.format(curr),
                                util.basename(image)
                            ]))

                        # Move Images To Scene Folder
                        util.move(src, dst)
                    except FileNotFoundError:
                        pass

            # Update Prompt
            print("Organized All Images             ")
        else:
            util.perror("spectra: No scenes found to analyze")
Example #3
def verifyNshmForAll(csvFile, N_shm):

    for imgPath in read_imgs_masks(csvFile)[0]:
        directory = dirname(imgPath)
        prefix = basename(imgPath).split('.nii')[0]
        bvalFile = pjoin(directory, prefix + '.bval')
        verifyNshm(N_shm, bvalFile)
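
The .bval path is derived purely from the image path; the manipulation can be checked in isolation (dirname, basename and pjoin come from os.path; the image path is a placeholder):

from os.path import dirname, basename, join as pjoin

imgPath = '/data/sub01/dwi.nii.gz'                      # placeholder
prefix = basename(imgPath).split('.nii')[0]             # 'dwi'
bvalFile = pjoin(dirname(imgPath), prefix + '.bval')    # '/data/sub01/dwi.bval'
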
Example #4
def attach_to_max(contents):
    """
    Defines commands to send to Max, establishes a connection to its commandPort,
    then sends the code to inject debugpy
    """

    global run_code
    config = contents['arguments']

    # Format the simulated attach response to send it back to the debugger
    # while we set up the debugpy in the background
    attach_code = ATTACH_TEMPLATE.format(
        debugpy_path=debugpy_path,
        hostname=config['debugpy']['host'],
        port=int(config['debugpy']['port'])
    )

    # Format RUN_TEMPLATE to point to the temporary
    # file containing the code to run
    run_code = RUN_TEMPLATE.format(
        dir=dirname(config['program']),
        file_name=split(config['program'])[1][:-3] or basename(split(config['program'])[0])[:-3]
    )

    # then send attach code
    log('Sending attach code to Max')
    send_py_code_to_max(attach_code)
    log('Successfully attached to Max')

    # Then start the max debugging threads
    run(start_debugging, ((config['debugpy']['host'], int(config['debugpy']['port'])),))
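
The file_name expression above strips a trailing '.py' from the program path; its behaviour can be checked in isolation with os.path (the path is a placeholder):

from os.path import split, basename

program = '/projects/max/my_script.py'                                # placeholder
file_name = split(program)[1][:-3] or basename(split(program)[0])[:-3]
# -> 'my_script'; the second branch covers paths that end in a separator.
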
Example #6
def pt_newpy(**kwa):
    """newpy - Create a new python program

    usage: pytool newpy <program-name>

    Creates executable file <program-name>.py with skeletal
    contents. Run '<program-name>.py -L' to create link <program-name>.
    """
    if kwa['d']:
        pdb.set_trace()
    lname = kwa['PROGRAM']
    pname = lname + '.py'
    are_we_overwriting([lname, pname])

    basename = U.basename(lname)

    wbl = open(pname, 'w')
    wbl.writelines(["#!/usr/bin/env python\n",
                    "\"\"\"\n",
                    "%s - program description\n" % basename,
                    "\"\"\"\n",
                    "\n",
                    "import optparse\n",
                    "import pdb\n",
                    "import sys\n",
                    "import unittest\n",
                    "\n",
                    "def main(argv = None):\n",
                    "    if argv == None:\n",
                    "        argv = sys.argv\n",
                    "\n",
                    "    prs = optparse.OptionParser()\n",
                    "    prs.add_option('-d', '--debug',\n",
                    "                   action='store_true', "
                    "default=False,\n",
                    "                   dest='debug',\n",
                    "                   help='run the debugger')\n",
                    "    (opts, args) = prs.parse_args(argv)\n",
                    "\n",
                    "    if opts.debug:\n",
                    "        pdb.set_trace()",
                    "\n",
                    "\n",
                    "    # process arguments\n",
                    "    for arg in args:\n",
                    "        process(arg)\n",
                    "\n",
                    "class %sTest(unittest.TestCase):\n"
                    % basename.capitalize(),
                    "    def test_example(self):\n",
                    "        pass\n",
                    "\n",
                    "if __name__ == '__main__':\n",
                    "    main(sys.argv)\n"])
    wbl.close()

    os.chmod(pname, 0755)
    os.symlink(os.path.abspath(pname), lname)
Example #7
def joinBshells(imgPath, ref_bvals_file=None, ref_bvals=None, sep_prefix=None):

    if ref_bvals_file:
        print('Reading reference b-shell file ...')
        ref_bvals = read_bvals(ref_bvals_file)

    print('Joining b-shells for', imgPath)

    imgPath = local.path(imgPath)
    img = load(imgPath._path)
    dim = img.header['dim'][1:5]

    inPrefix = abspath(imgPath).split('.nii')[0]
    directory = dirname(inPrefix)
    prefix = basename(inPrefix)

    bvalFile = inPrefix + '.bval'
    bvecFile = inPrefix + '.bvec'

    if sep_prefix:
        harmPrefix = pjoin(directory, sep_prefix + prefix)
    else:
        harmPrefix = inPrefix

    if not isfile(harmPrefix + '.bval'):
        copyfile(bvalFile, harmPrefix + '.bval')
    if not isfile(harmPrefix + '.bvec'):
        copyfile(bvecFile, harmPrefix + '.bvec')

    bvals = np.array(read_bvals(inPrefix + '.bval'))

    joinedDwi = np.zeros((dim[0], dim[1], dim[2], dim[3]), dtype='float32')

    for bval in ref_bvals:

        # ind= np.where(bval==bvals)[0]
        ind = np.where(abs(bval - bvals) <= BSHELL_MIN_DIST)[0]

        if bval == 0.:
            b0Img = load(inPrefix + '_b0.nii.gz')
            b0 = b0Img.get_data()
            for i in ind:
                joinedDwi[:, :, :, i] = b0

        else:
            b0_bshell = load(harmPrefix + f'_b{int(bval)}.nii.gz').get_data()

            joinedDwi[:, :, :, ind] = b0_bshell[:, :, :, 1:]

    if not isfile(harmPrefix + '.nii.gz'):
        save_nifti(harmPrefix + '.nii.gz', joinedDwi, b0Img.affine,
                   b0Img.header)
    else:
        print(harmPrefix + '.nii.gz', 'already exists, not overwritten.')
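
The shell-matching step selects volume indices whose b-value lies within BSHELL_MIN_DIST of the reference shell; a small numpy sketch (the tolerance and b-values are placeholders):

import numpy as np

BSHELL_MIN_DIST = 100                                        # placeholder tolerance
bvals = np.array([0., 5., 995., 1000., 2000.])
ind = np.where(abs(1000. - bvals) <= BSHELL_MIN_DIST)[0]     # -> array([2, 3])
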
Example #8
    def search(self):
        bdz = '' if self.bdz_var.get() == BDZ_DEFAULT else self.bdz_var.get()
        zxl = '' if self.zxl_var.get() == ZXL_DEFAULT else self.zxl_var.get()
        fzxl = '' if self.fzxl_var.get(
        ) == FZXL_DEFAULT else self.fzxl_var.get()
        tq = '' if self.tq_var.get() == TQ_DEFAULT else self.tq_var.get()
        name = '' if self.name_var.get(
        ) == GROUP_NAME_DEFAULT else self.name_var.get()
        # print('search', bdz, zxl, fzxl, tq, name)
        search_condition = [bdz, zxl, fzxl, tq, name]
        logger.info('search: %s', ','.join([bdz, zxl, fzxl, tq, name]))
        if search_condition != self.search_condition:
            result = self.group_frame.search(bdz, zxl, fzxl, tq, keyword=name)
            if name:
                # The user searched with a keyword; update the last unselected drop-down level
                if self.bdz_var.get() == BDZ_DEFAULT:
                    self.update_bdz_values(list(set([g.bdz for g in result])))
                    self.update_zxl_values([])
                    self.update_fzxl_values([])
                    self.update_tq_values([])
                elif self.zxl_var.get() == ZXL_DEFAULT:
                    self.update_zxl_values(
                        [] if bdz == '' else list(set([g.zxl
                                                       for g in result])))
                    self.update_fzxl_values([])
                    self.update_tq_values([])
                elif self.fzxl_var.get() == FZXL_DEFAULT:
                    self.update_fzxl_values(
                        [] if zxl == '' else list(set([g.fzxl
                                                       for g in result])))
                    self.update_tq_values([])
                elif self.tq_var.get() == TQ_DEFAULT:
                    self.update_tq_values(
                        [] if fzxl == '' else list(set([g.tq
                                                        for g in result])))
            else:
                # The keyword is empty; reset the drop-downs to all possible values
                self.update_bdz_values(list(self.bdz_dict.keys()))
                self.update_zxl_values([] if bdz == '' else self.bdz_dict[bdz])
                self.update_fzxl_values([] if zxl ==
                                        '' else self.zxl_dict[zxl])
                self.update_tq_values([] if fzxl ==
                                      '' else self.fzxl_dict[fzxl])

            if result:
                fzxl = result[0].fzxl
                for p in os.listdir(util.TD_PATH):
                    if fzxl == util.basename(p):
                        self.message_frame.set_tdinfo(
                            util.read_text(util.TD_PATH + path.sep + p))
                        break

            self.search_condition = search_condition
            return result
Example #9
    def get_sentence_set(self, set_name): 
        assert set_name in ['test', 'tune']

        first_stream = self.stream_list_target[0]

        if set_name == 'test':
            data_dirs = self.test_data_target_dirs[first_stream]
            name_patterns = self.config.get('test_patterns', [])
            limit = self.config['n_test_utts']
        elif set_name == 'tune':
            data_dirs = self.tune_data_target_dirs[first_stream]
            name_patterns = self.config.get('tune_patterns', [])
            limit = self.config['n_tune_utts']            
        else:
            sys.exit('Set name unknown: "%s"'%(set_name))

        flist = sorted(glob.glob(data_dirs + '/*.' + first_stream))
        flist = [basename(fname) for fname in flist]

        ## find all files containing one of the patterns in test_patterns
        L = len(flist)
        if name_patterns:
            selected_flist = []
            for (i,fname) in enumerate(flist):
                for pattern in name_patterns:
                    if pattern in fname:
                        if fname not in selected_flist:
                            selected_flist.append(fname)
                        
            flist = selected_flist 

        ## check sentences not in training:
        if 1:
            train_names = dict(zip(self.train_filenames, self.train_filenames))
            selected_flist = []
            for name in flist:
                if name in train_names:
                    print 'Warning: %s in train utterances!'%(name)
                else:
                    selected_flist.append(name)
            flist = selected_flist 


        ### Only synthesise n sentences:
        if limit > 0:
            flist = flist[:limit]

        nfiles = len(flist)
        if nfiles == 0:
            print 'No files found for set "%s" based on configured test_data_dir and test_pattern'%(set_name)
        else:
            self.report('get_sentence_set: synthesising %s utterances based on config'%(nfiles))
        return flist
Example #10
def pidcmd():
    """
    Collect a list of running processes and their command lines
    """
    rval = ""
    for proc in glob.glob("/proc/*"):
        bname = util.basename(proc)
        if not bname.isdigit():
            continue
        try:
            cmdline = util.contents(util.pathjoin(proc, "cmdline"))
            if 0 == len(cmdline):
                continue
        except IOError:
            continue
        rval += "%s %s\n" % (bname, cmdline.replace("\x00", " "))
    return rval
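
util.basename, util.contents and util.pathjoin are project helpers here; a rough standard-library equivalent of the same /proc scan (a sketch, not the project's code):

import glob
import os

def pidcmd_stdlib():
    rval = ""
    for proc in glob.glob("/proc/*"):
        bname = os.path.basename(proc)
        if not bname.isdigit():
            continue
        try:
            with open(os.path.join(proc, "cmdline")) as f:
                cmdline = f.read()
        except IOError:
            continue
        if not cmdline:
            continue
        rval += "%s %s\n" % (bname, cmdline.replace("\x00", " "))
    return rval
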
Example #12
File: upcmd.py Project: Noncz/UPYun-cmd
def Puts(up, src, dst):
    err = False
    dst = os.path.join(dst, basename(src))
    err = Mkd(up, dst)

    if err:
        return err
    for root, dirs, files in os.walk(src):
        for name in files:
            src_name = os.path.join(root, name)
            dst_name = os.path.join(dst, name)
            err = Put(up, src_name, dst_name)
            if err:
                return err
        for name in dirs:
            src_dir = os.path.join(root, name)
            err = Puts(up, src_dir, dst)
            if err:
                return err
        break
Example #14
def pt_newtool(**kwa):
    """newtool - Create a new tool-style program

    usage: pytool newtool <program-name> <prefix>

    Creates executable file <program-name>.py with skeletal
    contents. The structure of the program is such that it is easy
    to add and describe new subfunctions.
    """
    lname = kwa["PROGRAM"]
    pname = lname + '.py'
    prefix = kwa["PREFIX"]
    are_we_overwriting([lname, pname])

    basename = U.basename(lname)
    wbl = open(pname, 'w')
    wbl.writelines(["#!/usr/bin/env python\n",
                    "\"\"\"\n",
                    "{} - program description\n".format(basename),
                    "\"\"\"\n",
                    "\n",
                    "from bscr import util as U\n",
                    "import optparse\n",
                    "import os\n",
                    "import re\n",
                    "import sys\n",
                    "\n",
                    "# ---------------------------------------"
                    "------------------------------------\n",
                    "def {}_example(argv):\n".format(prefix),
                    "    print('this is an example')\n",
                    "\n",
                    "# ---------------------------------------"
                    "------------------------------------\n",
                    "if __name__ == '__main__':\n",
                    "    U.dispatch('__main__',"
                    " '{}', sys.argv)".format(prefix)])
    wbl.close()

    os.chmod(pname, 0755)
    os.symlink(os.path.abspath(pname), lname)
Example #15
 def main(self, infiles, tpfiles, dtype, interp, prog, ref_img):
     outfiles = FancyList()
     logfiles = FancyList()
     for i, infile in enumerate(infiles):
         tpfile = tpfiles[i]
         if prog == 'elastix':
             transformix = Transformix().setInput(infile=infile,
                                                  tpfile=tpfile,
                                                  dtype=dtype,
                                                  interp=interp)
             outfiles.append(transformix.requestOutput('outfile'))
             logfiles.append(transformix.requestOutput('logfile'))
         elif prog == 'niftyreg':
             regresample = RegResample().setInput(
                 float_img=infile,
                 ref_img=ref_img,
                 tpfile=tpfile,
                 outfile=self.tempfile('regresample_{}_{}.nii.gz'.format(
                     util.basename(infile), i)),
                 interp=interp)
             outfiles.append(regresample.requestOutput('outfile'))
     return FancyDict(outfiles=outfiles, logfiles=logfiles)
Example #16
def instance_report():
    logging.info("Requesting a list of instances...")
    r = session.get(tenant_url + "/compute/instance/")
    util.check_response(r)

    instance_ids = r.json()

    logging.info("Found %d existing instances." % (len(instance_ids)))

    if len(instance_ids) > 0:
        t = []

        for id in instance_ids:
            logging.info("Requesting information for instance %s" % (id, ))

            # The instance ID refers to a collection.  Since we want to get a summary
            # of information about the collection, strip off the trailing slash.
            instance_url = tenant_url + "/compute/instance/" + id.rstrip("/")

            r = session.get(instance_url)
            util.check_response(r)
            info = r.json()

            name = "(unassigned)"
            if info["name"]:
                name = info["name"]

            ipv4_address = "(unassigned)"
            r = session.get(instance_url + "/network/" + public_interface + "/address")
            if r.status_code == 200:
                ipv4_address = r.json()["IPv4"]

            t.append((id, name, info["state"], ipv4_address, util.basename(info["size"]), util.basename(info["profile"])))

        print util.column_report("Instance Summary",
                                 ("Id", "Name", "State", "IPv4 Address", "Size", "Profile"), t)
    else:
        print "No VM instances currently exist."
        print
Example #17
    def load_priority_list(cls):
        """
        If one or more priority list files are configured, read them and put
        their contents first in the list of Checkables to be processed
        """
        rval = []
        cfg = CrawlConfig.get_config()
        priglob = cfg.get_d('cv', 'priority', '')
        if priglob == '':
            return rval

        pricomp = cfg.get_d('cv', 'completed',
                            U.pathjoin(U.dirname(priglob), 'completed'))

        for pripath in U.foldsort(glob.glob(priglob)):
            with open(pripath, 'r') as f:
                for line in f.readlines():
                    path = line.strip()
                    rval.append(Checkable(path=path, type='f'))
            os.rename(pripath, U.pathjoin(pricomp, U.basename(pripath)))

        return rval
Example #18
File: upcmd.py Project: Noncz/UPYun-cmd
def Get(up, src, dst, output=True):
    if isdir(up, src) and os.path.isdir(dst):
        return Gets(up, src, dst)

    if isdir(up, src) and not os.path.isdir(dst):
        return False

    if not isdir(up, src) and os.path.isdir(dst):
        dst += basename(src)

    err = False
    try:
        f = open(dst, "wb")
        up.get(src, f)
        if output:
            print src, "====>>>", dst
    except:
        err = True

    f.close()
    if err and output:
        print "Download Error"
        return err
Example #20
File: upcmd.py Project: Noncz/UPYun-cmd
def Gets(up, src, dst):
    err = False
    dst = os.path.join(dst, basename(src))
    try:
        os.mkdir(dst)
    except:
        err = True

    if err:
        return err
    root, dirs, files = walk(up, src)
    for name in files:
        src_name = os.path.join(root, name)
        dst_name = os.path.join(dst, name)
        err = Get(up, src_name, dst_name)
        if err:
            return err

    for name in dirs:
        src_dir = os.path.join(root, name)
        err = Gets(up, src_dir, dst)
        if err:
            return err
Example #23
File: upcmd.py Project: Noncz/UPYun-cmd
def Put(up, src, dst, output=True):
    # i'm so damn like python
    if os.path.isdir(src) and isdir(up, dst):
        return Puts(up, src, dst)

    if os.path.isdir(src) and not isdir(up, dst):
        return True

    if os.path.isfile(src) and isdir(up, dst):
        dst += basename(src)

    err = False
    try:
        f = open(src, "rb")
        up.put(dst, f)
        if output:
            print src, "====>>>", dst
    except:
        err = True

    f.close()
    if err and output:
        print "Upload Error"
        return err
Example #25
    def process_images(self):
        if (self.image_list):
            # Initialize Process Counter
            curr = 0

            # Initialize Hash List
            self.hashes = []

            # Initialize Blurred Array
            self.blurred = []

            # Load Image Data Map
            image_data = load_image_data(self.image_path)

            # Error Check Image Data Map
            if (image_data is None):
                image_data = {}

            # Calculate Hash Values
            for image in self.image_list:
                # Create Data Object
                if (not (image in image_data)):
                    image_data[image] = data(lmod=util.get_lmod(image))

                # Calculate Imagehash
                self.hashes.append(imagehash.average_hash(Image.open(image)))

                # End Imagehash Calculation----------------------------------------------------------------------------------------------------------------------------

                # Store Image Name
                input_image = image

                # Store Recent Modification Time
                curr_lmod = util.get_lmod(image)

                # Calculate Blur Coefficient
                if ((image_data[image].vari is None)
                        or (image_data[image].nmax is None)
                        or (image_data[image].rmsv is None)
                        or (image_data[image].lmod < curr_lmod)):
                    # Compute RMS Value
                    loaded_image = Image.open(image).convert('L')
                    image_stats = ImageStat.Stat(loaded_image)
                    image_rms = image_stats.rms[0]

                    # Determine RMS Deficiency
                    if (image_rms < self.rms_threshold):
                        # Create Cache Folder
                        try:
                            util.mkdir(
                                util.form_path([self.image_path, TEMP_FOLD]))
                        except FileExistsError:
                            pass

                        # Create Cache File
                        input_image = util.form_path([
                            util.dirname(util.absolute(image)), TEMP_FOLD,
                            EQ_IMAGE.format(util.basename(image))
                        ])

                        # Equalize Image Histogram
                        image_file = cv2.imread(image, cv2.IMREAD_GRAYSCALE)
                        clahe = cv2.createCLAHE(clipLimit=1.125,
                                                tileGridSize=(4, 4))
                        eq_image = clahe.apply(image_file)
                        cv2.imwrite(input_image, eq_image)

                    # Ignore Future Warnings
                    with warnings.catch_warnings():
                        warnings.filterwarnings("ignore")

                        # Compute Laplace Matrix
                        loaded_image = rgb2gray(io.imread(input_image))
                        laplace_data = laplace(loaded_image, ksize=10)

                    # Store Image Data
                    image_data[image].vari = variance(laplace_data)
                    image_data[image].nmax = np.amax(laplace_data)
                    image_data[image].rmsv = image_rms
                    image_data[image].lmod = curr_lmod

                # Group Blurry Images
                if ((image_data[image].vari < self.var_threshold)
                        and (image_data[image].nmax < self.max_threshold)):
                    self.blurred.append(image)

                # Update Prompt
                print("\rProcessing Images - {}% ".format(
                    int(curr * 100 / len(self.image_list))),
                      end="")
                curr += 1

            # End Variance Computation---------------------------------------------------------------------------------------------------------------------------------

            # Write Computed Data To Data File
            with open(util.form_path([self.image_path, DATA_FILE]),
                      'w') as data_file:
                for image in image_data:
                    if (image in self.image_list):
                        data_file.write("{},{},{},{},{}\n".format(
                            image, image_data[image].vari,
                            image_data[image].nmax, image_data[image].rmsv,
                            image_data[image].lmod))
            # Close File
            data_file.close()

            # End Write Operation--------------------------------------------------------------------------------------------------------------------------------------

            # Initialize Diff List
            self.hash_diffs = []

            # Calculate Hash Differences
            for i in range(len(self.hashes) - 1):
                self.hash_diffs.append(
                    (self.hashes[i + 1] - self.hashes[i]) * self.precision)

            # End Hash Difference Computation--------------------------------------------------------------------------------------------------------------------------

            # Update Prompt
            print("\rProcessed All Images   ")
        else:
            util.perror("spectra: Found no images to process")
Example #26
    a = ArgumentParser()
    a.add_argument('-f', dest='feature_dir', required=True)
    a.add_argument('-o', dest='output_dir', required=True)    
    a.add_argument('-N', dest='nfiles', type=int, default=0)  
    a.add_argument('-m', type=int, default=60, help='low dim feature size (compressed mel magnitude spectrum & cepstrum)')  
    a.add_argument('-p', type=int, default=45, help='low dim feature size (compressed mel phase spectra & cepstra)')          
    a.add_argument('-fftlen', type=int, default=1024)          
    a.add_argument('-ncores', type=int, default=0)   
    a.add_argument('-fs', type=int, default=48000)  
    a.add_argument('-pattern', type=str, default='', help='only synthesise files with this substring in their basename')  
    opts = a.parse_args()
    
    safe_makedir(opts.output_dir)
    
    baselist = [basename(fname) for fname in sorted(glob.glob(opts.feature_dir + '/lf0/*.lf0'))]

    #### temp
    # baselist2 = []
    # for base in baselist:
    #     if int(base.replace('hvd_', '')) > 600:
    #         baselist2.append(base)
    # baselist = baselist2


    if opts.pattern:
        baselist = [b for b in baselist if opts.pattern in b]

    if opts.nfiles > 0:
        baselist = baselist[:opts.nfiles]
Example #27
def parsePy2Dict(pyfile, pro_base_url, package, module, sub_module, modules,
                 module_desc, version, pycharm_project, project_kind):
    #===========================================================================
    # print 'package:' + package
    # print 'module:' + module
    # print 'sub_module:' + sub_module
    # print 'modules:' + str(modules)
    # print 'module_desc:' + str(module_desc)
    # print 'version:' + version
    #===========================================================================
    def _getAttributeValue(attribute):
        result = ''
        if type(attribute) == ast.Attribute:
            k = _getAttributeValue(attribute.value)
            result = result + k + '.' + attribute.attr
        elif type(attribute) == ast.Name:
            result = result + attribute.id
        return result

    result = []
    with open(pyfile) as f:
        try:
            p = ast.parse(f.read())
        except:
            return []
        # Select all classes: ast.ClassDef nodes that inherit from TestCase or one of its subclasses
        classes = []
        for c in p.body:
            if type(c) == ast.ClassDef:
                # First check whether the base class is already in the known list
                if list(
                        set(constant.TARGET_BASE_CLASS).intersection(
                            map(lambda name: name.id, c.bases))):
                    constant.TARGET_BASE_CLASS.append(c.name)
                    # Check whether a run_test method exists
                    if filter(
                            lambda f: type(f) == ast.FunctionDef and f.name ==
                            'run_test', c.body):
                        classes.append(c)
                # Otherwise try to match the testcase pattern
                elif filter(lambda name: t.search(name.id), c.bases):
                    constant.TARGET_BASE_CLASS.append(c.name)
                    # Check whether a run_test method exists
                    if filter(
                            lambda f: type(f) == ast.FunctionDef and f.name ==
                            'run_test', c.body):
                        classes.append(c)
        if module_desc == None:
            module_desc = p.body[0].value.s if p.body and type(
                p.body[0]) == ast.Expr else ''

        for c in classes:
            property = {}
            # Class name (this attribute is never empty)
            for i in range(0, len(c.body)):
                property_object = c.body[i]
                # The doc string must immediately follow the class definition
                property['doc_string'] = property_object.value.s.strip(
                ).decode('utf-8') if i == 0 and type(
                    property_object) == ast.Expr else property.get(
                        'doc_string', '')
                if type(property_object) == ast.Assign:
                    key = property_object.targets[0].id
                    value = property_object.value
                    if type(value) == ast.Str:
                        value = value.s
                    elif type(value) == ast.Num:
                        value = value.n
                    elif type(value) == ast.Name:
                        value = value.id
                    elif type(value) == ast.Attribute:
                        value = _getAttributeValue(value)
                    else:
                        value = None
                    if value != None:
                        property[key] = value
            property['class_name'] = c.name
            property['local_path'] = pyfile
            property['base_url'] = pro_base_url
            property['project_name'] = util.basename(pro_base_url)
            property['package'] = package
            property['module'] = module
            property['sub_module'] = sub_module
            property['modules'] = modules
            property['module_desc'] = module_desc
            property['case_version'] = version
            property['logic_id'] = c.name.split('_')[0]
            property['name_for_query'] = '%s.%s.%s' % (package, sub_module,
                                                       c.name)

            src = '%s%s' % (c.name, pyfile)
            m = hashlib.md5()
            m.update(src)
            property['id'] = m.hexdigest()

            filemt = time.localtime(os.stat(pyfile).st_mtime)
            property['mtime'] = time.strftime("%Y/%m/%d %H:%M:%S", filemt)

            property['pycharm_project'] = pycharm_project
            property['project_kind'] = project_kind
            result.append(property)
    return result
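
The id field hashes the class name concatenated with the file path; under Python 3 the string would need to be encoded before hashing, e.g. (placeholder values):

import hashlib

src = '%s%s' % ('TestLogin_001', '/repo/tests/test_login.py')   # placeholders
case_id = hashlib.md5(src.encode('utf-8')).hexdigest()
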
Example #28
 def main(self,fixedimg,movingimgs,paramfiles,prog,fixedmasks,movingmasks,finalmask=-1,bgfrommask=None):
   tpfiles = []
   logfiles = []
   tpinfo = []
   step = 0
   if prog == 'elastix':
     params = util.ElastixParamFile(str(paramfiles[0]))
     try: useRegAladin = params['UseRegAladin']
     except: useRegAladin = False
     if useRegAladin:
       paramfiles = list(paramfiles) # copy paramfiles before using .pop()
       pf = paramfiles.pop(0)
       elastix = [None] * len(movingimgs)
       for i,movingimg in enumerate(movingimgs):
         elastix[i] = Elastix().setInput(
           fixed = fixedimg,
           moving = movingimg,
           paramfiles = [pf]
         )
         
       # call reg_aladin and use resulting affine matrix as input to elastix
       for m,fmask in enumerate(fixedmasks):
         regaladin = [None] * len(movingimgs)
         tp = [None] * len(movingimgs)
         mmasks = movingmasks[m] if movingmasks else None
         for i,movingimg in enumerate(movingimgs):
           regaladin[i] = NiftyReg().setInput(
             fixed = fixedimg,
             moving = movingimg,
             outfile = self.tempfile('regaladin{}_{}.txt'.format(m,util.basename(movingimg))),
             fixedmask = fmask,
             inaff = regaladin[i].requestOutput('outfile') if regaladin[i] else None
           )
           if mmasks:
             regaladin[i].setInput(movingmask = mmasks[i])
           regaladin2elastix = RegAladin2Elastix().setInput(
             tpbase = elastix[i].requestOutput('tp0'),
             afffile = regaladin[i].requestOutput('outfile')
           )
           # create elastix transformation file based on reg_aladin output
           tp[i] = regaladin2elastix.requestOutput('tpfile')
         tpfiles.append(FancyList(tp))
         tpinfo.append({
          'paramfile': pf,
          'fmask': fmask,
          'mmasks': mmasks
         })
     
     if len(paramfiles) > 0:
       # Elastix uses only the final mask (default: last available mask)
       fmask = fixedmasks[finalmask]
       if bgfrommask is not None:
         fixedimg = BackgroundFromMask().setInput(fixedimg,fixedmasks[bgfrommask],self.tempfile('fixed_bgfrommask.nii.gz')).requestOutput('outfile')
       mmasks = movingmasks[finalmask] if movingmasks else None
       if movingmasks and bgfrommask is not None:
         movingimgs = list(movingimgs)
         for i,movingimg in enumerate(movingimgs):            
           movingimgs[i] = BackgroundFromMask().setInput(movingimg,movingmasks[bgfrommask][i],self.tempfile('moving{}_bgfrommask.nii.gz'.format(i))).requestOutput('outfile')
       elastix = [None] * len(movingimgs)
       for i,movingimg in enumerate(movingimgs):            
         elastix[i] = Elastix().setInput(
           fixed = fixedimg,
           moving = movingimg,
           paramfiles = paramfiles,
           fixedmask = fmask
         )
         if mmasks:
           elastix[i].setInput(movingmask = mmasks[i])
         if len(tpfiles):
           elastix[i].setInput(initialtransform = tpfiles[-1][i])
         logfiles.append(elastix[i].requestOutput('log'))
       for p,pf in enumerate(paramfiles):
         tp = [None] * len(movingimgs)
         for i,movingimg in enumerate(movingimgs):
           tp[i] = elastix[i].requestOutput('tp{}'.format(p))
         tpfiles.append(FancyList(tp))
         tpinfo.append({
          'paramfile': pf,
          'fmask': fmask,
          'mmasks': mmasks
         })
         
   elif prog == 'niftyreg':
     # includes support for multiple masks
     for m,fmask in enumerate(fixedmasks):
       tp = [None] * len(movingimgs)
       mmasks = movingmasks[m] if movingmasks else None
       regaladin = [None] * len(movingimgs)
       for i,movingimg in enumerate(movingimgs):
         regaladin[i] = NiftyReg().setInput(
           fixed = fixedimg,
           moving = movingimg,
           outfile = self.tempfile('regaladin{}_{}.txt'.format(m,util.basename(movingimg))),
           fixedmask = fmask,
           inaff = regaladin[i].requestOutput('outfile') if regaladin[i] else None
         ) 
         if mmasks:
            regaladin[i].setInput(movingmask = mmasks[i])
         tp[i]  = regaladin[i].requestOutput('outfile')
       tpfiles.append(FancyList(tp))
       tpinfo.append({
        'paramfile': '[regaladin]',
        'fmask': fmask,
        'mmasks': mmasks
       })
   fancyLog(FancyList(tpfiles),'tpfiles')
   return FancyDict(
     tpfiles = FancyList(tpfiles),
     tpinfo = tpinfo,
     logfiles = FancyList(logfiles)
   )
Example #29
# ## this is the training data as regenerated by LSTM trained on it (for target cost):
# streams_dir = '/afs/inf.ed.ac.uk/group/cstr/projects/blizzard_entries/blizzard2017/hybrid_voice/data/predicted_params/train/'

# topoutdir = '/tmp/testpad'

## --------

## HTS style labels used in Blizzard:-
hts_quinphone_regex = '([^~]+)~([^-]+)-([^\+]+)\+([^\=]+)\=([^:]+)'
stream_list = ['mgc', 'lf0']
stream_dims = {'mgc': 60, 'lf0': 1}

for labfname in glob.glob(labdir + '/*.lab'):
    print labfname

    lab = read_label(labfname, hts_quinphone_regex)

    base = basename(labfname)
    for stream in stream_list:
        stream_file = os.path.join(streams_dir, stream, base + '.' + stream)
        if not os.path.isfile(stream_file):
            print 'skip!'
            continue
        speech = get_speech(stream_file, stream_dims[stream])
        speech = reinsert_terminal_silence(speech, lab)

        outdir = topoutdir + '/' + stream
        safe_makedir(outdir)
        put_speech(speech, outdir + '/' + base + '.' + stream)