Example #1
def fetch( self ):
    """getting normal tarballs from SRC_URI"""
    utils.debug( "base fetch called", 1 )
    if ( self.noFetch ):
        utils.debug( "skipping fetch (--offline)" )
        return True
    if len( self.subinfo.targets ) and self.subinfo.buildTarget in self.subinfo.targets.keys():
        return utils.getFiles( self.subinfo.targets[ self.subinfo.buildTarget ], self.downloaddir )
    else:
        return utils.getFiles( "", self.downloaddir )
Example #2
def get_student_code() -> str:
    # file I/O happens here
    fl = getFiles('student_codes')
    random_ID = random.randint(0, (len(fl) - 1))  # pick one code file at random
    with open(fl[random_ID], 'r', encoding='utf-8') as f:
        code_content = f.read()  # read the chosen code file
    return code_content
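These examples come from unrelated projects, so getFiles is not one single function: some variants simply list a directory (Examples #2, #5, #11), some filter by extension (#3, #19, #20), some recurse (#4, #17, #33), and the emerge-style fetch methods (#1, #8, #18) call a utils.getFiles that downloads release tarballs. For reference, here is a minimal sketch of the filesystem-listing variant, assuming a hypothetical signature getFiles(path, ext=None, recursive=False); it is an illustration, not the actual utils module of any project above.

import os

def getFiles(path, ext=None, recursive=False):
    """Return paths of the files under `path`, optionally filtered by extension."""
    result = []
    if recursive:
        # Walk the whole tree and collect matching files
        for dirpath, _, filenames in os.walk(path):
            for name in filenames:
                if ext is None or name.endswith(ext):
                    result.append(os.path.join(dirpath, name))
    else:
        # Only look at the top-level directory entries
        for name in sorted(os.listdir(path)):
            full = os.path.join(path, name)
            if os.path.isfile(full) and (ext is None or name.endswith(ext)):
                result.append(full)
    return result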
Example #3
def make_file_list(gtzan_path, n_folds=5,):
    """
    Generates the audio file list, ground-truth files, and fold lists
    """
    audio_path = os.path.join(gtzan_path,'audio')
    out_path = os.path.join(gtzan_path,'lists')
    files_list = []
    for ext in ['.au', '.mp3', '.wav']:
        files = U.getFiles(audio_path, ext)
        files_list.extend(files)
    random.shuffle(files_list)
    
    if not os.path.exists(out_path):
        os.makedirs(out_path)
    
    audio_list_path = os.path.join(out_path, 'audio_files.txt')
    open(audio_list_path,'w').writelines(['%s\n' % f for f in files_list])
    
    annotations = get_annotations(files_list)

    ground_truth_path = os.path.join(out_path, 'ground_truth.txt')
    open(ground_truth_path,'w').writelines(generate_mirex_list(files_list, annotations))
    generate_ground_truth_pickle(ground_truth_path)

    folds = get_folds(files_list, n_folds=n_folds)
    
    ### Single fold for quick experiments
    create_fold(0, 1, folds, annotations, out_path)
    
    for n in range(n_folds):
        create_fold(n, n_folds, folds, annotations, out_path)
Example #4
def runProcess(processIndex, tasksQueue, resultsQueue, outputFolder, runMode, useLink):
    kill_received = False
    while not kill_received:
        tileAbsPath = None
        try:
            # This call will patiently wait until new job is available
            tileAbsPath = tasksQueue.get()
        except:
            # if there is an error we will quit
            kill_received = True
        if tileAbsPath is None:
            # If we receive a None job, it means we can stop
            kill_received = True
        else:
            tileOutputFolder = outputFolder + '/' + os.path.basename(tileAbsPath)
            os.system('mkdir -p ' + tileOutputFolder)
            tileFilesAbsPaths = utils.getFiles(tileAbsPath, recursive = True)
            for tileFileAbsPath in tileFilesAbsPaths:
                outputAbsPath = outputFolder + '/' + os.path.basename(tileAbsPath) + '/' + os.path.basename(tileFileAbsPath)
                commands = []
                if 's' in runMode:
                    commands.append('lassort.exe -i ' + tileFileAbsPath + ' -o ' + outputAbsPath)
                else:
                    if useLink:
                        commands.append('ln -s ' + tileFileAbsPath + ' ' + outputAbsPath)
                    else:
                        commands.append('cp ' + tileFileAbsPath + ' ' + outputAbsPath)
                if 'i' in runMode:
                    commands.append('lasindex -i ' + outputAbsPath)
                for command in commands:
                    utils.shellExecute(command, True)
            resultsQueue.put((processIndex, tileAbsPath)) 
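The worker above uses the standard poison-pill shutdown: the driver enqueues one None per worker after the real tasks, so each process eventually pulls a None and leaves its loop (Examples #17 and #33 below show complete drivers). A minimal sketch of the producer side, with hypothetical names:

import multiprocessing

def feedTasks(items, numberProcs):
    tasksQueue = multiprocessing.Queue()
    for item in items:
        tasksQueue.put(item)  # real work first; the queue is FIFO
    for _ in range(numberProcs):
        tasksQueue.put(None)  # one sentinel per worker process
    return tasksQueue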
Example #5
def doPath(path):
    # Make sure the path ends with a trailing slash; append one if missing
    if not path.endswith('/'):
        path = path + '/'

    means = []
    integrals = []
    maximums = []
    files = utils.getFiles(path)
    for f in files:
        if '.txt' in f:
            m = doExercice(path + f)
            integrals.append(m["integral"])
            if m['integral'] < 0.:
                functions.plotMatrix(m, f)
                print f
            means.append(m["mean"])
            maximums.append(m["max"])
            #nextGraph()
        else:
            #TODO format .trc files
            print "The file " + path + f + " is not allowed in this version"
    mhist, ma, mi, bins = functions.histogram(means)
    covariance = functions.fitting(mhist, ma, mi, bins)

    inte = functions.getValidIntegrals(integrals, means, maximums, covariance)
    if len(inte) > 0:
        ihist, ma, mi, bins = functions.histogram(inte)
        functions.plot(ihist, ma, mi, bins)
Example #6
def genFacilityMap(customer, facility, map):

    buildingDir = PLAN_REPO_DIR + "/" + customer + "/" + facility + "/buildings"

    # Make sure the directory exists
    if not os.path.isdir(buildingDir):
        rospy.logerr("The specified facility or customer doesn't exist")
        exit(1)

    # Loop through each building
    for building in getSubDirs(buildingDir):

        map[building] = {}

        # Loop through each space in the building
        for space in getSubDirs(buildingDir + "/" + building):
            map[building][space] = {}

            spaceDir = buildingDir + "/" + building + "/" + space

            # Iterate through each track file in the space
            for file in getFiles(spaceDir):

                # Load the track file
                stream = open(spaceDir + "/" + file, "r")
                trackFile = yaml.load(stream)

                # Record the RFID code
                map[building][space][trackFile['attributes']
                                     ['rfidCode']] = None
                stream.close()
Example #7
def make_file_list(gtzan_path, n_folds=5,):
    """
    Generates the audio file list, ground-truth files, and fold lists
    """
    audio_path = os.path.join(gtzan_path,'audio')
    out_path = os.path.join(gtzan_path,'lists')
    files_list = []
    for ext in ['.au', '.mp3', '.wav']:
        files = U.getFiles(audio_path, ext)
        files_list.extend(files)
    random.shuffle(files_list)
    
    if not os.path.exists(out_path):
        os.makedirs(out_path)
    
    audio_list_path = os.path.join(out_path, 'audio_files.txt')
    open(audio_list_path,'w').writelines(['%s\n' % f for f in files_list])
    
    annotations = get_annotations(files_list)

    ground_truth_path = os.path.join(out_path, 'ground_truth.txt')
    open(ground_truth_path,'w').writelines(generate_mirex_list(files_list, annotations))
    generate_ground_truth_pickle(ground_truth_path)

    folds = get_folds(files_list, n_folds=n_folds)
    
    ### Single fold for quick experiments
    create_fold(0, 1, folds, annotations, out_path)
    
    for n in range(n_folds):
        create_fold(n, n_folds, folds, annotations, out_path)
Example #8
    def fetch( self ):
        filenames = [ os.path.basename( self.subinfo.target() ) ]

        if ( self.noFetch ):
            utils.debug( "skipping fetch (--offline)" )
            return True

        self.setProxy()
        return utils.getFiles( self.subinfo.target(), self.downloadDir() )
Example #9
def main(args):

    print("Simulator started\n")

    if 'HIVE_SIMULATOR' not in os.environ:
        print("Running in TEST-mode")
        return test()

    hivesim = os.environ['HIVE_SIMULATOR']
    print("Hive simulator: %s\n" % hivesim)
    hive = hivemodel.HiveAPI(hivesim)

    status = hivemodel.BlockTestExecutor(
        hive_api=hive,
        testfiles=utils.getFiles("./tests/BlockchainTests"),
        rules=Rules.RULES_FRONTIER).run()

    status = hivemodel.BlockTestExecutor(
        hive_api=hive,
        testfiles=utils.getFiles("./tests/BlockchainTests/EIP150"),
        rules=Rules.RULES_TANGERINE).run()

    status = hivemodel.BlockTestExecutor(
        hive_api=hive,
        testfiles=utils.getFiles("./tests/BlockchainTests/Homestead"),
        rules=Rules.RULES_HOMESTEAD).run()

    status = hivemodel.BlockTestExecutor(
        hive_api=hive,
        testfiles=utils.getFiles("./tests/BlockchainTests/TestNetwork"),
        rules=Rules.RULES_TRANSITIONNET).run()

    status = hivemodel.BlockTestExecutor(
        hive_api=hive,
        testfiles=utils.getFilesRecursive("./tests/BlockchainTests/GeneralStateTests/"),
        rules=None).run()

    if not status:
        sys.exit(-1)

    sys.exit(0)
Example #10
def buildFeatures(src):
    """
    Replaces the features.fea file in the UFO with the external
    features.fea file

    *src* is the source directory with the UFOs and external features.fea file
    """
    ufos = getFiles(src, "ufo")
    feature = os.path.join(src, "features.fea")
    for ufo in ufos:
        shutil.copy(feature, ufo)
    print("🏗  Moved features into UFOs")
Example #11
def getFacilityWorkerGroups(customer, facility):

    groupFiles = []

    groupsDir = PLAN_REPO_DIR + "/" + customer + "/" + facility + "/groups"

    # Scan the groups folder for 'gateworker' groups
    for file in getFiles(groupsDir):
        if GATE_WORKER_NAME in file:
            groupFiles.append(groupsDir + "/" + file)

    return groupFiles
Example #12
def main(args):

    print("Simulator started\n")

    if 'HIVE_SIMULATOR' not in os.environ:
        print("Running in TEST-mode")
        return test()

    hivesim = os.environ['HIVE_SIMULATOR']
    print("Hive simulator: %s\n" % hivesim)
    hive = hivemodel.HiveAPI(hivesim)

    status = hive.blockTests(
        testfiles=utils.getFiles("./tests/BlockchainTests"),
        executor=hivemodel.BlockTestExecutor(hive, Rules.RULES_FRONTIER))
    #        start=0, end=2, whitelist=["newChainFrom5Block"])

    status = hive.blockTests(
        testfiles=utils.getFiles("./tests/BlockchainTests/EIP150"),
        executor=hivemodel.BlockTestExecutor(hive, Rules.RULES_TANGERINE))

    status = hive.blockTests(
        testfiles=utils.getFiles("./tests/BlockchainTests/Homestead"),
        executor=hivemodel.BlockTestExecutor(hive, Rules.RULES_HOMESTEAD))

    status = hive.blockTests(
        testfiles=utils.getFiles("./tests/BlockchainTests/TestNetwork"),
        executor=hivemodel.BlockTestExecutor(hive, Rules.RULES_TRANSITIONNET),
        whitelist=["DaoTransactions_EmptyTransactionAndForkBlocksAhead"])

    status = hive.blockTests(testfiles=utils.getFilesRecursive(
        "./tests/BlockchainTests/GeneralStateTests/"),
                             executor=hivemodel.BlockTestExecutor(hive, None))

    if not status:
        sys.exit(-1)

    sys.exit(0)
Example #13
def main(args):

    print("Simulator started\n")

    if 'HIVE_SIMULATOR' not in os.environ:
        print("Running in TEST-mode")
        return test()

    hivesim = os.environ['HIVE_SIMULATOR']
    print("Hive simulator: %s\n" % hivesim)
    hive = hivemodel.HiveAPI(hivesim)

    status = hive.blockTests(
        testfiles=utils.getFiles("./tests/BlockchainTests"),
        executor=hivemodel.BlockTestExecutor(hive, Rules.RULES_FRONTIER))
    #        start=0, end=2)

    status = hive.blockTests(
        testfiles=utils.getFiles("./tests/BlockchainTests/EIP150"),
        executor=hivemodel.BlockTestExecutor(hive, Rules.RULES_TANGERINE))

    status = hive.blockTests(
        testfiles=utils.getFiles("./tests/BlockchainTests/Homestead"),
        executor=hivemodel.BlockTestExecutor(hive, Rules.RULES_HOMESTEAD))

    status = hive.blockTests(
        testfiles=utils.getFiles("./tests/BlockchainTests/TestNetwork"),
        executor=hivemodel.BlockTestExecutor(hive, Rules.RULES_TRANSITIONNET))

    status = hive.blockTests(testfiles=utils.getFilesRecursive(
        "./tests/BlockchainTests/GeneralStateTests/"),
                             executor=hivemodel.BlockTestExecutor(hive))
    #        whitelist=["mload32bitBound_return2_d0g0v0_EIP150"])

    if not status:
        sys.exit(-1)

    sys.exit(0)
Example #14
def index():
	files = utils.getFiles()  	
	
	data = {}
	
	#Dynamic data (based on files)
	data['files_count'] = len(files) 
	data['files'] = files
	data['file_types'] = utils.getFileTypes(files)

	#Static data (based on application)
	data['UPLOAD_FOLDER'] = app.config['UPLOAD_FOLDER']
	data['IMG_TYPES'] = IMG_TYPES 
	return render_template('index.html',data=data)	
Example #15
def evaluate(eval_root: str, eval_class_list: list, models: list):
    from sklearn.metrics import confusion_matrix
    from dataloader import single_loader
    real = []
    pred = []
    for idx, cl in enumerate(eval_class_list):
        wav_fl = getFiles(os.path.join(eval_root, cl))
        for wav_f in wav_fl:
            real.append(idx)
            print('Evaluating for {}'.format(wav_f))
            X = single_loader(wav_f, is_print_info=False, is_vision=False)
            pred.append(eval_class_list.index(get_result(X, models)))
    cm = confusion_matrix(real, pred)
    return cm, real, pred
Example #16
def make_file_list(gtzan_path, rand, n_folds, songs_per_genre, train_valid_ratio=0.8):
    """
    Generates the audio file list, ground-truth files, and fold lists
    """
    audio_path = os.path.join(gtzan_path, 'audio')
    out_path = os.path.join(gtzan_path, 'lists')
    files_list = []
    for ext in ['.au', '.mp3', '.wav']:
        files = U.getFiles(audio_path, ext)
        files_list.extend(files)

    annotations = get_annotations(files_list)

    if songs_per_genre is not None:
        # select only x songs per genre
        # create a dictionary {genre1: [song1, song2], genre2: [song3, song4]}
        genres_dic = {}
        for k, v in annotations.iteritems():
            genres_dic[v] = genres_dic.get(v, [])
            genres_dic[v].append(k)
        files_list = []
        for k in genres_dic.iterkeys():
            sample = rand.choice(
                genres_dic[k], size=songs_per_genre, replace=False)
            print "Selected %i songs for %s" % (len(sample), k)
            files_list.extend(sample)

    rand.shuffle(files_list)  # shuffle at the end of the selection
    annotations = get_annotations(files_list)  # update annotations

    if not os.path.exists(out_path):
        os.makedirs(out_path)

    audio_list_path = os.path.join(out_path, 'audio_files.txt')
    open(audio_list_path, 'w').writelines(['%s\n' % f for f in files_list])

    ground_truth_path = os.path.join(out_path, 'ground_truth.txt')
    open(ground_truth_path, 'w').writelines(
        generate_mirex_list(files_list, annotations))
    generate_ground_truth_pickle(ground_truth_path)

    folds = get_folds(files_list, n_folds=n_folds)

    # Single fold for quick experiments
    create_fold(0, 1, folds, annotations, out_path, train_valid_ratio)

    for n in range(n_folds):
        create_fold(
            n, n_folds, folds, annotations, out_path, train_valid_ratio)
Example #17
def run(inputFolder, srid, dbName, dbPass, dbUser, dbHost, dbPort, numberProcs):
    # Make connection
    connectionString = utils.getConnectString(dbName, dbUser, dbPass, dbHost, dbPort)
    connection = psycopg2.connect(connectionString)
    cursor = connection.cursor()
    
    # Make it absolute path
    inputFolder = os.path.abspath(inputFolder)
    
    # Create table if it does not exist
    cursor.execute('CREATE TABLE ' + utils.DB_TABLE_RAW + ' (filepath text, numberpoints integer, minz double precision, maxz double precision, geom public.geometry(Geometry, %s))', [srid, ])
    connection.commit()
    connection.close()
    
    # Create queues for the distributed processing
    tasksQueue = multiprocessing.Queue() # The queue of tasks (inputFiles)
    resultsQueue = multiprocessing.Queue() # The queue of results
    
    inputFiles = utils.getFiles(inputFolder, recursive=True)
    numFiles = len(inputFiles)
    
    # Add tasks/inputFiles
    for i in range(numFiles):
        tasksQueue.put(inputFiles[i])
    for i in range(numberProcs): #we add as many None jobs as numberProcs to tell them to terminate (queue is FIFO)
        tasksQueue.put(None)

    processes = []
    # We start numberProcs users processes
    for i in range(numberProcs):
        processes.append(multiprocessing.Process(target=runProcess, 
            args=(i, tasksQueue, resultsQueue, connectionString, srid)))
        processes[-1].start()

    # Get all the results (actually we do not need the returned values)
    for i in range(numFiles):
        resultsQueue.get()
        print 'Completed %d of %d (%.02f%%)' % (i+1, numFiles, 100. * float(i+1) / float(numFiles))
    # wait for all users to finish their execution
    for i in range(numberProcs):
        processes[i].join()
        
    # Create an index for the geometries
    connection = psycopg2.connect(connectionString)
    cursor = connection.cursor()
    cursor.execute('CREATE INDEX ' + utils.DB_TABLE_RAW + '_geom ON '  + utils.DB_TABLE_RAW + ' USING GIST ( geom )')
    connection.commit()
    connection.close()
Example #18
    def fetch( self ):
        if self.noFetch:
            return True
        svnpath = self.kdeSvnPath()
        if svnpath:
            return base.baseclass.fetch( self )

        if len( self.subinfo.targets ) and self.subinfo.buildTarget in self.subinfo.targets.keys():
            for pkg in self.subinfo.languages.split():
                tgt = self.subinfo.buildTarget
                filename = self.subinfo.targets[ tgt ] + 'kde-l10n-' + pkg + '-' + tgt + '.tar.bz2'
                return utils.getFiles( filename, self.downloaddir )
        else:
            return False

        return True
Example #19
def change_packagename(project_path, dist_packagename, app_name=None):
    src_path = os.path.join(project_path, "src")

    # rename app name
    if app_name is not None:
        replace_applicationname(project_path, app_name)

    # rename package
    src_packagename = get_packagename(project_path)
    if src_packagename == dist_packagename:
        return

    rename_packagename(src_path, src_packagename, dist_packagename)
    files = utils.getFiles(src_path, ".java")
    files.append(os.path.join(project_path, "AndroidManifest.xml"))
    for f in files:
        replace_packagename(f, src_packagename, dist_packagename)
Example #20
def buildTTFfiles(cff_root, ttf_root):
    """
    Copies all the mastering files from cff_root to ttf_root, save the ufo.
    Then compiles the source ufo into a source ttf for mastering.

    *cff_root* `string` path to the root of the CFF files
    *ttf_root* `string` path to the root of the TTF files
    """

    if os.path.exists(ttf_root):
        shutil.rmtree(ttf_root)

    ignore = shutil.ignore_patterns("*.ufo", )
    print("🏗  Copying files")
    shutil.copytree(cff_root, ttf_root, ignore=ignore)

    files = getFiles(cff_root, "ufo")
    print("🏗  Making TTF sources")

    outputFile = os.path.join(ttf_root, "make_ttf_source_output.txt")
    if os.path.exists(outputFile):
        os.remove(outputFile)

    printProgressBar(0, len(files), prefix='  ', suffix='Complete', length=50)
    for i, file in enumerate(files):
        oldPath = splitall(file)
        newPath = []
        for p in oldPath:
            if p == "CFF":
                p = "TTF"
            newPath.append(p)
        out = os.path.join(*newPath)
        out = out[:-4] + ".ttf"
        with open(outputFile, "a") as f:
            with redirect_stdout(f), redirect_stderr(f):
                ufo = DFont(file)
                ttf = compileTTF(ufo, useProductionNames=False)
                ttf.save(out)
        printProgressBar(i + 1,
                         len(files),
                         prefix='  ',
                         suffix='Complete',
                         length=50)
Example #21
def __init__(self,
             class_list: list,
             root: str = 'fakes',
             format_filter: str = '.wav'):
    self.datas = {}
    self.class_list = class_list
    for cl in class_list:
        clp = os.path.join(root, cl)
        data_fl = getFiles(clp)
        X = np.array([])
        for f in data_fl:
            if format_filter in f:
                print('Loading {} ---> for {}'.format(f, cl))
                # Read the wav file and extract its MFCC features
                sampling_freq, audio = wavfile.read(f)
                mfcc_features = mfcc(audio, sampling_freq)
                # Append the features to X
                if len(X) == 0:
                    X = mfcc_features
                else:
                    X = np.append(X, mfcc_features, axis=0)
        self.datas[cl] = X
    print("All data loaded --->")
Example #22
import sys
import utils
import params
import numpy as np
import pickle
import os

from sklearn.neighbors import BallTree
from scipy.spatial.distance import cosine

ficheros = utils.getFiles(params.DESCRIPTORS_FOLDER)
arrays = utils.loadNumpyArrays(ficheros)

if params.USE_PCA:
    from sklearn.decomposition import PCA
    pca = PCA(n_components=100)
    pca.fit(arrays)
    arrays = pca.transform(arrays)
    with open('./INDEX/pca.pickle', 'wb') as handle:
        pickle.dump(pca, handle, 2)

tree = BallTree(arrays, leaf_size=40, metric=cosine)
with open('./INDEX/index.pickle', 'wb') as handle:
    pickle.dump(tree, handle, 2)

print('INDEX  SAVED')
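Example #31 below is the query-side counterpart of this script: it reloads pca.pickle and index.pickle, applies the saved PCA transform to the query descriptor, and asks the tree for the params.K nearest neighbours.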
Example #23
def test():
    hive = HiveTestAPI()
    executor = hivemodel.BlockTestExecutor(hive, Rules.RULES_TANGERINE)
    hive.blockTests(testfiles=utils.getFiles("./tests/BlockchainTests"),
                    executor=executor)
Example #24
def makeSFNT(root, outputPath, kind="otf"):
    """
    Generates otf or ttf fonts using the Adobe FDK.

    This also autohints the generated fonts either with psautohint (cff) or
    ttfautohint (ttf)

    *root* is the root to find the source files in
    *outputPath* is the path to save the generated fonts to
    *kind* is either 'otf' or 'ttf'.
    """

    if kind == "ttf":
        source = "ttf"
    else:
        source = "ufo"

    # make sure output dir contains no files
    files = getFiles(outputPath, kind)
    if len(files) != 0:
        for file in files:
            os.remove(file)

    print(f"🏗  Initial {kind.upper()} building")
    files = getFiles(root, source)
    outputFile = os.path.join(outputPath, "makeotf_output.txt")
    if os.path.exists(outputFile):
        os.remove(outputFile)

    printProgressBar(0, len(files), prefix='  ', suffix='Complete', length=50)
    for i, file in enumerate(files):

        # Set the makeotf parameters
        # -r is release mode
        # -nshw quiets the "glyph not hinted" warnings, as we
        #  have yet to run the autohinter (we do that after the
        #  fonts are built)

        args = ["makeotf", "-f", file, "-o", outputPath, "-r", "-nshw"]
        run = subprocess.run(args,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT,
                             universal_newlines=True)
        with open(outputFile, "a") as f:
            f.write(run.stdout)

        printProgressBar(i + 1,
                         len(files),
                         prefix='  ',
                         suffix='Complete',
                         length=50)

    print(f"🏗  {kind.upper()} table fixing")
    files = getFiles(outputPath, kind)
    printProgressBar(0, len(files), prefix='  ', suffix='Complete', length=50)
    for i, file in enumerate(files):
        font = TTFont(file)
        nameTableTweak(font)
        makeDSIG(font)
        font.save(file)
        printProgressBar(i + 1,
                         len(files),
                         prefix='  ',
                         suffix='Complete',
                         length=50)

    print(f"🏗  {kind.upper()} autohinting")
    files = getFiles(outputPath, kind)

    outputFile = os.path.join(outputPath, "autohint_output.txt")
    if os.path.exists(outputFile):
        os.remove(outputFile)

    printProgressBar(0, len(files), prefix='  ', suffix='Complete', length=50)
    for i, file in enumerate(files):
        if kind is "otf":
            args = ["psautohint", file]
            run = subprocess.run(args,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.STDOUT,
                                 universal_newlines=True)
            with open(outputFile, "a") as f:
                f.write(run.stdout)
        elif kind is "ttf":
            ttfautohint_options.update(in_file=file,
                                       out_file=file,
                                       hint_composites=True)
            with open(outputFile, "a") as f:
                with redirect_stdout(f), redirect_stderr(f):
                    ttfautohint.ttfautohint()

        printProgressBar(i + 1,
                         len(files),
                         prefix='  ',
                         suffix='Complete',
                         length=50)
Example #25
def buildInstances(designspacePath, root, name_map):
    """
    Generates and cleans up the instances for building the static fonts.

    This sets a variety of font info related values (blueScale, weight class,
    panose, and fixed pitch)

    It also cleans up the standard ps stems, sets the font features to be
    nothing, and removes overlap.

    For each instance it generates the fontinfo, kern.fea, and features files

    *designspace* is a designspace object
    *root* is the root directory where the file structure has been built
    *name_map* is the name mapping dictionary
    """

    overlapGlyphs = [
        "Aogonek", "Aring", "Aringacute", "Ccedilla", "Ccedillaacute",
        "Dcroat", "Ecedillabreve", "Eng", "Eogonek", "Eth", "Hbar", "Iogonek",
        "Lslash", "Lslash.sans", "Nhookleft", "Ohorn", "Ohornacute",
        "Ohorndot", "Ohorngrave", "Ohornhook", "Ohorntilde", "Oogonek",
        "Oslash", "Oslashacute", "Q", "Scedilla", "Tbar", "Tcedilla", "Uhorn",
        "Uhornacute", "Uhorndot", "Uhorngrave", "Uhornhook", "Uhorntilde",
        "Uogonek", "aogonek", "aogonek.italic", "aogonek.simple", "aringacute",
        "aringacute.italic", "aringacute.simple", "ccedilla",
        "ccedilla.italic", "ccedillaacute", "ccedillaacute.italic", "dcroat",
        "ecedillabreve", "ecedillabreve.italic", "eogonek", "equal_equal.code",
        "hbar", "iogonek", "iogonek.italic", "iogonek.mono", "iogonek.simple",
        "lslash", "lslash.italic", "lslash.mono", "lslash.sans",
        "lslash.simple", "nhookleft", "notequal", "notequal.case",
        "numbersign_numbersign.code", "numbersign_numbersign_numbersign.code",
        "numbersign_numbersign_numbersign_numbersign.code", "ohorn",
        "ohornacute", "ohorndot", "ohorngrave", "ohornhook", "ohorntilde",
        "oogonek", "oslash", "oslashacute", "ringacute", "ringacute.case",
        "scedilla", "scedilla.italic", "tbar", "tcedilla", "uhorn",
        "uhorn.italic", "uhornacute", "uhornacute.italic", "uhorndot",
        "uhorndot.italic", "uhorngrave", "uhorngrave.italic", "uhornhook",
        "uhornhook.italic", "uhorntilde", "uhorntilde.italic", "uogonek",
        "uogonek.italic"
    ]

    doc = DesignSpaceProcessor()
    doc.useVarlib = True
    doc.roundGeometry = True
    doc.read(designspacePath)
    for i in doc.instances:
        fn, sn, _, _, _ = name_map[(i.familyName, i.styleName)]
        path = os.path.join(
            root,
            fn.strip().replace(" ", ""),
            sn.strip().replace(" ", ""),
            os.path.split(i.filename)[1].strip().replace(" ", ""))
        i.path = path
    print("🏗  Generating instance UFOs")
    doc.generateUFO()

    ufos = getFiles(root, ".ufo")
    fonts = [Font(ufo) for ufo in ufos]

    print("🏗  Getting blueScale")
    blueScale = getBlueScale(fonts)

    print("🏗  Setting values, removing overlap, writing files")
    length = len(fonts)
    printProgressBar(0, length, prefix='  ', suffix='Complete', length=50)
    for i, font in enumerate(fonts):
        font_dir = os.path.split(font.path)[0]

        # Font info
        # Get and set PS Font Full Name and PS Font Name
        _, _, fullname, ps, _ = name_map[(font.info.familyName,
                                          font.info.styleName)]
        font.info.postscriptFontName = ps
        font.info.postscriptFullName = fullname

        # Get weight value based on fullname
        # 'Regular' is not part of the fullname so we do a try/except
        # that will throw an IndexError if the fullname is a Regular
        # style ("Recursive Mono Csl", "Recursive Mono Lnr",
        # "Recursive Sans Csl", or "Recursive Sans Lnr"). We know then
        # that the font weight value should be 400. Likewise, if the
        # fourth item in the name is "Italic", the weight should be
        # 400, so we catch that here too.
        try:
            weight = fullname.split()[4]
            if weight == "Italic":
                weight = "Regular"
        except IndexError:
            weight = "Regular"

        # Set weight class
        font.info.openTypeOS2WeightClass = weightMap[weight][0]

        splitFn = fullname.split()
        # Set the Italic angle
        if "Italic" in splitFn:
            font.info.italicAngle = -15

        # Set Panose
        fillInPanoseValues(font, weight)

        # Set fixed pitch if font is Mono
        if "Mono" in splitFn or "Mn" in splitFn:
            font.info.postscriptIsFixedPitch = True

        # Fix standard stems
        fixStandardStems(font)

        # Set blueScale
        font.info.postscriptBlueScale = blueScale

        # Remove the font features, as this is wholly external and
        # causes issues with making TTFs
        font.features.text = ""

        # Font cleanup
        # Remove overlap in the font
        for name in overlapGlyphs:
            font[name].decompose()
        for glyph in font:
            glyph.removeOverlap()
        font.save(font.path)

        # External files
        # Write out the `fontinfo` file
        buildFontInfo(font.info.styleName, font_dir)

        # Write out the kerning feature file
        path = os.path.join(font_dir, "kern.fea")
        writeKerning(font, path)

        # Write out the font feature file
        writeFeature(font)

        printProgressBar(i + 1,
                         length,
                         prefix='  ',
                         suffix='Complete',
                         length=50)

    print("✅ Made UFO instances")
    batchCheckOutlines(root)
Example #26
import utils
import params

train_ficheros = utils.getFiles(params.TRAIN_IMAGES_PATH)
val_ficheros = utils.getFiles(params.VAL_IMAGES_PATH)
test_ficheros = utils.getFiles(params.TEST_IMAGES_PATH)

utils.getCnnDescriptors(train_ficheros, params.CNN_DESCRIPTORS_TRAIN_PATH,
                        'vgg16')

utils.getCnnDescriptors(val_ficheros, params.CNN_DESCRIPTORS_VAL_PATH, 'vgg16')

utils.getCnnDescriptors(test_ficheros, params.CNN_DESCRIPTORS_TEST_PATH,
                        'vgg16')
Example #27
import os
import numpy as np
import time

from scipy.sparse import vstack

from keras.models import Sequential
from keras.layers import Activation, Dense, Dropout
from keras.objectives import MSE, MAE
from keras.callbacks import EarlyStopping

import utils
import params

TRAIN_SIZE = 0.8
BATCH_SIZE = 100000
EPOCHS = 1
print('Reading Files...')
text = utils.getFiles(params.TFIDF_DESCRIPTORS_TRAIN_PATH)
images = utils.getFiles(params.CNN_PCA_DESCRIPTORS_TRAIN_PATH)
INPUT_SHAPE = 369537
OUTPUT_SHAPE = 128

model = Sequential()
model.add(Dense(input_dim=INPUT_SHAPE, output_dim=150))
model.add(Activation("relu"))
model.add(Dropout(0.50))
model.add(Dense(output_dim=130))
model.add(Activation("relu"))
model.add(Dropout(0.50))
model.add(Dense(output_dim=128))
model.compile("nadam", "mae")

print('Training...')
Example #28
import numpy as np
import params
import utils
import pickle
from sklearn.decomposition import PCA

input_path = params.VLAD_DESCRIPTORS_TRAIN_PATH
#output_path = params.CNN_PCA_DESCRIPTORS_TRAIN_PATH

ficheros = utils.getFiles(input_path)
print('Loading Files...')
input_arrays = utils.loadNumpyArrays(ficheros)

print('Training model...')
pca = PCA(n_components=params.DIM)
pca.fit(input_arrays)

print('Saving Model...')

with open(params.PCA_MODEL_VLAD, 'wb') as handle:
    pickle.dump(pca, handle, protocol=2)

print('Done!')

#for array, name in zip(input_arrays,ficheros):
#    array_pca = pca.transform(array)
#    np.load(name, array_pca)
Example #29
import sys
import utils
import params
import numpy as np
import pickle
import os

from sklearn.neighbors import BallTree
#from scipy.spatial.distance import cosine

ficheros = utils.getFiles(params.CNN_PCA_DESCRIPTORS_TEST_PATH)
arrays = utils.loadNumpyArrays(ficheros)
n_arrays = []
for array in arrays:
    n_arrays.append(array / np.linalg.norm(array))
del arrays

order = dict(zip(range(len(ficheros)), ficheros))

tree = BallTree(n_arrays, leaf_size=40, metric='euclidean')

with open(params.CNN_TEST_INDEX, 'wb') as handle:
    pickle.dump(tree, handle, 2)

with open(params.CNN_TEST_ORDER, 'wb') as handle:
    pickle.dump(order, handle, 2)

print('INDEX AND ORDER SAVED')
Example #30
File: simulator.py  Project: zilm13/hive
def test():
    hive = HiveTestAPI()
    executor = hivemodel.BlockTestExecutor()
    hive.blockTests(testfiles=utils.getFiles("./tests/BlockchainTests"),
                    executor=executor)
Example #31
import pickle

import numpy as np
import matplotlib.pyplot as plt

import utils
import params

query_descriptor = utils.getDescriptor(params.QUERY_IMAGE)

if params.USE_PCA:

    with open('./INDEX/pca.pickle','rb') as handle:
        pca = pickle.load(handle)

with open('./INDEX/index.pickle','rb') as handle:
    tree = pickle.load(handle)

query_descriptor = pca.transform(query_descriptor)
dist, idxs = tree.query(query_descriptor[0], k=params.K)

ficheros = utils.getFiles(params.DATABASE_FOLDER)
selected_images = np.asarray(ficheros)[idxs]

plt.figure()
img = plt.imread(params.QUERY_IMAGE)
plt.imshow(img)


f, ax = plt.subplots(int(params.K/3) + 1, 3)

row = 0
column = 0
for i, image in enumerate(selected_images[0]):
    img = plt.imread(image)
    ax[row,column].imshow(img)
    ax[row,column].set_title(str(i+1)+ ' MATCH')
Example #32
    outputDir = comm.bcast(outputDir, root=0)
    outputPNGFlag = comm.bcast(outputPNGFlag, root=0)
    outputPNGDir = comm.bcast(outputPNGDir, root=0)
    outputNPYFlag = comm.bcast(outputNPYFlag, root=0)
    outputNPYDir = comm.bcast(outputNPYDir, root=0)
    scalingFactor = comm.bcast(scalingFactor, root=0)
    minPercentile = comm.bcast(minPercentile, root=0)
    maxPercentile = comm.bcast(maxPercentile, root=0)
    firstFrame = comm.bcast(firstFrame, root=0)
    lastFrame = comm.bcast(lastFrame, root=0)
    averageFrames = comm.bcast(averageFrames, root=0)

    if (rank == 0):
        print('Processing image sequence %s' % (inputDir))
        inputFileList = utils.getFiles(inputDir)
    else:
        inputFileList = None
    comm.barrier()

    inputDirList = []
    frameDM4List = []
    inputDM4FileList = []
    outputPNGFileList = []
    outputPNGFlagList = []
    outputNPYFileList = []
    outputNPYFlagList = []
    scaleList = []
    minPercentileList = []
    maxPercentileList = []
    averageFramesList = []
Example #33
def run(inputFolder, outputFolder, tempFolder, extent, numberTiles, numberProcs):
    # Check input parameters
    if not os.path.isdir(inputFolder) and not os.path.isfile(inputFolder):
        raise Exception('Error: Input folder does not exist!')
    if os.path.isfile(outputFolder):
        raise Exception('Error: There is a file with the same name as the output folder. Please, delete it!')
    elif os.path.isdir(outputFolder) and os.listdir(outputFolder):
        raise Exception('Error: Output folder exists and it is not empty. Please, delete the data in the output folder!')
    # Get the number of tiles per dimension (x and y)
    axisTiles = math.sqrt(numberTiles) 
    if (not axisTiles.is_integer()) or (int(axisTiles) % 2):
        raise Exception('Error: Number of tiles must be the square of a number that is a power of 2!')
    axisTiles = int(axisTiles)
    
    # Create output and temporal folder
    utils.shellExecute('mkdir -p ' + outputFolder)
    utils.shellExecute('mkdir -p ' + tempFolder)
    
    (minX, minY, maxX, maxY) = extent.split(' ')
    minX = float(minX)
    minY = float(minY)
    maxX = float(maxX)
    maxY = float(maxY)
    
    if (maxX - minX) != (maxY - minY):
        raise Exception('Error: Tiling requires that maxX-minX must be equal to maxY-minY!')
    
    inputFiles = utils.getFiles(inputFolder, recursive=True)
    numInputFiles = len(inputFiles)
    print '%s contains %d files' % (inputFolder, numInputFiles)

    # Create queues for the distributed processing
    tasksQueue = multiprocessing.Queue() # The queue of tasks (inputFiles)
    resultsQueue = multiprocessing.Queue() # The queue of results
    
    # Add tasks/inputFiles
    for i in range(numInputFiles):
        tasksQueue.put(inputFiles[i])
    for i in range(numberProcs): #we add as many None jobs as numberProcs to tell them to terminate (queue is FIFO)
        tasksQueue.put(None)

    processes = []
    # We start numberProcs users processes
    for i in range(numberProcs):
        processes.append(multiprocessing.Process(target=runProcess, 
            args=(i, tasksQueue, resultsQueue, minX, minY, maxX, maxY, outputFolder, tempFolder, axisTiles)))
        processes[-1].start()

    # Get all the results (actually we do not need the returned values)
    numPoints = 0
    for i in range(numInputFiles):
        (processIndex, inputFile, inputFileNumPoints) = resultsQueue.get()
        numPoints += inputFileNumPoints
        print 'Completed %d of %d (%.02f%%)' % (i+1, numInputFiles, 100. * float(i+1) / float(numInputFiles))
    # wait for all users to finish their execution
    for i in range(numberProcs):
        processes[i].join()
    
    # Write the tile.js file with information about the tiles
    cFile = open(outputFolder + '/tiles.js', 'w')
    d = {}
    d["NumberPoints"] = numPoints
    d["numXTiles"] = axisTiles
    d["numYTiles"] = axisTiles
    d["boundingBox"] = {'lx':minX,'ly':minY,'ux':maxX,'uy':maxY}
    cFile.write(json.dumps(d,indent=4,sort_keys=True))
    cFile.close()
Example #34
        args.static = True
        args.woff = True

    if args.files:
        files = buildFiles(version=version)
    else:
        files = getFolders("recursive-MONO_CASL_wght_slnt_ital--full_gsub.designspace")

    if args.variable:
        build_variable(designspacePath=files["designspace"],
                       stylespacePath=files["stylespace"],
                       out=os.path.join(outPaths[0],
                                        f"Recursive_VF_{version}.ttf"))

    if args.varfiles:
        files = buildFiles(version=version, static=False)
    if args.statfiles:
        files = buildFiles(version=version, variable=False)

    if args.static:
        build_static(files["cff"], files["ttf"], out)

    if args.woff:
        for path in outPaths:
            if os.path.exists(path):
                ttfs = getFiles(path, "ttf")
                otfs = getFiles(path, "otf")
                fonts = ttfs + otfs
                print(f"🏗  Making WOFFs for {path}")
                makeWOFF(fonts, os.path.join(path, "WOFF2"))
Example #35
def test():
    hive = HiveTestAPI()
    executor = hivemodel.BlockTestExecutor()
    hive.blockTests(testfiles=utils.getFiles("./tests/BlockchainTests"), executor=executor)
Example #36
import os
from utils import getFiles, plot_confusion_matrix
from model import HMM_Model, get_result, train, load_models, evaluate
from dataloader import Dataloader, single_loader
from sklearn.metrics import classification_report
import numpy as np

genre_list = [
    'blues', 'classical', 'jazz', 'country', 'pop', 'rock', 'metal', 'disco',
    'hiphop', 'reggae'
]

# dl = Dataloader(genre_list, root='genres')
# train(dl)

models = load_models(genre_list)

# Evaluate method 1:
cm, real, pred = evaluate('genres_small', genre_list, models)
plot_confusion_matrix(cm, genre_list, True)  # get plot
print(classification_report(real, pred, target_names=genre_list))  # get report

# Evaluate method 2:
fl = getFiles('genres_small')
for f in fl:
    X = single_loader(f, is_print_info=False, is_vision=False)
    print('Truth:{}, predict:{}'.format(f, get_result(X, models)))

# Single test:
X = single_loader('blues.00000.wav')
result = get_result(X, models)
print(result)