import os
import glob

import scipy.io as sio
import seaborn as sns
import matplotlib.pyplot as plt
from multiprocessing import Pool as PPool


def get_inter_anno_agreement(NThreads=12):
    """
    Replicate the inter-annotator agreement portion of Figure 11 in the Frontiers paper
    """
    if not os.path.exists("interanno.mat"):
        parpool = PPool(NThreads)
        filenames = glob.glob("%s/*.jams" % JAMS_DIR)
        res = parpool.map(get_inter_anno_agreement_par, filenames)
        parpool.close()
        sio.savemat("interanno.mat", {"res": res})
    res = sio.loadmat("interanno.mat")["res"]
    res = res[res[:, 0] > -1, :]  # Keep only valid rows (-1 in the first column is a sentinel)
    sns.kdeplot(res[:, -1], fill=True)
    plt.xlim([0, 1])
    plt.title("L-Measure Inter-Annotator Agreement")
    plt.xlabel("L-Measure")
    plt.ylabel("Probability Density")
    plt.show()
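
A note on the pattern above: the expensive parpool.map call runs only when
interanno.mat is absent, and every run then loads from the cached file, so
repeated invocations are cheap. A runnable miniature of the same
cache-then-load idiom (the file name and task here are illustrative, not from
the original project):

import os
from multiprocessing import Pool as PPool
import scipy.io as sio

def slow_square(x):
    return x * x

if __name__ == "__main__":
    # Compute in parallel only on the first run, then reuse the cached file
    if not os.path.exists("cache_demo.mat"):
        with PPool(4) as pool:
            res = pool.map(slow_square, range(8))
        sio.savemat("cache_demo.mat", {"res": res})
    res = sio.loadmat("cache_demo.mat")["res"]
    print(res)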
Example #2
def doIBDTWMergeTrees(SSMA, SSMB, res, NThreads=8, Verbose=False):
    """
    Do a version of isometry blind time warping between SSMs
    which uses merge trees as a proxy for constrained DTW
    between each pair of rows
    :param SSMA: MxM self-similarity matrix
    :param SSMB: NxN self-similarity matrix
    :param res: Width of each pixel in the persistence image
    :param NThreads: How many threads to use when parallelizing the
        row computations of the cross-similarity matrix
    :param Verbose: Whether or not to print which row is being aligned
    :returns: {'X': An M x NPixels array of persistence images for SSM A,
               'Y': An N x NPixels array of persistence images for SSM B,
               'D': MxN cross-similarity matrix,
               'PIDims': Dimensions of each persistence image}
    """
    if NThreads > 1:
        if Alignment.parpool is None:
            Alignment.parpool = PPool(NThreads)
    # Make a dummy persistence image to figure out the output resolution
    ret = getPersistenceImage(np.array([[0, 0]]), [-0.1, 1.1, -0.1, 1.1], res)
    PIDims = ret['PI'].shape
    M = SSMA.shape[0]
    N = SSMB.shape[0]
    if NThreads > 1:
        # Pack each SSM row with the resolution so Pool.map gets one argument per task
        SSMATup = tuple(SSMA[i, :] for i in range(M))
        SSMBTup = tuple(SSMB[i, :] for i in range(N))
        args = zip(SSMATup, M * [res])
        X = Alignment.parpool.map(doIBDTWMergeTreesHelper, args)
        args = zip(SSMBTup, N * [res])
        Y = Alignment.parpool.map(doIBDTWMergeTreesHelper, args)
    else:
        X = []
        for i in range(M):
            X.append(doIBDTWMergeTreesHelper((SSMA[i, :], res)))
        Y = []
        for i in range(N):
            Y.append(doIBDTWMergeTreesHelper((SSMB[i, :], res)))
    X = np.array(X)
    Y = np.array(Y)
    D = getCSM(X, Y)
    return {'X': X, 'Y': Y, 'D': D, 'PIDims': PIDims}
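
The zip calls above exist because Pool.map hands each worker exactly one
argument: multi-parameter calls are packed into tuples and unpacked inside the
helper. A runnable miniature of the same packing/unpacking (the names here are
illustrative, not the project's):

import numpy as np
from multiprocessing import Pool as PPool

def row_feature(args):
    row, scale = args  # unpack the single packed tuple
    return float(np.sum(row)) * scale

if __name__ == "__main__":
    A = np.arange(12).reshape(3, 4)
    rows = tuple(A[i, :] for i in range(A.shape[0]))
    with PPool(2) as pool:
        print(pool.map(row_feature, zip(rows, [0.5] * len(rows))))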
Example #3
    # Which cross-similarity measure to use for each feature type when comparing blocks
    CSMTypes = {
        'MFCCs': 'Euclidean',
        'SSMs': 'Euclidean',
        'Chromas': 'CosineOTI'
    }

    # Open collection and query lists
    with open("covers80collection.txt", 'r') as fin:
        allFiles = [f.strip() for f in fin.readlines()]

    # Set up the parallel pool
    NThreads = 8
    parpool = PPool(NThreads)

    # Precompute beat intervals, MFCC, and HPCP features for each song
    NF = len(allFiles)
    args = zip(allFiles, [scratchDir] * NF, [hopSize] * NF, [Kappa] * NF,
               [CSMTypes] * NF, [FeatureParams] * NF, [TempoLevels] * NF,
               [{}] * NF)
    parpool.map(precomputeBatchFeatures, args)

    # Process similarity comparisons one block of song pairs at a time
    N = len(allFiles)
    NPerBlock = 20
    ranges = getBatchBlockRanges(N, NPerBlock)
    args = zip(ranges, [Kappa] * len(ranges), [CSMTypes] * len(ranges),
               [allFiles] * len(ranges), [scratchDir] * len(ranges))
    res = parpool.map(compareBatchBlock, args)
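
Pool.starmap (Python 3) is an alternative to this zip-based packing: it
unpacks each tuple into separate parameters, so compareBatchBlock could
declare five named parameters instead of receiving one packed tuple. That
signature change is an assumption, not how the project's helper is written.
A runnable miniature with illustrative names:

from multiprocessing import Pool as PPool

def compare(block, kappa):
    return block * kappa

if __name__ == "__main__":
    with PPool(2) as pool:
        print(pool.starmap(compare, [(b, 0.1) for b in range(5)]))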

Example #4

# Assumed imports and example constants; the original file's header is not shown
import logging
from multiprocessing import Pool as PPool
from multiprocessing.pool import ThreadPool as TPool

PPOOL_SIZE = 4
TPOOL_SIZE = 8
THREAD_NUMBER = 16
PROCESS_NUMBER = 4


def process(i):
    def thread(j):
        from model import User
        try:
            User.get_all(i, j)
            User.add_user(i, j)
            User.get_all(i, j)
        except Exception as exc:
            logging.error(f'{i}-{j} {type(exc).__name__}:{exc.args}')

    try:
        tpool = TPool(TPOOL_SIZE)
        # Force submission of every task; the AsyncResults are discarded
        list(
            tpool.apply_async(thread, (j, ))
            for j in range(1, THREAD_NUMBER + 1))
        tpool.close()
        tpool.join()
    except Exception as exc:
        logging.error(f'{i} {type(exc).__name__}:{exc.args}')


if __name__ == '__main__':
    ppool = PPool(PPOOL_SIZE)
    list(
        ppool.apply_async(process, (i, ))
        for i in range(1, PROCESS_NUMBER + 1))
    ppool.close()
    ppool.join()
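
One caveat in the pattern above: apply_async returns an AsyncResult, and
discarding it means worker exceptions only surface through the logging calls
inside the workers. A minimal sketch of keeping the results instead, so that
.get() re-raises worker exceptions in the parent (names here are illustrative):

from multiprocessing.pool import ThreadPool as TPool

def square(j):
    return j * j

if __name__ == "__main__":
    with TPool(4) as tpool:
        results = [tpool.apply_async(square, (j,)) for j in range(1, 6)]
        print([r.get() for r in results])  # [1, 4, 9, 16, 25]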
Example #5

import time
from multiprocessing import Pool as PPool

import humanfriendly


def blah(j):
    # CPU-bound busy work; the argument just receives a value from pool.map
    total = 0
    for i in range(10000):
        total += i


def avg(lst):
    return sum(lst) / len(lst)


if __name__ == "__main__":
    size_of_queue = 10
    num_items = 1000
    num_samples = 100

    time_taken = []
    for _ in range(num_samples):
        start = time.time()
        # Pool startup is included in each timed sample
        with PPool(processes=size_of_queue) as pool:
            pool.map(blah, range(num_items))

        time_taken.append(time.time() - start)

    print("Done (processes)")
    print(humanfriendly.format_timespan(avg(time_taken)))
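
For comparison, the same measurement with a thread pool only changes the pool
class; a sketch, assuming it is appended inside the same __main__ block so the
existing names (blah, avg, size_of_queue, num_items, num_samples) are in scope:

    from multiprocessing.pool import ThreadPool as TPool

    time_taken = []
    for _ in range(num_samples):
        start = time.time()
        with TPool(size_of_queue) as pool:
            pool.map(blah, range(num_items))
        time_taken.append(time.time() - start)

    print("Done (threads)")
    print(humanfriendly.format_timespan(avg(time_taken)))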