Example #1
File: main.py Project: rainwoodman/lyamock
import numpy
import sharedmem

# parseargs is defined elsewhere in this project.
def main():
    numpy.seterr(all="ignore")
    A = parseargs()
    if A.serial:
        # debug mode runs sharedmem jobs serially in the main process
        sharedmem.set_debug(True)

    # dispatch to the requested subcommand module, e.g. commands.fit;
    # fromlist makes __import__ return the submodule itself
    fullname = "commands." + A.command
    module = __import__(fullname, fromlist=["main"])
    module.main(A)
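
The `__import__(fullname, fromlist=["main"])` call is the classic idiom for getting back the submodule itself rather than the top-level `commands` package. A minimal sketch of the same dispatch using `importlib` (the `commands` package name is taken from the example above; nothing else here is from lyamock):

import importlib

def dispatch(command, args):
    # importlib.import_module returns the submodule directly,
    # so the fromlist trick is not needed
    module = importlib.import_module("commands." + command)
    module.main(args)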
Example #2
File: fit.py Project: rainwoodman/lyamock
import numpy
from numpy import linalg
import sharedmem

# BootstrapDB, fit1, poles_err and chunkmap are helpers defined
# elsewhere in the lyamock project.
def main(config):
    global cov
    DB = BootstrapDB(config)

    # select the bins that enter the fit
    MASK = DB.dummy.imesh >= 0
    MASK &= DB.dummy.rmesh <= config.rmax
    MASK &= DB.dummy.rmesh >= config.rmin

    print("dof in fitting", MASK.sum())
    # create a dummy to test the fitting
    p0 = [-0.2, 3.5, 1.5, 1.5]

    eigenmodes = DB.eigenmodes
    dummy = eigenmodes(p0)

    covfull = numpy.load(config.CovarianceMatrixOutput)["cov"]
    cov = covfull[MASK][:, MASK]

    print("inverting")
    INV = linalg.inv(cov)
    print("inverted")

    x, chi = fit1(dummy, eigenmodes, INV, MASK)

    print("x =", x)
    print("p0 = bF, bQ, BF, BQ", p0)

    error = poles_err(dummy, covfull)

    fitted = sharedmem.empty((len(DB), len(p0)))
    chi = sharedmem.empty(len(DB))
    samples, models = [], []
    sharedmem.set_debug(True)

    def work(i):
        sample = DB(i)
        print("fitting", i)
        fitted[i], chi[i] = fit1(sample, eigenmodes, INV, MASK)
        model = eigenmodes(fitted[i])
        print(list(zip(sample[0].monopole, model[0].monopole)))
        return i, sample, model

    def reduce(rt):
        i, s, m = rt
        samples.append((i, s))
        models.append((i, m))

    chunkmap(work, range(len(DB)), 100, reduce=reduce)
    samples = [s for i, s in sorted(samples)]
    models = [m for i, m in sorted(models)]
    numpy.savez("fit.npz", samples=samples, models=models,
                fittedparameters=fitted, chi=chi, error=error)
Example #3
def test_wordcount():
    """ 
        An example word counting program. The parallelism is per line.

        In reality, the parallelism shall be at least on a file level to
        benefit from sharedmem / multiprocessing.
        
    """
    word_count = {
            'sharedmem': 0,
            'pool': 0,
            }

    with sharedmem.MapReduce() as pool:

        def work(line):
            # create a fresh local counter dictionary
            my_word_count = dict([(word, 0) for word in word_count])

            for word in line.replace('.', ' ').split():
                if word in word_count:
                    my_word_count[word] += 1

            return my_word_count

        def reduce(her_word_count):
            for word in word_count:
                word_count[word] += her_word_count[word]

        pool.map(work, open(__file__, 'r').readlines(), reduce=reduce)

        parallel_result = dict(word_count)

        # establish the ground truth: set_debug(True) makes the pool run
        # sequentially in the main process
        sharedmem.set_debug(True)

        for word in word_count:
            word_count[word] = 0

        pool.map(work, open(__file__, 'r').readlines(), reduce=reduce)
        sharedmem.set_debug(False)

    for word in word_count:
        assert word_count[word] == parallel_result[word]
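
As the docstring notes, per-line parallelism is too fine-grained to pay for the process overhead; a file-level variant hands each worker a whole file. A minimal sketch under that assumption (the filenames argument and keyword list are hypothetical, not part of the test above):

import sharedmem

def count_words(filenames, keywords=('sharedmem', 'pool')):
    word_count = dict.fromkeys(keywords, 0)

    with sharedmem.MapReduce() as pool:
        def work(filename):
            # each worker counts an entire file locally
            my_count = dict.fromkeys(keywords, 0)
            with open(filename, 'r') as f:
                for line in f:
                    for word in line.replace('.', ' ').split():
                        if word in my_count:
                            my_count[word] += 1
            return my_count

        def reduce(her_count):
            # merge per-file counters in the parent process
            for word in her_count:
                word_count[word] += her_count[word]

        pool.map(work, filenames, reduce=reduce)

    return word_count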