Example #1
0
# pickle fails for nested functions
def adder(augend):
    """Build and return a closure that adds *augend* to its argument.

    The captured one-element list demonstrates closed-over mutable state;
    plain pickle cannot serialize the returned nested function.
    """
    base = [0]

    def _add(addend):
        return augend + addend + base[0]

    return _add

# build from inner function
add_me = adder(5)

# build from lambda functions
squ = lambda x: x**2

# test 'dilled' multiprocessing for inner
# NOTE: all prints take a single argument, so the call form below behaves
# identically under Python 2 and 3 (unlike the bare print statements this
# file mixed with the print() calls used elsewhere in it).
print("Evaluate 10 items on 2 proc:")
pool.ncpus = 2
print(pool)
print(pool.map(add_me, range(10)))
print('')

# test 'dilled' multiprocessing for lambda
print("Evaluate 10 items on 4 proc:")
pool.ncpus = 4
print(pool)
print(pool.map(squ, range(10)))
print('')

# test for lambda, but with threads
print("Evaluate 10 items on 4 threads:")
tpool.nthreads = 4
print(tpool)
#!/usr/bin/env python

from pathos.multiprocessing import ProcessingPool as Pool
from pathos.multiprocessing import ThreadingPool as TPool
pool = Pool()    # process-based pool; pathos serializes with dill, so closures/lambdas can be mapped
tpool = TPool()  # thread-based pool for the threading demo below

def host(id):
    """Return a 'Rank: <id> -- <hostname>' report for the machine running this call."""
    import socket
    machine = socket.gethostname()
    return "Rank: %d -- %s" % (id, machine)


print "Evaluate 10 items on 1 proc"
pool.ncpus = 1
res3 = pool.map(host, range(10))
print pool
print '\n'.join(res3)
print ''

print "Evaluate 10 items on 2 proc"
pool.ncpus = 2
res5 = pool.map(host, range(10))
print pool
print '\n'.join(res5)
print ''

print "Evaluate 10 items on ? proc"
pool.ncpus = None
res7 = pool.map(host, range(10)) 
print pool
print '\n'.join(res7)
        # Count positives the cascade rejected (pred_fc False where label == 1).
        # NOTE(review): assumes train_label uses 1 for the positive class — confirm upstream.
        print("False Negative: %d" % (sum((pred_fc == False) & (train_label == 1))))

    # No trained classifiers available on disk: build filters and train a cascade.
    else:
        print("boosters do not exist")

        print("start finding features")

        # Construct all candidate filters.
        # NOTE(review): GetFilter is defined elsewhere; wdw_stride looks like a
        # window stride and port a size proportion — confirm semantics there.
        wdw_stride = 2
        port = 0.75
        filters = GetFilter(wdw_stride, port)

        # parallel computing setup
        pool = Pool()
        pool.ncpus = 16

        # Train classifiers and dump to file.
        bstrs = open("./boosters.pkl", 'wb')      # sink for trained boosters
        tr_time = open("./train_time.pkl", 'wb')  # sink for per-stage timings
        boosters = []
        num_booster = 4
        # Boolean mask of samples still classified positive by the cascade so far;
        # all 2*n samples start active (n presumably set earlier — not visible here).
        pred_fc = (np.ones((2 * n,)) == 1)
        for i in range(num_booster):
            boosters.append(AFS.AdaFeatureSelection(None, None))
            stime = time()
            # Each stage trains only on the samples that survived all earlier stages.
            boosters[-1].train(train_integral[pred_fc, :, :], train_label[pred_fc], filters, pool)
            pred = boosters[-1].predict(train_integral[pred_fc, :, :])
            # Narrow the active mask to samples this stage still predicts positive.
            pred_fc[pred_fc == True] = (pred == 1)
            duration = (time() - stime) / 60  # minutes
            print("classifier %i: training time %.3f min" % (i+1, duration))
Example #4
0
    return data_channel_default


# Determine dimensions of 2D grid
shape2d = dat3d.shape[1:]
# Every (row, col) index combination on the grid
indexlist = list(it.product(range(shape2d[0]), range(shape2d[1])))
# One work-item dict per grid cell, keyed by its indices
wrapdictlist = [{'indices': indices} for indices in indexlist]

pool = ProcessingPool()
pool.ncpus = 6
output_list = pool.map(wrapperfunction, wrapdictlist)

# Allocate result arrays matching the grid shape
qc_status_int = np.zeros(shape2d, dtype='float')
mk_pvalue = np.zeros(shape2d, dtype='float')
algorithm_succes = np.zeros(shape2d, dtype='int')

# Scatter each per-cell result back onto the grid
for result in output_list:
    row, col = result['indices']
    qc_status_int[row, col] = result['qc_status_int']
    mk_pvalue[row, col] = result['mk_pvalue']
    algorithm_succes[row, col] = result['algorithm_succes']

plt.figure(figsize=(20, 15))
Example #5
0
    def inner(addend):
        return addend + augend + zero[0]

    return inner


# build from inner function
add_me = adder(5)

# build from lambda functions
squ = lambda x: x**2

# test 'dilled' multiprocessing for inner
# NOTE: every print takes a single argument, so the call form is
# behavior-identical under Python 2 and 3 and consistent with the
# print() calls already used elsewhere in this file.
print("Evaluate 10 items on 2 proc:")
pool.ncpus = 2
print(pool)
print(pool.map(add_me, range(10)))
print('')

# test 'dilled' multiprocessing for lambda
print("Evaluate 10 items on 4 proc:")
pool.ncpus = 4
print(pool)
print(pool.map(squ, range(10)))
print('')

# test for lambda, but with threads
print("Evaluate 10 items on 4 threads:")
tpool.nthreads = 4
print(tpool)
Example #6
0
# License: 3-clause BSD.  The full license text is available at:
#  - http://trac.mystic.cacr.caltech.edu/project/pathos/browser/pathos/LICENSE

from pathos.multiprocessing import ProcessingPool as Pool
from pathos.multiprocessing import ThreadingPool as TPool
pool = Pool()    # process-based pool; pathos uses dill, so closures/lambdas can be mapped
tpool = TPool()  # thread-based pool


def host(id):
    """Identify which machine handled the given rank.

    Returns the string 'Rank: <id> -- <hostname>'.
    """
    import socket
    label = socket.gethostname()
    report = "Rank: %d -- %s" % (id, label)
    return report


# Map host() over 10 ranks at several worker counts.  Single-argument
# print() calls behave identically under Python 2 and 3, fixing this
# file's mix of print statements and print() calls.
print("Evaluate 10 items on 1 proc")
pool.ncpus = 1
res3 = pool.map(host, range(10))
print(pool)
print('\n'.join(res3))
print('')

print("Evaluate 10 items on 2 proc")
pool.ncpus = 2
res5 = pool.map(host, range(10))
print(pool)
print('\n'.join(res5))
print('')

print("Evaluate 10 items on ? proc")
pool.ncpus = None  # let pathos choose the default worker count
res7 = pool.map(host, range(10))