Example #1
import sys

import numpy as np
from schwimmbad import MPIPool


def main():

    # Initialize the MPI pool
    pool = MPIPool()

    # Make sure we only run map() on the master process
    if not pool.is_master():
        pool.wait()
        sys.exit(0)

    # create some random input data
    x = np.random.uniform(size=10000)
    y = np.random.uniform(size=10000)
    tasks = np.vstack((x, y)).T

    vals = pool.map(worker, tasks)

    pool.close()
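
Example #1 never calls main(), and both examples on this page assume a worker function defined elsewhere. A minimal sketch, with an illustrative worker body and entry point (not from the original code):

def worker(task):
    # unpack one (x, y) pair and return a value derived from it
    x, y = task
    return x**2 + y**2


if __name__ == "__main__":
    main()

The script must then be started through an MPI launcher so that multiple processes exist, for example:

mpiexec -n 4 python script.py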

Example #2

import sys

import numpy as np
from schwimmbad import MPIPool

# create the pool
pool = MPIPool()

# only run map() on the master process; all other processes wait for their work
if not pool.is_master():
    pool.wait()
    # worker processes exit after they have done their work
    sys.exit(0)

# the following code is executed by the master process only
# create some random input data
x = np.random.uniform(size=10)
y = np.random.uniform(size=10)
tasks = list(zip(x, y))


# create a callback function
def cb(x):
    print(x)


# map the function worker to tasks
# and execute them in parallel on processes other than the master
results = pool.map(worker, tasks, callback=cb)

# close the pool
pool.close()

print('results:', results)
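
Here pool.wait() parks the worker processes until the master hands out tasks via pool.map(), and the callback cb is invoked on the master with each result. If the same script should also be able to run serially or with multiprocessing, schwimmbad's choose_pool helper picks the pool type at runtime; a minimal sketch, where use_mpi and n_processes are assumed to be command-line options defined elsewhere:

from schwimmbad import choose_pool

# returns a SerialPool, MultiPool, or MPIPool depending on the flags
pool = choose_pool(mpi=use_mpi, processes=n_processes)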
Example #3

import os
from datetime import datetime

import numpy as np
from pandas import DataFrame

            for stat in output:
                # store a (value, filename1, filename2) tuple per statistic
                stats_dict[stat].append((output[stat],
                                         os.path.basename(name1),
                                         os.path.basename(name2)))

print("Finished designs at {}".format(datetime.now()))

# Convert to normal lists
if multiprocess:
    for stat in stats_dict:
        stats_dict[stat] = list(stats_dict[stat])

# Add the filename columns. The list order should be preserved since we're not
# using asynchronous mapping. Iterate over a snapshot of the keys because new
# keys are added inside the loop.
for i, stat in enumerate(list(stats_dict.keys())):
    results_arr = np.array(stats_dict[stat]).T
    if i == 0:
        stats_dict["Data 1"] = results_arr[1]
        stats_dict["Data 2"] = results_arr[2]
    stats_dict[stat] = results_arr[0].astype(float)

# Now save the results
df = DataFrame(stats_dict)
df.to_csv(os.path.join(results_dir,
                       "view_angle_comparison_{}.csv".format(comp)))

if multiprocess:
    pool.close()

print("Done at {}".format(datetime.now()))