def calc_pure_python(show_output): # make a list of x and y values which will represent q # xx and yy are the co-ordinates, for the default configuration they'll look like: # if we have a 1000x1000 plot # xx = [-2.13, -2.1242, -2.1184000000000003, ..., 0.7526000000000064, 0.7584000000000064, 0.7642000000000064] # yy = [1.3, 1.2948, 1.2895999999999999, ..., -1.2844000000000058, -1.2896000000000059, -1.294800000000006] x_step = (float(x2 - x1) / float(w)) * 2 y_step = (float(y1 - y2) / float(h)) * 2 x = [] y = [] ycoord = y2 while ycoord > y1: y.append(ycoord) ycoord += y_step xcoord = x1 while xcoord < x2: x.append(xcoord) xcoord += x_step q = [] for ycoord in y: for xcoord in x: q.append(complex(xcoord, ycoord)) print "Total elements:", len(q) # split work list into continguous chunks, one per CPU # build this into chunks which we'll apply to map_async nbr_chunks = 1 # experiment with different nbrs of chunks #nbr_chunks = multiprocessing.cpu_count() print "Multiprocessing using {} chunks of data".format(nbr_chunks) chunk_size = len(q) / nbr_chunks # split our long work list into smaller chunks # make sure we handle the edge case where nbr_chunks doesn't evenly fit into len(q) if len(q) % nbr_chunks != 0: # make sure we get the last few items of data when we have # an odd size to chunks (e.g. len(q) == 100 and nbr_chunks == 3 nbr_chunks += 1 chunks = [(q[x * chunk_size:(x + 1) * chunk_size], maxiter) for x in xrange(nbr_chunks)] print chunk_size, len(chunks), len(chunks[0][0]) # STUDENTS TO SOLVE THIS SECTION # HERE end_time = datetime.datetime.now() secs = end_time - start_time print "Main took", secs validation_sum = sum(output) print "Total sum of elements (for validation):", validation_sum if show_output: if SHOW_IN_3D: plotting.show_3D(output) else: plotting.show_2D(output) return validation_sum
def calc_pure_python(show_output): # make a list of x and y values which will represent q # xx and yy are the co-ordinates, for the default configuration they'll look like: # if we have a 1000x1000 plot # xx = [-2.13, -2.1242, -2.1184000000000003, ..., 0.7526000000000064, 0.7584000000000064, 0.7642000000000064] # yy = [1.3, 1.2948, 1.2895999999999999, ..., -1.2844000000000058, -1.2896000000000059, -1.294800000000006] x_step = (float(x2 - x1) / float(w)) * 2 y_step = (float(y1 - y2) / float(h)) * 2 x = [] y = [] ycoord = y2 while ycoord > y1: y.append(ycoord) ycoord += y_step xcoord = x1 while xcoord < x2: x.append(xcoord) xcoord += x_step q = [] for ycoord in y: for xcoord in x: q.append(complex(xcoord, ycoord)) print "Total elements:", len(q) # split work list into continguous chunks, one per CPU # build this into chunks which we'll apply to map_async nbr_chunks = 1 # experiment with different nbrs of chunks #nbr_chunks = multiprocessing.cpu_count() print "ParallelPython using {} chunks of data".format(nbr_chunks) chunk_size = len(q) / nbr_chunks # split our long work list into smaller chunks # make sure we handle the edge case where nbr_chunks doesn't evenly fit into len(q) if len(q) % nbr_chunks != 0: # make sure we get the last few items of data when we have # an odd size to chunks (e.g. len(q) == 100 and nbr_chunks == 3 nbr_chunks += 1 chunks = [(q[x * chunk_size:(x + 1) * chunk_size], maxiter) for x in xrange(nbr_chunks)] print chunk_size, len(chunks), len(chunks[0][0]) start_time = datetime.datetime.now() # STUDENTS TO SOLVE THIS SECTION # HERE end_time = datetime.datetime.now() secs = end_time - start_time print "Main took", secs validation_sum = sum(output) print "Total sum of elements (for validation):", validation_sum if show_output: if SHOW_IN_3D: plotting.show_3D(output) else: plotting.show_2D(output) return validation_sum
def calc_pure_python(show_output): # make a list of x and y values which will represent q # xx and yy are the co-ordinates, for the default configuration they'll look like: # if we have a 1000x1000 plot # xx = [-2.13, -2.1242, -2.1184000000000003, ..., 0.7526000000000064, 0.7584000000000064, 0.7642000000000064] # yy = [1.3, 1.2948, 1.2895999999999999, ..., -1.2844000000000058, -1.2896000000000059, -1.294800000000006] x_step = (float(x2 - x1) / float(w)) * 2 y_step = (float(y1 - y2) / float(h)) * 2 x = [] y = [] ycoord = y2 while ycoord > y1: y.append(ycoord) ycoord += y_step xcoord = x1 while xcoord < x2: x.append(xcoord) xcoord += x_step q = [] for ycoord in y: for xcoord in x: q.append(complex(xcoord, ycoord)) start_time = datetime.datetime.now() output = calculate_z(q, maxiter) end_time = datetime.datetime.now() secs = end_time - start_time print "Main took", secs print "Total elements:", len(q) validation_sum = sum(output) print "Total sum of elements (for validation):", validation_sum if show_output: if SHOW_IN_3D: plotting.show_3D(output) else: plotting.show_2D(output) return validation_sum
def calc_pure_python(show_output): # make a list of x and y values which will represent q # xx and yy are the co-ordinates, for the default configuration they'll look like: # if we have a 1000x1000 plot # xx = [-2.13, -2.1242, -2.1184000000000003, ..., 0.7526000000000064, 0.7584000000000064, 0.7642000000000064] # yy = [1.3, 1.2948, 1.2895999999999999, ..., -1.2844000000000058, -1.2896000000000059, -1.294800000000006] x_step = (float(x2 - x1) / float(w)) * 2 y_step = (float(y1 - y2) / float(h)) * 2 x = [] y = [] ycoord = y2 while ycoord > y1: y.append(ycoord) ycoord += y_step xcoord = x1 while xcoord < x2: x.append(xcoord) xcoord += x_step q = [] for ycoord in y: for xcoord in x: q.append(complex(xcoord, ycoord)) print "Total elements:", len(q) start_time = datetime.datetime.now() output = calculate_z(q, maxiter) end_time = datetime.datetime.now() secs = end_time - start_time print "Main took", secs validation_sum = sum(output) print "Total sum of elements (for validation):", validation_sum if show_output: if SHOW_IN_3D: plotting.show_3D(output) else: plotting.show_2D(output) return validation_sum
def calc_pure_python(show_output): # make a list of x and y values which will represent q # xx and yy are the co-ordinates, for the default configuration they'll look like: # if we have a 1000x1000 plot # xx = [-2.13, -2.1242, -2.1184000000000003, ..., 0.7526000000000064, 0.7584000000000064, 0.7642000000000064] # yy = [1.3, 1.2948, 1.2895999999999999, ..., -1.2844000000000058, -1.2896000000000059, -1.294800000000006] x_step = (float(x2 - x1) / float(w)) * 2 y_step = (float(y1 - y2) / float(h)) * 2 x = [] y = [] ycoord = y2 while ycoord > y1: y.append(ycoord) ycoord += y_step xcoord = x1 while xcoord < x2: x.append(xcoord) xcoord += x_step q = [] for ycoord in y: for xcoord in x: q.append(complex(xcoord, ycoord)) print "Total elements:", len(q) # split work list into continguous chunks, one per CPU # build this into chunks which we'll apply to map_async nbr_chunks = 1 # experiment with different nbrs of chunks #nbr_chunks = multiprocessing.cpu_count() print "ParallelPython using {} chunks of data".format(nbr_chunks) chunk_size = len(q) / nbr_chunks # split our long work list into smaller chunks # make sure we handle the edge case where nbr_chunks doesn't evenly fit into len(q) if len(q) % nbr_chunks != 0: # make sure we get the last few items of data when we have # an odd size to chunks (e.g. 
len(q) == 100 and nbr_chunks == 3 nbr_chunks += 1 chunks = [(q[x * chunk_size:(x + 1) * chunk_size], maxiter) for x in xrange(nbr_chunks)] print chunk_size, len(chunks), len(chunks[0][0]) start_time = datetime.datetime.now() # create a Pool which will create Python processes # tuple of all parallel python servers to connect with ppservers = () # use this machine # I can't get autodiscover to work at home #ppservers=("*",) # autodiscover on network job_server = pp.Server(ppservers=ppservers) # it'll autodiscover the nbr of cpus it can use if first arg not specified print "Starting pp with", job_server.get_ncpus(), "local CPU workers" output = [] jobs = [] for chunk in chunks: print "Submitting job with len(q) {}".format(len(chunk[0])) job = job_server.submit(calculate_z, (chunk,), (), ()) jobs.append(job) for job in jobs: output_job = job() output += output_job # print statistics about the run print job_server.print_stats() end_time = datetime.datetime.now() secs = end_time - start_time print "Main took", secs validation_sum = sum(output) print "Total sum of elements (for validation):", validation_sum if show_output: if SHOW_IN_3D: plotting.show_3D(output) else: plotting.show_2D(output) return validation_sum
def calc_pure_python(show_output): # make a list of x and y values which will represent q # xx and yy are the co-ordinates, for the default configuration they'll look like: # if we have a 1000x1000 plot # xx = [-2.13, -2.1242, -2.1184000000000003, ..., 0.7526000000000064, 0.7584000000000064, 0.7642000000000064] # yy = [1.3, 1.2948, 1.2895999999999999, ..., -1.2844000000000058, -1.2896000000000059, -1.294800000000006] x_step = (float(x2 - x1) / float(w)) * 2 y_step = (float(y1 - y2) / float(h)) * 2 x = [] y = [] ycoord = y2 while ycoord > y1: y.append(ycoord) ycoord += y_step xcoord = x1 while xcoord < x2: x.append(xcoord) xcoord += x_step q = [] for ycoord in y: for xcoord in x: q.append(complex(xcoord, ycoord)) print "Total elements:", len(q) # split work list into continguous chunks, one per CPU # build this into chunks which we'll apply to map_async nbr_chunks = 1 # experiment with different nbrs of chunks #nbr_chunks = multiprocessing.cpu_count() print "ParallelPython using {} chunks of data".format(nbr_chunks) chunk_size = len(q) / nbr_chunks # split our long work list into smaller chunks # make sure we handle the edge case where nbr_chunks doesn't evenly fit into len(q) if len(q) % nbr_chunks != 0: # make sure we get the last few items of data when we have # an odd size to chunks (e.g. 
len(q) == 100 and nbr_chunks == 3 nbr_chunks += 1 chunks = [(q[x * chunk_size:(x + 1) * chunk_size], maxiter) for x in xrange(nbr_chunks)] print chunk_size, len(chunks), len(chunks[0][0]) start_time = datetime.datetime.now() # tuple of all parallel python servers to connect with # for localhost with 0 LOCAL_CPUS run 'ppserver.py -d -a' in another terminal # create a Pool which will create Python processes # tuple of all parallel python servers to connect with ppservers = ("*", ) # autodiscover on network NBR_LOCAL_CPUS = 0 # if 0, it sends jobs out to other ppservers # use 'ifconfig' on linux/mac to get IP addresses #ppservers = ('192.168.0.2',) # use this machine job_server = pp.Server(NBR_LOCAL_CPUS, ppservers=ppservers) # it'll autodiscover the nbr of cpus it can use if first arg not specified print "Starting pp with", job_server.get_ncpus(), "local CPU workers" output = [] jobs = [] for chunk in chunks: print "Submitting job with len(q) {}".format(len(chunk[0])) job = job_server.submit(calculate_z, (chunk, ), (), ()) jobs.append(job) for job in jobs: output_job = job() output += output_job # print statistics about the run print job_server.print_stats() end_time = datetime.datetime.now() secs = end_time - start_time print "Main took", secs validation_sum = sum(output) print "Total sum of elements (for validation):", validation_sum if show_output: if SHOW_IN_3D: plotting.show_3D(output) else: plotting.show_2D(output) return validation_sum
def calc_pure_python(show_output): # make a list of x and y values which will represent q # xx and yy are the co-ordinates, for the default configuration they'll look like: # if we have a 1000x1000 plot # xx = [-2.13, -2.1242, -2.1184000000000003, ..., 0.7526000000000064, 0.7584000000000064, 0.7642000000000064] # yy = [1.3, 1.2948, 1.2895999999999999, ..., -1.2844000000000058, -1.2896000000000059, -1.294800000000006] x_step = (float(x2 - x1) / float(w)) * 2 y_step = (float(y1 - y2) / float(h)) * 2 x = [] y = [] ycoord = y2 while ycoord > y1: y.append(ycoord) ycoord += y_step xcoord = x1 while xcoord < x2: x.append(xcoord) xcoord += x_step q = [] for ycoord in y: for xcoord in x: q.append(complex(xcoord, ycoord)) print "Total elements:", len(q) # split work list into continguous chunks, one per CPU # build this into chunks which we'll apply to map_async nbr_chunks = 1 # experiment with different nbrs of chunks #nbr_chunks = multiprocessing.cpu_count() print "Multiprocessing using {} chunks of data".format(nbr_chunks) chunk_size = len(q) / nbr_chunks # split our long work list into smaller chunks # make sure we handle the edge case where nbr_chunks doesn't evenly fit into len(q) if len(q) % nbr_chunks != 0: # make sure we get the last few items of data when we have # an odd size to chunks (e.g. 
len(q) == 100 and nbr_chunks == 3 nbr_chunks += 1 chunks = [(q[x * chunk_size:(x + 1) * chunk_size], maxiter) for x in xrange(nbr_chunks)] print chunk_size, len(chunks), len(chunks[0][0]) # create a Pool which will create Python processes p = multiprocessing.Pool() start_time = datetime.datetime.now() # send out the work chunks to the Pool # po is a multiprocessing.pool.MapResult po = p.map_async(calculate_z, chunks) # we get a list of lists back, one per chunk, so we have to # flatten them back together # po.get() will block until results are ready and then # return a list of lists of results results = po.get() # [[ints...], [ints...], []] output = [] for res in results: output += res end_time = datetime.datetime.now() secs = end_time - start_time print "Main took", secs validation_sum = sum(output) print "Total sum of elements (for validation):", validation_sum if show_output: if SHOW_IN_3D: plotting.show_3D(output) else: plotting.show_2D(output) return validation_sum
def calc_pure_python(show_output, args):
    """Redis HotQueue run: server posts chunks to hq_in, workers reply on hq_out.

    With args.server True this process builds q, enqueues chunks and collects
    results; otherwise it loops as a worker consuming hq_in.  Grid bounds
    (x1, x2, y1, y2), plot dimensions (w, h) and maxiter are module-level
    globals; hq_in/hq_out are module-level HotQueue instances.
    In server mode returns sum(output) for validation; worker mode returns None.
    """
    # make a list of x and y values which will represent q
    # xx and yy are the co-ordinates, for the default configuration they'll look like:
    # if we have a 1000x1000 plot
    # xx = [-2.13, -2.1242, -2.1184000000000003, ..., 0.7526000000000064, 0.7584000000000064, 0.7642000000000064]
    # yy = [1.3, 1.2948, 1.2895999999999999, ..., -1.2844000000000058, -1.2896000000000059, -1.294800000000006]
    x_step = (float(x2 - x1) / float(w)) * 2
    y_step = (float(y1 - y2) / float(h)) * 2
    x = []
    y = []
    ycoord = y2
    while ycoord > y1:
        y.append(ycoord)
        ycoord += y_step
    xcoord = x1
    while xcoord < x2:
        x.append(xcoord)
        xcoord += x_step
    q = []
    for ycoord in y:
        for xcoord in x:
            q.append(complex(xcoord, ycoord))
    print "Total elements:", len(q)

    # split work list into continguous chunks, one per CPU
    # build this into chunks which we'll apply to map_async
    nbr_chunks = 10  # experiment with different nbrs of chunks
    print "Redis HotQueue using {} chunks of data".format(nbr_chunks)
    chunk_size = len(q) / nbr_chunks  # Python 2 integer division
    # split our long work list into smaller chunks
    # make sure we handle the edge case where nbr_chunks doesn't evenly fit into len(q)
    if len(q) % nbr_chunks != 0:
        # make sure we get the last few items of data when we have
        # an odd size to chunks (e.g. len(q) == 100 and nbr_chunks == 3)
        nbr_chunks += 1
    chunks = [(q[x * chunk_size:(x + 1) * chunk_size], maxiter) for x in xrange(nbr_chunks)]
    print chunk_size, len(chunks), len(chunks[0][0])

    start_time = datetime.datetime.now()
    if args.server:
        # --- server mode: produce work, then gather and reassemble results ---
        # reset queues so stale items from a previous run can't pollute this one
        hq_in.clear()
        hq_out.clear()
        # post (chunk_id, chunk) pairs to the hq_in HotQueue; the id lets us
        # restore ordering later since workers may finish out of order
        for chunk_id, chunk in enumerate(chunks):
            print "Submitting job with len(q) {}".format(len(chunk[0]))
            hq_in.put((chunk_id, chunk))
        # poll until every chunk's result has appeared on hq_out
        while len(hq_out) != len(chunks):
            if len(hq_out) > 0:
                print "We can see {} computed chunks".format(len(hq_out))
            time.sleep(0.1)
        # the chunks may have been posted out of order
        results = []
        while len(hq_out) > 0:
            results.append(hq_out.get())
        # tuples sort by chunk_id first, restoring original chunk order
        results.sort()
        # reassemble the chunks into one flat output list
        output = []
        for chunk_id, res in results:
            output += res
        end_time = datetime.datetime.now()
        secs = end_time - start_time
        print "Main took", secs
        validation_sum = sum(output)
        print "Total sum of elements (for validation):", validation_sum
        if show_output:
            if SHOW_IN_3D:
                plotting.show_3D(output)
            else:
                plotting.show_2D(output)
        return validation_sum
    else:
        # --- worker mode: consume work and post partial results into hq_out ---
        # wait for the server to publish some work before starting
        while len(hq_in) == 0:
            time.sleep(0.01)
        # drain the queue; exits once no work is visible
        # NOTE(review): a worker that empties hq_in between another worker's
        # len check and get could make that get block — presumably acceptable
        # here since get(block=True) waits for more work; confirm with HotQueue docs
        while len(hq_in) > 0:
            print "{} chunks of work to do".format(len(hq_in))
            chunk_id, chunk = hq_in.get(block=True)
            result = calculate_z(chunk)
            hq_out.put((chunk_id, result))