def __init__(self, ncpus="autodetect", interface="0.0.0.0",
             broadcast="255.255.255.255", port=None, secret=None,
             timeout=None, loglevel=logging.WARNING, restart=False,
             proto=0):
    """Start a network ppserver listening on *interface*:*port*.

    Parameters mirror the base ``Server`` plus networking options:
    *interface* is the address to bind, *broadcast* the address used for
    auto-discovery announcements, *port* overrides the class default
    port, and *timeout* (seconds) makes the server exit when no client
    connections exist for that long.
    """
    Server.__init__(self, ncpus, secret=secret, loglevel=loglevel,
                    restart=restart, proto=proto)
    self.host = interface
    self.bcast = broadcast
    # Fall back to the class-wide default port when none was supplied.
    self.port = port if port is not None else self.default_port
    self.timeout = timeout
    self.ncon = 0  # number of currently open client connections
    self.last_con_time = time.time()
    self.ncon_lock = six.moves._thread.allocate_lock()
    # Fixed typo in the original message ("Strarting").
    logging.debug("Starting network server interface=%s port=%i"
                  % (self.host, self.port))
    if self.timeout is not None:
        logging.debug("ppserver will exit in %i seconds if no "
                      "connections with clients exist" % (self.timeout))
        # Watchdog thread that enforces the idle timeout.
        six.moves._thread.start_new_thread(self.check_timeout, ())
def __init__(
    self,
    ncpus="autodetect",
    interface="0.0.0.0",
    broadcast="255.255.255.255",
    port=None,
    secret=None,
    timeout=None,
    loglevel=logging.WARNING,
    restart=False,
    proto=0,
):
    """Start a network ppserver listening on *interface*:*port*.

    Same contract as the base ``Server`` constructor, with networking
    extras: *interface* to bind, *broadcast* for discovery broadcasts,
    *port* to override the class default, and *timeout* (seconds of
    client inactivity) after which the server shuts itself down.
    """
    Server.__init__(self, ncpus, secret=secret, loglevel=loglevel,
                    restart=restart, proto=proto)
    self.host = interface
    self.bcast = broadcast
    # Use the class default port unless the caller picked one.
    self.port = port if port is not None else self.default_port
    self.timeout = timeout
    self.ncon = 0  # open client connection count
    self.last_con_time = time.time()
    self.ncon_lock = thread.allocate_lock()
    # Fixed typo in the original message ("Strarting").
    logging.debug("Starting network server interface=%s port=%i"
                  % (self.host, self.port))
    if self.timeout is not None:
        logging.debug("ppserver will exit in %i seconds if no "
                      "connections with clients exist" % (self.timeout))
        # Background watchdog enforcing the idle timeout.
        thread.start_new_thread(self.check_timeout, ())
def execute(processCount):
    """Estimate pi by rectangle integration split across *processCount* jobs.

    Submits one slice per process to a Parallel Python server, sums the
    partial results, prints timing/stats, and leaves the server stats on
    stdout.
    """
    n = 10000000  # 100 times fewer than C due to speed issues.
    delta = 1.0 / n
    startTime = time()
    sliceSize = n / processCount
    server = Server(secret='blahblahblah')
    # Fan out: one job per slice of the integration range.
    jobs = []
    for i in xrange(0, processCount):
        jobs.append(server.submit(processSlice, (i, sliceSize, delta)))
    # Fan in: each job() call blocks until that job's partial sum is ready.
    total = 0.0
    for job in jobs:
        total += job()
    pi = 4.0 * delta * total
    elapseTime = time() - startTime
    out(__file__, pi, n, elapseTime, processCount)
    server.print_stats()
def execute(processCount): n = 10000000 # 100 times fewer than C due to speed issues. delta = 1.0 / n startTime = time() sliceSize = n / processCount server = Server(secret='blahblahblah') jobs = [ server.submit(processSlice, (i, sliceSize, delta)) for i in xrange(0, processCount) ] pi = 4.0 * delta * sum(job() for job in jobs) elapseTime = time() - startTime out(__file__, pi, n, elapseTime, processCount) server.print_stats()
def compute_dup_img(image, height, width, processes):
    """Compute per-pixel duplicate-distance values for *image* in parallel.

    For every pixel a 5x5 neighbourhood of RGB triples is gathered
    (out-of-bounds neighbours become black), the work is split across
    *processes* pp jobs running ``compute_dup_for_lines``, and the
    results are written into a ``(height, width)`` array of float32
    triples.  Returns ``None`` (after destroying the job server and
    logging) if any job fails.
    """
    result = zeros((height, width), dtype=tuple)
    pixels = list()
    row_cols = list()
    for row in range(height):
        for col in range(width):
            pixel = list()
            row_cols.append((row, col))
            # 5x5 neighbourhood centred on (row, col); pixels outside
            # the image contribute black.
            for krow in (-2, -1, 0, 1, 2):
                for kcol in (-2, -1, 0, 1, 2):
                    x = row - krow
                    y = col - kcol
                    if 0 <= x < height and 0 <= y < width:
                        pixel.append((int(image[x, y][0]),
                                      int(image[x, y][1]),
                                      int(image[x, y][2])))
                    else:
                        pixel.append((0, 0, 0))
            pixels.append(pixel)
    job_server = Server(ncpus=processes)
    jobs = []
    start_range = 0
    step = len(pixels) / processes
    for end_range in range(step, len(pixels) + step, step):
        end_range = min(end_range, len(pixels))
        jobs.append(job_server.submit(compute_dup_for_lines,
                                      (range(start_range, end_range),
                                       pixels, row_cols)))
        start_range = end_range
    # Poll until every job has been collected.
    while len(jobs):
        # BUG FIX: iterate over a snapshot.  Removing from the list
        # being iterated (the original `for job in jobs` +
        # `jobs.remove(job)`) skips the element that follows each
        # removed one, delaying result collection.
        for job in jobs[:]:
            if job.finished:
                dup_distanses = job()
                if dup_distanses is None:
                    job_server.destroy()
                    logger.log('Couldn\'t complete one of the jobs', True)
                    return None
                for row, col, value in dup_distanses:
                    result[row, col] = (float32(value), float32(value),
                                        float32(value))
                jobs.remove(job)
    return result
def scaled_img_to_graph(scaled_image, scaled_grad_image, scaled_dup_image,
                        height, width, calc_weight, processes,
                        weight_threshold=WEIGHT_THRESHOLD):
    """Build a pixel-affinity ``Graph`` from the scaled image channels.

    Flattens the three input images into per-pixel lists, then — in 30
    sequential chunks to bound memory — fans the weight computation out
    over *processes* pp jobs running ``calc_weights`` and adds each
    returned edge to the graph.  Returns the finished graph, or ``None``
    (after logging and destroying the job server) if any job fails.
    """
    size = height * width
    graph = Graph(size)
    _add_edge = graph.add_edge  # hoisted bound method for the hot loop
    range_size = range(size)
    scaled_vers = list()
    img_pixels = list()
    grad_img_pixels = list()
    dup_img_pixels = list()
    for p in range_size:
        x, y = get_coords_for_pixel(p, width)
        scaled_vers.append((scale(x, height), scale(y, width)))
        img_pixels.append(scaled_image[x, y])
        grad_img_pixels.append(scaled_grad_image[x, y])
        dup_img_pixels.append(scaled_dup_image[x, y])
    graph.set_diag([1. for i in range_size])
    # Process the pixel range in `times` sequential chunks so the edge
    # lists from finished jobs can be released between chunks.
    times = 30
    new_size = size / times
    for main_part in range(times):
        new_begin = main_part * new_size
        job_server = Server(ncpus=processes)
        jobs = []
        start_range = 0
        step = new_size / processes
        for end_range in range(step, new_size + step, step):
            end_range = min(end_range, new_size)
            jobs.append(job_server.submit(
                calc_weights,
                (calc_weight,
                 range(new_begin + start_range, new_begin + end_range),
                 scaled_vers, img_pixels, grad_img_pixels,
                 dup_img_pixels, weight_threshold),
                modules=('math',),
                depfuncs=(calc_intensity_weight, calc_weight_for_test,
                          calc_texture_weight, calc_duplicate_weight)))
            start_range = end_range
        while len(jobs):
            # BUG FIX: iterate over a snapshot.  `jobs.remove(job)`
            # inside `for job in jobs` skips the element following each
            # removed one, delaying collection of finished jobs.
            for job in jobs[:]:
                if job.finished:
                    edges = job()
                    if edges is None:
                        job_server.destroy()
                        logger.log('Couldn\'t complete one of the jobs', True)
                        return None
                    for pixel1, pixel2, weight in edges:
                        _add_edge(pixel1, pixel2, weight)
                    jobs.remove(job)
                    # Drop the (potentially large) edge list and force a
                    # garbage-collection pass before polling again.
                    # NOTE(review): placement reconstructed from a
                    # collapsed source line — confirm against history.
                    del edges
                    collect()
        job_server.destroy()
    graph.ready()
    return graph