def run_boids(dims, boid_q, big_boid_q, is_running):
    global __is_running
    __is_running = True

    boids, big_boids = create_boids(dims)

    # Increase to have more frames per velocity change.
    # This slows down and smooths the visualisation.
    smoothness = 3

    t = SimpleTimer()
    i = 0
    # Number of iterations after which to reset the target the boids move towards.
    # Needs to happen more often in 2D than in 3D.
    new_target_iter = dims * 10

    while __is_running and is_running.value:
        # Apply the rules that govern velocity.
        boids.update_velocity()
        big_boids.update_velocity()

        for _ in xrange(smoothness):
            # Move with a fixed velocity.
            boids.move(1.0 / smoothness)
            big_boids.move(1.0 / smoothness)

            # Copy the boids data structure to avoid simultaneous modification.
            t.print_time("putting boids (copies) in queue")
            boid_q.put(boids.copy())
            big_boid_q.put(big_boids.copy())

        i += 1
        if i % new_target_iter == 0:
            t.print_time("set new position")
            boids.set_random_direction()
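
# Usage sketch (illustrative, not from the original source): one way to drive
# run_boids() from a parent process. It assumes run_boids and the create_boids /
# SimpleTimer helpers it relies on are importable from this module; the consumer
# below simply drains a fixed number of frames and then flips the shared flag.
import multiprocessing as mp

def example_run_boids_driver(num_frames=100, dims=3):
    boid_q = mp.Queue()
    big_boid_q = mp.Queue()
    is_running = mp.Value('b', True)

    sim = mp.Process(target=run_boids, args=(dims, boid_q, big_boid_q, is_running))
    sim.start()
    try:
        for _ in range(num_frames):
            boids = boid_q.get()          # blocks until the next frame copy arrives
            big_boids = big_boid_q.get()
            # ... hand the copies to a viewer here ...
    finally:
        is_running.value = False          # ask the simulation loop to exit
        while not boid_q.empty():         # drain leftovers so the child can flush and exit
            boid_q.get()
            big_boid_q.get()
        sim.join()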
def compress_binary_data(data):
    # 0x0: Get the length of the data.
    length_of_data = len(data)

    number_of_zeros = 0
    number_of_ones = 0
    for b in data:
        if b == 1:
            number_of_ones += 1
        else:
            number_of_zeros += 1
    print('Number of 0s: ' + str(number_of_zeros))
    print('Number of 1s: ' + str(number_of_ones))

    percentage_of_digits_hit_needed = 0.15

    timer = SimpleTimer()

    print('Obtaining linear functions...', end='')
    timer.start_timer()
    linear_functions = linear.get_formulas_for_linear_functions(
        percentage_of_digits_hit_needed, data, length_of_data, number_of_ones)
    timer.end_timer()
    timer.print_time()

    print('Obtaining quadratic functions...', end='')
    timer.start_timer()
    quadratic_functions = quadratic.get_formulas_for_quadratic_functions(
        percentage_of_digits_hit_needed, data, length_of_data, number_of_ones)
    timer.end_timer()
    timer.print_time()

    all_functions = []
    print('Simplifying the linear functions.')
    print('Functions that work: ')
    for f in linear_functions:
        print(f)
        all_functions.append(f)
    for f in quadratic_functions:
        print(f)
        all_functions.append(f)

    print('Testing all combinations of functions to make a universal one...')
    # Placeholder return value; the combined encoding is not implemented yet.
    return '1 LOLOL'
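
# Usage sketch (illustrative, not from the original source): feed the compressor a
# small bit sequence. It assumes the `linear` and `quadratic` helper modules and
# SimpleTimer used above are importable; the return value is currently a placeholder
# string rather than a real compressed form.
def example_compress():
    sample_bits = [1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1]
    compressed = compress_binary_data(sample_bits)
    print('Compressor returned: ' + compressed)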
class BoidSimulation(worker.Worker):
    def init(self):
        self.t = SimpleTimer(use_process_name=True)
        # Number of iterations after which to reset the target the boids move towards.
        # Needs to happen more often in 2D than in 3D.
        self.new_target_iter = 45
        self.i = 0
        self.init_boids()
        self.worker.add_result('boids', self.boids.copy())
        self.worker.add_result('big_boids', self.big_boids.copy())

    def init_boids(self):
        N_BIG_BOIDS = 0
        self.boids, self.big_boids = create_boids_3D(num_boids, N_BIG_BOIDS, use_process=True)

    def iteration(self, input, input_nowait):
        self.t.print_time("viewer.run_boids(): top of loop")

        if 'escape' in input_nowait:
            escapes = input_nowait['escape']
            for near, far in escapes:
                self.boids.add_escapes_between(near, far)
            if len(escapes) > 0:
                self.i = 0

        self.boids.move(1.0)
        self.big_boids.move(1.0)
        self.t.print_time("viewer.run_boids(): moved boids")

        self.boids.update_velocity()
        self.big_boids.update_velocity()
        self.t.print_time("viewer.run_boids(): velocity computed")

        self.i += 1
        if self.i % self.new_target_iter == 0:
            self.boids.clear_escapes()

        self.t.print_time("viewer.run_boids(): placed boids (copies) in queue")
        return {'boids': self.boids.copy(), 'big_boids': self.big_boids.copy()}

    def finalize(self):
        print "finalizing boids"
        self.boids.finalize()
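
# Usage sketch (illustrative, not from the original source): wrap BoidSimulation in a
# WorkerProcess, mirroring the ShadowBoidSimulation call further down. A queue size of
# 0 marks a no-wait queue, so 'escape' events are picked up whenever they arrive; the
# result-queue sizes here are assumptions, not values taken from the original code.
def example_boid_worker():
    bds = worker.WorkerProcess('boids', BoidSimulation(),
                               {'escape': 0},               # no-wait input queue for escape events
                               {'boids': 2, 'big_boids': 2})
    boids = bds.get_result('boids')
    big_boids = bds.get_result('big_boids')
    # ... run the viewer loop against bds here ...
    bds.finalize()
    return boids, big_boids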
def run_server_monitoring():
    """Checks the CPU and memory usage of all designated servers every X seconds.

    :return: Void.
    """
    database_api = DatabaseSimpleCommands()
    timer = SimpleTimer()

    pulse_rate_in_seconds = 3.0
    start_time = time.time()

    global keep_monitoring_servers
    while keep_monitoring_servers:
        timer.start_timer()

        # CPU/memory monitoring happens here.
        global all_servers
        for server in all_servers:
            # Only poll the server if it is alive.
            if server.get_is_alive():
                # From top's man page:
                #   -n  Number-of-iterations limit: the maximum number of iterations
                #       (frames) top should produce before ending.
                #   -b  Batch-mode operation: useful for sending output from top to
                #       other programs or to a file. In this mode top accepts no input
                #       and runs until the '-n' iteration limit is reached or it is killed.
                #   -p  Monitor-PIDs mode: monitor only processes with the specified
                #       process IDs (up to 20, as repeated options or a comma-delimited
                #       list). A pid of zero means top itself. To return to normal
                #       operation use the interactive commands '=', 'u' or 'U'; the
                #       'p', 'u' and 'U' command-line options are mutually exclusive.
                p = subprocess.Popen(['top', '-p', str(server.get_pid()), '-n', '1', '-b'],
                                     stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                out, err = p.communicate()
                top_output = out.decode('ascii')

                if 'utarsuno' in top_output:
                    output_split = top_output[top_output.index('utarsuno'):len(top_output) - 1].split()
                    database_api.insert_row_into_table(
                        daa.GLOBAL_TABLE_SERVER_MONITOR.get_table_name(),
                        daa.GLOBAL_TABLE_SERVER_MONITOR.get_all_column_names(),
                        ["'" + server.get_name() + "'",
                         "'" + simple_timer.get_now_timestamp_as_string() + "'",
                         str(output_split[7]), str(output_split[8]), 'true'])
                else:
                    # The server is dead here.
                    server.set_is_alive(False)
                    database_api.insert_row_into_table(
                        daa.GLOBAL_TABLE_SERVER_MONITOR.get_table_name(),
                        daa.GLOBAL_TABLE_SERVER_MONITOR.get_all_column_names(),
                        ["'" + server.get_name() + "'",
                         "'" + simple_timer.get_now_timestamp_as_string() + "'",
                         '0.0', '0.0', 'false'])
            else:
                # The server is dead here.
                database_api.insert_row_into_table(
                    daa.GLOBAL_TABLE_SERVER_MONITOR.get_table_name(),
                    daa.GLOBAL_TABLE_SERVER_MONITOR.get_all_column_names(),
                    ["'" + server.get_name() + "'",
                     "'" + simple_timer.get_now_timestamp_as_string() + "'",
                     '0.0', '0.0', 'false'])
                # if server == scraper_server:
                #     print('Turning on the scraper server.')
                #     scraper_server.launch_server()

        timer.end_timer()
        timer.print_time()
        time.sleep(pulse_rate_in_seconds - ((time.time() - start_time) % pulse_rate_in_seconds))

    database_api.terminate()
    print('Server monitoring has terminated!')
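
# Usage sketch (illustrative, not from the original source): run the monitor in a
# background thread and stop it by clearing the module-level flag it polls. The
# `all_servers` list and `keep_monitoring_servers` flag are assumed to be defined
# at module level, as the function above implies.
import threading
import time

def example_monitor_for(duration_in_seconds):
    global keep_monitoring_servers
    keep_monitoring_servers = True
    monitor_thread = threading.Thread(target=run_server_monitoring)
    monitor_thread.start()
    time.sleep(duration_in_seconds)
    keep_monitoring_servers = False       # the loop checks this flag once per pulse
    monitor_thread.join()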
big_boids = bds.get_result('big_boids')

if with_shadow_model:
    shadow_bds = worker.WorkerProcess('unaltered boids',
                                      ShadowBoidSimulation(boids, big_boids),
                                      {}, {'boids': 2, 'big_boids': 2})
    shadow_boids = shadow_bds.get_result('boids')
    shadow_big_boids = shadow_bds.get_result('big_boids')
else:
    shadow_boids = shadow_big_boids = None

t = SimpleTimer(name="main")
t.print_time('Starting 3D interface')
glgame = GLPyGame3D(settings, interactions_file)

while bds.continue_run():
    t.print_time('calling process_events()')
    points = process_events(glgame, bds, boids, big_boids, shadow_boids, shadow_big_boids)

    if glgame.animate and bds.continue_run():
        t.print_time('getting boids from queue')
        boids = bds.get_result('boids')
        big_boids = bds.get_result('big_boids')
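
# Sketch (illustrative, not from the original source): how a viewer event can be fed
# back into the simulation through the no-wait 'escape' queue. BoidSimulation.iteration()
# above consumes (near, far) pairs from input_nowait['escape']; `near` and `far` stand
# for whatever ray endpoints process_events() produces.
def example_send_escape(bds, near, far):
    if bds.continue_run():
        bds.add_input_nowait('escape', (near, far))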
def calculate_velocity(self):
    t = SimpleTimer(use_process_name=True)
    tmp_velocity = np.zeros((self.size, self.dimensions))

    # Accessing the property forces the adjacency list to be (re)computed.
    self.adjacency_list
    t.print_time("computed adjacency list")

    if self.use_process:
        self.velocity_worker.add_input('matrix', (self.velocity, self.adjacency_list))
        self.position_worker.add_input('matrix', (self.position, self.adjacency_list))
        t.print_time("sent message to rule process")
    else:
        tmp_velocity += self.converge_velocity_neighbors()
        t.print_time("converge velocity")
        tmp_velocity += self.converge_position_neighbors()
        t.print_time("converge position")

    tmp_velocity += self.avoid_neighbors()
    t.print_time("avoid neighbor")
    # self.velocity += self.approach_position(self.center, self.rule1_factor)

    if self.enforce_bounds:
        tmp_velocity += self.ruleBounds()
        t.print_time("avoid bounds")

    if self.in_random_direction:
        tmp_velocity += self.ruleDirection()
        t.print_time("random direction")

    for e in self.escapes:
        tmp_velocity += self.escape_position(e, self.escape_threshold)
    t.print_time("avoid escapes")

    for bb in self.big_boids.position:
        tmp_velocity += self.escape_position(bb, self.escape_threshold)
    # self.velocity += (np.random.random((self.size, self.dimensions)) - 0.5) * 0.00005

    if self.use_process:
        tmp_velocity += self.velocity_worker.get_result('converged')
        tmp_velocity += self.position_worker.get_result('converged')
        t.print_time("applied process results")

    self.velocity += tmp_velocity
    self.apply_min_max_velocity()
    t.print_time("apply min max")

    self.update_redness()
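
# Sketch (illustrative, not from the original source): one way a neighbour rule such as
# converge_position_neighbors() could be written, assuming self.position is an (N, dims)
# array and self.adjacency_list is an (N, N) boolean neighbour matrix. The 0.01 gain is
# an arbitrary placeholder, not a value from the original code.
import numpy as np

def converge_position_neighbors_sketch(position, adjacency, factor=0.01):
    neighbor_counts = np.maximum(adjacency.sum(axis=1, keepdims=True), 1)
    neighbor_means = adjacency.astype(float).dot(position) / neighbor_counts  # mean neighbour position per boid
    return (neighbor_means - position) * factor                               # steer toward the local centre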
class WorkerClient(object):
    def __init__(self, target, input_queues, nowait_queues, result_queues, is_running, *queues):
        self.target = target
        self.input_queues = input_queues
        self.nowait_queues = nowait_queues
        self.result_queues = result_queues
        self.is_running = is_running
        self.t = SimpleTimer(use_process_name=True)

    def work(self):
        self.t.reset()
        self.target.setWorkerClient(self)
        self.target.init()
        try:
            self.target.run()
        except Exception as e:
            print e

        self.t.print_time("finished running")
        # Empty the input queues, then close the result queues.
        clear_queues(self.input_queues)
        clear_queues(self.nowait_queues)
        close_queues(self.result_queues)
        self.t.print_time("finalizing target")
        self.target.finalize()
        self.t.print_time("finalized target")

    def get_all_input(self):
        return dict((k, self.get_input(k)) for k in self.input_queues.keys())

    def get_all_nowait(self):
        nowait_inputs = {}
        for key in self.nowait_queues.keys():
            nowait_inputs[key] = []
            try:
                while True:
                    value = self.get_input_nowait(key)
                    if value is None:
                        nowait_inputs[key] = None
                        break
                    nowait_inputs[key].append(value)
            except:
                # The queue is empty; move on to the next one.
                continue
        return nowait_inputs

    def has_input_queue(self, queue):
        return queue in self.input_queues

    def has_input(self, queue):
        return not self.input_queues[queue].empty()

    def add_result(self, queue, value):
        self.result_queues[queue].put(value)

    def add_all_results(self, results):
        for queue, result in results.iteritems():
            self.add_result(queue, result)

    def get_input(self, queue):
        value = self.input_queues[queue].get()
        # A None value ends the stream; drop the queue so it is not finalized again.
        if value is None:
            del self.input_queues[queue]
        return value

    def get_input_nowait(self, queue):
        value = self.nowait_queues[queue].get_nowait()
        # A None value ends the stream; drop the queue so it is not finalized again.
        if value is None:
            del self.nowait_queues[queue]
        return value

    def continue_run(self):
        return self.is_running.value
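
# Sketch (illustrative, not from the original source): the glue that WorkerProcess
# below appears to rely on. do_work() is the child-process entry point passed to
# mp.Process(target=do_work, ...); the Worker base class shows one plausible run()
# loop matching how BoidSimulation implements init()/iteration()/finalize().
def do_work(*args):
    WorkerClient(*args).work()

class Worker(object):
    def setWorkerClient(self, worker):
        self.worker = worker

    def init(self):
        pass

    def run(self):
        while self.worker.continue_run():
            input = self.worker.get_all_input()
            input_nowait = self.worker.get_all_nowait()
            results = self.iteration(input, input_nowait)
            if results:
                self.worker.add_all_results(results)

    def finalize(self):
        pass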
class WorkerProcess(object):
    def __init__(self, pname, target, input_queue_args, result_queue_args):
        self.t = SimpleTimer(name='manager_' + pname)
        input_queues = {}
        nowait_queues = {}
        result_queues = {}

        for name, size in input_queue_args.iteritems():
            if size is None:
                input_queues[name] = mp.Queue()
            elif size == 0:
                # Size 0 marks a queue that is read without waiting.
                nowait_queues[name] = mp.Queue()
            else:
                input_queues[name] = mp.Queue(maxsize=size)

        for name, size in result_queue_args.iteritems():
            if size is None:
                result_queues[name] = mp.Queue()
            else:
                result_queues[name] = mp.Queue(maxsize=size)

        is_running = mp.Value('b', True)

        args = (target, input_queues, nowait_queues, result_queues, is_running)
        print args
        self.process = mp.Process(name=pname, target=do_work, args=args)
        self.process.start()
        self.client = WorkerClient(*args)

    def has_result_queue(self, queue):
        return queue in self.client.result_queues

    def get_result(self, queue):
        value = self.client.result_queues[queue].get()
        # A None value ends the stream; drop the queue so it is not finalized again.
        if value is None:
            del self.client.result_queues[queue]
        return value

    def add_input(self, queue, value):
        self.client.input_queues[queue].put(value)

    def add_input_nowait(self, queue, value):
        self.client.nowait_queues[queue].put(value)

    def finalize(self):
        self.t.reset()
        self.stop_running()

        # Send the final values and close the input queues.
        self.t.print_time("final iter value")
        close_queues(self.client.input_queues)
        self.t.print_time("final nowait value")
        close_queues(self.client.nowait_queues)

        # Empty the result queues before joining the worker process.
        clear_queues(self.client.result_queues, empty_first=True)
        self.process.join()

    def continue_run(self):
        return self.client.is_running.value

    def stop_running(self):
        self.client.is_running.value = False
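
# Usage sketch (illustrative, not from the original source): a toy end-to-end run of the
# worker machinery with a trivial Worker that doubles whatever arrives on its 'values'
# queue. It subclasses the Worker sketch above (worker.Worker in the original module);
# the queue names and sizes are made up for the example.
class DoublingWorker(Worker):
    def iteration(self, input, input_nowait):
        value = input['values']
        if value is None:
            # None is the end-of-stream sentinel sent during finalize().
            return None
        return {'doubled': value * 2}

def example_worker_process():
    wp = WorkerProcess('doubler', DoublingWorker(), {'values': 1}, {'doubled': 1})
    for x in [1, 2, 3]:
        wp.add_input('values', x)
        print wp.get_result('doubled')
    wp.finalize()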