def _set_http_profile_default_profile(self):
    """Ensure the managed HTTP profile inherits from /Common/http."""
    parent = '/Common/http'
    current = self._http_profile.get_default_profile([self.PROFILE_NAME])
    # TODO(retr0h): pycontrol reports a new http profile's parent as
    # 'http', but `tmsh list /ltm profile http` shows no `defaults-from`
    # line for it -- the two views disagree; keeping the explicit set
    # below until that is resolved.
    if current[0] == parent:
        msg = ' - already has default profile'
        utils.print_yellow(msg)
    else:
        self._http_profile.set_default_profile(
            profile_names=[self.PROFILE_NAME], defaults=[parent])
        msg = ' - added default profile'
        utils.print_green(msg)
def _set_http_profile_default_profile(self):
    """Make sure the HTTP profile's parent (defaults-from) is /Common/http."""
    wanted = '/Common/http'
    # NOTE: pycontrol claims a fresh http profile's parent is 'http',
    # while `tmsh list /ltm profile http` omits `defaults-from` entirely
    # (original TODO by retr0h) -- so we always set it explicitly.
    if self._http_profile.get_default_profile([self.PROFILE_NAME])[0] == wanted:
        utils.print_yellow(' - already has default profile')
        return
    self._http_profile.set_default_profile(profile_names=[self.PROFILE_NAME],
                                           defaults=[wanted])
    utils.print_green(' - added default profile')
def save(model, data_dir):
    """Save the keras model directly and pickle our meta model class.

    Writes two artifacts into ``data_dir``: ``model.h5`` (the raw keras
    model, via ``model.sample_model.save``) and ``model.pkl`` (the
    wrapping meta-model object, via pickle).
    """
    keras_file_path = os.path.join(data_dir, 'model.h5')
    pickle_file_path = os.path.join(data_dir, 'model.pkl')
    model.sample_model.save(filepath=keras_file_path)
    # Fix: the original passed `open(path, 'wb')` straight into
    # pickle.dump, leaking the file handle; a context manager closes it
    # deterministically.
    with open(pickle_file_path, 'wb') as handle:
        pickle.dump(model, handle)
    print_green('Model saved to', pickle_file_path, keras_file_path)
def cook(path, caller_cwd):
    """Resolve a recipe `path` and run it inside the dev-box via `run`.

    `path` may be (tried in order): an absolute path to a recipe file, a
    path relative to the caller's cwd, or a bare recipe name resolved as
    ``recipes/<path>.sh`` relative to the dev-box checkout. External
    recipes are staged into ``.recipes/`` so vagrant can see them under
    ``/vagrant``. Prints an error and returns None when nothing matches.
    """

    def delete_if_exists(p):
        # Remove a stale staged copy so copyfile never fails on it.
        if os.path.isfile(p):
            os.remove(p)

    def stage_recipe(recipe_path, local_cwd):
        # Copy an external recipe into ./.recipes and return the path it
        # will have inside the VM (/vagrant is the synced folder).
        recipe_basename = os.path.basename(recipe_path)
        mkdirp('.recipes')
        staged = os.path.join(local_cwd, '.recipes', recipe_basename)
        delete_if_exists(staged)
        shutil.copyfile(recipe_path, staged)
        return os.path.join('/vagrant', '.recipes', recipe_basename)

    local_cwd = os.getcwd()
    caller_relative = os.path.realpath(os.path.join(caller_cwd, path))
    builtin_recipe = os.path.realpath(
        os.path.join(local_cwd, 'recipes', path + '.sh'))

    # The absolute-path and caller-relative branches previously duplicated
    # the staging logic verbatim; both now share stage_recipe().
    if os.path.isabs(path):
        recipe_path = stage_recipe(os.path.realpath(path), local_cwd)
    elif os.path.isfile(caller_relative):
        recipe_path = stage_recipe(caller_relative, local_cwd)
    elif os.path.isfile(builtin_recipe):
        recipe_path = os.path.join('/vagrant', 'recipes',
                                   os.path.basename(builtin_recipe))
    else:
        print_error('Error: recipe was not found')
        return
    print_green('# DevBox is now cooking')
    return run('sh {0}'.format(recipe_path))
def on_epoch_end(self, epoch, logs=None):
    """Keras callback hook: sample the model at several temperatures."""
    print()
    print_green('Sampling model...')
    self.meta_model.update_sample_model_weights()
    for temperature in (0.2, 0.5, 1.0, 1.2):
        print('Using diversity:', temperature)
        self.meta_model.sample(diversity=temperature)
        print('-' * 50)
def _create_http_profile(self):
    """Create the managed HTTP profile unless it is already present."""
    if self.PROFILE_NAME in self._http_profile.get_list():
        utils.print_yellow(' - already exists')
        return
    self._http_profile.create(profile_names=[self.PROFILE_NAME])
    utils.print_green(' - created')
def _set_snat_pool(self, name):
    """Attach the configured SNAT pool to virtual server `name` (idempotent)."""
    current = self._virtual_server.get_snat_pool(virtual_servers=[name])
    if current[0] == self.SNAT_POOL:
        utils.print_yellow(' - snat pool already exists for {0}'.format(name))
        return
    self._virtual_server.set_snat_pool(virtual_servers=[name],
                                       snatpools=[self.SNAT_POOL])
    utils.print_green(' - added snat pool for {0}'.format(name))
def _add_missing_members(self, pool, members, port):
    """Add any of `members` not yet in `pool`, using the given port."""
    existing = self._pool.get_member_v2(pool_names=[pool])
    # Strip the partition prefix so member names compare against `members`.
    known = {m.address.replace("/Common/", "") for m in existing[0]}
    for candidate in members:
        if candidate in known:
            continue
        address = self._get_common_address_port(candidate, port)
        sequence = self._get_common_ip_port_definition_sequence()
        sequence.items = address
        self._pool.add_member_v2(pool_names=[pool], members=[sequence])
        utils.print_green(" - member {0} added to {1}".format(candidate, pool))
def _set_ntp_servers(self):
    """Point the device at the configured NTP servers (idempotent)."""
    inet = self._pc.System.Inet
    # Compare as sorted lists: order reported by the device is irrelevant.
    if sorted(inet.get_ntp_server_address()) != sorted(self.NTP_SERVERS_LIST):
        inet.set_ntp_server_address(ntp_addresses=self.NTP_SERVERS_LIST)
        utils.print_green(' - added ntp servers')
    else:
        utils.print_yellow(' - already has ntp servers')
def initiate_scan(self, state):
    """Run a scan if logged in, otherwise prompt for credentials first."""
    # Guard clause: not logged in -> ask for the password, then retry.
    if state['is_logged_in'] is not True:
        prompt_credentials(state, PASSWORD,
                           on_success=lambda: self.initiate_scan(state),
                           on_fail=lambda: True)
        return
    print(f'Scan For {self.name} (SSF: {(self.next_scan_sensitivity_level + 1) * 1.618})'.upper())
    print()
    print_green(self.description)
    # 'N' aborts; anything else ('Y') performs the scan.
    await_valid_input(
        f'\n{CONTINUE_PROMPT}'.upper(),
        ['Y', 'N'],
        lambda l: True if l.lower() == 'n' else self.perform_scan(state))
def _create_rule_x_forwarded_protocol(self):
    """Install the x-forwarded-protocol iRule unless it already exists."""
    if self.RULE_NAME in self._rule.get_list():
        utils.print_yellow(' - already has x-forwarded-protocol rule')
        return
    definition = self._rule.typefactory.create('LocalLB.Rule.RuleDefinition')
    definition.rule_name = self.RULE_NAME
    definition.rule_definition = self.IRULE
    self._rule.create([definition])
    utils.print_green(' - created x-forwarded-protocol rule')
def _add_missing_members(self, pool, members, port):
    """Ensure every host in `members` is a member of `pool` at `port`."""
    current = self._pool.get_member_v2(pool_names=[pool])
    # Drop the '/Common/' partition prefix before comparing names.
    present = [entry.address.replace('/Common/', '') for entry in current[0]]
    for host in (h for h in members if h not in present):
        addr = self._get_common_address_port(host, port)
        seq = self._get_common_ip_port_definition_sequence()
        seq.items = addr
        self._pool.add_member_v2(pool_names=[pool], members=[seq])
        utils.print_green(' - member {0} added to {1}'.format(host, pool))
def _set_tcp_custom_keepalive(self):
    """Set the TCP profile's keepalive interval to KEEP_ALIVE_INTERVAL."""
    current = self._tcp_profile.get_keep_alive_interval([self.PROFILE_NAME])
    if current[0].value == self.KEEP_ALIVE_INTERVAL:
        utils.print_yellow(' - already has custom keepalive')
        return
    interval = self._tcp_profile.typefactory.create('LocalLB.ProfileULong')
    interval.value = self.KEEP_ALIVE_INTERVAL
    interval.default_flag = False  # use our value, not the parent's default
    self._tcp_profile.set_keep_alive_interval(
        profile_names=[self.PROFILE_NAME], intervals=[interval])
    utils.print_green(' - added custom keepalive')
def _import_certificate_from_file(self, cert_name, cert_file):
    """Import the PEM certificate in `cert_file` under `cert_name` (idempotent)."""
    if self._get_certificate(cert_name):
        utils.print_yellow(' - cert {0} already exists'.format(cert_name))
        return
    with open(cert_file, 'r') as handle:
        pem = handle.read()
    self._key_cert.certificate_import_from_pem(mode=self.MANAGEMENT_MODE_TYPE,
                                               cert_ids=[cert_name],
                                               pem_data=[pem],
                                               overwrite=False)
    utils.print_green(' - added cert {0}'.format(cert_name))
def _set_http_profile_x_forward_for(self):
    """Enable X-Forwarded-For header insertion on the HTTP profile."""
    current = self._http_profile \
        .get_insert_xforwarded_for_header_mode([self.PROFILE_NAME])  # NOQA
    if current[0].value == self.INSERT_X_FORWARDED_FOR:
        utils.print_yellow(' - already has x-forwarding')
        return
    mode = self._http_profile.typefactory.create('LocalLB.ProfileProfileMode')
    mode.value = self.INSERT_X_FORWARDED_FOR
    mode.default_flag = False  # override the inherited value
    self._http_profile.set_insert_xforwarded_for_header_mode(
        profile_names=[self.PROFILE_NAME], modes=[mode])
    utils.print_green(' - added x-forwarding')
def _import_certificate_from_file(self, cert_name, cert_file):
    """Idempotently upload the PEM cert stored in `cert_file` as `cert_name`."""
    if self._get_certificate(cert_name):
        msg = ' - cert {0} already exists'.format(cert_name)
        utils.print_yellow(msg)
    else:
        with open(cert_file, 'r') as fh:
            pem_data = fh.read()
        import_kwargs = dict(mode=self.MANAGEMENT_MODE_TYPE,
                             cert_ids=[cert_name],
                             pem_data=[pem_data],
                             overwrite=False)
        self._key_cert.certificate_import_from_pem(**import_kwargs)
        utils.print_green(' - added cert {0}'.format(cert_name))
def _set_tcp_custom_keepalive(self):
    """Apply the custom TCP keepalive interval if it is not set yet."""
    current_value = self._tcp_profile \
        .get_keep_alive_interval([self.PROFILE_NAME])[0].value
    if current_value == self.KEEP_ALIVE_INTERVAL:
        utils.print_yellow(' - already has custom keepalive')
    else:
        keepalive = self._tcp_profile.typefactory.create('LocalLB.ProfileULong')
        keepalive.value = self.KEEP_ALIVE_INTERVAL
        keepalive.default_flag = False  # explicit, not inherited
        self._tcp_profile.set_keep_alive_interval(
            profile_names=[self.PROFILE_NAME], intervals=[keepalive])
        utils.print_green(' - added custom keepalive')
def create_draft(self, campaign_id, target_id, template_id):
    """Build and save a personalized email draft for one campaign target.

    Renders the stored template's subject/body with string.Template
    substitution, saves the draft through the mail client, prints a
    colored summary, records the message in the local store, and returns
    the draft's id.
    """
    # Fetch the template and pre-compile subject/body substitutions.
    template = self.handle.get_template(template_id)
    subject_template = Template(template['subject'])
    body_template = Template(template['body'])
    # Target row plus per-campaign personalization fields.
    target_row = self.handle.get_target(target_id)
    target_personalization_row = self.handle.get_target_personalization(
        target_id, campaign_id)
    draft = self.client.drafts.create()
    draft.to = [{
        'name': target_row['first_name'],
        'email': target_row['email']
    }]
    # cc/bcc intentionally disabled; kept for reference.
    # draft.cc = [{
    #     'name': settings.CC_NAME,
    #     'email': settings.CC_EMAIL
    # }]
    # draft.bcc = [{
    #     'name': 'salesforceIQ',
    #     'email': settings.SALESFORCEIQ_EMAIL
    # }]
    draft.subject = subject_template.substitute(target_personalization_row)
    # Body sees both dicts merged; personalization keys win on collision.
    draft.body = body_template.substitute({
        **target_row,
        **target_personalization_row
    })
    draft.save()
    print(' ')
    utils.print_magenta('Draft Message to %s' % target_row['email'])
    utils.print_green('message id: ' + draft.id)
    utils.print_pretty(draft.to)
    utils.print_pretty(draft.subject)
    utils.print_pretty(draft.body)
    print(' ')
    # Persist the draft in our own message log (no thread/send info yet).
    self.handle.add_message(draft.id, None, campaign_id, None,
                            draft.to[0]['email'], settings.SENDER_EMAIL,
                            draft.subject, draft.body)
    return draft.id
def _create_pool(self, pool, hosts, port):
    """Create LB pool `pool` with each host:`port` as an initial member."""
    if pool in self._pool.get_list():
        utils.print_yellow(" - {0} already exists".format(pool))
        return
    # Odd, must create a sequence, even when the docs state othewise.
    # https://devcentral.f5.com/wiki/iControl.LocalLB__Pool__create_v2.ashx # NOQA
    member_sequence = self._get_common_ip_port_definition_sequence()
    member_sequence.items = [self._get_common_address_port(host, port)
                             for host in hosts]
    self._pool.create_v2(pool_names=[pool],
                         lb_methods=[self.LB_METHOD],
                         members=[member_sequence])
    utils.print_green(" - {0} created".format(pool))
def _set_chain_file(self, profile):
    """Attach the intermediate certificate chain file to SSL profile `profile`.

    Idempotent: skips the update when the profile's current chain file
    already names the intermediate bundle.
    """
    result = self._ssl_profile.get_chain_file([profile])
    cert_basename = '%s.crt' % cert.Cert.INTERMEDIATE_BUNDLE.split('/')[-1]
    # Fix: the basename contains a literal '.' (and may contain other
    # regex metacharacters); the original interpolated it unescaped via
    # `% locals()`, so '.' matched any character. re.escape makes the
    # match literal.
    re_cert = re.compile(re.escape(cert_basename))
    if result[0].value and re_cert.search(result[0].value):
        msg = ' - {0} already has chain file'.format(profile)
        utils.print_yellow(msg)
    else:
        ctx = self._ssl_profile.typefactory.create('LocalLB.ProfileString')
        ctx.value = '%s.crt' % cert.Cert.INTERMEDIATE_BUNDLE
        ctx.default_flag = False
        self._ssl_profile.set_chain_file(profile_names=[profile],
                                         chains=[ctx])
        msg = ' - {0} added chain file'.format(profile)
        utils.print_green(msg)
def _create_pool(self, pool, hosts, port):
    """Create pool `pool` (if absent) with members built from hosts x port."""
    existing = self._pool.get_list()
    if pool in existing:
        msg = ' - {0} already exists'.format(pool)
        utils.print_yellow(msg)
    else:
        # Odd, must create a sequence, even when the docs state othewise.
        # https://devcentral.f5.com/wiki/iControl.LocalLB__Pool__create_v2.ashx # NOQA
        sequence = self._get_common_ip_port_definition_sequence()
        initial_members = []
        for address in hosts:
            initial_members.append(self._get_common_address_port(address, port))
        sequence.items = initial_members
        self._pool.create_v2(pool_names=[pool],
                             lb_methods=[self.LB_METHOD],
                             members=[sequence])
        msg = ' - {0} created'.format(pool)
        utils.print_green(msg)
def compile(filename):
    """Compile one song to PDF with lualatex.

    Runs lualatex on the shared LaTeX template with `filename` injected,
    copies the resulting PDF into the PDF folder on success, and prints a
    colored status line. Returns 1 on success, 0 on failure.
    """
    arguments = [
        "lualatex",
        "--jobname={0}".format(filename),
        "--output-dir=out",
        "\"\\def\\filename{{{0}}} \\input{{./latex_template/root_singleSong.tex}}\""
        .format(filename)
    ]
    print(' '.join(arguments))
    # Fixes vs original: `FNULL = open(os.devnull, 'w')` was opened but
    # never used (and never closed) -- removed; the log file handle is
    # now closed via a context manager.
    with open('./out/compile.log', 'w+') as err_file:
        code = subprocess.call(' '.join(arguments), shell=True,
                               stderr=err_file)
    # Fix: was `if code is 0` -- identity comparison on an int literal is
    # implementation-defined; use equality.
    if code == 0:  # compilation successful
        copyfile("./out/{0}.pdf".format(filename),
                 path.join(pdfFolderPath, filename + ".pdf"))
        utils.print_green(filename, "success")
        return 1
    else:
        utils.print_red(filename, "compilation failed!")
        printErrorLog(filename)
        return 0
async def fetch_file(blob_path, git_url, session):
    """Recreate one file from a git blob, remotely or from local objects."""
    blob, path = blob_path
    # Skip vendored / generated / uploaded paths entirely.
    if any(part in path for part in ('vendor', 'node_modules', 'uploads/')):
        return
    content = await decode_hash(blob, git_url, session)
    if not content:
        # Fall back to the unpacked object store: .git/objects/<aa>/<rest>
        folder, filename = blob[:2], blob[2:]
        blob_path = os.path.join(os.getcwd(), '.git', 'objects',
                                 folder, filename)
        if not os.path.exists(blob_path):
            print_warn('ERROR getting [%s] %s' % (blob, path))
            return
        with open(blob_path, 'rb') as f:
            content = f.read()
    create_file(path, content)
    print_green('Created [%s] %s' % (blob, path))
def train(self, data_dir, word_tokens, pristine_input, pristine_output,
          batch_size, seq_length, seq_step, embedding_size, rnn_size,
          num_layers, num_epochs, live_sample):
    """Train the model: load data, build the networks, then fit."""
    print_green('Loading data...')
    started = time.time()
    x, y, x_val, y_val = self._load_data(data_dir, word_tokens,
                                         pristine_input, pristine_output,
                                         batch_size, seq_length, seq_step)
    print_red('Data load time', time.time() - started)

    print_green('Building model...')
    started = time.time()
    self._build_models(batch_size, embedding_size, rnn_size, num_layers)
    print_red('Model build time', time.time() - started)

    print_green('Training...')
    started = time.time()
    validation_data = (x_val, y_val) if (x_val is not None) else None
    callbacks = [LiveSamplerCallback(self)] if live_sample else None
    # shuffle=False: batches are stateful sequences, order matters.
    self.train_model.fit(x, y,
                         validation_data=validation_data,
                         batch_size=batch_size,
                         shuffle=False,
                         epochs=num_epochs,
                         verbose=1,
                         callbacks=callbacks)
    self.update_sample_model_weights()
    print_red('Training time', time.time() - started)
def compile(filename, ext):
    """Convert one chord source file to .tex.

    Returns 1 when converted, 2 when the output is already up to date,
    0 when conversion raised.
    """
    source_path = path.join(choFolderPath, "{}.{}".format(filename, ext))
    with open(source_path, 'r', encoding="utf-8") as source:
        try:
            converted = convert(source.read())
        except Exception as e:
            print(e)
            print(traceback.format_exc())
            return 0
    outFilePath = path.join(texFolderPath, "{}.tex".format(filename))
    # Skip the write when nothing changed, so mtimes stay stable.
    if path.isfile(outFilePath):
        with open(outFilePath, 'r', encoding="utf-8") as existing:
            if existing.read() == converted:
                print("converted equals outFile for " + filename)
                # TODO: check if tabs changed
                return 2
    with open(outFilePath, 'w', encoding="utf-8") as outFile:
        outFile.write(converted)
    utils.print_green(filename, "converted")
    return 1
def _create_virtual_server(self, name, vip_dict):
    """Create virtual server `name` from `vip_dict` unless it exists."""
    existing = self._virtual_server.get_list()
    domain = vip_dict['dns']
    if name in existing:
        utils.print_yellow(' - {0} already exists'.format(name))
        return
    definition = self._get_virtual_server_definition(name,
                                                     vip_dict['ip'],
                                                     vip_dict['front_port'],
                                                     self.PROTOCOL_TYPE)
    backend_pool = utils.pool_name(domain, vip_dict['back_port'])
    resources = self._get_virtual_server_resource(backend_pool)
    profiles = self._get_virtual_server_profile(vip_dict['monitor'], domain)
    self._virtual_server.create(definitions=definition,
                                wildmasks=[self.WILDMASKS],
                                resources=resources,
                                profiles=[profiles])
    utils.print_green(' - {0} created'.format(name))
def _set_monitor(self, pool, monitor):
    """Associate health monitor `monitor` with `pool` (idempotent)."""
    association = self._pool.get_monitor_association([pool])
    if monitor in association[0].monitor_rule.monitor_templates:
        utils.print_yellow(
            " - monitor {0} already exists for {1}".format(monitor, pool))
        return
    rule = self._pool.typefactory.create("LocalLB.MonitorRule")
    rule.type = self.MONITOR_RULE_TYPE
    rule.quorum = self.MONITOR_RULE_QUORUM
    rule.monitor_templates = [monitor]
    assoc = self._pool.typefactory.create("LocalLB.Pool.MonitorAssociation")
    assoc.pool_name = pool
    assoc.monitor_rule = rule
    self._pool.set_monitor_association(monitor_associations=[assoc])
    utils.print_green(" - monitor {0} created for {1}".format(monitor, pool))
def setup():
    """Bring up the Vagrant machine, then provision it."""
    print_green('# Setting up the Vagrant machine')
    # `vagrant up` runs twice with a pause in between -- the second run
    # picks up where a partially-initialised first boot left off.
    for attempt in range(2):
        os.system('vagrant up')
        if attempt == 0:
            time.sleep(15)
    print_green('# Start provision')
    run('/vagrant/cli/provision.sh')
    print_green('# Setup complete')
def _create_ssl_profile(self, profile, key_profile, cert_profile):
    """Create SSL profile `profile` backed by the given key and cert."""
    if profile in self._ssl_profile.get_list():
        utils.print_yellow(' - {0} already exists'.format(profile))
        return

    def _profile_string(value):
        # Wrap `value` in a LocalLB.ProfileString with default_flag off.
        ctx = self._ssl_profile.typefactory.create('LocalLB.ProfileString')
        ctx.value = value
        ctx.default_flag = False
        return ctx

    #TODO(retr0h): Could batch add all at once.
    self._ssl_profile.create_v2(profile_names=[profile],
                                keys=[_profile_string(key_profile)],
                                certs=[_profile_string(cert_profile)])
    utils.print_green(' - {0} created'.format(profile))
def _set_monitor(self, pool, monitor):
    """Bind `monitor` to `pool` if no such association exists yet."""
    existing = self._pool.get_monitor_association([pool])
    templates = existing[0].monitor_rule.monitor_templates
    if monitor in templates:
        msg = ' - monitor {0} already exists for {1}'.format(monitor, pool)
        utils.print_yellow(msg)
    else:
        factory = self._pool.typefactory
        monitor_rule = factory.create('LocalLB.MonitorRule')
        monitor_rule.type = self.MONITOR_RULE_TYPE
        monitor_rule.quorum = self.MONITOR_RULE_QUORUM
        monitor_rule.monitor_templates = [monitor]
        monitor_assoc = factory.create('LocalLB.Pool.MonitorAssociation')
        monitor_assoc.pool_name = pool
        monitor_assoc.monitor_rule = monitor_rule
        self._pool.set_monitor_association(
            monitor_associations=[monitor_assoc])
        msg = ' - monitor {0} created for {1}'.format(monitor, pool)
        utils.print_green(msg)
def point_embed_mesh1d(model, mesh1d, bounding_shape, **kwargs):
    '''
    Embed points of mesh1d into Xd bounding shape. An attempt is made
    to insert intermediate points so that also edges are embedded.

    Iterates up to kwargs['niters'] times: embed the current point set,
    check which 1d edges are not realized as edge paths of the embedding
    mesh, and insert auxiliary points on the offending edges. If that
    never converges, falls back to a non-conforming "embedding" where
    edges are forced through existing vertices (recorded in
    skew_embed_vertex/skew_embed_edge). Returns a utils.LineMeshEmbedding.

    NOTE(review): `len(filter(...))` and `map(...)` used as a list/dict
    value below only work on Python 2 (Py3 returns iterators) -- this
    module presumably targets Python 2 / legacy FEniCS; confirm.
    '''
    x = mesh1d.coordinates()
    # Tag each 1d cell with its (1-based) index and dump for inspection.
    foo = df.MeshFunction('size_t', mesh1d, 1, 0)
    foo.array()[:] = np.arange(1, 1 + mesh1d.num_cells())
    df.File('foo.pvd') << foo

    # Edge -> vertex connectivity of the 1d mesh as plain lists.
    mesh1d.init(1, 0)
    e2v = mesh1d.topology()(1, 0)
    topology = [list(e2v(e)) for e in range(mesh1d.num_entities(1))]
    # Reference edge lengths, used by the monitor diagnostics below.
    target_l = trim.edge_lengths(mesh1d).vector().get_local()

    converged, nneeds = False, [mesh1d.num_cells()]
    niters = kwargs.get('niters', 5)
    base_geo = kwargs['save_geo']
    for k in range(niters):
        # Some mesh which embeds points but where these points are not
        # necessarily edges
        if base_geo:
            kwargs['save_geo'] = '_'.join([base_geo, str(k)])
        t = utils.Timer('%d-th iteration of %d point embedding' % (k, len(x)), 1)
        embedding_mesh, vmap = _embed_points(model, x, bounding_shape, **kwargs)
        t.done()
        assert _embeds_points(embedding_mesh, x, vmap)
        # See which edges need to be improved
        needs_embedding = _not_embedded_edges(topology, vmap, embedding_mesh)
        nneeds.append(len(filter(bool, needs_embedding)))
        utils.print_green(' ', '# edges need embedding %d (was %r)' % (nneeds[-1], nneeds[:-1]))
        converged = not any(needs_embedding)

        # Pop up the gmsh GUI on the last iteration when debugging.
        if kwargs['debug'] and k == niters - 1:
            gmsh.fltk.initialize()
            gmsh.fltk.run()

        # Here's some debugging functionality which saves progress on emebdding
        if kwargs['monitor']:
            # Force current mesh1d embedding (on a deep copy so the real
            # topology is untouched this iteration).
            help_topology = _force_embed_edges(deepcopy([list(vmap[edge]) for edge in topology]),
                                               embedding_mesh,
                                               needs_embedding,
                                               defaultdict(list))
            # And see about the length of edges under that embedding
            new_l = _edge_lengths(embedding_mesh.coordinates(), help_topology, needs_embedding)
            np.savetxt(os.path.join(kwargs['monitor'], 'length_diff_iter%d.txt' % k),
                       (new_l-target_l)/new_l)
            utils.print_green(' ', 'Max relative length error', np.max(new_l))
            # And distance
            new_d = _edge_distances(embedding_mesh.coordinates(), help_topology, needs_embedding)
            np.savetxt(os.path.join(kwargs['monitor'], 'distance_diff_iter%d.txt' % k), new_d)
            utils.print_green(' ', 'Max relative distance error', np.max(new_d))

            old_l = target_l.sum()
            new_l = new_l.sum()
            utils.print_green(' ', 'Target %g, Current %g, Relative Error %g' % (old_l, new_l, (new_l-old_l)/old_l))

            # Save the edges which needed embedding
            embedding_mesh.init(1, 0)
            e2v = embedding_mesh.topology()(1, 0)
            edge_lookup = {tuple(sorted(e2v(e))): e
                           for e in range(embedding_mesh.num_entities(1))}
            edge_f = df.MeshFunction('size_t', embedding_mesh, 1, 0)
            topology_as_edge = []
            for tag, edge in enumerate(help_topology, 1):
                if needs_embedding[tag-1]:
                    the_edge = []
                    for e in zip(edge[:-1], edge[1:]):
                        edge_index = edge_lookup[tuple(sorted(e))]
                        # assert edge_f[edge_index] == 0  # Never seen
                        edge_f[edge_index] = tag
                        the_edge.append(edge_index)
                    topology_as_edge.append(the_edge)
            df.File(os.path.join(kwargs['monitor'], 'need_embedding_iter%d.pvd' % k)) << edge_f

        if converged:
            break

        # Insert auxiliary points and retry
        t = utils.Timer('%d-th iteration of point insert' % k, 1)
        x, topology = _embed_edges(topology, x, needs_embedding)
        t.done()
        assert len(topology) == mesh1d.num_cells()

        utils.print_green(' ', '# num points increased to %d' % len(x))

    # vertex -> list of vertex paths for edges that could not be embedded.
    skew_embed_vertex = defaultdict(list)
    # We capitulate and make approximations;
    if not converged:
        utils.print_red(' ', 'Falling back to non-conforming `embedding`')
        if base_geo:
            kwargs['save_geo'] = '_'.join([base_geo, str(niters)])
        embedding_mesh, vmap = _embed_points(model, x, bounding_shape, **kwargs)
        assert _embeds_points(embedding_mesh, x, vmap)

        needs_embedding = _not_embedded_edges(topology, vmap, embedding_mesh)
        # We "embed" the mesh using __only__ existing vertices - translate topology
        topology = [list(vmap[edge]) for edge in topology]
        # An edges that need embedding is a branch with terminal vertices - so the
        # idea is to insert the interior path vertices
        t = utils.Timer('Force embedding edges', 1)
        topology = _force_embed_edges(topology, embedding_mesh, needs_embedding,
                                      skew_embed_vertex)
        t.done()

        if kwargs['monitor']:
            # And see about the length of edges under that embedding
            new_l = _edge_lengths(embedding_mesh.coordinates(), topology, needs_embedding)
            np.savetxt(os.path.join(kwargs['monitor'], 'length_diff_final.txt'),
                       (new_l-target_l)/target_l)
            utils.print_green(' ', 'Max relative length error', np.max(new_l))
            # And distance
            new_d = _edge_distances(embedding_mesh.coordinates(), topology, needs_embedding)
            np.savetxt(os.path.join(kwargs['monitor'], 'distance_diff_final.txt'), new_d)
            utils.print_green(' ', 'Max relative distance error', np.max(new_d))

            old_l = target_l.sum()
            new_l = new_l.sum()
            utils.print_green(' ', 'Target %g, Current %g, Relative Error %g' % (old_l, new_l, (new_l-old_l)/old_l))

            # Save the edges which needed embedding
            embedding_mesh.init(1, 0)
            e2v = embedding_mesh.topology()(1, 0)
            edge_lookup = {tuple(sorted(e2v(e))): e
                           for e in range(embedding_mesh.num_entities(1))}
            edge_f = df.MeshFunction('size_t', embedding_mesh, 1, 0)
            topology_as_edge = []
            for tag, edge in enumerate(topology, 1):
                if needs_embedding[tag-1]:
                    the_edge = []
                    for e in zip(edge[:-1], edge[1:]):
                        edge_index = edge_lookup[tuple(sorted(e))]
                        # assert edge_f[edge_index] == 0  # Never seen
                        edge_f[edge_index] = tag
                        the_edge.append(edge_index)
                    topology_as_edge.append(the_edge)
            df.File(os.path.join(kwargs['monitor'], 'need_embedding_final.pvd')) << edge_f
    else:
        # Since the original 1d mesh likely has been changed we give
        # topology wrt to node numbering of the embedding mesh
        topology = [list(vmap[edge]) for edge in topology]
    assert len(topology) == mesh1d.num_cells()

    t = utils.Timer('Fishing for edges', 1)
    # Need to color the edge function;
    embedding_mesh.init(1, 0)
    e2v = embedding_mesh.topology()(1, 0)
    edge_lookup = {tuple(sorted(e2v(e))): e
                   for e in range(embedding_mesh.num_entities(1))}
    edge_f = df.MeshFunction('size_t', embedding_mesh, 1, 0)
    topology_as_edge = []
    # Color every edge path with the (1-based) index of its 1d cell.
    for tag, edge in enumerate(topology, 1):
        the_edge = []
        for e in zip(edge[:-1], edge[1:]):
            edge_index = edge_lookup[tuple(sorted(e))]
            # assert edge_f[edge_index] == 0  # Never seen
            edge_f[edge_index] = tag
            the_edge.append(edge_index)
        topology_as_edge.append(the_edge)

    encode_edge = lambda path: [edge_lookup[tuple(sorted(e))] for e in zip(path[:-1], path[1:])]
    # Finally encode skew edges as edges
    skew_embed_edge = {k: map(encode_edge, edge_as_vertex)
                       for k, edge_as_vertex in skew_embed_vertex.items()}
    t.done()
    df.File('foo_final.pvd') << edge_f

    ans = utils.LineMeshEmbedding(embedding_mesh,
                                  # The others were not part of original data
                                  vmap[:mesh1d.num_vertices()],
                                  edge_f,
                                  utils.EdgeMap(topology, topology_as_edge),
                                  utils.EdgeMap(skew_embed_vertex, skew_embed_edge))
    kwargs['save_embedding'] and utils.save_embedding(ans, kwargs['save_embedding'])

    return ans
def out_box(self, persons, timer=None):
    """Simulate one person entering, using and leaving the bathroom.

    `persons[0]` carries the gender tag (FEMALE/MALE/other) and
    `persons[-1]` the person's sequence number. Counters and timestamp
    lists on `self` are updated under `sem_mutex`; when the last person
    (`self.num_people`) leaves, a statistics summary is printed.
    Releases one `sem_bathroom` slot on exit.
    """
    self.sem_mutex.acquire()
    if timer:
        self.start_male_time = timer.get('start_male_time')
        self.start_female_time = timer.get('start_female_time')
        self.start_third_gender_time = timer.get('start_third_gender_time')
    if persons[0] == FEMALE:
        self.count_female += 1
        self.end_female_time.append(time.time())
        female_enter_bathroom_message = (
            '|ENTER-BATHROOM|> People #{0}:a woman just enterred into the bathroom'
            .format(persons[len(persons) - 1]))
        print_green(female_enter_bathroom_message)
    elif persons[0] == MALE:
        self.count_male += 1
        self.end_male_time.append(time.time())
        male_enter_bathroom_message = (
            '|ENTER-BATHROOM|> People #{0}:a man just enterred into the bathroom'
            .format(persons[len(persons) - 1]))
        print_green(male_enter_bathroom_message)
    else:
        self.count_third_gender += 1
        self.end_third_gender_time.append(time.time())
        print(
            '|ENTER-BATHROOM|> People #{0}:a third gender people just enterred into the bathroom'
            .format(persons[len(persons) - 1]))
    self.start_box_time.append(time.time())
    self.sem_mutex.release()

    # Occupy the bathroom (outside the mutex so others can queue).
    self.count_bathroom += 1
    sleep(self.TIME_IN_BATHROOM)

    self.sem_mutex.acquire()
    self.end_box_time.append(time.time())
    self.count_bathroom -= 1
    exit_message = '|EXIT|> People #{0}: left the bathroom'.format(
        persons[len(persons) - 1])
    print_purple(exit_message)
    if persons[len(persons) - 1] == self.num_people:
        end_time = time.time()
        print(timer)
        print("\n{0}\nStatistics\n{0}\n".format(60 * "*"))
        print(
            "Total people:",
            self.count_male + self.count_female + self.count_third_gender)
        print("Total men: ", self.count_male)
        print("Total women: ", self.count_female)
        print('Total third gender person', self.count_third_gender)
        print("\n{0}\n".format(60 * "*"))
        total_time_male = (sum(self.end_male_time) -
                           sum(self.start_male_time))
        total_time_female = (sum(self.end_female_time) -
                             sum(self.start_female_time))
        total_time_third_gender = (sum(self.end_third_gender_time) -
                                   sum(self.start_third_gender_time))
        # Fix: the men's average was guarded by `not self.count_female`,
        # so a run with women but no men divided by zero (and a run with
        # men but no women wrongly reported 0). Guard on count_male.
        average_male = 0 if not self.count_male else total_time_male / self.count_male
        average_female = 0 if not self.count_female else total_time_female / self.count_female
        average_third_gender = 0 if not self.count_third_gender else total_time_third_gender / self.count_third_gender
        print("average time men:", average_male, 'seconds')
        print("average time women:", average_female, 'seconds')
        print("average time third gender", average_third_gender, 'seconds')
        totalTime = end_time - self.start_time
        totalBoxTime = sum(self.end_box_time) - sum(self.start_box_time)
        print("Execution time:", totalTime, 'seconds')
        print("|BOX|> Usage time:", totalBoxTime, 'seconds')
        print("|BOX|> Rate time ocupation:", totalBoxTime / totalTime,
              'seconds')
        print("\n{0}\n".format(60 * "*"))
    self.sem_mutex.release()
    self.sem_bathroom.release()