def submit_new(conf, cache):
    '''Handle processing for a new paste.

    Reads the POST form (or CLI post body), validates it, optionally checks
    the captcha and the origin-address blacklist, stores the paste, and
    returns either a plain URL (CLI post) or a redirect (web form).
    '''
    paste_data = {
        'code': bottle.request.POST.get('code', ''),
        'name': bottle.request.POST.get('name', '').strip(),
        'phone': bottle.request.POST.get('phone', '').strip(),
        'private': bottle.request.POST.get('private', '0').strip(),
        'syntax': bottle.request.POST.get('syntax', '').strip(),
        'forked_from': bottle.request.POST.get('forked_from', '').strip(),
        'webform': bottle.request.POST.get('webform', '').strip(),
        'origin_addr': bottle.request.environ.get('REMOTE_ADDR', 'undef').strip(),
        'recaptcha_answer': bottle.request.POST.get('g-recaptcha-response', '').strip()}
    # An empty 'webform' marker means the paste came from the CLI client.
    # (was: True if ... else False — the comparison already yields a bool)
    cli_post = paste_data['webform'] == ''
    # Handle file uploads (was: type(...) == bottle.FileUpload)
    if isinstance(paste_data['code'], bottle.FileUpload):
        paste_data['code'] = '# FileUpload: {}\n{}'.format(
            paste_data['code'].filename, paste_data['code'].file.getvalue())
    # Validate data
    (valid, err) = sanity.validate_data(conf, paste_data)
    if not valid:
        return bottle.jinja2_template('error.html', code=200, message=err)
    # Check recaptcha answer unless this is a CLI post
    if utils.str2bool(conf.get('bottle', 'check_spam')) and not cli_post:
        if not sanity.check_captcha(
                conf.get('bottle', 'recaptcha_secret'),
                paste_data['recaptcha_answer']):
            return bottle.jinja2_template(
                'error.html', code=200,
                message='Invalid captcha verification. ERR:677')
    # Check address against blacklist
    if sanity.address_blacklisted(cache, paste_data['origin_addr']):
        return bottle.jinja2_template(
            'error.html', code=200, message='Address blacklisted. ERR:840')
    # Stick paste into cache
    paste_id = _write_paste(cache, paste_data)
    # Remember form defaults for the user's next visit
    bottle.response.set_cookie(
        'dat', json.dumps({
            'name': str(paste_data['name']),
            'syntax': str(paste_data['syntax']),
            'private': str(paste_data['private'])}))
    # Send user to page, or a link to page
    if cli_post:
        scheme = bottle.request.environ.get('REQUEST_SCHEME')
        host = bottle.request.get_header('host')
        return '{}://{}/{}\n'.format(scheme, host, paste_id)
    else:
        # Relay message to IRC unless the origin address is greylisted
        if utils.str2bool(conf.get('bottle', 'relay_enabled')) \
                and not sanity.address_greylisted(cache, paste_data['origin_addr']):
            irc.send_message(conf, cache, paste_data, paste_id)
        bottle.redirect('/' + paste_id)
def web_assign_user_to_ticket(request):
    """JSON endpoint: assign (or unassign) a user, by email, to a ticket.

    Returns a response built from {'success': bool, 'ticket_id': ...}; any
    failure is reported as success=False rather than raised to the caller.
    """
    result = {'success': False}
    try:
        user, token = check_auth(request)
        ticket_id = request.POST['ticket_id']
        email = request.POST['email']
        # 'unassign' is an optional flag; default to False when absent/bad.
        unassign = False
        try:
            unassign = str2bool(request.POST['unassign'])
        except Exception:
            pass
        assign_user_to_ticket(
            user=user,
            ticket_id=ticket_id,
            email=email,
            unassign=unassign,
        )
        result['ticket_id'] = ticket_id
        result['success'] = True
    except Exception:
        # Best-effort API: report failure via the payload. (was a bare
        # `except:`, which also swallowed SystemExit/KeyboardInterrupt)
        pass
    return make_response(result)
def _write_paste(cache, paste_data): ''' Put a new paste into cache. Returns paste_id. ''' # Public pastes should have an easy to type key # Private pastes should have a more secure key id_length = 1 if paste_data['recaptcha_answer'] == '': id_length = 12 elif utils.str2bool(paste_data['private']): id_length = 8 # Pick a unique ID paste_id = binascii.b2a_hex(os.urandom(id_length)) # Make sure it's actually unique or else create a new one and test again while cache.exists('paste:' + paste_id): id_length += 1 paste_id = binascii.b2a_hex(os.urandom(id_length)) # Put the paste into cache cache.setex('paste:' + paste_id, json.dumps(paste_data), 345600) return paste_id
def send_message(conf, cache, paste, paste_id):
    ''' Send notification to the configured IRC relay channels. '''
    host = conf.get('bottle', 'relay_host')
    port = int(conf.get('bottle', 'relay_port'))
    pw = conf.get('bottle', 'relay_pass')
    # Build the message to send to the channel
    if paste['forked_from']:
        orig = json.loads(cache.get('paste:' + paste['forked_from']))
        message = ''.join(['Paste from ', orig['name'], ' forked by ',
                           paste['name'], ': [ ', conf.get('bottle', 'url'),
                           paste_id, ' ]'])
    else:
        message = ''.join(['Paste from ', paste['name'], ': [ ',
                           conf.get('bottle', 'url'), paste_id, ' ]'])
    # Get list of relay channels:
    # always admin channels; normal channels only if paste is not private
    channels = None
    if conf.get('bottle', 'relay_admin_chan') is not None:
        channels = conf.get('bottle', 'relay_admin_chan')
        if conf.get('bottle', 'relay_chan') is not None \
                and not utils.str2bool(paste['private']):
            channels = ''.join([channels, ',', conf.get('bottle', 'relay_chan')])
    else:
        if conf.get('bottle', 'relay_chan') is not None \
                and not utils.str2bool(paste['private']):
            channels = conf.get('bottle', 'relay_chan')
    # For each channel, send the relay server a message
    # Note: Irccat does not use traditional channel names
    if channels:
        for channel in channels.split(','):
            try:
                s = socket.create_connection((host, port))
                try:
                    # sendall guarantees the whole payload goes out (send()
                    # may short-write); encode for Python 3 byte sockets.
                    s.sendall('{};{};{}\n'.format(channel, pw, message).encode('utf-8'))
                finally:
                    # was leaked when send failed before close()
                    s.close()
            except Exception:
                # Best-effort relay: a dead channel must not fail the paste.
                print('Unable to send message to channel: {}'.format(channel))
def html(self):
    """Build and return the HTML <figure>/<img> block for this image."""
    # Get the name and url
    name = self.name()
    url = self.getParam('url')
    # Download the image only when configured to do so
    if self.__download:
        urllib.urlretrieve(url, name)
        img_name = name
    else:
        img_name = url
    # Flag for wrapping in a centering div
    div = self.getParam('div-center')
    if isinstance(div, str):
        div = utils.str2bool(div)
    # Create the html <img> block
    img = '\n\n'
    if div:
        # FIX: the style attribute was missing its closing quote
        # ('<div style="text-align:center;>'), producing malformed HTML.
        img += '<div style="text-align:center;">\n'
    img += '<figure style="float:left">\n'
    img += ' <a href="' + url + '">\n'
    img += ' <img src="' + img_name + '"'
    # Collect inline CSS: explicit 'style' param plus any html-group params
    if self.isParamValid('style'):
        style = self.getParam('style')
    else:
        style = ''
    for prop in self._pars.groupKeys('html'):
        if self.isParamValid(prop):
            style += prop + ':' + self._pars[prop] + ';'
    if style:
        img += ' style="' + style + '"'
    img += '/>\n </a>\n'
    # Append the caption (if desired)
    if self._pars['show_caption'] and self.isParamValid('caption'):
        img += ' <figcaption>\n'
        img += ' ' + self._pars['caption']
        img += '\n </figcaption>\n'
    img += '</figure>\n'
    if div:
        img += '</div>'
    # Return the complete html
    return img
def authenticate(self, username, password):
    """Authenticate username/password against Active Directory over LDAP.

    Returns the username on success, None on any failure (bad credentials,
    group lookup error, or missing required-group membership).
    """
    import ldap
    # auth_ad_uid_mask is generally '(sAMAccountName=%s)' in the settings,
    # used to fetch the user's record from AD by login name.
    uid = Config.auth_ad_uid_mask % username
    try:
        dc = ('389', Config.auth_ad_domain_controller)
        port, host = dc
        ldapo = ldap.open(host, port=int(port))
        # TODO: TLS path still needs testing
        if str2bool(Config.auth_ad_use_tls):
            ldapo.start_tls_s()
        # Very important for AD search speed: turn off referral chasing.
        ldapo.set_option(ldap.OPT_REFERRALS, 0)
        # Build the correct bind name (UPN form user@domain)
        if "@%s" % self.get_domain_name() in username:
            bind_username = username
        else:
            # NOTE(review): this literal arrived scrubbed as '******', which
            # would raise TypeError under %-formatting; '%s@%s' is the UPN
            # form simple_bind_s expects — confirm against project history.
            bind_username = '%s@%s' % (username, self.get_domain_name())
        # Test authentication by binding as the user
        ldapo.simple_bind_s(bind_username, password)
        user_data = self.search_ad(ldapo, '(&(objectClass=user)%s)' % uid)
        try:
            group_names = self.get_member_of(ldapo, user_data)
        except Exception as e:
            logging.error("Active Directory error: failed getting groups"
                          " for user '%s': %s" % (username, e))
            return None
        # Enforce required AD group membership when configured
        required_group = Config.auth_ad_required_group_name
        if required_group and required_group not in group_names:
            logging.warning(
                "Active Directory: User %s is not in required group %s"
                % (username, required_group))
            return None
        # Authenticated: return the username
        return username
    except ldap.LDAPError as e:
        # NOTE(review): the original handler was lost in the source dump
        # (the try block had no except); failing closed with None matches
        # the other error paths — confirm the original behavior.
        logging.error("Active Directory error for user '%s': %s"
                      % (username, e))
        return None
def web_create_ticket_comment(request):
    """JSON endpoint: add a comment to a ticket, optionally closing it.

    Returns a response built from {'success': bool, 'ticket_comment_id': ...};
    failures are reported as success=False.  (Docstring fixed: it previously
    described an unrelated organizations endpoint.)
    """
    result = {'success': False}
    # FIX: `if True:` with the try/except commented out was debugging
    # leftover; the error-swallowing contract is restored here.
    try:
        user, token = check_auth(request)
        ticket_id = request.POST['ticket_id']
        contents = request.POST['contents']
        # 'close' is an optional flag; default to False when absent/bad.
        close = False
        try:
            close = str2bool(request.POST['close'])
        except Exception:
            pass
        if contents.strip() == '':
            raise Exception('no contents to comment')
        ticket = get_ticket(user.id, ticket_id)
        if ticket is None:
            raise Exception('invalid ticket id')
        ticket_comment = create_new_ticket_comment(
            user=user,
            ticket_id=ticket_id,
            contents=contents,
            close=close,
        )
        result['ticket_comment_id'] = ticket_comment.id
        result['success'] = True
    except Exception:
        # Best-effort API: report failure via the payload.
        pass
    return make_response(result)
def characters(self, content):
    """SAX character handler: store the text content on the field selected
    by the most recently seen start tag, then clear the tag marker."""
    tag = self.CurrentData
    if tag == "deleteOnTermination":
        # The only field that needs boolean conversion.
        self.block_device.delete_on_termination = str2bool(content)
    else:
        # Everything else is stored verbatim: tag -> (holder attr, field).
        routing = {
            "status": ("block_device", "status"),
            "deviceName": ("block_device", "device_name"),
            "volumeId": ("block_device", "volume_id"),
            "dnsName": ("instance", "dns_name"),
            "instanceId": ("instance", "instance_id"),
            "instanceState": ("instance", "instance_state"),
            "imageId": ("instance", "image_id"),
            "privateDnsName": ("instance", "private_dns_name"),
            "keyName": ("instance", "key_name"),
            "launchTime": ("instance", "launch_time"),
            "subnetId": ("instance", "subnet_id"),
            "groupName": ("group", "group_name"),
            "groupId": ("group", "group_id"),
            "vpcId": ("instance", "vpc_id"),
            "instanceType": ("instance", "instance_type"),
            "privateIpAddress": ("instance", "private_ip_address"),
        }
        target = routing.get(tag)
        if target is not None:
            holder, attr = target
            setattr(getattr(self, holder), attr, content)
    self.CurrentData = ""
def property_id_to_spec(property_id):
    """Recursively build an OpenAPI/JSON-Schema property spec dict for the
    stored OasProperty row with the given id.

    Dispatches on the row's 'data_type' ('string', 'number', 'integer',
    'boolean', 'null', 'array', 'object', 'oneOf', 'anyOf', 'allOf', 'not',
    '$ref') and merges per-type validation keywords into the result.
    Raises Exception for an unknown data_type.

    (All `!= None` / `== None` comparisons replaced with `is not None` /
    `is None` per PEP 8; behavior is unchanged for these DB-sourced values.)
    """
    oas_property = OasProperty.findDictById(property_id)
    oas_property_type = oas_property.get('data_type')
    property_spec = {}
    validation_extras = {}
    property_nullable = oas_property.get('nullable')
    if property_nullable is not None:
        validation_extras['nullable'] = str2bool(property_nullable)
    if oas_property_type == 'string':
        property_spec['type'] = 'string'
        string_min_length = oas_property.get('string_min_length')
        string_max_length = oas_property.get('string_max_length')
        string_format = oas_property.get('string_format')
        string_pattern = oas_property.get('string_pattern')
        string_enum = oas_property.get('string_enum')
        if string_min_length is not None:
            validation_extras['minLength'] = string_min_length
        if string_max_length is not None:
            validation_extras['maxLength'] = string_max_length
        if string_format is not None:
            validation_extras['format'] = string_format
        if string_pattern is not None:
            validation_extras['pattern'] = string_pattern
        if string_enum is not None:
            # enum stored as a comma-separated string
            validation_extras['enum'] = string_enum.split(',')
        return {**property_spec, **validation_extras}
    elif oas_property_type == 'number':
        property_spec['type'] = 'number'
        number_format = oas_property.get('number_format')
        number_minimum = oas_property.get('number_minimum')
        number_exclusive_minimum = oas_property.get('number_exclusive_minimum')
        number_maximum = oas_property.get('number_maximum')
        number_exclusive_maximum = oas_property.get('number_exclusive_maximum')
        number_multiple_of = oas_property.get('number_multiple_of')
        if number_format is not None:
            validation_extras['format'] = number_format
        if number_minimum is not None:
            validation_extras['minimum'] = float(number_minimum)
        if number_exclusive_minimum is not None:
            validation_extras['exclusiveMinimum'] = str2bool(
                number_exclusive_minimum)
        if number_maximum is not None:
            validation_extras['maximum'] = float(number_maximum)
        if number_exclusive_maximum is not None:
            validation_extras['exclusiveMaximum'] = str2bool(
                number_exclusive_maximum)
        if number_multiple_of is not None:
            validation_extras['multipleOf'] = float(number_multiple_of)
        return {**property_spec, **validation_extras}
    elif oas_property_type == 'integer':
        property_spec['type'] = 'integer'
        integer_format = oas_property.get('integer_format')
        integer_minimum = oas_property.get('integer_minimum')
        integer_exclusive_minimum = oas_property.get(
            'integer_exclusive_minimum')
        integer_maximum = oas_property.get('integer_maximum')
        integer_exclusive_maximum = oas_property.get(
            'integer_exclusive_maximum')
        integer_multiple_of = oas_property.get('integer_multiple_of')
        if integer_format is not None:
            validation_extras['format'] = integer_format
        if integer_minimum is not None:
            validation_extras['minimum'] = int(integer_minimum)
        if integer_exclusive_minimum is not None:
            validation_extras['exclusiveMinimum'] = str2bool(
                integer_exclusive_minimum)
        if integer_maximum is not None:
            validation_extras['maximum'] = int(integer_maximum)
        if integer_exclusive_maximum is not None:
            validation_extras['exclusiveMaximum'] = str2bool(
                integer_exclusive_maximum)
        if integer_multiple_of is not None:
            validation_extras['multipleOf'] = float(integer_multiple_of)
        return {**property_spec, **validation_extras}
    elif oas_property_type == 'boolean':
        property_spec['type'] = 'boolean'
        return {**property_spec, **validation_extras}
    # 'null' is a valid type in JSON Schema
    elif oas_property_type == 'null':
        property_spec['type'] = 'null'
        return {**property_spec, **validation_extras}
    elif oas_property_type == 'array':
        property_spec['type'] = 'array'
        oas_polymorphic_properties = OasPolymorphicProperty.findDict(
            property_id=oas_property.get('id'))
        # NOTE(review): if several partials exist, each iteration overwrites
        # 'items', so only the last partial wins — confirm this is intended.
        for oas_polymorphic_property in oas_polymorphic_properties:
            property_spec['items'] = property_id_to_spec(
                oas_polymorphic_property.get('partial_property_id'))
        return {**property_spec, **validation_extras}
    elif oas_property_type == 'object':
        property_spec['type'] = 'object'
        oas_polymorphic_properties = OasPolymorphicProperty.findDict(
            property_id=oas_property.get('id'))
        if len(oas_polymorphic_properties) > 0:
            property_spec['properties'] = {}
            required_properties = []
            for oas_polymorphic_property in oas_polymorphic_properties:
                partial_property_id = oas_polymorphic_property.get(
                    'partial_property_id')
                partial_property = OasProperty.findDictById(
                    partial_property_id)
                partial_property_name = partial_property.get('property_name')
                partial_property_required = partial_property.get(
                    'object_required')
                if partial_property_required is not None and str2bool(
                        partial_property_required):
                    required_properties.append(partial_property_name)
                property_spec['properties'][
                    partial_property_name] = property_id_to_spec(
                        partial_property_id)
            if len(required_properties) > 0:
                property_spec['required'] = required_properties
        additional_properties = oas_property.get(
            'object_additional_properties')
        if additional_properties is not None:
            property_spec['additionalProperties'] = str2bool(
                additional_properties)
        return {**property_spec, **validation_extras}
    elif oas_property_type == 'oneOf':
        property_spec['oneOf'] = []
        oas_polymorphic_properties = OasPolymorphicProperty.findDict(
            property_id=oas_property.get('id'))
        for oas_polymorphic_property in oas_polymorphic_properties:
            partial_property_id = oas_polymorphic_property.get(
                'partial_property_id')
            property_spec['oneOf'].append(
                property_id_to_spec(partial_property_id))
        discriminator_property_name = oas_property.get(
            'one_of_discriminator_property_name')
        if discriminator_property_name is not None:
            property_spec['discriminator'] = {
                'propertyName': discriminator_property_name,
                'mapping': {}
            }
            oas_discriminator_mappings = OasDiscriminatorMapping.findDict(
                property_id=oas_property.get('id'))
            for oas_discriminator_mapping in oas_discriminator_mappings:
                discriminator_property_value = oas_discriminator_mapping.get(
                    'discriminator_property_value')
                ref_schema_id = oas_discriminator_mapping.get('ref_schema_id')
                ref_schema_name = OasSchema.findDictById(ref_schema_id).get(
                    'schema_name')
                property_spec['discriminator']['mapping'][
                    discriminator_property_value] = schema_name2ref(
                        ref_schema_name)
        return {**property_spec, **validation_extras}
    elif oas_property_type == 'anyOf':
        property_spec['anyOf'] = []
        oas_polymorphic_properties = OasPolymorphicProperty.findDict(
            property_id=oas_property.get('id'))
        for oas_polymorphic_property in oas_polymorphic_properties:
            partial_property_id = oas_polymorphic_property.get(
                'partial_property_id')
            property_spec['anyOf'].append(
                property_id_to_spec(partial_property_id))
        return {**property_spec, **validation_extras}
    elif oas_property_type == 'allOf':
        property_spec['allOf'] = []
        oas_polymorphic_properties = OasPolymorphicProperty.findDict(
            property_id=oas_property.get('id'))
        for oas_polymorphic_property in oas_polymorphic_properties:
            partial_property_id = oas_polymorphic_property.get(
                'partial_property_id')
            property_spec['allOf'].append(
                property_id_to_spec(partial_property_id))
        return {**property_spec, **validation_extras}
    elif oas_property_type == 'not':
        oas_polymorphic_properties = OasPolymorphicProperty.findDict(
            property_id=oas_property.get('id'))
        partial_property_id = oas_polymorphic_properties[0].get(
            'partial_property_id')
        property_spec['not'] = property_id_to_spec(partial_property_id)
        return {**property_spec, **validation_extras}
    elif oas_property_type == '$ref':
        oas_polymorphic_properties = OasPolymorphicProperty.findDict(
            property_id=oas_property.get('id'))
        ref_schema_id = oas_polymorphic_properties[0].get('ref_schema_id')
        ref_schema = OasSchema.findDictById(ref_schema_id)
        ref_schema_name = ref_schema.get('schema_name')
        property_spec['$ref'] = schema_name2ref(ref_schema_name)
        return {**property_spec, **validation_extras}
    else:
        raise Exception('Invalid property.')
def silenceeval(filename):
    """Interactively ask whether this file has SILENCE problems.

    Returns the operator's y/n answer converted to bool via u.str2bool.
    """
    return u.str2bool(input("Does this file have SILENCE problems? [y/n]"))
def humeval(filename):
    """Interactively ask whether this file has HUM problems.

    Returns the operator's y/n answer converted to bool via u.str2bool.
    """
    return u.str2bool(input("Does this file have HUM problems? [y/n]"))
def snreval(filename):
    """Interactively ask whether this file has SNR problems.

    Returns the operator's y/n answer converted to bool via u.str2bool.
    """
    return u.str2bool(input("Does this file have SNR problems? [y/n]"))
def __init__(self, parent):
    """Initialise the widget and hide it when the 'view_statusbar' setting
    is disabled in the configuration."""
    super().__init__(parent)
    self.parent = parent
    statusbar_enabled = str2bool(config.get_setting('view', 'view_statusbar'))
    if not statusbar_enabled:
        self.hide()
"proba.asm.contig"),"RunPipeline") usecontigs = True selected_programs["assemble"] = "none" bowtie_mapping = 1 elif "format:" in line: if f1 and not libadded: nread1 = utils.Read(format,f1,mated,interleaved) readobjs.append(nread1) nread2 = "" nlib = utils.readLib(format,mmin,mmax,nread1,nread2,mated,interleaved,innie,linkerType) readlibs.append(nlib) libadded = False format = line.replace("\n","").split("\t")[-1] elif "mated:" in line: mated = utils.str2bool(line.replace("\n","").split("\t")[-1]) elif "interleaved:" in line: interleaved = utils.str2bool(line.replace("\n","").split("\t")[-1]) elif "innie:" in line: innie = utils.str2bool(line.replace("\n","").split("\t")[-1]) elif "linker:" in line: linkerType = line.replace("\n","").split("\t")[-1] elif "f1:" in line: data = line.split("\t") fqlibs[data[0]] = data[1] f1 = "%s/Preprocess/in/%s"%(settings.rundir,data[1].split(",")[0]) inf = data[1].split(",") mean = int(inf[3]) stdev = int(inf[4])
def web_create_ticket_comment(request):
    """JSON endpoint: add a comment to a ticket, optionally closing or
    reopening it.

    Returns a response built from {'success': bool, 'ticket_comment_id': ...};
    failures are reported as success=False.  (Docstring fixed: it previously
    described an unrelated organizations endpoint.)
    """
    start = time.time()
    result = {'success': False}
    try:
        user, token = check_auth(request)
        ticket_id = request.POST['ticket_id']
        contents = request.POST['contents']
        # 'close' and 'reopen' are optional flags; default False when absent.
        close = False
        try:
            close = str2bool(request.POST['close'])
        except Exception:
            pass
        reopen = False
        try:
            reopen = str2bool(request.POST['reopen'])
        except Exception:
            pass
        # A reopen may carry no text; any other comment must have contents.
        # (was: `reopen == False` — redundant comparison to a bool)
        if not reopen and contents.strip() == '':
            raise Exception('no contents to comment')
        start_get_ticket = time.time()
        ticket = get_ticket(user.id, ticket_id)
        # Debug timing instrumentation; print(...) form works on Py2 and Py3.
        print("\n\ncreate_ticket_comment.json().get_ticket() executed in {0} seconds.\n\n".format(
            time.time() - start_get_ticket))
        if ticket is None:
            raise Exception('invalid ticket id')
        start_create_new_ticket_comment = time.time()
        ticket_comment = create_new_ticket_comment(
            user=user,
            ticket_id=ticket_id,
            contents=contents,
            close=close,
            reopen=reopen,
        )
        print("\n\ncreate_ticket_comment.json().create_new_ticket_comment() executed in {0} seconds.\n\n".format(
            time.time() - start_create_new_ticket_comment))
        result['ticket_comment_id'] = ticket_comment.id
        result['success'] = True
    except Exception:
        # Best-effort API: report failure via the payload.
        pass
    print("\n\ncreate_ticket_comment.json() executed in {0} seconds.\n\n".format(
        time.time() - start))
    return make_response(result)
def get_test_type(self, args):
    """Set self.test_type from args.test_type: a truthy string selects
    TestType.positive, anything else TestType.negative."""
    # str2bool already yields a bool; comparing `== True` was redundant.
    if utils.str2bool(args.test_type):
        self.test_type = TestType.positive
    else:
        self.test_type = TestType.negative
where <batchConfig.txt> contains configuration options """ import os import sys import utils import time import commands from batch.batchSubmission import BatchSubmission # setup config config = sys.argv[1] cfg = utils.read_config(config) # reset boolean values cfg['test'] = utils.str2bool( cfg['test'] ) cfg['submit'] = utils.str2bool( cfg['submit'] ) cfg['verbose'] = utils.str2bool( cfg['verbose'] ) # set 'global' options date = time.strftime("%d%b%Y") if cfg['date']=='today' else cfg['date'] cmaConfig = utils.read_config( cfg['config'] ) # get the selection from the cmaConfig file # loop over datasets to process, e.g., ttbar or qcd batch = BatchSubmission() ## -- set arguments for the batch jobs -- ## batch.username = cfg['username'] batch.executable = cfg['executable'] # executable to run for the program batch.test = cfg['test'] # just submit one job if testing
def read(self, comment="#"):
    """Populate this workflow from its <name>.ini file found on self.path.

    Parses the header keys (programs/steps/md5/inherit/modify/command),
    merging in any inherited workflows, then reads the library/config
    section via readConfigInfo.  Raises JobSignalledBreak when no workflow
    file is found.  Sets self.isValid False on an md5 mismatch.
    """
    self.isValid = True
    workflow_file = None
    # NOTE(review): every matching path is opened and the *last* one wins;
    # earlier handles are leaked — confirm whether first-match was intended.
    for p in self.path:
        if os.path.exists("%s%s%s.ini" % (p, os.sep, self.name.lower())):
            workflow_file = open(
                "%s%s%s.ini" % (p, os.sep, self.name.lower()), 'r')
    if workflow_file is None:
        print("Error: cannot open workflow %s" % (self.name))
        raise (JobSignalledBreak)
    lastPos = 0
    while True:
        lastPos = workflow_file.tell()
        line = workflow_file.readline()
        (line, sep, commentLine) = line.partition(comment)
        if len(commentLine) != 0:
            # Skip any line carrying a comment (note: content before the
            # comment character on the same line is dropped too).
            continue
        if "programs:" in line:
            splitLine = line.replace("programs:", "").strip().split()
            for prog in splitLine:
                self.programList.add(prog.lower())
        # NOTE(review): this is a separate `if`, so a programs-only line
        # falls through to the final else and stops header parsing —
        # reconstructed from collapsed source; confirm against history.
        if "steps:" in line:
            self.stepList = []
            splitLine = line.replace("steps:", "").strip().split()
            for step in splitLine:
                # FIX: was `if step not in stepList:` — an undefined local
                # name that raised NameError whenever a steps: line appeared.
                if step not in self.stepList:
                    self.stepList.append(step.lower())
        elif "md5:" in line:
            self.md5 = line.replace("md5:", "").strip()
        elif "inherit:" in line:
            splitLine = line.replace("inherit:", "").strip().split()
            for master in splitLine:
                self.super.append(master)
                masterFlow = None
                if master in _workflows:
                    masterFlow = _workflows[master]
                else:
                    masterFlow = Workflow(
                        line.replace("inherit:", "").strip(), self.path)
                    masterFlow.read()
                # Merge the inherited workflow's settings into ours.
                self.programList.update(masterFlow.programList)
                self.readlibs.extend(masterFlow.readlibs)
                self.commandList = (
                    masterFlow.commandList + " " + self.commandList).strip()
                self.asmcontigs.extend(masterFlow.asmcontigs)
                self.isModifiable = masterFlow.isModifiable or self.isModifiable
        elif "modify:" in line:
            splitLine = line.replace("modify:", "").strip()
            self.isModifiable = self.isModifiable or str2bool(splitLine)
        elif "command:" in line:
            splitLine = line.replace("command:", "").strip()
            self.commandList = (self.commandList + " " + splitLine).strip()
        else:
            # First non-header line: rewind so the config reader sees it.
            break
    workflow_file.seek(lastPos)
    # Now read the library info
    (readasm, readl, ignore) = readConfigInfo(workflow_file)
    self.readlibs.extend(readl)
    self.asmcontigs.extend(readasm)
    workflow_file.close()
    if self.md5 is not None:
        if self.md5 != getMD5():
            self.isValid = False
    if _verbose:
        print("Read workflow %s" % (self.name))
        print("Inherits from %s" % (",".join(self.super)))
        print("Supported programs %s" % (",".join(self.programList)))
        print("Asm contigs %s" % (",".join(self.asmcontigs)))
        for lib in self.readlibs:
            print("Lib %s %s %s %s %s %s %s %s %s" % (
                lib.id, lib.format, lib.mean, lib.stdev, lib.mmin,
                lib.mmax, lib.mated, lib.interleaved, lib.innie))
def main_body():
    '''Main body of this file.

    Reads noisyspeech_synthesizer.cfg, validates the clean/noise data
    directories, builds the synthesis parameter dict, runs main_gen(), and
    writes log files plus clipping/low-activity statistics.
    '''
    parser = argparse.ArgumentParser()
    # Configurations: read noisyspeech_synthesizer.cfg and gather inputs
    parser.add_argument(
        '--cfg', default='noisyspeech_synthesizer.cfg',
        help='Read noisyspeech_synthesizer.cfg for all the details')
    parser.add_argument('--cfg_str', type=str, default='noisy_speech')
    args = parser.parse_args()
    params = dict()
    params['args'] = args
    cfgpath = os.path.join(os.path.dirname(__file__), args.cfg)
    assert os.path.exists(cfgpath), f'No configuration file as [{cfgpath}]'
    cfg = CP.ConfigParser()
    cfg._interpolation = CP.ExtendedInterpolation()
    cfg.read(cfgpath)
    params['cfg'] = cfg._sections[args.cfg_str]
    cfg = params['cfg']
    clean_dir = os.path.join(os.path.dirname(__file__), 'CleanSpeech')
    if cfg['speech_dir'] != 'None':
        clean_dir = cfg['speech_dir']
    if not os.path.exists(clean_dir):
        assert False, ('Clean speech data is required')
    noise_dir = os.path.join(os.path.dirname(__file__), 'Noise')
    if cfg['noise_dir'] != 'None':
        noise_dir = cfg['noise_dir']
    # FIX: was `if not os.path.exists:` — the bare function object is always
    # truthy, so the noise-directory existence check never ran.
    if not os.path.exists(noise_dir):
        assert False, ('Noise data is required')
    params['fs'] = int(cfg['sampling_rate'])
    params['audioformat'] = cfg['audioformat']
    params['audio_length'] = float(cfg['audio_length'])
    params['silence_length'] = float(cfg['silence_length'])
    params['total_hours'] = float(cfg['total_hours'])
    # FIX: the second condition tested fileindex_start twice; it must test
    # fileindex_end, otherwise an unset end index raised ValueError below.
    if cfg['fileindex_start'] != 'None' and cfg['fileindex_end'] != 'None':
        params['num_files'] = int(cfg['fileindex_end']) - int(
            cfg['fileindex_start'])
        params['fileindex_start'] = int(cfg['fileindex_start'])
        params['fileindex_end'] = int(cfg['fileindex_end'])
    else:
        params['num_files'] = int(
            (params['total_hours'] * 60 * 60) / params['audio_length'])
        params['fileindex_start'] = 0
        params['fileindex_end'] = params['num_files']
    print('Number of files to be synthesized:', params['num_files'])
    params['is_test_set'] = utils.str2bool(cfg['is_test_set'])
    params['clean_activity_threshold'] = float(cfg['clean_activity_threshold'])
    params['noise_activity_threshold'] = float(cfg['noise_activity_threshold'])
    params['snr_lower'] = int(cfg['snr_lower'])
    params['snr_upper'] = int(cfg['snr_upper'])
    params['randomize_snr'] = utils.str2bool(cfg['randomize_snr'])
    params['target_level_lower'] = int(cfg['target_level_lower'])
    params['target_level_upper'] = int(cfg['target_level_upper'])
    if 'snr' in cfg.keys():
        params['snr'] = int(cfg['snr'])
    else:
        params['snr'] = int((params['snr_lower'] + params['snr_upper']) / 2)
    params['noisyspeech_dir'] = utils.get_dir(cfg, 'noisy_destination',
                                              'noisy')
    params['clean_proc_dir'] = utils.get_dir(cfg, 'clean_destination',
                                             'clean')
    params['noise_proc_dir'] = utils.get_dir(cfg, 'noise_destination',
                                             'noise')
    if 'speech_csv' in cfg.keys() and cfg['speech_csv'] != 'None':
        cleanfilenames = pd.read_csv(cfg['speech_csv'])
        cleanfilenames = cleanfilenames['filename']
    else:
        cleanfilenames = glob.glob(
            os.path.join(clean_dir, params['audioformat']))
    params['cleanfilenames'] = cleanfilenames
    shuffle(params['cleanfilenames'])
    params['num_cleanfiles'] = len(params['cleanfilenames'])
    # If there are .wav files in noise_dir, use those; otherwise the noise
    # files are organized into subdirectories by type, so collect the names
    # of the non-excluded subdirectories.
    if 'noise_csv' in cfg.keys() and cfg['noise_csv'] != 'None':
        noisefilenames = pd.read_csv(cfg['noise_csv'])
        noisefilenames = noisefilenames['filename']
    else:
        noisefilenames = glob.glob(
            os.path.join(noise_dir, params['audioformat']))
    if len(noisefilenames) != 0:
        shuffle(noisefilenames)
        params['noisefilenames'] = noisefilenames
    else:
        noisedirs = glob.glob(os.path.join(noise_dir, '*'))
        if cfg['noise_types_excluded'] != 'None':
            dirstoexclude = cfg['noise_types_excluded'].split(',')
            for dirs in dirstoexclude:
                noisedirs.remove(dirs)
        shuffle(noisedirs)
        params['noisedirs'] = noisedirs
    # Call main_gen() to generate audio
    clean_source_files, clean_clipped_files, clean_low_activity_files, \
        noise_source_files, noise_clipped_files, noise_low_activity_files = \
        main_gen(params)
    # Create log directory if needed, and write log files of clipped and
    # low activity files
    log_dir = utils.get_dir(cfg, 'log_dir', 'Logs')
    utils.write_log_file(log_dir, 'source_files.csv',
                         clean_source_files + noise_source_files)
    utils.write_log_file(log_dir, 'clipped_files.csv',
                         clean_clipped_files + noise_clipped_files)
    utils.write_log_file(log_dir, 'low_activity_files.csv',
                         clean_low_activity_files + noise_low_activity_files)
    # Compute and print stats about percentage of clipped and low activity
    # files
    total_clean = len(clean_source_files) + len(clean_clipped_files) + len(
        clean_low_activity_files)
    total_noise = len(noise_source_files) + len(noise_clipped_files) + len(
        noise_low_activity_files)
    pct_clean_clipped = round(len(clean_clipped_files) / total_clean * 100, 1)
    pct_noise_clipped = round(len(noise_clipped_files) / total_noise * 100, 1)
    pct_clean_low_activity = round(
        len(clean_low_activity_files) / total_clean * 100, 1)
    pct_noise_low_activity = round(
        len(noise_low_activity_files) / total_noise * 100, 1)
    print("Of the " + str(total_clean) + " clean speech files analyzed, " +
          str(pct_clean_clipped) + "% had clipping, and " +
          str(pct_clean_low_activity) + "% had low activity " + "(below " +
          str(params['clean_activity_threshold'] * 100) +
          "% active percentage)")
    print("Of the " + str(total_noise) + " noise files analyzed, " +
          str(pct_noise_clipped) + "% had clipping, and " +
          str(pct_noise_low_activity) + "% had low activity " + "(below " +
          str(params['noise_activity_threshold'] * 100) +
          "% active percentage)")
def draw(self, ax):
    """Apply the stored styling (spines, grid, labels, limits, annotations,
    ticks, legend, background) to the matplotlib Axes *ax*."""
    # Spine traits are stored as 'direction-trait' keys, e.g. 'left-color'.
    for k, value in self.spines.items():
        k = k.split("-")
        direction = k[0]
        trait = k[1]
        if trait == "color":
            ax.spines[direction].set_color(value)
        elif trait == "linewidth":
            ax.spines[direction].set_linewidth(float(value))
        elif trait == "visible":
            ax.spines[direction].set_visible(utils.str2bool(value))
    # grid
    # good examples:
    # http://jonathansoma.com/lede/data-studio/matplotlib/adding-grid-lines-to-a-matplotlib-chart/
    if self.grid == "on":
        ax.set_axisbelow(True)
        if self.grid_which in ["both", "minor"]:
            ax.minorticks_on()
            ax.tick_params(axis='x', which='minor', bottom=False)
        if self.grid_which == "both":
            ax.grid(which="major", axis=self.grid_axis, linestyle="-")
            ax.grid(which="minor", axis=self.grid_axis, linestyle=":")
        elif self.grid_which == "minor":
            ax.grid(which="minor", axis=self.grid_axis, linestyle=":")
        elif self.grid_which == "major":
            # FIX: this branch previously drew minor gridlines
            # (which="minor") instead of the requested major ones.
            ax.grid(which="major", axis=self.grid_axis, linestyle="-")
    else:
        ax.grid(False)
    if self.xlabel != "":
        ax.set_xlabel(self.xlabel.decode('string_escape'))
    if self.ylabel != "":
        ax.set_ylabel(self.ylabel)
    if self.title != "":
        ax.set_title(self.title)
    if self.xlim is not None:
        ax.set_xlim(self.xlim)
    if self.ylim is not None:
        ax.set_ylim(self.ylim)
    if len(self.hline_list) > 0:
        for h in self.hline_list:
            val = float(h[0])
            ax.axhline(val, color=h[1], ls=h[2], lw=h[3])
    if len(self.vline_list) > 0:
        for h in self.vline_list:
            val = float(h[0])
            ax.axvline(val, color=h[1], ls=h[2], lw=h[3])
    if self.yscale != "":
        ax.set_yscale(self.yscale)
    # Annotations: each entry is (text, "(x,y)", "(x_text,y_text)", arrowspec)
    for annot in self.annotate_params:
        ann_text = annot[0]
        ann_xy = annot[1].strip()
        ann_xy = ann_xy[1:-1].split(",")
        ann_x = float(ann_xy[0].strip())
        ann_y = float(ann_xy[1].strip())
        ann_xy_text = annot[2].strip()
        ann_xy_text = ann_xy_text[1:-1].split(",")
        ann_x_text = float(ann_xy_text[0].strip())
        ann_y_text = float(ann_xy_text[1].strip())
        # arrowspec is "facecolor,shrink,arrowstyle,connectionstyle" with
        # empty fields omitted from arrowprops.
        arrowprops = dict()
        tmp = annot[3].split(",")[0].strip()
        if len(tmp) > 0:
            arrowprops["facecolor"] = tmp
        tmp = annot[3].split(",")[1].strip()
        if len(tmp) > 0:
            arrowprops["shrink"] = tmp
        tmp = annot[3].split(",")[2].strip()
        if len(tmp) > 0:
            arrowprops["arrowstyle"] = tmp
        tmp = annot[3].split(",")[3].strip()
        if len(tmp) > 0:
            arrowprops["connectionstyle"] = tmp
        # Visit following link for different types of arrows:
        # https://matplotlib.org/users/annotations.html
        ax.annotate(ann_text, xy=(ann_x, ann_y),
                    xytext=(ann_x_text, ann_y_text),
                    arrowprops=arrowprops)
    if self.xticks_label_show == "off":
        plt.setp(ax.get_xticklabels(), visible=False)
    if self.yticks_label_show == "off":
        plt.setp(ax.get_yticklabels(), visible=False)
    if self.xticks_pad is not None:
        ax.tick_params(axis='x', pad=self.xticks_pad)
    if self.yticks_pad is not None:
        ax.tick_params(axis='y', pad=self.yticks_pad)
    if self.background_color != "":
        ax.patch.set_facecolor(self.background_color)
    if self.legend == "on":
        ax.legend()
    # NOTE(review): the title is set a second time here with string_escape
    # decoding (overriding the earlier set_title) — behavior preserved.
    if self.title != "":
        ax.set_title(self.title.decode('string_escape'))
def __init__(self, parent=None):
    """Build the QTierna main window: settings, reminder table, category
    tree, actions, worker thread, system-tray icon, context menus and the
    initial data load.  Order matters: widgets must exist (setupUi) before
    signals are connected, and the worker thread is started before the tray
    icon so due reminders are checked as soon as the UI is up.
    """
    super(Main, self).__init__(parent)
    self.setupUi(self)
    # Create the database tables if they don't exist
    db_create_tables()
    # ################### Saved app settings ########################
    self.settings = QtCore.QSettings(QtCore.QSettings.IniFormat,
                                     QtCore.QSettings.UserScope,
                                     "QTierna", "QTierna")
    # QSettings stores booleans as strings, hence the str2bool round-trip.
    self.minimizeToTray = str2bool(
        self.settings.value("minimizeToTray", True))  # Exit behaviour
    # Fall back to the OS-local zone when no zone has been saved yet.
    self.time_zone = pytz.timezone(
        self.settings.value("time_zone", get_localzone().zone))
    logger.debug('Settings loaded from %s. Initialized time_zone: %s'
                 % (self.settings.fileName(), self.time_zone))
    # ############# Reminders table config ##########################
    self.soon_color = QtGui.QColor(255, 0, 0, 127)  # Reminder soon due
    self.mainTableWidget.setEditTriggers(
        QtGui.QAbstractItemView.NoEditTriggers)  # Don't let user edit
    # Column widths: 2/10, 2/10 and 6/10 of the table width.
    total_width = self.mainTableWidget.width()
    self.mainTableWidget.setColumnWidth(0, (total_width / 10.0) * 2)
    self.mainTableWidget.setColumnWidth(1, (total_width / 10.0) * 2)
    self.mainTableWidget.setColumnWidth(2, (total_width / 10.0) * 6)
    # Hidden UTC and reminder ID columns
    self.mainTableWidget.setColumnWidth(3, 0)
    self.mainTableWidget.setColumnHidden(3, True)
    self.mainTableWidget.setColumnWidth(4, 0)
    self.mainTableWidget.setColumnHidden(4, True)
    # Table signals
    self.mainTableWidget.itemDoubleClicked.connect(self.table_dbl_click)
    # ########### Categories tree config #############################
    # Font bold to oblig cats, not sure why my qt designerui didnt apply
    myFont = QtGui.QFont()
    myFont.setBold(True)
    self.mainTreeWidget.topLevelItem(0).setFont(0, myFont)
    self.mainTreeWidget.topLevelItem(0).child(0).setFont(0, myFont)
    self.mainTreeWidget.topLevelItem(0).child(1).setFont(0, myFont)
    self.mainTreeWidget.topLevelItem(0).child(2).setFont(0, myFont)
    # Tree signals
    self.mainTreeWidget.itemSelectionChanged.connect(self.refresh_table)
    # ##################### Actions ####################################
    self.actionAdd_Reminder.triggered.connect(
        self.addedit_rem_action_triggered)
    self.actionEdit_Reminder.triggered.connect(
        self.addedit_rem_action_triggered)
    self.actionRemove_Reminder.triggered.connect(
        self.remove_rem_action_triggered)
    self.actionAdd_Category.triggered.connect(
        self.addedit_cat_action_triggered)
    self.actionEdit_Category.triggered.connect(
        self.addedit_cat_action_triggered)
    self.actionDelete_Category.triggered.connect(
        self.remove_cat_action_triggered)
    self.actionExport_Data.triggered.connect(self.export_action_triggered)
    self.actionImport_Data.triggered.connect(self.import_action_triggered)
    self.actionPreferences.triggered.connect(
        self.preferences_action_triggered)
    self.actionAbout.triggered.connect(self.about_action_triggered)
    # ##################### Worker thread #############################
    # Worker to run the eternal loop to check for due reminders
    # I'm doing it with moveToThread in this manner, rather than
    # just making the Worker class inherit from QThread
    # as apparently this is best practice now:
    # https://mayaposch.wordpress.com/2011/11/01/how-to-really-truly-use-qthreads-the-full-explanation/
    # Alternatively put this in the if __main__ section with minor alts
    self.workerThread = QtCore.QThread()
    self.worker = Worker()
    self.worker.moveToThread(self.workerThread)
    self.workerThread.started.connect(self.worker.check_reminders_loop)
    self.worker.reminderisdue.connect(self.launch_notification)
    self.worker.refresh_human_dates.connect(
        self.refresh_human_dates)  # Keep the humanized due dates refreshed
    self.workerThread.start()
    # #################### SysTray icon, menu ###########################
    # Init QSystemTrayIcon
    self.tray_icon = QtGui.QSystemTrayIcon(self)
    tray_icon = QtGui.QIcon()
    tray_icon.addPixmap(
        QtGui.QPixmap(":/icons/icons/alarm-clock-white.png"),
        QtGui.QIcon.Normal, QtGui.QIcon.Off)
    # self.tray_icon.setIcon(QtGui.QIcon("icons/alarm-clock-white.png"))
    self.tray_icon.setIcon(tray_icon)
    show_action = QtGui.QAction("Show", self)
    quit_action = QtGui.QAction("Exit", self)
    hide_action = QtGui.QAction("Hide", self)
    show_action.triggered.connect(self.show)
    hide_action.triggered.connect(self.hide)
    quit_action.triggered.connect(QtGui.qApp.quit)
    tray_menu = QtGui.QMenu()
    tray_menu.addAction(show_action)
    tray_menu.addAction(hide_action)
    tray_menu.addAction(quit_action)
    self.tray_icon.setContextMenu(tray_menu)
    self.tray_icon.show()
    # ################### CONTEXT MENUS ##################################
    # Popup context menu when right-click on tree widget for add/edit/rem cat
    self.mainTreeWidget.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
    self.mainTreeWidget.customContextMenuRequested.connect(
        self.on_tree_context_menu)
    self.tree_popup_menu = QtGui.QMenu(self)
    self.tree_popup_menu.addAction(self.actionAdd_Category)
    self.tree_popup_menu.addAction(self.actionEdit_Category)
    self.tree_popup_menu.addSeparator()
    self.tree_popup_menu.addAction(self.actionDelete_Category)
    # Popup context menu when right-click on row on the table widget for
    # add/edit/remove reminder
    self.mainTableWidget.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
    self.mainTableWidget.customContextMenuRequested.connect(
        self.on_table_context_menu)
    self.table_popup_menu = QtGui.QMenu(self)
    self.table_popup_menu.addAction(self.actionAdd_Reminder)
    self.table_popup_menu.addAction(self.actionEdit_Reminder)
    self.table_popup_menu.addSeparator()
    self.table_popup_menu.addAction(self.actionRemove_Reminder)
    # ############# Install event filters ###############################
    self.mainTreeWidget.installEventFilter(self)
    self.mainTableWidget.installEventFilter(self)
    # ############ Initial load of the tree and table ###################
    self.refresh_tree()
    self.refresh_table()
def main_body():
    '''Main body of this file.

    Reads the synthesizer .cfg file, assembles the clean-speech file list
    (optionally augmented with singing / emotion / Mandarin data), the noise
    file list (or noise sub-directories), and the RIR table, then calls
    main_gen() and writes log files plus clipping / low-activity statistics.
    '''
    parser = argparse.ArgumentParser()
    # Configurations: read noisyspeech_synthesizer.cfg and gather inputs
    parser.add_argument('--cfg', default='noisyspeech_synthesizer.cfg',
                        help='Read noisyspeech_synthesizer.cfg for all the details')
    parser.add_argument('--cfg_str', type=str, default='noisy_speech')
    args = parser.parse_args()

    params = dict()
    params['args'] = args
    cfgpath = os.path.join(os.path.dirname(__file__), args.cfg)
    assert os.path.exists(cfgpath), f'No configuration file as [{cfgpath}]'

    cfg = CP.ConfigParser()
    cfg._interpolation = CP.ExtendedInterpolation()
    cfg.read(cfgpath)
    params['cfg'] = cfg._sections[args.cfg_str]
    cfg = params['cfg']

    clean_dir = os.path.join(os.path.dirname(__file__), 'datasets/clean')
    if cfg['speech_dir'] != 'None':
        clean_dir = cfg['speech_dir']
    if not os.path.exists(clean_dir):
        assert False, ('Clean speech data is required')

    noise_dir = os.path.join(os.path.dirname(__file__), 'datasets/noise')
    if cfg['noise_dir'] != 'None':
        noise_dir = cfg['noise_dir']
    # BUG FIX: the original tested `if not os.path.exists:` — the bare
    # function object, which is always truthy — so a missing noise
    # directory was never detected.  Now the directory is actually checked.
    if not os.path.exists(noise_dir):
        assert False, ('Noise data is required')

    params['fs'] = int(cfg['sampling_rate'])
    params['audioformat'] = cfg['audioformat']
    params['audio_length'] = float(cfg['audio_length'])
    params['silence_length'] = float(cfg['silence_length'])
    params['total_hours'] = float(cfg['total_hours'])

    # clean singing speech
    params['use_singing_data'] = int(cfg['use_singing_data'])
    params['clean_singing'] = str(cfg['clean_singing'])
    params['singing_choice'] = int(cfg['singing_choice'])

    # clean emotional speech
    params['use_emotion_data'] = int(cfg['use_emotion_data'])
    params['clean_emotion'] = str(cfg['clean_emotion'])

    # clean mandarin speech
    params['use_mandarin_data'] = int(cfg['use_mandarin_data'])
    params['clean_mandarin'] = str(cfg['clean_mandarin'])

    # rir
    params['rir_choice'] = int(cfg['rir_choice'])
    params['lower_t60'] = float(cfg['lower_t60'])
    params['upper_t60'] = float(cfg['upper_t60'])
    params['rir_table_csv'] = str(cfg['rir_table_csv'])
    params['clean_speech_t60_csv'] = str(cfg['clean_speech_t60_csv'])

    # Either an explicit file-index window, or derive the count from hours.
    if cfg['fileindex_start'] != 'None' and cfg['fileindex_end'] != 'None':
        params['num_files'] = int(cfg['fileindex_end']) - int(cfg['fileindex_start'])
        params['fileindex_start'] = int(cfg['fileindex_start'])
        params['fileindex_end'] = int(cfg['fileindex_end'])
    else:
        params['num_files'] = int((params['total_hours'] * 60 * 60) / params['audio_length'])
        params['fileindex_start'] = 0
        params['fileindex_end'] = params['num_files']

    print('Number of files to be synthesized:', params['num_files'])

    params['is_test_set'] = utils.str2bool(cfg['is_test_set'])
    params['clean_activity_threshold'] = float(cfg['clean_activity_threshold'])
    params['noise_activity_threshold'] = float(cfg['noise_activity_threshold'])
    params['snr_lower'] = int(cfg['snr_lower'])
    params['snr_upper'] = int(cfg['snr_upper'])
    params['randomize_snr'] = utils.str2bool(cfg['randomize_snr'])
    params['target_level_lower'] = int(cfg['target_level_lower'])
    params['target_level_upper'] = int(cfg['target_level_upper'])

    if 'snr' in cfg.keys():
        params['snr'] = int(cfg['snr'])
    else:
        params['snr'] = int((params['snr_lower'] + params['snr_upper']) / 2)

    params['noisyspeech_dir'] = utils.get_dir(cfg, 'noisy_destination', 'noisy')
    params['clean_proc_dir'] = utils.get_dir(cfg, 'clean_destination', 'clean')
    params['noise_proc_dir'] = utils.get_dir(cfg, 'noise_destination', 'noise')

    # Clean speech list: either from a CSV, or every .wav under clean_dir.
    if 'speech_csv' in cfg.keys() and cfg['speech_csv'] != 'None':
        cleanfilenames = pd.read_csv(cfg['speech_csv'])
        cleanfilenames = cleanfilenames['filename']
    else:
        cleanfilenames = []
        for path in Path(clean_dir).rglob('*.wav'):
            cleanfilenames.append(str(path.resolve()))
    shuffle(cleanfilenames)

    # add singing voice to clean speech
    if params['use_singing_data'] == 1:
        all_singing = []
        for path in Path(params['clean_singing']).rglob('*.wav'):
            all_singing.append(str(path.resolve()))
        if params['singing_choice'] == 1:  # male speakers
            mysinging = [s for s in all_singing if ("male" in s and "female" not in s)]
        elif params['singing_choice'] == 2:  # female speakers
            mysinging = [s for s in all_singing if "female" in s]
        elif params['singing_choice'] == 3:  # both male and female
            mysinging = all_singing
        else:  # default both male and female
            mysinging = all_singing
        shuffle(mysinging)
        if mysinging is not None:
            all_cleanfiles = cleanfilenames + mysinging
    else:
        all_cleanfiles = cleanfilenames

    # add emotion data to clean speech
    if params['use_emotion_data'] == 1:
        all_emotion = []
        for path in Path(params['clean_emotion']).rglob('*.wav'):
            all_emotion.append(str(path.resolve()))
        shuffle(all_emotion)
        if all_emotion is not None:
            all_cleanfiles = all_cleanfiles + all_emotion
    else:
        print('NOT using emotion data for training!')

    # add mandarin data to clean speech
    if params['use_mandarin_data'] == 1:
        all_mandarin = []
        for path in Path(params['clean_mandarin']).rglob('*.wav'):
            all_mandarin.append(str(path.resolve()))
        shuffle(all_mandarin)
        if all_mandarin is not None:
            all_cleanfiles = all_cleanfiles + all_mandarin
    else:
        print('NOT using non-english (Mandarin) data for training!')

    params['cleanfilenames'] = all_cleanfiles
    params['num_cleanfiles'] = len(params['cleanfilenames'])

    # If there are .wav files in noise_dir directory, use those
    # If not, that implies that the noise files are organized into subdirectories by type,
    # so get the names of the non-excluded subdirectories
    if 'noise_csv' in cfg.keys() and cfg['noise_csv'] != 'None':
        noisefilenames = pd.read_csv(cfg['noise_csv'])
        noisefilenames = noisefilenames['filename']
    else:
        noisefilenames = glob.glob(os.path.join(noise_dir, params['audioformat']))

    if len(noisefilenames) != 0:
        shuffle(noisefilenames)
        params['noisefilenames'] = noisefilenames
    else:
        noisedirs = glob.glob(os.path.join(noise_dir, '*'))
        if cfg['noise_types_excluded'] != 'None':
            dirstoexclude = cfg['noise_types_excluded'].split(',')
            # NOTE(review): glob returns full paths while the excluded names
            # come straight from the config — confirm they match, otherwise
            # list.remove() raises ValueError.
            for dirs in dirstoexclude:
                noisedirs.remove(dirs)
        shuffle(noisedirs)
        params['noisedirs'] = noisedirs

    # rir: load the RIR table and select real/synthetic IRs within the
    # configured T60 window.
    temp = pd.read_csv(params['rir_table_csv'], skiprows=[1], sep=',', header=None,
                       names=['wavfile', 'channel', 'T60_WB', 'C50_WB', 'isRealRIR'])
    rir_wav = temp['wavfile'][1:]  # 115413
    rir_channel = temp['channel'][1:]
    rir_t60 = temp['T60_WB'][1:]
    rir_isreal = temp['isRealRIR'][1:]

    rir_wav2 = [w.replace('\\', '/') for w in rir_wav]
    rir_channel2 = [w for w in rir_channel]
    rir_t60_2 = [w for w in rir_t60]
    rir_isreal2 = [w for w in rir_isreal]

    myrir = []
    mychannel = []
    myt60 = []
    lower_t60 = params['lower_t60']
    upper_t60 = params['upper_t60']

    if params['rir_choice'] == 1:  # real 3076 IRs
        real_indices = [i for i, x in enumerate(rir_isreal2) if x == "1"]
        chosen_i = []
        for i in real_indices:
            if (float(rir_t60_2[i]) >= lower_t60) and (float(rir_t60_2[i]) <= upper_t60):
                chosen_i.append(i)
        myrir = [rir_wav2[i] for i in chosen_i]
        mychannel = [rir_channel2[i] for i in chosen_i]
        myt60 = [rir_t60_2[i] for i in chosen_i]
    elif params['rir_choice'] == 2:  # synthetic 112337 IRs
        synthetic_indices = [i for i, x in enumerate(rir_isreal2) if x == "0"]
        chosen_i = []
        for i in synthetic_indices:
            if (float(rir_t60_2[i]) >= lower_t60) and (float(rir_t60_2[i]) <= upper_t60):
                chosen_i.append(i)
        myrir = [rir_wav2[i] for i in chosen_i]
        mychannel = [rir_channel2[i] for i in chosen_i]
        myt60 = [rir_t60_2[i] for i in chosen_i]
    elif params['rir_choice'] == 3:  # both real and synthetic
        all_indices = [i for i, x in enumerate(rir_isreal2)]
        chosen_i = []
        for i in all_indices:
            if (float(rir_t60_2[i]) >= lower_t60) and (float(rir_t60_2[i]) <= upper_t60):
                chosen_i.append(i)
        myrir = [rir_wav2[i] for i in chosen_i]
        mychannel = [rir_channel2[i] for i in chosen_i]
        myt60 = [rir_t60_2[i] for i in chosen_i]
    else:  # default both real and synthetic
        all_indices = [i for i, x in enumerate(rir_isreal2)]
        chosen_i = []
        for i in all_indices:
            if (float(rir_t60_2[i]) >= lower_t60) and (float(rir_t60_2[i]) <= upper_t60):
                chosen_i.append(i)
        myrir = [rir_wav2[i] for i in chosen_i]
        mychannel = [rir_channel2[i] for i in chosen_i]
        myt60 = [rir_t60_2[i] for i in chosen_i]

    params['myrir'] = myrir
    params['mychannel'] = mychannel
    params['myt60'] = myt60

    # Call main_gen() to generate audio
    clean_source_files, clean_clipped_files, clean_low_activity_files, \
        noise_source_files, noise_clipped_files, noise_low_activity_files = main_gen(params)

    # Create log directory if needed, and write log files of clipped and low activity files
    log_dir = utils.get_dir(cfg, 'log_dir', 'Logs')
    utils.write_log_file(log_dir, 'source_files.csv',
                         clean_source_files + noise_source_files)
    utils.write_log_file(log_dir, 'clipped_files.csv',
                         clean_clipped_files + noise_clipped_files)
    utils.write_log_file(log_dir, 'low_activity_files.csv',
                         clean_low_activity_files + noise_low_activity_files)

    # Compute and print stats about percentange of clipped and low activity files
    total_clean = len(clean_source_files) + len(clean_clipped_files) + len(clean_low_activity_files)
    total_noise = len(noise_source_files) + len(noise_clipped_files) + len(noise_low_activity_files)
    pct_clean_clipped = round(len(clean_clipped_files) / total_clean * 100, 1)
    pct_noise_clipped = round(len(noise_clipped_files) / total_noise * 100, 1)
    pct_clean_low_activity = round(len(clean_low_activity_files) / total_clean * 100, 1)
    pct_noise_low_activity = round(len(noise_low_activity_files) / total_noise * 100, 1)

    print("Of the " + str(total_clean) + " clean speech files analyzed, " +
          str(pct_clean_clipped) + "% had clipping, and " + str(pct_clean_low_activity) +
          "% had low activity " + "(below " + str(params['clean_activity_threshold'] * 100) +
          "% active percentage)")
    print("Of the " + str(total_noise) + " noise files analyzed, " + str(pct_noise_clipped) +
          "% had clipping, and " + str(pct_noise_low_activity) + "% had low activity " +
          "(below " + str(params['noise_activity_threshold'] * 100) + "% active percentage)")
def leeFichero(self): # Comprobamos la cabecera para asegurar que es correcto. cabecera = self.file.readline() if string.find(cabecera,self.nombre) == -1: self.file.close() error("No es un fichero configSBT . ") else: # Incendios permitidos self.fuego = str2bool(self.file.readline()) # Viento permitido self.viento = str2bool(self.file.readline()) # Dirección del viento self.dir_viento = int(self.file.readline()) # Ataque físico permitido self.at_fisico = str2bool(self.file.readline()) # Fase de comprobación del calor self.fase_calor = str2bool(self.file.readline()) # Se permite devastar bosques self.dest_bosque = str2bool(self.file.readline()) # Se permite devastar Edificios self.dest_edificio = str2bool(self.file.readline()) # Chequeo de pilotaje self.pilotCheck = str2bool(self.file.readline()) # Chequeo de daños self.damageCheck = str2bool(self.file.readline()) # Chequeo de desconexión self.disconnectionCheck = str2bool(self.file.readline()) # Daños críticos permitidos self.criticos = str2bool(self.file.readline()) # Explosión de la munición permitida self.explosion_municion = str2bool(self.file.readline()) # Se pueden apagar radiadores self.apagar_radiadores = str2bool(self.file.readline()) # Chequeo de límite de tiempo self.timeLimitCheck = str2bool(self.file.readline()) # Límite de tiempo self.timeLimit = int(self.file.readline()) self.file.close()
def group(self, bool): if str2bool(bool): self.individualMode.setChecked(False) self.departmentMode.setChecked(False) self._taskMode = 'group'
def _validate(self): self._conf_dict['log']['debug'] = utils.str2bool( self._conf_dict['log']['debug']) self._conf_dict['default']['show_passwords'] = utils.str2bool( self._conf_dict['default']['show_passwords'])
def reading(self, bool): if str2bool(bool): self.researchType.setChecked(False) self.vfxType.setChecked(False) self._taskType = 'reading'
def main():
    """Fetch Spartan race data and enrich it via the Athlinks API.

    Parses -a/-w/-t command-line options, fans race-detail and course
    requests out over a thread pool, then normalizes the collected course
    results.  NOTE(review): `raceDetailsList` is appended to from worker
    threads — list.append is atomic in CPython, but this relies on that.
    """
    apikey = ''
    writefiles = False
    threads = 1
    try:
        opts, args = getopt.getopt(sys.argv[1:], "a:w:t:h",
                                   ["apikey=", "writefiles=", "threads="])
    except getopt.GetoptError:
        print(
            'spartan.py -a <Athlinks API Key> -w <Write To File = True/False> -t <Threads Count>'
        )
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print(
                'spartan.py -a <Athlinks API Key> -w <Write To File = True/False> -t <Threads Count>'
            )
            sys.exit()
        elif opt in ("-a", "--apikey"):
            apikey = arg
        elif opt in ("-w", "--writefiles"):
            writefiles = str2bool(arg)
        elif opt in ("-t", "--threads"):
            threads = int(arg)
    print('Write To Files : ', str(writefiles))
    print('Threads count : ', str(threads))
    spartan = Spartan()
    races = spartan.makeRequests(writefiles)
    #existing_events variable isn't used for anything
    existing_events = getExistingEvents()
    existing_races = getExistingRaces()
    #existing_events = []
    #existing_races = []
    # Races with id '0' are placeholders and are skipped below.
    total_races = sum(race.race_id != '0' for race in races)
    print("total races " + str(total_races))
    print("num existing races " + str(len(existing_races)))
    print("num existing events " + str(len(existing_events)))
    athlinks = Athlinks(apikey)
    raceDetailsList = []
    # this WILL wait until all are done
    with ThreadPoolExecutor(max_workers=threads) as executor:
        for race in races:
            if (race.race_id in existing_races):
                # print("found a match with " + race.race_id)
                # merge the two lists into one rather than append
                raceDetailsList.append(getRaceDetails(race))
            else:
                if (str(race.race_id) != "0"):
                    executor.submit(requestRace, race, raceDetailsList,
                                    writefiles, athlinks)
    # for each courseId of race, make a request to athlinks
    # this can be done in parallel
    # print(str(len(raceDetailsList)))
    existing_courses = getExistingCourses()
    start = time.time()
    with ThreadPoolExecutor(max_workers=threads) as executor:
        for race in raceDetailsList:
            executor.submit(requestRaceCourses, race, existing_courses,
                            apikey, writefiles)
    end = time.time()
    # start/end only feed the commented-out timing print below.
    #print("\n\n\n****\nGetting Courses took: " + str(end - start) + "\n*****\n\n")
    print("Completed data fetch")
    print("Normalizing course results....")
    normalizeCourseResults()
#cache cache = jobdict["cache"] [0] + "/" print cache # create the cache if it does not exist: if not os.path.exists(cache): os.makedirs(cache) logstring += "cache created \n" #Set the data file prefix dataprefix = jobdict["runprefix"][0] logstring += "found the data prefix " + dataprefix + "\n" #get the simlib simlibfilename = dataprefix +"_opsim.simlib" strategyfilename = jobdict["createsimlib"][1] if utils.str2bool(jobdict["createsimlib"][0]): logstring += "creating simlib file " + simlibfilename + " from stratgy file " + strategyfilename + "\n" sio.strategy2simlib(strategyfile=strategyfilename, cache=cache, dataprefix=dataprefix, simlibfile=simlibfilename) else: simlibfilename = strategyfilename logstring += "simlib file provided as "+ simlibfilename + "\n" if jobdict['simulate_using'][0] == 'snana': utils.modifysnanafiles(cache, sndict) os.system('sim_SNMix.pl Mix_sim.input') elif jobdict['simulate_using'][0] == 'sncosmo': pass print logstring
def individual(self, bool): if str2bool(bool): self.groupMode.setChecked(False) self.departmentMode.setChecked(False) self._taskMode = 'individual'
import cma from controller import Controller import numpy as np from misc import RolloutGenerator, ACTION_DIM, RNN_HIDDEN_DIM, VAE_LATENT_DIM from misc import load_parameters from utils import str2bool from misc import flatten_parameters from tqdm import tqdm import argparse parser = argparse.ArgumentParser() parser.add_argument('--use_rnn', type=str, required=True) args = parser.parse_args() use_rnn = str2bool(args.use_rnn) assert type(use_rnn) is bool # hardcoded parameters num_samples = 64 num_solutions = 16 num_workers = 6 time_limit = 1000 dr = 'temp' target_return = 700 # create tmp dir if non existent and clean it if existent tmp_dir = join(dr, 'tmp') if not exists(tmp_dir): mkdir(tmp_dir)
def department(self, bool): if str2bool(bool): self.individualMode.setChecked(False) self.groupMode.setChecked(False) self._taskMode = 'department'
def load_best_model(iou_file='iou_test.npy', **kwargs): best = 0 argmax_bf = -1 list_folder = [ f[0] for f in os.walk(kwargs['save_dir']) ][1:] # The first one is the current directory which we're not interested print(list_folder) for f in list_folder: param_file = os.path.join(f, 'hparam.txt') with open(param_file, 'r') as fi: param = fi.read() match = [] # Parameters in the file launch_search = True for k in kwargs.items(): print('bruuuuh') if k[0] == 'model': print('HERE') print(re.findall(r'"(.*?)"', param)) elif k[0] != 'save_dir' and k[0] != 'model_name' and k[ 1] is not None: t = type(k[1]) search = k[0] + '=' s = re.search( '(' + search + ')(.*?)(?=,)', param, re.IGNORECASE) # search the argument in the param file c = s.group(2) if t == type(False): print("C", c) c = U.str2bool(c) print(c) else: c = t(c) match.append(c == k[1]) if not c == k[1]: #print('c',c) #print('k',k) #print('FALSE') launch_search = False if launch_search: print('search in', f) maxx, argmax = get_max_file(os.path.join(f, iou_file)) if maxx > best: best_dir = f best = maxx argmax_bf = argmax try: model_name = kwargs['model_name'] try: load_pt = model_name + '_ep' + str(argmax_bf) + '.pt' model = torch.load(os.path.join(best_dir, load_pt)) print('Success to load', load_pt) except: print('In folder', best_dir, 'not all epoch was saved') load_pt = model_name + '.pt' model = torch.load(os.path.join(best_dir, load_pt)) print('Success to load', load_pt) except: raise Exception("No argument 'model_name' in params") return model, best_dir
def vfx(self, bool): if str2bool(bool): self.researchType.setChecked(False) self.readingType.setChecked(False) self._taskType = 'vfx'
def main_body():
    '''Main body of this file.

    Reads the synthesizer .cfg file, builds the clean and noise file lists,
    fans main_gen() out over a multiprocessing pool, then writes log files
    and prints clipping / low-activity statistics.
    '''
    parser = argparse.ArgumentParser()
    # Configurations: read noisyspeech_synthesizer.cfg and gather inputs
    parser.add_argument('--cfg', default='noisyspeech_synthesizer.cfg',
                        help='Read noisyspeech_synthesizer.cfg for all the details')
    parser.add_argument('--cfg_str', type=str, default='noisy_speech')
    args = parser.parse_args()

    params = dict()
    params['args'] = args
    cfgpath = os.path.join(os.path.dirname(__file__), args.cfg)
    assert os.path.exists(cfgpath), f'No configuration file as [{cfgpath}]'

    cfg = CP.ConfigParser()
    cfg._interpolation = CP.ExtendedInterpolation()
    cfg.read(cfgpath)
    params['cfg'] = cfg._sections[args.cfg_str]
    cfg = params['cfg']

    clean_dir = os.path.join(os.path.dirname(__file__), 'CleanSpeech')
    if cfg['speech_dir'] != 'None':
        clean_dir = cfg['speech_dir']
    # BUG FIX: both existence checks below originally read
    # `if not os.path.exists:` — the bare function object, always truthy —
    # so a missing data directory was never detected.
    if not os.path.exists(clean_dir):
        assert False, ('Clean speech data is required')

    noise_dir = os.path.join(os.path.dirname(__file__), 'Noise')
    if cfg['noise_dir'] != 'None':
        noise_dir = cfg['noise_dir']
    if not os.path.exists(noise_dir):
        assert False, ('Noise data is required')

    params['fs'] = int(cfg['sampling_rate'])
    params['audioformat'] = cfg['audioformat']
    params['audio_length'] = float(cfg['audio_length'])
    params['silence_length'] = float(cfg['silence_length'])
    params['total_hours'] = float(cfg['total_hours'])

    # BUG FIX: the second operand originally re-tested fileindex_start, so a
    # config with fileindex_end == 'None' crashed in int() below.
    if cfg['fileindex_start'] != 'None' and cfg['fileindex_end'] != 'None':
        params['fileindex_start'] = int(cfg['fileindex_start'])
        params['fileindex_end'] = int(cfg['fileindex_end'])
        params['num_files'] = int(params['fileindex_end']) - int(params['fileindex_start'])
    else:
        params['num_files'] = int((params['total_hours'] * 60 * 60) / params['audio_length'])

    print('Number of files to be synthesized:', params['num_files'])

    params['is_test_set'] = utils.str2bool(cfg['is_test_set'])
    params['clean_activity_threshold'] = float(cfg['clean_activity_threshold'])
    params['noise_activity_threshold'] = float(cfg['noise_activity_threshold'])
    params['snr_lower'] = int(cfg['snr_lower'])
    params['snr_upper'] = int(cfg['snr_upper'])
    params['randomize_snr'] = utils.str2bool(cfg['randomize_snr'])
    params['target_level_lower'] = int(cfg['target_level_lower'])
    params['target_level_upper'] = int(cfg['target_level_upper'])

    if 'snr' in cfg.keys():
        params['snr'] = int(cfg['snr'])
    else:
        params['snr'] = int((params['snr_lower'] + params['snr_upper']) / 2)

    params['noisyspeech_dir'] = utils.get_dir(cfg, 'noisy_destination', 'noisy')
    params['clean_proc_dir'] = utils.get_dir(cfg, 'clean_destination', 'clean')
    params['noise_proc_dir'] = utils.get_dir(cfg, 'noise_destination', 'noise')

    # Clean speech list: either from a CSV, or globbed from clean_dir.
    if 'speech_csv' in cfg.keys() and cfg['speech_csv'] != 'None':
        cleanfilenames = pd.read_csv(cfg['speech_csv'])
        cleanfilenames = cleanfilenames['filename']
    else:
        cleanfilenames = glob.glob(os.path.join(clean_dir, params['audioformat']))
    params['cleanfilenames'] = cleanfilenames
    shuffle(params['cleanfilenames'])
    params['num_cleanfiles'] = len(params['cleanfilenames'])

    params['noisefilenames'] = glob.glob(os.path.join(noise_dir, params['audioformat']))
    shuffle(params['noisefilenames'])

    # Invoke multiple processes and fan out calls to main_gen() to these processes
    global clean_counter, noise_counter
    clean_counter = multiprocessing.Value('i', 0)
    noise_counter = multiprocessing.Value('i', 0)
    multi_pool = multiprocessing.Pool(processes=PROCESSES, initializer=init,
                                      initargs=(clean_counter, noise_counter, ))
    fileindices = range(params['num_files'])
    output_lists = multi_pool.starmap(main_gen, zip(repeat(params), fileindices))

    # Each worker returns 6 parallel lists; transpose them.
    flat_output_lists = []
    num_lists = 6
    for i in range(num_lists):
        flat_output_lists.append(extract_list(output_lists, i))

    # Create log directory if needed, and write log files of clipped and low activity files
    log_dir = utils.get_dir(cfg, 'log_dir', 'Logs')
    utils.write_log_file(log_dir, 'source_files.csv',
                         flat_output_lists[0] + flat_output_lists[3])
    utils.write_log_file(log_dir, 'clipped_files.csv',
                         flat_output_lists[1] + flat_output_lists[4])
    utils.write_log_file(log_dir, 'low_activity_files.csv',
                         flat_output_lists[2] + flat_output_lists[5])

    # Compute and print stats about percentange of clipped and low activity files
    total_clean = len(flat_output_lists[0]) + len(flat_output_lists[1]) + len(flat_output_lists[2])
    total_noise = len(flat_output_lists[3]) + len(flat_output_lists[4]) + len(flat_output_lists[5])
    pct_clean_clipped = round(len(flat_output_lists[1]) / total_clean * 100, 1)
    pct_noise_clipped = round(len(flat_output_lists[4]) / total_noise * 100, 1)
    pct_clean_low_activity = round(len(flat_output_lists[2]) / total_clean * 100, 1)
    pct_noise_low_activity = round(len(flat_output_lists[5]) / total_noise * 100, 1)

    print("Of the " + str(total_clean) + " clean speech files analyzed, " + str(pct_clean_clipped) +
          "% had clipping, and " + str(pct_clean_low_activity) + "% had low activity " +
          "(below " + str(params['clean_activity_threshold'] * 100) + "% active percentage)")
    print("Of the " + str(total_noise) + " noise files analyzed, " + str(pct_noise_clipped) +
          "% had clipping, and " + str(pct_noise_low_activity) + "% had low activity " +
          "(below " + str(params['noise_activity_threshold'] * 100) + "% active percentage)")
def bandeval(filename): inp = input("Does this file have BANDWIDTH problems? [y/n]") return u.str2bool(inp)
cleanpath = os.path.join(cleandir, clean_filename) noise_filename = 'noise_fileid_'+os.path.splitext(noisy_filename)[0].split('fileid_')[1]+'.wav' noisepath = os.path.join(noisedir, noise_filename) noisy_filenames_list.append(noisy_filename) clean_filenames_list.append(clean_filename) noise_filenames_list.append(noise_filename) # Read clean, noise and noisy signals clean_signal, fs_clean = sf.read(cleanpath) noise_signal, fs_noise = sf.read(noisepath) noisy_signal, fs_noisy = sf.read(noisypath) # SNR Test # To do: add right path split to extract SNR if utils.str2bool(cfg['snr_test']): snr = int(noisy_filename.split('_snr')[1].split('_')[0]) snr_results_list.append(str(test_snr(clean=clean_signal, \ noise=noise_signal, expected_snr=snr))) else: snr_results_list.append(skipped_string) # Normalization test if utils.str2bool(cfg['norm_test']): tl = int(noisy_filename.split('_tl')[1].split('_')[0]) clean_norm_results_list.append(str(test_normalization(clean_signal))) noise_norm_results_list.append(str(test_normalization(noise_signal))) noisy_norm_results_list.append(str(test_normalization(noisy_signal, expected_rms=tl))) else: clean_norm_results_list.append(skipped_string) noise_norm_results_list.append(skipped_string)
def sateval(filename): inp = input("Does this file have SATURATION problems? [y/n]") return u.str2bool(inp)
def init_ui(self):
    """Build the Kettle editor main window: tabbed editors, project-view
    dock/tree, status bar, all menu actions and their shortcuts, then show
    the window maximized.  Widget creation happens before actions so the
    action slots can reference the editors.
    """
    self.resize(800, 600)
    self.setWindowTitle('Kettle')
    self.current_editor = self.create_editor()
    self.editors = []
    self.central_widget = QWidget(self)
    QFontDatabase.addApplicationFont('../assets/font/Monoid-Regular.ttf')
    sizePolicy = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
    sizePolicy.setHorizontalStretch(0)
    sizePolicy.setVerticalStretch(0)
    sizePolicy.setHeightForWidth(self.sizePolicy().hasHeightForWidth())
    self.setSizePolicy(sizePolicy)
    #self.statusbar = self.statusBar()
    self.statusbar = Statusbar(self)
    self.setStatusBar(self.statusbar)
    self.horizontal_layoutW = QWidget(self.central_widget)
    self.splitter = QSplitter(self.central_widget)
    # Tabbed editor area.
    self.tab_widget = QTabWidget(self.central_widget)
    self.tab_widget.setTabsClosable(True)
    self.tab_widget.currentChanged.connect(self.change_text_editor)
    self.tab_widget.tabCloseRequested.connect(self.remove_editor)
    self.new_document()
    # Project-view dock with the file tree.
    self.dock_widget = QDockWidget('Project View', self.central_widget)
    print(self.sizeHint())
    self.horizontal_layout = QHBoxLayout(self.central_widget)
    self.horizontal_layout.setSizeConstraint(QLayout.SetMaximumSize)
    self.treeView = QTreeWidget(self.central_widget)
    self.treeView.header().hide()
    self.treeView.itemDoubleClicked.connect(self.tree_clicked)
    self.dock_widget.setWidget(self.treeView)
    self.splitter.addWidget(self.tab_widget)
    self.horizontal_layout.addWidget(self.splitter)
    self.setCentralWidget(self.central_widget)
    self.addDockWidget(Qt.LeftDockWidgetArea, self.dock_widget)
    # Settings are stored as strings, hence str2bool.
    if utils.str2bool(
            config.get_setting('view', 'view_projectview', 'True')):
        self.dock_widget.show()
    else:
        self.dock_widget.hide()
    self.statusbar.showMessage('Line 0 | Column 0')
    self.splitter.setSizes([5, 300])
    # Reopen the last project, if any was saved.
    if config.get_setting('General', 'last_opened_project'):
        self.load_project_structure(
            config.get_setting('General', 'last_opened_project'),
            self.treeView)
        self.project_folder = config.get_setting('General',
                                                 'last_opened_project')
    # ------------------------- Actions -------------------------
    exit_action = QAction(QIcon('exit.png'), '&Exit', self)
    exit_action.setShortcut('Ctrl+Q')
    exit_action.setStatusTip('Exit application')
    exit_action.triggered.connect(qApp.quit)
    new_action = QAction('New', self)
    new_action.triggered.connect(self.new_document)
    new_action.setShortcut('Ctrl+N')
    save_action = QAction('Save', self)
    save_action.triggered.connect(self.save_file)
    save_action.setShortcut('Ctrl+S')
    save_as_action = QAction("Save as..", self)
    save_as_action.triggered.connect(self.save_file_as)
    save_as_action.setShortcut('Ctrl+Alt+S')
    open_action = QAction('Open', self)
    open_action.triggered.connect(self.open_file)
    open_action.setShortcut('Ctrl+O')
    open_proj_action = QAction('Open Project', self)
    open_proj_action.triggered.connect(self.open_prof)
    settings_action = QAction('Settings', self)
    settings_action.triggered.connect(self.open_settings)
    # NOTE(review): the edit actions below bind to the editor that is
    # current *now* — confirm they are rebound when tabs change.
    undo_action = QAction('Undo', self)
    undo_action.triggered.connect(self.current_editor.undo)
    undo_action.setShortcut('Ctrl+Z')
    redo_action = QAction('Redo', self)
    redo_action.triggered.connect(self.current_editor.redo)
    redo_action.setShortcut('Ctrl+Y')
    cut_action = QAction('Cut', self)
    cut_action.triggered.connect(self.current_editor.cut)
    cut_action.setShortcut('Ctrl+X')
    copy_action = QAction('Copy', self)
    copy_action.triggered.connect(self.current_editor.copy)
    copy_action.setShortcut('Ctrl+C')
    paste_action = QAction('Paste', self)
    paste_action.triggered.connect(self.current_editor.paste)
    paste_action.setShortcut('Ctrl+V')
    select_all_action = QAction('Select all', self)
    select_all_action.triggered.connect(self.current_editor.selectAll)
    select_all_action.setShortcut('Ctrl+A')
    run_action = QAction('Run', self)
    run_action.triggered.connect(self.run)
    run_action.setShortcut('Ctrl+SPACE')
    view_status_action = QAction('View statusbar', self, checkable=True)
    view_status_action.setChecked(
        utils.str2bool(config.get_setting('view', 'view_statusbar')))
    view_status_action.triggered.connect(self.statusbar.view_status)
    view_projectview_action = QAction('View project view', self,
                                      checkable=True)
    view_projectview_action.setChecked(
        utils.str2bool(
            config.get_setting('view', 'view_projectview', 'True')))
    view_projectview_action.triggered.connect(self.view_projectview)
    github_link_action = QAction('Github', self)
    github_link_action.triggered.connect(self.open_github_link)
    about_action = QAction('About', self)
    about_action.triggered.connect(self.open_about)
    close_current_editor_action = QAction('Close current file', self)
    close_current_editor_action.triggered.connect(self.remove_editor)
    close_current_editor_action.setShortcut('Ctrl+W')
    open_notes_graph_action = QAction('Open graph', self)
    open_notes_graph_action.triggered.connect(self.open_notes_graph)
    create_new_notes_project_action = QAction('Create notes project', self)
    create_new_notes_project_action.triggered.connect(
        self.create_notes_project)
    # ------------------------- Menus -------------------------
    menubar = self.menuBar()
    notes_menu = QMenu('Notes', self)
    file_menu = menubar.addMenu('&File')
    edit_menu = menubar.addMenu('&Edit')
    run_menu = menubar.addMenu('&Run')
    view_menu = menubar.addMenu('&View')
    help_menu = menubar.addMenu('&Help')
    notes_menu.addAction(open_notes_graph_action)
    notes_menu.addAction(create_new_notes_project_action)
    file_menu.addAction(new_action)
    file_menu.addAction(open_action)
    file_menu.addAction(open_proj_action)
    file_menu.addAction(save_action)
    file_menu.addAction(save_as_action)
    file_menu.addAction(close_current_editor_action)
    file_menu.addAction(settings_action)
    file_menu.addAction(exit_action)
    file_menu.addMenu(notes_menu)
    edit_menu.addAction(undo_action)
    edit_menu.addAction(redo_action)
    edit_menu.addAction(cut_action)
    edit_menu.addAction(copy_action)
    edit_menu.addAction(paste_action)
    edit_menu.addAction(select_all_action)
    run_menu.addAction(run_action)
    view_menu.addAction(view_status_action)
    view_menu.addAction(view_projectview_action)
    help_menu.addAction(github_link_action)
    help_menu.addAction(about_action)
    self.showMaximized()
    self.show()
def clickeval(filename):
    """Interactively ask the operator whether *filename* has CLICK problems.

    Prompts on stdin with a y/n question and returns the answer converted
    to a bool via ``u.str2bool``.  Note that *filename* is not referenced
    in the body; it only documents which file is under review.
    """
    return u.str2bool(input("Does this file have CLICK problems? [y/n]"))
def __init__(self):
    """Create the PLM application: Qt identity, managers, layouts, sign-in.

    Runs the whole start-up sequence: application metadata, settings and
    logging, the service/manager objects, layout construction and signal
    wiring, and finally either shows the main UI (offline mode) or tries
    to restore the saved session against the local server.
    """
    super(PLM, self).__init__(sys.argv)

    # Application identity exposed to Qt / the OS.
    self.setWindowIcon(LogoIcon("Logo"))  # application icon
    self.setOrganizationName(__organization__)
    self.setApplicationName(__appname__)
    self.setOrganizationDomain(__website__)
    self.setApplicationVersion(__version__)
    self.setApplicationDisplayName(__appname__)
    self.setCursorFlashTime(1000)
    self.setQuitOnLastWindowClosed(False)  # keep running (system tray app)

    self.appID = self.sessionId()
    self.appKey = self.sessionKey()
    self.appPid = self.applicationPid()

    # Register an explicit AppUserModelID so the taskbar groups windows
    # under this app.  NOTE(review): ctypes.windll is Windows-only; this
    # line will raise on other platforms -- confirm PLM is Windows-only.
    ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(self.appID)

    self.logger = Loggers(self.__class__.__name__)
    self.settings = Settings(SETTING_FILEPTH['app'], ST_FORMAT['ini'], self)
    self.settings._settingEnable = True
    self.appInfo = self.dataConfig.appInfo  # configuration data
    self.set_styleSheet('dark')

    # Core services.
    self.threadManager = ThreadManager()  # multithreading
    self.database = LocalDatabase()       # local database tool
    self.browser = Browser()

    # UI managers and layout registry.
    self.eventManager = EventManager()
    self.buttonManager = ButtonManager()
    self.actionManager = ActionManager()
    self.registryLayout = RegistryLayout()
    self.layoutManager = LayoutManager(self.settings, self.registryLayout,
                                       self.actionManager, self.buttonManager,
                                       self.eventManager, self.threadManager,
                                       self)
    self.layoutManager.registLayout(self.browser)
    self.layoutManager.buildLayouts()
    self.layoutManager.globalSetting()

    # Shortcuts to the layouts built above.
    self.signIn = self.layoutManager.signin
    self.signUp = self.layoutManager.signup
    self.forgotPassword = self.layoutManager.forgotPW
    self.sysTray = self.layoutManager.sysTray
    self.mainUI = self.layoutManager.mainUI
    self.shortcutLayout = self.layoutManager.shortcutLayout

    # Auth-related layouts must react to login-state changes.
    for layout in [self.signIn, self.signUp, self.forgotPassword, self.sysTray, self.mainUI]:
        layout.signals.connect('loginChanged', self.loginChanged)

    # Wire the common signals of every registered layout (except ignored ones).
    for layout in self.layoutManager.layouts():
        if not layout.key in self.ignoreIDs:
            layout.signals.connect('showLayout', self.showLayout)
            layout.signals.connect('executing', self.executing)
            layout.signals.connect('openBrowser', self.openBrowser)
            layout.signals.connect('setSetting', self.setSetting)
            layout.signals.connect('sysNotify', self.sysNotify)
            layout.settings._settingEnable = True
            if layout.key in ['SignIn', 'SignUp', 'SysTray', 'ForgotPassword']:
                # NOTE(review): these layouts appear to be the same objects
                # already connected in the loop above, so loginChanged may
                # fire twice for them -- confirm whether that is intended.
                layout.signals.connect('loginChanged', self.loginChanged)

    if self.mainUI.mode == 'Offline':
        # Offline mode: no authentication, go straight to the main UI.
        self.showLayout(self.mainUI.key, "show")
    else:
        try:
            self.username, token, cookie, remember = self.database.query_table('curUser')
        except (ValueError, IndexError):
            self.logger.info("Error occur, can not query data")
            self.signInEvent()
        else:
            if not str2bool(remember):
                # User chose not to be remembered: always ask for credentials.
                self.signInEvent()
            else:
                # Validate the stored token/cookie against the local server.
                r = requests.get(__localServer__, verify = False,
                                 headers = {'Authorization': 'Bearer {0}'.format(token)},
                                 cookies = {'connect.sid': cookie})
                if r.status_code == 200:
                    if not self.sysTray.isSystemTrayAvailable():
                        self.logger.report(SYSTRAY_UNAVAI)
                        self.exitEvent()
                    else:
                        # Session still valid: mark logged in and show the UI.
                        self.loginChanged(True)
                        self.sysTray.log_in()
                        self.showLayout(self.mainUI.key, "show")
                else:
                    # Session rejected by the server: ask to sign in again.
                    self.showLayout('SignIn', "show")

    self.runEvent()
def nbeval(filename):
    """Interactively ask the operator whether *filename* has NOISE BURSTS problems.

    Prompts on stdin with a y/n question and returns the answer converted
    to a bool via ``u.str2bool``.  Note that *filename* is not referenced
    in the body; it only documents which file is under review.
    """
    return u.str2bool(input("Does this file have NOISE BURSTS problems? [y/n]"))
def leeJugador(self,n_mechs,fichero,indice):
    # Read this player's full Mech record from `fichero` (the save file
    # already split into a list of lines); `indice` is the index of the
    # line holding the Mech number.  Only processes the record when it
    # belongs to this player.
    # Check the position in the file
    if int(self.nombre) == int(fichero[indice]):
        # Read the information common to every Mech (the 30 lines
        # consumed by leeMech, starting right after the number line).
        self.leeMech(n_mechs,fichero,indice)
        sub_indice = indice + 30
        # WALK movement points
        sub_indice+=1
        self.andar = int(fichero[sub_indice])
        # RUN movement points
        sub_indice+=1
        self.correr = int(fichero[sub_indice])
        # JUMP movement points
        sub_indice+=1
        self.saltar = int(fichero[sub_indice])
        # HEAT SINKS: switched on, then switched off
        sub_indice+=1
        self.rad_encendidos = int(fichero[sub_indice])
        sub_indice+=1
        self.rad_apagados = int(fichero[sub_indice])
        # MechWarrior wounds
        sub_indice+=1
        self.heridas = int(fichero[sub_indice])
        # MechWarrior consciousness
        sub_indice+=1
        self.consciente = str2bool(fichero[sub_indice])
        self.slots = []
        # Read the damaged (hit) slots
        """ Location order (8):
        self.slots = [left arm slots, left torso slots,
        left leg slots, right leg slots, right torso slots,
        right arm slots, center torso slots, head slots]
        """
        # Each location is a run of per-slot booleans followed by one
        # separator line (hence the +12 / +6 skips after 11 / 5 slots).
        sub_indice+=1
        localizacion = []
        for i in range(11):
            localizacion.append(str2bool(fichero[sub_indice+i]))
        self.slots.append(list(localizacion))
        sub_indice+=12
        localizacion = []
        for i in range(5):
            localizacion.append(str2bool(fichero[sub_indice+i]))
        self.slots.append(list(localizacion))
        sub_indice+=6
        localizacion = []
        for i in range(11):
            localizacion.append(str2bool(fichero[sub_indice+i]))
        self.slots.append(list(localizacion))
        sub_indice+=12
        localizacion = []
        for i in range(11):
            localizacion.append(str2bool(fichero[sub_indice+i]))
        self.slots.append(list(localizacion))
        sub_indice+=12
        localizacion = []
        for i in range(5):
            localizacion.append(str2bool(fichero[sub_indice+i]))
        self.slots.append(list(localizacion))
        sub_indice+=6
        localizacion = []
        for i in range(11):
            localizacion.append(str2bool(fichero[sub_indice+i]))
        self.slots.append(list(localizacion))
        sub_indice+=12
        localizacion = []
        for i in range(11):
            localizacion.append(str2bool(fichero[sub_indice+i]))
        self.slots.append(list(localizacion))
        sub_indice+=12
        localizacion = []
        for i in range(5):
            localizacion.append(str2bool(fichero[sub_indice+i]))
        self.slots.append(list(localizacion))
        sub_indice+=6
        # Locations from which a weapon has already been fired.
        self.localizaciones = []
        for i in range(8):
            self.localizaciones.append(str2bool(fichero[sub_indice+i]))
        sub_indice+=8
        # AMMUNITION to drop: a count followed by (location, amount) pairs.
        self.soltar_municion = int(fichero[sub_indice])
        self.municiones = []
        for i in range(self.soltar_municion):
            # NOTE(review): every iteration reads the same two entries
            # (sub_indice+1, sub_indice+2), and the `indice+=2` below has
            # no effect on those reads -- it looks like it was meant to be
            # `sub_indice+=2`.  Confirm against the save-file writer before
            # relying on records with more than one ammunition entry.
            mun_loc = (str(fichero[sub_indice+1]),int(fichero[sub_indice+2]))
            self.municiones.append(mun_loc)
            indice+=2
def read(self, comment = "#"):
    """Populate this workflow from its <name>.ini file.

    Searches every directory in self.path for <name>.ini, parses the
    leading header directives (programs:, md5:, inherit:, modify:,
    command:), then rewinds to the first non-directive line and hands the
    rest of the file to readConfigInfo() for the read-library and contig
    sections.  Sets self.isValid = False when the recorded md5 no longer
    matches getMD5().  Raises JobSignalledBreak when no workflow file is
    found anywhere on the path.
    """
    # populate from file
    self.isValid = True
    workflow_file = None
    # The LAST matching directory on the path wins.
    # NOTE(review): when several directories match, the earlier open file
    # handles are leaked (never closed) -- confirm whether a break after
    # the first match was intended.
    for p in self.path:
        if os.path.exists("%s%s%s.ini"%(p, os.sep, self.name.lower())):
            workflow_file = open("%s%s%s.ini"%(p, os.sep, self.name.lower()), 'r')
    if workflow_file == None:
        print "Error: cannot open workflow %s"%(self.name)
        raise(JobSignalledBreak)
    lastPos = 0
    # Header loop: remember the file position before each line so we can
    # rewind to the first non-directive line for readConfigInfo() below.
    while True:
        lastPos = workflow_file.tell()
        line = workflow_file.readline()
        (line, sep, commentLine) = line.partition(comment)
        if len(commentLine) != 0:
            # Line carries a comment marker: skip it entirely.
            continue
        if "programs:" in line:
            # Whitespace-separated list of supported program names.
            splitLine = line.replace("programs:", "").strip().split()
            for prog in splitLine:
                self.programList.add(prog.lower())
        elif "md5:" in line:
            self.md5 = line.replace("md5:", "").strip()
        elif "inherit:" in line:
            # Merge in every parent workflow named on this line; parents
            # not yet cached in _workflows are read recursively.
            splitLine = line.replace("inherit:", "").strip().split()
            for master in splitLine:
                self.super.append(master)
                masterFlow = None
                if master in _workflows:
                    masterFlow = _workflows[master]
                else:
                    masterFlow = Workflow(line.replace("inherit:", "").strip(), self.path)
                    masterFlow.read()
                self.programList.update(masterFlow.programList)
                self.readlibs.extend(masterFlow.readlibs)
                # Parent commands run before our own.
                self.commandList = (masterFlow.commandList + " " + self.commandList).strip()
                self.asmcontigs.extend(masterFlow.asmcontigs)
                self.isModifiable = masterFlow.isModifiable or self.isModifiable
        elif "modify:" in line:
            splitLine = line.replace("modify:", "").strip()
            self.isModifiable = self.isModifiable or str2bool(splitLine)
        elif "command:" in line:
            splitLine = line.replace("command:", "").strip()
            self.commandList = (self.commandList + " " + splitLine).strip()
        else:
            # First line that is not a header directive: stop parsing headers.
            break
    # Rewind so readConfigInfo() sees the line that ended the header loop.
    workflow_file.seek(lastPos)
    # now read the library info
    (readasm, readl, ignore) = readConfigInfo(workflow_file)
    self.readlibs.extend(readl)
    self.asmcontigs.extend(readasm)
    workflow_file.close()
    # A stale md5 invalidates the workflow.
    if self.md5 != None:
        if self.md5 != getMD5():
            self.isValid = False
    if _verbose:
        print "Read workflow %s"%(self.name)
        print "Inherits from %s"%(",".join(self.super))
        print "Supported programs %s"%(",".join(self.programList))
        print "Asm contigs %s"%(",".join(self.asmcontigs))
        for lib in self.readlibs:
            print "Lib %s %s %s %s %s %s %s %s %s"%(lib.id, lib.format, lib.mean, lib.stdev, lib.mmin, lib.mmax, lib.mated, lib.interleaved, lib.innie)
def leeMech(self,n_mechs,fichero,indice):
    """Load the state common to every Mech from the save file.

    `fichero` is the save file already split into a list of lines and
    `indice` is the index of the line holding this Mech's number; the
    record occupies the 30 lines that follow it.  `n_mechs` is accepted
    for signature compatibility but not used in the body.
    """
    base = indice + 1  # first data line of the record
    # Status flags
    self.operativo = str2bool(fichero[base])          # operational
    self.desconectado = str2bool(fichero[base + 1])   # shut down
    self.atascado = str2bool(fichero[base + 2])       # stuck
    self.suelo = str2bool(fichero[base + 3])          # on the ground
    # Position / facing
    self.hexagono = fichero[base + 4]                 # hex occupied (raw string)
    self.lado = int(fichero[base + 5])                # Mech facing
    self.lado_torso = int(fichero[base + 6])          # torso facing
    # Heat and fire
    self.temperatura = int(fichero[base + 7])         # heat level
    self.ardiendo = str2bool(fichero[base + 8])       # on fire
    # Club
    self.garrote = str2bool(fichero[base + 9])        # carries a club
    self.tipo_garrote = int(fichero[base + 10])       # club type
    # Armour state, 11 locations in this order:
    # [left arm, left torso, left leg, right leg, right torso, right arm,
    #  center torso, head, rear left torso, rear right torso, rear center torso]
    self.blindaje = [int(fichero[base + 11 + i]) for i in range(11)]
    # Internal structure, 8 locations in this order:
    # [left arm, left torso, left leg, right leg, right torso, right arm,
    #  center torso, head]
    self.estructura = [int(fichero[base + 22 + i]) for i in range(8)]
# NOTE(review): the `if len(sys.argv) < 2:` header of this usage guard was
# lost when the file was extracted; it is reconstructed here so that quit()
# only runs when no config file is given on the command line.
if len(sys.argv) < 2:
    print("Example: pack.py configs/letv-x355pro.ini")
    quit()

configFile = sys.argv[1]

# Parse the .ini config file; ExtendedInterpolation enables ${section:key}
# cross-references inside the file.
config = configparser.ConfigParser(
    interpolation=configparser.ExtendedInterpolation())
config.read(configFile)

# [Main] section: global firmware/pack parameters.
main = config['Main']
firmwareFileName = main['FirmwareFileName']
projectFolder = main['ProjectFolder']
useHexValuesPrefix = utils.str2bool(main['useHexValuesPrefix'])
SCRIPT_FIRMWARE_FILE_NAME = main['SCRIPT_FIRMWARE_FILE_NAME']
DRAM_BUF_ADDR = main['DRAM_BUF_ADDR']
MAGIC_FOOTER = main['MAGIC_FOOTER']
HEADER_SIZE = utils.sizeInt(main['HEADER_SIZE'])

# [HeaderScript] section: raw=True keeps the script text uninterpolated.
header = config['HeaderScript']
headerScriptPrefix = config.get('HeaderScript', 'Prefix', raw=True)
headerScriptSuffix = config.get('HeaderScript', 'Suffix', raw=True)

# Every "part/..." section describes one firmware partition to pack.
parts = list(filter(lambda s: s.startswith('part/'), config.sections()))

print("\n")
# NOTE(review): the `if len(sys.argv) < 2:` header of this usage guard was
# lost when the file was extracted; it is reconstructed here so that quit()
# only runs when the config-file argument is missing.
if len(sys.argv) < 2:
    print ("Usage: pack.py <config file>")
    print ("Example: pack.py configs/letv-x355pro.ini")
    quit()

configFile = sys.argv[1]

# Parse the .ini config file; ExtendedInterpolation enables ${section:key}
# cross-references inside the file.
config = configparser.ConfigParser(
    interpolation=configparser.ExtendedInterpolation())
config.read(configFile)

# [Main] section: global firmware/pack parameters.
main = config['Main']
firmwareFileName = main['FirmwareFileName']
projectFolder = main['ProjectFolder']
useHexValuesPrefix = utils.str2bool(main['useHexValuesPrefix'])
SCRIPT_FIRMWARE_FILE_NAME = main['SCRIPT_FIRMWARE_FILE_NAME']
DRAM_BUF_ADDR = main['DRAM_BUF_ADDR']
MAGIC_FOOTER = main['MAGIC_FOOTER']
HEADER_SIZE = utils.sizeInt(main['HEADER_SIZE'])

# CRC layout variants:
#   XGIMI uses HEADER+BIN+MAGIC+HEADER_CRC to calculate crc2
#   TVs with TP.MS338E.PB803 mainboard use HEADER+BIN+BIN_CRC+MAGIC+HEADER_CRC
#   to calculate crc2
XGIMI_CRC = False
PB803_CRC = False
if 'CRC_TYPE' in main:
    crc_type = main['CRC_TYPE'].upper()
    if crc_type == 'XGIMI':
        XGIMI_CRC = True
    if crc_type == 'PB803':
        # BUG FIX: this branch originally set XGIMI_CRC = True (copy-paste
        # error), leaving PB803_CRC permanently False and selecting the
        # wrong crc2 layout for PB803 boards.
        PB803_CRC = True