def as_bytes(self):
    """Serialize this transaction's fields into one concatenated string."""
    serialized_inputs = [txin.to_json() for txin in self.inputs]
    serialized_outputs = [txout.to_json() for txout in self.outputs]
    return "{}{}{}{}{}{}{}".format(
        self.to,
        self.sender,
        listToString(serialized_inputs),
        listToString(serialized_outputs),
        self.type,
        self.extra,
        self.fee,
    )
def printonfile(self, filename):
    """Append this solution's routes and total cost to *filename*.

    Fix: the original opened the file and called close() manually, leaving
    the handle open if any write raised; a context manager guarantees the
    file is closed on every path. Output bytes are unchanged.
    """
    with open(filename, 'a') as myfile:
        myfile.write("\nRoutes: ")
        for route in self.sigma:
            myfile.write("\n")
            myfile.write(utils.listToString(route.customer))
        myfile.write("\nCost: ")
        myfile.write(str(self.costo))
def add_alt_covers(self, issue_id, url_list):
    """Replace the cached alternate-cover URLs for *issue_id* with *url_list*."""
    con = lite.connect(self.db_file)
    with con:  # commits (or rolls back) the transaction on exit
        con.text_factory = unicode
        cursor = con.cursor()
        # Drop any stale rows for this issue before inserting the fresh list.
        cursor.execute("DELETE FROM AltCovers WHERE issue_id = ?", [issue_id])
        joined_urls = utils.listToString(url_list)
        cursor.execute(
            "INSERT INTO AltCovers (issue_id, url_list) VALUES(?, ?)",
            (issue_id, joined_urls))
def add_alt_covers(self, issue_id, url_list):
    """Store *url_list* as the alternate covers for *issue_id*, replacing any prior entry."""
    connection = lite.connect(self.db_file)
    with connection:  # transaction scope: commit on success, rollback on error
        connection.text_factory = unicode
        cur = connection.cursor()
        # Purge earlier results recorded under the same issue id.
        cur.execute("DELETE FROM AltCovers WHERE issue_id = ?", [issue_id])
        cur.execute(
            "INSERT INTO AltCovers (issue_id, url_list ) VALUES( ?, ? )",
            (issue_id, utils.listToString(url_list)))
def kill_checker():
    """One proximity sweep: remove crewmates within 1 m of either imposter.

    Reads imposter (robot6/robot7) and crewmate positions from the ROS
    parameter server, updates the 'alive_crewmates' param after each kill,
    then sleeps one second.

    Fix: the original removed entries from the very list it was iterating,
    which makes Python skip the element following each kill, so a crewmate
    standing next to a just-killed one could survive a sweep it should not.
    We now iterate the original roster and mutate a separate survivors list.
    """
    alive_crewmates = rospy.get_param('alive_crewmates')
    alive_crewmates = alive_crewmates.split()
    imposter6X = rospy.get_param('robot6/positionX')
    imposter6Y = rospy.get_param('robot6/positionY')
    imposter7X = rospy.get_param('robot7/positionX')
    imposter7Y = rospy.get_param('robot7/positionY')
    survivors = list(alive_crewmates)
    for crewmate in alive_crewmates:
        X = rospy.get_param(crewmate + '/positionX')
        Y = rospy.get_param(crewmate + '/positionY')
        dist6 = np.sqrt((X - imposter6X)**2 + (Y - imposter6Y)**2)
        dist7 = np.sqrt((X - imposter7X)**2 + (Y - imposter7Y)**2)
        if dist6 < 1 or dist7 < 1:
            print("CREWMATE WAS KILLED")
            survivors.remove(crewmate)
            # Publish the updated roster immediately after each kill, as before.
            rospy.set_param('alive_crewmates', listToString(survivors))
    sleep(1)
def export_report(parameters, keys = 'uuid', range = [None,time.strftime('%Y-%m-%d')], type = 'csv', outputname = None, emailaddr = None, where = None, description = 'No description', distinct = 'Yes', groupid = 'default', ftpuser = '******', vendor_event = 'InstallationInfo', report_name = None, debug = False, raw_output = False, scp = False, target_user = None, target_server = None, target_dir = None):
    """Build a Hive query for the requested parameters (or named report) and export its result.

    Prints the generated query, then (unless ``debug``) runs the export,
    emails the requester an FTP download link with credentials, and
    optionally SCPs the output file to a target host.
    """
    # NOTE(review): `range` and `type` shadow Python builtins, and the mutable
    # default `range=[None, ...]` is evaluated once at import time and shared
    # across calls -- confirm no caller mutates it. `keys`, `description` and
    # `distinct` are unused in this body; presumably consumed elsewhere.
    try:
        COMMAND = ''
        #Build Query into COMMAND (Prefix: "hive --config $HIVE_HOME/conf -e \"" is added in function export_hive_to_file())
        COMMAND = "\"use " + groupid + "; "
        if report_name is not None:
            # A named report supplies its own pre-built query.
            reportBuilder = report_builder.ReportBuilder(report_name,range,groupid)
            COMMAND += reportBuilder.getReportQuery()
        else:
            #Create QueryBuilder Object
            queryBuilder = query_builder.QueryBuilder()
            # Sanitize parameters. Produces:
            #   eventList           - Event objects populated by the sanitize function.
            #   parameters          - the original parameters, in their original order.
            #   parametersAliasList - aliases of those parameters, in the same order.
            eventList, parameters, parametersAliasList = export_utils.sanitize_parameters(parameters,groupid,raw_output)
            #Sanitize Where
            where, where_tables = export_utils.sanitize_where(where, parameters, groupid, eventList, vendor_event)
            #Add eventList to queryBuilder
            queryBuilder.addToEvents(eventList)
            COMMAND += queryBuilder.buildQuery(groupid,type,range,parameters,raw_output)
        # Close the double-quote opened in the "use <db>" prefix above.
        COMMAND += "\""
        print
        print "Query:"
        print "-------------------------"
        print COMMAND
        print "-------------------------"
        print
        if debug == False:
            print "START TIME: [" + time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime()) + "]"
            print "START COMMAND: " + COMMAND
            # Execute the query and land the result on the FTP drop area.
            outputpath, outputname, user, password = export_hive_to_file(COMMAND, utils.listToString(parametersAliasList), type, outputname, ftpuser)
            try:
                email("Report: Data Export Utility","complete!","smithmicro.com",emailaddr,"{'text':'email from export report', 'html':'<p>Your file is now complete. To access your report, click on this link: <a href=\"ftp://"+os.uname()[1]+"/"+type+"/"+outputname+"\">"+outputname+"</a> and enter your username and password below.</p><p>Username: <b>"+user.strip()+"</b><br />Password: <b>"+password.strip()+"</b></p>'}")
            except:
                # Best-effort notification; the export itself already succeeded.
                print "The Email Server has Raised an Error"
            if bool(scp) == True:
                # Optionally push the file to a remote host as well.
                email_addr = user.strip()
                scp_method(email_addr, outputname, target_user, target_server, target_dir)
    except IOError, e:
        # Report the failure to stdout and to the requester by email.
        print "I/O error(%(error0)s):(%(error1)s)" % { "error0":e[0],"error1":e[1] }
        email("Export_Report","failed","analytics.smithmicro.com",emailaddr,"\nError:\nI/O error("+utils.xstr(e[0])+"): "+xstr(e[1])+"\n"+utils.xstr(sys.exc_info()[0]))
def __init__(self, name, numbers):
    """Precompute the six oriented number sequences of a small (triangular) tile."""
    self.id = name
    a, b, c = numbers[0], numbers[1], numbers[2]
    # '+' entries are the three cyclic rotations of the numbers; '-' entries
    # are the mirrored readings -- together every way the tile can be placed.
    variants = [
        ([a, b, c], '+'),
        ([b, c, a], '+'),
        ([c, a, b], '+'),
        ([a, c, b], '-'),
        ([b, a, c], '-'),
        ([c, b, a], '-'),
    ]
    self.orders = np.array(
        [listToString(seq + [sign]) for seq, sign in variants])
def convertXMLToMetadata(self, tree):
    """Parse a CoMet XML tree into a GenericMetadata object.

    Returns None when the root element is not <comet>.
    """
    root = tree.getroot()
    if root.tag != "comet":
        # Fix: the original executed `raise 1` (raising a non-exception is a
        # TypeError at runtime) followed by an unreachable `return None`.
        # The unreachable line shows the intent: signal failure with None.
        return None

    metadata = GenericMetadata()
    md = metadata

    # Helper: text of the named child element, or None when absent.
    def xlate(tag):
        node = root.find(tag)
        if node is not None:
            return node.text
        else:
            return None

    md.series = xlate("series")
    md.title = xlate("title")
    md.issue = xlate("issue")
    md.volume = xlate("volume")
    md.comments = xlate("description")
    md.publisher = xlate("publisher")
    md.language = xlate("language")
    md.format = xlate("format")
    md.pageCount = xlate("pages")
    md.maturityRating = xlate("rating")
    md.price = xlate("price")
    md.isVersionOf = xlate("isVersionOf")
    md.rights = xlate("rights")
    md.identifier = xlate("identifier")
    md.lastMark = xlate("lastMark")
    md.genre = xlate("genre")  # TODO - repeatable field

    # CoMet dates are "YYYY[-MM[-DD]]"; only year and month are kept.
    date = xlate("date")
    if date is not None:
        parts = date.split("-")
        if len(parts) > 0:
            md.year = parts[0]
        if len(parts) > 1:
            md.month = parts[1]

    md.coverImage = xlate("coverImage")

    readingDirection = xlate("readingDirection")
    if readingDirection is not None and readingDirection == "rtl":
        md.manga = "YesAndRightToLeft"

    # <character> is repeatable; fold all of them into one joined string.
    # Robustness: skip empty elements (n.text is None) instead of crashing.
    char_list = []
    for n in root:
        if n.tag == "character" and n.text is not None:
            char_list.append(n.text.strip())
    md.characters = utils.listToString(char_list)

    # Now extract the credit info.
    for n in root:
        if n.text is None:
            continue
        if n.tag in ("writer", "penciller", "inker", "colorist",
                     "letterer", "editor"):
            metadata.addCredit(n.text.strip(), n.tag.title())
        if n.tag == "coverDesigner":
            metadata.addCredit(n.text.strip(), "Cover")

    metadata.isEmpty = False

    return metadata
def convertXMLToMetadata(self, tree):
    """Parse a CoMet XML tree into a GenericMetadata object.

    Returns None when the root element is not <comet>.
    """
    root = tree.getroot()
    if root.tag != 'comet':
        # Fix: the original executed `raise 1` (raising a non-exception is a
        # TypeError at runtime) followed by an unreachable `return None`.
        # The unreachable line shows the intent: signal failure with None.
        return None

    metadata = GenericMetadata()
    md = metadata

    # Helper: text of the named child element, or None when absent.
    def xlate(tag):
        node = root.find(tag)
        if node is not None:
            return node.text
        else:
            return None

    md.series = xlate('series')
    md.title = xlate('title')
    md.issue = xlate('issue')
    md.volume = xlate('volume')
    md.comments = xlate('description')
    md.publisher = xlate('publisher')
    md.language = xlate('language')
    md.format = xlate('format')
    md.pageCount = xlate('pages')
    md.maturityRating = xlate('rating')
    md.price = xlate('price')
    md.isVersionOf = xlate('isVersionOf')
    md.rights = xlate('rights')
    md.identifier = xlate('identifier')
    md.lastMark = xlate('lastMark')
    md.genre = xlate('genre')  # TODO - repeatable field

    # CoMet dates are "YYYY[-MM[-DD]]"; only year and month are kept.
    date = xlate('date')
    if date is not None:
        parts = date.split('-')
        if len(parts) > 0:
            md.year = parts[0]
        if len(parts) > 1:
            md.month = parts[1]

    md.coverImage = xlate('coverImage')

    readingDirection = xlate('readingDirection')
    if readingDirection is not None and readingDirection == "rtl":
        md.manga = "YesAndRightToLeft"

    # <character> is repeatable; fold all of them into one joined string.
    # Robustness: skip empty elements (n.text is None) instead of crashing.
    char_list = []
    for n in root:
        if n.tag == 'character' and n.text is not None:
            char_list.append(n.text.strip())
    md.characters = utils.listToString(char_list)

    # Now extract the credit info.
    for n in root:
        if n.text is None:
            continue
        if n.tag in ('writer', 'penciller', 'inker', 'colorist',
                     'letterer', 'editor'):
            metadata.addCredit(n.text.strip(), n.tag.title())
        if n.tag == 'coverDesigner':
            metadata.addCredit(n.text.strip(), "Cover")

    metadata.isEmpty = False

    return metadata
def mapCVDataToMetadata(self, volume_results, issue_results, settings):
    """Translate raw Comic Vine volume/issue query results into GenericMetadata."""
    metadata = GenericMetadata()
    metadata.series = issue_results['volume']['name']
    metadata.issue = IssueString(issue_results['issue_number']).asString()
    metadata.title = issue_results['name']
    metadata.publisher = volume_results['publisher']['name']
    metadata.day, metadata.month, metadata.year = self.parseDateStr(
        issue_results['cover_date'])
    #metadata.issueCount = volume_results['count_of_issues']
    metadata.comments = self.cleanup_html(
        issue_results['description'], settings.remove_html_tables)
    if settings.use_series_start_as_volume:
        metadata.volume = volume_results['start_year']
    metadata.notes = "Tagged with the {0} fork of ComicTagger {1} using info from Comic Vine on {2}. [Issue ID {3}]".format(
        ctversion.fork,
        ctversion.version,
        datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
        issue_results['id'])
    #metadata.notes += issue_results['site_detail_url']
    metadata.webLink = issue_results['site_detail_url']

    # A person may carry several comma-separated roles; record one credit per role.
    for person in issue_results['person_credits']:
        if 'role' in person:
            for role in person['role'].split(','):
                # can we determine 'primary' from CV??
                metadata.addCredit(person['name'], role.title().strip(), False)

    metadata.characters = utils.listToString(
        [character['name'] for character in issue_results['character_credits']])
    metadata.teams = utils.listToString(
        [team['name'] for team in issue_results['team_credits']])
    metadata.locations = utils.listToString(
        [location['name'] for location in issue_results['location_credits']])

    # Story arcs are only recorded when at least one is present.
    arc_names = [arc['name'] for arc in issue_results['story_arc_credits']]
    if len(arc_names) > 0:
        metadata.storyArc = utils.listToString(arc_names)

    return metadata
def convertMetadataToXML( self, filename, metadata ):
    """Convert a GenericMetadata object into a ComicRack ComicInfo XML tree.

    Returns an ElementTree ready for serialization. `filename` is not used
    in this body -- presumably kept for interface symmetry; confirm before
    removing it.
    """
    #shorthand for the metadata
    md = metadata

    # build a tree structure
    root = ET.Element("ComicInfo")
    root.attrib['xmlns:xsi']="http://www.w3.org/2001/XMLSchema-instance"
    root.attrib['xmlns:xsd']="http://www.w3.org/2001/XMLSchema"

    #helper func: emit a child element only when the field is set
    def assign( cix_entry, md_entry):
        if md_entry is not None:
            ET.SubElement(root, cix_entry).text = u"{0}".format(md_entry)

    assign( 'Title', md.title )
    assign( 'Series', md.series )
    assign( 'Number', md.issue )
    assign( 'Count', md.issueCount )
    assign( 'Volume', md.volume )
    assign( 'AlternateSeries', md.alternateSeries )
    assign( 'AlternateNumber', md.alternateNumber )
    assign( 'StoryArc', md.storyArc )
    assign( 'SeriesGroup', md.seriesGroup )
    assign( 'AlternateCount', md.alternateCount )
    assign( 'Summary', md.comments )
    assign( 'Notes', md.notes )
    assign( 'Year', md.year )
    assign( 'Month', md.month )
    assign( 'Day', md.day )

    # need to specially process the credits, since they are structured
    # differently than CIX
    credit_writer_list = list()
    credit_penciller_list = list()
    credit_inker_list = list()
    credit_colorist_list = list()
    credit_letterer_list = list()
    credit_cover_list = list()
    credit_editor_list = list()

    # first, loop thru credits, and build a list for each role that CIX supports.
    # Commas are stripped from names because CIX joins multiple people with commas.
    for credit in metadata.credits:
        if credit['role'].lower() in set( self.writer_synonyms ):
            credit_writer_list.append(credit['person'].replace(",",""))
        if credit['role'].lower() in set( self.penciller_synonyms ):
            credit_penciller_list.append(credit['person'].replace(",",""))
        if credit['role'].lower() in set( self.inker_synonyms ):
            credit_inker_list.append(credit['person'].replace(",",""))
        if credit['role'].lower() in set( self.colorist_synonyms ):
            credit_colorist_list.append(credit['person'].replace(",",""))
        if credit['role'].lower() in set( self.letterer_synonyms ):
            credit_letterer_list.append(credit['person'].replace(",",""))
        if credit['role'].lower() in set( self.cover_synonyms ):
            credit_cover_list.append(credit['person'].replace(",",""))
        if credit['role'].lower() in set( self.editor_synonyms ):
            credit_editor_list.append(credit['person'].replace(",",""))

    # second, convert each list to string, and add to XML struct
    if len( credit_writer_list ) > 0:
        node = ET.SubElement(root, 'Writer')
        node.text = utils.listToString( credit_writer_list )
    if len( credit_penciller_list ) > 0:
        node = ET.SubElement(root, 'Penciller')
        node.text = utils.listToString( credit_penciller_list )
    if len( credit_inker_list ) > 0:
        node = ET.SubElement(root, 'Inker')
        node.text = utils.listToString( credit_inker_list )
    if len( credit_colorist_list ) > 0:
        node = ET.SubElement(root, 'Colorist')
        node.text = utils.listToString( credit_colorist_list )
    if len( credit_letterer_list ) > 0:
        node = ET.SubElement(root, 'Letterer')
        node.text = utils.listToString( credit_letterer_list )
    if len( credit_cover_list ) > 0:
        node = ET.SubElement(root, 'CoverArtist')
        node.text = utils.listToString( credit_cover_list )
    if len( credit_editor_list ) > 0:
        node = ET.SubElement(root, 'Editor')
        node.text = utils.listToString( credit_editor_list )

    assign( 'Publisher', md.publisher )
    assign( 'Imprint', md.imprint )
    assign( 'Genre', md.genre )
    assign( 'Web', md.webLink )
    assign( 'PageCount', md.pageCount )
    assign( 'LanguageISO', md.language )
    assign( 'Format', md.format )
    assign( 'AgeRating', md.maturityRating )

    # BlackAndWhite is a yes-only flag in this writer.
    if md.blackAndWhite is not None and md.blackAndWhite:
        ET.SubElement(root, 'BlackAndWhite').text = "Yes"

    assign( 'Manga', md.manga )
    assign( 'Characters', md.characters )
    assign( 'Teams', md.teams )
    assign( 'Locations', md.locations )
    assign( 'ScanInformation', md.scanInfo )

    # loop and add the page entries under pages node
    if len( md.pages ) > 0:
        pages_node = ET.SubElement(root, 'Pages')
        for page_dict in md.pages:
            page_node = ET.SubElement(pages_node, 'Page')
            page_node.attrib = page_dict

    # self pretty-print
    self.indent(root)

    # wrap it in an ElementTree instance, and save as XML
    tree = ET.ElementTree(root)
    return tree
def convertMetadataToXML(self, filename, metadata):
    """Build a ComicRack ComicInfo XML ElementTree from *metadata*.

    *filename* is accepted for interface compatibility but not used here.
    """
    md = metadata

    root = ET.Element("ComicInfo")
    root.attrib['xmlns:xsi'] = "http://www.w3.org/2001/XMLSchema-instance"
    root.attrib['xmlns:xsd'] = "http://www.w3.org/2001/XMLSchema"

    def assign(cix_entry, md_entry):
        # Emit a child element only when the metadata field is present.
        if md_entry is not None:
            ET.SubElement(root, cix_entry).text = u"{0}".format(md_entry)

    for tag, value in (
            ('Title', md.title),
            ('Series', md.series),
            ('Number', md.issue),
            ('Count', md.issueCount),
            ('Volume', md.volume),
            ('AlternateSeries', md.alternateSeries),
            ('AlternateNumber', md.alternateNumber),
            ('StoryArc', md.storyArc),
            ('SeriesGroup', md.seriesGroup),
            ('AlternateCount', md.alternateCount),
            ('Summary', md.comments),
            ('Notes', md.notes),
            ('Year', md.year),
            ('Month', md.month),
            ('Day', md.day)):
        assign(tag, value)

    # Credits are structured differently in CIX: bucket every credit into
    # the role lists CIX understands, then emit one joined element per role
    # (same element order as before: Writer ... Editor).
    role_buckets = [
        ('Writer', set(self.writer_synonyms), []),
        ('Penciller', set(self.penciller_synonyms), []),
        ('Inker', set(self.inker_synonyms), []),
        ('Colorist', set(self.colorist_synonyms), []),
        ('Letterer', set(self.letterer_synonyms), []),
        ('CoverArtist', set(self.cover_synonyms), []),
        ('Editor', set(self.editor_synonyms), []),
    ]
    for credit in metadata.credits:
        role = credit['role'].lower()
        for _tag, synonyms, bucket in role_buckets:
            if role in synonyms:
                # Strip commas: CIX joins multiple people with commas.
                bucket.append(credit['person'].replace(",", ""))
    for tag, _synonyms, bucket in role_buckets:
        if len(bucket) > 0:
            ET.SubElement(root, tag).text = utils.listToString(bucket)

    for tag, value in (
            ('Publisher', md.publisher),
            ('Imprint', md.imprint),
            ('Genre', md.genre),
            ('Web', md.webLink),
            ('PageCount', md.pageCount),
            ('LanguageISO', md.language),
            ('Format', md.format),
            ('AgeRating', md.maturityRating)):
        assign(tag, value)

    # BlackAndWhite is a yes-only flag in this writer.
    if md.blackAndWhite is not None and md.blackAndWhite:
        ET.SubElement(root, 'BlackAndWhite').text = "Yes"

    for tag, value in (
            ('Manga', md.manga),
            ('Characters', md.characters),
            ('Teams', md.teams),
            ('Locations', md.locations),
            ('ScanInformation', md.scanInfo)):
        assign(tag, value)

    # Page entries live under a single <Pages> container.
    if len(md.pages) > 0:
        pages_node = ET.SubElement(root, 'Pages')
        for page_dict in md.pages:
            ET.SubElement(pages_node, 'Page').attrib = page_dict

    # Pretty-print in place, then wrap in an ElementTree for serialization.
    self.indent(root)
    tree = ET.ElementTree(root)
    return tree
def __str__(self):
    """Render every populated metadata field as an aligned, readable table."""
    vals = []
    if self.isEmpty:
        return "No metadata"

    def add_string(tag, val):
        # Skip unset fields and fields that stringify to the empty string.
        if val is not None and u"{0}".format(val) != "":
            vals.append((tag, val))

    def add_attr_string(tag):
        add_string(tag, getattr(self, tag))

    # Fields reported unconditionally, in display order.
    for attr in ("series", "issue", "issueCount", "title", "publisher",
                 "year", "month", "day", "volume", "volumeCount", "genre",
                 "language", "country", "criticalRating", "alternateSeries",
                 "alternateNumber", "alternateCount", "imprint", "webLink",
                 "format", "manga", "price", "isVersionOf", "rights",
                 "identifier", "lastMark"):
        add_attr_string(attr)

    # blackAndWhite only appears when truthy (its position is preserved).
    if self.blackAndWhite:
        add_attr_string("blackAndWhite")

    for attr in ("maturityRating", "storyArc", "seriesGroup", "scanInfo",
                 "characters", "teams", "locations", "comments", "notes"):
        add_attr_string(attr)

    add_string("tags", utils.listToString(self.tags))

    for c in self.credits:
        primary = " [P]" if 'primary' in c and c['primary'] else ""
        add_string("credit", c['role'] + ": " + c['person'] + primary)

    # Pad the field names so values line up in one column.
    flen = 1
    for tag, _val in vals:
        flen = max(flen, len(tag) + 1)
    fmt_str = u"{0: <" + str(flen) + "} {1}\n"
    return u"".join(fmt_str.format(tag + ":", val) for tag, val in vals)
def convertXMLToMetadata( self, tree ):
    """Parse a CoMet XML tree into a GenericMetadata object.

    Returns None when the root element is not <comet>.
    """
    root = tree.getroot()
    if root.tag != 'comet':
        # Fix: the original executed `raise 1` (raising a non-exception is a
        # TypeError at runtime) followed by an unreachable `return None`.
        # The unreachable line shows the intent: signal failure with None.
        return None

    metadata = GenericMetadata()
    md = metadata

    # Helper: text of the named child element, or None when absent.
    def xlate( tag ):
        node = root.find( tag )
        if node is not None:
            return node.text
        else:
            return None

    md.series = xlate( 'series' )
    md.title = xlate( 'title' )
    md.issue = xlate( 'issue' )
    md.volume = xlate( 'volume' )
    md.comments = xlate( 'description' )
    md.publisher = xlate( 'publisher' )
    md.language = xlate( 'language' )
    md.format = xlate( 'format' )
    md.pageCount = xlate( 'pages' )
    md.maturityRating = xlate( 'rating' )
    md.price = xlate( 'price' )
    md.isVersionOf = xlate( 'isVersionOf' )
    md.rights = xlate( 'rights' )
    md.identifier = xlate( 'identifier' )
    md.lastMark = xlate( 'lastMark' )
    md.genre = xlate( 'genre' )  # TODO - repeatable field

    # CoMet dates are "YYYY[-MM[-DD]]"; only year and month are kept.
    date = xlate( 'date' )
    if date is not None:
        parts = date.split('-')
        if len( parts ) > 0:
            md.year = parts[0]
        if len( parts ) > 1:
            md.month = parts[1]

    md.coverImage = xlate( 'coverImage' )

    readingDirection = xlate( 'readingDirection' )
    if readingDirection is not None and readingDirection == "rtl":
        md.manga = "YesAndRightToLeft"

    # <character> is repeatable; fold all of them into one joined string.
    # Robustness: skip empty elements (n.text is None) instead of crashing.
    char_list = []
    for n in root:
        if n.tag == 'character' and n.text is not None:
            char_list.append(n.text.strip())
    md.characters = utils.listToString( char_list )

    # Now extract the credit info
    for n in root:
        if n.text is None:
            continue
        if n.tag in ( 'writer', 'penciller', 'inker', 'colorist',
                      'letterer', 'editor' ):
            metadata.addCredit( n.text.strip(), n.tag.title() )
        if n.tag == 'coverDesigner':
            metadata.addCredit( n.text.strip(), "Cover" )

    metadata.isEmpty = False

    return metadata
def getNextSentence(self, answer, botAction=actions.BotActions.NONE):
    """Run a user utterance through the NLP pipeline and the rule engine.

    Tokenizes, POS-tags and stems *answer*, classifies the resulting
    Sentence within the conversation context, and feeds the context to the
    rule engine, recording the engine's resulting preferences. Typing
    "SUG" on the console short-circuits to a forced suggestion.
    """
    # Debug hook: typing SUG on the console forces a suggestion.
    if answer.strip() == "SUG":
        return self.forceSuggestion()
    self.logger.info("Input sentence " + answer)
    sentence = Sentence(answer)
    sentence.botAction = botAction
    # --> generate the tokens of the sentence
    tokenizer = ToktokTokenizer()
    self.tokens = []
    for sent in sent_tokenize(answer, self.language):
        self.tokens.append(tokenizer.tokenize(sent))
    self.logger.info("Generated Tokens " + utils.listToString(self.tokens))
    sentence.tokens = self.tokens
    # <-- sentence tokens generated
    # --> generate the POS tags of the sentence
    self.postag_tokens = []
    for token in self.tokens:
        self.postag_tokens.append(pos_tag(token))
    self.logger.info("Generated POS TAG Tokens " + utils.listToString(self.postag_tokens))
    sentence.postags = self.postag_tokens
    # <-- sentence POS tags generated
    # --> generate the stems of the sentence's words
    stemmer = SnowballStemmer(self.language, ignore_stopwords=True)
    self.stemmer_text_words = []
    for token in self.tokens:
        for word in token:
            self.stemmer_text_words.append(stemmer.stem(word))
    self.logger.info("Generated Stemmer Tokens " + utils.listToString(self.stemmer_text_words))
    sentence.stemmers = self.stemmer_text_words
    # <-- sentence stems generated
    # --> classify the input sentence
    self.classify_sentence(sentence)
    needReClassify = self.context.addSentence(sentence)
    if needReClassify:
        # The context changed the sentence: wipe its classes and reclassify.
        self.context.sentence.classes = []
        self.classify_sentence(self.context.sentence)
    if "suggestion" in sentence.classes:
        return self.forceSuggestion()
    # <-- input sentence classified
    # --> run the rule engine
    self.myIntellect = MyIntellect()
    self.myIntellect.prepareLogger(self.logger)
    # load the knowledge-base (policy) file
    self.policy_file = self.myIntellect.local_file_uri(
        "./rulesset/rules.policy")
    policy_d = self.myIntellect.learn_policy(self.policy_file)
    policy_applied = self.myIntellect.learn(self.context)
    self.myIntellect.reason()
    self.myIntellect.forget_all()
    self.addPreference(self.myIntellect.preferences)
def task_manager(robot_name):
    """Run one task-management step for *robot_name*.

    Reacts to /taskUpdate messages, plans A* paths to the robot's task
    locations, and broadcasts the current path waypoint as a TF goal frame.
    Relies on module-level state (robotPaths, robotTasks, taskLocations,
    finishedTasks, r) plus ROS params and topics.
    """
    try:
        timeout = 1
        # Current crewmate position from the parameter server.
        crewmateX = rospy.get_param(robot_name + "/positionX")
        crewmateY = rospy.get_param(robot_name + "/positionY")
        # Raises when no update arrives within `timeout` seconds; that
        # timeout is the normal trigger for the except branch below.
        msg1 = rospy.wait_for_message("/" + robot_name + "/taskUpdate",
                                      RobotTaskUpdate, timeout)
        if msg1.need_path_update:
            # Waypoint reached: drop it and acknowledge with a cleared update.
            if len(robotPaths[robot_name]) > 0:
                robotPaths[robot_name].pop(0)
            pub_update = rospy.Publisher(robot_name + '/taskUpdate',
                                         RobotTaskUpdate,
                                         queue_size=10)
            updateMsg = RobotTaskUpdate()
            updateMsg.robot_name = robot_name
            updateMsg.need_task_update = False
            updateMsg.need_path_update = False
            pub_update.publish(updateMsg)
        elif len(robotTasks[robot_name]) > 0:
            # At a task: inspect the laser scan around the closest object.
            sleep(5)
            lasermsg = rospy.wait_for_message(
                "/" + robot_name + "/laser_0", LaserScan, 10)
            ranges = lasermsg.ranges
            # Closest return, ignoring near-field noise below 0.15 m.
            mindist = min([i for i in lasermsg.ranges if i > 0.15])
            minindex = ranges.index(mindist)
            # Extend the scan so the object isn't split at the array edge.
            # NOTE(review): 100 and 567 look like beam-index constants of
            # this scanner's field of view -- confirm against its spec.
            if minindex < 100:
                ranges = ranges[minindex + 567:] + ranges
            elif minindex > 567:
                ranges = ranges + ranges[:100]
            # Keep only returns within 0.2 m of the closest hit (the object),
            # trimmed to an odd count so there is a well-defined middle beam.
            ranges = [i for i in ranges if i < mindist + .2]
            if len(ranges) % 2 == 0:
                ranges = ranges[:len(ranges) - 1]
            flat = dummy_array(mindist, len(ranges))
            difference = np.asarray(flat) - np.asarray(ranges)
            # NOTE(review): this evaluates as
            # `difference[0] and (difference[-1] < 0)`; the symmetric intent
            # was probably `difference[0] < 0 and difference[-1] < 0` --
            # verify before relying on the convex/concave report.
            if difference[0] and difference[len(difference) - 1] < 0:
                print(robot_name, "This task is convex!")
            else:
                print(robot_name, "this task is concave!")
            # Plan from the quarter-meter-grid-snapped position to the next
            # task location, dropping the path's first node (current cell).
            X = crewmateX
            Y = crewmateY
            X = round(X * 4) / 4
            Y = round(Y * 4) / 4
            taskX = taskLocations[robotTasks[robot_name][0]][0]
            taskY = taskLocations[robotTasks[robot_name][0]][1]
            path = a_star_function(X, Y, taskX, taskY, robot_name)
            robotPaths[robot_name] = path
            robotPaths[robot_name].pop(0)
            robotTasks[robot_name].pop(0)
        else:
            ## don't want the controller to go anywhere new
            return
        if len(robotTasks[robot_name]) > 0 and len(
                robotPaths[robot_name]) == 0:
            return
        # Broadcast the next waypoint as this robot's TF goal frame.
        pub0 = rospy.Publisher('/tf', tf2_msgs.msg.TFMessage, queue_size=50)
        t = geometry_msgs.msg.TransformStamped()
        t.header.frame_id = "map_static"
        t.header.stamp = rospy.Time.now()
        t.child_frame_id = robot_name + 'goal'
        t.transform.translation.x = robotPaths[robot_name][0][0]
        t.transform.translation.y = robotPaths[robot_name][0][1]
        t.transform.translation.z = 0.0
        t.transform.rotation.x = 0.0
        t.transform.rotation.y = 0.0
        t.transform.rotation.z = 0.0
        t.transform.rotation.w = 1.0
        tfm = tf2_msgs.msg.TFMessage([t])
        pub0.publish(tfm)
        r.sleep()
    except Exception as e:
        # Usually reached when wait_for_message times out (no task update).
        # NOTE(review): this broad handler also swallows real errors, and
        # `e` is never inspected; crewmateX/Y may be unbound here if
        # get_param itself raised.
        if (len(robotPaths[robot_name]) == 0
                and len(robotTasks[robot_name]) == 2):
            # Fresh robot still holding its initial two tasks: plan the
            # first path from the snapped current position.
            X = crewmateX
            Y = crewmateY
            X = round(X * 4) / 4
            Y = round(Y * 4) / 4
            taskX = taskLocations[robotTasks[robot_name][0]][0]
            taskY = taskLocations[robotTasks[robot_name][0]][1]
            path = a_star_function(X, Y, taskX, taskY, robot_name)
            robotPaths[robot_name] = path
            robotPaths[robot_name].pop(0)
            robotTasks[robot_name].pop(0)
        if len(robotTasks[robot_name]) > 0 and len(
                robotPaths[robot_name]) == 0:
            # Path exhausted but tasks remain: plan to the next task.
            # NOTE(review): no grid snapping here, unlike the branch above
            # -- confirm that is intentional.
            X = crewmateX
            Y = crewmateY
            taskX = taskLocations[robotTasks[robot_name][0]][0]
            taskY = taskLocations[robotTasks[robot_name][0]][1]
            path = a_star_function(X, Y, taskX, taskY, robot_name)
            robotPaths[robot_name] = path
            robotPaths[robot_name].pop(0)
            robotTasks[robot_name].pop(0)
        if len(robotTasks[robot_name]) > 0 and len(
                robotPaths[robot_name]) == 0:
            return
        if not finishedTasks[robot_name]:
            if len(robotTasks[robot_name]) == 0 and len(
                    robotPaths[robot_name]) == 0:
                # All tasks done: withdraw this robot from the active roster.
                robots_with_tasks = rospy.get_param('robots_with_tasks')
                robots_with_tasks = robots_with_tasks.split()
                robots_with_tasks.remove(robot_name)
                finishedTasks[robot_name] = True
                rospy.set_param('robots_with_tasks',
                                listToString(robots_with_tasks))
                return
        # Re-publish the current waypoint as the TF goal frame.
        pub0 = rospy.Publisher('/tf', tf2_msgs.msg.TFMessage, queue_size=50)
        t = geometry_msgs.msg.TransformStamped()
        t.header.frame_id = "map_static"
        t.header.stamp = rospy.Time.now()
        t.child_frame_id = robot_name + 'goal'
        try:
            t.transform.translation.x = robotPaths[robot_name][0][0]
            t.transform.translation.y = robotPaths[robot_name][0][1]
        except:
            # Empty path: treat as "all tasks finished" and park this worker.
            print(robot_name + " has finished all its tasks!")
            sleep(100000)
            return
        t.transform.translation.z = 0.0
        t.transform.rotation.x = 0.0
        t.transform.rotation.y = 0.0
        t.transform.rotation.z = 0.0
        t.transform.rotation.w = 1.0
        tfm = tf2_msgs.msg.TFMessage([t])
        pub0.publish(tfm)
        r.sleep()
def __init__(self, name, numbers):
    """Precompute every readable order of a hexagonal tile's numbers (both faces)."""
    self.id = name
    # One face: the identity order followed by its five left-rotations.
    rotations = [numbers]
    for _ in range(5):
        previous = rotations[-1]
        rotations.append(previous[1:] + [previous[0]])
    # The other face shows each of those cycles reversed.
    all_orders = rotations + [rotation[::-1] for rotation in rotations]
    self.orders = np.array([listToString(order) for order in all_orders])
def __str__(self):
    """Render every populated metadata field as an aligned, readable table.

    Fix: replaced dict.has_key() -- removed in Python 3 -- with the `in`
    operator, which behaves identically on Python 2. Also dropped an
    unused local in add_attr_string.
    """
    vals = []
    if self.isEmpty:
        return "No metadata"

    def add_string(tag, val):
        # Skip unset fields and fields that stringify to the empty string.
        if val is not None and u"{0}".format(val) != "":
            vals.append((tag, val))

    def add_attr_string(tag):
        add_string(tag, getattr(self, tag))

    add_attr_string("series")
    add_attr_string("issue")
    add_attr_string("issueCount")
    add_attr_string("title")
    add_attr_string("publisher")
    add_attr_string("year")
    add_attr_string("month")
    add_attr_string("day")
    add_attr_string("volume")
    add_attr_string("volumeCount")
    add_attr_string("genre")
    add_attr_string("language")
    add_attr_string("country")
    add_attr_string("criticalRating")
    add_attr_string("alternateSeries")
    add_attr_string("alternateNumber")
    add_attr_string("alternateCount")
    add_attr_string("imprint")
    add_attr_string("webLink")
    add_attr_string("format")
    add_attr_string("manga")
    add_attr_string("price")
    add_attr_string("isVersionOf")
    add_attr_string("rights")
    add_attr_string("identifier")
    add_attr_string("lastMark")
    # blackAndWhite only appears when truthy.
    if self.blackAndWhite:
        add_attr_string("blackAndWhite")
    add_attr_string("maturityRating")
    add_attr_string("storyArc")
    add_attr_string("seriesGroup")
    add_attr_string("scanInfo")
    add_attr_string("characters")
    add_attr_string("teams")
    add_attr_string("locations")
    add_attr_string("comments")
    add_attr_string("notes")

    add_string("tags", utils.listToString(self.tags))

    for c in self.credits:
        primary = ""
        if 'primary' in c and c['primary']:  # was c.has_key('primary')
            primary = " [P]"
        add_string("credit", c['role'] + ": " + c['person'] + primary)

    # find the longest field name, then pad so values line up
    flen = 0
    for i in vals:
        flen = max(flen, len(i[0]))
    flen += 1

    outstr = ""
    fmt_str = u"{0: <" + str(flen) + "} {1}\n"
    for i in vals:
        outstr += fmt_str.format(i[0] + ":", i[1])
    return outstr