def parseSuperBackup():
    tree = ET.parse('allsms.xml')  # fix this later
    root = tree.getroot()
    newPersonDict = {}
    newFullTextDict = {}
    newNames = []
    notFound = []
    for message in root:
        phoneNumber = formatPhoneNumber(message.attrib['address'])
        if message.attrib['type'] == '2':
            sender = me
        elif message.attrib['name']:
            sender = titlecase(message.attrib['name'])
        elif phoneNumber in vCardDict.keys():
            sender = titlecase(vCardDict[phoneNumber])
            if sender not in newNames:
                newNames.append(sender)
        else:
            continue  # don't add plain phone numbers
        date = message.attrib['time']
        text = message.attrib['body']
        dateFormatted = datetime.strptime(date, '%b %d, %Y %I:%M:%S %p')  # "Jul 10, 2016 8:28:10 PM"
        addToNewDict(newPersonDict, dateFormatted, text, sender)
        addToNewDict(newFullTextDict, dateFormatted, text)
    if 'y' in input("Enter 'y' if you would like to match duplicate names from Android SMS"):
        matchDuplicates(newPersonDict)
    mergeAndSortPersonDict(newPersonDict, confident)
    mergeAndSortFullTextDict(newFullTextDict)
def check_input_matches_expected_output(in_, out):
    """Function yielded by test generator"""
    try:
        assert titlecase(in_) == out
    except AssertionError:
        print("%s != %s" % (titlecase(in_), out))
        raise
def matchAliases(existingNames, otherNames, otherNamesDict, confident):
    CUTOFFSCORE = 2  # play around with this
    for otherName in otherNames:
        candidates = possMatches(otherName, existingNames)  # list of possible matches (determined by small edit distance)
        topCandidate, bestScore = candidates[0]
        correctMatch = False
        if not confident and bestScore < CUTOFFSCORE:
            if otherName.isdigit():  # phone number
                aliasDict[otherName] = otherName
            # if candidates[1][1] >= bestScore - 1:  # multiple best matches within 1 of each other
            elif candidates[1][1] == bestScore:  # multiple best matches equal to each other
                writingStyleSimilarityDict = {}  # candidate existingName -> similarity to otherName
                toCompare = [candidates[0][0]]
                for candidate in candidates:
                    if candidate[1] == bestScore:
                        writingStyleSimilarityDict[candidate[0]] = writingStyleMatchScore(otherName, otherNamesDict, candidate[0])
                topCandidates = sorted(writingStyleSimilarityDict.keys(), key=lambda x: -writingStyleSimilarityDict[x])
                i = 0
                while not correctMatch and i < len(topCandidates):
                    topCandidate = topCandidates[i]
                    correctMatch = True if 'y' in input("Enter 'y' if " + otherName + " should be matched with " + topCandidate + ": ") else False
                    i += 1
            else:
                correctMatch = True if 'y' in input("Enter 'y' if " + otherName + " should be matched with " + topCandidate + ": ") else False
            if correctMatch:
                aliasDict[otherName] = topCandidate
            else:
                aliasDict[otherName] = titlecase(otherName)
        elif confident:
            aliasDict[otherName] = topCandidate
        else:
            aliasDict[otherName] = titlecase(otherName)
def _titleCaseTitleAndChapter(self, xmlSoup):
    titles = xmlSoup.findAll("h1", {"class": "usc-title-head"})
    for title in titles:
        # Clean em dash and title case
        title.string = u" \u2014 ".join([titlecase(s.lower()) for s in title.text.split(u"\u2014")])
    chapters = xmlSoup.findAll("h3", {"class": "chapter-head"})
    for chapter in chapters:
        # Clean em dash and title case
        chapter.string = u". ".join([titlecase(s.lower()) for s in chapter.text.split(u"\u2014")])
    subchapters = xmlSoup.findAll("h3", {"class": "subchapter-head"})
    for subchapter in subchapters:
        # Clean em dash and title case
        if u"\u2014" in subchapter.text:
            [prefix, suffix] = subchapter.text.split(u"\u2014")
            [heading, number] = prefix.split(" ", 1)
            heading = titlecase(heading.lower())
            suffix = titlecase(suffix.lower())
            subchapter.string = u"%s %s\u2014%s" % (heading, number, suffix)
        else:
            subchapter.string = titlecase(subchapter.text.lower())
    return
def test_ordinals_list_item(self):
    """Test - numbers ending in ordinals like 1st and 24th"""
    from titlecase import ORDINALS
    assert '34Th' not in titlecase(TEST_DATA[2][0])
    assert '1st' in titlecase(TEST_DATA[2][0])
def normalize_ordinals(name):
    """
    Change 'Eighth Plymouth' to '8th Plymouth', and '8 Plymouth' to '8th Plymouth'
    """
    # work around "SD 1"/"HD 1"
    if name.startswith("SD "):
        name = name.replace("SD ", "")
    if name.startswith("HD "):
        name = name.replace("HD ", "")
    if name.isnumeric():
        return name
    for key, val in ORDINALS.items():
        # split words, to make sure that 'fifth' doesn't match 'thirty-fifth'
        if key in name.lower().split(' '):
            name = titlecase(name.lower().replace(key, val[1]))
    for key, val in NUMERALS.items():
        # split words, to make sure that '5' doesn't match 'thirty-fifth'
        if key in name.lower().split(' '):
            name = titlecase(name.lower().replace(key, val[1]))
    # fix capitalization of "1ST", "2ND", etc.
    name = (name.replace('1ST ', '1st ').replace('2ND ', '2nd ').replace('3RD ', '3rd ')
                .replace('4TH ', '4th ').replace('5TH ', '5th ').replace('6TH ', '6th ')
                .replace('7TH ', '7th ').replace('8TH ', '8th ').replace('9TH ', '9th ')
                .replace('10TH ', '10th '))
    # do our best to strip extraneous spaces, inside and outside
    return name.replace('  ', ' ').replace('  ', ' ').strip()
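# A minimal usage sketch of normalize_ordinals. The real ORDINALS and
# NUMERALS tables live elsewhere in that module; the two-entry stand-ins
# below are hypothetical, but they follow the shape the function relies on
# (each value is a pair whose second element is the substitution).
from titlecase import titlecase

ORDINALS = {'eighth': ('8', '8th')}
NUMERALS = {'8': ('8', '8th')}

print(normalize_ordinals('Eighth Plymouth'))  # -> '8th Plymouth'
print(normalize_ordinals('8 Plymouth'))       # -> '8th Plymouth'
print(normalize_ordinals('SD 8'))             # -> '8' (bare number passes through)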
def standardize_manufacturer(data, d, abbr):
    manufacturer = str(data["manufacturer"])
    model = str(data["model"])
    remove_hyphen = model.replace("-", "").lower()
    # Split the string into individual words. split() returns a list.
    split_model = remove_hyphen.split()
    # Combine model number
    if len(split_model[0]) < 4:
        split_model[0] += split_model[1]
        del split_model[1]
    # Spell check the model name. If it is an abbreviation, replace it with its full form.
    for i in range(1, len(split_model)):
        if split_model[i] in abbr:
            split_model[i] = titlecase(abbr[split_model[i]])
        else:
            split_model[i] = titlecase(spell_correct.correct(split_model[i]))
    # Convert the model number to upper case.
    split_model[0] = split_model[0].upper()
    # Join the list with a single space to give the model string.
    model = " ".join(split_model)
    # Add the manufacturer and model to a dictionary of lists.
    if manufacturer not in d:
        d[manufacturer].append(model)
    elif model not in d[manufacturer]:
        d[manufacturer].append(model)
def insertInmateData(inmateInfo):
    fname = titlecase(inmateInfo['fname'])
    lname = titlecase(inmateInfo['lname'])
    bookingNumber = inmateInfo['bookingNumber']
    pod = inmateInfo['pod']
    bookingDate = inmateInfo['bookingDate']
    mni = inmateInfo['mni']
    mugshotURL = inmateInfo['mugshotURL']
    totalBond = inmateInfo['totalBond']
    status = titlecase(inmateInfo['status'])
    federal = inmateInfo['federal']
    otherCounty = inmateInfo['otherCounty']
    hold = inmateInfo['hold']
    url = inmateInfo['url']
    removed = 0
    try:
        conn = pymysql.connect(host=dbInfo.host, unix_socket=dbInfo.unix_socket, user=dbInfo.user,
                               passwd=dbInfo.passwd, db=dbInfo.db, charset=dbInfo.charset)
        cur = conn.cursor()
        cur.execute("USE %s" % (dbInfo.db))
        cur.execute('INSERT INTO inmates(fname, lname, bookingNumber, pod, bookingDate, mni, mugshotURL, totalBond, status, federal, otherCounty, hold, url, removed) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)',
                    (fname, lname, bookingNumber, pod, bookingDate, mni, mugshotURL, totalBond, status, federal, otherCounty, hold, url, removed))
        cur.connection.commit()
        cur.close()
    finally:
        conn.close()
def set_fields_from_data(self, data):
    xmldoc = minidom.parseString(data)
    self.authors = get_field(xmldoc, "primaryauthor")
    more_authors = get_fields(xmldoc, "author", ' and ')
    if len(more_authors) > 0:
        self.authors += ' and ' + more_authors
    self.authors = capitalize_authors(self.authors)
    self.abstract = get_field(xmldoc, "p")
    self.keywords = get_fields(xmldoc, "keyword", ', ')
    self.journal = get_field(xmldoc, "source_title")
    if self.journal.isupper():
        if UseTitlecase:
            self.journal = titlecase(self.journal.lower())
        else:
            self.journal = self.journal.title()
    doi = get_last_field(xmldoc, "article_no")
    if len(doi) > 0:
        self.doi = doi[4:]
    else:
        self.doi = doi
    self.pages = get_field(xmldoc, "bib_pages")
    if self.pages == '-':
        artn = get_field(xmldoc, "article_no")
        self.pages = artn[4:]
    self.title = get_field(xmldoc, "item_title")
    if self.title.isupper():
        if UseTitlecase:
            self.title = titlecase(self.title.lower())
        else:
            self.title = self.title.title()
    self.year = get_attribute_from_field(xmldoc, "bib_issue", "year")
    self.volume = get_attribute_from_field(xmldoc, "bib_issue", "vol")
def test_from_all_lower(self):
    self.assertEqual(tc.titlecase('a very simple title'), 'A Very Simple Title')
    self.assertEqual(tc.titlecase('o\'shea is not a good band'), 'O\'Shea Is Not a Good Band')
    self.assertEqual(tc.titlecase('o\'do not wanton with those eyes'), 'O\'Do Not Wanton With Those Eyes')
def set_fields_from_data(self, isi_rec):
    """ xmlrec is a <REC> xml node """
    xmldoc = isi_rec
    self.authors = get_fields(xmldoc, "AuCollectiveName", ' and ')
    # self.authors = capitalize_authors(self.authors)
    self.abstract = get_field(xmldoc, "abstract")
    self.keywords = get_fields(xmldoc, "keyword", ', ')
    self.journal = get_field(xmldoc, "source_title")
    if self.journal.isupper():
        if UseTitlecase:
            self.journal = titlecase(self.journal.lower())
        else:
            self.journal = self.journal.title()
    doi = get_last_field(xmldoc, "article_no")
    if len(doi) > 0:
        self.doi = doi[4:]
    else:
        self.doi = doi
    self.pages = get_field(xmldoc, "bib_pages")
    if self.pages == '-':
        artn = get_field(xmldoc, "article_no")
        self.pages = artn[4:]
    self.title = get_field(xmldoc, "item_title")
    if self.title.isupper():
        if UseTitlecase:
            self.title = titlecase(self.title.lower())
        else:
            self.title = self.title.title()
    self.year = get_attribute_from_field(xmldoc, "bib_issue", "year")
    self.volume = get_attribute_from_field(xmldoc, "bib_issue", "vol")
def create(key):
    try:
        title = "%s (%s)" % tuple(titlecase(key.replace('-', ' ')).split('_'))
    except:
        title = titlecase(key.replace('-', ' '))
    data = {'content': "%s\n==========\n..." % title, 'key': key}
    return render_template('edit.html', **data)
def handle_row(self, row):
    atco_code = 'maneo-' + row['CODE']
    defaults = {
        'locality_centre': False,
        'active': True,
        'latlong': row['geometry']
    }
    name = row.get('\ufeffAPPCOM', row.get('APPCOM'))
    name_parts = name.split(' - ', 1)
    if len(name_parts) == 2:
        if name_parts[1].startswith('Desserte'):
            name = name_parts[0]
            defaults['indicator'] = name_parts[1]
        else:
            defaults['town'] = titlecase(name_parts[1])
    defaults['common_name'] = titlecase(name)
    stop = StopPoint.objects.update_or_create(atco_code=atco_code, defaults=defaults)[0]
    url = 'http://www.commentjyvais.fr/en/schedule/result/?' + urlencode({
        'schedule[stop_area][autocomplete-hidden]': 'stop_area:G50:SA:' + row['IDARRET']
    })
    res = session.get(url)
    soup = BeautifulSoup(res.text, 'lxml')
    line_elements = soup.find_all('div', {'class': 'line-info'})
    lines = set()
    for element in line_elements:
        line = element.find('span', {'class': 'ctp-line-code'})
        if line is None:
            continue
        line = line.text.strip()
        if line in lines:
            continue
        lines.add(line)
        if len(line) > 24:
            print(line)
            continue
        operator_name = element.find('img')['alt'].split()[0]
        operator = Operator.objects.update_or_create(
            id=slugify(operator_name).upper(),
            name=operator_name,
            region_id='FR'
        )[0]
        service = Service.objects.update_or_create(
            service_code='maneo-' + line,
            line_name=line,
            region_id='FR',
            date='2017-01-01'
        )[0]
        service.operator.add(operator)
        StopUsage.objects.update_or_create(service=service, stop=stop, defaults={
            'order': 0
        })
def format_building(cls, sub_name, name, number):
    if not any([sub_name, name, number]):
        return ''
    # Define exception to the usual rule requiring a newline for the
    # building name. See p. 27 of PAF Guide for further information.
    building_str = ''
    exception = re.compile(r"^\d.*\d$|^\d.*\d[A-Za-z]$|^\d[A-Za-z]$|^.$")
    for component in [sub_name, name]:
        if component and exception.match(component):
            building_str += component
            if re.match(r"^[A-Za-z]$", component):
                building_str += u", "
            else:
                building_str += u" "
        else:
            # Check if final portion of string is numeric/alphanumeric. If
            # so, split and apply exception to that section only.
            parts = titlecase(component).split(' ')
            final = parts.pop()
            # Note: the original pattern was r'/^\d*$/' with JavaScript-style
            # slashes, which could never match; the anchors belong in the
            # pattern itself.
            if (exception.match(component) and not number
                    and not re.match(r'^\d*$', final)):
                building_str += u"%s\n%s " % (' '.join(parts), final)
            else:
                building_str += u"%s\n" % titlecase(component)
    if number:
        building_str += u"%d " % number
    return building_str.lstrip()
def parseJournalListFile(filename):
    fp = open(filename, "r")
    doc = fp.read()
    fp.close()
    soup = BeautifulSoup(doc)
    dts = soup.findAll("dt")
    journalList = {}
    for dt in dts:
        # Get the name minus the number
        fullName = getName.match(dt.text).groups()[1].strip()
        fullName = fullName.lower()
        fullName = titlecase(fullName)
        journalList[fullName] = {}
        # Get the following definition data elements, which include:
        # . frequency of publication
        # . ISSN
        # . address + indices
        dds = dt.fetchNextSiblings("dd", limit=3)
        # We need to check if the ISSN is in the second dd;
        # if not, then we assume that there was no frequency given,
        # and we need to take only two dds instead
        if dds[1].text.find("ISSN") == -1:
            dds = dt.fetchNextSiblings("dd", limit=2)
            journalList[fullName]["frequency"] = "none"
            journalList[fullName]["ISSN"] = dds[0].text[6:]
            address = dds[1].contents[0].lower()
            journalList[fullName]["address"] = titlecase(address)
            citationIndicies = dds[1].contents[1]
            links = citationIndicies.findAll("a")
            linkList = []
            for link in links:
                linkList.append((link["href"], link.text))
            journalList[fullName]["citationIndicies"] = linkList
        else:
            journalList[fullName]["frequency"] = dds[0].text.strip()
            journalList[fullName]["ISSN"] = dds[1].text[6:]
            address = dds[2].contents[0].lower()
            journalList[fullName]["address"] = titlecase(address)
            citationIndicies = dds[2].contents[1]
            links = citationIndicies.findAll("a")
            linkList = []
            for link in links:
                linkList.append((link["href"], link.text))
            journalList[fullName]["citationIndicies"] = linkList
    return journalList
def _read_and_write(raw_file_route, pipe_file_route, pairs):
    map = {'COUNTRY': 1013, 'DISTRICT': 1014, 'HSDESC': 1012, 'NAICS': 1008, 'SITC': 1015}
    file_name = _extract_file_name(raw_file_route)
    if file_name not in map:
        print('we have not yet defined a type_cd for data in file ' + file_name)
        print('program has exited')
        return
    # if len(pairs) != 3:
    #     print('This program is intended to process input that has 3 columns (after adding pipes)')
    #     print('however the input document \'{0}\' contains {1} columns'.format(file_name, len(pairs)))
    #     print('program has exited')
    #     return
    # some constants
    client_id = 1
    type_cd = map[file_name]
    raw_file = open(raw_file_route, 'r')
    sql_file_name = 'pht_code_table_{0}.SQL'.format(type_cd)
    sql_file = open(pipe_file_route + sql_file_name, 'w')
    # some sql overhead
    sql_file.write('DELETE FROM pht_code\nWHERE client_id=1 AND type_cd = \'{0}\';\n\r'.format(type_cd))
    list_order = 10
    # i, j, k represent the columns for code, description, and description_long;
    # these values differ by file
    if type_cd == 1015 or type_cd == 1012:
        i = 0
        j = 2
        k = 1
    else:
        i = 0
        j = 1
        k = 2
    for line in raw_file:
        line = line.strip()
        code = titlecase(line[pairs[i][0] - 1:pairs[i][1]].strip())
        description = titlecase(line[pairs[j][0] - 1:pairs[j][1]].strip())
        try:
            description_long = titlecase(line[pairs[k][0] - 1:pairs[k][1]].strip())
        except:
            description_long = 'NULL'
        sql = ('INSERT INTO pht_code (client_id, type_cd, code, parent_cd, ext_code, description, description_long, list_order, created_by, insert_dt, expire_dt)\n'
               'VALUES (1, {0}, {1}, NULL, NULL, {2}, {3}, {4}, \'load_table_data\', NOW(), NULL);\n\r'.format(
                   type_cd,
                   _process_string_for_sql(str(type_cd) + '-' + code),
                   _process_string_for_sql(description),
                   description_long if description_long == 'NULL' else _process_string_for_sql(description_long),
                   list_order))
        list_order = list_order + 10
        sql_file.write(sql)
    sql_file.write('COMMIT;')
    raw_file.close()
    sql_file.close()
def race(text):
    race_map = {
        "B": "Black"
    }
    if text in race_map:
        return titlecase(race_map[text])
    else:
        return titlecase(text)
def test_callback():
    def abbreviation(word, **kwargs):
        if word.upper() in ('TCP', 'UDP'):
            return word.upper()
    s = 'a simple tcp and udp wrapper'
    assert titlecase(s) == 'A Simple Tcp and Udp Wrapper'
    assert titlecase(s, callback=abbreviation) == 'A Simple TCP and UDP Wrapper'
    assert titlecase(s.upper(), callback=abbreviation) == 'A Simple TCP and UDP Wrapper'
    assert titlecase(u'crème brûlée', callback=lambda x, **kw: x.upper()) == u'CRÈME BRÛLÉE'
def get_timings(input_filename):
    # Avoid needing this during venusian scan
    def errback(msg):
        print(msg)
    kardata = open(input_filename, 'rb').read()
    midifile = midi.midiParseData(kardata, errback, 'utf-8')
    if midifile is None:
        print('Not a valid midi file %s' % input_filename)
        return
    lyrics_list = midifile.lyrics.list
    timings = []
    lyrics_text = []
    first_ms = lyrics_list[0].ms
    current_line = []
    title = ' '.join([x.capitalize() for x in input_filename.split('_')])
    artist = ''
    for i, lyric in enumerate(lyrics_list):
        if i == 0:
            title = titlecase.titlecase(lyric.text)
        if i == 1:
            artist = titlecase.titlecase(lyric.text)
        current_line.append([float(lyric.ms - first_ms) / 1000, lyric.text])
        try:
            next_lyric = lyrics_list[i + 1]
        except IndexError:
            next_lyric = None
        if lyric.line != getattr(next_lyric, 'line', None):
            last_ms = lyric.ms
            newline = (
                float(first_ms) / 1000,
                float(last_ms) / 1000,
                current_line,
            )
            timings.append(newline)
            if next_lyric:
                first_ms = next_lyric.ms
            else:
                first_ms = last_ms
            line_text = ''.join([syllable[1] for syllable in current_line])
            lyrics_text.append(line_text.rstrip())
            current_line = []
    timings.append(  # why do we append this
        (
            float(first_ms) / 1000,
            float(lyrics_list[-1].ms) / 1000,
            current_line,
        )
    )
    lyrics = '\n'.join(lyrics_text)
    return kardata, title, artist, lyrics, json.dumps(timings, indent=2)
def sex(text):
    sex_map = {
        "F": "Female",
        "M": "Male"
    }
    if text in sex_map:
        return titlecase(sex_map[text])
    else:
        return titlecase(text)
def race(self, text):
    if not text:
        return None
    text = text.upper()
    if text in RACE_LOOKUP:
        return titlecase(RACE_LOOKUP[text])
    else:
        return titlecase(text)
def sex(self, text):
    if not text:
        return None
    text = text.upper()
    if text in GENDER_LOOKUP:
        return titlecase(GENDER_LOOKUP[text])
    else:
        return titlecase(text)
def handle_replace(self, m):
    found = m.group(0)
    if found.upper() == found:
        return self.replace_text.upper()
    elif found.lower() == found:
        return self.replace_text.lower()
    elif titlecase(found) == found:
        return titlecase(self.replace_text)
    else:
        return self.replace_text
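# A usage sketch of handle_replace, wired into re.sub via a tiny
# hypothetical wrapper class (the original host class isn't shown here):
# the replacement inherits the casing of whatever the regex matched.
import re
from titlecase import titlecase

class CasePreservingReplacer:
    def __init__(self, replace_text):
        self.replace_text = replace_text

    def handle_replace(self, m):
        found = m.group(0)
        if found.upper() == found:
            return self.replace_text.upper()
        elif found.lower() == found:
            return self.replace_text.lower()
        elif titlecase(found) == found:
            return titlecase(self.replace_text)
        else:
            return self.replace_text

r = CasePreservingReplacer('colour')
print(re.sub(r'color', r.handle_replace, 'COLOR Color color', flags=re.IGNORECASE))
# -> COLOUR Colour colour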
def __init__(self, title_season, file_path):
    Torrent.__init__(self, file_path)
    (self.title, self.season) = title_season  # (title, season)
    # titlecase the title excluding any year in it (`The Night Of 2016`, not `The Night of 2016`)
    year = re.search(r'\d{4}', self.title)
    if year:
        title_without_year = year.string[:year.start()].strip()
        self.title = titlecase(title_without_year) + ' ' + year.group()
    else:
        self.title = titlecase(self.title.strip())
    self.destination = os.path.join(Settings().tv_path, self.title, self.season)
def class_cell_parse(year, month, dayOfMonth, classCell):
    linkElement = classCell.find("a")
    seats = SEATS.match(linkElement["title"])
    if seats is None:  # "SORRY, CLASS IS FULL", e.g.
        return None
    seats = int(seats.groups()[0])
    if seats <= 0:
        return None
    ret = event.Event()
    ret.title = titlecase.titlecase(linkElement.text.strip())
    ret.description = titlecase.titlecase(linkElement["title"])
    for abbreviation in [
        "3D", "CAD", "CAM", "CNC", "DIY", "MIG", "SBU", "TIG", "TV", "ULS",
        "II", "III", "IV", "VI", "VII", "IX", "XI",
    ]:
        matcher = re.compile(r"\b" + abbreviation + r"\b", re.IGNORECASE)
        ret.title = matcher.sub(abbreviation, ret.title)
        ret.description = matcher.sub(abbreviation, ret.description)
    ret.link = "http://www.techshop.ws/" + linkElement["href"]
    timeElement = classCell.find("td").text.strip()
    (hour, minute, meridian) = TIME_OF_DAY.match(timeElement).groups()
    hour = int(hour)
    minute = int(minute)
    if meridian.upper().startswith("P") and hour < 12:
        hour += 12
    ret.start_time = pytz.timezone(os.environ["TECHSHOP_LOCAL_TIMEZONE"]).localize(
        datetime.datetime(year, month, dayOfMonth, hour, minute, 0, 0)
    )
    duration = linkElement["title"]
    ret.end_time = ret.start_time + datetime.timedelta(hours=float(DURATION.match(duration).groups()[0]))
    ret.time_zone = os.environ["TECHSHOP_LOCAL_TIMEZONE"]
    ret.location = os.environ["TECHSHOP_LOCATION_STRING"]
    return ret
def get_defaultname(m4afilename, showalbum=False):
    mp4tags = mutagen.mp4.MP4(m4afilename)
    curdir = os.path.dirname(os.path.abspath(m4afilename))
    if len(set(['\xa9nam', '\xa9ART']) - set(mp4tags.keys())) != 0:
        return
    song_title = titlecase.titlecase(max(mp4tags.tags['\xa9nam']))
    song_artist = max(mp4tags.tags['\xa9ART'])
    song_title = song_title.replace('/', '-')
    song_artist = song_artist.replace('/', '-')
    if not showalbum:
        return '%s.%s.m4a' % (song_artist, song_title)
    else:
        song_album = titlecase.titlecase(max(mp4tags.tags['\xa9alb']))
        song_album = song_album.replace('/', '-')
        return '%s.%s.%s.m4a' % (song_artist, song_album, song_title)
def vendor_csv(request, vendor_duns):
    vendor = Vendor.objects.get(duns=vendor_duns)
    setasides = SetAside.objects.all().order_by('far_order')
    naics = request.GET.get('naics-code', None)
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="search_results.csv"'
    writer = csv.writer(response)
    writer.writerow((vendor.name,))
    writer.writerow(('SAM registration expires: ', vendor.sam_expiration_date.strftime("%m/%d/%Y")))
    writer.writerow(('',))
    writer.writerow([sa_obj.abbreviation for sa_obj in setasides])
    vendor_sa = []
    for sa in setasides:
        if sa in vendor.setasides.all():
            vendor_sa.append('X')
        else:
            vendor_sa.append('')
    writer.writerow(vendor_sa)
    writer.writerow(('',))
    writer.writerow(('DUNS', vendor.duns, '', 'Address:', titlecase(vendor.sam_address)))
    writer.writerow(('CAGE Code', vendor.cage, '', '',
                     titlecase(vendor.sam_citystate[0:vendor.sam_citystate.index(',') + 1]) + vendor.sam_citystate[vendor.sam_citystate.index(',') + 1:]))
    writer.writerow(('Employees', vendor.number_of_employees, '', 'OASIS POC:', vendor.pm_name))
    writer.writerow(('Annual Revenue', vendor.annual_revenue, '', '', vendor.pm_phone))
    writer.writerow(('', '', '', '', vendor.pm_email.lower()))
    writer.writerow(('',))
    if naics:
        writer.writerow(('This vendor\'s contract history for NAICS {0}'.format(naics),))
    else:
        writer.writerow(('This vendor\'s contract history for all contracts',))
    writer.writerow(('Date Signed', 'PIID', 'Agency', 'Type', 'Value ($)', 'Email POC', 'Status'))
    if naics:
        contracts = Contract.objects.filter(vendor=vendor, NAICS=naics).order_by('-date_signed')
    else:
        contracts = Contract.objects.filter(vendor=vendor).order_by('-date_signed')
    for c in contracts:
        if '_' in c.piid:
            piid = c.piid.split('_')[1]
        else:
            piid = c.piid
        writer.writerow((c.date_signed.strftime("%m/%d/%Y"), piid, titlecase(c.agency_name),
                         c.get_pricing_type_display(), c.obligated_amount,
                         (c.point_of_contact or "").lower(), c.get_reason_for_modification_display()))
    return response
def clean_table_name(table_name):
    """Title case, strip bogus white space, and apply a few observed direct fixes for title casing."""
    table_name = re.sub(r'\s+', ' ', table_name)  # some have multiple white spaces
    table_name = titlecase(table_name.lower())
    for problem, fix in TABLE_NAME_REPLACEMENTS:
        table_name = re.sub(problem, fix, table_name)
    return table_name.strip()
def price(bot, trigger):
    """
    Grab the price for the given card (and optional set).
    Information can come from any API that outputs JSON.
    """
    try:
        card = None
        options = trigger.group(2).split(' !')
        options = [x.encode('utf-8') for x in options]
        if len(options) > 1:
            print("Name and set passed in, try getting them directly.")
            name = options[0]
            set_name = options[1]
            card = get_card(name, set_name)
            if card:
                print("Found card in cache/MTGPrice, replying.")
                bot.reply(titlecase.titlecase(options[0]) + ' | MTGPrice.com fair price: ' +
                          card.value + ' | Set: ' + construct_set(set_name).replace('_', ' '))
            else:
                print("Card not found in cache/MTGPrice.")
                bot.reply("No results.")
        else:
            print("No searching techniques worked, replying with failure.")
            bot.reply("No results.")
    except Exception:
        traceback.print_exc()
        print("Exception while searching: ")
        bot.reply("No results (or you broke me).")
def maybe_recapitalize_title(title):
    """
    Recapitalize a title if it is mostly uppercase
    (number of uppercase letters > number of lowercase letters)

    >>> maybe_recapitalize_title(u'THIS IS CALLED SCREAMING')
    u'This Is Called Screaming'
    >>> maybe_recapitalize_title(u'This is just a normal title')
    u'This is just a normal title'
    >>> maybe_recapitalize_title(u'THIS IS JUST QUITE Awkward')
    u'THIS IS JUST QUITE Awkward'
    """
    nb_upper, nb_lower = 0, 0
    for letter in title:
        if letter.isupper():
            nb_upper += 1
        elif letter.islower():
            nb_lower += 1
    if nb_upper > nb_lower:
        return titlecase(title)
    else:
        return title
def run(self, edit):
    for region in self.view.sel():
        if region.empty():
            line = self.view.line(region)
            lineStr = self.view.substr(line)
            relIndex = region.begin() - line.begin()
            end = lineStr.find("}", relIndex)
            sta = lineStr[::-1].find("{", len(lineStr) - relIndex)
            # != -1 rather than 'is not -1': identity comparison on ints
            # only works by accident of CPython's small-int caching
            if sta != -1 and end != -1:
                region = sublime.Region(
                    line.begin() + (len(lineStr) - sta),
                    line.begin() + end)
            else:
                region = self.view.line(region)
        s = self.view.substr(region)
        s = titlecase(s)
        self.view.replace(edit, region, s)
def to_titlecase(text):
    """
    Converts a string into a titlecased version of itself.

    Lists of uppercase, lowercase, and mixed-case words are read from config.py.

    Args:
        text (str): input string to convert.

    Returns:
        A titlecased version of the input string.
    """
    def abbreviations(word, **_kwargs):
        if word.upper() in config.uppercase_words:
            return word.upper()
        if word.lower() in config.lowercase_words:
            return word.lower()
        for s in config.mixedcase_words:
            if word.lower() == s.lower():
                return s
        if word and word[0] in ['\\', '$']:
            return word
    return titlecase.titlecase(text, callback=abbreviations)
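# Usage sketch for to_titlecase, assuming config.uppercase_words,
# config.lowercase_words and config.mixedcase_words are plain word lists;
# the stand-in namespace below is illustrative only, not the real config.py.
import types

config = types.SimpleNamespace(
    uppercase_words=['NASA'],
    lowercase_words=['vs'],
    mixedcase_words=['PostgreSQL'],
)

print(to_titlecase('nasa vs postgresql manual'))  # -> 'NASA vs PostgreSQL Manual'
print(to_titlecase('$100 in costs'))              # words starting with '$' or '\' pass through unchanged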
def general(request):
    data = MovieObj.objects.all()
    print(data)
    for obj in data:
        obj.title = titlecase(obj.title)
        # str.replace returns a new string, so the result must be reassigned
        obj.title = obj.title.replace(" ,", "")
    data = serializers.serialize('json', data)
    genreset = set()
    with open('explore/genre.txt') as f:
        lines = f.readlines()
        for line in lines:
            line = line.replace("\n", "")
            arr = line.split('|')
            for word in arr:
                genreset.add(word)
    return render_to_response("general.html",
                              {'data': mark_safe(data), 'genre': list(genreset)},
                              RequestContext(request))
def manualJobTweet(job, year=None):
    image = SimulatorGeneratorImage.getImageFor(
        job,
        safeSearchLevel=config.get("services", "google_safesearch"),
        referer="http://twitter.com/SimGenerator")
    if year is None:
        year = random.randint(config.getint("settings", "minyear"),
                              datetime.date.today().year)
    artFile = "output-%s.png" % datetime.datetime.now().strftime("%Y-%m-%d-%H%M.%f")
    artFile = os.path.join(tempfile.gettempdir(), artFile)
    SimulatorGeneratorImage.createBoxArt(
        job, year, image, artFile,
        maxSize=(
            str(twitterGlobalConfig["photo_sizes"]["large"]["w"] - 1),
            str(twitterGlobalConfig["photo_sizes"]["large"]["h"] - 1),
        ),
        deleteInputFile=True)
    tweet(titlecase.titlecase(job), year, artFile)
def init():
    if not drv.exist(fln.fDataframe):
        link = "https://www.youtube.com/results?search_query=jake+and+amir+%s"
        titles = []
        links = []
        scripts = []
        soup = getSoup(surl)
        for script in soup.findAll("div", {"class": "episode-script-inner"}):
            scripts.append(str(script.text).replace('...', '⋯').replace('..', '⋯'))
        for title in soup.findAll("td", {"class": "header-inner-title"}):
            titles.append(titlecase(title.text))
            links.append(link % str(title.text).replace(' ', '+'))
        archive = pd.DataFrame({
            'Title': titles,
            'Link': links,
            'Script': scripts
        })
        archive = archive.replace('[\t]+', '', regex=True)
        archive = archive.replace('[\r]+', '', regex=True)
        pkl.save(fln.fDataframe, archive)
    df = pkl.load(fln.fDataframe)
    return df
def normalize(d: Deck) -> str:
    try:
        name = d.original_name
        name = name.lower()
        name = replace_space_alternatives(name)
        name = remove_pd(name)
        name = remove_hashtags(name)
        name = remove_brackets(name)
        name = strip_leading_punctuation(name)
        unabbreviated = expand_common_abbreviations(name)
        if unabbreviated != name or name in ABBREVIATIONS.values():
            name = unabbreviated
        elif whitelisted(name):
            pass
        else:
            name = add_colors_if_no_deckname(name, d.get('colors'))
            name = normalize_colors(name)
            name = add_archetype_if_just_colors(name, d.get('archetype_name'))
            name = remove_mono_if_not_first_word(name)
        name = ucase_trailing_roman_numerals(name)
        return titlecase.titlecase(name)
    except ValueError:
        raise InvalidDataException('Failed to normalize {d}'.format(d=repr(d)))
def _search_room(self, person_name, room_type):
    """
    Used by the add_person function. Assists in the selection and
    allocation of a random room from the available rooms.
    """
    rooms = self.rooms.get(room_type).keys()
    search_rooms = list(rooms)
    if not search_rooms:
        return "the system. No rooms available at the moment."
    searching = True
    while searching:
        random_room = random.choice(search_rooms)
        occupants = self.rooms.get(room_type)[random_room]
        if len(occupants) < random_room.max_occupants:
            person_title = titlecase(person_name)
            occupants.append(person_title)
            return random_room
        search_rooms.remove(random_room)
        if not len(search_rooms):
            searching = False
    return None
def check_caption_format(title, format_checks):
    result = True
    message = []
    text = title.text.strip()
    for check in format_checks:
        if check['valid_result'] is True and check['test'].search(text) is None:
            result = False
            message.append(check['message'])
        elif check['valid_result'] is False and check['test'].search(text) is not None:
            result = False
            message.append(check['message'])
    # Also check title case
    title_to_check = RE_SPECIAL_CHAR.sub(' ', text)
    # remove extra spaces created by preformat
    title_to_check = RE_MULTI_SPACE.sub(' ', title_to_check)
    # use titlecase library to create title case of title and see if it is same as original
    if titlecase(title_to_check) != title_to_check:
        result = False
        message.append('Not in Title Case/Initial Caps')
    return result, message
def parse_name(self, name):
    if not isinstance(name, str):
        return name
    name = name.strip()
    # if name is one character or less then return it
    if len(name) <= 1:
        return name
    # if it contains any lowercase letters then return as is
    for c in name:
        if c.islower():
            return name
    # try titlecasing
    try:
        name = titlecase.titlecase(name, self.title_exceptions)
    except:
        pass
    # Make sure the first letter is capitalised
    return name[0].upper() + name[1:]
def prettify(value):
    """
    Prettify a field value for display.

    Args:
        value (str)

    Returns:
        str
    """
    punct = set(list(string.punctuation + string.whitespace))
    # Strip everything off left side.
    value = value.lstrip(''.join(punct))
    # Allow ), ], and . on right.
    value = value.rstrip(''.join(punct - set(list(')].'))))
    # Strip '.' unless last character is uppercase.
    if not re.search(r'\s[a-z]{1}\.$', value, re.I):
        value = value.rstrip('.')
    return titlecase(value)
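# Hedged examples of prettify's stripping rules, traced by hand from the
# code above (not taken from the original project's test suite):
print(prettify('...the catcher in the rye.'))  # -> 'The Catcher in the Rye'
print(prettify('letters of john f.'))          # -> 'Letters of John F.' (a trailing initial keeps its dot)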
def extract_shift(row):
    categorized = False
    date = arrow.get(row[0], 'YYYY-MM-DD HH:mm:ss Z').format('YYYY-MM-DD HH:mm:ss')
    name = row[6]
    # Clean up the name a bit
    if name[-3:] == 'USA':
        name = name[:-5]
    amount_usd = row[1][1:]
    unit = row[3]
    amount_unit = str_minus(row[4])
    usd_per_unit = row[5]
    entry_type = MISC
    for k, v in accounts.items():  # .items() rather than the Python 2-only .iteritems()
        if k.search(name):
            if v[0] != '':
                name = v[0]
            entry_type = v[1]
            categorized = True
            break
    if not categorized:
        print(name + ' : ' + amount_usd + ' : ' + date)
    # Log price of crypto
    log = 'P ' + date + ' ' + unit + ' ' + usd_per_unit + '\n\n'
    # Log exchange for USD (postings are indented, as the ledger format requires)
    log += date + ' ' + 'Shift Payments' + '\n'
    log += '    ' + 'Assets:Cash' + ' $' + amount_usd + '\n'
    log += '    ' + UNITS[unit] + ' ' + unit + ' ' + amount_unit + '{' + usd_per_unit + '}\n'
    # Log purchase
    log += date + ' ' + titlecase(name) + '\n'
    log += '    ' + 'Assets:Cash' + ' $' + str_minus(amount_usd) + '\n'
    log += '    ' + entry_type + '\n\n'
    return log
def QueryPage(fullcountry, countryurl, country):
    url = "https://monitor.civicus.org/country/" + countryurl
    print(url)
    try:
        page = requests.get(url, headers=headers)
        soup = BeautifulSoup(page.text, "html.parser")
        country = titlecase(country)
        status = soup.find('div', attrs={'class': 'intro'}).get_text().split()[2]
        print(status)
        global coverview
        coverview = soup.find('div', attrs={'class': 'half'}).find('p').get_text()
        global cstatus
        cstatus = "The civic space level of " + fullcountry + " is " + status
    except:
        print("search failed!")
        cstatus = "I don't understand."
def normalize(d):
    name = d.original_name
    name = name.lower()
    name = replace_space_alternatives(name)
    name = remove_pd(name)
    name = remove_hashtags(name)
    name = remove_brackets(name)
    unabbreviated = expand_common_abbreviations(name)
    if unabbreviated != name:
        name = unabbreviated
    elif name in WHITELIST:
        pass
    else:
        removed_colors = False
        without_colors = remove_colors(name)
        if name != without_colors:
            removed_colors = True
            name = without_colors
        if name == '' and d.get('archetype'):
            name = d.archetype
        if removed_colors or name == '':
            name = prepend_colors(name, d.colors)
    name = ucase_trailing_roman_numerals(name)
    return titlecase.titlecase(name)
def randomTrendTweet():
    trends = getTrends()
    if len(trends) == 0:
        sys.stderr.write("Couldn't get any trending topics. :-/\n")
        return
    trend = random.choice(trends)
    if trend[0] == "#":
        text = trend[1:]
        text = inflection.titleize(text)
        text = titlecase.titlecase(text)
    else:
        text = trend
    image = SimulatorGeneratorImage.getImageFor(
        text,
        safeSearchLevel=config.get("services", "google_safesearch"),
        referer="http://twitter.com/SimGenerator")
    year = random.randint(config.getint("settings", "minyear"),
                          datetime.date.today().year)
    artFile = "output-%s.png" % datetime.datetime.now().strftime("%Y-%m-%d-%H%M.%f")
    artFile = os.path.join(tempfile.gettempdir(), artFile)
    SimulatorGeneratorImage.createBoxArt(
        text, year, image, artFile,
        maxSize=(
            str(twitterGlobalConfig["photo_sizes"]["large"]["w"] - 1),
            str(twitterGlobalConfig["photo_sizes"]["large"]["h"] - 1),
        ),
        deleteInputFile=True)
    tweetString = text
    if trend[0] == "#":
        tweetString = trend + " " + tweetString
    tweet(tweetString, year, artFile)
def scan_for_sitemap(self, priority_map={}, change_freq='daily', default_priority='0.5',
                     inspect_content=False, inspect_title=False):
    # Scan through the pages and return a list of sitemap entries
    results = self.scan(generate_urls=True, inspect_content=inspect_content,
                        inspect_title=inspect_title)
    sitemap = []
    for result in results:
        if inspect_content:
            if result.status_code != 200:
                continue
            if not result.is_html:
                continue
        # priority_map maps endpoint -> priority
        priority = priority_map.get(result.endpoint, default_priority)
        title = result.title
        if not title:
            # Make one up!
            title = titlecase.titlecase(result.endpoint.split('.')[1].replace('_', ' '))
        sitemap.append(SiteMapEntry(result.url, result.last_modified, change_freq, priority, title))
    return sitemap
def build_search_query(params):
    """Builds search query from API parameters"""
    qb = ESQueryBuilder()
    if 'query' in params:
        ror_id = get_ror_id(params.get('query'))
        if ror_id is not None:
            qb.add_id_query(ror_id)
        else:
            qb.add_string_query(params.get('query'))
    else:
        qb.add_match_all_query()
    if 'filter' in params:
        filters = [f.split(':')
                   for f in filter_string_to_list(params.get('filter', '')) if f]
        # normalize filter values based on casing conventions used in ROR records
        for f in filters:
            if f[0] == 'types':
                f[1] = f[1].title()
            if f[0] == 'country.country_code':
                f[1] = f[1].upper()
            if f[0] == 'country.country_name':
                f[1] = titlecase(f[1])
        filters = [(f[0], f[1]) for f in filters]
        qb.add_filters(filters)
    qb.add_aggregations([('types', 'types'), ('countries', 'country.country_code')])
    qb.paginate(int(params.get('page', 1)))
    return qb.get_query()
def clickgen3():
    # Generate 'Website' cite and add to document
    newweb_title = title3.get().capitalize() + ". "
    newwebpub = titlecase(publisher3.get()) + ", "
    date = year3.get() + "-" + year3_1.get() + ", "
    newurl = str(url3.get()) + "."
    if len(newweb_title) < 3:
        newweb_title = ""
    if len(newwebpub) < 3:
        newwebpub = ""
    if len(date) < 8:
        date = date.lstrip("-")
    if len(date) < 6:
        date = ""
    if len(newurl) < 4:
        newurl = ""
    p = doc.add_paragraph()
    p.paragraph_format.first_line_indent = Inches(-0.25)
    p.style = "MLAWC"
    p.add_run(newweb_title).italic = True
    p.add_run(newwebpub + date + newurl)
    doc.save("MLA_Works_Cited.docx")
    os.system('start MLA_Works_Cited.docx')
def notify_received(self, user, template_id=None):
    """
    Notify the contact associated with the user that the submission has been received.
    """
    template_id = template_id or self.status.send_confirmation_notification
    if template_id:
        context = {
            "full_name": self.contact.name.strip() if self.contact else "",
            "company_name": titlecase(self.organisation.name) if self.organisation else "",
            "case_number": self.case.reference,
            "case_name": self.case.name,
            "case_type": self.case.type.name,
            "submission_type": self.type.name,
        }
        self.notify(sent_by=user, contact=user.contact, context=context, template_id=template_id)
def ensemble_prompt(curr_instruments, db_):
    """
    Prompt for creating ensembles.

    :param curr_instruments: Current list of instruments.
    :param db_: database to load to/from
    :return: lynames.Ensemble object
    """
    ensemble_names = db_interface.explore_table(db_.table("ensembles"), search=("name", ""))
    ensembles = [titlecase(' '.join(name.split('_'))) for name in ensemble_names]
    ensemble_name = prompt("Please enter a name for the ensemble: ",
                           completer=InsensitiveCompleter(ensembles))
    ensemble_name_normal = lynames.normalize_name(ensemble_name)
    new_ens = None
    if ensemble_name_normal in ensemble_names:
        load = prompt(
            f"{ensemble_name} is in the database, would you like to load it? [Y/n] ",
            default='Y', validator=YNValidator())
        if answered_yes(load):
            return lynames.Ensemble.load_from_db(ensemble_name, db_)
    while True:
        new_ens = common.create_ensemble(ensemble_name, db_, curr_instruments)
        if isinstance(new_ens, lynames.Ensemble):
            break
        else:
            retry = prompt("No new ensemble was created. Try again? ",
                           validator=YNValidator(), default='Y')
            if not answered_yes(retry):
                break
    return new_ens
def parse_daily_areas_pdf(date, country, local_pdf_file):
    if country == "Northern Ireland":
        pdf = pdfplumber.open(local_pdf_file)
        for page in pdf.pages:
            try:
                table = page.extract_table()
                if table[0][0] == "Local Government District":
                    output_rows = [["Date", "Country", "AreaCode", "Area", "TotalCases"]]
                    for table_row in table[1:]:
                        if table_row[0].lower() == "total":
                            continue
                        area = normalize_whitespace(titlecase(table_row[0]))
                        area = area.replace("Ards and North Down", "North Down and Ards")
                        area_code = lookup_local_government_district_code(area)
                        cases = normalize_int(table_row[1])
                        output_row = [date, country, area_code, area, cases]
                        output_rows.append(output_row)
                    return output_rows
            except IndexError:
                pass  # no table on page
    return None
def translate(self, sheet):
    """Translate sheet

    Column A is DE, column B is EN.
    Only fill in translation if there is none yet.
    Save excel only once, no longer after every sheet.
    """
    if sheet.title in self.case.keys():
        if self.case[sheet.title] == "exclude":
            print(f"   exclude sheet '{sheet.title}' from google translation")
            return
        elif self.case[sheet.title] == "lower":
            print("   forcing lowercase")
        elif self.case[sheet.title] == "title":
            print("   forcing Title Case")
    # tried V3 client (advanced), but then it gets complicated
    c = 1  # 1-based line counter
    for de in sheet["A"]:
        if c != 1 and de.value is not None:
            en = sheet[f"B{c}"]
            if en.value is None:
                en = self._translate_v2(de.value.strip())
                if sheet.title in self.case.keys():
                    if self.case[sheet.title] == "lower":
                        en = en.lower()
                    elif self.case[sheet.title] == "title":
                        en = titlecase(en)
                print(f"   {de.value} -> {en}")
                sheet[f"B{c}"] = en
                # without saving after every translation i get 403 User
                # Rate Limit Exceeded from Google occasionally
        c += 1
def send_individual_email(mainHTML, email, name=None,
                          mydate=datetime.datetime.now().date(), verify=True):
    assert emailAddress is not None, "Error, email address must not be None"
    if emailName is None:
        fromEmail = emailAddress
    else:
        fromEmail = '%s <%s>' % (emailName, emailAddress)
    subject = titlecase.titlecase('Plex Email Newsletter For %s' % mydate.strftime('%B %Y'))
    msg = MIMEMultipart()
    msg['From'] = fromEmail
    msg['Subject'] = subject
    if name is None:
        msg['To'] = email
        htmlstring = mainHTML
    else:
        msg['To'] = '%s <%s>' % (name, email)
        firstname = name.split()[0].strip()
        htmlstring = re.sub('Hello Friend,', 'Hello %s,' % firstname, mainHTML)
    #
    body = MIMEText(htmlstring, 'html', 'utf-8')
    msg.attach(body)
    send_email_lowlevel(msg, verify=verify)
# .title() is naive
# titlecase seems better but not perfect
# (considering in-company style guide variety) it's good enough
# pip install titlecase (https://pypi.org/project/titlecase/)
from titlecase import titlecase

punct = "it's an auth'ring 'aight-mare: a f**ked-up s**tstorm!"

titled = punct.title()
print(titled)
# It'S An Auth'Ring 'Aight-Mare: A F**Ked-Up S**Tstorm!

titlecased = titlecase(punct)
print(titlecased)
# It's an Auth'ring 'Aight-Mare: A F**ked-Up S**tstorm!
# better: It's an Auth'ring 'Aight-Mare: A F**ked-up S**tstorm!
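# One way to get the "better" form noted above: titlecase calls an optional
# callback with each whitespace-delimited token, and any non-None return
# value wins. Pinning the one troublesome token is a hedged, house-style
# fix rather than a general hyphenation rule:
def house_style(word, **kwargs):
    if word.lower().startswith("f**ked-up"):
        return "F**ked-up"

print(titlecase(punct, callback=house_style))
# It's an Auth'ring 'Aight-Mare: A F**ked-up S**tstorm!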
#!/usr/bin/python3
# not proud of this but it works (tm).
import pathlib
import titlecase

for filename in pathlib.Path('.').glob('**/*.md'):
    f = open(filename, 'r')
    print('file: %s' % filename)
    lines = f.readlines()
    if lines:
        if lines[0].startswith('#'):
            print(filename, 'has a header')
            print('---')
        else:
            sanitized_filename = str(filename).replace('-', ' ')
            sanitized_filename = sanitized_filename.replace('.md', '')
            sanitized_filename = sanitized_filename.replace('journal/', '')
            header = '# ' + titlecase.titlecase(sanitized_filename) + '\n'
            print('adding header:', header)
            lines.insert(0, header)
            with open(filename, 'w') as f_rw:
                f_rw.writelines(lines)
            print('---')
###
### Regex for identifying candidate subject/topics
subject = re.compile(r"\n[^\s\d\n\[\(\.\-—_\$].+\s\n", re.UNICODE)

data_directory = './cleaned_txts/'
for filename in os.listdir(data_directory):
    # if filename.startswith('19'):
    with open(data_directory + filename, 'r', encoding="utf-8") as f:
        text = f.read()
        assembly_time_flag = False
        for found in re.findall(subject, text):
            # if found.upper() == found and not found.endswith('. \n') and not found.endswith('.') and not found.endswith('. ') and not found.startswith('HON') and not found.startswith('\nYEA') and not found.startswith('\nNAY') and not found.startswith("'"):
            #     print(found)  # the 0th element is the whole match
            if titlecase(str(found)) == found and '.' not in found and '?' not in found and '(' not in found and 'Hon' not in found:
                print(found)
                assembly_time_flag = True
        # if not assembly_time_flag:
        #     for found in re.findall(evening_session, text):
        #         print(found[0])
        #         print(found[1])
        #         assembly_time_flag = True
        # if not assembly_time_flag:
        #     print(text)
def create_report_periodicals(path):
    """Return list with information for each periodical and date of latest update"""
    if path == '':
        path = os.path.dirname(os.path.realpath(__file__)) + "/latest/"
    report_periodicals = []
    latest_update = datetime.min.date()
    files = [each for each in os.listdir(path) if each.endswith('.json')]
    for f in files:
        with open(path + f) as json_file:
            with Timer(True) as t:
                t.__enter__()
                data = json.load(json_file)
                t.__exit__()
            issues = data["Issues"]
            pubFrequency = data["PublicationFrequency"]
            with Timer(True) as t:
                t.__enter__()
                issues = sorted(issues, key=lambda k: k['Timestamp'], reverse=True)
                # Find latest index
                max_val = [0, 0]
                latest_index = 0
                for i in xrange(len(issues)):
                    for p in xrange(len(issues[i]["Parts"])):
                        if issues[i]["Parts"][p]["Key"] == "8":
                            numbers = (issues[i]["Parts"][p]).get("Value").split(".")
                            val = issues[i]["Parts"][p]["Value"].split(".")
                            val = numbers
                            if ((int(max_val[0]) == int(val[0]) and int(max_val[1]) < int(val[1]))
                                    or (int(max_val[0]) < int(val[0]))):
                                max_val = val
                                latest_index = i
                date_object = "n/a"
                formatted_issue = "n/a"
                string_date = date_object
                issues_last_12_months = [0 for i in range(12)]
                number_issues = len(issues)
                if issues:
                    string_date = issues[0]["Timestamp"]
                    date_object = parser.parse(string_date).date()
                    if latest_update < date_object:
                        latest_update = date_object
                    formatted_issue = format_issue(issues[latest_index])
                    number_issues = len(issues)
                    latest_issue_year = extract_year(issues[latest_index])
                    issues_last_12_months = statistic_last_12_months(issues)
                else:
                    print("---> info missing: " + f + " " + date_object)
                row = (titlecase(data["CatalogTitle"]), date_object, data["StanfordLibraryId"],
                       data["CallNumber"], formatted_issue, pubFrequency, number_issues,
                       string_date, latest_issue_year, issues_last_12_months)
                report_periodicals.append(row)
                t.__exit__()
    # sort list by last issue date
    with Timer(True) as t:
        t.__enter__()
        report_periodicals = sorted(report_periodicals, key=lambda item: item[7], reverse=True)
        t.__exit__()
    return [report_periodicals, latest_update]
def add_deck():
    flask_login.login_user(current_user)
    deck = request.get_json()
    # Check if we're editing an existing deck
    if deck:
        deck_id = deck['edit_id']
        deck_description = deck['description']
        deck_formats = deck['formats']
        deck_legality = deck['formats']
        deck_tags = deck['tags']
        deck_name = titlecase(deck['name'].strip())
        deck_cards = deck['cards']
        deck_makeup = deck['makeup']
        # This needs to be set to the currently logged-in user.
        deck_author = "Casanova Killing Spree"
        # This needs to be calculated somehow.
        deck_colors = "{r}{b}"
        deck_makeup = deck['makeup']
        deck_makeup_length = len(deck_makeup)
        if deck_makeup_length != 0:
            deck_makeup_w = (float(deck_makeup.count('W')) / deck_makeup_length) * 100
            deck_makeup_u = (float(deck_makeup.count('U')) / deck_makeup_length) * 100
            deck_makeup_b = (float(deck_makeup.count('B')) / deck_makeup_length) * 100
            deck_makeup_r = (float(deck_makeup.count('R')) / deck_makeup_length) * 100
            deck_makeup_g = (float(deck_makeup.count('G')) / deck_makeup_length) * 100
            deck_makeup = (str(deck_makeup_w) + ', ' + str(deck_makeup_u) + ', ' +
                           str(deck_makeup_b) + ', ' + str(deck_makeup_r) + ', ' + str(deck_makeup_g))
            print(deck_makeup)
        else:
            deck_makeup = "0.0, 0.0, 0.0, 0.0, 0.0"
        # This is based on the Featured image selected while building.
        deck_image = "414494"
        deck_likes = 0
        # These are probably obsolete.
        deck_mainboard = "main"
        deck_maybeboard = "maybe"
        deck_sideboard = "side"
        # Double checking form fields that should've been verified on the front-end.
        if deck_name == "":
            error = Markup("<strong>Oops!</strong>")
            flash(error + " Looks like your deck doesn't have a name.", 'error')
        elif deck_tags == "":
            error = Markup("<strong>Oops!</strong>")
            flash(error + " Looks like your deck doesn't have any tags.", 'error')
        elif deck_legality == "":
            error = Markup("<strong>Oops!</strong>")
            flash(error + " Looks like your deck isn't legal in any format.", 'error')
        else:
            db = get_db()
            for card in deck_cards:
                if deck_cards[card]['featured'] == 1:
                    deck_image = deck_cards[card]['image']
            if deck_id == '':
                cur_cards = db.execute(
                    'INSERT INTO decks values (null, ?, ?, null, date("now"), ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, date("now"), ?)',
                    (deck_author, deck_colors, deck_description, deck_formats, deck_image,
                     deck_legality, deck_likes, deck_mainboard, deck_maybeboard, deck_name,
                     deck_sideboard, deck_tags, deck_makeup))
            else:
                cur_cards = db.execute(
                    'UPDATE decks SET colors = ?, description = ?, formats = ?, image = ?, legality = ?, name = ?, tags = ?, updated = ? WHERE id = ?',
                    (deck_colors, deck_description, deck_formats, deck_image, deck_legality,
                     deck_name, deck_tags, time.strftime('%Y-%m-%d'), deck_id))
            deck_row = cur_cards.lastrowid
            if deck_id != '':
                db.execute('DELETE FROM decksToCards WHERE deckId=?', (deck_id,))
            for card in deck_cards:
                quantity = deck_cards[card]['quantity']
                for i in range(int(quantity)):
                    if deck_cards[card]['foil'] == 1:
                        card_foil = 1
                    else:
                        card_foil = 0
                    if deck_cards[card]['featured'] == 1:
                        card_featured = 1
                    else:
                        card_featured = 0
                    if deck_cards[card]['commander'] == 1:
                        card_commander = 1
                    else:
                        card_commander = 0
                    if deck_id == '':
                        db.execute('INSERT INTO decksToCards VALUES(NULL, ' + str(deck_row) + ', "' +
                                   deck_cards[card]['image'] + '", ' + str(card_foil) + ', ' +
                                   str(card_featured) + ', ' + str(card_commander) + ', 0, 0, 0, 1)')
                    else:
                        db.execute(
                            'INSERT INTO decksToCards VALUES(NULL, ?, ?, ?, ?, ?, 0, 0, 0, 1);',
                            (deck_id, card, card_foil, card_featured, card_commander))
                        db.execute('UPDATE decksToCards SET featured = 0 WHERE featured = 1;')
                        db.execute(
                            'UPDATE decksToCards SET foil = ?, featured = ?, commander = ? WHERE cardId = ?',
                            (card_foil, card_featured, card_commander, card))
                if deck_id == '':
                    print("Inserted Multiverse ID " + card + " into Deck " + str(deck_row) +
                          " " + str(quantity) + " times.")
                else:
                    print("Inserted Multiverse ID " + str(card) + " into Deck " + str(deck_id) +
                          " " + str(quantity) + " times.")
            db.commit()
            return 'success'
    return redirect(url_for('decks'))
def _read_csv(self, filename):
    with open(filename) as csvFile:
        playlist_data = csv.DictReader(csvFile)
        previous_time = timedelta()
        playlist = []
        playlist_index = 0
        for index, row_data in enumerate(playlist_data):
            if index == 0 and "name" in row_data and "start time" in row_data:
                # info row
                self.playlist_name = row_data["name"]
                self.playlist_date = row_data["start time"].split(",")[0]
                continue
            time_string = row_data["start time"].replace(".", ":").strip().split(" ")[0]
            row_data["start time"] = datetime.strptime(time_string, "%H:%M:%S")
            if index == 1:
                start_time = row_data["start time"]
            title = row_data["name"]
            if " - " in title:
                title = title.replace(" - ", " (") + ")"
            title = title.replace("(Clean)", "").replace("(clean)", "")
            title = title.replace("(Dirty)", "").replace("(dirty)", "")
            title = title.replace("(Original Mix)", "").replace("(original Mix)", "")
            title = title.replace("(Dirty-", "(").replace("(dirty-", "(")
            title = title.replace("(Clean-", "(").replace("(clean-", "(")
            title = title.replace(" )", ")")
            title = title.replace("( ", "(")
            # split at all whitespace chars and recombine -> remove extra spaces and linebreaks...
            title = " ".join(title.split())
            play_time = row_data["start time"] - start_time
            song_data = {
                "artist": titlecase(row_data["artist"]),
                "song": titlecase(title),
                "time": play_time,
                "playtime": play_time - previous_time,
                "starttime": row_data["start time"],
            }
            if song_data["playtime"] < timedelta(seconds=60):
                song_data["playtime"] = timedelta(seconds=60)
            # sum duplicate song playtimes
            if (playlist_index
                    and playlist[playlist_index - 1]["song"] == song_data["song"]
                    and playlist[playlist_index - 1]["artist"] == song_data["artist"]):
                playlist[playlist_index - 1]["playtime"] += song_data["playtime"]
            else:
                playlist.append(song_data)
                playlist_index += 1
            previous_time = play_time
        for i in range(1, len(playlist)):
            playlist[i - 1]["playtime"] = playlist[i]["playtime"]
        self.playlist = playlist
        self.playlist_file = filename
def titlecase(cls, text):
    """A wrapper for the titlecase function"""
    return titlecase(text)
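# Why the wrapper above doesn't recurse: inside the method body, the name
# `titlecase` resolves to the module-level import, not to the method itself.
# A hypothetical host class for illustration (the original class isn't shown):
from titlecase import titlecase

class TextUtils:
    @classmethod
    def titlecase(cls, text):
        """A wrapper for the titlecase function"""
        return titlecase(text)

print(TextUtils.titlecase('a tale of two cities'))  # -> 'A Tale of Two Cities'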