def _TTLSerialize(ttlSLI, ttlABS, forceUpdateDtABS, statusItem):
    """Serialize sliding/absolute TTLs plus a status flag into one string.

    Format: "<dtSLI>|<tsSLI>|<dtABS>|<tsABS>|<status>", where each dt is a
    UTC expiry timestamp ("%Y%m%dT%H%M%S") and each ts is the TTL in whole
    seconds; both fields become the _No_TTL sentinel when the matching TTL
    equals _NO_EXPIRATION.
    """
    # Capture "now" once so the SLI and ABS expiries are computed from the
    # same instant (the original called utcnow() twice, which could skew them).
    now = datetime.datetime.utcnow()

    def _fmt(ttl):
        # Return (expiry-string, total-seconds-string) for one TTL,
        # substituting the _No_TTL sentinel for non-expiring entries.
        if ttl == _NO_EXPIRATION:
            return _No_TTL, _No_TTL
        expiry = now + ttl
        return expiry.strftime("%Y%m%dT%H%M%S"), str(int(ttl.total_seconds()))

    str_dtSLI, str_tsSLI = _fmt(ttlSLI)
    str_dtABS, str_tsABS = _fmt(ttlABS)

    # An explicit absolute expiry overrides the computed one
    # (datetime.max acts as the "not forced" sentinel).
    if forceUpdateDtABS != datetime.datetime.max:
        str_dtABS = forceUpdateDtABS.strftime("%Y%m%dT%H%M%S")

    return "|".join((str_dtSLI, str_tsSLI, str_dtABS, str_tsABS, str(statusItem)))
def copy(self):
    """Copy current selection to clipboard in CSV format.

    Column 0 is treated as the index (time stamp) column and column 1 as
    the value column; depending on which columns are selected, one- or
    two-column tab-separated text is placed on the clipboard.
    Returns False when nothing is selected, True otherwise.
    """
    selection_model = self.selectionModel()
    if not selection_model.hasSelection():
        return False
    # Row-major ordering; the 2*row+column key assumes the view has exactly
    # two columns -- TODO confirm against the model.
    selected_indexes = sorted(selection_model.selectedIndexes(),
                              key=lambda index: 2 * index.row() + index.column())
    row_first = selected_indexes[0].row()
    row_last = selected_indexes[-1].row()
    row_count = row_last - row_first + 1
    # Sparse buffers covering the full selected row span; unselected cells
    # stay None and are emitted as empty strings.
    data_indexes = row_count * [None]
    data_values = row_count * [None]
    data_model = self.model()
    for selected_index in selected_indexes:
        data = data_model.data(selected_index)
        row = selected_index.row()
        if selected_index.column() == 0:
            data_indexes[row - row_first] = data
        else:
            data_values[row - row_first] = data
    with io.StringIO() as output:
        writer = csv.writer(output, delimiter='\t')
        if all(stamp is None for stamp in data_indexes):
            # Only the value column was selected.
            for value in data_values:
                writer.writerow([locale.str(value) if value is not None else ""])
        elif all(value is None for value in data_values):
            # Only the index column was selected.
            for index in data_indexes:
                writer.writerow([index if index is not None else ""])
        else:
            # Both columns selected: emit index/value pairs.
            for index, value in zip(data_indexes, data_values):
                index = index if index is not None else ""
                value = locale.str(value) if value is not None else ""
                writer.writerow([index, value])
        QApplication.clipboard().setText(output.getvalue())
    return True
def htmlCode(self):
    """Build the HTML results table (German labels) for exam ``self.examnr``.

    Returns an error snippet for a negative exam number; otherwise a
    paragraph with the maximum reachable score followed by a table of all
    registered people that have a graded exam.
    """
    examnr = self.examnr
    if examnr < 0:
        return '<em>Error: Invalid exam number</em>'
    table = [[
        '', 'Matr.-Nr.', 'Name', 'Punkte',
        'Punkte in den einzelnen Aufgaben'
    ]]
    sorted_keys = Utils.SortNumerAlpha(Data.people.keys())
    maxscore = 0
    counter = 0
    for key in sorted_keys:
        person = Data.people[key]
        # Skip people without an entry for this exam.
        if examnr < len(person.exams) and person.exams[examnr] is not None:
            exam = person.exams[examnr]
            # Remember the last non-zero maximum; presumably identical for
            # every exam instance -- TODO confirm.
            if exam.maxscore != 0:
                maxscore = exam.maxscore
            # Only registered people with a real total score are listed.
            if exam.registration == 1 and exam.totalscore != -1:
                counter += 1
                table.append([
                    str(counter), key,
                    Utils.CleanWeb(person.lname) + ', ' + Utils.CleanWeb(person.fname),
                    locale.str(exam.totalscore),
                    Utils.CleanWeb(exam.scores)
                ])
    s = ('<p>Maximal erreichbare Punktzahl: ' + locale.str(maxscore) + '</p>\n')
    s += createHTMLTable(table)
    return s
def recibirAtaque(self, danyoRecibido):
    """Apply *danyoRecibido* damage points and print the resulting state."""
    print('Has recibido ' + str(danyoRecibido))
    self.vida = self.vida - danyoRecibido
    if self.vida > 0:
        print('Te queda ' + str(self.vida) + ' de vida')
    else:
        print('Has muerto')
def get_match_roster(match_id, team_id):
    """Fetch the roster rows for *team_id* in *match_id* via stored procedure.

    Uses a parameterized query instead of string concatenation so the
    arguments cannot inject SQL.
    """
    sql_string = "CALL sentiosp_drupal6.SP_GET_MATCH_ROSTER (%s,%s);"
    print(sql_string % (match_id, team_id))
    cursor = connection.cursor()
    cursor.execute(sql_string, (match_id, team_id))
    data = cursor.fetchall()
    return data
def _items(self, cr, uid, lines, context=None):
    """Render invoice *lines* as CSV item rows (record type 3) for export.

    Returns one newline-joined string; every field is quoted via
    ``self._quoted`` and joined with ``self._delimiter()``.
    """
    data = []
    for line in lines:
        # A: header - value 3 is mandatory in every row
        line_data = [self._quoted('3')]
        # B: catalogue number - the product's ACCOUNT code (minus prefix) is used as the code
        line_data.append(self._quoted(line.account_id.code[4:]))
        # C: quantity - quantity of the article sold
        line_data.append(self._quoted(locale.str(line.quantity)))
        # D: VAT rate code - 0 = 0%, 2 = 10%, 4 = 25%, 5 = 5%
        # Round before the lookup: amount is a float, and e.g. 0.05 * 100
        # yields 5.000000000000001, which would miss the integer key and
        # silently fall back to '-1'.
        tax_percent = int(round(line.invoice_line_tax_id[0].amount * 100))
        tax_code = {0: '0', 10: '2', 25: '4', 5: '5'}.get(tax_percent, '-1')
        line_data.append(self._quoted(tax_code))
        # E: VAT exemption code - leave empty when there is no exemption
        line_data.append(self._quoted(''))
        # F: price - wholesale price of the sold article
        line_data.append(self._quoted(locale.str(line.price_unit)))
        # G: discount granted
        line_data.append(self._quoted(locale.str(line.discount)))
        # H: article description - extra per-line description; the '#'
        # prefix makes it override the name from the article database
        line_data.append(self._quoted('#' + line.product_id.name))
        data.append(self._delimiter().join(line_data))
    return '\n'.join(data)
def _items(self, cr, uid, lines, context=None):
    """Render invoice *lines* as CSV item rows (record type 3) for export.

    Returns one newline-joined string; every field is quoted via
    ``self._quoted`` and joined with ``self._delimiter()``.
    """
    data = []
    for line in lines:
        # A: header - value 3 is mandatory in every row
        line_data = [self._quoted('3')]
        # B: catalogue number - the product's ACCOUNT code (minus prefix) is used as the code
        line_data.append(self._quoted(line.account_id.code[4:]))
        # C: quantity - quantity of the article sold
        line_data.append(self._quoted(locale.str(line.quantity)))
        # D: VAT rate code - 0 = 0%, 2 = 10%, 4 = 25%, 5 = 5%
        # Round before the lookup: amount is a float, and e.g. 0.05 * 100
        # yields 5.000000000000001, which would miss the integer key and
        # silently fall back to '-1'.
        tax_percent = int(round(line.invoice_line_tax_id[0].amount * 100))
        tax_code = {0: '0', 10: '2', 25: '4', 5: '5'}.get(tax_percent, '-1')
        line_data.append(self._quoted(tax_code))
        # E: VAT exemption code - leave empty when there is no exemption
        line_data.append(self._quoted(''))
        # F: price - wholesale price of the sold article
        line_data.append(self._quoted(locale.str(line.price_unit)))
        # G: discount granted
        line_data.append(self._quoted(locale.str(line.discount)))
        # H: article description - extra per-line description; the '#'
        # prefix makes it override the name from the article database
        line_data.append(self._quoted('#' + line.product_id.name))
        data.append(self._delimiter().join(line_data))
    return '\n'.join(data)
def parse(self, xml_detail):
    """Assemble a calibre Metadata object from a parsed XML detail page.

    Returns a Metadata instance when both title and authors could be
    extracted, otherwise None.  Also registers the cover URL with the
    plugin's cover cache when one was found.
    """
    title = self.parse_title(xml_detail)
    authors = self.parse_authors(xml_detail)
    comments = self.parse_comments(xml_detail)
    rating = self.parse_rating(xml_detail)
    isbn = self.parse_isbn(xml_detail)
    publisher, pub_year = self.parse_publisher(xml_detail)
    tags = self.parse_tags(xml_detail)
    serie, serie_index = self.parse_serie(xml_detail, title)
    cover = self.parse_cover(xml_detail)

    if title is None or authors is None:
        # Fixed garbled log message (was: "Result skipped for because ...").
        self.log('Result skipped because title or authors not found')
        return None

    mi = Metadata(title, authors)
    mi.languages = {'ces'}
    mi.comments = as_unicode(comments)
    mi.identifiers = {self.plugin.name: str(self.number)}
    mi.rating = rating
    mi.tags = tags
    mi.publisher = publisher
    mi.pubdate = pub_year
    mi.isbn = isbn
    mi.series = serie
    mi.series_index = serie_index
    mi.cover_url = cover
    if cover:
        self.plugin.cache_identifier_to_cover_url(str(self.number), cover)
    return mi
def __call__(self, args):
    """Check every task of the plan, then execute them in order.

    Aborts the whole process via sys.exit() as soon as any task's check()
    or run() raises.  (Python 2: print statements.)
    """
    # get all tasks
    tasks = self.gettasks(args)
    # check before executing plan
    print "Plan for {0} :".format(self.name())
    for i in range(0,len(tasks)):
        task = tasks[i]
        try:
            print " Step {0} : {1}.".format(str(i+1),task.name())
            for reportline in task.check():
                print " " + reportline
        except Exception as e:
            print " ==> Action is not started because {0}: {1} {2}".format(e,type(e),e.args)
            sys.exit()
    # execute plan
    print "Execute {0} :".format(self.name())
    for i in range(0,len(tasks)):
        task = tasks[i]
        try:
            print " Step {0} : {1}.".format(str(i+1),task.name())
            for reportline in task.run():
                print " " + reportline
        except Exception as e:
            print " ==> Action abort because {0}: {1} {2}".format(e, type(e),e.args)
            sys.exit()
    # done
    print "{0} executed with success.".format(self.name())
def unconvert(self):
    '''Convert value from the appropriate type to a string.

    Python 2 only: relies on the ``types`` module aliases (StringTypes,
    TupleType, FloatType).  Floats are rendered with locale.str so the
    active locale's decimal separator is used.
    '''
    if type(self.value) in types.StringTypes:
        # nothing to do
        return self.value
    if self.is_boolean():
        # A wee bit extra for Python 2.2
        if self.value == True:
            return "True"
        else:
            return "False"
    if type(self.value) == types.TupleType:
        if len(self.value) == 0:
            return ""
        if len(self.value) == 1:
            v = self.value[0]
            if type(v) == types.FloatType:
                return locale.str(self.value[0])
            return str(v)
        # We need to separate out the items
        strval = ""
        # We use a character that is invalid as the separator
        # so that it will reparse correctly. We could try all
        # characters, but we make do with this set of commonly
        # used ones - note that the first one that works will
        # be used. Perhaps a nicer solution than this would be
        # to specifiy a valid delimiter for all options that
        # can have multiple values. Note that we have None at
        # the end so that this will crash and die if none of
        # the separators works <wink>.
        if self.delimiter is None:
            if type(self.allowed_values) == types.TupleType:
                self.delimiter = ' '
            else:
                v0 = self.value[0]
                v1 = self.value[1]
                for sep in [' ', ',', ':', ';', '/', '\\', None]:
                    # we know at this point that len(self.value) is at
                    # least two, because len==0 and len==1 were dealt
                    # with as special cases
                    test_str = str(v0) + sep + str(v1)
                    test_tuple = self._split_values(test_str)
                    if test_tuple[0] == str(v0) and \
                       test_tuple[1] == str(v1) and \
                       len(test_tuple) == 2:
                        break
                # cache this so we don't always need to do the above
                self.delimiter = sep
        for v in self.value:
            if type(v) == types.FloatType:
                v = locale.str(v)
            else:
                v = str(v)
            strval += v + self.delimiter
        strval = strval[:-len(self.delimiter)]  # trailing seperator
    else:
        # Otherwise, we just hope str() will do the job
        strval = str(self.value)
    return strval
def unconvert(self):
    '''Convert value from the appropriate type to a string.

    Strings pass through unchanged; booleans become "True"/"False";
    tuples are joined with a (possibly auto-discovered) delimiter, with
    floats rendered via locale.str; anything else falls back to str().
    '''
    # Bug fix: ``type(self.value) in str`` always raised TypeError
    # (argument of type 'type' is not iterable) -- a broken Python 3 port
    # of the Python 2 ``type(...) in types.StringTypes`` membership test.
    if isinstance(self.value, str):
        # nothing to do
        return self.value
    if self.is_boolean():
        # A wee bit extra for Python 2.2
        if self.value == True:
            return "True"
        else:
            return "False"
    if type(self.value) == tuple:
        if len(self.value) == 0:
            return ""
        if len(self.value) == 1:
            v = self.value[0]
            if type(v) == float:
                # Locale-aware rendering for floats.
                return locale.str(self.value[0])
            return str(v)
        # We need to separate out the items
        strval = ""
        # We use a character that is invalid as the separator
        # so that it will reparse correctly. We could try all
        # characters, but we make do with this set of commonly
        # used ones - note that the first one that works will
        # be used. Perhaps a nicer solution than this would be
        # to specifiy a valid delimiter for all options that
        # can have multiple values. Note that we have None at
        # the end so that this will crash and die if none of
        # the separators works <wink>.
        if self.delimiter is None:
            if type(self.allowed_values) == tuple:
                self.delimiter = ' '
            else:
                v0 = self.value[0]
                v1 = self.value[1]
                for sep in [' ', ',', ':', ';', '/', '\\', None]:
                    # we know at this point that len(self.value) is at
                    # least two, because len==0 and len==1 were dealt
                    # with as special cases
                    test_str = str(v0) + sep + str(v1)
                    test_tuple = self._split_values(test_str)
                    if test_tuple[0] == str(v0) and \
                       test_tuple[1] == str(v1) and \
                       len(test_tuple) == 2:
                        break
                # cache this so we don't always need to do the above
                self.delimiter = sep
        for v in self.value:
            if type(v) == float:
                v = locale.str(v)
            else:
                v = str(v)
            strval += v + self.delimiter
        strval = strval[:-len(self.delimiter)]  # trailing seperator
    else:
        # Otherwise, we just hope str() will do the job
        strval = str(self.value)
    return strval
def test_str_float(self):
    """locale.str must honour the decimal point of the active LC_ALL locale."""
    import _locale
    import locale
    _locale.setlocale(_locale.LC_ALL, self.language_en)
    # English locales use '.' as decimal separator.
    assert locale.str(1.1) == '1.1'
    _locale.setlocale(_locale.LC_ALL, self.language_pl)
    # Polish locales use ',' instead.
    assert locale.str(1.1) == '1,1'
def getEpisodeSeason(self, seasonNumber):
    """Fetch the BetaSeries episode XML for one season of self.serieName.

    NOTE(review): parses the response into an ElementTree root but never
    returns it, and the except branch only prints -- confirm callers do
    not expect a return value.  (Python 2: urllib2 / old except syntax.)
    """
    request = 'http://api.betaseries.com/shows/episodes/' + self.serieName + '.xml?key=' + self.apiKey \
        + '&season=' + str(seasonNumber) + '&hide_note'
    try:
        resultHttp = urllib2.urlopen(request)
        root = ET.parse(resultHttp).getroot()
    except urllib2.HTTPError, err:
        print('HTTPError = ' + str(err))
def prepareSiteQuery(self):
    """Prepare the traffic and health queries for site ``self.sitenumber``.

    The table names are built by string concatenation (one table per
    site); only the time values are bound later via the :time parameter.
    """
    trafficquery = "Select * from datavis.site" + str(
        self.sitenumber
    ) + "totalflow where parsedDate=:time Order By totalBytes desc LIMIT 50;"
    site1healthquery = "Select * from datavis.site" + str(
        self.sitenumber) + "health where Starttime=:time;"
    self.communicationQuery.prepare(trafficquery)
    self.healthQuery.prepare(site1healthquery)
def generate_string(self):
    """
    Generate the MLT keyframe string for the XML:
    0=0%,0%:100%x100%:100; -1=0%,0%:100%x100%:100

    Numeric fields go through locale.str so the decimal separator
    matches the active locale.
    """
    fields = (
        self.frame,
        locale.str(self.x),
        locale.str(self.y),
        locale.str(self.width),
        locale.str(self.height),
        locale.str(self.alpha * 100.0),
    )
    return "%d=%s%%,%s%%:%s%%x%s%%:%s; " % fields
def latex_estimated_costs(self):
    """Format self.estimated_costs as a LaTeX snippet with its citation.

    Values above 1000 (million) are reported in billions ("Mrd"),
    otherwise in millions ("Mio"); a falsy value yields "-".
    """
    costs = self.estimated_costs
    if not costs:
        return "-"
    if costs > 1000:
        return str(costs / 1000) + " Mrd \\cite{" + self.estimated_costs_cite + "}"
    return str(costs) + " Mio \\cite{" + self.estimated_costs_cite + "}"
def getAnnee():
    """Return the four most recent academic years as (value, label) pairs.

    Each entry is a "YYYY-1-YYYY" string duplicated into a 2-tuple,
    starting from the current year and walking backwards.
    """
    current = datetime.today().year
    seasons = []
    for offset in range(4):
        year = current - offset
        label = str(year - 1) + "-" + str(year)
        seasons.append((label, label))
    return seasons
def cli(directory, outfile, delimiter, quotechar, performance):
    """Collect YAML result files under *directory* and write one CSV to *outfile*.

    With *performance* set, the "runtimes" section of every YAML file is
    flattened into timing rows; otherwise the "cases" section is flattened
    into test-result rows.  Column titles beyond the fixed ones are derived
    from the files' relative paths via extract_titles_from/extract_values_from.
    """
    # Use the user's locale so locale.str renders numbers accordingly.
    locale.setlocale(locale.LC_ALL, '')
    if directory is None or not os.path.exists(directory):
        click.echo("directory must be an exisiting directory")
        exit(1)
    if outfile is None:
        click.echo("outfile must be set")
        exit(1)
    # Map yaml path -> parsed document for every *.yaml / *.yml below directory.
    found_files = {}
    for dpath, _, fnames in os.walk(directory):
        yaml_files = [os.path.join(dpath, fname) for fname in fnames
                      if fname.endswith(".yaml") or fname.endswith(".yml")]
        for yaml_file in yaml_files:
            with open(yaml_file) as yam:
                found_files[yaml_file] = yaml.safe_load(yam)
    # Ensure the output directory exists before opening the CSV.
    os.makedirs(os.path.abspath(os.path.join(outfile, os.pardir)), exist_ok=True)
    with open(outfile, mode='w') as csvFile:
        writer = csv.writer(csvFile, delimiter=delimiter, quotechar=quotechar,
                            quoting=csv.QUOTE_MINIMAL)
        if performance:
            run_times = {k.replace(directory, ""): v["runtimes"]
                         for k, v in found_files.items()}
            title_line = ["fileName", "rawTitle"]
            lines = []
            for file_name, runtime_dict in run_times.items():
                # add more column names if we have multiple files, this code
                # assumes same depth for every found file
                if len(title_line) <= 2 and "/" in file_name:
                    additional_col_titles = extract_titles_from(file_name)
                    title_line.extend(additional_col_titles)
                for name, value_or_dict in runtime_dict.items():
                    line = [file_name, name]
                    additional_cols = extract_values_from(file_name)
                    line.extend(additional_cols)
                    if isinstance(value_or_dict, dict):
                        # Nested timings: one row per leaf; "error" leaves
                        # are written verbatim instead of locale-formatted.
                        for keyList, value in flatten(value_or_dict):
                            if value != "error":
                                lines.append(line + [locale.str(value)] + keyList)
                            else:
                                lines.append(line + [value] + keyList)
                    else:
                        line.append(locale.str(value_or_dict))
                        lines.append(line)
            title_line.append("time")
            writer.writerow(title_line)
            for line in lines:
                writer.writerow(line)
        else:
            test_results = {k.replace(directory, ""): v["cases"]
                            for k, v in found_files.items()}
            title_line = ["fileName"]
            value_titles = ["from", "to", "port", "resultName", "result"]
            lines = []
            for file_name, result_dict in test_results.items():
                # Same depth assumption as above for the derived columns.
                if len(title_line) <= 1 and "/" in file_name:
                    title_line.extend(extract_titles_from(file_name))
                file_name_values = extract_values_from(file_name)
                for keyList, value in flatten(result_dict):
                    lines.append([file_name] + file_name_values + keyList + [value])
            writer.writerow(title_line + value_titles)
            for line in lines:
                writer.writerow(line)
def getmail_mode1(keyword, list_suffix, num):
    """Scrape email addresses from Google result pages (Python 2).

    *num* is a (start, total) pair of link counters; pages of up to 100
    results are fetched until *total* links are covered.  Returns a
    (addresses, source-links) tuple, or an error marker list on a
    connection failure.
    """
    dict_mail = {}
    list_mail = []
    dict_re_suffix = {}
    # Build one regex per suffix and map the compiled pattern back to it.
    for suffix in list_suffix:
        reg_mail = r"([\w_\.]+)@<em>" + suffix + "</em>"
        re_mail = re.compile(reg_mail)
        dict_re_suffix[re_mail] = suffix
    # i is the per-page start-link counter, num the total link count;
    # loop fetches as many Google pages as needed to cover num links.
    i = int(num[0])
    num = int(num[1])
    c = int(ceil(float(num - i) / 100))
    print "c: " + str(c)
    for x in range(c):
        global html
        global urlstr
        try:
            urlstr = (
                "http://www.google.com.hk/search?num=" + str(min(num - i, 100)) + "&q=" + keyword + generater(list_suffix) + "&start=" + str(i)
            )
            # urlstr='file:///C:/Users/vincent/Desktop/test1.html'
            print urlstr
            opener = Request(urlstr, headers=headers)
            html = urlopen(opener)
        except:
            return ["Google connection error"]
        html_data = html.read()
        for em in dict_re_suffix.keys():
            suffix = dict_re_suffix[em]
            data = em.finditer(html_data)
            # Dict keys de-duplicate the addresses.
            for m in data:
                dict_mail[m.groups()[0]] = (urlstr, suffix)
        i += 100
    result = []
    link = []
    for addr in dict_mail.keys():
        a = addr + "@" + dict_mail[addr][1]
        b = dict_mail[addr][0]
        result.append(a)
        link.append(b)
    print result
    print link
    return (result, link)
def main():
    """Keep only clients with positive review sentiment.

    Reads reviews from MySQL, scores each with TextBlob polarity, and
    rebuilds the clients_retention table with the clients whose feedback
    was positive.  NOTE(review): the row parsing below relies on
    str(tuple) formatting and string stripping -- fragile; confirm the
    review text never contains the stripped characters.
    """
    connection = mysql.connector.connect(
        user='******',
        password='******',
        host='clientdb.cl3farr1e4to.eu-central-1.rds.amazonaws.com',
        database='clientdb',
        use_pure=False,
        auth_plugin='mysql_native_password')
    mycursor = connection.cursor()
    # retrieve feedbacks
    mycursor.execute("SELECT description FROM review")
    data = mycursor.fetchall()
    feedbacks = []
    for x in data:
        # Strip the tuple punctuation from str((value,)).
        x = (str(x).strip('(' '),'))
        feedbacks.append(x.strip("''"))
    # retrieve clients
    clients = []
    mycursor.execute("SELECT client_id FROM review")
    data = mycursor.fetchall()
    for x in data:
        x = (str(x).strip('(' '),'))
        clients.append(x.strip("''"))
    clients_to_keep = []
    i = 0
    for feedback in feedbacks:
        feedback_polarity = TextBlob(feedback).sentiment.polarity
        print(feedback_polarity)
        # Positive polarity -> retain the matching client (lists are
        # assumed index-aligned because both come from the same table).
        if feedback_polarity > 0:
            clients_to_keep.append(clients[i])
        i += 1
        continue
    # De-duplicate while preserving order.
    clients_to_keep = list(dict.fromkeys(clients_to_keep))
    mycursor.execute("DROP TABLE IF EXISTS clients_retention")
    mycursor.execute(
        "CREATE TABLE clients_retention (id INT PRIMARY KEY, client_id INT, foreign key (client_id) references "
        "client(id))")
    i = 1
    for id in clients_to_keep:
        sql = "INSERT INTO clients_retention VALUES (%s, %s);"
        mycursor.execute(sql, (int(i), int(id)))
        i += 1
    # print('Positive_feebacks Count : {}'.format(len(positive_feedbacks)))
    # print(positive_feedbacks)
    connection.commit()
    print(clients_to_keep)
    connection.close()
def writeColsToSingleFiles(self, folder, name, cols, separator=',', f='{0:05d}'):
    """Write the selected columns of every row to its own text file.

    For row *i* the values of *cols* are joined with *separator* and
    written to ``<folder>/<name><i formatted with f>.txt``.

    :param folder: target directory (created when missing)
    :param name: file-name prefix
    :param cols: column selector passed to self.col
    :param separator: string placed between column values
    :param f: format spec applied to the row index
    """
    Utils.createFolderIfMissing(folder)
    extractedCols = self.col(cols)
    for i in range(self.length()):
        fileOut = folder + '/' + name + f.format(i) + '.txt'
        # join() instead of the original repeated string concatenation.
        data = separator.join(str(extractedCols[i, j]) for j in range(len(cols)))
        with open(fileOut, 'w') as infoFile:
            infoFile.write(data)
def copy(self):
    '''Copy command_line or last stack_item

    An empty command line copies the top stack value; otherwise the
    command line itself is copied, locale-formatted when it parses as a
    float.
    '''
    if len(self.v_command_line.get()) == 0:
        clipboard.copy(locale.str(self.stack.get_values(1)[0]))
    else:
        try:
            # Prefer a locale-aware rendering when the text is numeric.
            clipboard.copy(locale.str(float(self.v_command_line.get())))
        except:
            # Not a number: copy the raw text unchanged.
            clipboard.copy(self.v_command_line.get())
def get_filesize_string(file_bytes):
    """Return *file_bytes* as a human-readable size string (SI units).

    Sizes below 1000 are reported in Bytes, below 1000000 in KB,
    otherwise in MB; fractional parts are rounded to two decimals and
    rendered via locale.str.
    """
    if file_bytes < 1000:
        return locale.str(file_bytes) + " Bytes"
    if file_bytes < 1000000:
        return locale.str(round(file_bytes / 1000, 2)) + " KB"
    return locale.str(round(file_bytes / 1000000, 2)) + " MB"
def write_file(self, file_path):
    """Persist the entry: note text plus temperature and humidity.

    Writes three lines (text, temperature, humidity) to *file_path* and
    mirrors the text into the activity metadata.
    """
    self.metadata['mime_type'] = 'text/plain'
    text = self.value.get_text()
    # Locale-formatted sensor readings from the temperature/humidity bar.
    t = locale.str(self._t_h_bar.get_temp())
    h = locale.str(self._t_h_bar.get_humid())
    self.metadata['fulltext'] = text
    f = open(file_path, 'w')
    f.writelines([text + "\n", t + "\n", h + "\n"])
    f.close()
def runService(self, timered): pu = self.getProxyUsage() #O site do consumo não segue um padrão para formatação de números com ponto flutuante #Os números são ajustados para o padrão americano, convertidos para float e # depois convertidos para string utilizando a configuração do usuário bytes = float(pu[1].replace(',', 'x').replace('.', ',').replace('x', '.')) percent = float(pu[3]) texto = u"Proxy usage: %s%sB (%s%%) %s" % \ (locale.str(bytes), pu[2], locale.str(percent), pu[0]) self.setIcon(percent, texto)
def collect_json_strings(list_commit_sha, repo_name, project_name, app_name, project_type):
    """Download commit details from the GitHub API and export them as JSON.

    For every SHA in *list_commit_sha*, fetches the commit, hand-builds a
    JSON string with date/committer/per-file stats, parses it back, and
    finally writes the date-sorted frame to a project JSON file.
    NOTE(review): the bare except clauses silently skip commits/files on
    any error, and building JSON by string concatenation breaks on quotes
    in names -- confirm whether that is acceptable here.
    """
    commit_counter = 0
    data = []
    for each_sha in range(len(list_commit_sha)):
        try:
            page2 = requests.get('https://api.github.com/repos/' + repo_name + '/' + project_name + '/commits/' + list_commit_sha[each_sha] + '?client_id=' + client_id + '&client_secret=' + client_secret)
            commit = page2.json()
            date = commit['commit']['author']['date']
            sha = commit['sha']
            commiter = commit['commit']['author']['name']
            commit_counter += 1
            commit_string = '{"date":"' + date + '","project_type":"' + project_type + \
                '","commiter":"' + commiter + '","commit_id":"' + sha + '","files":['
        except:
            continue
        index = 0
        # Append one object per changed file, comma-separated.
        for each in commit['files']:
            index += 1
            try:
                commit_string = commit_string + '{"path":"' + each[
                    'filename'] + '","additions":"' + str(
                        each['additions']) + '","deletions":"' + str(
                            each['deletions']) + '","changes":"' + str(
                                each['changes']) + '"}'
                if index < len(commit['files']):
                    commit_string = commit_string + ','
            except:
                continue
        commit_string = commit_string + ']}'
        print("Project name: " + project_name + ". " + str(len(list_commit_sha) - each_sha) + ' Commits lasted...')
        try:
            # Round-trip through json.loads to validate the built string.
            json_file = json.loads(commit_string)
            data.append(json_file)
        except:
            print("This commit has a problematic structure.")
            continue
        commit_string = ''
    print(' ')
    df = pd.DataFrame.from_dict(json_normalize(data), orient='columns')
    try:
        result = df.sort_values('date')
        create_folder(repo_name, app_name)
        Export = result.to_json('/Project folders/' + stack_name + '/' + repo_name + '/' + app_name + '/' + project_name + r'.json')
        print("Processing of project: " + project_name + " is finished.")
        print(" ")
    except:
        print("Processing of project: " + project_name + " is FAILED !!!.")
        fail_list.append(repo_name + ' // ' + project_name)
def test_pasting_to_last_row_expands_model(self):
    """Pasting more rows than remain after the selection grows the series."""
    selection_model = self._table_view.selectionModel()
    model = self._table_view.model()
    # Select the last value cell, then paste two values.
    selection_model.select(model.index(3, 1), QItemSelectionModel.Select)
    copied_data = locale.str(-4.4) + '\n' + locale.str(-5.5)
    QApplication.clipboard().setText(copied_data)
    self._table_view.paste()
    series = TimeSeriesFixedResolution("2019-08-08T15:00", "1h",
                                       [1.1, 2.2, 3.3, -4.4, -5.5], False, False)
    self.assertEqual(model.value, series)
def test_pasted_cells_are_selected(self):
    """After paste, exactly the pasted cells form the selection."""
    selection_model = self._table_view.selectionModel()
    model = self._table_view.model()
    selection_model.select(model.index(0, 1), QItemSelectionModel.Select)
    copied_data = locale.str(-1.1) + '\n' + locale.str(-2.2)
    QApplication.clipboard().setText(copied_data)
    self._table_view.paste()
    selected_indexes = selection_model.selectedIndexes()
    self.assertEqual(len(selected_indexes), 2)
    self.assertTrue(model.index(0, 1) in selected_indexes)
    self.assertTrue(model.index(1, 1) in selected_indexes)
def __str__(self):
    """Render the stored value using the active locale.

    With the calculator option 'detail' == 'on' and a function present,
    the detailed representation is appended after '='.
    NOTE(review): the detail branch does not guard against
    self.value being None -- confirm that cannot occur there.
    """
    if self.stack.fcalc.options.get('detail') == 'on':
        if self.function:
            return locale.str(self.value) + "=" + str(self.str_detail())
        else:
            return locale.str(self.value)
    else:
        if self.value is None:
            return "None"
        else:
            return locale.str(self.value)
def generate_string(self):
    """
    Generate the MLT keyframe string for the XML:
    0=0%,0%:100%x100%:100; -1=0%,0%:100%x100%:100

    The frame number is rounded to the nearest integer; every other
    field is locale-formatted.
    """
    frame_no = int(round(self.frame))
    parts = tuple(locale.str(v) for v in (
        self.x, self.y, self.width, self.height, self.alpha * 100.0))
    return "%d=%s%%,%s%%:%s%%x%s%%:%s; " % ((frame_no,) + parts)
def add_commas(number):
    """Format *number* with thousands grouping, trimming a zero decimal part.

    Python 2 only: locale.format(...) returns a byte string that is
    decoded with the locale's encoding.  The locale_test probe formats
    0.1 to discover the separator character and digit order, then strips
    a ".0"-style remainder from the matching side -- presumably to show
    whole numbers without a decimal tail; TODO confirm.
    """
    encoding = locale.getlocale()[1] or "UTF-8"
    raw_with_commas = locale.format("%0.2f", number, grouping=True).decode(encoding)
    locale_test = locale.format("%01.1f", 0.1).decode(encoding)
    if len(locale_test) == 3 and not locale_test[1].isdigit():
        if locale_test[0] == locale.str(0) and locale_test[2] == locale.str(1):
            # "0.1" shape: strip the trailing zero, then the separator.
            return raw_with_commas.rstrip(locale_test[0]).rstrip(locale_test[1])
        if locale_test[2] == locale.str(0) and locale_test[0] == locale.str(1):
            # "1.0" shape (digits reversed): strip from the left instead.
            return raw_with_commas.lstrip(locale_test[2]).lstrip(locale_test[1])
    return raw_with_commas
def calculateRunningPercentile(self, recipient, zip, year, contribution, percentile):
    """Update running totals for a (year, recipient, zip) key and return stats.

    Maintains one Calculations entry per key in self.year_recipient_zip_dict
    (a binary tree of contributions plus running totals) and returns a
    pipe-delimited string: recipient|zip|year|percentile-value|total|count.
    NOTE(review): the ``zip`` parameter shadows the builtin, but renaming
    it would change the keyword interface.
    """
    total_repeat_contributions_count = 0
    total_repeat_contributions = 0
    current_calculations = None
    year_recipient_zip = year + recipient + zip
    if year_recipient_zip not in self.year_recipient_zip_dict:
        # new
        current_calculations = Calculations()
        # initialize top node of binary tree if it is not set
        if current_calculations.contributions_root_node is None:
            current_calculations.contributions_root_node = Node(contribution)
    else:
        # existing
        current_calculations = self.year_recipient_zip_dict[year_recipient_zip]
        # add a contribution to the tree
        binary_insert(current_calculations.contributions_root_node, Node(contribution))
    # update totals as records are added
    current_calculations.total_number_from_repeat += 1
    current_calculations.total_received_from_repeat += Decimal(contribution)
    # update the reference to calculations for this recipient year/zip
    self.year_recipient_zip_dict[year_recipient_zip] = current_calculations
    total_repeat_contributions_count = current_calculations.total_number_from_repeat
    total_repeat_contributions = current_calculations.total_received_from_repeat
    # Ordinal rank of the requested percentile (rounded up).
    index = np.ceil((float(percentile) / float(100)) * float(total_repeat_contributions_count))
    index_value = get_value_at_index(current_calculations.contributions_root_node, index)
    returnList = [
        recipient, zip, year,
        str(index_value),
        str(total_repeat_contributions),
        str(total_repeat_contributions_count)
    ]
    returnDelimited = "|".join(returnList)
    return returnDelimited
def GenerateLojeProductSheet(self, product_ident_list, start_index, manufacturer="", increase_index=True):
    """Build the product sheet rows for the Loje export (Python 2).

    Each product ident encodes the primary category (char 0), the
    secondary category (chars 1-2) and the price-times-ten (chars 3+).
    Returns a list of row dicts keyed by the sheet column headers.
    NOTE(review): the ``increase_index`` parameter is accepted but never
    used -- confirm whether that is intentional.
    """
    sheet = []
    product_index = start_index - 1
    product_ident_list = self._ProcessProductQuantities(product_ident_list)
    print product_ident_list
    for product_ident, quantity in product_ident_list:
        row = {}
        product_index += 1
        row[self.ID_HEADER] = product_index
        if self._category_on_label:
            # Barcode must contain the (non-numeric) category letter so it
            # cannot be a pure integer.
            barcode = "%s%06d" % (product_ident[0].upper(), product_index)
            try:
                int(barcode)
            except ValueError:
                pass
            else:
                raise RuntimeError(
                    "Product Barcode couldn't be an Integer")
        else:
            barcode = "%06d" % product_index
        row[self.BARCODE_HEADER] = barcode
        row[self.IDENT_HEADER] = product_ident.upper()
        try:
            category = self._primary_categories[product_ident[0].lower()]
        except KeyError:
            raise ProductCodeError(product_ident[0])
        row[self.CATEGORY_HEADER] = category
        row["unidade compra"] = self._product_unity
        row["unidade venda"] = self._product_unity
        # Price is stored in tenths; cost and second price derive from the
        # configured price factors.
        price = float(product_ident[3:]) / 10.
        cost = price / self.price_list[0]
        price2 = cost * self.price_list[1]
        row["custo"] = locale.str(round(cost, 2))
        row["preco"] = locale.str(round(price, 2))
        row["preco2"] = locale.str(round(price2, 2))
        row["estoque"] = "v"
        try:
            sec_category = self._secondary_categories[
                product_ident[1:3].lower()]
        except KeyError:
            raise ProductCodeError(product_ident[1:3])
        row["descricao"] = "%s %s" % (category, sec_category)
        row["categoria2"] = sec_category
        row["fabricante"] = manufacturer
        row[self.QUANTITY_HEADER] = quantity
        sheet.append(row)
    return sheet
def test_paste_to_multirow_selection_limits_pasted_data(self):
    """Pasting three values into a two-cell selection writes only two."""
    selection_model = self._table_view.selectionModel()
    model = self._table_view.model()
    selection_model.select(model.index(0, 1), QItemSelectionModel.Select)
    selection_model.select(model.index(1, 1), QItemSelectionModel.Select)
    copied_data = locale.str(-1.1) + '\n' + locale.str(
        -2.2) + '\n' + locale.str(-3.3)
    QApplication.clipboard().setText(copied_data)
    self._table_view.paste()
    series = TimeSeriesFixedResolution("2019-08-08T15:00", "1h",
                                       [-1.1, -2.2, 3.3, 4.4], False, False)
    self.assertEqual(model.value, series)
def main():
    """Crawl the NEFU news list pages (even page numbers 2..258) and export to Excel."""
    for index in range(1, 130):
        page = index * 2
        try:
            print("正在爬取第" + str(page) + "页新闻")
            func('http://news.nefu.edu.cn/dlyw/' + str(page) + '.htm')
        except:
            print("error")
    print("新闻爬取完毕," + "共" + str(len(a)) + "条新闻")
    frame = pandas.DataFrame(a)
    frame.head(10)
    frame.to_excel('./nefu_news.xlsx')
def asLocaleStr(value):
    """Return *value* rendered with the user's locale, or None.

    Only int and float values are converted; anything else (and any
    locale failure) is logged and yields the implicit None the callers
    already tolerate.
    """
    try:
        locale.setlocale(locale.LC_ALL, "")
        # int and float share the same conversion; merged the duplicate branches.
        if isinstance(value, (int, float)):
            return locale.str(value)
        logger.debug("No locale conversion to string available for type %s" % str(type(value)))
    except Exception:
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt propagate.
        logger.warning("Locale translation did not work.")
def _expenses(self, cr, uid, expenses, travel_order, context=None):
    """Render travel-order *expenses* as CSV rows (record type 3).

    Raises Warning when an expense type or currency cannot be resolved.
    Returns the newline-joined CSV string.
    """
    lines = []
    for expense in expenses:
        line = []
        # header - value 3 is mandatory in every row
        line.append(self._quoted('3'))
        # travel expense type
        expense_type = self.get_expense_id(cr, uid, expense.product_id.id)
        if expense_type:
            line.append(self._quoted(expense_type))
        else:
            raise Warning(
                _("Missing or invalid expense type for '{}' in travel order {} ({})!"
                  .format(expense.name, self._travel_order_name,
                          travel_order.employee_id.name)))
        # paid flag (1 when paid from a bank journal)
        paid = 1 if expense.journal_id.type == 'bank' else 0
        line.append(self._quoted(paid))  # not paid
        # JOPPD payout type
        line.append(self._quoted('0'))  # other
        # expense description
        line.append(self._quoted(expense.name))
        # quantity
        qty = expense.unit_quantity
        line.append(self._quoted(locale.str(qty)))
        # unit price
        price = expense.unit_amount
        line.append(self._quoted(locale.str(price)))
        # currency
        currency_name = expense.currency_id.name
        currency_code = self.get_currency_code(cr, uid, expense.currency_id.name)
        if currency_code:
            line.append(self._quoted(currency_code))
        else:
            raise Warning(
                _("Missing or invalid currency for expense '{}' in travel order {} ({})!"
                  .format(expense.name, self._travel_order_name,
                          travel_order.employee_id.name)))
        # exchange rate
        currency_rate = expense.lcy_rate
        line.append(self._quoted(locale.str(currency_rate)))
        # VAT code (left empty)
        line.append(self._quoted(''))
        data = self._delimiter().join(line)
        lines.append(data)
    csv_rows = '\n'.join(lines)
    return (csv_rows)
def p_indice_error(self, p):
    ''' indice : indice LBR error RBR
               | LBR error RBR
               | error RBR
               | LBR error
    '''
    # Report a syntax error inside an index expression.
    # Bug fix: the original printed p.slice[3] unconditionally, which
    # raised IndexError for the two-symbol productions (len(p) == 3);
    # index p.slice only after checking the production length.
    if (len(p) == 5):
        print("Erro sintático no índice na linha " + str(p.slice[3].lineno))
    elif len(p) >= 3 and p.slice[2].type == "error":
        print("Erro sintático no índice na linha " + str(p.slice[2].lineno))
def readProj(self) -> None:
    """Read parameter data from project file.

    Loads each stored parameter (falling back to the Parameters default)
    and pushes it into the matching dialog widget: text fields for the
    channel geometry values, spin boxes for the multipliers, and the
    font size from the application settings.
    """
    proj = QgsProject.instance()
    title = proj.title()
    # Channel burn-in depth (integer entry).
    burninDepth = proj.readNumEntry(title, 'params/burninDepth',
                                    Parameters._BURNINDEPTH)[0]
    self._dlg.burninDepth.setText(str(burninDepth))
    # Channel width/depth coefficients; shown locale-formatted.
    channelWidthMultiplier = proj.readDoubleEntry(
        title, 'params/channelWidthMultiplier',
        Parameters._CHANNELWIDTHMULTIPLIER)[0]
    self._dlg.widthMult.setText(locale.str(channelWidthMultiplier))
    channelWidthExponent = proj.readDoubleEntry(
        title, 'params/channelWidthExponent',
        Parameters._CHANNELWIDTHEXPONENT)[0]
    self._dlg.widthExp.setText(locale.str(channelWidthExponent))
    channelDepthMultiplier = proj.readDoubleEntry(
        title, 'params/channelDepthMultiplier',
        Parameters._CHANNELDEPTHMULTIPLIER)[0]
    self._dlg.depthMult.setText(locale.str(channelDepthMultiplier))
    channelDepthExponent = proj.readDoubleEntry(
        title, 'params/channelDepthExponent',
        Parameters._CHANNELDEPTHEXPONENT)[0]
    self._dlg.depthExp.setText(locale.str(channelDepthExponent))
    # Slope/length multipliers all share the same default.
    reachSlopeMultiplier = proj.readDoubleEntry(
        title, 'params/reachSlopeMultiplier', Parameters._MULTIPLIER)[0]
    self._dlg.reachSlopeMultiplier.setValue(reachSlopeMultiplier)
    tributarySlopeMultiplier = proj.readDoubleEntry(
        title, 'params/tributarySlopeMultiplier', Parameters._MULTIPLIER)[0]
    self._dlg.tributarySlopeMultiplier.setValue(tributarySlopeMultiplier)
    meanSlopeMultiplier = proj.readDoubleEntry(
        title, 'params/meanSlopeMultiplier', Parameters._MULTIPLIER)[0]
    self._dlg.meanSlopeMultiplier.setValue(meanSlopeMultiplier)
    mainLengthMultiplier = proj.readDoubleEntry(
        title, 'params/mainLengthMultiplier', Parameters._MULTIPLIER)[0]
    self._dlg.mainLengthMultiplier.setValue(mainLengthMultiplier)
    tributaryLengthMultiplier = proj.readDoubleEntry(
        title, 'params/tributaryLengthMultiplier', Parameters._MULTIPLIER)[0]
    self._dlg.tributaryLengthMultiplier.setValue(tributaryLengthMultiplier)
    upslopeHRUDrain = proj.readNumEntry(title, 'params/upslopeHRUDrain',
                                        Parameters._UPSLOPEHRUDRAIN)[0]
    self._dlg.upslopeHRUDrain.setText(str(upslopeHRUDrain))
    # Font size comes from the application settings, not the project.
    settings = QSettings()
    if settings.contains('/QSWATPlus/FontSize'):
        self._dlg.pointSizeBox.setValue(
            int(settings.value('/QSWATPlus/FontSize')))
    else:
        self._dlg.pointSizeBox.setValue(Parameters._DEFAULTFONTSIZE)
def _itinerary_expenses(self, cr, uid, itinerary_lines, travel_order, context=None):
    """Render mileage expenses for the itinerary lines as CSV rows (type 3).

    Private vehicles get a fixed 2.0 HRK/km rate paid in cash; company
    vehicles are recorded with a zero rate.  Returns the newline-joined
    CSV string.
    """
    lines = []
    for itinerary in itinerary_lines:
        line = []
        # header - value 3 is mandatory in every row
        line.append(self._quoted('3'))
        # travel expense type = 1 (mileage)
        line.append(self._quoted('1'))
        # paid flag
        line.append(self._quoted(0))  # not paid in advance
        # JOPPD payout type - private car = 4 (cash payout), company car = 0 (not paid)
        if itinerary.vehicle_id.type == 'private':
            line.append(
                self._quoted('4'))  # mileage is paid out in cash
        else:  # company vehicle
            line.append(self._quoted('0'))  # mileage is not paid out
        # expense description
        description = 'Kilometrina'
        line.append(self._quoted(description))
        # quantity (distance driven)
        qty = itinerary.distance
        line.append(self._quoted(locale.str(qty)))
        # price per km
        # print itinerary.vehicle_type.lower(), itinerary.vehicle_id.name, itinerary.vehicle_id.type
        if itinerary.vehicle_id.type == 'private':
            price = 2.0  # HRK/km
        else:
            price = 0.0
        line.append(self._quoted(locale.str(price)))
        # currency
        currency_name = 'HRK'
        currency_code = self.get_currency_code(cr, uid, currency_name)
        line.append(self._quoted(currency_code))
        # exchange rate
        currency_rate = 1.0
        line.append(self._quoted(locale.str(currency_rate)))
        # VAT code (left empty)
        line.append(self._quoted(''))
        data = self._delimiter().join(line)
        lines.append(data)
    csv_rows = '\n'.join(lines)
    return (csv_rows)
def asLocaleStr(value):
    """Return *value* formatted as a string using the system locale.

    Only int and float values are converted; any other type is logged at
    debug level and None is returned.  None is also returned when the
    conversion fails.
    NOTE(review): setlocale mutates the process-wide locale as a side effect.
    """
    try:
        locale.setlocale(locale.LC_ALL, '')
        # int and float take the same path; the original duplicated the branch.
        if isinstance(value, (int, float)):
            return locale.str(value)
        logger.debug(
            "No locale conversion to string available for type %s" % str(type(value)))
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt.
        logger.warning("Locale translation did not work.")
def bytes_to_human(bytes_i):
    """Display a file size in human terms (megabytes, etc.) using SI standard.

    Examples: 1000 -> '1kB', 1234567 -> '1.2MB'.  Decimal places grow with
    magnitude: 0 below 1 kB, 1 below 1 GB, 2 from 1 GB up.  Negative sizes
    get a leading '-'.
    """
    # BUG FIX: `long` does not exist on Python 3; accept both int families.
    try:
        integer_types = (int, long)  # Python 2
    except NameError:
        integer_types = (int,)       # Python 3
    assert(isinstance(bytes_i, integer_types))
    if bytes_i < 0:
        # Handle the sign up front; the multiplier loop can never match a
        # negative value, so the original only reached this after the loop.
        return "-" + bytes_to_human(-bytes_i)
    if 0 == bytes_i:
        return "0"
    storage_multipliers = {1000 ** 5: 'PB', 1000 ** 4: 'TB', 1000 ** 3: 'GB',
                           1000 ** 2: 'MB', 1000: 'kB', 1: 'B'}
    if bytes_i >= 1000 ** 3:
        decimals = 2
    elif bytes_i >= 1000:
        decimals = 1
    else:
        decimals = 0
    # Largest multiplier not exceeding the value wins.
    for key in sorted(storage_multipliers.keys(), reverse=True):
        if bytes_i >= key:
            abbrev = round((1.0 * bytes_i) / key, decimals)
            return locale.str(abbrev) + storage_multipliers[key]
def compare_file(local_path):
    """Compare the downloaded file with a local file.

    Comments/blank lines are stripped from both files first (strip_file).
    Differing lines are written to <dir_name>/file_compare and an
    AssertionError is raised when the files differ in content or length;
    the report file is removed when the compared prefixes match.
    """
    if not os.path.exists(local_path):
        raise AssertionError('No local file %s' % local_path)
    downloaded_stripped = strip_file(world.cfg["dir_name"] + '/downloaded_file')
    local_stripped = strip_file(local_path)
    files_match = True
    # BUG FIX: the report file handle was never closed; use a context manager.
    with open(world.cfg["dir_name"] + '/file_compare', 'w') as outcome:
        for line_number, (i, j) in enumerate(zip(downloaded_stripped, local_stripped), start=1):
            if i != j:
                outcome.write('Line number: ' + str(line_number)
                              + ' \n\tDownloaded file line: "' + i.rstrip('\n')
                              + '" and local file line: "' + j.rstrip('\n') + '"\n')
                files_match = False
    if files_match:
        remove_local_file(world.cfg["dir_name"] + '/file_compare')
    assert files_match, 'Downloaded file is NOT the same as local. Check %s/file_compare for details'\
        % world.cfg["dir_name"]
    # Same-length prefixes matched; differing lengths always fail, with a
    # message saying which file is the prefix of the other.  (The original
    # expressed this as two contradictory asserts, with a typo'd message.)
    if len(downloaded_stripped) != len(local_stripped):
        if len(downloaded_stripped) < len(local_stripped):
            raise AssertionError('Downloaded file is part of a local file.')
        raise AssertionError('Local file is a part of a downloaded file.')
def ResetAllSettings(self):
    """Remove all data from all tables and re-initialise the bookkeeping
    tables.

    Returns a RESPONSE dict with failure_indication, error_code, message
    and data keys (same shape as the other DB accessors in this class).
    """
    RESPONSE = {'failure_indication': 1, 'error_code': None, 'message': None, 'data': None}
    try:
        if self.DBConnected() is True:
            # Remove all data from all tables.
            self.userCollectionBasic.remove()
            self.userCollectionFullProfile.remove()
            self.userProfilesLastRowCountTable.remove()
            self.userProfileLastCrawledProfileIDTable.remove()
            self.InitLastRowCountTable()
            self.InitLastCrawledProfileIdTable()
            RESPONSE['failure_indication'] = 0
            RESPONSE['error_code'] = ERROR_CODES.get('SUCCESS')
            RESPONSE['message'] = 'Successful'
            RESPONSE['data'] = ''
        else:
            RESPONSE['failure_indication'] = 1
            RESPONSE['error_code'] = ERROR_CODES.get('DATABASE_NOT_CONNECTED')
            RESPONSE['message'] = 'Database not connected.'
            RESPONSE['data'] = ''
    except Exception as exp:  # `except Exception, exp` is Python-2-only syntax
        RESPONSE['failure_indication'] = 1
        RESPONSE['error_code'] = ERROR_CODES.get('UNKNOWN_ERROR')
        RESPONSE['message'] = 'Exception occured. %s' % str(exp)
        RESPONSE['data'] = ''
    return RESPONSE  # BUG FIX: RESPONSE was built but never returned
def ReadLastCrawledProfileId(self):
    """Fetch the last crawled profile id from the bookkeeping table.

    Returns a RESPONSE dict; on success RESPONSE['data'] holds the id
    stored under the 'basic' profile row.
    """
    RESPONSE = {'failure_indication': 1, 'error_code': None, 'message': None, 'data': None}
    try:
        if self.DBConnected() is True:
            lastCrawledProfileRow = self.userProfileLastCrawledProfileIDTable.find_one({'profile': 'basic'})
            if lastCrawledProfileRow is not None and type(lastCrawledProfileRow) is dict:
                profileId = lastCrawledProfileRow.get('last_crawled_profile_id')
                RESPONSE['failure_indication'] = 0
                RESPONSE['error_code'] = ERROR_CODES.get('SUCCESS')
                RESPONSE['message'] = 'Successful'
                RESPONSE['data'] = profileId
            else:
                RESPONSE['failure_indication'] = 1
                RESPONSE['error_code'] = ERROR_CODES.get('NOT_FOUND')
                RESPONSE['message'] = 'No rows found for last crawl profile records.'
                RESPONSE['data'] = ''
        else:
            RESPONSE['failure_indication'] = 1
            RESPONSE['error_code'] = ERROR_CODES.get('DATABASE_NOT_CONNECTED')
            RESPONSE['message'] = 'Database not connected.'
            RESPONSE['data'] = ''
    except Exception as exp:  # `except Exception, exp` is Python-2-only syntax
        RESPONSE['failure_indication'] = 1
        RESPONSE['error_code'] = ERROR_CODES.get('UNKNOWN_ERROR')
        RESPONSE['message'] = 'Exception occured. %s' % str(exp)
        RESPONSE['data'] = ''
    return RESPONSE  # BUG FIX: RESPONSE was built but never returned
def UpdateLastCrawledProfileIdTable(self, value):
    """Upsert *value* as the last crawled profile id for the 'basic' profile.

    Returns a RESPONSE dict describing success/failure.
    """
    RESPONSE = {'failure_indication': 1, 'error_code': None, 'message': None, 'data': None}
    try:
        if self.DBConnected() is True:
            # upsert=True: creates the 'basic' row if it does not exist yet.
            self.userProfileLastCrawledProfileIDTable.update(
                {'profile': 'basic'}, {'$set': {'last_crawled_profile_id': value}}, True)
            RESPONSE['failure_indication'] = 0
            RESPONSE['error_code'] = ERROR_CODES.get('SUCCESS')
            RESPONSE['message'] = 'Successful'
            RESPONSE['data'] = ''
        else:
            RESPONSE['failure_indication'] = 1
            RESPONSE['error_code'] = ERROR_CODES.get('DATABASE_NOT_CONNECTED')
            RESPONSE['message'] = 'Database not connected.'
            RESPONSE['data'] = ''
    except Exception as exp:  # `except Exception, exp` is Python-2-only syntax
        RESPONSE['failure_indication'] = 1
        RESPONSE['error_code'] = ERROR_CODES.get('UNKNOWN_ERROR')
        RESPONSE['message'] = 'Exception occured. %s' % str(exp)
        RESPONSE['data'] = ''
    return RESPONSE  # BUG FIX: RESPONSE was built but never returned
def ReadLastRowCountFullProfile(self):
    """Fetch the last row count for the 'full' profile from the bookkeeping
    table.

    Returns a RESPONSE dict; on success RESPONSE['data'] holds the count.
    """
    RESPONSE = {'failure_indication': 1, 'error_code': None, 'message': None, 'data': None}
    try:
        if self.DBConnected() is True:
            lastRowCountRowFullProfile = self.userProfilesLastRowCountTable.find_one({'profile': 'full'})
            if lastRowCountRowFullProfile is not None and type(lastRowCountRowFullProfile) is dict:
                RESPONSE['failure_indication'] = 0
                RESPONSE['error_code'] = ERROR_CODES.get('SUCCESS')
                RESPONSE['message'] = 'Successful'
                RESPONSE['data'] = lastRowCountRowFullProfile.get("last_row_count")
            else:
                RESPONSE['failure_indication'] = 1
                RESPONSE['error_code'] = ERROR_CODES.get('NOT_FOUND')
                RESPONSE['message'] = 'No rows for last row count of full profile has been found in the db.'
                RESPONSE['data'] = ''
        else:
            RESPONSE['failure_indication'] = 1
            RESPONSE['error_code'] = ERROR_CODES.get('DATABASE_NOT_CONNECTED')
            RESPONSE['message'] = 'Database not connected.'
            RESPONSE['data'] = ''
    except Exception as exp:  # `except Exception, exp` is Python-2-only syntax
        RESPONSE['failure_indication'] = 1
        RESPONSE['error_code'] = ERROR_CODES.get('UNKNOWN_ERROR')
        RESPONSE['message'] = 'Exception occured. %s' % str(exp)
        RESPONSE['data'] = ''
    return RESPONSE  # BUG FIX: RESPONSE was built but never returned
def InitLastCrawledProfileIdTable(self):
    """Seed the last-crawled-profile-id table with 0 if no 'basic' row exists.

    Returns a RESPONSE dict describing success/failure.
    """
    RESPONSE = {'failure_indication': 1, 'error_code': None, 'message': None, 'data': None}
    try:
        if self.DBConnected() is True:
            lastCrawledProfileIdRow = self.userProfileLastCrawledProfileIDTable.find_one({'profile': 'basic'})
            if lastCrawledProfileIdRow is None:
                # First run: start crawling from profile id 0.
                self.UpdateLastCrawledProfileIdTable(0)
            RESPONSE['failure_indication'] = 0
            RESPONSE['error_code'] = ERROR_CODES.get('SUCCESS')
            RESPONSE['message'] = 'Successful'
            RESPONSE['data'] = ''
        else:
            RESPONSE['failure_indication'] = 1
            RESPONSE['error_code'] = ERROR_CODES.get('DATABASE_NOT_CONNECTED')
            RESPONSE['message'] = 'Database not connected.'
            RESPONSE['data'] = ''
    except Exception as exp:  # `except Exception, exp` is Python-2-only syntax
        RESPONSE['failure_indication'] = 1
        RESPONSE['error_code'] = ERROR_CODES.get('UNKNOWN_ERROR')
        RESPONSE['message'] = 'Exception occured. %s' % str(exp)
        RESPONSE['data'] = ''
    return RESPONSE  # BUG FIX: RESPONSE was built but never returned
def ajax_load_matches(request):
    """AJAX endpoint: return the matches of a league as JSON.

    Reads the league id from GET['sendValue'] and responds with a list of
    (match_id, home_team_name, visitor_team_name) tuples.
    Raises Http404 for non-AJAX requests.
    """
    if not request.is_ajax():
        raise Http404
    league_id = request.GET.get('sendValue')
    cursor = connection.cursor()
    # SECURITY FIX: the original concatenated the GET value straight into
    # the SQL string (SQL injection); use a parameterized query instead.
    cursor.execute(
        "SELECT MATCH_ID, HOME_TEAM_ID, VISITOR_TEAM_ID"
        " FROM sentiosp_drupal6.tf_match WHERE MATCH_LEAGUE_ID = %s",
        [league_id])
    matches = cursor.fetchall()
    team_ids = list(set(match[1] for match in matches) | set(match[2] for match in matches))
    team_names = {}
    if team_ids:
        # Parameterized IN clause; replaces the hand-built "... OR ... OR FALSE".
        placeholders = ", ".join(["%s"] * len(team_ids))
        cursor = connection.cursor()
        cursor.execute(
            "SELECT TEAM_ID, TEAM_NAME FROM sentiosp_drupal6.td_team"
            " WHERE TEAM_ID IN (" + placeholders + ")",
            team_ids)
        team_names = dict(cursor.fetchall())
    data = list()
    for match in matches:
        try:
            data.append((match[0], team_names[match[1]], team_names[match[2]]))
        except KeyError:
            # Team id missing from td_team: skip the match (was a bare except).
            print(match)
    return HttpResponse(simplejson.dumps(data), mimetype='application/json')
def saveCsv(self):
    """Export self.data to a semicolon-delimited CSV file chosen by the user.

    Remembers the chosen directory in CONF and shows a critical message box
    on any write error.
    """
    output_dir = CONF.get('paths', 'output_dir')
    user_path = os.path.join(output_dir, 'sans-titre.csv')
    fname = QFileDialog.getSaveFileName(self, u"Exporter la table", user_path,
                                        u"CSV (séparateur: point virgule) (*.csv)")
    if fname:
        CONF.set('paths', 'output_dir', os.path.dirname(str(fname)))
        try:
            now = datetime.now()
            csvfile = open(fname, 'wb')
            try:
                writer = UnicodeWriter(csvfile, dialect=csv.excel, delimiter=';')
                writer.writerow([u'OpenFisca'])
                writer.writerow([u'Calculé le %s à %s' % (now.strftime('%d-%m-%Y'), now.strftime('%H:%M'))])
                writer.writerow([u'Système socio-fiscal au %s' % CONF.get('simulation', 'datesim')])
                writer.writerow([])
                for row in self.data:
                    # BUG FIX: `('root')` is just the string 'root', so the old
                    # test also skipped any desc that was a substring of it
                    # ('r', 'oo', ...).  A one-element tuple is intended.
                    if row.desc not in ('root',):
                        outlist = [row.desc]
                        for val in row.vals:
                            outlist.append(locale.str(val))
                        writer.writerow(outlist)
            finally:
                csvfile.close()  # BUG FIX: the file leaked if a writerow raised
        except Exception as e:
            QMessageBox.critical(
                self, "Error saving file", str(e),
                QMessageBox.Ok, QMessageBox.NoButton)
def send_through_http(host_address, host_port, command):
    """POST *command* (a JSON string) to http://host:port and return the
    parsed JSON reply.

    The raw reply text is stored in world.control_channel and the parsed
    result is logged pretty-printed.
    """
    # str(), not locale.str(): the port is part of a URL and must never be
    # subject to locale number formatting.
    url = "http://" + host_address + ":" + str(host_port)
    world.control_channel = requests.post(url,
                                          headers={"Content-Type": "application/json"},
                                          data=command).text
    result = json.loads(world.control_channel)
    log.info(json.dumps(result, sort_keys=True, indent=2, separators=(',', ': ')))
    return result
def bytes_to_human(bytes_i):
    # type: (int) -> str
    """Display a file size in human terms (megabytes, etc.) using preferred
    standard (SI or IEC, chosen by the 'units_iec' option)."""
    if bytes_i < 0:
        return '-' + bytes_to_human(-bytes_i)
    from bleachbit.Options import options
    if options.get('units_iec'):
        prefixes = ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi']
        base = 1024.0
    else:
        prefixes = ['', 'k', 'M', 'G', 'T', 'P']
        base = 1000.0
    # BUG FIX: `long` does not exist on Python 3; accept both int families.
    try:
        integer_types = (int, long)  # Python 2
    except NameError:
        integer_types = (int,)       # Python 3
    assert(isinstance(bytes_i, integer_types))
    if 0 == bytes_i:
        return "0"
    # More decimals for larger magnitudes: 0 below one unit, 1 below base^3,
    # 2 from base^3 up (computed from the original magnitude, before division).
    if bytes_i >= base ** 3:
        decimals = 2
    elif bytes_i >= base:
        decimals = 1
    else:
        decimals = 0
    # Divide down until the value fits under one unit of the current prefix.
    for exponent in range(0, len(prefixes)):
        if bytes_i < base:
            abbrev = round(bytes_i, decimals)
            return locale.str(abbrev) + prefixes[exponent] + 'B'
        bytes_i /= base
    return 'A lot.'
def get_roles(user, obj=None):
    """Returns *all* roles of the passed user.

    This takes direct roles and roles via the user's groups into account.

    If an object is passed local roles will also added. Then all local roles
    from all ancestors and all user's groups are also taken into account.

    This is the method to use if one want to know whether the passed user
    has a role in general (for the passed object).

    **Parameters:**

    user
        The user for which the roles are returned. You can also pass in a
        group to get the roles for just this group
    obj
        The object for which local roles will returned.
    """
    roles = []
    if isinstance(user, Group):
        # A group was passed in rather than a single user.
        groups = [user]
    else:
        groups = user.groups.all()
    group_ids = [g.id for g in groups]

    # SECURITY/ROBUSTNESS FIX: the original interpolated the ids straight
    # into the SQL string; build a parameterized principal filter instead.
    if group_ids:
        in_clause = ", ".join(["%s"] * len(group_ids))
        principal_sql = "(user_id = %s OR group_id IN (" + in_clause + "))"
    else:
        # No groups: original used IN ('') which matches nothing.
        principal_sql = "(user_id = %s)"
    principal_params = [user.id] + group_ids

    # Global roles for the user and the user's groups.
    cursor = connection.cursor()
    cursor.execute(
        "SELECT role_id FROM permissions_principalrolerelation WHERE "
        + principal_sql + " AND content_id IS NULL",
        principal_params)
    for row in cursor.fetchall():
        roles.append(get_role(row[0]))

    # Local roles for the user/groups on the object and all its ancestors.
    while obj:
        ctype = ContentType.objects.get_for_model(obj)
        cursor.execute(
            "SELECT role_id FROM permissions_principalrolerelation WHERE "
            + principal_sql + " AND content_id = %s AND content_type_id = %s",
            principal_params + [obj.id, ctype.id])
        for row in cursor.fetchall():
            roles.append(get_role(row[0]))
        try:
            obj = obj.get_parent_for_permissions()
        except AttributeError:
            obj = None
    return roles
def addmmsi(mmsi, id):
    """Register an MMSI under the given identifier in the global mmsimap.

    Empty identifiers are ignored.  Echoes the addition to stdout when
    `verbose` >= 1 and appends it to `logfile` when `loglevel` >= 1.
    """
    if id != '':
        mmsimap[id] = mmsi
        if verbose >= 1:
            print("adding MMSI: ", mmsi, id)
        if loglevel >= 1:
            logfile.write("adding MMSI: " + locale.str(mmsi) + " " + id + "\n")
def pingrep(self, client, event):
    """Announce the measured lag on the channel in reply to a PING event.

    Returns 0 without acting unless the first event argument is "PING";
    the second argument carries the millisecond timestamp the ping was
    sent at.
    """
    if event.arguments[0] != "PING":
        return 0
    now_ms = int(round(time.time() * 1000))
    elapsed_ms = now_ms - int(event.arguments[1])
    secs = locale.str(elapsed_ms / 1000)
    client.msg(self.chan,
               event.source + " tiene un lag de " + secs + " segundos.")
def GenerateLojeProductSheet(self, product_ident_list, start_index, manufacturer="", increase_index=True):
    """Build a list of row dicts (one per product) for a Loje product sheet.

    product_ident_list -- product identifier strings; duplicates are collapsed
        into (ident, quantity) pairs by self._ProcessProductQuantities.
    start_index -- first numeric product index to assign.
    manufacturer -- value copied into every row's "fabricante" column.
    increase_index -- not referenced in this body; TODO confirm intended use.

    Raises ProductCodeError for unknown category codes and RuntimeError when
    a generated barcode turns out to be purely numeric while
    category-on-label mode is active.
    """
    sheet = []
    product_index = start_index - 1
    # Collapse duplicate idents into (ident, quantity) pairs.
    product_ident_list = self._ProcessProductQuantities(product_ident_list)
    print product_ident_list
    for product_ident, quantity in product_ident_list:
        row = {}
        product_index += 1
        row[self.ID_HEADER] = product_index
        if self._category_on_label:
            # Barcode = leading category character + zero-padded index; a
            # purely numeric barcode is rejected (the category character is
            # expected to make it non-numeric).
            barcode = "%s%06d" % (product_ident[0].upper(), product_index)
            try:
                int(barcode)
            except ValueError:
                pass
            else:
                raise RuntimeError("Product Barcode couldn't be an Integer")
        else:
            barcode = "%06d" % product_index
        row[self.BARCODE_HEADER] = barcode
        row[self.IDENT_HEADER] = product_ident.upper()
        try:
            # First character of the ident selects the primary category.
            category = self._primary_categories[product_ident[0].lower()]
        except KeyError:
            raise ProductCodeError(product_ident[0])
        row[self.CATEGORY_HEADER] = category
        row["unidade compra"] = self._product_unity
        row["unidade venda"] = self._product_unity
        # Characters from position 3 onward encode the price in tenths.
        price = float(product_ident[3:]) / 10.
        # price_list[0]: markup divisor giving cost; price_list[1]: second
        # price tier multiplier -- presumably configured elsewhere; verify.
        cost = price / self.price_list[0]
        price2 = cost * self.price_list[1]
        row["custo"] = locale.str(round(cost, 2))
        row["preco"] = locale.str(round(price, 2))
        row["preco2"] = locale.str(round(price2, 2))
        row["estoque"] = "v"
        try:
            # Characters 1-2 of the ident select the secondary category.
            sec_category = self._secondary_categories[product_ident[1:3].lower()]
        except KeyError:
            raise ProductCodeError(product_ident[1:3])
        row["descricao"] = "%s %s" % (category, sec_category)
        row["categoria2"] = sec_category
        row["fabricante"] = manufacturer
        row[self.QUANTITY_HEADER] = quantity
        sheet.append(row)
    return sheet