def __generate(self, limit, row_num, filename, order_num, file_count):
    order_data = get_data(self.order_template)
    order_columns = order_data['订单归集']
    for order_num_count in range(limit):
        good_subscript = randrange(
            0, len(self.goods_data) - self.goods_in_order_num)
        for goods_count in range(0, self.goods_in_order_num):
            column_data = ['' for j in range(row_num)]
            column_data[0] = order_num + str(order_num_count)
            column_data[5] = self.customer_data[file_count]['name']
            column_data[7] = self.customer_data[file_count]['contactor']
            column_data[8] = '11111111111'
            column_data[9] = self.customer_data[file_count]['area']
            column_data[10] = '北京市'
            column_data[11] = '北京市'
            column_data[12] = '东城区'
            column_data[14] = self.goods_data[good_subscript + goods_count]['code']
            column_data[15] = self.goods_data[good_subscript + goods_count]['name']
            column_data[17] = self.goods_data[good_subscript + goods_count]['marketPrice']
            column_data[18] = randrange(1, 50)
            order_columns.append(column_data)
    new_order_data = OrderedDict()
    new_order_data.update({'订单归集': order_columns})
    save_data(filename, new_order_data)
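# Illustrative sketch (not part of the snippets above): the get_data/save_data
# round trip that functions like __generate rely on. get_data() returns a
# dict-like mapping of sheet name -> list of row lists; rows appended to a
# sheet's list are written back out by save_data(). The file name here is made up.
from collections import OrderedDict

from pyexcel_xlsx import get_data, save_data

book = OrderedDict({"Sheet1": [["id", "name"], [1, "alpha"]]})
save_data("demo.xlsx", book)

loaded = get_data("demo.xlsx")         # {'Sheet1': [['id', 'name'], [1, 'alpha']]}
loaded["Sheet1"].append([2, "beta"])   # append a new data row
save_data("demo.xlsx", loaded)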
def get_input_post(request):
    # print("I am called")
    try:
        excel_file = request.FILES['files']
        print("hello world")
        if str(excel_file).split('.')[-1] == "xls":
            data = xls_get(excel_file)
            print(data)
            print("xls file called")
        elif str(excel_file).split('.')[-1] == "xlsx":
            data = xlsx_get(excel_file)
            print("xlsx file called")
        else:
            return render(request, 'index.html')
        json_data = json.dumps(data)
        # print(json_data)
        j = 1
        for i in range(5):
            data['Sheet1'][i + 1][j] = str(
                int(data['Sheet1'][i + 1][j]) + int(data['Sheet1'][i + 1][j]))
            print(data['Sheet1'][i + 1][j])
        save_data("my_output.xlsx", data)
        return render(request, 'success.html')
    except MultiValueDictKeyError:
        return render(request, 'index.html')
def export_xlsx(request):
    # dump database into spreadsheet
    OUTFILE = os.path.join(settings.MEDIA_ROOT, 'output.xlsx')
    data = OrderedDict()
    places = Place.objects.all()
    data_list = [[
        p.location_name, p.street_address, p.city, p.zip, p.borough, p.county,
        p.phone, p.hours, p.type, p.latitude, p.longitude, p.hours, p.note,
        p.website
    ] for p in places]
    data_list.insert(0, [
        'LOCATION', 'STREET ADDRESS', 'CITY', 'ZIP', 'BOROUGH', 'COUNTY',
        'PHONE', 'HOURS', 'TYPE', 'LATITUDE', 'LONGITUDE', 'HOURS', 'NOTE',
        'WEBSITE'
    ])
    print(data_list)
    data.update({'Sheet 1': data_list})
    save_data(OUTFILE, data)
    return redirect('/media/output.xlsx')
def save_xls_file(xls_header, xls_sheet, file_name):
    sheet_data = []
    data = OrderedDict()
    if os.path.exists(file_name):
        # Open the existing spreadsheet file
        with xlrd.open_workbook(file_name) as rb:
            # Get the first sheet
            sheet = rb.sheet_by_index(0)
            # Number of rows
            nrows = sheet.nrows
            # Append every existing row to sheet_data
            for i in range(nrows):
                sheet_data.append(sheet.row_values(i))
            # Insert a blank row before writing new content again
            list_space = []
            sheet_data.append(list_space)
    # Add the header row
    sheet_data.append(xls_header)
    for row_data in xls_sheet:
        sheet_data.append(row_data)
    # Add the sheet
    data.update({u"Sheet1": sheet_data})
    print(type(data))
    # Save as an xls file
    save_data(file_name, data)
def write_data(product_price, excel_data, companies):
    rows, columns = excel_data.shape
    j = 0
    i = 0
    new_data = []
    new_data.append([])
    nested_list = new_data[0]
    for companies_name in companies:
        nested_list.append(companies_name)
    while i < rows:
        new_data.append([])
        nested_list = new_data[i + 1]
        while j < columns:
            column = excel_data.columns[j]
            selected_data = excel_data[column].tolist()
            nested_list.append(selected_data[i])
            j += 1
        j = 0
        i += 1
    new_data.append([])
    nested_list = new_data[-1]
    for item in product_price:
        nested_list.append(item)
    data = get_data("Data.xlsx")
    data.update({"Sheet1": new_data})
    save_data("Data.xlsx", data)
def export5():
    t0 = time.perf_counter()  # time.clock() was removed in Python 3.8
    # https://github.com/pyexcel/pyexcel-xlsx
    from collections import OrderedDict
    from pyexcel_xlsx import save_data
    from io import BytesIO
    titles = [
        "CommodityNo", "CommodityName", "Quantity", "RealQuantity", "Price",
        "MakeDate", "Maker"
    ]
    res = db.session.execute("SELECT * FROM tbl_purchaseinfo;").fetchall()
    data = OrderedDict()
    # A single update keeps the title row; updating the same key twice would
    # overwrite it with the data rows only.
    data.update({"统计": [titles] + [list(r) for r in res]})
    io = BytesIO()
    save_data(io, data)
    response = make_response(io.getvalue())
    response.headers[
        "Content-type"] = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
    response.headers[
        "Content-Disposition"] = "attachment; filename=new_big_file5.xlsx;"
    t = time.perf_counter() - t0
    print(t)
    return response
def saveStats(self):
    data = get_data(self.currFile)
    save_data(self.oldFile, data)
    namesDict = {}
    for i in range(1, 5):
        namesDict[data['cricStats'][i][0]] = i
    strings = ['BatArchives', 'BatArchives', 'BowlArchives']
    for i in range(len(strings)):
        len1 = len(data[strings[i]][0])
        el = []
        for j in range(len1):
            el.append('')
        data[strings[i]].append(el)
    for i in range(self.noOfInnings):
        data = self.innings[i].writeStats(data, namesDict,
                                          self.batsmanNames[i],
                                          self.bowlerNames[i])
    save_data(self.currFile, data)
    saveData = messagebox.showinfo("Stats Info", "Stats saved successfully!")
    self.lead = self.innings[0].retScore() - self.innings[1].retScore()
    if self.selected.get() == 1 and self.inningsCount == 0:
        self.window2 = Toplevel()
        self.window2.title("Innings-2")
        self.window2.geometry('1350x1200')
        self.clickedStartMatch(False)
        self.runsButtons(17, self.window2)
        self.inningsCount += 1
        self.window2.mainloop()
        self.window2.destroy()
        self.window1.destroy()
def encode(self, data: dict, **options) -> io.BinaryFileStream:
    """
    Encodes the data into an Office Open XML Workbook file-like stream.

    Arguments:
        data: The data to encode
        **options: The encoding options

    Returns:
        An Office Open XML Workbook file-like stream

    Raises:
        geodatabr.core.encoders.EncodeError: If data fails to encode
    """
    try:
        xlsx_file = io.BinaryFileStream()
        xlsx_data = types.OrderedMap()

        for entity, records in data.items():
            xlsx_data[entity] = [list(records.first().keys())] \
                + [list(record.values()) for record in records]

        pyexcel_xlsx.save_data(xlsx_file, xlsx_data)
        xlsx_file.seek(0)

        return xlsx_file
    except Exception:
        raise encoders.EncodeError
def generate_payment_xlsx(self):
    if len(self.get_payment_files()) == len(
            self.customers.get_customer_files()):
        logging.warning('no need to generate payment xlsx')
        return
    fund_account_code = ['0001', '0002']
    fund_account_abstract = [
        u'现金充值', u'其他充值', u'销售返点', u'退款', u'订单付款', u'其他付款'
    ]
    for customer_file in self.customers.get_customer_files():
        customer_data = get_data(customer_file)
        customer_columns = list(customer_data[u'客户数据'])
        fund_account_data = get_data(self.pay_template)
        fund_account_data_columns = list(fund_account_data['Sheet1'])
        for customer in customer_columns[2:]:
            fund_account_data_column = [
                customer[0],
                fund_account_code[randrange(0, len(fund_account_code))],
                randrange(500, 10000),
                fund_account_abstract[randrange(0, len(fund_account_abstract))]
            ]
            fund_account_data_columns.append(fund_account_data_column)
        new_fund_account_data = OrderedDict()
        new_fund_account_data.update({'Sheet1': fund_account_data_columns})
        save_data(customer_file.replace('customers', 'payment'),
                  new_fund_account_data)
def save(self, formatted=False):
    if formatted:
        temp_filename = os.path.join(mkdtemp(), mktemp(suffix='.xlsx'))
        pyexcel_xlsx.save_data(temp_filename, self.excel_raw)
        formatter = ExcelFormatter(
            excel_data_file=temp_filename,
            excel_formatting_file=self.excel_filename,
            out_file=self.excel_filename)
        formatter.do_formatting()
        formatter.save()
    else:
        pyexcel_xlsx.save_data(self.excel_filename, self.excel_raw)
def deal_data(self):
    cur = datetime.datetime.now()
    year = cur.year
    month = cur.month
    # Assumption: the original never defines data_str; a date string built
    # from the current date is used here as a placeholder value.
    data_str = cur.strftime("%Y/%m/%d")
    # Get the template ("demo") workbook
    excel_data = deepcopy(Globals.get_origin_excel_data())
    target_file_name = self.out_put_filename
    origin_data = []
    target_rows = []
    # Build the output rows
    for i in range(len(origin_data)):
        one_data = origin_data[i]
        row = deepcopy(Globals.pingzheng_demo)
        row2 = deepcopy(Globals.pingzheng_demo)
        target_rows.append(row)
        target_rows.append(row2)
        row[Globals.get_pingzheng_idx("记账日期")] = data_str
        row2[Globals.get_pingzheng_idx("记账日期")] = data_str
        row[Globals.get_pingzheng_idx("业务日期")] = data_str
        row2[Globals.get_pingzheng_idx("业务日期")] = data_str
        row[Globals.get_pingzheng_idx("辅助账业务日期")] = data_str
        row2[Globals.get_pingzheng_idx("辅助账业务日期")] = data_str
        row[Globals.get_pingzheng_idx("会计期间")] = str(cur.month)
        row2[Globals.get_pingzheng_idx("会计期间")] = str(cur.month)
        row[Globals.get_pingzheng_idx("凭证类型")] = "转"
        row2[Globals.get_pingzheng_idx("凭证类型")] = "转"
        row[Globals.get_pingzheng_idx("凭证号")] = "20180600269"
        row2[Globals.get_pingzheng_idx("凭证号")] = "20180600269"
        row[Globals.get_pingzheng_idx("分录号")] = i * 2 + 1
        row2[Globals.get_pingzheng_idx("分录号")] = i * 2 + 2
        zhaiyao = ""
        row[Globals.get_pingzheng_idx("摘要")] = zhaiyao
        row2[Globals.get_pingzheng_idx("摘要")] = zhaiyao
        row[Globals.get_pingzheng_idx("科目")] = ""
        row2[Globals.get_pingzheng_idx("科目")] = ""
        row[Globals.get_pingzheng_idx("方向")] = 1
        row[Globals.get_pingzheng_idx("原币金额")] = 1
        row2[Globals.get_pingzheng_idx("原币金额")] = -1
        row[Globals.get_pingzheng_idx("借方金额")] = 1
        row[Globals.get_pingzheng_idx("现金流量标记")] = 2
        row2[Globals.get_pingzheng_idx("现金流量标记")] = 2
        row[Globals.get_pingzheng_idx("辅助账摘要")] = zhaiyao
        row2[Globals.get_pingzheng_idx("辅助账摘要")] = zhaiyao
        row[Globals.get_pingzheng_idx("核算项目1")] = "长益租户"
        row[Globals.get_pingzheng_idx("名称1")] = ""
        row[Globals.get_pingzheng_idx("编码1")] = ""
    for row in target_rows:
        excel_data["凭证"].append(row)
    save_data(target_file_name, excel_data)
    Globals.eval_date_format(target_file_name, ["B", "C", "BN"])
def output(bot, update):
    print(update)
    uid = update.message.from_user.id
    if uid not in ADMINS:
        return
    foud = OrderedDict()
    res = UndefinedRequests.select(
        UndefinedRequests.request,
        fn.COUNT(UndefinedRequests.id).alias('count')). \
        group_by(UndefinedRequests.request).execute()
    foud.update({'Отсутствия в базе': [(r.request, r.count) for r in res]})
    fname = str(dt.now()) + '.xlsx'
    save_data(fname, foud)
    bot.sendDocument(uid, document=open(fname, 'rb'))
    os.remove(fname)
def main():
    # Get command line arguments
    args = get_arguments()

    # Get all tables and read in as nested lists
    combined_data = dict()
    table_fps = args.table_dir.glob('*tsv')
    for table_fp in table_fps:
        with table_fp.open('r') as fh:
            data = [line.rstrip().split('\t') for line in fh]
        combined_data[table_fp.stem] = data

    # Write excel
    pyexcel_xlsx.save_data(str(args.output_fp), combined_data)
def print_minorsheet(self):
    # Get data for index
    data = get_data(self._minor_input_path)["Sheet1"]
    full_list = [data[0]]
    # Create result
    for row in self._minor_index:
        result_row = [row[0]]
        for secrow in self._minor_index:
            if row[1] is None or secrow[1] is None:
                result_row.append(None)
            else:
                current = self._chord.index(row[1])
                nxt = self._chord.index(secrow[1])
                if current == nxt:
                    result_row.append(-36)
                else:
                    result_row.append(self.getminorvalue(current, nxt))
        full_list.append(result_row)
    print(full_list)
    # Save result
    sheetx = {"Sheet1": full_list}
    save_data(self._minor_output_path, sheetx)
    # Calculate correctness using the Chord Flow file
    data = get_data(self._minor_input_path, start_row=1)["Sheet1"]
    Yessum = 0.0
    Yes = 0
    Nosum = 0.0
    No = 0
    data[len(self._minor_index) - 1].append(None)
    for row in range(0, len(self._minor_index)):
        for column in range(0, len(self._minor_index)):
            if data[row][column + 1] == "Yes":
                Yes += 1
                Yessum += exp(full_list[row + 1][column + 1])
            else:
                No += 1
                Nosum += exp(full_list[row + 1][column + 1])
    print("Yes: ", Yes, Yessum, Yessum / Yes, "No: ", No, Nosum, Nosum / No)
    # Display the correctness of Major and Minor
    self._minorsum = Yessum + Nosum
    self._minorcount = Yes + No
    print(self._majorcount, self._majorsum,
          self._majorsum / self._majorcount, self._minorcount,
          self._minorsum, self._minorsum / self._minorcount)
def outputXLSX(self, filename, rowlist):
    content_data = OrderedDict()
    content_data.update({TestFields().__WORKSHEET_NAME__: rowlist})
    data_io = BytesIO()
    save_data(data_io, content_data)
    # Get a response object and set its attributes
    # before returning the response object.
    resp = make_response()
    resp.content_type = \
        'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
    resp.headers['Content-disposition'] = \
        'attachment;filename={0}'.format(filename)
    resp.data = data_io.getvalue()
    return resp
def export_same_69code_sku():
    """Export product SKUs with duplicate 69 (barcode) codes to xd.xlsx."""
    sql = "SELECT sku_code,product_code,jd_sku_code,sync_date,create_date,update_date FROM t_invoicing_product_sku " \
          "WHERE product_code IN (" \
          "SELECT temp.product_code FROM (SELECT product_code FROM t_invoicing_product_sku " \
          "GROUP BY product_code HAVING count(product_code) > 1) temp) ORDER BY product_code"
    result = query_all(sql, None)
    # Open the predefined header file
    xh = get_data("d:/商品表表头.xlsx")
    # Assemble the full sheet data
    xd = OrderedDict()
    xd.update({"Sheet1": xh['Sheet1'] + list(result)})
    # Save to a separate file
    save_data("d:/重复69码的sku.xlsx", xd)
def load_data(self):
    self.log(self.my_filename.split('\\')[-1] + " 导入成功。")
    self.my_data = get_data(self.my_filename)["Sheet1"]
    title_row = self.my_data[0]
    kemu_idx = title_row.index("科目编码")
    kemu_value = "1122.01.01"
    zhaiyao_idx = title_row.index("摘要")
    del self.my_data[0]
    temp_data = [title_row]
    for row in self.my_data:
        if kemu_value == row[kemu_idx]:
            if "数据采集" in row[zhaiyao_idx]:
                temp_data.append(row)
    self.my_data = temp_data
    save_data(self.out_put_filename2, {"筛选收款": temp_data})
def __generate(self, limit, row_num, filename, goods_name):
    goods_data = get_data(self.goods_template)
    goods_columns = goods_data['商品数据']
    for i in range(limit):
        column_data = ['' for j in range(row_num)]
        column_data[0] = goods_name + str(i)
        column_data[1] = goods_name + str(i)
        column_data[2] = str(self_brands[randrange(0, len(self_brands))])
        column_data[3] = str(self_categories[randrange(0, len(self_categories))])
        # column_data[11] = u'相关介绍: 性能测试初始化的商品数据,采用较短的介绍长度'
        column_data[11] = u'件'
        # column_data[17] = u'上架'
        # column_data[19] = str(random.randrange(10, 50))
        # column_data[20] = str(random.randrange(50, 200))
        goods_columns.append(column_data)
    new_goods_data = OrderedDict()
    new_goods_data.update({'商品数据': goods_columns})
    save_data(filename, new_goods_data)
def write_xlsx(self, file: Union[str, BinaryIO]) -> None:
    """
    Write the contents in XLSX (Excel) format to a file.

    Args:
        file: filename or file-like object
    """
    if XLSX_VIA_PYEXCEL:
        # use pyexcel_xlsx
        data = self._get_pyexcel_data(convert_for_openpyxl)
        pyexcel_xlsx.save_data(file, data)
    else:
        # use openpyxl
        # Marginal performance gain with write_only. Does not automatically
        # add a blank sheet.
        wb = XLWorkbook(write_only=True)
        valid_name_dict = self.get_pages_with_valid_sheet_names()
        for page, title in valid_name_dict.items():
            ws = wb.create_sheet(title=title)
            page.write_to_openpyxl_xlsx_worksheet(ws)
        wb.save(file)
def gen_xlsx(xlsx_filename, xml_filename):
    try:
        # data with format {'sheet1': [[line1], [line2]]}
        print('Reading data from xlsx....')
        data = read_data(xlsx_filename)
    except Exception:
        raise ValueError('*.xlsx and program must be in same directory!')
    # get first sheet name
    sheet_1 = list(data.keys())[0]
    first_line = ['IP', 'OS', 'Port', 'Protocol', 'Service', 'Banner']
    data[sheet_1][0] = first_line
    lines = get_xml_info(xml_filename)
    for i in lines:
        data[sheet_1].append(i)
    print('saving data to result.xlsx')
    try:
        save_data('result.xlsx', data)
    except Exception:
        print('Close result.xlsx and try again!')
def standardizeMinor(self):
    # Standardize the error of Major and Minor to Major
    # Get data
    full_list = get_data(self._minor_output_path)["Sheet1"]
    # Normalize data
    normalize_value = self._majorsum / self._majorcount
    normalize_value *= self._minorcount / self._minorsum
    for row in full_list[1:]:
        for column in range(1, len(row)):
            value = exp(row[column]) * normalize_value
            row[column] = log(value)
    # Store data
    sheetx = {"Sheet1": full_list}
    save_data(self._minor_output_path, sheetx)
    # Calculate correctness using the Chord Flow file
    data = get_data(self._minor_input_path, start_row=1)["Sheet1"]
    Yessum = 0.0
    Yes = 0
    Nosum = 0.0
    No = 0
    data[len(self._minor_index) - 1].append(None)
    for row in range(0, len(self._minor_index)):
        for column in range(0, len(self._minor_index)):
            if data[row][column + 1] == "Yes":
                Yes += 1
                Yessum += exp(full_list[row + 1][column + 1])
            else:
                No += 1
                Nosum += exp(full_list[row + 1][column + 1])
    print("Yes: ", Yes, Yessum, Yessum / Yes, "No: ", No, Nosum, Nosum / No)
    # Display the correctness of Major and Minor
    self._minorsum = Yessum + Nosum
    self._minorcount = Yes + No
    print(self._majorcount, self._majorsum,
          self._majorsum / self._majorcount, self._minorcount,
          self._minorsum, self._minorsum / self._minorcount)
def load_data(self):
    excel1 = get_data(self.my_filename1)
    the_keys1 = list(excel1.keys())[0:3]
    excel2 = get_data(self.my_filename2)
    the_keys2 = list(excel2.keys())[0:3]
    gongzi_list = self.eval_gongzi(excel1[the_keys1[0]], excel2[the_keys2[0]])
    shebao_list = self.eval_shebao(excel1[the_keys1[1]], excel2[the_keys2[1]])
    gongjijin_list = self.eval_gongjijin(excel1[the_keys1[2]],
                                         excel2[the_keys2[2]])
    self.my_data = {
        "工资": gongzi_list,
        "社保": shebao_list,
        "公积金": gongjijin_list,
    }
    save_data(self.out_put_filename1, self.my_data)
    self.refer_data = get_data(self.refer_filename)
def export(excel_dict, file_name):
    data = OrderedDict()
    data.update(excel_dict)
    # xlsx output is binary, so a BytesIO stream is needed rather than StringIO
    io = BytesIO()
    save_data(io, data)
    response = Response()
    response.status_code = 200
    response.data = io.getvalue()
    response_headers = Headers({
        'Pragma': "public",  # required
        'Expires': '0',
        'Cache-Control': 'must-revalidate, post-check=0, pre-check=0',
        'Content-Type': "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
        'Content-Disposition': 'attachment; filename=\"%s\";' % file_name,
        'Content-Transfer-Encoding': 'binary',
        'Content-Length': len(response.data)
    })
    response.headers = response_headers
    return response
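# Illustrative sketch (not part of the snippets above): the in-memory pattern
# that export() and the other response-building functions rely on. save_data()
# accepts a file-like object; for .xlsx output the stream must be binary, and
# its bytes can then be used as a download response body.
from collections import OrderedDict
from io import BytesIO

from pyexcel_xlsx import save_data

data = OrderedDict()
data.update({"Sheet1": [["name", "qty"], ["apple", 3], ["pear", 5]]})

stream = BytesIO()
save_data(stream, data)          # write the workbook into the in-memory stream
xlsx_bytes = stream.getvalue()   # raw bytes, e.g. for an HTTP attachment
print(len(xlsx_bytes))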
def save_xls_file(url, data_list):
    # data_list items: original image, processed image, digit rectangle region,
    # image name, full path, result image path, process image path, result list
    OD = OrderedDict()
    # Sheet data
    sheet_1 = []
    # Header row
    row_1_data = [u"图片名称", u"图片路径", u"结果集"]
    sheet_1.append(row_1_data)
    # Data for each row
    for data in data_list:
        img_name = data[3]
        img_path = data[4]
        img_res = data[7]
        new_num_list = [str(x) for x in img_res]
        row_2_data = [img_name, img_path, ",".join(new_num_list)]
        sheet_1.append(row_2_data)
    # Add the sheet
    OD.update({u"这是结果表": sheet_1})
    # Save as an xls file
    save_data(url, OD)


# if __name__ == '__main__':
#     # read_xls_file()
#     save_xls_file("url", [])
def create_table(self, users, record):
    '''Create an .xlsx file containing per-user statistics (median and average)'''
    data = OrderedDict()
    spread = [["User", "Before", "During", "After"]]
    for user in users:
        try:
            b = self.restore[user]["%s_before" % record]
            d = self.restore[user]["%s_during" % record]
            a = self.restore[user]["%s_after" % record]
            spread.append([user, b, d, a])
        except KeyError:
            pass
    spread.append([
        "Average", "=AVERAGE(B2:B297)", "=AVERAGE(C2:C297)",
        "=AVERAGE(D2:D297)"
    ])
    spread.append([
        "Median", "=MEDIAN(B2:B297)", "=MEDIAN(C2:C297)", "=MEDIAN(D2:D297)"
    ])
    data.update({"Sheet 1": spread})
    save_data("results/%s.xlsx" % record, data)
def BER_Check():
    with open("BER_Record_20181109.txt", 'w') as recordfile:
        rd_data = get_data(Excel_filename)
        sheet_names = list(rd_data.keys())
        sheet_values = list(rd_data.values())
        print(sheet_names)  # print sheet names
        sv_data = OrderedDict()  # Excel save directory
        print(rd_data[sheet_names[0]])
        print(rd_data[sheet_names[1]])
        print(rd_data[sheet_names[2]])
        len1 = len(rd_data)
        len2 = len(sheet_values[0])
        len3 = len(sheet_values[0][0])
        print(len1, len2, len3)
        # create a sheet list for Excel
        sheet_data = [[[0 for y in range(len3)] for x in range(len2 * 2)]
                      for z in range(len1)]
        for i in range(len1):
            for j in range(len2):
                for k in range(len3):
                    print(i, j, k)
                    chip_id = rd_data[sheet_names[i]][j][k]
                    sheet_data[i][j * 2][k] = chip_id  # Chip ID
                    result = search_ber_file(chip_id)  # BER check result
                    sheet_data[i][j * 2 + 1][k] = result
                    # list NP, NF chip locations
                    if result != "BP" and result != "BMP" and chip_id != 9008:
                        recordfile.write("%s %2d %2d %s %s\n" %
                                         (sheet_names[i], j, k, chip_id, result))
            # sheet_name and sheet_data
            sv_data.update({sheet_names[i]: sheet_data[i]})
        # save excel file
        save_data("LOCx2 Passed Chips in Trays_20181109_BERChecked.xlsx",
                  sv_data)
        try:
            emisor = factura['cfdi:Emisor']['@Nombre']
        except:
            emisor = 'na'
        # add it to the data list
        try:
            lista.append([
                anio, mes, dia, hora, folio, emisor, cp, descripcion,
                float(total), float(iva), item
            ])
        except:
            print(item + ' TypeError')
    else:
        print(item + ' ErrorVersion')

# Append the total number of records
lista.append(['total', to])

# Save the data
data.update({"MagicAccounting": lista})
save_data("Contabilidad.xlsx", data)

# Success messages
print("CFDI version 3.3")
print("Se han escrito " + str(to) + " registros")
print("Presione una tecla para continuar")
if sys.version_info[0] < 3:
    raw_input()
else:
    input()
def as_excel_file(self):
    io = BytesIO()
    pyexcel_xlsx.save_data(io, {'Sheet 1': self.as_rows})
    return io.getvalue()
def deal_zhifubao_data(self):
    target_excel = {"凭证": []}
    target_data = target_excel["凭证"]
    row1 = [
        "公司", "记账日期", "业务日期", "会计期间", "凭证类型", "凭证号", "分录号", "摘要", "科目",
        "币种", "汇率", "方向", "原币金额", "数量", "单价", "借方金额", "贷方金额", "制单人",
        "过账人", "审核人", "附件数量", "过账标记", "机制凭证模块", "删除标记", "凭证序号", "单位",
        "参考信息", "是否有现金流量", "现金流量标记", "业务编号", "结算方式", "结算号", "辅助账摘要",
        "核算项目1", "编码1", "名称1", "核算项目2", "编码2", "名称2", "核算项目3", "编码3", "名称3",
        "核算项目4", "编码4", "名称4", "核算项目5", "编码5", "名称5", "核算项目6", "编码6", "名称6",
        "核算项目7", "编码7", "名称7", "核算项目8", "编码8", "名称8", "发票号", "换票证号", "客户",
        "费用类别", "收款人", "物料", "财务组织", "供应商", "辅助账业务日期", "到期日",
    ]
    target_data.append(row1)
    row2 = [
        "公司", "记账日期", "会计期间", "凭证类型", "凭证号", "币种", "分录号", "对方分录号",
        "主表信息", "附表信息", "原币", "本位币", "报告币", "主表金额系数", "附表金额系数", "性质",
        "核算项目1", "编码1", "名称1", "核算项目2", "编码2", "名称2", "核算项目3", "编码3", "名称3",
        "核算项目4", "编码4", "名称4", "核算项目5", "编码5", "名称5", "核算项目6", "编码6", "名称6",
        "核算项目7", "编码7", "名称7", "核算项目8", "编码8", "名称8"
    ]
    target_excel["现金流量"] = [row2]
    refer_data = self.refer_data
    bank_data = self.my_data
    target_rows = []
    row_num = len(bank_data) - 1
    date_str = self.label.text()
    month = date_str.split('/')[1]
    last_month = int(month) - 2
    if last_month == 0:
        last_month = 12
    elif last_month == -1:
        last_month = 11
    if last_month < 10:
        last_month = "0" + str(last_month)
    else:
        last_month = str(last_month)
    year = date_str.split('/')[0]
    day = date_str.split('/')[2]
    # Replacement info
    # replace_dict = self.get_replace_dict()
    # Bank voucher number
    # pingzhenghao_bank_idx = bank_data[0].index("凭证号")
    # Original-currency amount index
    # yuanbijine_bank_idx = bank_data[0].index("原币金额")
    # Accounting item index
    # hesuanxiangmu_bank_idx = bank_data[0].index("核算项目")
    # Credit amount
    daifangjine_bank_idx = bank_data[0].index("贷方发生额")
    # Abstract
    zhaiyao_bank_idx = bank_data[0].index("摘要")
    # Transaction time
    jiaoyitime_bank_idx = bank_data[0].index("交易时间")
    for i in range(row_num):
        idx = i + 1
        row = []
        row2 = []
        target_rows.append(row)
        # target_rows.append(row2)
        # Company
        row.append("E018")
        row2.append("E018")
        # Posting date
        row.append(date_str)
        row2.append(date_str)
        # Business date
        row.append(date_str)
        row2.append(date_str)
        # Accounting period
        row.append(int(month))
        row2.append(int(month))
        # Voucher type
        row.append("收")
        row2.append("收")
        # Voucher number
        row.append("20181100963")
        row2.append("20181100963")
        # Entry number
        row.append(idx)
        row2.append(idx * 2)
        # Abstract
        jiaoyitime_str = bank_data[idx][jiaoyitime_bank_idx].split()[0]
        middle_str = "收到 微信一点停支付宝返款-"
        zhiayao_str = bank_data[idx][zhaiyao_bank_idx]
        zhaiyao_value = jiaoyitime_str + middle_str + zhiayao_str
        row.append(zhaiyao_value)
        row2.append("")
        # Account
        row.append("1002.01")
        row2.append("1002.01")
        # Currency
        row.append("BB01")
        row2.append("BB01")
        # Exchange rate
        row.append(1)
        row2.append(1)
        # Direction
        row.append(1)
        row2.append(0)
        # Original-currency amount
        money_str = bank_data[idx][daifangjine_bank_idx].replace(",", "")
        curr_money = float(money_str)
        row.append(curr_money)
        row2.append(curr_money)
        # sum = sum + int(curr_money * 100)
        # Quantity
        row.append(0)
        row2.append(0)
        # Unit price
        row.append(0)
        row2.append(0)
        # Debit amount
        row.append(curr_money)
        row2.append("")
        # Credit amount
        row.append("")
        row2.append("")
        # Preparer
        row.append(Globals.eas_name)
        row2.append(Globals.eas_name)
        # Poster
        row.append("")
        row2.append("")
        # Reviewer
        row.append("")
        row2.append("")
        # Number of attachments
        row.append(1)
        row2.append(1)
        # Posting flag
        row.append("TRUE")
        row2.append("TRUE")
        # Machine-generated voucher
        row.append("")
        row2.append("")
        # Deletion flag
        row.append("FALSE")
        row2.append("FALSE")
        # Voucher sequence number
        row.append("1544876875657--0")
        row2.append("1544876875657--0")
        # Unit
        row.append("")
        row2.append("")
        # Reference info
        row.append("")
        row2.append("")
        # Has cash flow
        row.append("")
        row2.append("")
        # Cash flow flag
        row.append(6)
        row2.append(6)
        # Business number
        row.append("")
        row2.append("")
        # Settlement method
        row.append("")
        row2.append("")
        # Settlement number
        row.append("")
        row2.append("")
        # Auxiliary-ledger abstract
        row.append(zhaiyao_value)
        row2.append("")
        # Accounting item 1
        row.append("银行账户")
        row2.append("")
        # Code 1
        row.append("001.E018")
        row2.append("")
        # Name 1
        row.append("工商银行古北新区支行1001282509300071274")
        row2.append("")
        # Accounting item 2
        row.append("现金流量项目")
        row2.append("")
        # Code 2
        row.append("111")
        row2.append("")
        # Name 2
        row.append("出租业务所收到的现金")
        row2.append("")
        # Items 3 through 8: accounting item, code and name are all empty
        for _ in range(18):
            row.append("")
            row2.append("")
        # Invoice number, exchange ticket number, customer, expense category,
        # payee, material, finance organization, supplier: all empty
        for _ in range(8):
            row.append("")
            row2.append("")
        # Auxiliary-ledger business date
        row.append(date_str)
        row2.append("")
        # Due date
        row.append("")
        row2.append("")
    for row in target_rows:
        target_data.append(row)
    # Handle the settlement row
    last_row = deepcopy(target_data[-1])
    title_row = target_data[0]
    jine_idx = title_row.index("原币金额")
    sum = 0
    for i in range(row_num):
        idx = i + 1
        row = target_data[idx]
        sum = sum + float(row[jine_idx])
    last_row[title_row.index("摘要")] = "收到 上海七宝万科停车场停车费-支付宝返款"
    last_row[title_row.index("科目")] = "1122.99"
    last_row[title_row.index("方向")] = "0"
    last_row[title_row.index("原币金额")] = float("%.2f" % (sum))
    last_row[title_row.index("借方金额")] = ""
    last_row[title_row.index("贷方金额")] = float("%.2f" % (sum))
    last_row[title_row.index("辅助账摘要")] = "收到 上海七宝万科停车场停车费-支付宝返款"
    last_row[title_row.index("核算项目1")] = "客户"
    last_row[title_row.index("编码1")] = "0503.E018"
    last_row[title_row.index("分录号")] = len(target_data)
    idx = title_row.index("名称1")
    last_row[idx] = "微信一点停"
    last_row[idx + 1] = ""
    last_row[idx + 2] = ""
    last_row[idx + 3] = ""
    target_data.append(last_row)
    save_data(self.out_put_filename, target_excel)
    # Fix the date format for the listed columns
    Globals.eval_date_format(self.out_put_filename, ["B", "C", "BN"])
def write_excel(fileInDesktopName, excel_object):
    if type(excel_object) == Excel:
        excel_data = excel_object.Serialization()
    else:
        excel_data = excel_object
    save_data(Globals.desktop_path + fileInDesktopName, excel_data)
def saveSheet(excel, report):
    excel = {sheet: excel[sheet], "Report": report}
    pe.save_data(fileName, excel)