def excel(data, name):
    book = Workbook()
    title_info = []
    for i in data:
        for k, v in i.items():
            title_info.append(k)
    title = list(set(title_info))

    if len(data) < 65535:
        # everything fits on a single sheet (.xls allows 65,536 rows per sheet)
        excel = book.add_sheet(name, cell_overwrite_ok=True)
        for x in range(len(title)):
            excel.write(0, x, title[x].decode('UTF-8'))
        for i in range(len(data)):
            for j in range(len(title)):
                excel.write(i + 1, j, data[i][title[j]])
    else:
        # split the rows across several sheets of at most 65,535 data rows each
        page = len(data) / 65535
        for k in range(1, page + 2):
            excel = book.add_sheet(name + '_' + str(k), cell_overwrite_ok=True)
            for x in range(len(title)):
                excel.write(0, x, title[x].decode('UTF-8'))
            start = 65535 * (k - 1)
            end = 65535 * k if k < page + 1 else len(data)
            for i in range(start, end):
                # the column index must be an integer, not the title string itself
                for j in range(len(title)):
                    excel.write(i - start + 1, j, data[i][title[j]])

    date = datetime.datetime.now().strftime('%Y%m%d')
    book.save('../static/' + name + '.' + date + '.xls')
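# A minimal, hedged sketch of the same chunking idea in isolation (the helper
# name `chunked_sheets` is hypothetical): the legacy .xls format caps a sheet
# at 65,536 rows, so one header row leaves 65,535 data rows per sheet.
from xlwt import Workbook

def chunked_sheets(rows, headers, name, rows_per_sheet=65535):
    book = Workbook()
    for page, start in enumerate(range(0, len(rows), rows_per_sheet), start=1):
        sheet = book.add_sheet('%s_%d' % (name, page), cell_overwrite_ok=True)
        for col, header in enumerate(headers):
            sheet.write(0, col, header)
        for offset, row in enumerate(rows[start:start + rows_per_sheet], start=1):
            for col, header in enumerate(headers):
                sheet.write(offset, col, row.get(header, ''))
    return book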
class ExcelWorkbook(object):

    def __init__(self, encoding='utf-8', style_compression=0):
        self.wb = Workbook(encoding, style_compression)
        self.sheets = []

    def sheet(self, sheet_name=''):
        """Get a sheet by name; if no sheets exist yet, create one."""
        if not self.sheets:
            return self.add_sheet(sheet_name)
        else:
            for sheet in self.sheets:
                if sheet.name == sheet_name:
                    return sheet

    def add_sheet(self, sheet_name='Sheet 1', headers=[], header_line_no=0,
                  use_borders=False, styleHeaders=None, styleDict={}, widths={}):
        ws = self.wb.add_sheet(sheet_name, cell_overwrite_ok=True)
        self.sheets.append(ExcelSheet(ws, headers, header_line_no, use_borders,
                                      styleHeaders, styleDict, widths))
        return self.sheets[-1]

    def save(self, filename):
        """Save the excel file, forcing an .xls extension."""
        if os.path.splitext(filename)[-1] not in ('.xls', '.xlsx'):
            filename = os.path.splitext(filename)[0] + '.xls'
        for sheet in iter(self):
            sheet.autoFit()
        self.wb.save(filename)
        return filename

    def __iter__(self):
        """Generator to iterate through sheets."""
        for sheet in self.sheets:
            yield sheet
def merge_synonym_counts():
    """
    Merge counts for synonyms.
    """
    with open('data/text/summary/2014.json', 'r') as wh:
        press_briefings = json.load(wh)

    from xlwt import Workbook
    book = Workbook()

    for synonyms in SYNONYMS:
        sheet = book.add_sheet('%s (+%i)' % (synonyms[0], len(synonyms)))

        header = sheet.row(0)
        for i, col in enumerate(['Week', 'Count']):
            header.write(i, col)

        for i, sunday in enumerate(all_sundays(2014)):
            row = sheet.row(i + 1)
            sunday = sunday.strftime('%Y-%m-%d')

            count = 0
            for word in synonyms:
                count += press_briefings[sunday].get(word, 0)

            for i, col in enumerate([sunday, count]):
                row.write(i, col)

    book.save('data/text/summary/synonyms.xls')
def render_to_response(self, context, **response_kwargs):
    from xlwt import Workbook
    book = Workbook()
    sheet1 = book.add_sheet(self.derive_title())

    fields = self.derive_fields()

    # build up our header row
    for col in range(len(fields)):
        field = fields[col]
        sheet1.write(0, col, unicode(self.lookup_field_label(dict(), field)))

    # then our actual values
    for row in range(len(self.object_list)):
        obj = self.object_list[row]

        for col in range(len(fields)):
            field = fields[col]
            value = unicode(self.lookup_field_value(dict(), obj, field))

            # skip the header
            sheet1.write(row + 1, col, value)

    # Create the HttpResponse object with the appropriate header.
    response = HttpResponse(mimetype='application/vnd.ms-excel')
    response['Content-Disposition'] = 'attachment; filename=%s' % self.derive_filename()
    book.save(response)
    return response
class ExcelWriter(object):

    def __init__(self):
        # grab excel file and columns
        self.wb_read = open_workbook('pids.xlsx')
        self.wb_write = Workbook()
        self.output = self.wb_write.add_sheet('PID and Names')

    def go(self):
        row_counter = 1
        for sheet in self.wb_read.sheets():
            for row in range(sheet.nrows):
                pid_to_query = sheet.cell(row, 3).value.replace('-', '')
                pid_to_query = str(pid_to_query)
                owner_name = pid_getter.get_pid(pid_to_query)
                print row_counter, "Query of PID", sheet.cell(row, 0).value, "Owner name = ", owner_name

                # write to new file
                # self.output.write(row_counter, 0, pid_to_query)
                # self.output.write(row_counter, 1, owner_name)

                # writing to a new file by copying everything else
                for col in range(sheet.ncols):
                    self.output.write(row_counter, col, sheet.cell(row, col).value)
                self.output.write(row_counter, sheet.ncols, owner_name)
                row_counter += 1

        # xlwt produces the legacy BIFF (.xls) format, so save once after the
        # loop with a .xls extension instead of re-saving on every row.
        self.wb_write.save('names.xls')
def process_test(request):
    start_date = request.GET.get('start', '')
    end_date = request.GET.get('end', '')
    now = dt.datetime.now().isocalendar()
    this_week_start, this_week_end = get_week_days(now[0], now[1])
    if start_date == '':
        start_date = this_week_start.strftime("%Y-%m-%d")
    if end_date == '':
        end_date = this_week_end.strftime("%Y-%m-%d")
    start_date = time.strptime(start_date, '%Y-%m-%d')
    start_date = dt.datetime.fromtimestamp(time.mktime(start_date))
    end_date = time.strptime(end_date, '%Y-%m-%d')
    end_date = dt.datetime.fromtimestamp(time.mktime(end_date))
    end_date = end_date + dt.timedelta(1)

    wb = Workbook()
    ws = wb.add_sheet('Sheetname')
    ws.write(0, 0, 'Firstname')
    ws.write(0, 1, 'Surname')
    ws.write(1, 0, 'Hans')
    ws.write(1, 1, 'Muster')

    fname = 'process_excel-testfile.xls'
    response = HttpResponse(mimetype="application/ms-excel")
    response['Content-Disposition'] = 'attachment; filename=%s' % fname
    wb.save(response)
    return response
def init_xls(sheetname='1'):
    book = Workbook()
    # fall back to '1' when an empty sheet name is passed in
    sheet = book.add_sheet(sheetname or '1')
    return book, sheet
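# Hedged usage sketch for init_xls above (the sheet name, cell value and
# filename are illustrative only):
book, sheet = init_xls('report')
sheet.write(0, 0, 'hello')
book.save('report.xls')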
def init():
    global ws, ws2, ws3, firstTime, wb, rs
    if os.path.exists(excelFileName):
        rb = open_workbook(excelFileName, formatting_info=True)
        rs = rb.sheet_by_index(0)
        wb = copy(rb)
        ws = wb.get_sheet(0)
        ws2 = wb.get_sheet(1)
        ws3 = wb.get_sheet(2)
        firstTime = False
    else:
        wb = Workbook()
        ws = wb.add_sheet('string')
        ws2 = wb.add_sheet('string_array')
        ws3 = wb.add_sheet('plurals')
        firstTime = True
def render(self, request, context, **response_kwargs):
    from xlwt import Workbook, XFStyle, easyxf
    w = Workbook(encoding='utf-8')
    ws = w.add_sheet('Report')
    style = XFStyle()
    row = 0
    heading_xf = easyxf('font:height 200; font: bold on; align: wrap on, vert centre, horiz center')
    ws.write(row, 0, '#', style)
    for col, fieldname in enumerate(context['report'].headers, start=1):
        ws.write(row, col, str(fieldname), heading_xf)
        ws.col(col).width = 5000
    ws.row(row).height = 500

    # we have to prepare all the styles before going into the loop
    # to avoid the "More than 4094 XFs (styles)" Error
    styles = self._get_styles(context)
    for rownum, data in enumerate(context['report']):
        ws.write(rownum + 1, 0, rownum + 1)
        for idx, (fieldname, rowvalue) in enumerate(data.items()):
            style = styles[rowvalue.column.name]
            try:
                ws.write(rownum + 1, idx + 1, with_widget(rowvalue, format='xls'), style)
            except Exception:
                # logger.warning("TODO refine this exception: %s" % e)
                ws.write(rownum + 1, idx + 1, smart_str(with_widget(rowvalue)), style)

    f = StringIO.StringIO()
    w.save(f)
    f.seek(0)
    return f.read()
def make_xls(sheetname='sheet_1', filename='filename.xls', columns=[], objs=[]):
    book = Workbook()
    sheet = book.add_sheet(sheetname)

    def index_of(key, list_2d):
        for i, one in enumerate(list_2d):
            if key == one[0]:
                return i

    attrs = []
    for inner_list in columns:
        attrs.append(inner_list[0])

    for i in xrange(len(columns)):
        sheet.write(0, i, columns[i][1])
        sheet.col(i).width = columns[i][2] * 256

    for i, obj in enumerate(objs, start=1):
        for attr in attrs:
            if isinstance(obj, dict):
                sheet.write(i, index_of(attr, columns), obj[attr])
            else:
                sheet.write(i, index_of(attr, columns), obj.__getattribute__(attr))

    book.save(filename)
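# Hedged usage sketch for make_xls above: each column spec is
# (attribute or dict key, header label, width in characters). The data and
# filename here are illustrative only.
make_xls(
    sheetname='users',
    filename='users.xls',
    columns=[('name', 'Name', 20), ('email', 'Email', 30)],
    objs=[{'name': 'Ada', 'email': 'ada@example.com'}],
)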
def write_excel(totals):
    """Write out the collected totals to an excel file"""
    workbook = Workbook()
    worksheet = workbook.add_sheet('New Sheet')

    # write the header for the first block
    total_hours = 0
    total_tasks = 0
    block_tasks, block_total = write_time_block(worksheet, totals)
    total_hours += block_total
    total_tasks += block_tasks

    block_tasks, block_total = write_time_block(worksheet, totals, block_tasks)
    total_hours += block_total
    total_tasks += block_tasks

    # write out the total hours
    worksheet.write(total_tasks + 6, 0, 'Monthly Total', summary_total_header)
    worksheet.write(total_tasks + 6, 1, total_hours, data_cell)

    # write out the user and date
    name = raw_input('Who is this report for? ')
    worksheet.write(total_tasks + 8, 0, 'Name: %s' % (name))
    worksheet.write(total_tasks + 9, 0, 'Date: %s' % (datetime.strftime(datetime.today(), '%m/%d/%Y')))

    # write the signature field
    worksheet.write(total_tasks + 8, 6, 'Signature:')

    # save the file to disk
    curpath = os.path.dirname(__file__)
    workbook.save(os.path.join(curpath, 'test.xls'))
def create_xls(data):
    work = Workbook(encoding='utf-8')
    work_sheet = work.add_sheet(u'账单')

    # table header
    work_sheet.write(0, 0, 'ID')
    work_sheet.write(0, 1, u'名字')
    work_sheet.write(0, 2, u'编码')
    work_sheet.write(0, 3, u'数量')
    work_sheet.write(0, 4, u'单价')
    work_sheet.write(0, 5, u'合计')
    work_sheet.write(0, 6, u'备注')

    i = 1
    total_price = 0
    for row in data:
        work_sheet.write(i, 0, str(i))
        work_sheet.write(i, 1, data[row]['name'])
        work_sheet.write(i, 2, data[row]['code'])
        work_sheet.write(i, 3, data[row]['number'])
        work_sheet.write(i, 4, data[row]['price'])
        work_sheet.write(i, 5, data[row]['total_price'])
        work_sheet.write(i, 6, data[row]['comment'])
        total_price += data[row]['number'] * data[row]['price']
        i = i + 1

    work_sheet.write(i, 4, u'总价:')
    work_sheet.write(i, 5, total_price)

    time_stamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
    file_name = "download_xls/bill_%s.xls" % time_stamp
    work.save(file_name)
    return file_name
def do_export(self):
    """
    Does the actual export. Called from a celery task.
    """
    book = Workbook()
    self.render_book(book)

    temp = NamedTemporaryFile(delete=True)
    book.save(temp)
    temp.flush()

    org_root = getattr(settings, 'SITE_ORGS_STORAGE_ROOT', 'orgs')
    filename = '%s/%d/%s/%s.xls' % (org_root, self.org_id, self.directory, random_string(20))

    default_storage.save(filename, File(temp))

    self.filename = filename
    self.save(update_fields=('filename',))

    subject = "Your export is ready"
    download_url = self.org.make_absolute_url(reverse(self.download_view, args=[self.pk]))

    send_email([self.created_by], subject, 'utils/email/export', {'download_url': download_url})

    # force a gc
    import gc
    gc.collect()
def output_mesg(company_lack):
    book = Workbook()
    sheet1 = book.add_sheet(u'1')
    i = 0
    num = 1
    for key, value in company_lack.items():
        for s, d in value.items():
            sheet1.write(i, 0, key)
            sheet1.write(i, num, s)
            sheet1.write(i, num + 1, d)
            i = i + 1
        num = 1
    book.save('4.xls')  # save the excel file

    book = xlrd.open_workbook('4.xls')
    print('----------------------------------------------------------------------------------------')
    print('----------------------------------------------------------------------------------------')
    print(u'计算完成')  # "computation finished"
    print('----------------------------------------------------------------------------------------')
    print('----------------------------------------------------------------------------------------')
    time.sleep(10)
def write_file(result_list, deal_date, company_name, filename):
    '''
    given a list, put it into an excel file.
    deal_date is a datetime; the row whose date matches it is rendered in bold.
    company_name and filename are self-explanatory
    '''
    w = Workbook()
    sheet = w.add_sheet(company_name)
    row = 2
    boldfont = easyxf(strg_to_parse='font: bold on')
    normalfont = easyxf(strg_to_parse='')
    sheet.write(0, 0, company_name)
    sheet.write(1, 0, 'Date')
    sheet.write(1, 1, 'Open')
    sheet.write(1, 2, 'Close')
    for line in result_list:
        elements = line.decode('utf8').split(',')
        date_string = elements[0]
        open_value = elements[1]
        close_value = elements[4]
        if datetime.strptime(date_string, '%Y-%m-%d') == deal_date:
            style = boldfont
        else:
            style = normalfont
        sheet.write(row, 0, date_string, style)
        sheet.write(row, 1, open_value, style)
        sheet.write(row, 2, close_value, style)
        row += 1
        print(date_string, open_value, close_value)
    w.save(filename)
def main():
    timestamp = strftime("%Y-%m-%d_%H-%M-%S")
    out_dir = 'outputs/' + timestamp + '_xl'
    os.makedirs(out_dir)
    out_file = out_dir + '/' + timestamp + '_test.xls'

    data_sets = []
    data_sets.append(DataSet('dss1', [
        [Cell.from_link('one', 'http://www.google.com'), Cell.from_display('two')],
        [Cell.from_display('one2'), Cell.from_display('two2')],
    ]))
    data_sets.append(DataSet('dss2', [
        [Cell.from_display('oneb'), Cell.from_display('twob')],
    ]))

    book = Workbook()
    for ds in data_sets:
        # my_print_dataset(ds)
        add_data_set_sheet(ds, book)

    # sheet.write(1, 0, Formula('HYPERLINK("http://www.google.com";"Python")'), style)
    # if len(row_data) > 0:
    book.save(out_file)
    os.system("start " + out_file)
def write_datatests(crisis_def_list, location="./out", suffix=""):
    book = Workbook()
    sheet1 = book.add_sheet('Sheet 1')
    result_crises = combine_crises(crisis_def_list)
    row_num = 0
    for country_code in sort(result_crises.keys()):
        years = sort(list(result_crises[country_code]))
        try:
            len(years)
        except:
            print(years)
            print(country_code)
        sheet1.write(row_num, 0, country_code)
        crisis_row = sheet1.row(row_num)
        crisis_row.write(1, "crisis")
        for j in range(len(years)):
            crisis_row.write(j + 2, years[j])
        normal_row = sheet1.row(row_num + 1)
        normal_row.write(1, "normal")
        normal_years = pick_normal_years(years)
        for j in range(len(normal_years)):
            normal_row.write(j + 2, normal_years[j])
        row_num += 2
    saveloc = os.path.expanduser(location) + suffix + ".xls"
    book.save(saveloc)
def create_excel_file(db, kurzus):
    book = Workbook(encoding='utf-8')
    # sheet = book.add_sheet('{0} - {1} - {2}'.format(kurzus['nev'], kurzus['nap'], kurzus['sav']))
    sheet = book.add_sheet('Névsor')
    sheet = create_shit_head(sheet, kurzus_infok)
    kurzus_hallgatok = get_kurzus_hallgatok(db, kurzus['id'])
    sorszam = 1
    # build the (identical) border style once instead of once per cell
    thick_border = easyxf('borders: left thick, right thick, top thick, bottom thick;')
    for user in kurzus_hallgatok:
        sheet.write(sorszam + 4, 0, sorszam, thick_border)
        sheet.write(sorszam + 4, 1, user['neptun'], thick_border)
        sheet.write(sorszam + 4, 2, user['nev'], thick_border)
        for i in xrange(3, 20):
            sheet.write(sorszam + 4, i, '', thick_border)
        sorszam = sorszam + 1
    book.save('{0}_{1}_{2}.xls'.format(ekezet_eltunteto(kurzus['nev'].lower()),
                                       ekezet_eltunteto(kurzus['nap'].lower()),
                                       kurzus['sav']))
def print_responseTime_persecond(self, folder="."):
    book = Workbook()
    sheet1 = book.add_sheet("response time")
    sheet2 = book.add_sheet("transactions")
    title = "Test Name: %s" % (self.name)
    sheet1.write(0, 0, title)
    sheet2.write(0, 0, title)
    column = 1
    for id in self.test_ids:
        sheet1.write(1, column, "TEST" + id)
        sheet2.write(1, column, "TEST" + id)
        column += 1
    results = self.perSecondResult
    rows = range(1, self.max_second / TIME_INTERVAL + 1)
    for row in rows:
        sheet1.write(row + 1, 0, row * TIME_INTERVAL)
        sheet2.write(row + 1, 0, row * TIME_INTERVAL)
        column = 1
        for id in self.test_ids:
            key = id + "-" + str(row * TIME_INTERVAL)
            if results.has_key(key):
                result = results[key]
                sheet1.write(row + 1, column, result.getAvg())
                sheet2.write(row + 1, column, result.transactions)
            column += 1
    book.save(folder + "/" + self.name + "_bytime.xls")
def tmz():
    from xlwt import Workbook
    rates = tst.EvaluatorRate.query.join('evaluator', 'service_provider', 'contact').\
        filter_by(active=True).all()
    language_dict = {}
    for rate in rates:
        language_dict.setdefault(rate.language, []).\
            append(rate.evaluator.service_provider.contact.time_zone)
    distro = Workbook()
    row = 0
    active = distro.add_sheet('Active Evaluators')
    for language in language_dict:
        tmzs = language_dict[language]
        tmzs = list(set(tmzs))
        tmz_str = ''
        for tmz in tmzs:
            tmz_str = tmz_str + '%s;' % tmz
        tmz_str = tmz_str[:-1]
        active.write(row, 0, language.name)
        active.write(row, 2, tmz_str)
        row += 1
    distro.save('evaluators.xls')
def write_file(lis, book):
    in_book = open_workbook(book)
    in_sheet = in_book.sheet_by_index(0)
    out_book = Workbook()
    sheet1 = out_book.add_sheet('Sheet 1')
    sheet1.write(0, 0, 'Chr #')
    sheet1.write(0, 1, 'Range')
    sheet1.write(0, 2, 'Length')
    sheet1.write(0, 3, 'Chr location')
    sheet1.write(0, 4, 'CNV Type')
    sheet1.write(0, 5, 'Copy #')
    sheet1.write(0, 6, 'Confidence')
    sheet1.write(0, 7, 'Precision')
    sheet1.write(0, 8, 'Gene')
    row = int(in_sheet.nrows)
    col = int(in_sheet.ncols)
    row_num = 0  # to keep track of the row number in the new file
    for tup in lis:
        for r in range(0, row):
            nchr = in_sheet.cell(r, 0).value
            if tup[0] == nchr:
                start = int((in_sheet.cell(r, 1).value).split('-')[0])
                if start == tup[1]:
                    row_num += 1
                    for c in range(0, col):
                        value = in_sheet.cell(r, c).value
                        sheet1.write(row_num, c, value)
    return out_book
def configErrorReporting(headers):
    """
    Configure the import exception log, an Excel spreadsheet in the same
    format as the input spreadsheet but with an extra column, "Error",
    containing the error message.

    Can only be called after the first row of the input Excel spreadsheet
    is read to initialize the global, "headers".
    """
    dateFmt = easyxf(
        'font: name Arial, bold True, height 200;',
        # 'borders: left thick, right thick, top thick, bottom thick;',
        num_format_str='MM-DD-YYYY'
    )
    headerFmt = easyxf(
        'font: name Arial, bold True, height 200;',
    )

    global errorsWorkbook, erroutSheet, erroutRow
    errorsWorkbook = Workbook()
    erroutSheet = errorsWorkbook.add_sheet('Import Errors')
    for colnum in range(0, len(headers)):
        erroutSheet.write(0, colnum, headers[colnum][0],
                          tern(headers[colnum][0] == xlrd.XL_CELL_DATE, dateFmt, headerFmt))
    # Add extra column for error message
    erroutSheet.write(0, len(headers), "Error", headerFmt)
    erroutSheet.flush_row_data()
    erroutRow = 1
    errorsWorkbook.save('errors.xls')
def exportToExcel(self,objectProject): book = Workbook(); sheet1 = book.add_sheet('Sheet 1') if( objectProject): i=0; row1 = sheet1.row(i) ; row1.write(0, ('ประเภท').decode('UTF8') ); row1.write(1, ('ชื่อโครงการ').decode('UTF8')); row1.write(2, ('รายละเอืยด').decode('UTF8') ); row1.write(3, ('งบประมาณรวม').decode('UTF8') ); row1.write(4, ('งบประมาณ').decode('UTF8') ); row1.write(5, ('เงินบำรุง').decode('UTF8') ); row1.write(6, ('งบประมาณอื่น').decode('UTF8') ); row1.write(7, ('งบประมาณอื่นจาก').decode('UTF8') ); row1.write(8, ('ผู้รับผิดชอบ').decode('UTF8') ); row1.write(9, ('กลุ่ม').decode('UTF8') ); row1.write(10, ('หน่วย/งาน').decode('UTF8') ); i=i+1; style = XFStyle(); style.num_format_str = '#,##0.00'; for value in objectProject: row1 = sheet1.row(i) ; row1.write(0, value.get('project_type').decode('UTF8') ); row1.write(1, value.get('project_name').decode('UTF8') ); row1.write(2, value.get('detail').decode('UTF8') ); row1.write(3, value.get('allBudget') ,style ); row1.write(4, value.get('project_budget' ) ,style ); row1.write(5, value.get('maintenance_funds_budget'),style ); row1.write(6, value.get('budget_other') ,style ); if(value.get('budget_other_from')): row1.write(7, value.get('budget_other_from').decode('UTF8') ); if(value.get('user_name')): row1.write(8, value.get('user_name').decode('UTF8') ); row1.write(9, value.get('division').decode('UTF8') ); row1.write(10, value.get('section').decode('UTF8') ); i=i+1; dirTempFile = gettempdir() + _os.sep + str('simple.xls'); book.save(dirTempFile); #book.save(TemporaryFile()); return dirTempFile;
def write_report_xl(self, itfs):
    headers = ("S/N", "LC NUMBER", "CUSTOMER NAME", "CCY", "AMOUNT", 'RELATIONSHIP MANAGER')
    xl = Workbook()
    self.xlsh = xl.add_sheet("ITF-REPORT")
    row_in_xlsh = 0
    self.write_xl_row(("", "", "ITF INTEREST REPORT",), row_in_xlsh)
    row_in_xlsh += 1
    self.write_xl_row(headers, row_in_xlsh)
    row_in_xlsh += 1
    sequence = 1
    for itf in itfs:
        self.write_xl_row(
            (sequence, itf.lc_number, itf.customer.name, itf.currency(), itf.amount, itf.rm_name(),),
            row_in_xlsh)
        row_in_xlsh += 1
        sequence += 1
    xl.save(open(itf_report_xl_path, 'wb'))
    return '%d Itfs reported for the given period' % len(itfs)
def write(self, collection, filename=None, fail_silently=False): # create new book book = Workbook() # write dataset for name, dataset in collection.items(): self._write_dataset(dataset, book) # write peakset if there are more than single dataset if len(dataset) > 1: sheet = book.add_sheet('peakset') offsets = [0, 1] for name, dataset in collection.items(): # write classify name # Note: +1 for heading line sheet.write(offsets[0]+1, 0, get_sheet_name(name)) # write peakset self._write_peakset(dataset, offsets, sheet, self.peakset_basecolumn, self.peakset_method, self.peakset_where_function) # update offsets offsets[0] += len(dataset) + 1 # save book.save(filename or self.default_filename)
def write(self, dirname=None): if dirname is None: dirname = self.description self.create_workbooks() dir = self.safe_mkdir(dirname) print 'Saving annotation in directory %s' % dir for workbook_name, sheets in self.workbooks.items(): workbook_name = self.escape_name(workbook_name) workbook = Workbook() for sheet_name, sentences in sorted(sheets.items()): sheet_name = self.escape_name(sheet_name) sheet = workbook.add_sheet(sheet_name) sheet.col(1).width = 0x3000 sheet.col(3).width = 0x3000 for index, sentence in enumerate(sentences): self.write_splitted(sentence, sheet, index) meta_sheet = workbook.add_sheet('meta') meta_sheet.write(0, 0, self.description) meta_sheet.write(1, 0, str(datetime.now())) outfile = os.path.join(dir, '%s.xls' % workbook_name) workbook.save(outfile) sentence_file_name = os.path.join(dir, 'sentences.json') write_sentence_file(self.sentences, sentence_file_name)
class XLWriter(BookWriter):
    """xls, xlsx and xlsm writer"""

    def __init__(self, file, encoding='ascii', style_compression=2, **keywords):
        """Initialize a xlwt work book

        :param encoding: content encoding, defaults to 'ascii'
        :param style_compression: undocumented, but 2 is magically better,
            reference: `style_compression
            <https://groups.google.com/forum/#!topic/python-excel/tUZkMRi8ITw>`_
        """
        BookWriter.__init__(self, file, **keywords)
        self.wb = Workbook(style_compression=style_compression, encoding=encoding)

    def create_sheet(self, name):
        """Create a xlwt sheet writer"""
        return XLSheetWriter(self.wb, None, name)

    def close(self):
        """This call actually saves the file"""
        self.wb.save(self.file)
def fwriteinexcel(xlsname, results_gen, results_load_lambda, results_branch):
    book = Workbook()
    Sheet1 = book.add_sheet('Sheet1')
    Sheet1.write(0, 0, 'Bus')
    Sheet1.write(0, 1, 'Generation (MW)')
    Sheet1.write(0, 2, 'Load (MW)')
    Sheet1.write(0, 3, 'Lambda (€/MWh)')
    Sheet1.write(0, 5, 'From Bus')
    Sheet1.write(0, 6, 'To Bus')
    Sheet1.write(0, 7, 'P (MW)')
    for i in range(len(results_gen)):
        Sheet1.write(i + 1, 0, results_gen[i, 0])
        Sheet1.write(i + 1, 1, results_gen[i, 1])
    compteur = 0
    for j in range(len(results_load_lambda)):
        if j != 15 and j != 16 and j != 32 and j != 38 and j != 39:
            Sheet1.write(compteur + 1, 2, results_load_lambda[j, 1])
            Sheet1.write(compteur + 1, 3, results_load_lambda[j, 2])
            compteur += 1
    for k in range(len(results_branch)):
        Sheet1.write(k + 1, 5, results_branch[k, 0])
        Sheet1.write(k + 1, 6, results_branch[k, 1])
        Sheet1.write(k + 1, 7, results_branch[k, 2])
    book.save(xlsname)
def fog1270():
    """
    Create an xls with active evaluators' emails by language
    """
    from xlwt import Workbook
    rates = tst.EvaluatorRate.query.join('evaluator', 'service_provider', 'contact').\
        filter_by(active=True).all()
    language_dict = {}
    for rate in rates:
        language_dict.setdefault(rate.language, []).\
            append(rate.evaluator.service_provider.contact.get_email_address())
    distro = Workbook()
    row = 0
    active = distro.add_sheet('Active Evaluators')
    for language in language_dict:
        emails = language_dict[language]
        emails = list(set(emails))
        email_str = ''
        for email in emails:
            email_str = email_str + '%s;' % email
        email_str = email_str[:-1]
        active.write(row, 0, language.name)
        active.write(row, 2, email_str)
        row += 1
    distro.save('evaluators.xls')
def write_entries_to_xl(self, contras): xl = Workbook() self.xlsh = xl.add_sheet("POSTINGS") self.row_in_xlsh = 0 self.write_batch_no_and_headers() sequence = 1 total_dr, total_cr = 0, 0 for contra in contras: self.write_xl_row( (sequence, contra.account.number, abs(contra.amount), contra.dr_cr(), contra.entry_code(), contra.narration, contra.ref, contra.branch_for_itf_int,), self.row_in_xlsh) sequence += 1 self.row_in_xlsh += 1 total_dr += contra.amount if contra.dr_cr() == 'DR' else 0 total_cr += contra.amount if contra.dr_cr() == 'CR' else 0 contra.entry.time_processed_for_posting = datetime.now() contra.entry.save() return xl
from Crypto.Cipher import ARC2
from datetime import datetime
from Crypto import Random

# Encryption
from xlwt import Workbook

wb = Workbook()
output = wb.add_sheet('ARC2')
output.write(0, 0, 'Message Size')
output.write(0, 1, 'Encryption time')
output.write(0, 2, 'Decryption time')
output.write(0, 3, 'Cipher text Size')

# print(message)
# time1 = time.time()


def encryption(message):
    start = datetime.now()
    cipher = ARC2.new(key, ARC2.MODE_CFB, iv)
    cipher_text = cipher.encrypt(message)
    end = datetime.now()
    total = end - start
    total = total.total_seconds()
    print("Encryption time:", total)
    return cipher_text, total

# print(cipher_text)
# time2 = time.time()
# print((time2 - time1) * 1000)
from bs4 import BeautifulSoup
import requests
from xlwt import Workbook
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
import time
import code1
import re
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import validators

wb = Workbook()
wb1 = Workbook()
sheet1 = wb.add_sheet('sheet1')
sheet2 = wb1.add_sheet('sheet1')
sheet1.write(0, 0, 'Name')
sheet2.write(0, 0, 'Name')
sheet1.col(0).width = 8000
sheet2.col(0).width = 8000
sheet1.write(0, 1, 'Website')
sheet2.write(0, 1, 'Phone Number')
sheet2.col(1).width = 8000
sheet1.col(1).width = 12000
sheet2.write(0, 2, 'Email')
sheet2.col(2).width = 10000

global count1, count2
count2 = count1 = 0
worddic[word][name].append(lm) break return worddic #read file filelines = read_file(nameoffile) tiernames = get_tier_names(filelines) #creates worddic tierslist = strip_tiers(filelines, tiernames) worddic = word_dic(tierslist, tiernames) #print(worddic, '\n') #writes worddic to sheet wb = Workbook() sheet1 = wb.add_sheet('Textgrid') numtiers = len(tiernames) #for every interval tier for wordi, elem in enumerate(worddic): #write interval tier name, elem, starttime,endtime sheet1.write(numtiers * wordi * 2, 0, tiernames[0]) j = 1 for tup in elem: sheet1.write(numtiers * wordi * 2, j, tup) j += 1 #for each point tier, write landmark and time row for namei, name in enumerate(tiernames): if namei != 0:
def teste(args, semente=None): numJogadores = 101 numJogadas = 10000 memoria = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] jogadores = [] vuns = [] vzeros = [] somaTotal = [] random.seed(semente) for i in range(numJogadores): nome = "PerceptronSimples-" + str(i) p = Perceptron(numInputs=13, taxaAprendizado=args, semente=random.randint(0, 1000)) jogador = Jogador(nome, p) jogadores.append(jogador) #Essa parte e usada para escrever a planilha wb = Workbook() sheet1 = wb.add_sheet('tabela') for i in range(1, numJogadas + 1): sheet1.write(i, 0, "jogada " + str(i)) sheet1.write(0, 1, "Uns") sheet1.write(0, 2, "Zeros") sheet1.write(0, 3, "Soma") sheet1.write(0, 4, "Diferença") #[fim] planilha for i in range(numJogadas): soma = 0 jogadas = [] for j in range(numJogadores): jogada = jogadores[j].jogar(memoria[-13:]) jogadas.append(jogada) soma += jogada minoria = -np.sign(soma) for j in range(numJogadores): if jogadas[j] == minoria: jogadores[j].addVitorias(i) else: jogadores[j].treinar(minoria, memoria[-13:]) if (minoria < 0): vuns.append(abs(math.floor(soma / 2) - (numJogadores - 1) / 2)) vzeros.append( abs(math.floor(soma / 2) + ((numJogadores - 1) / 2) + 1)) else: vuns.append( abs(math.ceil(soma / 2) - ((numJogadores - 1) / 2) - 1)) vzeros.append(abs(math.ceil(soma / 2) + ((numJogadores - 1) / 2))) #Usado para planilha sheet1.write(i + 1, 1, vuns[i]) sheet1.write(i + 1, 2, vzeros[i]) sheet1.write(i + 1, 3, vuns[i] + vzeros[i]) sheet1.write(i + 1, 4, int(abs(soma))) #[fim] planilha memoria.append(minoria) somaTotal.append(abs(soma)) print(soma) #os.system('clear') #salva a planilha wb.save('resultados/' + str(numJogadas) + ' - 101jogadores - 13inpts - ' + str(args) + 'eta - 02.ods') return somaTotal
def render_excel(filename, title_list, data_list, file_extension='.xls'): if file_extension == '.csv': response = HttpResponse(mimetype='text/csv') response['Content-Disposition'] = 'attachment; filename=' + filename csv_writer = csv.writer(response) csv_writer.writerow(title_list) for row_item_list in data_list: for i in xrange(0, len(row_item_list)): if row_item_list[i]: if isinstance(row_item_list[i], datetime.datetime): row_item_list[i] = row_item_list[i].strftime( '%Y-%m-%d %H:%M:%S') elif isinstance(row_item_list[i], datetime.date): row_item_list[i] = row_item_list[i].strftime( '%Y-%m-%d') elif isinstance(row_item_list[i], datetime.time): row_item_list[i] = row_item_list[i].strftime( '%H:%M:%S') if isinstance(row_item_list[i], basestring): row_item_list[i] = row_item_list[i].encode("utf-8") csv_writer.writerow(row_item_list) else: import StringIO output = StringIO.StringIO() export_wb = Workbook() export_sheet = export_wb.add_sheet('Sheet1') col_idx = 0 for col_title in title_list: export_sheet.write(0, col_idx, "%s" % col_title) col_idx += 1 row_idx = 1 for row_item_list in data_list: col_idx = 0 for cell_value in row_item_list: if cell_value: cell_value_is_date = False if isinstance(cell_value, datetime.datetime): cell_value = xlrd.xldate.xldate_from_datetime_tuple( (cell_value.year, cell_value.month, cell_value.day, cell_value.hour, cell_value.minute, cell_value.second), 0) cell_value_is_date = True elif isinstance(cell_value, datetime.date): cell_value = xlrd.xldate.xldate_from_date_tuple( (cell_value.year, cell_value.month, cell_value.day), 0) cell_value_is_date = True elif isinstance(cell_value, datetime.time): cell_value = xlrd.xldate.xldate_from_time_tuple( (cell_value.hour, cell_value.minute, cell_value.second)) cell_value_is_date = True elif isinstance(cell_value, models.Model): cell_value = str(cell_value) if cell_value_is_date: s = XFStyle() s.num_format_str = 'M/D/YY' export_sheet.write(row_idx, col_idx, cell_value, s) else: export_sheet.write(row_idx, col_idx, cell_value) col_idx += 1 row_idx += 1 export_wb.save(output) output.seek(0) str_out = output.getvalue() response = HttpResponse(str_out) response['Content-Type'] = 'application/vnd.ms-excel' response['Content-Disposition'] = 'attachment; filename=' + filename return response
others += 1 total = numNouns + numVerbs + numMods print("Number of nouns: " + str(numNouns)) print("Number of verbs: " + str(numVerbs)) print("Number of modifiers: " + str(numMods)) print("Percentage of nouns: " + str(numNouns / total)) print("Percentage of verbs: " + str(numVerbs / total)) print("Percentage of modifiers: " + str(numMods / total)) # print("Number of others: " + str(others)) else: print("Enter what you would like to call the .xls file: ") title = input() statsWB = Workbook() #creates a new Workbook sheet1 = statsWB.add_sheet('Sheet 1') #creates sheet1 sheet # (row, column) sheet1.write(0, 1, "% Nouns") sheet1.write(0, 2, "% Verbs") sheet1.write(0, 3, "% Modifiers (Adj & Adv)") emotions = ["happy", "sad", "informative"] row = 1 for emot in emotions: filename = emot + ".txt" f = open(filename, 'r') raw = f.read() tokens = nltk.word_tokenize(raw)
def main(): usdinr = [] usdinr_change = [] dates = [] diffrates = [] ratesD = [] ratesF = [] # expiry in 12 months T = 12 wb = excel.open_workbook(filename='India_data.xlsx') wb1 = Workbook() ws1 = wb1.add_sheet('Prices') # code outputs tons of data to an excel spreadsheet including prices using # 1. Vanilla monte carlo # 2. Antithetic monte carlo # 3. Black Scholes Model # and also outputs the standard error of these price estimations # We simulate 4 different paths with strike prices a few std's away from the actual prevailing price on the date. # The labels below aren't accurate. We settled on different sigma values in the final simulation. ws1.write(0, 0, 'Date') ws1.write(0, 1, 'USDINR') ws1.write(0, 2, 'Vanilla K=-1sigma') ws1.write(0, 3, 'Vanilla K=-0.5sigma') ws1.write(0, 4, 'Vanilla K=+0.5sigma') ws1.write(0, 5, 'Vanilla K=+1sigma') ws1.write(0, 6, 'Antithetic K=-1sigma') ws1.write(0, 7, 'Antithetic K=-0.5sigma') ws1.write(0, 8, 'Antithetic K=+0.5sigma') ws1.write(0, 9, 'Antithetic K=+1sigma') ws1.write(0, 10, 'BS K=-1sigma') ws1.write(0, 11, 'BS K=-0.5sigma') ws1.write(0, 12, 'BS K=+0.5sigma') ws1.write(0, 13, 'BS K=+1sigma') ws1.write(0, 14, 'Strike Price K=-1sigma') ws1.write(0, 15, 'Strike Price K=-0.5sigma') ws1.write(0, 16, 'Strike Price K=+0.5sigma') ws1.write(0, 17, 'Strike Price K=+1sigma') ws1.write(0, 18, 'Std Error Vanilla K=-1sigma') ws1.write(0, 19, 'Std Error Vanilla K=-0.5sigma') ws1.write(0, 20, 'Std Error Vanilla K=+0.5sigma') ws1.write(0, 21, 'Std Error Vanilla K=+1sigma') ws1.write(0, 22, 'Std Error Antithetic K=-1sigma') ws1.write(0, 23, 'Std Error Antithetic K=-0.5sigma') ws1.write(0, 24, 'Std Error Antithetic K=+0.5sigma') ws1.write(0, 25, 'Std Error Antithetic K=+1sigma') # cell co-ordinates work like array indexes within the spreadsheet for i in range(2, 121): single_usd_inr = wb.sheet_by_name("Sheet1").cell_value(i, 3) usdinr.append(single_usd_inr) usdinr_change.append( math.log(1 + wb.sheet_by_name("Sheet1").cell_value(i, 4))) single_date = wb.sheet_by_name("Sheet1").cell_value(i, 0) ws1.write(i, 0, single_date) ws1.write(i, 1, single_usd_inr) dates.append( datetime.fromordinal( datetime(1900, 1, 1).toordinal() + int(single_date) - 2)) ratesD.append( math.log(1 + wb.sheet_by_name("Sheet1").cell_value(i, 1) / 100) / 12) ratesF.append( math.log(1 + wb.sheet_by_name("Sheet1").cell_value(i, 9) / 100) / 12) diffrates.append(ratesD[i - 2] - ratesF[i - 2]) optionPricesVanilla = [] optionPricesAntithetic = [] optionPricesBS = [] for i in range(12, len(dates) - T): sigma1 = np.std(usdinr_change[i - 12:i - 1]) sigma = np.std(usdinr[i - 12:i - 1]) mean = np.average(usdinr_change[i - 12:i - 1]) # Strike prices are decided as +/- [0.75, 0.25] sigma from spot price. # Since this is a simulation, we calculate the option price through simulated paths and compare it against the # actual strike price during the selected expiry date to decide if the option is exercised or not. 
K = [ usdinr[i] - 0.75 * sigma * math.sqrt(T), usdinr[i] - 0.25 * sigma * math.sqrt(T), usdinr[i] + 0.25 * sigma * math.sqrt(T), usdinr[i] + 0.75 * sigma * math.sqrt(T) ] ws1.write(i, 14, K[0]) ws1.write(i, 15, K[1]) ws1.write(i, 16, K[2]) ws1.write(i, 17, K[3]) optionPrice = [] stderr = [] for k in K: optionPrice_single, stderr_single = price_asian_option_vanilla( usdinr[i], ratesD[i], ratesF[i], diffrates[i], sigma1, T, 100, 120, k) optionPrice.append(optionPrice_single) stderr.append(stderr_single) optionPricesVanilla.append(optionPrice) ws1.write(i, 2, optionPrice[0]) ws1.write(i, 3, optionPrice[1]) ws1.write(i, 4, optionPrice[2]) ws1.write(i, 5, optionPrice[3]) ws1.write(i, 18, stderr[0]) ws1.write(i, 19, stderr[1]) ws1.write(i, 20, stderr[2]) ws1.write(i, 21, stderr[3]) optionPrice = [] stderr = [] for k in K: optionPrice_single, stderr_single = price_asian_option_antithetic( usdinr[i], ratesD[i], ratesF[i], diffrates[i], sigma1, T, 100, 120, k) optionPrice.append(optionPrice_single) stderr.append(stderr_single) optionPricesAntithetic.append(optionPrice) ws1.write(i, 6, optionPrice[0]) ws1.write(i, 7, optionPrice[1]) ws1.write(i, 8, optionPrice[2]) ws1.write(i, 9, optionPrice[3]) ws1.write(i, 22, stderr[0]) ws1.write(i, 23, stderr[1]) ws1.write(i, 24, stderr[2]) ws1.write(i, 25, stderr[3]) optionPrice = [ price_option_BS(usdinr[i], ratesD[i], ratesF[i], diffrates[i], sigma1, T, 100, k) for k in K ] optionPricesBS.append(optionPrice) ws1.write(i, 10, optionPrice[0]) ws1.write(i, 11, optionPrice[1]) ws1.write(i, 12, optionPrice[2]) ws1.write(i, 13, optionPrice[3]) optionPricesVanilla = np.array(optionPricesVanilla) optionPricesAntithetic = np.array(optionPricesAntithetic) optionPricesBS = np.array(optionPricesBS) wb1.save('Option Prices.xls') # finally, we plot all the different option prices from our simulations. 
axis1 = plotter.subplot(2, 2, 1) axis1.plot_date(dates[12:len(dates) - T], optionPricesVanilla[:, 0], label="-2sigma", linestyle='solid', marker=',') axis1.plot_date(dates[12:len(dates) - T], optionPricesVanilla[:, 1], label="-1sigma", linestyle='solid', marker=',') axis1.plot_date(dates[12:len(dates) - T], optionPricesVanilla[:, 2], label="+1sigma", linestyle='solid', marker=',') axis1.plot_date(dates[12:len(dates) - T], optionPricesVanilla[:, 3], label="+2sigma", linestyle='solid', marker=',') axis1.set_ylabel('Option Prices') axis1.legend() axis2 = axis1.twinx() axis2.plot_date(dates, usdinr, label='USDINR', linestyle='solid', marker=',') axis2.set_ylabel('USD / INR Prices') axis2.legend() plotter.title('Vanilla Monte Carlo Simulation') axis1 = plotter.subplot(2, 2, 2) axis1.plot_date(dates[12:len(dates) - T], optionPricesBS[:, 0], label="-2sigma", linestyle='solid', marker=',') axis1.plot_date(dates[12:len(dates) - T], optionPricesBS[:, 1], label="-1sigma", linestyle='solid', marker=',') axis1.plot_date(dates[12:len(dates) - T], optionPricesBS[:, 2], label="+1sigma", linestyle='solid', marker=',') axis1.plot_date(dates[12:len(dates) - T], optionPricesBS[:, 3], label="+2sigma", linestyle='solid', marker=',') axis1.set_ylabel('Option Prices') axis1.legend() # axis2 = axis1.twinx() # axis2.plot_date(dates, usdinr, label='USDINR', linestyle='solid', marker=',') # , secondary_y=True) # axis2.set_ylabel('USD / INR Prices') # axis2.legend() plotter.title('Black Scholes Calculation') plotter.subplot(2, 2, 3) plotter.plot_date(dates[12:len(dates) - T], optionPricesAntithetic[:, 0], label="-2sigma", linestyle='solid', marker=',') plotter.plot_date(dates[12:len(dates) - T], optionPricesAntithetic[:, 1], label="-1sigma", linestyle='solid', marker=',') plotter.plot_date(dates[12:len(dates) - T], optionPricesAntithetic[:, 2], label="+1sigma", linestyle='solid', marker=',') plotter.plot_date(dates[12:len(dates) - T], optionPricesAntithetic[:, 3], label="+2sigma", linestyle='solid', marker=',') plotter.legend() plotter.title('Antithetic Monte Carlo Simulation') plotter.ylabel('Option Prices') plotter.show()
def write_excel(data):
    file_w = Workbook()
    sheet1 = file_w.add_sheet(u'Data', cell_overwrite_ok=True)  # create the sheet
    write_data(data, sheet1)
    file_w.save('data.xls')
    return 0
#!/usr/bin/env python3
import glob
import os
import sys
from datetime import date

from xlrd import open_workbook, xldate_as_tuple
from xlwt import Workbook

input_folder = sys.argv[1]
output_file = sys.argv[2]

output_workbook = Workbook()
output_worksheet = output_workbook.add_sheet('sums_and_averages')
all_data = []
sales_column_index = 3
header = [
    'workbook', 'worksheet', 'worksheet_total', 'worksheet_average',
    'workbook_total', 'workbook_average'
]
all_data.append(header)

for input_file in glob.glob(os.path.join(input_folder, '*.xlsx')):
    with open_workbook(input_file) as workbook:
        list_of_totals = []
        list_of_numbers = []
        workbook_output = []
        for worksheet in workbook.sheets():
            total_sales = 0
            number_of_sales = 0
            worksheet_list = []
            worksheet_list.append(os.path.basename(input_file))
    mysql_db.commit()  # tail of the preceding (elided) function


def get_all_wenshu():
    logger.info("开始遍历裁判文书")  # start iterating over the judgement documents
    cur = db["judgement_wenshu"].find({})
    for doc in cur:
        for city_ in all_city:
            if city_ in doc.get("case_name", "") or city_ in doc.get("doc_content", ""):
                dict_all[city_] += 1
    logger.info("结束遍历裁判文书")  # finished iterating over the judgement documents


if __name__ == "__main__":
    w = Workbook()
    today = datetime.date.today()
    yesterday = datetime.date.today() - datetime.timedelta(days=1)
    all_city = gansu + xinjiang + qinghai + ningxia + shanxi
    dict_all = {}
    for city in all_city:
        dict_all[city] = 0
    get_all_wenshu()
    dict_b = OrderedDict()
    dict_b["甘肃"] = gansu
    dict_b["新疆"] = xinjiang
    dict_b["青海"] = qinghai
    dict_b["宁夏"] = ningxia
    dict_b["陕西"] = shanxi
    mysql_db = get_conn()
    mysql_cur = mysql_db.cursor()
from datetime import date
from xlwt import Workbook, XFStyle, Borders, Pattern, Font

fnt = Font()
fnt.name = 'Arial'

borders = Borders()
borders.left = Borders.THICK
borders.right = Borders.THICK
borders.top = Borders.THICK
borders.bottom = Borders.THICK

pattern = Pattern()
pattern.pattern = Pattern.SOLID_PATTERN
pattern.pattern_fore_colour = 0x0A

style = XFStyle()
style.num_format_str = 'YYYY-MM-DD'
style.font = fnt
style.borders = borders
style.pattern = pattern

book = Workbook()
sheet = book.add_sheet('A Date')
sheet.write(1, 1, date(2009, 3, 18), style)
book.save('date.xls')
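# A hedged, roughly equivalent version of the style above using xlwt's easyxf
# shorthand (colour index 0x0A corresponds to 'red'); output filename is
# illustrative only.
from datetime import date
from xlwt import Workbook, easyxf

style = easyxf(
    'font: name Arial;'
    'borders: left thick, right thick, top thick, bottom thick;'
    'pattern: pattern solid, fore_colour red;',
    num_format_str='YYYY-MM-DD',
)

book = Workbook()
sheet = book.add_sheet('A Date')
sheet.write(1, 1, date(2009, 3, 18), style)
book.save('date_easyxf.xls')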
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer, PorterStemmer  # needed for lem/stem below
from nltk.tokenize import word_tokenize, sent_tokenize
from xlwt import Workbook
# from pywsd.lesk import simple_lesk
import re
import nltk
import xlrd
import numpy as np

# Assigning variables
loc = "Survey1.xlsx"
numCol = 5
commentCol = 6
saveFileName = "resultsQ.xls"
wb = Workbook()
# TODO: update regex
regex_pattern = "#[\w]*"
noise_list = set(stopwords.words('english'))  # was 'english\en', which NLTK does not recognise
lem = WordNetLemmatizer()
stem = PorterStemmer()
example_sent = "This is a sample sentence, showing off the stop words filtration and #hashtag to test"


# Lemmatizes word
def _lemmatize_word(input_word):
    lemmatized_word = lem.lemmatize(input_word, "v")
    return lemmatized_word
import sys
from datetime import date

from xlrd import open_workbook, xldate_as_tuple
from xlwt import Workbook

input_file = sys.argv[1]
output_file = sys.argv[2]

output_workbook = Workbook()
output_worksheet = output_workbook.add_sheet('jan_2013_output')
sale_amount_column_index = 3

with open_workbook(input_file) as workbook:
    worksheet = workbook.sheet_by_name('january_2013')
    data = []
    header = worksheet.row_values(0)
    data.append(header)
    for row_index in range(1, worksheet.nrows):
        row_list = []
        sale_amount = worksheet.cell_value(row_index, sale_amount_column_index)
        if sale_amount > 1400.0:
            for column_index in range(worksheet.ncols):
                cell_value = worksheet.cell_value(row_index, column_index)
                cell_type = worksheet.cell_type(row_index, column_index)
                if cell_type == 3:
                    date_cell = xldate_as_tuple(cell_value, workbook.datemode)
                    date_cell = date(*date_cell[0:3]).strftime('%m/%d/%Y')
                    row_list.append(date_cell)
                else:
                    row_list.append(cell_value)
        if row_list:
# parsing data in an xml file:
import xml.etree.ElementTree as ET  # this module represents the data as a tree (hierarchical form)
import xlwt
from xlwt import Workbook  # to generate sheets and store data in them

wb = Workbook()
sheet1 = wb.add_sheet('Sheet 1')

mytree = ET.parse('data.xml.xml')
myroot = mytree.getroot()
# print(myroot.tag)  # for the head node of the data

z = 1
ct = 0
for x in myroot[1].findall('Stroke'):
    ct = ct + 1
# print the number of strokes
print(ct)

sheet1.write(0, 0, "X_value")
sheet1.write(0, 1, "Y_value")
sheet1.write(0, 2, "Time")

# accessing each child
for x in myroot[1]:
    for y in x:
        sheet1.write(z, 0, y.get('x'))
        sheet1.write(z, 1, y.get('y'))
        sheet1.write(z, 2, y.get('time'))
        z = z + 1
    sheet1.write(z, 0, '\n')
    sheet1.write(z, 1, '\n')
from tempfile import TemporaryFile
from xlwt import Workbook

book = Workbook()
sheet1 = book.add_sheet('Wangzi')

sheet1.write(0, 0, '0,0')
sheet1.write(0, 1, '0,1')

book.save('D:/Desktop/simple.xls')
# book.save(TemporaryFile())
# -*- coding: utf-8 -*-
import urllib, urllib2
import StringIO, gzip

# to read an xls file
from xlrd import open_workbook
xls = open_workbook('test.xls')
sheet0 = xls.sheet_by_index(0)

# to write an xls file
from tempfile import TemporaryFile
from xlwt import Workbook
book = Workbook()
sheet1 = book.add_sheet('result 1')

for row_index in range(sheet0.nrows):
    keyword = sheet0.cell(row_index, 0).value
    params = {'query': keyword}
    enc_params = urllib.urlencode(params)

    request = urllib2.Request('http://search.naver.com/' + 'search.naver' + '?' + enc_params)
    request.add_header('User-agent', 'Mozilla/5.0')
    request.add_header('Accept-encoding', 'gzip')

    response = urllib2.urlopen(request)
    compressedstream = StringIO.StringIO(response.read())
    gzipper = gzip.GzipFile(fileobj=compressedstream)
def getElection(): # from PIL import Image, ImageFile x_index = 1 root.directory = filedialog.askdirectory() mypath = root.directory onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))] files_length = len(onlyfiles) # print(files_length) # Workbook is created wb = Workbook() # add_sheet is used to create sheet. sheet1 = wb.add_sheet('Sheet 1', cell_overwrite_ok=True) # Write Headers to sheet sheet1.write(0, 0, 'S.No') sheet1.write(0, 1, 'Election Number') sheet1.write(0, 2, 'Gender') sheet1.write(0, 3, 'Age') sheet1.write(0, 4, 'Name') sheet1.write(0, 5, 'Fathers Name') sheet1.write(0, 6, 'File Name') sheet1.write(0, 7, 'Text') ixsheet = 0 # print(files_lenth) while ixsheet < files_length: ImageFile.LOAD_TRUNCATED_IMAGES = True x = "'D:\Personal\Machine Learning\Election Commission\'" dir_path = x.replace("'", "") file_path = onlyfiles[ixsheet] join_path = join(dir_path, file_path) # print(join_path) im = Image.open(join_path) # load the example image and convert it to grayscale image = cv2.imread(join_path) gray = prePrcoessing(image) # write the grayscale image to disk as a temporary file so we can # apply OCR to it filename = "{}.png".format(os.getpid()) cv2.imwrite(filename, gray) ############################################################################################################## ######################################## Section 3: Running PyTesseract ###################################### ############################################################################################################## # load the image as a PIL/Pillow image, apply OCR, and then delete # the temporary file # pytesseract.pytesseract.tesseract_cmd = 'D:\\Tesseract-OCR\\tesseract.exe' text = pytesseract.image_to_string(Image.open(filename), lang='eng') # add +hin after eng within the same argument to extract hindi specific text - change encoding to utf-8 while writing os.remove(filename) # writing extracted data into a text file text_output = open('outputbase.txt', 'w', encoding='utf-8') text_output.write(text) text_output.close() file = open('outputbase.txt', 'r', encoding='utf-8') text = file.read() # Cleaning all the gibberish text bad_chars = [ '~', '`', '!', '@', '#', '$', '%', '^', '&', '*', '(', ')', '{', '}', "'", '[', ']', '|', ':', ';', ',', '<', '>', '.', '?', '/', '+', '=', '_' ] for i in bad_chars: text = text.replace(i, '') ############################################################################################################ ###################################### Section 4: Extract relevant information ############################# ############################################################################################################ # Initializing data variable name = None fname = None dob = None pan = None nameline = [] dobline = [] panline = [] text0 = [] text1 = [] text2 = [] # Searching for PAN lines = text.split('\n') for lin in lines: s = lin.strip() s = lin.replace('\n', '') s = s.rstrip() s = s.lstrip() text1.append(s) text1 = list(filter(None, text1)) # print(text1) # to remove any text read from the image file which lies before the line 'Income Tax Department' lineno = 0 # to start from the first line of the text file. 
for wordline in text1: xx = wordline.split('\n') if ([ w for w in xx if re.search('(ELECTION|COMMISSION|INDIA|NDIA)$', w) ]): text1 = list(text1) lineno = text1.index(wordline) break # text1 = list(text1) text0 = text1[lineno + 1:] #print(text0) # Contains all the relevant extracted text in form of a list - uncomment to check ############################################################################################################### ######################################### Section 5: Dishwasher part ########################################## ############################################################################################################### # Cleaning first names, better accuracy namelist = text.split(' ') while ("" in namelist): namelist.remove("") name = re.findall(r'\w{0}[N,n]\w{0}[A,a]\w{0}[M,m]\w{0}[E,e]', text) for n in name: indices = [i for i, x in enumerate(namelist) if x == n] # print(indices) list_names = list() if name is not None: for n in indices: list_names.append((namelist[n + 1] + " " + namelist[n + 2])) # Cleaning Father's name Person_name = "" # print(len(list_names)) if len(list_names) > 0: if list_names[0] is not None: Person_name = list_names[0] else: Person_name = "" fname = "" if len(list_names) == 2: if list_names[1] is not None: fname = list_names[1] else: fname = "" gender = re.findall( r'\w{0}[F,f]\w{0}[e,E]\w{0}[M,m]\w{0}[A,a]\w{0}[a-zA-Z]\w{0}[E,e]', text) if gender == []: gender = re.findall(r'\w{0}[M,m]\w{0}[A,a]\w{0}[a-zA-Z]\w{0}[E,e]', text) # Cleaning DOB dob = re.findall(r'\d{2}[-/|-]\d{2}[-/|-]\d{4}', text) electiono = re.findall(r'\w{2}[a-zA-Z]\w{6}[0-9]', text) # print(electiono) # Making tuples of data data = {} data['Name'] = Person_name data['Father Name'] = fname data['Gender'] = gender data['Election No'] = electiono sheet1.write(ixsheet + 1, 0, ixsheet + 1) sheet1.write(ixsheet + 1, 1, data['Election No']) sheet1.write(ixsheet + 1, 2, data['Gender']) sheet1.write(ixsheet + 1, 4, data['Name']) sheet1.write(ixsheet + 1, 5, data['Father Name']) sheet1.write(ixsheet + 1, 6, join_path) sheet1.write(ixsheet + 1, 7, text) ixsheet = ixsheet + 1 wb.save('Election Card DATA.xls')
# price and volume data of the front-month and second-month futures over all dates
DfFuturesPriceVolume[name] = pd.concat(
    [DfFuturesPriceVolume[name],
     pd.merge(DfFirst.iloc[:-1, :], DfSecond.iloc[:-1, :], on='Date')]
)

from pandas import ExcelWriter

# Hedged repair: the original mixed an xlwt Workbook with DataFrame.to_excel
# (which expects a pandas ExcelWriter or a path) and referenced an undefined
# `writer`, so the sheets are written through pandas' ExcelWriter instead.
writer = ExcelWriter('DfFuturesPriceVolume.xlsx')
for name in SheetName:
    DfFuturesPriceVolume[name].to_excel(writer, name)
writer.save()

writer = ExcelWriter('final.xls')
original.to_excel(writer, 'original')
data.to_excel(writer, 'result')
writer.save()

'DfFuturesPriceVolume' + 'LeanHog'

for name in SheetName:
    DfFuturesPriceVolume[name].reset_index()
def getPanCard(): # from PIL import Image, ImageFile x_index = 1 root.directory = filedialog.askdirectory() mypath = root.directory onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))] files_length = len(onlyfiles) # print(files_length) # Workbook is created wb = Workbook() # add_sheet is used to create sheet. sheet1 = wb.add_sheet('Sheet 1', cell_overwrite_ok=True) # Write Headers to sheet sheet1.write(0, 0, 'S.No') sheet1.write(0, 1, 'PAN Number') sheet1.write(0, 2, 'Date of Birth') sheet1.write(0, 3, 'Name') sheet1.write(0, 4, 'Fathers Name') sheet1.write(0, 5, 'File Name') ixsheet = 0 # print(files_lenth) while ixsheet < files_length: ImageFile.LOAD_TRUNCATED_IMAGES = True x = mypath dir_path = x.replace("'", "") file_path = onlyfiles[ixsheet] join_path = join(dir_path, file_path) # print(join_path) im = Image.open(join_path) # load the example image and convert it to grayscale image = cv2.imread(join_path) gray = prePrcoessing(image) # write the grayscale image to disk as a temporary file so we can # apply OCR to it filename = "{}.png".format(os.getpid()) cv2.imwrite(filename, gray) ''' A blurring method may be applied. We apply a median blur when the --preprocess flag is set to blur. Applying a median blur can help reduce salt and pepper noise, again making it easier for Tesseract to correctly OCR the image. After pre-processing the image, we use os.getpid to derive a temporary image filename based on the process ID of our Python script. The final step before using pytesseract for OCR is to write the pre-processed image, gray, to disk saving it with the filename from above ''' ############################################################################################################## ######################################## Section 3: Running PyTesseract ###################################### ############################################################################################################## # load the image as a PIL/Pillow image, apply OCR, and then delete # the temporary file # pytesseract.pytesseract.tesseract_cmd = 'D:\\Tesseract-OCR\\tesseract.exe' #from pytesseract import Output #import pytesseract text = pytesseract.image_to_string(Image.open(filename), lang='eng') # add +hin after eng within the same argument to extract hindi specific text - change encoding to utf-8 while writing os.remove(filename) # print(text) # show the output images # cv2.imshow("Image", image) # cv2.imshow("Output", gray) # cv2.waitKey(0) # writing extracted data into a text file text_output = open('outputbase.txt', 'w', encoding='utf-8') text_output.write(text) text_output.close() file = open('outputbase.txt', 'r', encoding='utf-8') text = file.read() # print(text) # Cleaning all the gibberish text bad_chars = [ '~', '`', '!', '@', '#', '$', '%', '^', '&', '*', '(', ')', '{', '}', "'", '[', ']', '|', ':', ';', ',', '<', '>', '.', '?', '+', '=', '_' ] for i in bad_chars: text = text.replace(i, '') '''for god_damn in text: if nonsense(god_damn): text.remove(god_damn) else: print(text)''' # print(text) ############################################################################################################ ###################################### Section 4: Extract relevant information ############################# ############################################################################################################ # Initializing data variable name = None fname = None dob = None pan = None nameline = [] dobline = [] panline = [] text0 = [] text1 = [] text2 = [] # 
Searching for PAN lines = text.split('\n') for lin in lines: s = lin.strip() s = lin.replace('\n', '') s = s.rstrip() s = s.lstrip() text1.append(s) text1 = list(filter(None, text1)) # print(text1) # to remove any text read from the image file which lies before the line 'Income Tax Department' lineno = 0 # to start from the first line of the text file. for wordline in text1: xx = wordline.split('\n') if ([ w for w in xx if re.search( '(INCOMETAXDEPARWENT @|mcommx|INCOME|TAX|GOW|GOVT|GOVERNMENT|OVERNMENT|VERNMENT|DEPARTMENT|EPARTMENT|PARTMENT|ARTMENT|INDIA|NDIA)$', w) ]): text1 = list(text1) lineno = text1.index(wordline) break # text1 = list(text1) text0 = text1[lineno + 1:] # print(text0) # Contains all the relevant extracted text in form of a list - uncomment to check def findword(textlist, wordstring): lineno = -1 for wordline in textlist: xx = wordline.split() if ([w for w in xx if re.search(wordstring, w)]): lineno = textlist.index(wordline) textlist = textlist[lineno + 1:] return textlist return textlist ############################################################################################################### ######################################### Section 5: Dishwasher part ########################################## ############################################################################################################### try: # Cleaning first names, better accuracy name = text0[0] name = name.rstrip() name = name.lstrip() name = name.replace("8", "B") name = name.replace("0", "D") name = name.replace("6", "G") name = name.replace("1", "I") name = re.sub('[^a-zA-Z] +', ' ', name) # Cleaning Father's name fname = text0[1] fname = fname.rstrip() fname = fname.lstrip() fname = fname.replace("8", "S") fname = fname.replace("0", "O") fname = fname.replace("6", "G") fname = fname.replace("1", "I") fname = fname.replace("\"", "A") fname = re.sub('[^a-zA-Z] +', ' ', fname) # Cleaning DOB dob = re.findall(r'\d{2}[-/|-]\d{2}[-/|-]\d{4}', text) # Cleaning PAN Card details text0 = findword( text1, '(Pormanam|Number|umber|Account|ccount|count|Permanent|ermanent|manent|wumm)$' ) pan = re.findall( r'\w{2}[a-zA-Z]\w{0}[P,C,H,A,B,G,J,L,F,T]\w{0}[A-Z]\w{3}[0-9]\w{0}[A-Z]', text) if pan == []: pan = re.findall(r'\w{7}[A-Z]', text) finlen = len(pan) pan = pan[finlen - 1] # print(pan) except: pass # Making tuples of data data = {} data['Name'] = name data['Father Name'] = fname data['Date of Birth'] = dob data['PAN'] = pan sheet1.write(ixsheet + 1, 0, ixsheet + 1) sheet1.write(ixsheet + 1, 1, data['PAN']) sheet1.write(ixsheet + 1, 3, data['Name']) sheet1.write(ixsheet + 1, 4, data['Father Name']) if dob: sheet1.write(ixsheet + 1, 2, data['Date of Birth']) sheet1.write(ixsheet + 1, 5, join_path) ixsheet = ixsheet + 1 wb.save('PAN CARD DATA.xls')
def open(self, file_name, encoding='ascii', style_compression=2, **keywords):
    BookWriter.open(self, file_name, **keywords)
    self.work_book = Workbook(style_compression=style_compression,
                              encoding=encoding)
class MakeExcel(object):
    def __init__(self, excelinfo=None):
        self.STARTLINE = 1
        engine = Engine()
        timemanager = TimeManager()
        self.workbook = Workbook()
        self.sheet = self.workbook.add_sheet(u'公示信息', cell_overwrite_ok=False)
        self.inforsheet = self.workbook.add_sheet(u'文档信息', cell_overwrite_ok=False)
        _tableTitle = [u"一卡通", u"学号", u"姓名", u"明细", u"说明", u"得分", u"总分"]

        # Set fixed column widths
        self.sheet.col(0).width = 4000
        self.sheet.col(1).width = 4000
        self.sheet.col(2).width = 3000
        self.sheet.col(3).width = 20000
        self.sheet.col(4).width = 20000
        self.sheet.col(5).width = 2000
        self.sheet.col(6).width = 2000

        # Define the styles used on the info sheet
        # easyxf grammar: (<element>:(<attribute> <value>,)+;)+
        xls_title = easyxf(
            'font: name Arial,height 400,colour black;'
            'pattern: pattern solid, fore_colour pale_blue;'
            'alignment: horizontal center,vertical center;'
        )
        xls_info = easyxf(
            'font: name Arial,height 250,colour black;'
            'pattern: pattern solid, fore_colour white;'
            'alignment: horizontal center,vertical center;'
            'borders:top medium,bottom medium,left medium,right medium;'
        )
        self.xls_detail = easyxf(
            'font: name Arial,height 250,colour black;'
            'pattern: pattern solid, fore_colour white;'
            'alignment: horizontal center,vertical center;'
            'borders:top medium,bottom medium,left medium,right medium;'
        )
        self.sumary = easyxf(
            'font: name Arial,height 250,colour black;'
            'pattern: pattern solid, fore_colour white;'
            'alignment: horizontal center,vertical center;'
            'borders:top medium,bottom medium,left medium,right medium;'
        )
        self.details = easyxf(
            'font: name Arial,height 250,colour black;'
            'pattern: pattern solid, fore_colour yellow;'
            'alignment: horizontal center,vertical center;'
            'borders:top medium,bottom medium,left medium,right medium,bottom_colour violet;'
        )

        self.inforsheet.write_merge(0, 1, 0, 6, excelinfo["filename"], xls_title)
        self.inforsheet.write_merge(2, 3, 2, 6, excelinfo["admin"], xls_info)
        self.inforsheet.write_merge(2, 3, 0, 1, u"创建者:", xls_info)
        self.inforsheet.write_merge(4, 5, 2, 6, excelinfo["grade"], xls_info)
        self.inforsheet.write_merge(4, 5, 0, 1, u"公示年级:", xls_info)
        # self.inforsheet.write_merge(6, 7, 2, 6, timemanager.strTime(excelinfo["maketime"]), xls_info)
        self.inforsheet.write_merge(6, 7, 0, 1, u"创建时间:", xls_info)
        self.inforsheet.write_merge(8, 9, 2, 6, excelinfo["start"] + u"至" + excelinfo["end"], xls_info)
        self.inforsheet.write_merge(8, 9, 0, 1, u"统计区间:", xls_info)
        self.inforsheet.write_merge(10, 11, 2, 6, excelinfo["note"], self.xls_detail)
        self.inforsheet.write_merge(10, 11, 0, 1, u"备注:", xls_info)

        for i in range(len(_tableTitle)):  # write the table title row
            self.sheet.write(0, i, _tableTitle[i], self.xls_detail)

    def _writeuser(self, rowNo, infobuf, lines):
        """Write a user's summary columns; returns the number of rows used."""
        _info = [infobuf["campID"], infobuf["studentID"], infobuf["name"],
                 u"无", u"无", u"0", float(infobuf["sum"])]
        if lines == 0:  # the user has no score entries
            for i in range(len(_info)):
                self.sheet.write_merge(rowNo, rowNo + lines, i, i, _info[i], self.sumary)
            return lines + 1
        else:
            for i in range(len(_info)):
                if i != 3 and i != 4 and i != 5:  # leave the detail, note and score columns empty
                    self.sheet.write_merge(rowNo, rowNo + lines - 1, i, i, _info[i], self.sumary)
            return lines

    def _writedetail(self, rowNo, items, lines):
        """Write the detailed score items, one row per item."""
        i = 0
        for item in items:
            _info = [item["item_name"], item["note"], item["add"]]
            for s in range(len(_info)):
                self.sheet.write(rowNo + i, 3 + s, _info[s], self.details)
            i += 1

    def saveAs(self, filename):
        self.workbook.save(filename)

    def run(self, userlist, starttime, endtime):
        i = self.STARTLINE
        _count = 0
        for user in userlist:  # write each user's summary and detail rows
            engine = Engine()
            result = engine.getUserDetail(user, start_time=starttime,
                                          end_time=endtime, is_jsonify=False)
            if result is not None:
                lines = len(result["items"])
                _count += 1  # one more record written
                self._writedetail(i, result["items"], lines)
                i += self._writeuser(i, result, lines)
        if _count > 0:
            return True   # at least one entry was written
        else:
            return False  # no entries at all
def xuat_bao_cao(TW_dstk, malophoc, nam):
    wb = Workbook()
    xlwt.add_palette_colour("custom_colour", 0x21)
    wb.set_colour_RGB(0x21, 251, 228, 228)
    sheet = wb.add_sheet('sheet 1')

    style = xlwt.easyxf(
        'font: bold off, color black;'
        'borders: top_color black, bottom_color black, right_color black, left_color black,'
        ' left thin, right thin, top thin, bottom thin;'
        'pattern: pattern solid, fore_color white;')
    style1 = xlwt.easyxf(
        'font: bold on, color black, height 260;'
        'borders: top_color black, bottom_color black, right_color black, left_color black,'
        ' left thin, right thin, top thin, bottom thin;'
        'pattern: pattern solid, fore_color custom_colour;'
        'align: vertical center, horizontal center;')
    style2 = xlwt.easyxf(
        'font: bold on, color black;'
        'borders: top_color black, bottom_color black, right_color black, left_color black,'
        ' left thin, right thin, top thin, bottom thin;'
        'pattern: pattern solid, fore_color yellow;')
    style3 = xlwt.easyxf(
        'font: bold on, color black;'
        'borders: top_color black, bottom_color black, right_color black, left_color black,'
        ' left thin, right thin, top thin, bottom thin;'
        'pattern: pattern solid, fore_color white;')

    cot = 5   # first data row
    hang = 1  # first data column
    ttlophoc = XLDL.xldl_LopHoc.layThonTinLopHoc(malophoc.split(' ')[0])

    # write_merge(top_row, bottom_row, left_col, right_col)
    sheet.write_merge(0, 1, 1, 3, "BÁO CÁO THỐNG KÊ ĐIỂM DANH LỚP HỌC", style1)
    sheet.write(0, 6, "Năm: " + nam)
    sheet.write_merge(2, 2, 0, 1, "Mã lớp học: " + ttlophoc[0], style2)
    sheet.write_merge(2, 2, 2, 3, "Tên lớp học: " + ttlophoc[1], style2)
    sheet.write_merge(2, 2, 4, 6, "Tên giảng viên: " + ttlophoc[2], style2)
    sheet.write_merge(2, 2, 7, 7, "Sỉ số: " + str(ttlophoc[3]), style2)

    total = 0
    for row in range(TW_dstk.rowCount()):
        total = total + int(TW_dstk.item(row, 4).text())
    sheet.write_merge(2, 2, 8, 9, "Tổng vắng: " + str(total), style2)

    header = [
        "STT", "Mã sinh viên", "Tên sinh viên", "Lớp hinh hoạt", "giới tính",
        "Tổng vắng", "Tỉ lệ đi học(%)"
    ]
    for i in range(len(header)):  # write the header cells
        cwidth = sheet.col(i).width  # current width of the column
        if (len(str(header[i])) * 367) > cwidth:
            sheet.col(i).width = (len(str(header[i])) * 367)
        sheet.write(4, 0 + i, str(header[i]), style3)

    dsNgay = XLDL.xldl_ThongKe.layDSngayDD(malophoc.split(' ')[0])
    for i in range(len(dsNgay)):  # write the attendance-date columns
        cwidth = sheet.col(7 + i).width  # current width of the column
        if (len(str(dsNgay[i])) * 367) > cwidth:
            sheet.col(7 + i).width = (len(str(dsNgay[i])) * 367)
        sheet.write(4, 7 + i, str(dsNgay[i]), style3)

    for row in range(TW_dstk.rowCount()):
        sheet.write(cot + row, 0, row + 1, style)
        for col in range(TW_dstk.columnCount()):
            # auto-fit the column the value is written to (data starts at column hang + col)
            cwidth = sheet.col(hang + col).width  # current width of the column
            if (len(TW_dstk.item(row, col).text()) * 367) > cwidth:
                sheet.col(hang + col).width = (len(TW_dstk.item(row, col).text()) * 367)
            if col == 4:
                sheet.write(cot + row, hang + col, int(TW_dstk.item(row, col).text()), style)
            elif col == 5:
                sheet.write(cot + row, hang + col, float(TW_dstk.item(row, col).text()), style)
            else:
                sheet.write(cot + row, hang + col, TW_dstk.item(row, col).text(), style)

    wb.save('bao_cao_lopHoc_' + malophoc + '.xls')
import xlrd
import sys
import time
import json
import xlwt
from xlwt import Workbook

# Open input Excel workbook
wb = xlrd.open_workbook(sys.argv[1])
sheet = wb.sheet_by_index(0)

# Extracting the first row
sheet.cell_value(0, 0)

# New workbook for output
wbwrite = Workbook()
sheet1 = wbwrite.add_sheet('Hashes')
sheet1.write(0, 0, 'MD5')
sheet1.write(0, 1, 'SHA-1')
sheet1.write(0, 2, 'SHA-256')
sheet1.write(0, 3, 'Detections')
sheet1.write(0, 4, 'Total AVs looked')

# VT API data. Do not change or share.
url = 'https://www.virustotal.com/vtapi/v2/file/report'
API_KEY = '<API KEY GOES HERE>'
HASH = ''

# Moving row by row down
for i in range(sheet.nrows):
    HASH = sheet.cell_value(i, 0)
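    # Hypothetical continuation of the loop body above (not in the original
    # excerpt): a minimal sketch that looks each hash up against the
    # VirusTotal v2 file/report endpoint and writes one result row per input
    # row. The use of `requests` (assumed to be imported alongside the other
    # imports), the 15-second sleep for public API keys, and the output
    # filename are assumptions.
    params = {'apikey': API_KEY, 'resource': HASH}
    report = requests.get(url, params=params).json()
    if report.get('response_code') == 1:
        sheet1.write(i + 1, 0, report.get('md5', ''))
        sheet1.write(i + 1, 1, report.get('sha1', ''))
        sheet1.write(i + 1, 2, report.get('sha256', ''))
        sheet1.write(i + 1, 3, report.get('positives', 0))
        sheet1.write(i + 1, 4, report.get('total', 0))
    time.sleep(15)  # public VT keys allow roughly 4 requests per minute

wbwrite.save('hashes_with_detections.xls')  # hypothetical output filename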
# Writing to an excel
# sheet using Python
import xlwt
from xlwt import Workbook

# Workbook is created
wb = Workbook()

# add_sheet is used to create sheet.
sheet1 = wb.add_sheet('Sheet 1')

# Number of total particles (input) - Ntotal
# Lx, Ly and Lz
# amp_v_y - amplitude of vibratory particles in Y direction
# Delta x, delta y and delta z (inter-particle distance)
# Define roughness of the particles (roughness factor Fr) .. place them in up and down position
Nx = input("Number of particles in x direction")
Nx = int(Nx)
Ny = input("Number of particles in y direction")
Ny = int(Ny)
Nz = input("Number of particles in z direction")
Nz = int(Nz)
dp = input("diameter of particle")
dp = int(dp)
deltax = input("inter-particle distance in x-direction")
deltax = int(deltax)
deltay = input("inter-particle distance in y-direction")
deltay = int(deltay)
deltaz = input("inter-particle distance in z-direction")
deltaz = int(deltaz)
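# ---------------------------------------------------------------------------
# Hypothetical continuation (not part of the original script): the excerpt
# stops after reading the inputs, so this sketch only illustrates one way the
# values could be used - laying the particles on a regular Nx x Ny x Nz
# lattice with pitch (dp + delta) and writing one (x, y, z) row per particle.
# The column layout, the pitch formula and the output filename are all
# assumptions.
# ---------------------------------------------------------------------------
sheet1.write(0, 0, "x")
sheet1.write(0, 1, "y")
sheet1.write(0, 2, "z")

row = 1  # note: a single .xls sheet holds at most 65536 rows
for ix in range(Nx):
    for iy in range(Ny):
        for iz in range(Nz):
            sheet1.write(row, 0, ix * (dp + deltax))
            sheet1.write(row, 1, iy * (dp + deltay))
            sheet1.write(row, 2, iz * (dp + deltaz))
            row += 1

wb.save('particles.xls')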
def xuat_bao_cao_SV(TW_dstk, masv):
    wb = Workbook()
    xlwt.add_palette_colour("custom_colour", 0x21)
    wb.set_colour_RGB(0x21, 251, 228, 228)
    sheet = wb.add_sheet('sheet 1')

    style = xlwt.easyxf(
        'font: bold off, color black;'
        'borders: top_color black, bottom_color black, right_color black, left_color black,'
        ' left thin, right thin, top thin, bottom thin;'
        'pattern: pattern solid, fore_color white;')
    style1 = xlwt.easyxf(
        'font: bold on, color black, height 260;'
        'borders: top_color black, bottom_color black, right_color black, left_color black,'
        ' left thin, right thin, top thin, bottom thin;'
        'pattern: pattern solid, fore_color custom_colour;'
        'align: vertical center, horizontal center;')
    style2 = xlwt.easyxf(
        'font: bold on, color black;'
        'borders: top_color black, bottom_color black, right_color black, left_color black,'
        ' left thin, right thin, top thin, bottom thin;'
        'pattern: pattern solid, fore_color yellow;')
    style3 = xlwt.easyxf(
        'font: bold on, color black;'
        'borders: top_color black, bottom_color black, right_color black, left_color black,'
        ' left thin, right thin, top thin, bottom thin;'
        'pattern: pattern solid, fore_color white;')

    cot = 5   # first data row
    hang = 1  # first data column
    ttSinhVien = XLDL.xldl_SinhVien.layThonTinSinhVien(masv.split(' ')[0])

    sheet.write_merge(0, 1, 1, 3, "BÁO CÁO THỐNG KÊ ĐIỂM DANH", style1)
    sheet.write_merge(2, 2, 0, 1, "Mã sinh viên: " + ttSinhVien[0], style2)
    sheet.write_merge(2, 2, 2, 4, "Họ và tên: " + ttSinhVien[1], style2)
    sheet.write_merge(2, 2, 5, 6, "Giới tính: " + ttSinhVien[2], style2)
    sheet.write_merge(2, 2, 7, 8, "Địa chỉ: " + str(ttSinhVien[3]), style2)

    total = 0
    for row in range(TW_dstk.rowCount()):
        total = total + int(TW_dstk.item(row, 3).text())
    sheet.write_merge(2, 2, 9, 10, "Tổng vắng: " + str(total), style2)

    header = [
        "STT", "Mã lớp học", "Tên lớp học", "Tên môn học", "Tổng vắng",
        "Số buổi học", "Tỉ lệ đi học(%)"
    ]
    for i in range(len(header)):  # write the header cells
        cwidth = sheet.col(i).width  # current width of the column
        if (len(str(header[i])) * 367) > cwidth:
            sheet.col(i).width = (len(str(header[i])) * 367)
        sheet.write(4, 0 + i, str(header[i]), style3)

    for row in range(TW_dstk.rowCount()):
        sheet.write(cot + row, 0, row + 1, style)
        for col in range(TW_dstk.columnCount()):
            # auto-fit the column the value is written to (data starts at column hang + col)
            cwidth = sheet.col(hang + col).width  # current width of the column
            if (len(TW_dstk.item(row, col).text()) * 367) > cwidth:
                sheet.col(hang + col).width = (len(TW_dstk.item(row, col).text()) * 367)
            if col == 4 or col == 3:
                sheet.write(cot + row, hang + col, int(TW_dstk.item(row, col).text()), style)
            elif col == 5:
                sheet.write(cot + row, hang + col, float(TW_dstk.item(row, col).text()), style)
            else:
                sheet.write(cot + row, hang + col, TW_dstk.item(row, col).text(), style)

    wb.save('bao_cao_SinhVien_' + masv + '.xls')
from xlwt import Workbook
import openpyxl

# Workbook is created
wb = Workbook()

# add_sheet is used to create sheet.
sheet1 = wb.add_sheet("Sheet 1")

sheet1.write(1, 0, "ISBT DEHRADUN")
sheet1.write(2, 0, "SHASTRADHARA")
sheet1.write(3, 0, "CLEMEN TOWN")
sheet1.write(4, 0, "RAJPUR ROAD")
sheet1.write(5, 0, "CLOCK TOWER")

sheet1.write(0, 1, "ISBT DEHRADUN")
sheet1.write(0, 2, "SHASTRADHARA")
sheet1.write(0, 3, "CLEMEN TOWN")
sheet1.write(0, 4, "RAJPUR ROAD")
sheet1.write(0, 5, "CLOCK TOWER")

wb.save("xlwt example.xls")

# Workbook is created
openpyxl_wb = openpyxl.Workbook()

# create_sheet is used to create sheet.
sheet1 = openpyxl_wb.create_sheet("Sheet 1")

sheet1.cell(1, 1, "ISBT DEHRADUN")
sheet1.cell(2, 1, "SHASTRADHARA")
sheet1.cell(3, 1, "CLEMEN TOWN")
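# ---------------------------------------------------------------------------
# A small standalone sketch (not from the original snippet) contrasting the
# two APIs used above: xlwt's write(row, col) is 0-based, while openpyxl's
# cell(row, column) is 1-based and openpyxl.Workbook() already contains an
# active sheet. The sheet and file names below are placeholders.
# ---------------------------------------------------------------------------
import openpyxl
from xlwt import Workbook

xls_wb = Workbook()
xls_sheet = xls_wb.add_sheet("Stops")
xls_sheet.write(0, 0, "ISBT DEHRADUN")   # top-left cell in xlwt
xls_wb.save("stops_xlwt.xls")

xlsx_wb = openpyxl.Workbook()
xlsx_sheet = xlsx_wb.active              # default sheet created by Workbook()
xlsx_sheet.cell(1, 1, "ISBT DEHRADUN")   # same top-left cell in openpyxl
xlsx_wb.save("stops_openpyxl.xlsx")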
def writeResults(C1, C2, C3, R2, R3, f, magCL, magOL, phaseOL, magvcoTF,
                 PFDCPNoiseOut, PrescalerNoiseOut, VCONoiseOut, R2NoiseOut,
                 R3NoiseOut, XTALNoiseOut, SDNoiseOut, TotalNoise, t, fT,
                 lockTime_0p001Pcnt, lockTime_0p0001Pcnt,
                 lockTime_0p00001Pcnt, lockTime_0p000001Pcnt, f2):
    book = Workbook()

    # Cell styles
    parameter = easyxf(
        'font: name Arial, bold True, height 280; alignment: horizontal center'
    )
    parameterValue = easyxf(
        'font: name Arial, height 280; alignment: horizontal center',
        num_format_str='0.000E+00')
    parameterValue2 = easyxf(
        'font: name Arial, height 280; alignment: horizontal center',
        num_format_str='0.000')
    parameterValue3 = easyxf(
        'font: name Arial, height 280; alignment: horizontal center',
        num_format_str='0.000000%')
    columnHeader = easyxf(
        'font: name Arial, bold True, height 280; alignment: horizontal center'
    )
    redResult = easyxf('font: name Arial, bold True, height 280, colour red;'
                       'alignment: horizontal center')

    # Write Loop Filter Components worksheet:
    sheetLoopFilter = book.add_sheet('Loop Filter Components')
    sheetLoopFilter.col(1).width = 5000
    sheetLoopFilter.write(0, 0, 'C1', parameter)
    sheetLoopFilter.write(0, 1, C1, parameterValue)
    sheetLoopFilter.write(1, 0, 'C2', parameter)
    sheetLoopFilter.write(1, 1, C2, parameterValue)
    sheetLoopFilter.write(2, 0, 'C3', parameter)
    sheetLoopFilter.write(2, 1, C3, parameterValue)
    sheetLoopFilter.write(3, 0, 'R2', parameter)
    sheetLoopFilter.write(3, 1, R2, parameterValue)
    sheetLoopFilter.write(4, 0, 'R3', parameter)
    sheetLoopFilter.write(4, 1, R3, parameterValue)

    # Write Loop Response worksheet:
    sheetLoopResponse = book.add_sheet('Loop Response Data')
    sheetLoopResponse.col(0).width = 6000
    sheetLoopResponse.write(0, 0, 'Frequency (Hz)', columnHeader)
    sheetLoopResponse.col(1).width = 15000
    sheetLoopResponse.write(0, 1, 'Closed Loop Response Magnitude (dB)', columnHeader)
    sheetLoopResponse.col(2).width = 14000
    sheetLoopResponse.write(0, 2, 'Open Loop Response Magnitude (dB)', columnHeader)
    sheetLoopResponse.col(3).width = 14000
    sheetLoopResponse.write(0, 3, 'Open Loop Response Phase (dB)', columnHeader)
    sheetLoopResponse.col(4).width = 14000
    sheetLoopResponse.write(0, 4, 'VCO Transfer Function Magnitude (dB)', columnHeader)
    for i in range(len(f)):
        sheetLoopResponse.write(i + 1, 0, f[i], parameterValue)
        sheetLoopResponse.write(i + 1, 1, magCL[i], parameterValue2)
        sheetLoopResponse.write(i + 1, 2, magOL[i], parameterValue2)
        sheetLoopResponse.write(i + 1, 3, phaseOL[i], parameterValue2)
        sheetLoopResponse.write(i + 1, 4, magvcoTF[i], parameterValue2)

    # Write Noise Results worksheet:
    sheetPLLNoise = book.add_sheet('PLL Noise Contributors')
    sheetPLLNoise.col(0).width = 6000
    sheetPLLNoise.write(0, 0, 'Frequency (Hz)', columnHeader)
    sheetPLLNoise.col(1).width = 6000
    sheetPLLNoise.write(0, 1, 'PFDCP (dBc/Hz)', columnHeader)
    sheetPLLNoise.col(2).width = 7000
    sheetPLLNoise.write(0, 2, 'Prescaler (dBc/Hz)', columnHeader)
    sheetPLLNoise.col(3).width = 6000
    sheetPLLNoise.write(0, 3, 'VCO (dBc/Hz)', columnHeader)
    sheetPLLNoise.col(4).width = 6000
    sheetPLLNoise.write(0, 4, 'R2 (dBc/Hz)', columnHeader)
    sheetPLLNoise.col(5).width = 6000
    sheetPLLNoise.write(0, 5, 'R3 (dBc/Hz)', columnHeader)
    sheetPLLNoise.col(6).width = 6000
    sheetPLLNoise.write(0, 6, 'XTAL (dBc/Hz)', columnHeader)
    sheetPLLNoise.col(7).width = 8000
    sheetPLLNoise.write(0, 7, 'Sigma Delta (dBc/Hz)', columnHeader)
    sheetPLLNoise.col(8).width = 8000
    sheetPLLNoise.write(0, 8, 'Total Noise (dBc/Hz)', columnHeader)
    for i in range(len(f)):
        sheetPLLNoise.write(i + 1, 0, f[i], parameterValue)
        sheetPLLNoise.write(i + 1, 1, PFDCPNoiseOut[i], parameterValue2)
        sheetPLLNoise.write(i + 1, 2, PrescalerNoiseOut[i], parameterValue2)
        sheetPLLNoise.write(i + 1, 3, VCONoiseOut[i], parameterValue2)
        sheetPLLNoise.write(i + 1, 4, R2NoiseOut[i], parameterValue2)
        sheetPLLNoise.write(i + 1, 5, R3NoiseOut[i], parameterValue2)
        sheetPLLNoise.write(i + 1, 6, XTALNoiseOut[i], parameterValue2)
        sheetPLLNoise.write(i + 1, 7, SDNoiseOut[i], parameterValue2)
        sheetPLLNoise.write(i + 1, 8, TotalNoise[i], parameterValue2)

    # Write Time Response worksheet:
    sheetPLLTime = book.add_sheet('Time Response')
    sheetPLLTime.col(0).width = 5000
    sheetPLLTime.write(0, 0, 'Time (s)', columnHeader)
    sheetPLLTime.col(1).width = 9000
    sheetPLLTime.write(0, 1, 'Output Frequency (Hz)', columnHeader)
    for i in range(len(t)):
        sheetPLLTime.write(i + 1, 0, t[i], parameterValue)
        sheetPLLTime.write(i + 1, 1, fT[i], parameterValue)

    # Write Lock Times worksheet:
    sheetLockTimes = book.add_sheet('Lock Times')
    sheetLockTimes.col(0).width = 11000
    sheetLockTimes.write(0, 0, 'Locks within what % error', columnHeader)
    sheetLockTimes.col(1).width = 11000
    sheetLockTimes.write(0, 1, 'Locks within how many Hertz', columnHeader)
    sheetLockTimes.col(2).width = 6000
    sheetLockTimes.write(0, 2, 'Lock Time (s)', columnHeader)
    sheetLockTimes.write(1, 0, 0.00001, parameterValue3)
    sheetLockTimes.write(1, 1, float(scientific(0.00001 * f2)), parameterValue)
    sheetLockTimes.write(1, 2, lockTime_0p001Pcnt, parameterValue)
    sheetLockTimes.write(2, 0, 0.000001, parameterValue3)
    sheetLockTimes.write(2, 1, float(scientific(0.000001 * f2)), parameterValue)
    sheetLockTimes.write(2, 2, lockTime_0p0001Pcnt, parameterValue)
    sheetLockTimes.write(3, 0, 0.0000001, parameterValue3)
    sheetLockTimes.write(3, 1, float(scientific(0.0000001 * f2)), parameterValue)
    sheetLockTimes.write(3, 2, lockTime_0p00001Pcnt, parameterValue)
    sheetLockTimes.write(4, 0, 0.00000001, parameterValue3)
    sheetLockTimes.write(4, 1, float(scientific(0.00000001 * f2)), parameterValue)
    sheetLockTimes.write(4, 2, lockTime_0p000001Pcnt, parameterValue)

    # Write Phase Error worksheet:
    sheetphaseError = book.add_sheet('Phase Error')
    sheetphaseError.col(0).width = 6000
    sheetphaseError.write(0, 0, 'Frequency (Hz)', columnHeader)
    sheetphaseError.col(1).width = 8000
    sheetphaseError.write(0, 1, 'Total Noise (V2/Hz)', columnHeader)
    for i in range(len(f)):
        sheetphaseError.write(i + 1, 0, f[i], parameterValue)
        sheetphaseError.write(i + 1, 1, 10**(TotalNoise[i] / 10.0), parameterValue)
    sheetphaseError.col(3).width = 10000
    sheetphaseError.write(1, 3, "Lower Integration Limit", columnHeader)
    sheetphaseError.write(2, 3, "Upper Integration Limit", columnHeader)
    sheetphaseError.write(4, 3, "Phase Error", redResult)
    sheetphaseError.col(4).width = 6000
    sheetphaseError.write(1, 4, 1.7e3, parameterValue)
    sheetphaseError.write(2, 4, 200e3, parameterValue)

    # Excel formulas evaluated when the workbook is opened: a three-point
    # Simpson's-rule estimate of the integrated noise between the limits in
    # E2:E3, converted to an RMS phase error in degrees.
    x = "(180/PI())*SQRT(2*((VLOOKUP(E3,A2:B32,1)-VLOOKUP(E2,A2:B32,1))/6)*(VLOOKUP(E2,A2:B32,2)+VLOOKUP(E3,A2:B32,2)+4*VLOOKUP(((VLOOKUP(E2,A2:B32,1)+VLOOKUP(E3,A2:B32,1))/2.0),A2:B32,2)))"
    y = "(180/PI())*SQRT(2*((E3-E2)/6)*(VLOOKUP(E2,A2:B32,2)+VLOOKUP(E3,A2:B32,2)+4*VLOOKUP(((E3+E2)/2.0),A2:B32,2)))"
    # y = "4*VLOOKUP(((VLOOKUP(E2,A2:B32,1)+VLOOKUP(E3,A2:B32,1))/2.0),A2:B32,2)"
    sheetphaseError.write(4, 4, Formula(y), parameterValue)
    return book
def jiantuan():
    csvFile = open("./统计结果/昆曲字频.csv", "rb")
    reader = unicodecsv.reader(csvFile, encoding='utf-8-sig')
    zidict = []
    for item in reader:
        zidict.append(item)
    a = sorted(zidict, key=lambda x: int(x[1]), reverse=True)
    t500 = np.array(a[0:500])
    t1000 = np.array(a[0:-1])

    wb = Workbook()
    st1 = wb.add_sheet('入声及尖团')
    st1.write(0, 0, '昆曲入声及尖团字统计,计频率前800')
    st1.write(1, 0, '顺序')
    st1.write(1, 1, '字')
    st1.write(1, 2, '尖团')
    st1.write(1, 3, '声调')
    st1.write(1, 4, '频率(%)')

    st2 = wb.add_sheet('尖团')
    st2.write(0, 0, '昆曲尖团字统计,计频率前500')
    st2.write(1, 0, '顺序')
    st2.write(1, 1, '字')
    st2.write(1, 2, '尖团')
    st2.write(1, 3, '频率(%)')

    st3 = wb.add_sheet('入声')
    st3.write(0, 0, '昆曲入声字统计,计频率前500')
    st3.write(1, 0, '顺序')
    st3.write(1, 1, '字')
    st3.write(1, 2, '频率(%)')

    n1 = 1
    n2 = 1
    n3 = 1
    # style = xlwt.easyxf(' color red;')
    for count in range(len(t1000[:, 0])):
        freq = int(t1000[count, 1]) / np.sum(t1000[:, 1].astype(float)) * 100
        i = t1000[count, 0]
        if isjian(i) == '非' and isru(i) == '非入':
            continue
        else:
            if n1 < 801:
                st1.write(n1 + 1, 0, n1)
                st1.write(n1 + 1, 1, i)
                st1.write(n1 + 1, 2, isjian(i))
                st1.write(n1 + 1, 3, isru(i))
                if round(freq, 2) == 0:
                    st1.write(n1 + 1, 4, '<0.01')
                else:
                    st1.write(n1 + 1, 4, round(freq, 2))
                n1 += 1
            if isjian(i) in ['尖', '团'] and n2 < 501:
                st2.write(n2 + 1, 0, n2)
                st2.write(n2 + 1, 1, i)
                st2.write(n2 + 1, 2, isjian(i))
                if round(freq, 2) == 0:
                    st2.write(n2 + 1, 3, '<0.01')
                else:
                    st2.write(n2 + 1, 3, round(freq, 2))
                n2 += 1
            if isru(i) == '入声' and n3 < 501:
                st3.write(n3 + 1, 0, n3)
                st3.write(n3 + 1, 1, i)
                if round(freq, 2) == 0:
                    st3.write(n3 + 1, 2, '<0.01')
                else:
                    st3.write(n3 + 1, 2, round(freq, 2))
                n3 += 1

    wb.save('./统计结果/昆曲入声及尖团统计.xls')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 7 17:02:28 2018

@author: martin
"""
#%%
import sys
from datetime import date
from xlrd import open_workbook, xldate_as_tuple
from xlwt import Workbook

input_file = sys.argv[1]
output_file = sys.argv[2]

output_workbook = Workbook()
output_worksheet = output_workbook.add_sheet('jan_2013_output')

my_columns = ['Customer ID', 'Purchase Date']

with open_workbook(input_file) as workbook:
    worksheet = workbook.sheet_by_name('january_2013')
    data = [my_columns]
    header_list = worksheet.row_values(0)
    header_index_list = []
    for header_index in range(len(header_list)):
        if header_list[header_index] in my_columns:
            header_index_list.append(header_index)
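    # Hypothetical continuation inside the `with` block above (not in the
    # original excerpt): a minimal sketch that collects the selected columns
    # for every data row, converting Excel serial dates with xldate_as_tuple.
    # The '%m/%d/%Y' output format is an assumption.
    for row_index in range(1, worksheet.nrows):
        row_list = []
        for column_index in header_index_list:
            cell_value = worksheet.cell_value(row_index, column_index)
            cell_type = worksheet.cell_type(row_index, column_index)
            if cell_type == 3:  # xlrd.XL_CELL_DATE
                date_cell = xldate_as_tuple(cell_value, workbook.datemode)
                date_cell = date(*date_cell[0:3]).strftime('%m/%d/%Y')
                row_list.append(date_cell)
            else:
                row_list.append(cell_value)
        data.append(row_list)

# Dump the collected rows into the output sheet and save it.
for list_index, output_list in enumerate(data):
    for element_index, element in enumerate(output_list):
        output_worksheet.write(list_index, element_index, element)
output_workbook.save(output_file)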