def exportExcelDirect(request, adict):
    """Build an in-memory xlsx workbook from adict['ex_parm'] and return it
    as a downloadable HTTP response.

    Expected ex_parm keys:
      title: document title, written into cell A1
      cols:  list of column headings, written on row 2
      rows:  list of data rows, each a list of cell values

    On any failure a plain-text error response is returned instead; the
    attachment filename header is set in both cases.
    """
    try:
        l_parm = adict['ex_parm']
        # Touch the required keys up front so a missing one raises KeyError
        # and is reported through the except branch below.
        l_parm['title']
        l_parm['cols']
        l_parm['rows']
        import io
        from xlsxwriter import Workbook
        output = io.BytesIO()
        workbook = Workbook(output)
        worksheet = workbook.add_worksheet('export')
        worksheet.write('A1', l_parm['title'])
        # Column headings on the second spreadsheet row.
        for col_idx, heading in enumerate(l_parm['cols']):
            worksheet.write(1, col_idx, heading)
        # Data rows start on the third spreadsheet row.
        for row_idx, data_row in enumerate(l_parm['rows']):
            for col_idx, cell_value in enumerate(data_row):
                worksheet.write(row_idx + 2, col_idx, cell_value)
        workbook.close()
        output.seek(0)
        response = HttpResponse(
            output.read(),
            content_type="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet")
    except Exception as e:
        ls_err = str(e.args)
        response = HttpResponse(str("导出excel失败:" + ls_err),
                                content_type="application/text")
    response['Content-Disposition'] = "attachment; filename=test.xlsx"
    return response
def write_cell_errors(xlsxname, header, lines, cells_to_mark):
    """Write *lines* under *header* into SPREADSHEETS_DIR/xlsxname,
    highlighting the cells listed in *cells_to_mark* with a cyan background."""
    book = Workbook(join(SPREADSHEETS_DIR, xlsxname))
    highlight = book.add_format()
    highlight.set_bg_color('cyan')
    sheet = book.add_worksheet()
    write_sheet(sheet, highlight, header, lines, cells_to_mark)
    book.close()
def action(args):
    """Assemble an xlsx workbook (written to args.outfile) from input files.

    For an 'Analysis' run, each known tab name is located among the inputs
    via process_files() and written as its own sheet; a TypeError from the
    lookup means the tab's file is absent and is reported, not fatal.
    For a 'Combined' run, every input whose basename contains the filetype
    becomes a sheet named after the file.

    Fix: the original used Python 2 `print` statements, which are syntax
    errors under Python 3; the replacements emit identical text on both.
    """
    book = Workbook()
    filetype = args.type
    # args.infiles is expected to be a one-element sequence of file lists.
    (infiles, ) = args.infiles
    if filetype == 'Analysis':
        tabs = ['0_QC', '1_QC_Metrics', '2_QC_by_Gene', '3_QC_by_Exon',
                '4_SV_Gridss', '5_SV_Breakdancer', '6_SV_Pindel',
                '7_CNV_Gene', '8_CNV_Exon', '9_Clinically_Flagged',
                '10_SNP', '11_INDEL', '12_MSI', '13_Amplicons', '14_PolyHunter']
        # for each tab, find its file and process.
        for tab in tabs:
            try:
                # Find file in infiles
                sheet_name, fname = process_files(infiles, tab, filetype)
                print("%s %s" % (sheet_name, fname))
                write_workbook(sheet_name, book, fname)
            except TypeError:
                print("Tab %s not processed" % tab)
    elif filetype == 'Combined':
        for fname in infiles:
            (f_path, f_name) = os.path.split(fname)
            if re.search(str(filetype), f_name):
                (f_short_name, f_extension) = os.path.splitext(f_name)
                sheet_name = f_short_name.split('Combined_')
                # Rejoin everything after the 'Combined_' prefix; the slice cap
                # keeps the name within Excel's sheet-name limits.
                sheet_name = '_'.join(sheet_name[1:30])
                print("%s %s" % (sheet_name, fname))
                write_workbook(sheet_name, book, fname)
    book.filename = args.outfile
    book.close()
def dump_info_xlsx():
    """Dump the module-level `studentlist` mapping to an xlsx file named
    after configuration['outputfile'], one row per student (matric),
    columns ordered by `ordered_list`."""
    from xlsxwriter import Workbook
    wb = Workbook(configuration['outputfile'] + '.xlsx')
    ws = wb.add_worksheet("CollatedInfo")
    # Fixed column order for the report header and for locating each value.
    ordered_list = 'Name,Matric,Team,MainLineCount,MainWordCount,TestLineCount,TestWordCount,DocsLineCount,DocsWordCount,MainFile,TestFile,DocsFile'.split(",")
    row = 0
    # Header row: each column title lands at its index in ordered_list.
    for header in ordered_list:
        col = ordered_list.index(header)
        ws.write(row, col, header)
    row = 1
    for matric in sorted(studentlist.keys()):
        for k, v in studentlist[matric].items():
            # NOTE(review): these branches are all no-ops, so every key —
            # including any listed here — still falls through to the write
            # below. If keys such as 'Main' or 'Test' (absent from
            # ordered_list) can occur, index() will raise ValueError;
            # confirm whether `continue` was intended instead of `pass`.
            if k == 'MainFile':
                pass
            elif k == 'Main':
                pass
            elif k == 'TestFile':
                pass
            elif k == 'Test':
                pass
            col = ordered_list.index(k)
            ws.write(row, col, v)
        row += 1
    wb.close()
def verified(context, collaborator, test, outpath=None):
    """Export variants which have been verified for an institute and write
    them to an excel file.

    Args:
        collaborator(str): institute id (defaults to 'cust000' when falsy)
        test(bool): True if the function is called for testing purposes
        outpath(str): path to output file (defaults to the current directory)

    Returns:
        written_files(int): number of written or simulated files, or None
        when the institute has no verified variants
    """
    written_files = 0
    collaborator = collaborator or 'cust000'
    LOG.info('Exporting verified variants for cust {}'.format(collaborator))

    adapter = context.obj['adapter']
    verified_vars = adapter.verified(institute_id=collaborator)
    LOG.info('FOUND {} verified variants for institute {}'.format(len(verified_vars), collaborator))
    if not verified_vars:
        LOG.warning('There are no verified variants for institute {} in database!'.format(collaborator))
        return None

    document_lines = export_verified_variants(verified_vars)
    today = datetime.datetime.now().strftime('%Y-%m-%d')
    document_name = '.'.join(['verified_variants', collaborator, today]) + '.xlsx'

    # Test mode: report success without touching the filesystem.
    if test and document_lines:
        written_files += 1
        LOG.info('Success. Verified variants file contains {} lines'.format(len(document_lines)))
        return written_files

    # Resolve the output folder and create the workbook there.
    outpath = outpath or str(os.getcwd())
    target = os.path.join(outpath, document_name)
    workbook = Workbook(target)
    sheet = workbook.add_worksheet()

    # Header row, then one row per exported variant line.
    for col, field in enumerate(VERIFIED_VARIANTS_HEADER):
        sheet.write(0, col, field)
    for row, line in enumerate(document_lines, 1):
        for col, field in enumerate(line):
            sheet.write(row, col, field)
    workbook.close()

    if os.path.exists(target):
        LOG.info('Success. Verified variants file of {} lines was written to disk'.
                 format(len(document_lines)))
        written_files += 1
    return written_files
def write_mismatch(filename, etalon, tocomp, mismatch):
    """Write a two-sheet comparison workbook: the compared data ('test')
    with mismatching cells marked cyan, and the reference data ('etalon')."""
    book = Workbook(join(SPREADSHEETS_DIR, get_filebase(filename) + '.xlsx'))
    marker = book.add_format()
    marker.set_bg_color('cyan')
    write_sheet(book.add_worksheet(name='test'), marker, *tocomp,
                cells_to_mark=mismatch)
    write_sheet(book.add_worksheet(name='etalon'), marker, *etalon)
    book.close()
def write_presentation(self, values, records, output):
    """Stream *records* to an xlsx workbook on *output*: one header row of
    *values*, then one row per record with each field written as a string.

    NOTE(review): uses the Python 2 `unicode` builtin — confirm this module
    targets Python 2 only.
    """
    book = Workbook(output, {'constant_memory': True})
    sheet = book.add_worksheet()
    for col, header in enumerate(values):
        sheet.write_string(0, col, header)
    for row_num, record in enumerate(records, 1):
        cells = [unicode(record[v]) for v in values]
        for col, text in enumerate(cells):
            sheet.write_string(row_num, col, text)
    book.close()
def dispatch(dataset):
    """Serialize a pydap *dataset* into an in-memory xlsx workbook and
    return the buffer.

    Layout: one 'Global attributes' sheet of dataset metadata, one sheet
    per one-dimensional grid, and one sheet per sequence (header row,
    data rows, then per-child metadata columns to the right).
    """
    buf = StringIO()
    wb = Workbook(buf, {'constant_memory': True,
                        'in_memory': True,
                        'default_date_format': 'yyyy/mm/dd hh:mm:ss'}
    )
    format_ = wb.add_format(FORMAT)

    # dataset metadata
    ws = wb.add_worksheet('Global attributes')
    write_metadata(ws, dataset, 0, 0, format_)

    # 1D grids
    for grid in [g for g in walk(dataset, GridType) if len(g.shape) == 1]:
        logger.debug('Grid {}'.format(grid.name))
        ws = wb.add_worksheet(grid.name)

        # headers: dimension name in column A, grid name in column B
        ws.write(0, 0, grid.dimensions[0], format_)
        ws.write(0, 1, grid.name, format_)

        # data — NOTE(review): column index 1-j puts the first yielded
        # array in column B and the second in column A; presumably
        # grid.data yields (values, dimension values) in that order —
        # confirm against the pydap data model.
        for j, data in enumerate(grid.data):
            for i, value in enumerate(numpy.asarray(data)):
                ws.write(i+1, 1-j, value)

        # add var metadata
        write_metadata(ws, grid, 0, 2, format_)

    # sequences
    for seq in walk(dataset, SequenceType):
        logger.debug('Sequence {}'.format(seq.name))
        ws = wb.add_worksheet(seq.name)

        # add header across the first row
        for j, var_ in enumerate(seq.keys()):
            ws.write(0, j, var_, format_)

        # add data in the subsequent rows
        for i, row in enumerate(seq.data):
            for j, value in enumerate(row):
                ws.write(i+1, j, value)

        # add var metadata in columns to the right of the data;
        # each child gets a merged title cell followed by its metadata.
        n = 0
        j = len(seq.keys())+1
        for child in seq.children():
            logger.debug("Child {}".format(child.name))
            ws.merge_range(n, j, n, j+1, child.name, format_)
            n = write_metadata(ws, child, n+1, j, format_)+1

    wb.close()
    return buf
def _open_worksheet(xlsx_file):
    """Yield a (workbook, worksheet) pair for *xlsx_file* and close the
    workbook afterwards.

    *xlsx_file* may be a filename or a stream object (such as a ``BytesIO``
    instance).  Intended for use inside a ``with`` statement.

    NOTE(review): this is a plain generator — confirm that a
    ``contextlib.contextmanager`` decorator is applied at the definition
    site, otherwise it cannot drive a ``with`` block.
    """
    book = Workbook(xlsx_file, {"in_memory": True})
    sheet = book.add_worksheet()
    yield book, sheet
    book.close()
def genera_e_salva(self, nome='File.xlsx', scadenza=None, ordina_fogli=True, save_to_memory=False):
    """
    Generate the spreadsheet and save it to the database (or to memory).

    :param nome: Name of the file to attach (optional, default 'File.xlsx').
    :param scadenza: Expiry date of the file. Defaults to tomorrow.
    :param ordina_fogli: Sort the worksheets alphabetically by name.
    :param save_to_memory: Write to self.output instead of the media folder.
    :return:
    """
    generatore = GeneratoreNomeFile('allegati/')
    zname = generatore(self, nome)
    self.prepara_cartelle(MEDIA_ROOT + zname)
    # Target is either the in-memory buffer or a file under MEDIA_ROOT.
    if save_to_memory:
        workbook = Workbook(self.output)
    else:
        workbook = Workbook(MEDIA_ROOT + zname)
    bold = workbook.add_format({'bold': True})
    # For each sheet
    for foglio in [x for x in self.fogli]:
        # Add the sheet
        worksheet = workbook.add_worksheet(foglio.nome)
        # Add the header row (bold)
        for col, testo in enumerate(foglio.intestazione):
            worksheet.write(0, col, str(testo), bold)
        # Add the content
        for riga, colonne in enumerate(foglio.contenuto):
            riga += 1  # Index shifted by one for the header row
            for colonna, testo in enumerate(colonne):
                # Normalize datetime/date values to display strings.
                if isinstance(testo, datetime):
                    testo = testo.strftime("%d/%m/%Y %H:%M")
                if isinstance(testo, date):
                    testo = testo.strftime("%d/%m/%Y")
                if testo == ", ":  # Drop empty ', ' placeholder fields
                    testo = ""
                worksheet.write(riga, colonna, str(testo))
    if ordina_fogli:
        workbook.worksheets_objs.sort(key=lambda x: x.name)
    workbook.close()
    if save_to_memory:
        self.output.seek(0)
    else:
        # Persist the attachment metadata on the model.
        self.file = zname
        self.nome = nome
        self.scadenza = scadenza or domani()
        self.save()
def mt_excel_files(store, case_obj, temp_excel_dir):
    """Collect MT variants and format line of a MT variant report to be
    exported in excel format

    Args:
        store(adapter.MongoAdapter)
        case_obj(models.Case)
        temp_excel_dir(os.Path): folder where the temp excel files are written to

    Returns:
        written_files(int): the number of files written to temp_excel_dir
    """
    today = datetime.datetime.now().strftime('%Y-%m-%d')
    samples = case_obj.get('individuals')

    # One shared query: all mitochondrial variants for the case.
    query = {'chrom': 'MT'}
    mt_variants = list(store.variants(case_id=case_obj['_id'], query=query,
                                      nr_of_variants=-1, sort_key='position'))

    written_files = 0
    for sample in samples:
        sample_id = sample['individual_id']
        sample_lines = export_mt_variants(variants=mt_variants,
                                          sample_id=sample_id)

        # One workbook per sample, named case.sample.date.xlsx.
        document_name = '.'.join([case_obj['display_name'], sample_id, today]) + '.xlsx'
        target = os.path.join(temp_excel_dir, document_name)
        workbook = Workbook(target)
        sheet = workbook.add_worksheet()

        # Header row, then one row per variant line.
        for col, field in enumerate(MT_EXPORT_HEADER):
            sheet.write(0, col, field)
        for row, line in enumerate(sample_lines, 1):
            for col, field in enumerate(line):
                sheet.write(row, col, field)
        workbook.close()

        if os.path.exists(target):
            written_files += 1

    return written_files
def get_query_result_excel(self, sql, excelFile, header='True'):
    '''
    Build an excel file from result set of the sql command \n
    Example:
    get query result excel select * from customer test.xlsx
    '''
    cur = self._dbconnection.cursor()
    cur.execute(sql + ';')
    rows = cur.fetchall()
    print(cur.description)
    book = Workbook(excelFile)
    sheet = book.add_worksheet()
    bold_fmt = book.add_format({'bold': True})
    # Write header column (bold), unless suppressed by header='False'.
    if header.lower() == 'true':
        column_names = [column[0] for column in cur.description]
        for i, val in enumerate(column_names):
            sheet.write(0, i, self._f(val), bold_fmt)
    # Write table data below the header row.
    for r, row in enumerate(rows):
        for c, s in enumerate(row):
            sheet.write(r + 1, c, self._f(s))
    book.close()
def generar_layout(datos, salida):
    """Write the field-layout records in *datos* to the xlsx file *salida*
    ('Layout' sheet) with striped rows, then open the file."""
    wb = Workbook(salida)
    ws = wb.add_worksheet('Layout')
    encabezados = '''
Nombre
Inicio
Longitud
Tipo de dato capturado
'''.split('\n')[1:-1]
    # Header / striping colors built as #RRGGBB hex strings.
    col_enc_bg = "#{:02x}{:02x}{:02x}".format(15, 36, 62).upper()
    col_ren2 = "#{:02x}{:02x}{:02x}".format(220, 230, 241).upper()
    format_enc = wb.add_format({'bold': True, 'font_color': 'white',
                                'bg_color': col_enc_bg})
    format_ren1 = wb.add_format({'border': 1})
    format_ren2 = wb.add_format({'border': 1, 'bg_color': col_ren2})
    # Header row
    for col, encabezado in enumerate(encabezados):
        ws.write(0, col, encabezado, format_enc)
    # Data rows, alternating the two row formats.
    # Registros2 = namedtuple('Registros', 'etiqueta nombre inicio longitud tipo_cap')
    # NOTE(review): the 'Nombre' column is filled with registro.etiqueta,
    # not registro.nombre — confirm that is intended.
    for renglon, registro in enumerate(datos, 1):
        formato = format_ren2 if renglon % 2 else format_ren1
        ws.write(renglon, 0, registro.etiqueta, formato)
        ws.write(renglon, 1, int(registro.inicio), formato)
        ws.write(renglon, 2, int(registro.longitud), formato)
        ws.write(renglon, 3, registro.tipo_cap, formato)
    # Sheet cosmetics: frozen header, autofilter, column widths, no gridlines.
    ws.freeze_panes(1, 0)
    ws.autofilter('A1:D1')
    ws.set_column(0, 0, 55)
    ws.set_column(1, 1, 8)
    ws.set_column(2, 2, 10)
    ws.set_column(3, 3, 25)
    ws.hide_gridlines(2)
    wb.close()
    click.launch(salida)
    print("Layout generado")
def medicamentos_topOrganizacionesPorPedidoExcel(request):
    """Export the 'top demanding organizations per order' statistic stored
    in the session to an xlsx attachment."""
    datos = request.session['estadistica']['excel']['datos']
    medicamento = request.session['estadistica']['excel']['medicamento']
    excel = io.BytesIO()
    workbook = Workbook(excel, {'in_memory': True})
    worksheet = workbook.add_worksheet()
    titulo = workbook.add_format({
        'font_name': 'Arial',
        'font_size': 12,
        'font_color': 'navy',
        'bold': True
    })
    encabezado = workbook.add_format({
        'font_name': 'Arial',
        'bold': True
    })
    alignLeft = workbook.add_format({
        'align': 'left',
    })
    # NOTE(review): xlsxwriter parses 'A1:B1' as just 'A1' here — if a
    # merged title cell was intended, merge_range would be needed.
    worksheet.write('A1:B1', 'Organizaciones mas demandantes del medicamento ' + medicamento + ' (por pedido)', titulo)
    worksheet.set_column('B:B', 40)
    worksheet.set_column('C:C', 20)
    worksheet.write('A2', '#', encabezado)
    worksheet.write('B2', 'Organizacion', encabezado)
    worksheet.write('C2', 'Cantidad', encabezado)
    # Data rows: rank, organization, quantity.
    for fila, registro in enumerate(datos, 2):
        worksheet.write(fila, 0, fila - 1, alignLeft)
        worksheet.write(fila, 1, registro['organizacion'], alignLeft)
        worksheet.write(fila, 2, registro['cantidad'], alignLeft)
    workbook.close()
    excel.seek(0)
    response = HttpResponse(
        excel.read(),
        content_type="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet")
    response['Content-Disposition'] = "attachment; filename=OrganizacionesMasDemandantesDeMedicamentoPorPedido.xlsx"
    return response
def write_excel(date, name, names=None, shift=1, id_col=0, chart_cell='D1', val_col=1):
    """Write the values in *date* to a 'TextTiling' worksheet in workbook
    *name* and chart them as columns.

    Args:
        date: sequence of values (one per row).
        name: output xlsx filename.
        names: optional per-value labels; when None, each value is labelled
            with its gap range "start - end" derived from *shift*.
        shift: offset used for the generated gap labels.
        id_col / val_col: worksheet columns for labels and values.
        chart_cell: anchor cell for the inserted chart.

    Fixes: `names == None` replaced with the idiomatic `names is None`;
    the manual row counter replaced with enumerate.
    """
    workbook = Workbook(name)
    sheet_name = 'TextTiling'
    worksheet = workbook.add_worksheet(sheet_name)
    n_rows = 0
    for i, value in enumerate(date):
        if names is None:
            value_name = "%d - %d" % (i + shift, i + shift + 1)
        else:
            value_name = names[i]
        worksheet.write(i, id_col, value_name)
        worksheet.write(i, val_col, value)
        n_rows = i + 1
    chart = workbook.add_chart({'type': 'column'})
    chart.add_series({
        'name': 'Cosines',
        'categories': [sheet_name, 0, id_col, n_rows - 1, id_col],
        'values': [sheet_name, 0, val_col, n_rows - 1, val_col],
    })
    chart.set_size({'width': 920, 'height': 776})
    worksheet.insert_chart(chart_cell, chart)
    workbook.close()
def pedidosAlaboratorio_topLabConMasSolicitudesPedidosExcel(request):
    """Export the 'laboratories with most requests' statistic stored in the
    session to an xlsx attachment."""
    datos = request.session['estadistica']['excel']
    excel = io.BytesIO()
    workbook = Workbook(excel, {'in_memory': True})
    worksheet = workbook.add_worksheet()
    titulo = workbook.add_format({
        'font_name': 'Arial',
        'font_size': 12,
        'font_color': 'navy',
        'bold': True
    })
    encabezado = workbook.add_format({
        'font_name': 'Arial',
        'bold': True
    })
    alignLeft = workbook.add_format({
        'align': 'left',
    })
    # NOTE(review): xlsxwriter parses 'A1:B1' as just 'A1' here — if a
    # merged title cell was intended, merge_range would be needed.
    worksheet.write('A1:B1', 'Laboratorios con mas solicitudes (por medicamento)', titulo)
    worksheet.set_column('B:B', 40)
    worksheet.set_column('C:C', 20)
    worksheet.write('A2', '#', encabezado)
    worksheet.write('B2', 'Laboratorio', encabezado)
    worksheet.write('C2', 'Cantidad', encabezado)
    # Data rows: rank, laboratory, quantity.
    for fila, registro in enumerate(datos, 2):
        worksheet.write(fila, 0, fila - 1, alignLeft)
        worksheet.write(fila, 1, registro['laboratorio'], alignLeft)
        worksheet.write(fila, 2, registro['cantidad'], alignLeft)
    workbook.close()
    excel.seek(0)
    response = HttpResponse(
        excel.read(),
        content_type="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet")
    response['Content-Disposition'] = "attachment; filename=LaboratoriosConMasSolicitudesDeMedicamentos.xlsx"
    return response
def save_as_excel(folder, vb_dict, header):
    """Save to an Excel file: *header* as the first row, then one row per
    key of *vb_dict* (key in column A, its values in the columns after)."""
    wb = Workbook(folder + "/combinedcsvs.xlsx")
    ws = wb.add_worksheet("Sheety")
    print(header)
    for col, title in enumerate(header):
        ws.write(0, col, title)
    for row, vb in enumerate(vb_dict.keys(), 1):
        ws.write(row, 0, vb)
        # NOTE(review): element 0 of each value list is deliberately(?)
        # skipped — confirm it duplicates the key or is otherwise unwanted.
        for col, item in enumerate(vb_dict[vb][1:], 1):
            ws.write(row, col, item)
    wb.close()
def xlsx_generate(file_name, sheets):
    """Write *sheets* — an iterable of (title, rows) pairs where each row is
    a dict — to the xlsx file *file_name*.

    Sheets with no rows or no headers are skipped.  Each sheet gets a bold,
    frozen header row built from the first data row's keys, followed by one
    row per dict.

    Fix: the output file is now opened in binary mode ('wb'); xlsx is a zip
    archive, and text mode ('w') corrupts it on Python 3.
    """
    log.info(' -> %s', file_name)
    with open(file_name, 'wb') as fh:
        workbook = Workbook(fh)
        header_format = workbook.add_format({
            'bold': True,
            'border': 1
        })
        for title, rows in sheets:
            if not len(rows):
                continue
            headers = get_headers(rows)
            if not len(headers):
                continue
            worksheet = workbook.add_worksheet(title)
            # Keep cell contents verbatim — no auto-conversion of strings
            # to numbers, urls, or formulas.
            worksheet.strings_to_numbers = False
            worksheet.strings_to_urls = False
            worksheet.strings_to_formulas = False
            row_idx = 0
            headers = None
            for row in rows:
                if headers is None:
                    # First data row fixes the column order for the sheet.
                    headers = row.keys()
                    for c, cell in enumerate(headers):
                        worksheet.write(row_idx, c, cell, header_format)
                    row_idx += 1
                col_idx = 0
                for cell in headers:
                    worksheet.write(row_idx, col_idx, row.get(cell))
                    col_idx += 1
                row_idx += 1
            worksheet.freeze_panes(1, 0)
        workbook.close()
def generate_change_template(request):
    """Return an xlsx template ('Updates' sheet) that users fill in with
    category/title/url changes, as an HTTP attachment.

    Fixes: corrected the misspelled 'attachement' in the Content-Disposition
    header so browsers reliably treat the response as a download.
    """
    stream = BytesIO()
    book = Workbook(stream)
    bold = book.add_format({'bold': True})

    # create sheet for user input
    sheet = book.add_worksheet('Updates')

    # set headers
    sheet.write('A1', 'category (like destID)', bold)
    sheet.write('B1', 'title (like input cat)', bold)
    sheet.write('C1', 'url (like output cat)', bold)

    rows = 400  # arbitrary number of validated input rows
    # NOTE(review): 'Valid_Categories' is never defined (the define_name
    # call below is commented out), so this dropdown validation is broken
    # in the generated file — restore the defined name or point 'source'
    # at the slug cells directly.
    sheet.data_validation(1, 1, rows, 1, {
        'validate': 'list',
        'source': 'Valid_Categories'
    })

    # set width
    sheet.set_column('A:A', 25)
    sheet.set_column('B:B', 25)
    sheet.set_column('C:C', 25)

    # Create sheet containing data for user validations
    # sheet = book.add_worksheet('Validations')
    # sheet.write('A1', 'Categories', bold)
    categories = Category.objects.all()
    for i, cat in enumerate(categories):
        sheet.write(i + 1, 0, cat.slug)
    # book.define_name('Valid_Categories', '=Validations!$A$2:$A${}'.format(1 + categories.count()))
    # sheet.set_column('A:A', 10)
    # sheet.set_column('B:B', 2)

    book.close()
    stream.seek(0)
    response = HttpResponse(
        stream,
        content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
    response['Content-Disposition'] = 'attachment; filename="page_add_template.xlsx"'
    return response
def wr_xlsx(fout_xlsx, xlsx_data, **kws):
    """Write a spreadsheet into a xlsx file.

    *xlsx_data* is a sequence of namedtuples; its first element's _fields
    provide the default headers.  Recognized kws: 'fld2col_widths', 'hdrs',
    plus whatever _wrxlsxdata consumes.
    """
    if not xlsx_data:
        sys.stdout.write(" 0 items. NOT WRITING {}\n".format(fout_xlsx))
        return
    from xlsxwriter import Workbook
    book = Workbook(fout_xlsx)
    sheet = book.add_worksheet()
    # Cell formatting
    fmt_hdr = book.add_format({'top':1, 'bottom':1, 'left':0, 'right':0, 'bold':True})
    fmt_txt = book.add_format({'border':0})
    flds = xlsx_data[0]._fields
    if 'fld2col_widths' in kws:
        _set_xlsx_colwidths(sheet, kws['fld2col_widths'], flds)
    # Print header (caller may override the field names via 'hdrs').
    for col_idx, hdr in enumerate(kws.get('hdrs', flds)):
        sheet.write(0, col_idx, hdr, fmt_hdr)
    row_data_start = 1
    # Print data
    row_idx = _wrxlsxdata(xlsx_data, row_data_start, sheet, fmt_txt, **kws)
    book.close()
    sys.stdout.write(" {:>5} items WROTE: {}\n".format(row_idx-row_data_start, fout_xlsx))
def toExcel(self, outFileName):
    """
    Write the distribution fitter results to an Excel spreadsheet

    :param outFileName: output spreadsheet name
    :type outFileName: str
    :return: Nothing
    :rtype: None

    NOTE(review): uses dict.iteritems() and itertools.izip — Python 2 only;
    confirm this module is not expected to run under Python 3.
    """
    workbook = Workbook(outFileName, {'constant_memory': True})
    workbook.use_zip64()  # allow large size Excels just in case
    wks = workbook.add_worksheet('Distribution Fitting')
    hdrFmt = workbook.add_format({'bold' : True, 'underline' : True,
                                  'align' : 'center'})
    # Number formats for the result columns: plain decimal and percent.
    resultFormats = [workbook.add_format({'num_format' : fmtStr}) \
                     for fmtStr in ['0.000000', '0.0000%']]
    row = 0
    # Column widths; columns B and C carry default number formats.
    wks.set_column(0, 0, 11)
    wks.set_column(1, 1, 8, resultFormats[0])
    wks.set_column(2, 2, 10.6, resultFormats[1])
    # Header row.
    for col, headerName in enumerate(self.getHeaderList()):
        wks.write_string(row, col, headerName, hdrFmt)
    # One row per fitted distribution: name, formatted results, then the
    # raw parameter values.  Note that `col` is threaded through the two
    # enumerate() calls so parameters start right after the results.
    for distrName, (results, params) in self.result.iteritems():
        row += 1
        col = 0
        wks.write_string(row, col, distrName)
        for col, (result, outFormat) in \
                enumerate(itertools.izip(results, resultFormats), col+1):
            wks.write_number(row, col, result, outFormat)
        for col, paramValue in enumerate(params, col+1):
            wks.write_number(row, col, paramValue)
    workbook.close()
def make_xlsx_response(sheets, basename):
    """Render *sheets* (mapping of title -> list of row dicts) as an xlsx
    download Response named <basename>.xlsx.

    Empty sheets are skipped; each sheet gets a bold, frozen header row
    built from the first row's keys.

    Fix: the buffer is now an io.BytesIO — xlsx output is binary, and the
    previous StringIO buffer breaks under Python 3.
    """
    from io import BytesIO
    output = BytesIO()
    workbook = Workbook(output)
    header_format = workbook.add_format({
        'bold': True,
        'border': 1
    })
    for title, rows in sheets.items():
        if not len(rows):
            continue
        worksheet = workbook.add_worksheet(title)
        row_idx = 0
        headers = None
        for row in rows:
            if headers is None:
                # First data row fixes the column order for the sheet.
                headers = row.keys()
                for c, cell in enumerate(headers):
                    worksheet.write(row_idx, c, cell, header_format)
                row_idx += 1
            col_idx = 0
            for cell in headers:
                worksheet.write(row_idx, col_idx, row.get(cell))
                col_idx += 1
            row_idx += 1
        worksheet.freeze_panes(1, 0)
    workbook.close()
    output.seek(0)
    fn = basename + '.xlsx'
    headers = {'Content-Disposition': 'attachment; filename=%s' % fn}
    return Response(output, mimetype=XLSX_MIME, headers=headers)
class _XlsxWriter(ExcelWriter):
    """pandas ExcelWriter engine backed by the xlsxwriter package.

    Write-only: append mode is rejected because xlsxwriter cannot open an
    existing workbook.
    """
    engine = "xlsxwriter"
    supported_extensions = (".xlsx", )

    def __init__(
        self,
        path,
        engine=None,
        date_format=None,
        datetime_format=None,
        mode="w",
        **engine_kwargs,
    ):
        # Use the xlsxwriter module as the Excel writer.
        from xlsxwriter import Workbook

        if mode == "a":
            raise ValueError("Append mode is not supported with xlsxwriter!")

        super().__init__(
            path,
            engine=engine,
            date_format=date_format,
            datetime_format=datetime_format,
            mode=mode,
            **engine_kwargs,
        )

        # The underlying xlsxwriter Workbook; extra engine kwargs are
        # forwarded to it untouched.
        self.book = Workbook(path, **engine_kwargs)

    def save(self):
        """
        Save workbook to disk.
        """
        return self.book.close()

    def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0,
                    freeze_panes=None):
        # Write the frame cells using xlsxwriter.
        sheet_name = self._get_sheet_name(sheet_name)

        # Reuse an existing worksheet for this sheet name, else create one.
        if sheet_name in self.sheets:
            wks = self.sheets[sheet_name]
        else:
            wks = self.book.add_worksheet(sheet_name)
            self.sheets[sheet_name] = wks

        # Cache of xlsxwriter Format objects keyed by the JSON-serialized
        # cell style (plus number format), so identical styles are added
        # to the workbook only once.
        style_dict = {"null": None}

        if _validate_freeze_panes(freeze_panes):
            wks.freeze_panes(*(freeze_panes))

        for cell in cells:
            val, fmt = self._value_with_fmt(cell.val)

            stylekey = json.dumps(cell.style)
            if fmt:
                stylekey += fmt

            if stylekey in style_dict:
                style = style_dict[stylekey]
            else:
                style = self.book.add_format(
                    _XlsxStyler.convert(cell.style, fmt))
                style_dict[stylekey] = style

            # Merged cells carry both a start and an end coordinate.
            if cell.mergestart is not None and cell.mergeend is not None:
                wks.merge_range(
                    startrow + cell.row,
                    startcol + cell.col,
                    startrow + cell.mergestart,
                    startcol + cell.mergeend,
                    val,
                    style,
                )
            else:
                wks.write(startrow + cell.row, startcol + cell.col, val,
                          style)
if integ[col] >= 0: crea[spec_num] += integ[col] else: dest[spec_num] += abs(integ[col]) for col, (spec_num, rxn_num) in useful_columns.items(): if integ[col] >= 0: percents[rxn_num, spec_num] = integ[col]/crea[spec_num]*100 else: percents[rxn_num, spec_num] = integ[col]/dest[spec_num]*100 wb = Workbook(percent_file) ws = wb.add_worksheet() for col, spec in enumerate(species): col += 2 ws.write(0, col, spec) for rxn_num, reaction in reactions.items(): row = rxn_num + 1 ws.write(row, 0, row) ws.write(row, 1, reaction) it = np.nditer(percents, flags=['multi_index']) while not it.finished: ws.write(it.multi_index[0] + 1, it.multi_index[1] + 2, it[0]) it.iternext() wb.close()
reader = csv.reader(f, delimiter=',') for row in reader: for y in range(len(row)): try: worksheet2.write(x,y,row[y]) except: print() x=x+1 #Save and create new excel file #workbook.close() #workbook =Workbook('OutputFCL.xlsx') worksheet3 = workbook.add_worksheet('Freight') x=0 with open('/var/www/html/Info/AIR/FreightChargesAIR.csv', newline='\n') as f: reader = csv.reader(f, delimiter=',') for row in reader: for y in range(len(row)): try: worksheet3.write(x,y,row[y]) except: print() x=x+1 #Save and create new excel file workbook.close()
async def post(self): """ :return: """ time = datetime.datetime.now() export_time = datetime2str(time, date_format='%Y-%m-%d %H:%M:%S') order = self.get_argument('order', '') chart_name = self.get_argument('chart_name', '') # 答题活跃度的数据 data_dict = self.get_argument('data', '') data_list = [] condition_title_list = [] # 没有筛选条件的总体活跃度 if data_dict: data_dict = json.loads(data_dict) condition_title_list = list(data_dict.keys()) data_list = list(data_dict.values()) # 有筛选条件的数据 if '总体活跃度' in condition_title_list: position = condition_title_list.index('总体活跃度') else: position = data_list.index(max(sum(data_list))) if len(data_list) > position: condition_title_list.remove(condition_title_list[position]) # 可管理的省份名称 manage_region_title_list = [] manage_region_code_list = self.current_user.manage_region_code_list if manage_region_code_list: for manage_region_code in manage_region_code_list: manage_region_province = await AdministrativeDivision.find_one( { 'code': manage_region_code, 'record_flag': 1, 'parent_code': None }) if manage_region_province: manage_region_title_list.append( manage_region_province.title) else: manage_region_city = await AdministrativeDivision.find_one( { 'code': manage_region_code, 'record_flag': 1 }) province = await manage_region_city.parent manage_region_title_list.append(province.title) try: output = BytesIO() workbook = Workbook(output, {'in_memory': True}) title_format = workbook.add_format({ 'font_size': 12, 'bold': '1', 'valign': 'vcenter', 'align': 'center', 'font_name': 'Microsoft YaHei', 'border': 1 }) data_format = workbook.add_format({ 'valign': 'vcenter', 'align': 'left', 'font_name': 'Microsoft YaHei', 'border': 1 }) data_center_format = workbook.add_format({ 'valign': 'vcenter', 'align': 'center', 'font_name': 'Microsoft YaHei', 'border': 1 }) if order == "1" and chart_name: pass # 公民科学素质学习答题趋势统计 if order == '2' and chart_name: answer_tendency_date = self.get_argument( 'answer_tendency_date', '') answer_tendency_data = self.get_argument( 
'answer_tendency_data', '') answer_tendency_data = json.loads(answer_tendency_data) answer_tendency_date = json.loads(answer_tendency_date) answer_tendency_date = deal_with_data(answer_tendency_date) if answer_tendency_date and answer_tendency_data: worksheet = workbook.add_worksheet(name=chart_name) worksheet.merge_range(1, 2, 1, 5, '导出时间' + export_time, cell_format=title_format) worksheet.merge_range(0, 0, 0, 2, chart_name, cell_format=title_format) worksheet.merge_range(1, 0, 1, 1, '筛选条件', cell_format=title_format) worksheet.merge_range(2, 0, 3, 1, '总体答题次数', cell_format=title_format) worksheet.write_string(2, 2, '日期', cell_format=data_format) worksheet.write_string(3, 2, '答题次数', cell_format=data_format) answer_tendency_title = list(answer_tendency_data.keys()) answer_data_list = list(answer_tendency_data.values()) # 有筛选条件的数据 if '总体答题次数' in answer_tendency_title: position = answer_tendency_title.index('总体答题次数') else: position = answer_data_list.index( max(sum(answer_data_list))) if len(answer_data_list) > position: answer_tendency_title.remove( answer_tendency_title[position]) for index, date in enumerate(answer_tendency_date): worksheet.write_string(2, 3 + index, date) if '总体答题次数' in list(answer_tendency_data.keys()): worksheet.write_string( 3, 3 + index, str(answer_tendency_data['总体答题次数'][index]), cell_format=data_center_format) else: max_data_list = max(sum(answer_data_list)) worksheet.write_string( 3, 2 + order, max_data_list[index - 1], cell_format=data_center_format) if answer_tendency_title: # 有筛选条件得数据写入到excel for index, condition_title in enumerate( answer_tendency_title): worksheet.merge_range(2 * (index + 2) + index + 1, 0, 2 * (index + 2) + 2 + index, 1, condition_title, cell_format=title_format) worksheet.write_string(2 * (index + 2) + index + 1, 2, '日期', cell_format=data_format) worksheet.write_string(2 * (index + 2) + index + 2, 2, '答题次数', cell_format=data_format) for condition_index, data in enumerate( answer_tendency_data[condition_title]): 
worksheet.write_string(2 * (index + 2) + index + 2, 2 + condition_index + 1, str(data), cell_format=data_format) worksheet.write_string( 2 * (index + 2) + index + 1, 2 + condition_index + 1, answer_tendency_date[condition_index], cell_format=data_format) if order == '3' and chart_name and data_dict: # 活跃度的导出excel worksheet = workbook.add_worksheet(name=chart_name) for order in range(1, 31): worksheet.write_string(2, 2 + order, str(order), cell_format=data_center_format) if '总体活跃度' in list(data_dict.keys()): worksheet.write_string(3, 2 + order, data_dict['总体活跃度'][order - 1] + '%', cell_format=data_center_format) else: max_data_list = max(sum(data_list)) worksheet.write_string(3, 2 + order, max_data_list[order - 1] + '%', cell_format=data_center_format) worksheet.merge_range(1, 2, 1, 5, '导出时间' + export_time, cell_format=title_format) worksheet.merge_range(0, 0, 0, 2, chart_name, cell_format=title_format) worksheet.merge_range(1, 0, 1, 1, '筛选条件', cell_format=title_format) worksheet.merge_range(2, 0, 3, 1, '总体活跃度(%)', cell_format=title_format) worksheet.write_string(2, 2, '活跃天数', cell_format=data_format) worksheet.write_string(3, 2, '活跃度(%)', cell_format=data_format) if condition_title_list: # 有筛选条件得数据写入到excel for index, condition_title in enumerate( condition_title_list): worksheet.merge_range(2 * (index + 2) + index + 1, 0, 2 * (index + 2) + 2 + index, 1, condition_title, cell_format=title_format) worksheet.write_string(2 * (index + 2) + index + 1, 2, '活跃天数', cell_format=data_format) for order in range(1, 31): worksheet.write_string(2 * (index + 2) + index + 1, 2 + order, str(order), cell_format=data_format) worksheet.write_string(2 * (index + 2) + index + 2, 2, '活跃度(%)', cell_format=data_format) for condition_index, data in enumerate( data_dict[condition_title]): worksheet.write_string(2 * (index + 2) + index + 2, 2 + condition_index + 1, data, cell_format=data_format) # 每日参与top5的导出数据 if order == '4' and chart_name: # 每日参与top_5的数据 stat_category = 
self.get_argument('stat_category', '') top_five_data_list = self.get_argument('top_five_data', '') if top_five_data_list: top_five_data_list = json.loads(top_five_data_list) date_list = self.get_argument('date', '') if date_list: date_list = json.loads(date_list) date_list = deal_with_data(date_list) if stat_category and top_five_data_list and date_list: data_series_dict, province_and_city_dict = deal_with_data_excel( date_list, top_five_data_list) # {'江苏': ['南京', '苏州‘], '浙江':['杭州']} total_data_dict = {} # 某个省下面的所有的市 报表中有数据的市 city_title_list = [] # 报表中省的列表 province_title_list = [] # 省和市的列表 total_title = [] show_name_list = [] show_data_list = [] # 需要添加undefined的省份 need_append_undifend_province_list = [] for top_five_data in top_five_data_list: temple_data = [] temple_name = [] for index, data in enumerate(top_five_data): total_title.append(data['name']) if data['name'] and data['value']: temple_name.append( {date_list[index]: data['name']}) temple_data.append( {date_list[index]: data['value']}) show_name_list.append(temple_name) show_data_list.append(temple_data) total_title = [title for title in total_title if title] for total in total_title: if ' ' in total: province_title_list.append(total.split(' ')[0]) city_title_list.append(total.split(' ')[1]) if total.split(' ')[1] == 'undefined': need_append_undifend_province_list.append( total.split(' ')[0]) province_title_list = list(set(province_title_list)) city_title_list = list( set([city for city in city_title_list if city])) for province_title in province_title_list: total_data_dict[province_title] = city_title_list province = await AdministrativeDivision.find_one({ 'title': province_title, 'parent_code': None }) if province: belong_provice_city_title_list = await AdministrativeDivision.distinct( 'title', {'parent_code': province.code}) total_data_dict[province_title] = list( set(city_title_list) & set(belong_provice_city_title_list)) total_data_dict[province_title] = list( set(city_title_list) & 
set(belong_provice_city_title_list)) # 各个省的市的个数 length_list = [] for index, city_title in enumerate( list(total_data_dict.values())): if list(total_data_dict.keys() )[index] in need_append_undifend_province_list: total_data_dict.get( list(total_data_dict.keys())[index]).append( 'undefined') for index, city_title in enumerate( list(total_data_dict.values())): if city_title: length_list.append(len(city_title)) province_length = sum(length_list) + len( list(total_data_dict.values())) if province_length == 0: province_length = 10 worksheet = workbook.add_worksheet(name=chart_name + '(' + stat_category + ')') worksheet.merge_range(0, 0, province_length, 0, '每日参与' + stat_category, cell_format=data_format) worksheet.merge_range(1, 1, province_length, 1, '导出时间: ' + export_time, cell_format=data_format) worksheet.merge_range(0, 2, 0, 4, '日期', cell_format=data_center_format) for index, date in enumerate(date_list): worksheet.write_string(0, 5 + index, date, cell_format=data_format) worksheet.merge_range(1, 2, province_length, 2, '省份', cell_format=data_center_format) city_map = {} province_map = {} if total_data_dict: choice_city_title_list = list(total_data_dict.values()) for index, data in enumerate(choice_city_title_list): if index == 0: worksheet.merge_range( 1, 3, 1 + len(data), 3, list(total_data_dict.keys())[index], cell_format=data_center_format) else: worksheet.merge_range( 1 + sum(length_list[:index]) + index, 3, sum(length_list[:index + 1]) + index + 1, 3, list(total_data_dict.keys())[index], cell_format=data_center_format) if index == 0: for city_index, city in enumerate(data): if city == 'undefined': city = '_' worksheet.write_string( 1, 4, list(total_data_dict.keys())[index], cell_format=data_center_format) worksheet.write_string( 2 + city_index, 4, city, cell_format=data_center_format) worksheet.write_string( 1, 5, '6666', cell_format=data_format) city_map[city] = 2 + city_index province_map[list( total_data_dict.keys())[index]] = 1 Position(city, 2 + 
city_index, 4) Position( list(total_data_dict.keys())[index], 1, 4) else: for city_index, city in enumerate(data): if city == 'undefined': city = '_' worksheet.write_string( sum(length_list[:index]) + index + 1, 4, list(total_data_dict.keys())[index], cell_format=data_center_format) worksheet.write_string( sum(length_list[:index]) + index + 2 + city_index, 4, city, cell_format=data_center_format) city_map[city] = sum( length_list[:index] ) + 2 + index + city_index province_map[list( total_data_dict.keys())[index]] = sum( length_list[:index]) + index + 1 Position( city, sum(length_list[:index]) + 2 + index + city_index, 4) Position( list(total_data_dict.keys())[index], sum(length_list[:index]) + index + 1, 4) for index, data in enumerate(choice_city_title_list): if index == 0: for key, value in data_series_dict.items(): if key.split(' ')[0] == 'undefined': position = Position( key.split(' ')[0], city_map['_'], 4) else: position = Position( key.split(' ')[0], city_map[key.split(' ')[0]], 4) if position: order = date_list.index( key.split(' ')[1]) worksheet.write_number( position.row, 5 + order, int(value)) else: for key, value in data_series_dict.items(): if key.split(' ')[0] == 'undefined': position = Position( key.split(' ')[0], city_map['_'], 4) else: position = Position( key.split(' ')[0], city_map[key.split(' ')[0]], 4) if position: order = date_list.index( key.split(' ')[1]) worksheet.write_number( position.row, 5 + order, int(value)) for order, date in enumerate(date_list): for index, value in enumerate( list(province_map.values())): if index != len(list( province_map.values())) - 1: first = value + 2 end = list(province_map.values())[index + 1] else: first = list( province_map.values())[index] + 2 end = province_length + 1 col = 5 + order col = convert(col) first = col + str(first) end = col + str(end) worksheet.write_formula( value, 5 + order, '=SUM(' + first + ':' + end + ')') # 学习近况的导出数据 if order == '1' and chart_name: # 取前一天凌晨12点之前的数据 time_match = 
get_yesterday() time_match_stage = MatchStage( {'updated_dt': { '$lt': time_match }}) province_code_list, city_code_list, _ = await do_different_administrative_division2( self.current_user.manage_region_code_list) month_stage_list = [] member_stage_list = [] accuracy_stage_list = [] if province_code_list: month_stage_list.append( MatchStage( {'province_code': { '$in': province_code_list }})) member_stage_list.append( MatchStage( {'province_code': { '$in': province_code_list }})) accuracy_stage_list.append( MatchStage( {'province_code': { '$in': province_code_list }})) if city_code_list: month_stage_list.append( MatchStage({'city_code': { '$in': city_code_list }})) member_stage_list.append( MatchStage({'city_code': { '$in': city_code_list }})) accuracy_stage_list.append( MatchStage({'city_code': { '$in': city_code_list }})) add_fields_stage = AddFieldsStage( t_accuracy={ '$cond': { 'if': { '$eq': ['$t_total', 0] }, 'then': 0, 'else': { '$divide': ['$t_correct', '$t_total'] } } }) member_stage_list.append( MatchStage({'status': STATUS_USER_ACTIVE})) month_group_stage = GroupStage( { 'province_code': '$province_code', 'created_dt': { "$dateToString": { "format": "%Y-%m", "date": "$created_dt" } } }, sum={'$sum': '$learn_times'}) lookup_stage = LookupStage(AdministrativeDivision, '_id', 'post_code', 'ad_list') member_group_stage = GroupStage( { 'province_code': '$province_code', 'created_dt': { "$dateToString": { "format": "%Y-%m", "date": "$created_dt" } } }, sum={'$sum': 1}) accuracy_group_stage = GroupStage( { 'province_code': '$province_code', 'created_dt': { "$dateToString": { "format": "%Y-%m", "date": "$created_dt" } } }, t_total={'$sum': '$total'}, t_correct={'$sum': '$correct'}) group_stage = GroupStage('province_code', t_total={'$sum': '$total'}, t_correct={'$sum': '$correct'}) month_sort_stage = SortStage([('_id.created_dt', ASC)]) # 次数 month_stage_list.extend([ time_match_stage, month_group_stage, lookup_stage, month_sort_stage ]) # 人数 
member_stage_list.extend([ time_match_stage, member_group_stage, lookup_stage, month_sort_stage ]) accuracy_province_stage_list = copy.deepcopy( accuracy_stage_list) accuracy_province_stage_list.extend([ time_match_stage, group_stage, lookup_stage, add_fields_stage, month_sort_stage ]) # 省和月份共同筛选的正确率 accuracy_stage_list.extend([ time_match_stage, accuracy_group_stage, lookup_stage, add_fields_stage, month_sort_stage ]) # 只有省的正确率 month_province_list = MemberLearningDayStatistics.aggregate( month_stage_list) member_province_list = Member.aggregate(member_stage_list) accuracy_province_list = MemberSubjectStatistics.aggregate( accuracy_stage_list) total_accuracy = MemberSubjectStatistics.aggregate( accuracy_province_stage_list) month_province_dict = {} member_province_dict = {} accuracy_province_dict = {} date_list = [] province_title_list = [] province_map = {} member_date_list = [] accuracy_date_list = [] # 次数 while await month_province_list.fetch_next: month_province = month_province_list.next_object() if month_province: province_dt = month_province.id if month_province.id else '000000' province = await AdministrativeDivision.find_one({ 'code': province_dt.get('province_code'), 'record_flag': 1, 'parent_code': None }) if province_dt.get('created_dt') not in date_list: date_list.append(province_dt.get('created_dt')) province_title = '' if province: province_title = province.title province_title_list.append(province_title) province_title_list = list(set(province_title_list)) dt = province_dt.get('created_dt') month_province_dict[province_title + ' ' + dt] = month_province.sum # 人数 while await member_province_list.fetch_next: member_province = member_province_list.next_object() if member_province: member_province_id = member_province.id if member_province.id else '' province = await AdministrativeDivision.find_one({ 'code': member_province_id.get('province_code'), 'record_flag': 1, 'parent_code': None }) province_title = '' if province: province_title = province.title 
dt = member_province_id.get('created_dt') if member_province_id.get( 'created_dt') not in member_date_list: member_date_list.append( member_province_id.get('created_dt')) member_province_dict[province_title + ' ' + dt] = member_province.sum # 正确率 while await accuracy_province_list.fetch_next: accuracy_province = accuracy_province_list.next_object() if accuracy_province: accuracy_province_id = accuracy_province.id if accuracy_province.id else '' province = await AdministrativeDivision.find_one({ 'code': accuracy_province_id.get('province_code'), 'record_flag': 1, 'parent_code': None }) province_title = '' if province: province_title = province.title dt = accuracy_province_id.get('created_dt') if accuracy_province_id.get( 'created_dt') not in accuracy_date_list: accuracy_date_list.append( accuracy_province_id.get('created_dt')) if accuracy_province.t_total == 0: accuracy_province_dict[province_title + ' ' + dt] = 0 else: accuracy_province_dict[ province_title + ' ' + dt] = (accuracy_province.t_correct / accuracy_province.t_total) * 100 province_dict = {} # 总的题目 total_quantity_list = [] # 总的答对题目 correct_quantity_list = [] # 总的正确率 while await total_accuracy.fetch_next: province_stat = total_accuracy.next_object() if province_stat: province_code = province_stat.id if province_stat.id else '000000' total = province_stat.t_total if province_stat.t_total else 0 correct = province_stat.t_correct if province_stat.t_correct else 0 province = await AdministrativeDivision.find_one({ 'code': province_code, 'record_flag': 1, 'parent_code': None }) province_title = '' if province: province_title = province.title province_dict[province_title] = round( correct / total * 100 if total > 0 else 0, 2) total_quantity_list.append(total) correct_quantity_list.append(correct) # 次数的sheet print(date_list) worksheet = workbook.add_worksheet(name='次数') worksheet.merge_range(0, 0, 0, len(date_list) + 1, '公民参与科学素质学习状况', cell_format=title_format) worksheet.write_string(1, 0, '已累计次数', 
cell_format=data_center_format) worksheet.merge_range(1, 2, 1, len(date_list) + 1, '导出时间:' + export_time, cell_format=data_center_format) worksheet.merge_range(2, 0, 3, 0, '省份', cell_format=data_center_format) worksheet.merge_range(2, 1, 3, 1, '人数汇总(人)', cell_format=data_center_format) worksheet.merge_range(2, 2, 2, 6, '每月新增人数(人)', cell_format=data_center_format) insert_excel(date_list, worksheet, data_center_format, province_title_list, province_map, month_province_dict) # 人数的sheet worksheet = workbook.add_worksheet(name='人数') worksheet.merge_range(0, 0, 0, len(member_date_list) + 1, '公民参与科学素质学习状况', cell_format=title_format) worksheet.write_string(1, 0, '已累计人数', cell_format=data_center_format) worksheet.merge_range(1, 2, 1, len(member_date_list) + 1, '导出时间:' + export_time, cell_format=data_center_format) worksheet.merge_range(2, 0, 3, 0, '省份', cell_format=data_center_format) worksheet.merge_range(2, 1, 3, 1, '人数汇总(人/次)', cell_format=data_center_format) worksheet.merge_range(2, 2, 2, 6, '每月新增人数(人/次)', cell_format=data_center_format) insert_excel(member_date_list, worksheet, data_center_format, province_title_list, province_map, member_province_dict) # 正确率的sheet worksheet = workbook.add_worksheet(name='正确率') total_province_accuracy = round( sum(correct_quantity_list) / sum(total_quantity_list) * 100, 2) worksheet.merge_range(0, 0, 0, len(date_list) + 1, '公民参与科学素质学习状况', cell_format=title_format) worksheet.merge_range(1, 0, 1, 1, '总体正确率' + str(total_province_accuracy) + '%', cell_format=data_center_format) worksheet.merge_range(1, 2, 1, len(date_list) + 1, '导出时间:' + export_time, cell_format=data_center_format) worksheet.merge_range(2, 0, 3, 0, '省份', cell_format=data_center_format) worksheet.merge_range(2, 1, 3, 1, '正确率', cell_format=data_center_format) worksheet.merge_range(2, 2, 2, 6, '每月正确率波动(%)', cell_format=data_center_format) for index, date in enumerate(accuracy_date_list): worksheet.write_string(3, 2 + index, date, cell_format=data_center_format) for index, 
province_title in enumerate(province_title_list): worksheet.write_string(4 + index, 0, province_title, cell_format=data_center_format) worksheet.write_string(4 + index, 1, str(province_dict[province_title]), cell_format=data_center_format) province_map[province_title] = 4 + index for month_province, value in accuracy_province_dict.items(): value = round(value, 2) position = Position( month_province.split(' ')[0], province_map[month_province.split(' ')[0]], 0) order = accuracy_date_list.index( month_province.split(' ')[1]) worksheet.write_string(position.row, 2 + order, str(value)) workbook.close() self.set_header( 'Content-Type', 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet' ) self.set_header( 'Content-Disposition', "attachment;filename*=utf-8''{}.xlsx".format( quote(chart_name.encode('utf-8')))) self.write(output.getvalue()) self.finish() except Exception: logger.error(traceback.format_exc())
def total_personas_no_ubicadas_provincias_cierre_mes(request): start_time = time.time() anno = datetime.today().year mes = datetime.today().month response = HttpResponse( content_type= 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet') response[ 'Content-Disposition'] = "attachment; filename=Total_de_personas_no_ubicadas_por_provincias._(%s).xlsx" % ( anno) book = Workbook(response, {'in_memory': True}) worksheet_data = book.add_worksheet("Reporte 1") formato = book.add_format({'bold': True, 'border': 1}) formato2 = book.add_format({'border': 1}) worksheet_data.write("A1", "Provincias", formato) worksheet_data.write("B1", "Controlados", formato) worksheet_data.write("C1", "Mujeres controladas", formato) worksheet_data.write("D1", "Jovenes controlados", formato) worksheet_data.write("E1", "No ubicados", formato) worksheet_data.write("F1", "Mujeres no ubicadas", formato) worksheet_data.write("G1", "Jovenes no ubicados", formato) fuentes_procedencia = FuenteProcedencia.objects.filter( activo=True).order_by('id') cantidad_fuentes = fuentes_procedencia.count() indice = 7 total_arriba = indice + cantidad_fuentes worksheet_data.write(0, total_arriba, "Total", formato) for fuente in fuentes_procedencia: worksheet_data.write(0, indice, fuente.nombre, formato) indice = indice + 1 worksheet_data.set_column("A:A", 17) worksheet_data.set_column("B:B", 11) worksheet_data.set_column("C:C", 10) worksheet_data.set_column("D:D", 10) worksheet_data.set_column("E:E", 10) worksheet_data.set_column("F:F", 10) worksheet_data.set_column("G:G", 10) provincias = Provincia.objects.all() arr_provincias = [] for p in provincias: arr_provincias.append(p.nombre) worksheet_data.write_column(1, 0, arr_provincias, formato2) cantidad_provincias = arr_provincias.__len__() indice_total = cantidad_provincias + 1 worksheet_data.write(indice_total, 0, "Total", formato) licenciados_sma = LicenciadosSMA.objects.filter(activo=True) query = """SELECT id FROM public."SGMGU_licenciadossma" t 
where date_part('year', t.fecha_registro)=""" + unicode( anno) + """ AND date_part('month', t.fecha_registro)=""" + unicode( mes) + """;""" resultado_query = LicenciadosSMA.objects.raw(query) ids_listado = [persona.id for persona in resultado_query] licenciados_sma = licenciados_sma.exclude(id__in=ids_listado) egresados_ep = EgresadosEstablecimientosPenitenciarios.objects.filter( fuente_procedencia_id=2, activo=True) query = """SELECT id FROM public."SGMGU_egresadosestablecimientospenitenciarios" t where date_part('year', t.fecha_registro)=""" + unicode( anno) + """ AND date_part('month', t.fecha_registro)=""" + unicode( mes) + """ AND t.fuente_procedencia_id = 2;""" resultado_query = EgresadosEstablecimientosPenitenciarios.objects.raw( query) ids_listado = [persona.id for persona in resultado_query] egresados_ep = egresados_ep.exclude(id__in=ids_listado) sancionados = EgresadosEstablecimientosPenitenciarios.objects.filter( fuente_procedencia_id=3, activo=True) query = """SELECT id FROM public."SGMGU_egresadosestablecimientospenitenciarios" t where date_part('year', t.fecha_registro)=""" + unicode( anno) + """ AND date_part('month', t.fecha_registro)=""" + unicode( mes) + """ AND t.fuente_procedencia_id = 3;""" resultado_query = EgresadosEstablecimientosPenitenciarios.objects.raw( query) ids_listado = [persona.id for persona in resultado_query] sancionados = sancionados.exclude(id__in=ids_listado) desvinculados = Desvinculado.objects.filter(activo=True) query = """SELECT id FROM public."SGMGU_desvinculado" t where date_part('year', t.fecha_registro)=""" + unicode( anno) + """ AND date_part('month', t.fecha_registro)=""" + unicode( mes) + """;""" resultado_query = Desvinculado.objects.raw(query) ids_listado = [persona.id for persona in resultado_query] desvinculados = desvinculados.exclude(id__in=ids_listado) tecnicos_medio = TMedioOCalificadoEOficio.objects.filter( fuente_procedencia_id=6, activo=True) query = """SELECT id FROM 
public."SGMGU_tmedioocalificadoeoficio" t where date_part('year', t.fecha_registro)=""" + unicode( anno) + """ AND date_part('month', t.fecha_registro)=""" + unicode( mes) + """ AND t.fuente_procedencia_id = 6;""" resultado_query = TMedioOCalificadoEOficio.objects.raw(query) ids_listado = [persona.id for persona in resultado_query] tecnicos_medio = tecnicos_medio.exclude(id__in=ids_listado) obreros_calificados = TMedioOCalificadoEOficio.objects.filter( fuente_procedencia_id=7, activo=True) query = """SELECT id FROM public."SGMGU_tmedioocalificadoeoficio" t where date_part('year', t.fecha_registro)=""" + unicode( anno) + """ AND date_part('month', t.fecha_registro)=""" + unicode( mes) + """ AND t.fuente_procedencia_id = 7;""" resultado_query = TMedioOCalificadoEOficio.objects.raw(query) ids_listado = [persona.id for persona in resultado_query] obreros_calificados = obreros_calificados.exclude(id__in=ids_listado) escuelas_oficio = TMedioOCalificadoEOficio.objects.filter( fuente_procedencia_id=8, activo=True) query = """SELECT id FROM public."SGMGU_tmedioocalificadoeoficio" t where date_part('year', t.fecha_registro)=""" + unicode( anno) + """ AND date_part('month', t.fecha_registro)=""" + unicode( mes) + """ AND t.fuente_procedencia_id = 8;""" resultado_query = TMedioOCalificadoEOficio.objects.raw(query) ids_listado = [persona.id for persona in resultado_query] escuelas_oficio = escuelas_oficio.exclude(id__in=ids_listado) egresados_escuelas_especiales = EgresadosEscuelasEspeciales.objects.filter( activo=True) query = """SELECT id FROM public."SGMGU_egresadosescuelasespeciales" t where date_part('year', t.fecha_registro)=""" + unicode( anno) + """ AND date_part('month', t.fecha_registro)=""" + unicode( mes) + """;""" resultado_query = EgresadosEscuelasEspeciales.objects.raw(query) ids_listado = [persona.id for persona in resultado_query] egresados_escuelas_especiales = egresados_escuelas_especiales.exclude( id__in=ids_listado) egresados_escuelas_conducta = 
EgresadosEscuelasConducta.objects.filter( activo=True) query = """SELECT id FROM public."SGMGU_egresadosescuelasconducta" t where date_part('year', t.fecha_registro)=""" + unicode( anno) + """ AND date_part('month', t.fecha_registro)=""" + unicode( mes) + """;""" resultado_query = EgresadosEscuelasConducta.objects.raw(query) ids_listado = [persona.id for persona in resultado_query] egresados_escuelas_conducta = egresados_escuelas_conducta.exclude( id__in=ids_listado) egresados_efi = EgresadosEFI.objects.filter(activo=True) query = """SELECT id FROM public."SGMGU_egresadosefi" t where date_part('year', t.fecha_registro)=""" + unicode( anno) + """ AND date_part('month', t.fecha_registro)=""" + unicode( mes) + """;""" resultado_query = EgresadosEFI.objects.raw(query) ids_listado = [persona.id for persona in resultado_query] egresados_efi = egresados_efi.exclude(id__in=ids_listado) menores_incapacitados = Menores.objects.filter(fuente_procedencia_id=12, activo=True) query = """SELECT id FROM public."SGMGU_menores" t where date_part('year', t.fecha_registro)=""" + unicode( anno) + """ AND date_part('month', t.fecha_registro)=""" + unicode( mes) + """ AND t.fuente_procedencia_id = 12;""" resultado_query = Menores.objects.raw(query) ids_listado = [persona.id for persona in resultado_query] menores_incapacitados = menores_incapacitados.exclude(id__in=ids_listado) menores_desvinculados = Menores.objects.filter(fuente_procedencia_id=13, activo=True) query = """SELECT id FROM public."SGMGU_menores" t where date_part('year', t.fecha_registro)=""" + unicode( anno) + """ AND date_part('month', t.fecha_registro)=""" + unicode( mes) + """ AND t.fuente_procedencia_id = 13;""" resultado_query = Menores.objects.raw(query) ids_listado = [persona.id for persona in resultado_query] menores_desvinculados = menores_desvinculados.exclude(id__in=ids_listado) menores_dictamen = Menores.objects.filter(fuente_procedencia_id=14, activo=True) query = """SELECT id FROM public."SGMGU_menores" t 
where date_part('year', t.fecha_registro)=""" + unicode( anno) + """ AND date_part('month', t.fecha_registro)=""" + unicode( mes) + """ AND t.fuente_procedencia_id = 14;""" resultado_query = Menores.objects.raw(query) ids_listado = [persona.id for persona in resultado_query] menores_dictamen = menores_dictamen.exclude(id__in=ids_listado) discapacitados = Discapacitados.objects.filter(activo=True) query = """SELECT id FROM public."SGMGU_discapacitados" t where date_part('year', t.fecha_registro)=""" + unicode( anno) + """ AND date_part('month', t.fecha_registro)=""" + unicode( mes) + """;""" resultado_query = Discapacitados.objects.raw(query) ids_listado = [persona.id for persona in resultado_query] discapacitados = discapacitados.exclude(id__in=ids_listado) mujeres_riesgo_pnr = PersonasRiesgo.objects.filter( fuente_procedencia_id=17, activo=True) query = """SELECT id FROM public."SGMGU_personasriesgo" t where date_part('year', t.fecha_registro)=""" + unicode( anno) + """ AND date_part('month', t.fecha_registro)=""" + unicode( mes) + """ AND t.fuente_procedencia_id = 17;""" resultado_query = PersonasRiesgo.objects.raw(query) ids_listado = [persona.id for persona in resultado_query] mujeres_riesgo_pnr = mujeres_riesgo_pnr.exclude(id__in=ids_listado) hombres_riesgo_pnr = PersonasRiesgo.objects.filter( fuente_procedencia_id=18, activo=True) query = """SELECT id FROM public."SGMGU_personasriesgo" t where date_part('year', t.fecha_registro)=""" + unicode( anno) + """ AND date_part('month', t.fecha_registro)=""" + unicode( mes) + """ AND t.fuente_procedencia_id = 18;""" resultado_query = PersonasRiesgo.objects.raw(query) ids_listado = [persona.id for persona in resultado_query] hombres_riesgo_pnr = hombres_riesgo_pnr.exclude(id__in=ids_listado) proxenetas = PersonasRiesgo.objects.filter(fuente_procedencia_id=19, activo=True) query = """SELECT id FROM public."SGMGU_personasriesgo" t where date_part('year', t.fecha_registro)=""" + unicode( anno) + """ AND date_part('month', 
t.fecha_registro)=""" + unicode( mes) + """ AND t.fuente_procedencia_id = 19;""" resultado_query = PersonasRiesgo.objects.raw(query) ids_listado = [persona.id for persona in resultado_query] proxenetas = proxenetas.exclude(id__in=ids_listado) total_controlados_provincia = [] total_mujeres_controladas_provincia = [] total_jovenes_controlados_provincia = [] total_ubicados_provincia = [] total_mujeres_ubicadas_provincia = [] total_jovenes_ubicados_provincia = [] total_egresados_ep_provincias = [] total_sancionados_provincias = [] total_desvinculados_provincias = [] total_tecnicos_medio_provincias = [] total_obreros_calificados_provincias = [] total_escuela_oficio_provincias = [] total_egresados_esc_especiales_provincias = [] total_egresados_esc_conducta_provincias = [] total_egresados_efi_provincias = [] total_menores_incapacitados_provincias = [] total_menores_desvinculados_provincias = [] total_menores_dictamen_provincias = [] total_discapacitados_provincias = [] total_mueres_riesgo_pnr_provincias = [] total_hombres_riesgo_pnr_provincias = [] total_proxenetas_riesgo_pnr_provincias = [] cero = [] for provincia in provincias: cero.append(0) # CONTROLADOS licenciados_sma_controlados = licenciados_sma.filter( municipio_residencia__provincia=provincia).count() egresados_ep_controlados = egresados_ep.filter( municipio_solicita_empleo__provincia=provincia).count() sancionados_controlados = sancionados.filter( municipio_solicita_empleo__provincia=provincia).count() desvinculados_controlados = desvinculados.filter( municipio_solicita_empleo__provincia=provincia).count() tecnicos_medio_controlados = tecnicos_medio.filter( municipio_solicita_empleo__provincia=provincia).count() obreros_calificados_controlados = obreros_calificados.filter( municipio_solicita_empleo__provincia=provincia).count() escuelas_oficio_controlados = escuelas_oficio.filter( municipio_solicita_empleo__provincia=provincia).count() egresados_escuelas_especiales_controlados = 
egresados_escuelas_especiales.filter( municipio_solicita_empleo__provincia=provincia).count() egresados_escuelas_conducta_controlados = egresados_escuelas_conducta.filter( municipio_solicita_empleo__provincia=provincia).count() egresados_efi_controlados = egresados_efi.filter( municipio_solicita_empleo__provincia=provincia).count() menores_incapacitados_controlados = menores_incapacitados.filter( municipio_solicita_empleo__provincia=provincia).count() menores_desvinculados_controlados = menores_desvinculados.filter( municipio_solicita_empleo__provincia=provincia).count() menores_dictamen_controlados = menores_dictamen.filter( municipio_solicita_empleo__provincia=provincia).count() discapacitados_controlados = discapacitados.filter( municipio_solicita_empleo__provincia=provincia).count() mujeres_riesgo_pnr_controlados = mujeres_riesgo_pnr.filter( municipio_solicita_empleo__provincia=provincia).count() hombres_riesgo_pnr_controlados = hombres_riesgo_pnr.filter( municipio_solicita_empleo__provincia=provincia).count() proxenetas_controlados = proxenetas.filter( municipio_solicita_empleo__provincia=provincia).count() total = (licenciados_sma_controlados + egresados_ep_controlados + sancionados_controlados + desvinculados_controlados + tecnicos_medio_controlados + obreros_calificados_controlados + escuelas_oficio_controlados + egresados_escuelas_especiales_controlados + egresados_escuelas_conducta_controlados + egresados_efi_controlados + menores_incapacitados_controlados + menores_desvinculados_controlados + menores_dictamen_controlados + discapacitados_controlados + mujeres_riesgo_pnr_controlados + hombres_riesgo_pnr_controlados + proxenetas_controlados) total_controlados_provincia.append(total) # MUJERES CONTROLADAS licenciados_sma_controlados = licenciados_sma.filter( municipio_residencia__provincia=provincia, sexo='F').count() egresados_ep_controlados = egresados_ep.filter( municipio_solicita_empleo__provincia=provincia, sexo='F').count() sancionados_controlados = 
sancionados.filter( municipio_solicita_empleo__provincia=provincia, sexo='F').count() desvinculados_controlados = desvinculados.filter( municipio_solicita_empleo__provincia=provincia, sexo='F').count() tecnicos_medio_controlados = tecnicos_medio.filter( municipio_solicita_empleo__provincia=provincia, sexo='F').count() obreros_calificados_controlados = obreros_calificados.filter( municipio_solicita_empleo__provincia=provincia, sexo='F').count() escuelas_oficio_controlados = escuelas_oficio.filter( municipio_solicita_empleo__provincia=provincia, sexo='F').count() egresados_escuelas_especiales_controlados = egresados_escuelas_especiales.filter( municipio_solicita_empleo__provincia=provincia, sexo='F').count() egresados_escuelas_conducta_controlados = egresados_escuelas_conducta.filter( municipio_solicita_empleo__provincia=provincia, sexo='F').count() egresados_efi_controlados = egresados_efi.filter( municipio_solicita_empleo__provincia=provincia, sexo='F').count() menores_incapacitados_controlados = menores_incapacitados.filter( municipio_solicita_empleo__provincia=provincia, sexo='F').count() menores_desvinculados_controlados = menores_desvinculados.filter( municipio_solicita_empleo__provincia=provincia, sexo='F').count() menores_dictamen_controlados = menores_dictamen.filter( municipio_solicita_empleo__provincia=provincia, sexo='F').count() discapacitados_controlados = discapacitados.filter( municipio_solicita_empleo__provincia=provincia, sexo='F').count() mujeres_riesgo_pnr_controlados = mujeres_riesgo_pnr.filter( municipio_solicita_empleo__provincia=provincia, sexo='F').count() hombres_riesgo_pnr_controlados = hombres_riesgo_pnr.filter( municipio_solicita_empleo__provincia=provincia, sexo='F').count() proxenetas_controlados = proxenetas.filter( municipio_solicita_empleo__provincia=provincia, sexo='F').count() total = (licenciados_sma_controlados + egresados_ep_controlados + sancionados_controlados + desvinculados_controlados + tecnicos_medio_controlados + 
obreros_calificados_controlados + escuelas_oficio_controlados + egresados_escuelas_especiales_controlados + egresados_escuelas_conducta_controlados + egresados_efi_controlados + menores_incapacitados_controlados + menores_desvinculados_controlados + menores_dictamen_controlados + discapacitados_controlados + mujeres_riesgo_pnr_controlados + hombres_riesgo_pnr_controlados + proxenetas_controlados) total_mujeres_controladas_provincia.append(total) # JOVENES CONTROLADOS licenciados_sma_controlados = licenciados_sma.filter( municipio_residencia__provincia=provincia, edad__lte=35).count() egresados_ep_controlados = egresados_ep.filter( municipio_solicita_empleo__provincia=provincia, edad__lte=35).count() sancionados_controlados = sancionados.filter( municipio_solicita_empleo__provincia=provincia, edad__lte=35).count() desvinculados_controlados = desvinculados.filter( municipio_solicita_empleo__provincia=provincia, edad__lte=35).count() tecnicos_medio_controlados = tecnicos_medio.filter( municipio_solicita_empleo__provincia=provincia, edad__lte=35).count() obreros_calificados_controlados = obreros_calificados.filter( municipio_solicita_empleo__provincia=provincia, edad__lte=35).count() escuelas_oficio_controlados = escuelas_oficio.filter( municipio_solicita_empleo__provincia=provincia, edad__lte=35).count() egresados_escuelas_especiales_controlados = egresados_escuelas_especiales.filter( municipio_solicita_empleo__provincia=provincia, edad__lte=35).count() egresados_escuelas_conducta_controlados = egresados_escuelas_conducta.filter( municipio_solicita_empleo__provincia=provincia, edad__lte=35).count() egresados_efi_controlados = egresados_efi.filter( municipio_solicita_empleo__provincia=provincia, edad__lte=35).count() menores_incapacitados_controlados = menores_incapacitados.filter( municipio_solicita_empleo__provincia=provincia, edad__lte=35).count() menores_desvinculados_controlados = menores_desvinculados.filter( municipio_solicita_empleo__provincia=provincia, 
edad__lte=35).count() menores_dictamen_controlados = menores_dictamen.filter( municipio_solicita_empleo__provincia=provincia, edad__lte=35).count() discapacitados_controlados = discapacitados.filter( municipio_solicita_empleo__provincia=provincia, edad__lte=35).count() mujeres_riesgo_pnr_controlados = mujeres_riesgo_pnr.filter( municipio_solicita_empleo__provincia=provincia, edad__lte=35).count() hombres_riesgo_pnr_controlados = hombres_riesgo_pnr.filter( municipio_solicita_empleo__provincia=provincia, edad__lte=35).count() proxenetas_controlados = proxenetas.filter( municipio_solicita_empleo__provincia=provincia, edad__lte=35).count() total = (licenciados_sma_controlados + egresados_ep_controlados + sancionados_controlados + desvinculados_controlados + tecnicos_medio_controlados + obreros_calificados_controlados + escuelas_oficio_controlados + egresados_escuelas_especiales_controlados + egresados_escuelas_conducta_controlados + egresados_efi_controlados + menores_incapacitados_controlados + menores_desvinculados_controlados + menores_dictamen_controlados + discapacitados_controlados + mujeres_riesgo_pnr_controlados + hombres_riesgo_pnr_controlados + proxenetas_controlados) total_jovenes_controlados_provincia.append(total) # NO UBICADOS egresados_ep_controlados = egresados_ep.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False).count() sancionados_controlados = sancionados.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False).count() desvinculados_controlados = desvinculados.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False).count() tecnicos_medio_controlados = tecnicos_medio.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False).count() obreros_calificados_controlados = obreros_calificados.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False).count() escuelas_oficio_controlados = escuelas_oficio.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False).count() 
egresados_escuelas_especiales_controlados = egresados_escuelas_especiales.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False).count() egresados_escuelas_conducta_controlados = egresados_escuelas_conducta.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False).count() egresados_efi_controlados = egresados_efi.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False).count() menores_incapacitados_controlados = menores_incapacitados.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False).count() menores_desvinculados_controlados = menores_desvinculados.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False).count() menores_dictamen_controlados = menores_dictamen.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False).count() discapacitados_controlados = discapacitados.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False).count() mujeres_riesgo_pnr_controlados = mujeres_riesgo_pnr.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False).count() hombres_riesgo_pnr_controlados = hombres_riesgo_pnr.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False).count() proxenetas_controlados = proxenetas.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False).count() total = ( egresados_ep_controlados + sancionados_controlados + desvinculados_controlados + tecnicos_medio_controlados + obreros_calificados_controlados + escuelas_oficio_controlados + egresados_escuelas_especiales_controlados + egresados_escuelas_conducta_controlados + egresados_efi_controlados + menores_incapacitados_controlados + menores_desvinculados_controlados + menores_dictamen_controlados + discapacitados_controlados + mujeres_riesgo_pnr_controlados + hombres_riesgo_pnr_controlados + proxenetas_controlados) total_ubicados_provincia.append(total) # MUJERES NO UBICADAS egresados_ep_controlados = egresados_ep.filter( 
municipio_solicita_empleo__provincia=provincia, ubicado=False, sexo='F').count() sancionados_controlados = sancionados.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False, sexo='F').count() desvinculados_controlados = desvinculados.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False, sexo='F').count() tecnicos_medio_controlados = tecnicos_medio.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False, sexo='F').count() obreros_calificados_controlados = obreros_calificados.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False, sexo='F').count() escuelas_oficio_controlados = escuelas_oficio.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False, sexo='F').count() egresados_escuelas_especiales_controlados = egresados_escuelas_especiales.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False, sexo='F').count() egresados_escuelas_conducta_controlados = egresados_escuelas_conducta.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False, sexo='F').count() egresados_efi_controlados = egresados_efi.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False, sexo='F').count() menores_incapacitados_controlados = menores_incapacitados.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False, sexo='F').count() menores_desvinculados_controlados = menores_desvinculados.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False, sexo='F').count() menores_dictamen_controlados = menores_dictamen.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False, sexo='F').count() discapacitados_controlados = discapacitados.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False, sexo='F').count() mujeres_riesgo_pnr_controlados = mujeres_riesgo_pnr.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False, sexo='F').count() hombres_riesgo_pnr_controlados = hombres_riesgo_pnr.filter( 
municipio_solicita_empleo__provincia=provincia, ubicado=False, sexo='F').count() proxenetas_controlados = proxenetas.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False, sexo='F').count() total = ( egresados_ep_controlados + sancionados_controlados + desvinculados_controlados + tecnicos_medio_controlados + obreros_calificados_controlados + escuelas_oficio_controlados + egresados_escuelas_especiales_controlados + egresados_escuelas_conducta_controlados + egresados_efi_controlados + menores_incapacitados_controlados + menores_desvinculados_controlados + menores_dictamen_controlados + discapacitados_controlados + mujeres_riesgo_pnr_controlados + hombres_riesgo_pnr_controlados + proxenetas_controlados) total_mujeres_ubicadas_provincia.append(total) # JOVENES NO UBICADOS egresados_ep_controlados = egresados_ep.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False, edad__lte=35).count() sancionados_controlados = sancionados.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False, edad__lte=35).count() desvinculados_controlados = desvinculados.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False, edad__lte=35).count() tecnicos_medio_controlados = tecnicos_medio.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False, edad__lte=35).count() obreros_calificados_controlados = obreros_calificados.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False, edad__lte=35).count() escuelas_oficio_controlados = escuelas_oficio.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False, edad__lte=35).count() egresados_escuelas_especiales_controlados = egresados_escuelas_especiales.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False, edad__lte=35).count() egresados_escuelas_conducta_controlados = egresados_escuelas_conducta.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False, edad__lte=35).count() egresados_efi_controlados = egresados_efi.filter( 
municipio_solicita_empleo__provincia=provincia, ubicado=False, edad__lte=35).count() menores_incapacitados_controlados = menores_incapacitados.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False, edad__lte=35).count() menores_desvinculados_controlados = menores_desvinculados.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False, edad__lte=35).count() menores_dictamen_controlados = menores_dictamen.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False, edad__lte=35).count() discapacitados_controlados = discapacitados.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False, edad__lte=35).count() mujeres_riesgo_pnr_controlados = mujeres_riesgo_pnr.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False, edad__lte=35).count() hombres_riesgo_pnr_controlados = hombres_riesgo_pnr.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False, edad__lte=35).count() proxenetas_controlados = proxenetas.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False, edad__lte=35).count() total = ( egresados_ep_controlados + sancionados_controlados + desvinculados_controlados + tecnicos_medio_controlados + obreros_calificados_controlados + escuelas_oficio_controlados + egresados_escuelas_especiales_controlados + egresados_escuelas_conducta_controlados + egresados_efi_controlados + menores_incapacitados_controlados + menores_desvinculados_controlados + menores_dictamen_controlados + discapacitados_controlados + mujeres_riesgo_pnr_controlados + hombres_riesgo_pnr_controlados + proxenetas_controlados) total_jovenes_ubicados_provincia.append(total) # NO UBICADOS: Egresados de establecimientos penitenciarios total_egresados_ep_provincias.append( egresados_ep.filter(municipio_solicita_empleo__provincia=provincia, ubicado=False).count()) # NO UBICADOS: SANCIONADOS total_sancionados_provincias.append( sancionados.filter(municipio_solicita_empleo__provincia=provincia, ubicado=False).count()) # 
NO UBICADOS: DESVINCULADOS total_desvinculados_provincias.append( desvinculados.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False).count()) # NO UBICADOS: Tecnicos medios total_tecnicos_medio_provincias.append( tecnicos_medio.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False).count()) # NO UBICADOS: Egresados obreros calificados total_obreros_calificados_provincias.append( obreros_calificados.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False).count()) # NO UBICADOS: Egresados escuelas de oficio total_escuela_oficio_provincias.append( escuelas_oficio.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False).count()) # NO UBICADOS: Egresados de escuelas especiales total_egresados_esc_especiales_provincias.append( egresados_escuelas_especiales.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False).count()) # NO UBICADOS: Egresados de escuelas de conducta total_egresados_esc_conducta_provincias.append( egresados_escuelas_conducta.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False).count()) # NO UBICADOS: Egresados de la EFI total_egresados_efi_provincias.append( egresados_efi.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False).count()) # NO UBICADOS: Menores incapacitados para el estudio por dictamen médico total_menores_incapacitados_provincias.append( menores_incapacitados.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False).count()) # NO UBICADOS: Menores desvinculados del SNE por bajo rendimiento total_menores_desvinculados_provincias.append( menores_desvinculados.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False).count()) # NO UBICADOS: Menores con dictamen del CDO-MININT total_menores_dictamen_provincias.append( menores_dictamen.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False).count()) # NO UBICADOS: Personas con discapacidad total_discapacitados_provincias.append( 
discapacitados.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False).count()) # UBICADOS: Mujeres de riesgo controladas por al PNR total_mueres_riesgo_pnr_provincias.append( mujeres_riesgo_pnr.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False).count()) # UBICADOS: Hombres de riesgo controlados por al PNR total_hombres_riesgo_pnr_provincias.append( hombres_riesgo_pnr.filter( municipio_solicita_empleo__provincia=provincia, ubicado=False).count()) # UBICADOS: Proxenetas de riesgo controlados por la PNR total_proxenetas_riesgo_pnr_provincias.append( proxenetas.filter(municipio_solicita_empleo__provincia=provincia, ubicado=False).count()) worksheet_data.write_column(1, 1, total_controlados_provincia, formato2) worksheet_data.write_column(1, 2, total_mujeres_controladas_provincia, formato2) worksheet_data.write_column(1, 3, total_jovenes_controlados_provincia, formato2) worksheet_data.write_column(1, 4, total_ubicados_provincia, formato2) worksheet_data.write_column(1, 5, total_mujeres_ubicadas_provincia, formato2) worksheet_data.write_column(1, 6, total_jovenes_ubicados_provincia, formato2) worksheet_data.write_column(1, 7, cero, formato2) # NO UBICADOS: LICENCIADOS DEL SMA worksheet_data.write_column(1, 8, total_egresados_ep_provincias, formato2) worksheet_data.write_column(1, 9, total_sancionados_provincias, formato2) worksheet_data.write_column(1, 10, total_desvinculados_provincias, formato2) worksheet_data.write_column(1, 11, cero, formato2) # UBICADOS: Egresados Universitarios worksheet_data.write_column(1, 12, total_tecnicos_medio_provincias, formato2) worksheet_data.write_column(1, 13, total_obreros_calificados_provincias, formato2) worksheet_data.write_column(1, 14, total_escuela_oficio_provincias, formato2) worksheet_data.write_column(1, 15, total_egresados_esc_especiales_provincias, formato2) worksheet_data.write_column(1, 16, total_egresados_esc_conducta_provincias, formato2) worksheet_data.write_column(1, 17, 
total_egresados_efi_provincias, formato2) worksheet_data.write_column(1, 18, total_menores_incapacitados_provincias, formato2) worksheet_data.write_column(1, 19, total_menores_desvinculados_provincias, formato2) worksheet_data.write_column(1, 20, total_menores_dictamen_provincias, formato2) worksheet_data.write_column(1, 21, total_discapacitados_provincias, formato2) worksheet_data.write_column(1, 22, total_mueres_riesgo_pnr_provincias, formato2) worksheet_data.write_column(1, 23, total_hombres_riesgo_pnr_provincias, formato2) worksheet_data.write_column(1, 24, total_proxenetas_riesgo_pnr_provincias, formato2) # ------------ SUMAS ABAJO------------------- sumas = [] for a in range(1, 25): total = '=SUM(%s)' % xl_range(1, a, cantidad_provincias, a) sumas.append(total) indice_sumas = 1 for suma in sumas: worksheet_data.write(17, indice_sumas, suma, formato2) indice_sumas = indice_sumas + 1 # ------------ SUMAS ARRIBA------------------- sumas2 = [] inicio = 7 cant = cantidad_provincias + 1 posicion1 = cantidad_fuentes + inicio - 1 for i in range(1, cant): total2 = '=SUM(%s)' % xl_range(i, inicio, i, posicion1) sumas2.append(total2) indice_sumas = 1 posicion2 = inicio + cantidad_fuentes for suma in sumas2: worksheet_data.write(indice_sumas, posicion2, suma, formato2) indice_sumas = indice_sumas + 1 book.close() elapsed_time = time.time() - start_time print("Tiempo transcurrido: %.10f segundos." % elapsed_time) # print("Tiempo transcurrido: % segundos." % elapsed_time) return response
# Django view: builds an .xlsx report "Interruptos por Organismos" (workers whose work
# was interrupted, broken down by OACE/OSDE organism) and streams it as an attachment.
# Rows: one per organism (plus its OSDEs, plus per-entity rows for non-OACE users),
# columns B..O: totals, females, youths, interruption-duration buckets, and current
# situation (temporary relocation, salary guarantee, discharge, proposed-available).
#
# NOTE(review): the body below has been whitespace-mangled in this copy (many
# statements collapsed onto single physical lines, some line breaks falling inside
# string literals), so only comments are added here; the code bytes are untouched.
# NOTE(review): `categoria_usuario == 'interrupto' or categoria_usuario == 'interrupto'`
# repeats the identical comparison twice — one side was presumably meant to be a
# different category; confirm against the original source before relying on it.
# NOTE(review): the raw SQL is assembled by string concatenation with
# unicode(mes_actual); mes_actual is an int from datetime so injection risk is low,
# but a parameterized query (params=[...]) would be safer. `unicode` and the
# `print`-statement style elsewhere in this file imply Python 2.
def interruptos_por_organismos_sin_entidades(request): anno_actual = datetime.today().year mes_actual = datetime.today().month categoria_usuario = request.user.perfil_usuario.categoria.nombre organismo = request.user.perfil_usuario.organismo nombre_organismo = organismo.nombre.encode('utf-8').strip() num_anno = anno_actual if mes_actual == 12: num_anno = anno_actual - 1 response = HttpResponse( content_type= 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet') if categoria_usuario == 'interrupto' or categoria_usuario == 'interrupto': response[ 'Content-Disposition'] = "attachment; filename=Interruptos_por_organismos_(%s)_(%s_%s).xlsx" % ( str(nombre_organismo).replace( " ", "_"), obtener_mes(mes_actual - 1), num_anno) else: response[ 'Content-Disposition'] = "attachment; filename=Interruptos_por_organismos_(%s_%s).xlsx" % ( obtener_mes(mes_actual - 1), num_anno) book = Workbook(response, {'in_memory': True}) worksheet_data = book.add_worksheet("Interruptos por Organismos") formato1 = book.add_format({ 'align': 'center', 'valign': 'vcenter', 'bold': True, 'border': 1 }) formato2 = book.add_format({ 'rotation': 45, 'align': 'center', 'valign': 'vcenter', 'bold': True, 'border': 1, 'text_wrap': True }) formato3 = book.add_format({'align': 'center', 'bold': True, 'border': 1}) formato4 = book.add_format({ 'rotation': 90, 'align': 'center', 'valign': 'vcenter', 'bold': True, 'border': 1, 'text_wrap': True }) formato5 = book.add_format({'border': 1, 'text_wrap': True}) formato_organismos = book.add_format({'bold': True, 'border': 1}) worksheet_data.merge_range('A1:A4', "OACE-OSDE", formato1) worksheet_data.merge_range( "B1:O1", "Interruptos: %s-%s" % (obtener_mes(mes_actual - 1), num_anno), formato3) worksheet_data.merge_range("B2:B4", "Total", formato2) worksheet_data.merge_range("C2:C4", "Femeninos", formato2) worksheet_data.merge_range("D2:D4", "Jovenes", formato2) worksheet_data.merge_range("E2:H2", "Tiempo de Interrupcion", formato3) 
# Remaining header cells (rows 2-4), then queryset selection per user category.
worksheet_data.merge_range("I2:O2", "Situacion Actual", formato3) worksheet_data.merge_range("E3:E4", "Hasta 30 dias", formato4) worksheet_data.merge_range("F3:F4", "Mas de 30 y hasta 60 dias", formato4) worksheet_data.merge_range("G3:G4", "Mas de 60 dias y hasta 1 anno", formato4) worksheet_data.merge_range("H3:H4", "Mas de 1 anno", formato4) worksheet_data.merge_range("I3:K3", "Reubicados Temporales", formato3) worksheet_data.write("I4", "En la misma entidad", formato4) worksheet_data.write("J4", "En otra entidad del mismo organismo", formato4) worksheet_data.write("K4", "En una entidad de otro organismo", formato4) worksheet_data.merge_range("L3:L4", "Cobrando Garantia Salarial", formato4) worksheet_data.merge_range("M3:M4", "Sin Garantia Salarial", formato4) worksheet_data.merge_range("N3:N4", "Bajas", formato4) worksheet_data.merge_range("O3:O4", "Propuesto a Disponibles", formato4) worksheet_data.set_column("A:A", 20.58) worksheet_data.set_row(3, 80) if categoria_usuario == 'interrupto': if es_oace(organismo): osdes = obtener_osdes_de_un_oace(organismo) if osdes.__len__() > 0: interruptos = Interruptos.objects.filter(fecha_registro__year=anno_actual, organismo__id__in=[osde.id for osde in osdes] ) | \ Interruptos.objects.filter(organismo=organismo) else: interruptos = Interruptos.objects.filter( fecha_registro__year=anno_actual, organismo=organismo) organismos = Organismo.objects.filter(id=organismo.id) else: interruptos = Interruptos.objects.filter( fecha_registro__year=anno_actual, organismo=organismo) query = """SELECT id FROM public."SGMGU_interruptos" t where date_part('month',t.fecha_registro)=""" + unicode( mes_actual) + """;""" resultado_query_interruptos = Interruptos.objects.raw(query) ids_interruptos = [interr.id for interr in resultado_query_interruptos] interruptos = interruptos.filter(id__in=ids_interruptos) elif categoria_usuario == 'administrador': query = """SELECT id FROM public."SGMGU_interruptos" t where 
date_part('month',t.fecha_registro)=""" + unicode( mes_actual) + """;""" resultado_query_interruptos = Interruptos.objects.raw(query) ids_interruptos = [interr.id for interr in resultado_query_interruptos] interruptos = Interruptos.objects.filter( fecha_registro__year=anno_actual, id__in=ids_interruptos) organismos = obtener_oaces() inicio_fila = 4 i = 0 totales = 0 totales_femeninos = 0 totales_jovenes = 0 totales_hasta_treinta_dias = 0 totales_mas_treinta_menos_sesenta_dias = 0 totales_mas_sesenta_dias_menos_un_anno = 0 totales_mas_un_anno = 0 totales_reubicados_temporal_misma_entidad = 0 totales_reubicados_temporal_mismo_organismo = 0 totales_reubicados_temporal_otro_organismo = 0 totales_cobrando_garantia_salarial = 0 totales_sin_garantia_salarial = 0 totales_baja = 0 totales_propuesto_disponible = 0 if es_oace(organismo) or categoria_usuario == 'administrador': for oace in organismos: org = interruptos.filter(organismo=oace) dict_totales = totales_por_organismo(org) t = dict_totales['t'] t_femeninos = dict_totales['t_femeninos'] t_jovenes = dict_totales['t_jovenes'] t_hasta_treinta_dias = dict_totales['t_hasta_treinta_dias'] t_mas_treinta_menos_sesenta_dias = dict_totales[ 't_mas_treinta_menos_sesenta_dias'] t_mas_sesenta_dias_menos_un_anno = dict_totales[ 't_mas_sesenta_dias_menos_un_anno'] t_mas_un_anno = dict_totales['t_mas_un_anno'] t_reubicados_temporal_misma_entidad = dict_totales[ 't_reubicados_temporal_misma_entidad'] t_reubicados_temporal_mismo_organismo = dict_totales[ 't_reubicados_temporal_mismo_organismo'] t_reubicados_temporal_otro_organismo = dict_totales[ 't_reubicados_temporal_otro_organismo'] t_cobrando_garantia_salarial = dict_totales[ 't_cobrando_garantia_salarial'] t_sin_garantia_salarial = dict_totales['t_sin_garantia_salarial'] t_baja = dict_totales['t_baja'] t_propuesto_disponible = dict_totales['t_propuesto_disponible'] worksheet_data.write(inicio_fila + i, 0, oace.siglas, formato_organismos) # organismo 
# One per-OACE row: totals into columns 1-14, then accumulate the grand totals.
worksheet_data.write(inicio_fila + i, 1, t, formato5) # total worksheet_data.write(inicio_fila + i, 2, t_femeninos, formato5) # totales femeninos worksheet_data.write(inicio_fila + i, 3, t_jovenes, formato5) # totales jóvenes worksheet_data.write(inicio_fila + i, 4, t_hasta_treinta_dias, formato5) # menos de 30 dias worksheet_data.write(inicio_fila + i, 5, t_mas_treinta_menos_sesenta_dias, formato5) # entre 30 y 60 dias worksheet_data.write(inicio_fila + i, 6, t_mas_sesenta_dias_menos_un_anno, formato5) # entre 60 dias y un año worksheet_data.write(inicio_fila + i, 7, t_mas_un_anno, formato5) # mas de un año worksheet_data.write( inicio_fila + i, 8, t_reubicados_temporal_misma_entidad, formato5) # reubicado temporal dentro de la misma entidad worksheet_data.write( inicio_fila + i, 9, t_reubicados_temporal_mismo_organismo, formato5) # reubicado temporal dentro del mismo organismo worksheet_data.write( inicio_fila + i, 10, t_reubicados_temporal_otro_organismo, formato5) # reubicado temporal dentro de otro organismo worksheet_data.write(inicio_fila + i, 11, t_cobrando_garantia_salarial, formato5) # cobrando garantia salarial worksheet_data.write(inicio_fila + i, 12, t_sin_garantia_salarial, formato5) # sin garantia salarial worksheet_data.write(inicio_fila + i, 13, t_baja, formato5) # baja worksheet_data.write(inicio_fila + i, 14, t_propuesto_disponible, formato5) # propuesto a disponible totales += t totales_femeninos += t_femeninos totales_jovenes += t_jovenes totales_hasta_treinta_dias += t_hasta_treinta_dias totales_mas_treinta_menos_sesenta_dias += t_mas_treinta_menos_sesenta_dias totales_mas_sesenta_dias_menos_un_anno += t_mas_sesenta_dias_menos_un_anno totales_mas_un_anno += t_mas_un_anno totales_reubicados_temporal_misma_entidad += t_reubicados_temporal_misma_entidad totales_reubicados_temporal_mismo_organismo += t_reubicados_temporal_mismo_organismo totales_reubicados_temporal_otro_organismo += t_reubicados_temporal_otro_organismo 
totales_cobrando_garantia_salarial += t_cobrando_garantia_salarial totales_sin_garantia_salarial += t_sin_garantia_salarial totales_baja += t_baja totales_propuesto_disponible += t_propuesto_disponible i = i + 1 # for interrupto in interruptos: # if interrupto.organismo_id == oace.id: # worksheet_data.write(inicio_fila + i, 0, interrupto.entidad.e_nombre, formato5) # entidad # worksheet_data.write(inicio_fila + i, 1, total_interruptos_por_entidad(interrupto), formato5) # total # worksheet_data.write(inicio_fila + i, 2, total_femeninos_interruptos_por_entidad(interrupto), formato5) # totales femeninos # worksheet_data.write(inicio_fila + i, 3, total_jovenes_interruptos_por_entidad(interrupto), formato5) # totales jóvenes # worksheet_data.write(inicio_fila + i, 4, interruptos_hasta_treinta_dias_por_entidad(interrupto), formato5) # menos de 30 dias # worksheet_data.write(inicio_fila + i, 5, interruptos_mas_treinta_hasta_sesenta_por_entidad(interrupto), formato5) # entre 30 y 60 dias # worksheet_data.write(inicio_fila + i, 6, interruptos_mas_sesenta_menos_un_anno_por_entidad(interrupto), formato5) # entre 60 dias y un año # worksheet_data.write(inicio_fila + i, 7, interruptos_mas_un_anno_por_entidad(interrupto), formato5) # mas de un año # worksheet_data.write(inicio_fila + i, 8, interruptos_reubicado_misma_entidad_por_entidad(interrupto), formato5) # reubicado temporal dentro de la misma entidad # worksheet_data.write(inicio_fila + i, 9, interruptos_reubicado_mismo_organismo_por_entidad(interrupto), formato5) # reubicado temporal dentro del mismo organismo # worksheet_data.write(inicio_fila + i, 10, interruptos_reubicado_otro_organismo_por_entidad(interrupto), formato5) # reubicado temporal dentro de otro organismo # worksheet_data.write(inicio_fila + i, 11, interruptos_cobrando_garantia_salarial_por_entidad(interrupto), formato5) # cobrando garantia salarial # worksheet_data.write(inicio_fila + i, 12, interruptos_sin_garantia_salarial_por_entidad(interrupto), 
formato5) # sin garantia salarial # worksheet_data.write(inicio_fila + i, 13, interruptos_baja_por_entidad(interrupto), formato5) # baja # worksheet_data.write(inicio_fila + i, 14, interruptos_propuesto_disponible_por_entidad(interrupto), formato5) # propuesto a disponible # i = i + 1 osdes = obtener_osdes_de_un_oace(oace) if osdes.__len__() > 0: for osde in osdes: org = interruptos.filter(organismo=osde) dict_totales = totales_por_organismo(org) t = dict_totales['t'] t_femeninos = dict_totales['t_femeninos'] t_jovenes = dict_totales['t_jovenes'] t_hasta_treinta_dias = dict_totales['t_hasta_treinta_dias'] t_mas_treinta_menos_sesenta_dias = dict_totales[ 't_mas_treinta_menos_sesenta_dias'] t_mas_sesenta_dias_menos_un_anno = dict_totales[ 't_mas_sesenta_dias_menos_un_anno'] t_mas_un_anno = dict_totales['t_mas_un_anno'] t_reubicados_temporal_misma_entidad = dict_totales[ 't_reubicados_temporal_misma_entidad'] t_reubicados_temporal_mismo_organismo = dict_totales[ 't_reubicados_temporal_mismo_organismo'] t_reubicados_temporal_otro_organismo = dict_totales[ 't_reubicados_temporal_otro_organismo'] t_cobrando_garantia_salarial = dict_totales[ 't_cobrando_garantia_salarial'] t_sin_garantia_salarial = dict_totales[ 't_sin_garantia_salarial'] t_baja = dict_totales['t_baja'] t_propuesto_disponible = dict_totales[ 't_propuesto_disponible'] worksheet_data.write(inicio_fila + i, 0, osde.siglas, formato_organismos) # osde worksheet_data.write(inicio_fila + i, 1, t, formato5) # total worksheet_data.write(inicio_fila + i, 2, t_femeninos, formato5) # totales femeninos worksheet_data.write(inicio_fila + i, 3, t_jovenes, formato5) # totales jóvenes worksheet_data.write(inicio_fila + i, 4, t_hasta_treinta_dias, formato5) # menos de 30 dias worksheet_data.write(inicio_fila + i, 5, t_mas_treinta_menos_sesenta_dias, formato5) # entre 30 y 60 dias worksheet_data.write(inicio_fila + i, 6, t_mas_sesenta_dias_menos_un_anno, formato5) # entre 60 dias y un año worksheet_data.write(inicio_fila + 
i, 7, t_mas_un_anno, formato5) # mas de un año worksheet_data.write( inicio_fila + i, 8, t_reubicados_temporal_misma_entidad, formato5 ) # reubicado temporal dentro de la misma entidad worksheet_data.write( inicio_fila + i, 9, t_reubicados_temporal_mismo_organismo, formato5 ) # reubicado temporal dentro del mismo organismo worksheet_data.write( inicio_fila + i, 10, t_reubicados_temporal_otro_organismo, formato5 ) # reubicado temporal dentro de otro organismo worksheet_data.write( inicio_fila + i, 11, t_cobrando_garantia_salarial, formato5) # cobrando garantia salarial worksheet_data.write(inicio_fila + i, 12, t_sin_garantia_salarial, formato5) # sin garantia salarial worksheet_data.write(inicio_fila + i, 13, t_baja, formato5) # baja worksheet_data.write(inicio_fila + i, 14, t_propuesto_disponible, formato5) # propuesto a disponible totales += t totales_femeninos += t_femeninos totales_jovenes += t_jovenes totales_hasta_treinta_dias += t_hasta_treinta_dias totales_mas_treinta_menos_sesenta_dias += t_mas_treinta_menos_sesenta_dias totales_mas_sesenta_dias_menos_un_anno += t_mas_sesenta_dias_menos_un_anno totales_mas_un_anno += t_mas_un_anno totales_reubicados_temporal_misma_entidad += t_reubicados_temporal_misma_entidad totales_reubicados_temporal_mismo_organismo += t_reubicados_temporal_mismo_organismo totales_reubicados_temporal_otro_organismo += t_reubicados_temporal_otro_organismo totales_cobrando_garantia_salarial += t_cobrando_garantia_salarial totales_sin_garantia_salarial += t_sin_garantia_salarial totales_baja += t_baja totales_propuesto_disponible += t_propuesto_disponible i = i + 1 # l_interruptos = interruptos.filter(organismo_id=osde) # # for interrupto in l_interruptos: # worksheet_data.write(inicio_fila + i, 0, interrupto.entidad.e_nombre, formato5) # entidad # worksheet_data.write(inicio_fila + i, 1, total_interruptos_por_entidad(interrupto), formato5) # total # worksheet_data.write(inicio_fila + i, 2, 
total_femeninos_interruptos_por_entidad(interrupto), formato5) # totales femeninos # worksheet_data.write(inicio_fila + i, 3, total_jovenes_interruptos_por_entidad(interrupto), formato5) # totales jóvenes # worksheet_data.write(inicio_fila + i, 4, interruptos_hasta_treinta_dias_por_entidad(interrupto), formato5) # menos de 30 dias # worksheet_data.write(inicio_fila + i, 5, interruptos_mas_treinta_hasta_sesenta_por_entidad(interrupto), formato5) # entre 30 y 60 dias # worksheet_data.write(inicio_fila + i, 6, interruptos_mas_sesenta_menos_un_anno_por_entidad(interrupto), formato5) # entre 60 dias y un año # worksheet_data.write(inicio_fila + i, 7, interruptos_mas_un_anno_por_entidad(interrupto), formato5) # mas de un año # worksheet_data.write(inicio_fila + i, 8, interruptos_reubicado_misma_entidad_por_entidad(interrupto), formato5) # reubicado temporal dentro de la misma entidad # worksheet_data.write(inicio_fila + i, 9, interruptos_reubicado_mismo_organismo_por_entidad(interrupto), formato5) # reubicado temporal dentro del mismo organismo # worksheet_data.write(inicio_fila + i, 10, interruptos_reubicado_otro_organismo_por_entidad(interrupto), formato5) # reubicado temporal dentro de otro organismo # worksheet_data.write(inicio_fila + i, 11, interruptos_cobrando_garantia_salarial_por_entidad(interrupto), formato5) # cobrando garantia salarial # worksheet_data.write(inicio_fila + i, 12, interruptos_sin_garantia_salarial_por_entidad(interrupto), formato5) # sin garantia salarial # worksheet_data.write(inicio_fila + i, 13, interruptos_baja_por_entidad(interrupto), formato5) # baja # worksheet_data.write(inicio_fila + i, 14, interruptos_propuesto_disponible_por_entidad(interrupto), formato5) # propuesto a disponible # i = i + 1 if not es_oace(organismo): org = interruptos.filter(organismo=organismo) dict_totales = totales_por_organismo(org) t = dict_totales['t'] t_femeninos = dict_totales['t_femeninos'] t_jovenes = dict_totales['t_jovenes'] t_hasta_treinta_dias = 
dict_totales['t_hasta_treinta_dias'] t_mas_treinta_menos_sesenta_dias = dict_totales[ 't_mas_treinta_menos_sesenta_dias'] t_mas_sesenta_dias_menos_un_anno = dict_totales[ 't_mas_sesenta_dias_menos_un_anno'] t_mas_un_anno = dict_totales['t_mas_un_anno'] t_reubicados_temporal_misma_entidad = dict_totales[ 't_reubicados_temporal_misma_entidad'] t_reubicados_temporal_mismo_organismo = dict_totales[ 't_reubicados_temporal_mismo_organismo'] t_reubicados_temporal_otro_organismo = dict_totales[ 't_reubicados_temporal_otro_organismo'] t_cobrando_garantia_salarial = dict_totales[ 't_cobrando_garantia_salarial'] t_sin_garantia_salarial = dict_totales['t_sin_garantia_salarial'] t_baja = dict_totales['t_baja'] t_propuesto_disponible = dict_totales['t_propuesto_disponible'] worksheet_data.write(inicio_fila + i, 0, organismo.siglas, formato_organismos) # organismo worksheet_data.write(inicio_fila + i, 1, t, formato5) # total worksheet_data.write(inicio_fila + i, 2, t_femeninos, formato5) # totales femeninos worksheet_data.write(inicio_fila + i, 3, t_jovenes, formato5) # totales jóvenes worksheet_data.write(inicio_fila + i, 4, t_hasta_treinta_dias, formato5) # menos de 30 dias worksheet_data.write(inicio_fila + i, 5, t_mas_treinta_menos_sesenta_dias, formato5) # entre 30 y 60 dias worksheet_data.write(inicio_fila + i, 6, t_mas_sesenta_dias_menos_un_anno, formato5) # entre 60 dias y un año worksheet_data.write(inicio_fila + i, 7, t_mas_un_anno, formato5) # mas de un año worksheet_data.write( inicio_fila + i, 8, t_reubicados_temporal_misma_entidad, formato5) # reubicado temporal dentro de la misma entidad worksheet_data.write( inicio_fila + i, 9, t_reubicados_temporal_mismo_organismo, formato5) # reubicado temporal dentro del mismo organismo worksheet_data.write( inicio_fila + i, 10, t_reubicados_temporal_otro_organismo, formato5) # reubicado temporal dentro de otro organismo worksheet_data.write(inicio_fila + i, 11, t_cobrando_garantia_salarial, formato5) # cobrando garantia 
salarial worksheet_data.write(inicio_fila + i, 12, t_sin_garantia_salarial, formato5) # sin garantia salarial worksheet_data.write(inicio_fila + i, 13, t_baja, formato5) # baja worksheet_data.write(inicio_fila + i, 14, t_propuesto_disponible, formato5) # propuesto a disponible totales += t totales_femeninos += t_femeninos totales_jovenes += t_jovenes totales_hasta_treinta_dias += t_hasta_treinta_dias totales_mas_treinta_menos_sesenta_dias += t_mas_treinta_menos_sesenta_dias totales_mas_sesenta_dias_menos_un_anno += t_mas_sesenta_dias_menos_un_anno totales_mas_un_anno += t_mas_un_anno totales_reubicados_temporal_misma_entidad += t_reubicados_temporal_misma_entidad totales_reubicados_temporal_mismo_organismo += t_reubicados_temporal_mismo_organismo totales_reubicados_temporal_otro_organismo += t_reubicados_temporal_otro_organismo totales_cobrando_garantia_salarial += t_cobrando_garantia_salarial totales_sin_garantia_salarial += t_sin_garantia_salarial totales_baja += t_baja totales_propuesto_disponible += t_propuesto_disponible i = i + 1 for interrupto in interruptos: if interrupto.organismo_id == organismo.id: worksheet_data.write(inicio_fila + i, 0, interrupto.entidad.e_nombre, formato5) # entidad worksheet_data.write(inicio_fila + i, 1, total_interruptos_por_entidad(interrupto), formato5) # total worksheet_data.write( inicio_fila + i, 2, total_femeninos_interruptos_por_entidad(interrupto), formato5) # totales femeninos worksheet_data.write( inicio_fila + i, 3, total_jovenes_interruptos_por_entidad(interrupto), formato5) # totales jóvenes worksheet_data.write( inicio_fila + i, 4, interruptos_hasta_treinta_dias_por_entidad(interrupto), formato5) # menos de 30 dias worksheet_data.write( inicio_fila + i, 5, interruptos_mas_treinta_hasta_sesenta_por_entidad( interrupto), formato5) # entre 30 y 60 dias worksheet_data.write( inicio_fila + i, 6, interruptos_mas_sesenta_menos_un_anno_por_entidad( interrupto), formato5) # entre 60 dias y un año worksheet_data.write( 
inicio_fila + i, 7, interruptos_mas_un_anno_por_entidad(interrupto), formato5) # mas de un año worksheet_data.write( inicio_fila + i, 8, interruptos_reubicado_misma_entidad_por_entidad( interrupto), formato5) # reubicado temporal dentro de la misma entidad worksheet_data.write( inicio_fila + i, 9, interruptos_reubicado_mismo_organismo_por_entidad( interrupto), formato5) # reubicado temporal dentro del mismo organismo worksheet_data.write( inicio_fila + i, 10, interruptos_reubicado_otro_organismo_por_entidad( interrupto), formato5) # reubicado temporal dentro de otro organismo worksheet_data.write( inicio_fila + i, 11, interruptos_cobrando_garantia_salarial_por_entidad( interrupto), formato5) # cobrando garantia salarial worksheet_data.write( inicio_fila + i, 12, interruptos_sin_garantia_salarial_por_entidad(interrupto), formato5) # sin garantia salarial worksheet_data.write(inicio_fila + i, 13, interruptos_baja_por_entidad(interrupto), formato5) # baja worksheet_data.write( inicio_fila + i, 14, interruptos_propuesto_disponible_por_entidad(interrupto), formato5) # propuesto a disponible i = i + 1 # SUMAS # totales = '=SUM(%s)' % xl_range(inicio_fila, 1, inicio_fila + i - 1, 1) # totales_femeninos = '=SUM(%s)' % xl_range(inicio_fila, 2, inicio_fila + i - 1, 2) # totales_jovenes = '=SUM(%s)' % xl_range(inicio_fila, 3, inicio_fila + i - 1, 3) # totales_hasta_treinta_dias = '=SUM(%s)' % xl_range(inicio_fila, 4, inicio_fila + i - 1, 4) # totales_mas_treinta_menos_sesenta_dias = '=SUM(%s)' % xl_range(inicio_fila, 5, inicio_fila + i - 1, 5) # totales_mas_sesenta_dias_menos_un_anno = '=SUM(%s)' % xl_range(inicio_fila, 6, inicio_fila + i - 1, 6) # totales_mas_un_anno = '=SUM(%s)' % xl_range(inicio_fila, 7, inicio_fila + i - 1, 7) # totales_reubicados_temporal_misma_entidad = '=SUM(%s)' % xl_range(inicio_fila, 8, inicio_fila + i - 1, 8) # totales_reubicados_temporal_mismo_organismo = '=SUM(%s)' % xl_range(inicio_fila, 9, inicio_fila + i - 1, 9) # 
totales_reubicados_temporal_otro_organismo = '=SUM(%s)' % xl_range(inicio_fila, 10, inicio_fila + i - 1, 10) # totales_cobrando_garantia_salarial = '=SUM(%s)' % xl_range(inicio_fila, 11, inicio_fila + i - 1, 11) # totales_sin_garantia_salarial = '=SUM(%s)' % xl_range(inicio_fila, 12, inicio_fila + i - 1, 12) # totales_baja = '=SUM(%s)' % xl_range(inicio_fila, 13, inicio_fila + i - 1, 13) # totales_propuesto_disponible = '=SUM(%s)' % xl_range(inicio_fila, 14, inicio_fila + i - 1, 14) # TOTALES worksheet_data.write(inicio_fila + i, 0, "TOTALES", formato_organismos) worksheet_data.write(inicio_fila + i, 1, totales, formato_organismos) worksheet_data.write(inicio_fila + i, 2, totales_femeninos, formato_organismos) worksheet_data.write(inicio_fila + i, 3, totales_jovenes, formato_organismos) worksheet_data.write(inicio_fila + i, 4, totales_hasta_treinta_dias, formato_organismos) worksheet_data.write(inicio_fila + i, 5, totales_mas_treinta_menos_sesenta_dias, formato_organismos) worksheet_data.write(inicio_fila + i, 6, totales_mas_sesenta_dias_menos_un_anno, formato_organismos) worksheet_data.write(inicio_fila + i, 7, totales_mas_un_anno, formato_organismos) worksheet_data.write(inicio_fila + i, 8, totales_reubicados_temporal_misma_entidad, formato_organismos) worksheet_data.write(inicio_fila + i, 9, totales_reubicados_temporal_mismo_organismo, formato_organismos) worksheet_data.write(inicio_fila + i, 10, totales_reubicados_temporal_otro_organismo, formato_organismos) worksheet_data.write(inicio_fila + i, 11, totales_cobrando_garantia_salarial, formato_organismos) worksheet_data.write(inicio_fila + i, 12, totales_sin_garantia_salarial, formato_organismos) worksheet_data.write(inicio_fila + i, 13, totales_baja, formato_organismos) worksheet_data.write(inicio_fila + i, 14, totales_propuesto_disponible, formato_organismos) book.close() return response
class Xlsx: def __init__(self, path): self.wb = Workbook(path) self.ws = self.wb.add_worksheet("List 1") self.savecounter = 1 self.count = 2 self.write_titles() def write_titles(self): self.ws.write("A1", "Filename") self.ws.write("B1", "Size, Mb") self.ws.write("C1", "Duration, s") self.ws.write("D1", "Format") self.ws.write("E1", "Width, px") self.ws.write("F1", "Height, px") self.ws.write("G1", "Aspect ratio") self.ws.write("H1", "Created, yy-mm-dd hh-mm-ss") self.ws.write("I1", "FPS") self.ws.write("J1", "Bitrate, Kbit/s") self.ws.write("K1", "Amount of sound channels") self.ws.write("L1", "Sampling rate, kHz") def write_value(self, cell, check, format=lambda x: x): if check is not None: self.ws.write(cell, format(check)) else: self.ws.write(cell, "?") def write_line(self, filename, stats): c = self.count self.write_value(f"A{c}", stats["name"]) self.write_value(f"B{c}", round(stats["size"] / 2**20, 4)) self.write_value(f"C{c}", stats["duration"], lambda x: round(x, 4)) self.write_value(f"D{c}", stats["container"]) self.write_value(f"E{c}", stats["width"]) self.write_value(f"F{c}", stats["height"]) self.write_value( f"G{c}", stats["width"] and stats["height"] and [stats["width"], stats["height"]], lambda x: x[0] / x[1]) self.write_value( f"H{c}", stats["created"], lambda x: "{}-{}-{} {}-{}-{}".format( x.year, x.month, x.day, x.hour, x.minute, x.second, ), ) self.write_value(f"I{c}", stats["fps"]) self.write_value(f"J{c}", stats["bitrate"]) self.write_value(f"K{c}", stats["channels"]) self.write_value(f"L{c}", stats["frequency"]) self.count += 1 def save(self): self.wb.close()
def write_excel_observations_skeleton(entries_fpath, traits_fpath, out_fhand,
                                      accession_header=None,
                                      synonym_header=None, row_header=None,
                                      column_header=None,
                                      pot_number_header=None, vba_macro=None,
                                      rows_per_plant=1):
    """Write an empty observation-recording workbook to ``out_fhand``.

    For every plant in the entries CSV and every trait, one group of
    ``rows_per_plant`` identical rows is emitted.  Identification cells are
    written with a locked format; the remaining cells (value/author/...)
    stay unlocked for data entry, and the date column gets a locked
    datetime number format carrying the local UTC offset.

    Args:
        entries_fpath: CSV path; each row must have a ``unique_id`` column
            used to look up the ``Plant`` ORM object.
        traits_fpath: file parsed by ``_parse_traits`` into trait objects.
        out_fhand: open file handle; only ``.name`` is passed to xlsxwriter.
        accession_header, synonym_header, row_header, column_header,
        pot_number_header: optional column titles; each column is included
            only when its header is not None.
        vba_macro: optional path to a vbaProject.bin to embed.
        rows_per_plant: number of identical rows per plant/trait pair.
    """
    # Local timezone offset rendered like "+0100" for the date cell format.
    utc_offset = int(-(time.timezone / 3600))
    sign = '+' if utc_offset >= 0 else '-'
    relative_hour = '{0}{1:02d}00'.format(sign, utc_offset)
    # Optional columns first (fixed order), then the mandatory ones.
    columns = []
    if accession_header is not None:
        columns.append(accession_header)
    if synonym_header is not None:
        columns.append(synonym_header)
    if row_header is not None:
        columns.append(row_header)
    if column_header is not None:
        columns.append(column_header)
    if pot_number_header is not None:
        columns.append(pot_number_header)
    columns += MANDATORY_COLS
    traits = _parse_traits(traits_fpath)
    workbook = Workbook(out_fhand.name)
    workbook.set_vba_name('ThisWorkbook')
    sheet = workbook.add_worksheet()
    sheet.set_vba_name('Hoja1')
    if vba_macro:
        workbook.add_vba_project(vba_macro)
    # header
    locked = workbook.add_format()
    locked.set_locked(True)
    unlocked = workbook.add_format()
    unlocked.set_locked(False)
    date_fmt = "yyyy-mm-dd hh:mm:ss\"{}\"".format(relative_hour)
    date_format = workbook.add_format({'num_format': date_fmt, 'locked': 1})
    # sheet.protect()
    # max_lengths[col_index] collects observed cell widths for auto-sizing.
    max_lengths = {}
    for idx, col in enumerate(columns):
        max_lengths[idx] = [len(col)]
        sheet.write(0, idx, col, locked)
    # One line per pot ("maceta").
    # NOTE(review): the CSV file handle opened here is never closed.
    row_index = 1
    for plant_def in csv.DictReader(open(entries_fpath, "r")):
        plant = Plant.objects.get(plant_name=plant_def['unique_id'])
        for trait in traits:
            row_content = []
            for column_index, column in enumerate(columns):
                if column == PLANT_ID:
                    value = plant.plant_name
                    format_ = locked
                elif accession_header and column == accession_header:
                    value = plant.accession.accession_number
                    format_ = locked
                elif synonym_header and column == synonym_header:
                    # collecting_accession may be None -> indexing raises
                    # TypeError, which is treated as "no synonym".
                    try:
                        value = plant.accession.collecting_accession[-1]
                    except TypeError:
                        value = None
                    format_ = locked
                elif row_header and column == row_header:
                    value = plant.row
                    format_ = locked
                elif column_header and column == column_header:
                    value = plant.column
                    format_ = locked
                elif pot_number_header and column == pot_number_header:
                    value = plant.pot_number
                    format_ = locked
                elif column == TRAIT_HEADER_NAME:
                    value = trait.name
                    format_ = locked
                elif column == TRAIT_TYPE_NAME:
                    value = trait.type.name
                    format_ = locked
                elif column == DATE_HEADER_NAME:
                    value = None
                    format_ = date_format
                else:
                    value = None
                    format_ = unlocked
                row_content.append({'value': value, 'format': format_})
            for _ in range(rows_per_plant):
                write_row_in_sheet(sheet, row_content, row_index)
                row_index += 1
            # NOTE(review): `value`/`column_index` here are the leaked
            # variables of the loop above, so only the LAST column's width
            # is recorded per row -- looks unintended; confirm.
            len_value = 0 if value is None else len(value)
            max_lengths[column_index].append(len_value)
    # Fixed width for date/value/author columns, auto width for the rest.
    fecha_col_index = columns.index(DATE_HEADER_NAME)
    valor_col_index = columns.index(VALUE_HEARDER_NAME)
    autor_col_index = columns.index(USER_HEDER_NAME)
    for col_index, lengths in max_lengths.items():
        if col_index in (fecha_col_index, valor_col_index, autor_col_index):
            length = 25
        else:
            length = max(lengths)
        sheet.set_column(col_index, col_index, length + 2)
    workbook.close()
def incorporados_por_organismos_sma_junio(request):
    """Export an xlsx report counting incorporated SMA licenciados per
    organismo for the June interview round.

    The report scope depends on the requesting user's profile category:
    'dmt' users get a single-municipality column, 'dpt_ee' users one
    column for their province, anything else (e.g. 'administrador') one
    column per province.  The workbook is streamed as the HTTP response.
    """
    # start_time = time.time()
    anno_actual = datetime.today().year
    categoria_usuario = request.user.perfil_usuario.categoria.nombre
    municipio_usuario = request.user.perfil_usuario.municipio
    provincia_usuario = request.user.perfil_usuario.provincia
    response = HttpResponse(content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
    if categoria_usuario == 'dmt':
        # Municipal user: restrict to the user's municipality.
        response[
            'Content-Disposition'] = "attachment; filename=Incorporados_por_organismos_sma_Junio_(%s)_(%s).xlsx" % (municipio_usuario, anno_actual)
        # Union of this year's June registrations and currently-active records.
        LICENCIADOS_SMA = LicenciadosSMA.objects.filter(municipio_residencia=municipio_usuario, mes_entrevista='Junio', fecha_registro__year=anno_actual).values("incorporado", "organismo_id", "ubicacion_id", "municipio_residencia__provincia_id") | \
            LicenciadosSMA.objects.filter(municipio_residencia=municipio_usuario, mes_entrevista='Junio', activo=True).values("incorporado", "organismo_id", "ubicacion_id", "municipio_residencia__provincia_id")
        MUNICIPIO = Municipio.objects.filter(id=municipio_usuario.id)
    elif categoria_usuario == 'dpt_ee':
        # Provincial user: restrict to the user's province.
        response['Content-Disposition'] = "attachment; filename=Incorporados_por_organismos_sma_Junio_(%s)_(%s).xlsx" % (provincia_usuario, anno_actual)
        PROVINCIAS = Provincia.objects.filter(id=provincia_usuario.id)
        # MUNICIPIOS = Municipio.objects.all()
        LICENCIADOS_SMA = LicenciadosSMA.objects.filter(municipio_residencia__provincia=provincia_usuario, mes_entrevista='Junio', fecha_registro__year=anno_actual).values("incorporado", "organismo_id", "ubicacion_id", "municipio_residencia__provincia_id") | \
            LicenciadosSMA.objects.filter(municipio_residencia__provincia=provincia_usuario, mes_entrevista='Junio', activo=True).values("incorporado", "organismo_id", "ubicacion_id", "municipio_residencia__provincia_id")
    else:
        # Nation-wide report for any other category.
        response['Content-Disposition'] = "attachment; filename=Incorporados_por_organismos_sma_Junio_(%s).xlsx" % anno_actual
        PROVINCIAS = Provincia.objects.all()
        # MUNICIPIOS = Municipio.objects.all()
        LICENCIADOS_SMA = LicenciadosSMA.objects.filter(fecha_registro__year=anno_actual, mes_entrevista='Junio',).values("incorporado", "organismo_id", "ubicacion_id", "municipio_residencia__provincia_id") | \
            LicenciadosSMA.objects.filter(activo=True, mes_entrevista='Junio',).values("incorporado", "organismo_id", "ubicacion_id", "municipio_residencia__provincia_id")
    # The workbook writes straight into the HTTP response buffer.
    book = Workbook(response, {'in_memory': True})
    worksheet_data = book.add_worksheet('Incorporados')
    formato = book.add_format({'bold': True, 'border': 1})          # header / total cells
    formato2 = book.add_format({'border': 1})                       # plain data cells
    formato3 = book.add_format({'border': 1, 'font_color': 'red'})  # non-organismo rows
    worksheet_data.write("A1", "OACE", formato)
    worksheet_data.set_column("A:A", 21)
    if categoria_usuario == 'administrador' or categoria_usuario == 'dpt_ee':
        # One column per province plus a trailing 'Total' column.
        provincias = [provincia.siglas for provincia in PROVINCIAS]
        cantidad_provincias = provincias.__len__()
        worksheet_data.write_row(0, 1, provincias, formato)
        worksheet_data.write(0, cantidad_provincias + 1, 'Total', formato)
    if categoria_usuario == 'dmt':
        worksheet_data.write(0, 1, municipio_usuario.nombre, formato2)
    # Only incorporated licenciados are counted in the report body.
    LICENCIADOS_SMA = LICENCIADOS_SMA.filter(incorporado=1)
    ORGANISMOS = Organismo.objects.filter(activo=True)
    INICIO = 1  # next worksheet row to fill
    if categoria_usuario == 'dpt_ee' or categoria_usuario == 'administrador':
        # One row per organismo: per-province counts plus a row SUM formula.
        for organismo in ORGANISMOS:
            worksheet_data.write(INICIO, 0, organismo.siglas, formato)
            for index, provincia in enumerate(PROVINCIAS):
                worksheet_data.write(INICIO, index + 1, LICENCIADOS_SMA.filter(
                    organismo_id=organismo.id,
                    municipio_residencia__provincia_id=provincia.id).count(), formato2)
            total_organismo = '=SUM(%s)' % xl_range(INICIO, 1, INICIO, cantidad_provincias)
            worksheet_data.write(INICIO, cantidad_provincias + 1, total_organismo, formato)
            INICIO += 1
        # Extra rows keyed by ubicacion_id; the ids appear to map to the
        # row labels (2=TPCP, 3=DL358, 4=other non-state) -- confirm
        # against the Ubicacion table.
        worksheet_data.write(INICIO, 0, 'TPCP', formato3)
        for index, provincia in enumerate(PROVINCIAS):
            worksheet_data.write(INICIO, index + 1, LICENCIADOS_SMA.filter(
                ubicacion_id=2,
                municipio_residencia__provincia_id=provincia.id).count(), formato2)
        total_tpcp = '=SUM(%s)' % xl_range(INICIO, 1, INICIO, cantidad_provincias)
        worksheet_data.write(INICIO, cantidad_provincias + 1, total_tpcp, formato)
        INICIO += 1
        worksheet_data.write(INICIO, 0, 'DL358', formato3)
        for index, provincia in enumerate(PROVINCIAS):
            worksheet_data.write(INICIO, index + 1, LICENCIADOS_SMA.filter(
                ubicacion_id=3,
                municipio_residencia__provincia_id=provincia.id).count(), formato2)
        total_dl358 = '=SUM(%s)' % xl_range(INICIO, 1, INICIO, cantidad_provincias)
        worksheet_data.write(INICIO, cantidad_provincias + 1, total_dl358, formato)
        INICIO += 1
        worksheet_data.write(INICIO, 0, 'Otra no Estatal', formato3)
        for index, provincia in enumerate(PROVINCIAS):
            worksheet_data.write(INICIO, index + 1, LICENCIADOS_SMA.filter(
                ubicacion_id=4,
                municipio_residencia__provincia_id=provincia.id).count(), formato2)
        total_otra_no_estatal = '=SUM(%s)' % xl_range(INICIO, 1, INICIO, cantidad_provincias)
        worksheet_data.write(INICIO, cantidad_provincias + 1, total_otra_no_estatal, formato)
        INICIO += 1
        # Final row: per-province column sums over all data rows
        # (organismos + the three ubicacion rows), plus a grand total.
        worksheet_data.write(INICIO, 0, 'Total', formato)
        for index, provincia in enumerate(PROVINCIAS):
            total_otra_no_estatal = '=SUM(%s)' % xl_range(1, index + 1, ORGANISMOS.count() + 3, index + 1)
            worksheet_data.write(INICIO, index + 1, total_otra_no_estatal, formato)
        total_general = '=SUM(%s)' % xl_range(INICIO, 1, INICIO, cantidad_provincias)
        worksheet_data.write(INICIO, cantidad_provincias + 1, total_general, formato)
    if categoria_usuario == 'dmt':
        # Same layout for a single municipality, without per-row totals.
        for organismo in ORGANISMOS:
            worksheet_data.write(INICIO, 0, organismo.siglas, formato)
            for index, municipio in enumerate(MUNICIPIO):
                worksheet_data.write(INICIO, index + 1, LICENCIADOS_SMA.filter(
                    organismo_id=organismo.id,
                    municipio_residencia_id=municipio.id).count(), formato2)
            INICIO += 1
        worksheet_data.write(INICIO, 0, 'TPCP', formato3)
        for index, municipio in enumerate(MUNICIPIO):
            worksheet_data.write(INICIO, index + 1, LICENCIADOS_SMA.filter(
                ubicacion_id=2,
                municipio_residencia_id=municipio.id).count(), formato2)
        INICIO += 1
        worksheet_data.write(INICIO, 0, 'DL358', formato3)
        for index, municipio in enumerate(MUNICIPIO):
            worksheet_data.write(INICIO, index + 1, LICENCIADOS_SMA.filter(
                ubicacion_id=3,
                municipio_residencia_id=municipio.id).count(), formato2)
        INICIO += 1
        worksheet_data.write(INICIO, 0, 'Otra no Estatal', formato3)
        for index, municipio in enumerate(MUNICIPIO):
            worksheet_data.write(INICIO, index + 1, LICENCIADOS_SMA.filter(
                ubicacion_id=4,
                municipio_residencia_id=municipio.id).count(), formato2)
        INICIO += 1
        worksheet_data.write(INICIO, 0, 'Total', formato)
        for index, municipio in enumerate(MUNICIPIO):
            total_otra_no_estatal = '=SUM(%s)' % xl_range(1, index + 1, ORGANISMOS.count() + 3, index + 1)
            worksheet_data.write(INICIO, index + 1, total_otra_no_estatal, formato)
    book.close()
    # elapsed_time = time.time() - start_time
    # print("Tiempo transcurrido: %.10f segundos." % elapsed_time)
    return response
def resultados_entrevistas_sma_diciembre(request):
    """Export an xlsx report of December SMA interview outcomes.

    For 'dpt_ee' users the report covers their province; for any other
    non-'dmt' category it covers all provinces.  Each province row is
    followed by its municipalities, and a final 'Total' row accumulates
    the province-level figures.  NOTE(review): in the 'dmt' branch no
    body rows are written (only headers and a zero 'Total' row) -- the
    municipality section is gated on dpt_ee/administrador; confirm this
    is intended.
    """
    start_time = time.time()
    anno_actual = datetime.today().year
    categoria_usuario = request.user.perfil_usuario.categoria.nombre
    municipio_usuario = request.user.perfil_usuario.municipio
    provincia_usuario = request.user.perfil_usuario.provincia
    response = HttpResponse(
        content_type=
        'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
    if categoria_usuario == 'dmt':
        response[
            'Content-Disposition'] = "attachment; filename=Resultados_entrevistas_sma_(Diciembre)(%s)_(%s).xlsx" % (
                municipio_usuario, anno_actual)
        LICENCIADOS_SMA = LicenciadosSMA.objects.filter(
            municipio_residencia=municipio_usuario,
            mes_entrevista='Diciembre',
            activo=True).values("recibio_oferta", "acepto_oferta", "ubicacion",
                                "causa_no_aceptacion")
    elif categoria_usuario == 'dpt_ee':
        response[
            'Content-Disposition'] = "attachment; filename=Resultados_entrevistas_sma_(Diciembre)(%s)_(%s).xlsx" % (
                provincia_usuario, anno_actual)
        PROVINCIAS = Provincia.objects.filter(id=provincia_usuario.id)
        MUNICIPIOS = Municipio.objects.all()
        LICENCIADOS_SMA = LicenciadosSMA.objects.filter(
            municipio_residencia__provincia=provincia_usuario,
            mes_entrevista='Diciembre',
            activo=True).values("recibio_oferta", "acepto_oferta", "ubicacion",
                                "causa_no_aceptacion")
    else:
        response[
            'Content-Disposition'] = "attachment; filename=Resultados_entrevistas_sma_(Diciembre)(%s).xlsx" % anno_actual
        PROVINCIAS = Provincia.objects.all()
        MUNICIPIOS = Municipio.objects.all()
        LICENCIADOS_SMA = LicenciadosSMA.objects.filter(
            activo=True,
            mes_entrevista='Diciembre').values("recibio_oferta",
                                               "acepto_oferta", "ubicacion",
                                               "causa_no_aceptacion")
    # Workbook streams directly into the HTTP response.
    book = Workbook(response, {'in_memory': True})
    worksheet_data = book.add_worksheet('Resultados_entrevistas')
    formato = book.add_format({'bold': True, 'border': 1})   # headers / province rows
    formato2 = book.add_format({'border': 1})                # municipality rows
    # Header row (columns B..K) and column widths.
    worksheet_data.write("B1", "Controlados", formato)
    worksheet_data.write("C1", "Recibio oferta", formato)
    worksheet_data.write("D1", "Acepto oferta", formato)
    worksheet_data.write("E1", "Empleo Estatal", formato)
    worksheet_data.write("F1", "TPCP", formato)
    worksheet_data.write("G1", "DL 358", formato)
    worksheet_data.write("H1", "Otra no Estatal", formato)
    worksheet_data.write("I1", "No acepto oferta", formato)
    worksheet_data.write("J1", "No le gustan las ofertas", formato)
    worksheet_data.write("K1", "No desea trabajar", formato)
    worksheet_data.set_column("A:A", 23)
    worksheet_data.set_column("B:B", 11)
    worksheet_data.set_column("C:C", 13)
    worksheet_data.set_column("D:D", 13)
    worksheet_data.set_column("E:E", 14)
    worksheet_data.set_column("F:F", 7)
    worksheet_data.set_column("G:G", 7)
    worksheet_data.set_column("H:H", 14)
    worksheet_data.set_column("I:I", 16)
    worksheet_data.set_column("J:J", 22)
    worksheet_data.set_column("K:K", 16)
    if categoria_usuario == 'dmt':
        worksheet_data.write("A1", "Municipio", formato)
    else:
        worksheet_data.write("A1", "Provincia / Municipio", formato)
    INICIO = 1                                   # next worksheet row to fill
    TOTALES = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]    # running sums of the 10 metrics
    if categoria_usuario == 'dpt_ee' or categoria_usuario == 'administrador':
        for provincia in PROVINCIAS:
            # Province summary row (bold).
            worksheet_data.write(INICIO, 0, provincia.nombre, formato)
            DIC_LICENCIADOS_SMA_PROVINCIA = resultados_entrevistas_provincia(
                LICENCIADOS_SMA.filter(
                    municipio_residencia__provincia=provincia))
            controlados = DIC_LICENCIADOS_SMA_PROVINCIA['controlados']
            recibio_oferta = DIC_LICENCIADOS_SMA_PROVINCIA['recibio_oferta']
            acepto_oferta = DIC_LICENCIADOS_SMA_PROVINCIA['acepto_oferta']
            empleo_estatal = DIC_LICENCIADOS_SMA_PROVINCIA['empleo_estatal']
            tpcp = DIC_LICENCIADOS_SMA_PROVINCIA['tpcp']
            dl_358 = DIC_LICENCIADOS_SMA_PROVINCIA['dl_358']
            otra_no_estatal = DIC_LICENCIADOS_SMA_PROVINCIA['otra_no_estatal']
            no_acepto_oferta = DIC_LICENCIADOS_SMA_PROVINCIA[
                'no_acepto_oferta']
            no_le_gustan_las_ofertas = DIC_LICENCIADOS_SMA_PROVINCIA[
                'no_le_gustan_las_ofertas']
            no_desea_trabajar = DIC_LICENCIADOS_SMA_PROVINCIA[
                'no_desea_trabajar']
            worksheet_data.write(INICIO, 1,
                                 DIC_LICENCIADOS_SMA_PROVINCIA['controlados'],
                                 formato)
            worksheet_data.write(
                INICIO, 2, DIC_LICENCIADOS_SMA_PROVINCIA['recibio_oferta'],
                formato)
            worksheet_data.write(
                INICIO, 3, DIC_LICENCIADOS_SMA_PROVINCIA['acepto_oferta'],
                formato)
            worksheet_data.write(
                INICIO, 4, DIC_LICENCIADOS_SMA_PROVINCIA['empleo_estatal'],
                formato)
            worksheet_data.write(INICIO, 5,
                                 DIC_LICENCIADOS_SMA_PROVINCIA['tpcp'],
                                 formato)
            worksheet_data.write(INICIO, 6,
                                 DIC_LICENCIADOS_SMA_PROVINCIA['dl_358'],
                                 formato)
            worksheet_data.write(
                INICIO, 7, DIC_LICENCIADOS_SMA_PROVINCIA['otra_no_estatal'],
                formato)
            worksheet_data.write(
                INICIO, 8, DIC_LICENCIADOS_SMA_PROVINCIA['no_acepto_oferta'],
                formato)
            worksheet_data.write(
                INICIO, 9,
                DIC_LICENCIADOS_SMA_PROVINCIA['no_le_gustan_las_ofertas'],
                formato)
            worksheet_data.write(
                INICIO, 10, DIC_LICENCIADOS_SMA_PROVINCIA['no_desea_trabajar'],
                formato)
            # Accumulate only province-level figures into the Total row.
            TOTALES[0] += controlados
            TOTALES[1] += recibio_oferta
            TOTALES[2] += acepto_oferta
            TOTALES[3] += empleo_estatal
            TOTALES[4] += tpcp
            TOTALES[5] += dl_358
            TOTALES[6] += otra_no_estatal
            TOTALES[7] += no_acepto_oferta
            TOTALES[8] += no_le_gustan_las_ofertas
            TOTALES[9] += no_desea_trabajar
            INICIO += 1
            # Then one detail row per municipality of that province.
            L_MUNICIPIOS = MUNICIPIOS.filter(provincia=provincia)
            for municipio in L_MUNICIPIOS:
                worksheet_data.write(INICIO, 0, municipio.nombre, formato2)
                DIC_LICENCIADOS_SMA_MUNICIPIO = resultados_entrevistas_provincia(
                    LICENCIADOS_SMA.filter(municipio_residencia=municipio))
                worksheet_data.write(
                    INICIO, 1, DIC_LICENCIADOS_SMA_MUNICIPIO['controlados'],
                    formato)
                worksheet_data.write(
                    INICIO, 2, DIC_LICENCIADOS_SMA_MUNICIPIO['recibio_oferta'],
                    formato)
                worksheet_data.write(
                    INICIO, 3, DIC_LICENCIADOS_SMA_MUNICIPIO['acepto_oferta'],
                    formato)
                worksheet_data.write(
                    INICIO, 4, DIC_LICENCIADOS_SMA_MUNICIPIO['empleo_estatal'],
                    formato)
                worksheet_data.write(INICIO, 5,
                                     DIC_LICENCIADOS_SMA_MUNICIPIO['tpcp'],
                                     formato)
                worksheet_data.write(INICIO, 6,
                                     DIC_LICENCIADOS_SMA_MUNICIPIO['dl_358'],
                                     formato)
                worksheet_data.write(
                    INICIO, 7,
                    DIC_LICENCIADOS_SMA_MUNICIPIO['otra_no_estatal'], formato)
                worksheet_data.write(
                    INICIO, 8,
                    DIC_LICENCIADOS_SMA_MUNICIPIO['no_acepto_oferta'], formato)
                worksheet_data.write(
                    INICIO, 9,
                    DIC_LICENCIADOS_SMA_MUNICIPIO['no_le_gustan_las_ofertas'],
                    formato)
                worksheet_data.write(
                    INICIO, 10,
                    DIC_LICENCIADOS_SMA_MUNICIPIO['no_desea_trabajar'],
                    formato)
                INICIO += 1
    worksheet_data.write(INICIO, 0, 'Total', formato)
    worksheet_data.write_row(INICIO, 1, TOTALES, formato)
    book.close()
    elapsed_time = time.time() - start_time
    print("Tiempo transcurrido: %.10f segundos." % elapsed_time)
    return response
# Demo sheet: a few labelled corner cells, then twenty numbered rows.
# (`workbook` is created earlier in this script.)
worksheet = workbook.add_worksheet()

# Label the four cells of the 2x2 block at the top-left.
corner_labels = [
    (0, 0, "zero rows and zero columns"),
    (0, 1, "zero rows and one column"),
    (1, 0, "one row and zero columns"),
    (1, 1, "one row and one column"),
]
for r, c, text in corner_labels:
    worksheet.write(r, c, text)

# Rows 0-19: a caption in column A and the row index in column B
# (this overwrites the labels written above, as in the original).
for idx in range(20):
    worksheet.write(idx, 0, "Row number")
    worksheet.write(idx, 1, idx)

# Finalise the workbook file.
workbook.close()
cell = sheet.cell(row_index, 0) # if (cell.ctype != XL_CELL_TEXT): if (cell.ctype == XL_CELL_EMPTY or cell.ctype == XL_CELL_BLANK): print("--------------------break on", row_index) print("the cell type is " , cell.ctype) break print("the cell type is " , cell.ctype) points = sheet.row_values(row_index, row_start, row_end) if (row_index == 0): sheet_writer.write_row(row_index, 0, [points[0], "summary"]) continue print("points is ", points) sub_points = points[sub_start:] sub_points.sort(reverse=True) sub_points = sub_points[:3] print(sub_points) sub_points = sub_points * np.asarray(factors) post_points = points[:sub_start] + sub_points.tolist() print("post points: ", post_points) # points.append(sum(post_points[2:])) present_points = [points[0], sum(post_points[2:])] print("points: ", present_points) sheet_writer.write_row(row_index, 0, present_points) wb_writer.close()
def main():
    """Flatten interlock equations from the spreadsheet and write the
    simplified boolean expression per SO signal to ILKEqns.xlsx.

    Relies on module-level state: `stringsSheet`/`matrixSheet` (xlrd
    sheets), the `interlockNumArray`/`whereUsedNumArray`/`ioMemNumArray`
    accumulators, and helpers `generate_soArray`, `match`, `loop`,
    `Tree`, `expr`, `write_data`.
    """
    # Array with all SO signal names, generated from Interlock Strings sheet.
    soArray = generate_soArray()
    # Array with all DO/DI signal names, generated from the Interlock Matrix sheet.
    doiArray = matrixSheet.row_values(2, 14, 146)
    # Array containing entire column 'A' from Interlock Strings sheet.
    # The array is used to determine row numbers corresponding to 'ILK',
    # 'Where Used', and 'I/O members' data.
    filterArray = stringsSheet.col_values(0)
    # Might need update <--
    for index, item in enumerate(filterArray):
        if item == "Interlock Number:":
            interlockNumArray.append(index)
        elif item == "Where Used:":
            whereUsedNumArray.append(index)
        elif item == "I/O Members:":
            ioMemNumArray.append(index)
    # Dictionary mapping signals to their equations
    dependencyMap = dict()
    for soName in soArray:
        dependencyMap[soName] = []
        # Looks for soName occurrence in the Interlock Strings sheet.
        # After the occurrence is determined, the position is used to match
        # the signal with its main ILK.
        for index, item in enumerate(whereUsedNumArray):
            if index == (len(whereUsedNumArray) - 1):
                # Last section: search to the end of the sheet.
                searchCol1 = stringsSheet.col_values(11, item+1)
                searchCol2 = stringsSheet.col_values(26, item+1)
            else:
                searchCol1 = stringsSheet.col_values(11, item+1, interlockNumArray[index+1])
                searchCol2 = stringsSheet.col_values(26, item+1, interlockNumArray[index+1])
            found = match(searchCol1, searchCol2, soName)
            if not found:
                continue
            break
        # TODO: can raise exception if soName not found in the sheet.
        # Appends the main ILK at the front of the array.
        dependencyMap[soName].append(stringsSheet.cell(interlockNumArray[index], 11).value)
        # Beginning row number for I/O members of the given ILK.
        start = ioMemNumArray[index] + 1
        # End row number for I/O members of the given ILK.
        stop = whereUsedNumArray[index] - 1
        # Column 'L' of the I/O members.
        c1 = stringsSheet.col_values(11, start, stop)
        # Column 'AA' of the I/O members.
        c2 = stringsSheet.col_values(26, start, stop)
        # Appends DO/DI signals from the first column.
        for doName in c1:
            if doName == '':
                continue
            dependencyMap[soName].append(doName)
        # Appends DO/DI signals from the second column.
        for doName in c2:
            if doName == '':
                continue
            dependencyMap[soName].append(doName)
    # For each SO signal, we flatten the equation, starting from the top,
    # i.e., the main ILK name, and its corresponding I/O members.
    for soName in soArray:
        array = dependencyMap[soName]
        # main ILK
        ilk = array[0]
        mainIlk = ilk
        # Array of DO/DI signals.
        array = array[1:]
        # Separate check for ILK_01, since it only contains one I/O member.
        # Might need update <--
        if mainIlk == 'ILK_01':
            dependencyMap[soName] = [mainIlk, 'DI_000']
            continue
        tree = Tree()
        # Main ILK inserted at root, and p is the pointer to the root.
        p = tree.insert_root(ilk)
        # Checks if any ILKs are present in the tree which we haven't
        # accounted for (not "visited" even once), and flattens the
        # equation by finding that ILK's I/O members, and so on...
        while tree.has_ilk():
            pointer, ilk = tree.get_ilk()
            loop(tree, pointer, ilk)
        # Ensures proper traversal order.
        # New ILK branches are always on the left, and the continuing I/O
        # members are on the right.
        tree.reverse_children()
        # New array with all signals for a given soName, with these
        # signals formatted properly (i.e., NOT changed to ~, ILKs skipped,
        # and parenthesis in place for logic reduction).
        arrayCopy = tree.format_list()
        # Build a boolean-expression string, inserting '&' between adjacent
        # operands/parenthesised groups ('D...' names and ')'/'(' pairs).
        string = ''
        for index, item in enumerate(arrayCopy):
            string = string + ' ' + item
            if index <= len(arrayCopy) - 2:
                if item.find('D') != -1:
                    if arrayCopy[index+1].find('D') != -1 or arrayCopy[index+1].find('(') != -1:
                        string = string + ' &'
                elif item.find(')') != -1:
                    if arrayCopy[index+1].find('(') != -1 or arrayCopy[index+1].find('D') != -1:
                        string = string + ' &'
        # Parse, simplify and factor the expression (pyeda-style API).
        f = expr(string)
        f = f.simplify()
        f = f.factor()
        dependencyMap[soName] = [mainIlk, str(f)]
    # Write data to excel file.
    book = Workbook('ILKEqns.xlsx')
    sheet = book.add_worksheet('Flattened Interlock Equations')
    sheet.write(0, 0, 'SO')
    sheet.write(0, 1, 'ILK')
    sheet.write(0, 2, 'Dependency')
    # Set column width.
    sheet.set_column(2, 2, 300)
    write_data(sheet, soArray, dependencyMap)
    book.close()
def main():
    """Build an invoice workbook from the last 14 days of Google Calendar
    events.

    Authenticates with cached OAuth credentials (token.pickle), lists the
    events of the work calendar between 14 days ago and now, and writes
    one invoice row per non-"COLES" event via ``writeToInvoice``, ending
    with a total row via ``calculateTotalRevenue``.
    """
    creds = None
    page_token = None  # NOTE(review): unused in this function
    # The file token.pickle stores the user's access and refresh tokens, and is
    # created automatically when the authorization flow completes for the first
    # time.
    if os.path.exists('token.pickle'):
        with open('token.pickle', 'rb') as token:
            creds = pickle.load(token)
    # If there are no (valid) credentials available, let the user log in.
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            flow = InstalledAppFlow.from_client_secrets_file(
                'credentials.json', SCOPES)
            creds = flow.run_local_server(port=0)
        # Save the credentials for the next run
        with open('token.pickle', 'wb') as token:
            pickle.dump(creds, token)
    service = build('calendar', 'v3', credentials=creds)
    # Work calendarId
    workCalendar = "*****@*****.**"
    # Call the Calendar API.
    # NOTE(review): despite the name, the window is 14 days, not one week.
    weekAgo = datetime.utcnow().isoformat() + 'Z'  # 'Z' indicates UTC time
    weekAgo_object = datetime.strptime(weekAgo, "%Y-%m-%dT%H:%M:%S.%fZ")
    weekAgo_object = weekAgo_object - timedelta(days=14)
    weekAgo = datetime.strftime(weekAgo_object, "%Y-%m-%dT%H:%M:%S.%fZ")
    now = datetime.utcnow().isoformat() + 'Z'
    print('Getting the upcoming 10 events')
    events_result = service.events().list(calendarId=workCalendar,
                                          timeMin=weekAgo,
                                          timeMax=now,
                                          singleEvents=True,
                                          orderBy='startTime').execute()
    events = events_result.get('items', [])
    # Workbook named after the covered date range.
    wb = Workbook("invoice " + datetime.strftime(weekAgo_object, "%b %d %Y") +
                  " - " + datetime.strftime(datetime.now(), "%b %d %Y") +
                  ".xlsx")
    sheet1 = wb.add_worksheet()
    sheet1.set_column(1, 1, 15)
    # Header row.
    sheet1.write(0, 0, "Client")
    sheet1.write(0, 1, "Start Date")
    sheet1.write(0, 2, "End Date")
    sheet1.write(0, 3, "No. Hours")
    sheet1.write(0, 4, "Rate")
    sheet1.write(0, 5, "Class Type")
    sheet1.write(0, 6, "Total")
    row = 1
    total = 0  # NOTE(review): unused in this function
    if not events:
        print('No upcoming events found.')
    for event in events:
        start = event['start'].get('dateTime', event['start'].get('date'))
        print(start, event['summary'])
        # Events whose summary mentions "COLES" are excluded from the invoice.
        if "COLES" in event['summary']:
            continue
        else:
            writeToInvoice(event, wb, sheet1, row)
            row = row + 1
    calculateTotalRevenue(sheet1, row)
    wb.close()
def excel_export_subscriptions(request):
    """Export all subscriptions with their assignment progress as an xlsx
    attachment named Report.xlsx.

    One header row of localized column titles, then one row per
    subscription with the primary member's contact data (blank when no
    primary member) and assignment counts/progress.
    """
    response = HttpResponse(
        content_type=
        'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
    response['Content-Disposition'] = 'attachment; filename=Report.xlsx'
    # Build the workbook in memory, then copy it into the response.
    output = BytesIO()
    workbook = Workbook(output)
    worksheet_s = workbook.add_worksheet(Config.vocabulary('subscription_pl'))
    # Header row.  Note the '{} ...'.format(...) runs BEFORE gettext, so
    # the composed string is what gets looked up for translation.
    worksheet_s.write_string(0, 0, str(_('Übersicht')))
    worksheet_s.write_string(0, 1, str(_('HauptbezieherIn')))
    worksheet_s.write_string(0, 2, str(_('HauptbezieherInEmail')))
    worksheet_s.write_string(0, 3, str(_('HauptbezieherInTelefon')))
    worksheet_s.write_string(0, 4, str(_('HauptbezieherInMobile')))
    worksheet_s.write_string(0, 5, str(_('Weitere BezieherInnen')))
    worksheet_s.write_string(0, 6, str(_('Status')))
    worksheet_s.write_string(0, 7, str(_('Depot')))
    worksheet_s.write_string(0, 8, str(Config.vocabulary('assignment')))
    worksheet_s.write_string(
        0, 9, str(_('{} soll'.format(Config.vocabulary('assignment')))))
    worksheet_s.write_string(
        0, 10, str(_('{} status(%)'.format(Config.vocabulary('assignment')))))
    worksheet_s.write_string(
        0, 11, str(_('{} Kernbereich'.format(Config.vocabulary('assignment')))))
    worksheet_s.write_string(
        0, 12,
        str(_('{} Kernbereich soll'.format(Config.vocabulary('assignment')))))
    worksheet_s.write_string(
        0, 13,
        str(
            _('{} Kernbereich status(%)'.format(
                Config.vocabulary('assignment')))))
    worksheet_s.write_string(0, 14, str(_('Preis')))
    subs = subscriptions_with_assignments(SubscriptionDao.all_subscritions())
    row = 1
    for sub in subs:
        # Contact details come from the primary member when present.
        primary_member = sub['subscription'].primary_member
        if primary_member is not None:
            name = primary_member.get_name()
            email = primary_member.email
            phone = primary_member.phone or ''
            mobile = primary_member.mobile_phone or ''
        else:
            name = ''
            email = ''
            phone = ''
            mobile = ''
        worksheet_s.write_string(row, 0, sub['subscription'].overview)
        worksheet_s.write_string(row, 1, name)
        worksheet_s.write_string(row, 2, email)
        worksheet_s.write_string(row, 3, phone)
        worksheet_s.write_string(row, 4, mobile)
        worksheet_s.write_string(row, 5,
                                 sub['subscription'].other_recipients_names)
        worksheet_s.write_string(row, 6, sub['subscription'].state_text)
        worksheet_s.write_string(row, 7, sub['subscription'].depot.name)
        # Numeric cells use write() so xlsxwriter keeps the native type.
        worksheet_s.write(row, 8, sub.get('assignments'))
        worksheet_s.write(row, 9, sub['subscription'].required_assignments)
        worksheet_s.write(row, 10, sub.get('assignments_progress'))
        worksheet_s.write(row, 11, sub.get('core_assignments'))
        worksheet_s.write(row, 12,
                          sub['subscription'].required_core_assignments)
        worksheet_s.write(row, 13, sub.get('core_assignments_progress'))
        worksheet_s.write(row, 14, sub['subscription'].price)
        row += 1
    workbook.close()
    xlsx_data = output.getvalue()
    response.write(xlsx_data)
    return response
def mt_report(context, case_id, test, outpath=None):
    """Export all mitochondrial variants for each sample of a case
            and write them to an excel file

        Args:
            context(click.Context): provides the adapter via context.obj
            case_id(str)
            test(bool): True if the function is called for testing purposes
                (files are only counted, not actually written)
            outpath(str): path to output folder; defaults to the current
                working directory

        Returns:
            written_files(int): number of written or simulated files
    """
    LOG.info('exporting mitochondrial variants for case "{}"'.format(case_id))
    adapter = context.obj['adapter']
    query = {'chrom':'MT'}
    case_obj = adapter.case(case_id=case_id)
    if not case_obj:
        LOG.warning('Could not find a scout case with id "{}". No report was created.'.format(case_id))
        context.abort()
    samples = case_obj.get('individuals')
    # All MT variants of the case, sorted by position.
    mt_variants = list(adapter.variants(case_id=case_id, query=query, nr_of_variants= -1, sort_key='position'))
    if not mt_variants:
        LOG.warning('There are no MT variants associated to case {} in database!'.format(case_id))
        context.abort()
    today = datetime.datetime.now().strftime('%Y-%m-%d')
    # set up outfolder
    if not outpath:
        outpath = str(os.getcwd())
    # get document lines for each of the cases's individuals
    # Write excel document for each sample in case
    written_files = 0
    for sample in samples:
        sample_id = sample['individual_id']
        sample_lines = export_mt_variants(variants=mt_variants, sample_id=sample_id)
        # set up document name: <display_name>.<sample>.<date>.xlsx
        document_name = '.'.join([case_obj['display_name'], sample_id, today]) + '.xlsx'
        workbook = Workbook(os.path.join(outpath,document_name))
        Report_Sheet = workbook.add_worksheet()
        # Dry run: count the file as writable without writing it.
        # NOTE(review): the already-created (empty) workbook is never
        # closed/removed in this branch.
        if test and sample_lines and workbook:
            written_files +=1
            continue
        # Write the column header
        row = 0
        for col,field in enumerate(MT_EXPORT_HEADER):
            Report_Sheet.write(row,col,field)
        # Write variant lines, after header (start at line 1)
        for row, line in enumerate(sample_lines,1): # each line becomes a row in the document
            for col, field in enumerate(line): # each field in line becomes a cell
                Report_Sheet.write(row,col,field)
        workbook.close()
        if os.path.exists(os.path.join(outpath,document_name)):
            written_files += 1
    if test:
        LOG.info("Number of excel files that can be written to folder {0}: {1}".format(outpath, written_files))
    else:
        LOG.info("Number of excel files written to folder {0}: {1}".format(outpath, written_files))
    return written_files
def mt_report(case_id, test, outpath=None):
    """Export all mitochondrial variants for each sample of a case and write
    them to an excel file.

    NOTE(review): near-duplicate of the click-context variant of ``mt_report``
    earlier in this file; this version reads the adapter from the module-level
    ``store`` and aborts via ``click.Abort`` — consider consolidating.

    Args:
        case_id(str): id of the scout case to export
        test(bool): True if the function is called for testing purposes;
            workbooks are counted as writable but not written to disk
        outpath(str): path to output folder (defaults to current directory)

    Returns:
        written_files(int): number of written or simulated files
    """
    LOG.info('exporting mitochondrial variants for case "{}"'.format(case_id))
    adapter = store
    # restrict export to variants on the mitochondrial chromosome
    query = {"chrom": "MT"}
    case_obj = adapter.case(case_id=case_id)
    if not case_obj:
        LOG.warning(
            'Could not find a scout case with id "{}". No report was created.'.
            format(case_id))
        raise click.Abort()
    samples = case_obj.get("individuals")
    # nr_of_variants=-1 -> fetch ALL matching variants, sorted by position
    mt_variants = list(
        adapter.variants(case_id=case_id,
                         query=query,
                         nr_of_variants=-1,
                         sort_key="position"))
    if not mt_variants:
        LOG.warning(
            "There are no MT variants associated to case {} in database!".
            format(case_id))
        raise click.Abort()
    today = datetime.datetime.now().strftime("%Y-%m-%d")
    # set up outfolder
    if not outpath:
        outpath = str(os.getcwd())
    # get document lines for each of the cases's individuals
    # Write excel document for each sample in case
    written_files = 0
    for sample in samples:
        sample_id = sample["individual_id"]
        sample_lines = export_mt_variants(variants=mt_variants,
                                          sample_id=sample_id)
        # set up document name: <case display name>.<sample>.<date>.xlsx
        document_name = ".".join([case_obj["display_name"], sample_id, today
                                  ]) + ".xlsx"
        workbook = Workbook(os.path.join(outpath, document_name))
        Report_Sheet = workbook.add_worksheet()
        # test mode: count the file as writable and skip the actual writes
        if test and sample_lines and workbook:
            written_files += 1
            continue
        # Write the column header
        row = 0
        for col, field in enumerate(MT_EXPORT_HEADER):
            Report_Sheet.write(row, col, field)
        # Write variant lines, after header (start at line 1)
        for row, line in enumerate(
                sample_lines, 1):  # each line becomes a row in the document
            for col, field in enumerate(
                    line):  # each field in line becomes a cell
                Report_Sheet.write(row, col, field)
        workbook.close()
        # only count files that actually landed on disk
        if os.path.exists(os.path.join(outpath, document_name)):
            written_files += 1
    if test:
        LOG.info(
            "Number of excel files that can be written to folder {0}: {1}".
            format(outpath, written_files))
    else:
        LOG.info("Number of excel files written to folder {0}: {1}".format(
            outpath, written_files))
    return written_files
def crea_excel_presupuesto(queryset):
    """Build an xlsx quotation ("presupuesto") and return it as an HTTP
    attachment.

    Called from the "Crea Excel" action of presupuestos_list. Only the first
    presupuesto of the selection is exported (``queryset[:1]``).

    Args:
        queryset: queryset of Presupuesto objects.

    Returns:
        HttpResponse carrying the workbook as an attachment. NOTE(review):
        with an empty queryset ``response`` is never bound and a NameError is
        raised — behaviour kept from the original.
    """
    #queryset = queryset[0]
    for obj in queryset[:1]:
        _fileName = "P{}_{}_{}.xlsx".format(
            obj.id, re.sub(' ', '', obj.cliente.nombre.title()),
            re.sub(' ', '', obj.asunto.title()))
        # BUG FIX: the content type was
        # 'application/application/vnd.openxmlformats-...' — a doubled
        # "application/" prefix, which is not a valid MIME type, leaving
        # clients to guess the attachment type.
        response = HttpResponse(
            content_type=
            'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
            charset='iso-8859-1')
        response['Content-Disposition'] = f'attachment; filename={_fileName}'
        book = Workbook(response)
        sheet = book.add_worksheet('Grupo SPEC')
        ################# WIDTHS AND HEIGHTS #################
        format_filas_columnas(sheet)
        ################# HEADERS #################
        # imp_cabeceras draws the letterhead and returns the next free row
        row = imp_cabeceras(book, sheet, obj, "Soporte Grupo SPEC")
        row += 2
        col = 2
        ################# SUBJECT #################
        sheet.merge_range(row, col, row, col + 7, obj.asunto.upper(),
                          book.add_format(FORMATOS.get("headerGrupos")))
        row += 2
        col = 2
        ################# COLUMN TITLES #################
        # (label, number of spreadsheet columns the title spans)
        var_columnas = (
            ("CÓDIGO", 1),
            ("CANTIDAD", 1),
            ("DESCRIPCIÓN", 1),
            ("PRECIO UNITARIO", 2),
            ("TOTAL", 3),
        )
        for var, posic in var_columnas:
            if posic == 1:
                sheet.write(row, col, var,
                            book.add_format(FORMATOS.get("headerGrupos")))
            else:
                sheet.merge_range(
                    row, col, row, col + posic - 1, var,
                    book.add_format(FORMATOS.get("headerGrupos")))
            col += posic
        ################# BLANK LINE #################
        col = 2
        row += 1
        imp_linea_vacia(book, sheet, row, col)
        ################# ITEMS #################
        row += 1
        lineas = LineaPresupuesto.objects.filter(
            presupuesto=obj.id)  #.order_by('-check_in')
        initialRow = row
        # NOTE(review): this inner loop re-binds `obj` and repeats `lineas`
        # once per element of the FULL queryset; with a single selection it is
        # harmless, with several it duplicates the item rows. Kept as-is to
        # preserve behaviour — confirm intent before removing.
        for obj in queryset:
            for linea in lineas:
                sheet.write(row, col, linea.repuesto.codigo,
                            book.add_format(FORMATOS.get("item_BI")))
                sheet.write(row, col + 1, linea.cantidad,
                            book.add_format(FORMATOS.get("item_BI")))
                sheet.write(row, col + 2, linea.repuesto.nombre,
                            book.add_format(
                                FORMATOS.get("item_BI")))  #descripcion
                sheet.write(row, col + 3, obj.moneda.codigo,
                            book.add_format(FORMATOS.get("item_BI")))
                # unit price: explicit override, else catalogue cost * fx rate
                sheet.write(
                    row, col + 4, linea.costo_custom if linea.costo_custom
                    else linea.repuesto.costo * obj.tasa_cambio,
                    book.add_format(FORMATOS.get("item_SB")))
                sheet.write(row, col + 5, obj.moneda.codigo,
                            book.add_format(FORMATOS.get("item_BI")))
                # line total = quantity (col D) * unit price (col G);
                # formula rows are 1-based, hence row+1
                sheet.write_formula(row, col + 6, f'D{row+1}*G{row+1}',
                                    book.add_format(FORMATOS.get("item_SB")))
                sheet.write(row, col + 7, "+ IVA",
                            book.add_format(FORMATOS.get("item_BD")))
                row += 1
        ################# END ITEMS #################
        ################# BLANK LINE #################
        imp_linea_vacia(book, sheet, row, col)
        ################# TOTAL LINE #################
        row += 1
        imp_linea_total(book, sheet, row, col, obj.moneda.codigo, initialRow)
        ################# FINAL LINE #################
        row += 1
        imp_linea_vacia(book, sheet, row, col, final=True)
        ################# COMMERCIAL TERMS #################
        row += 2
        col = 2
        imp_condiciones(book, sheet, row, col, tipo="presupuesto")
        book.close()
    return response
def interruptos_por_actividades(request):
    """Django view (Python 2): xlsx report of "interruptos" per activity.

    One column per active ActividadInterrupto plus a right-hand "Totales"
    column; one summary row per organismo (OACE/OSDE) followed by one row per
    entity, and a final grand-total row. The workbook is streamed straight
    into the HttpResponse.

    Returns:
        HttpResponse with the xlsx attachment.

    NOTE(review): if the user category is neither 'interrupto' nor
    'administrador', `interruptos` is never bound and a NameError follows —
    behaviour kept from the original; confirm upstream guarantees.
    """
    anno_actual = datetime.today().year
    mes_actual = datetime.today().month
    categoria_usuario = request.user.perfil_usuario.categoria.nombre
    organismo = request.user.perfil_usuario.organismo
    nombre_organismo = organismo.nombre.encode('utf-8').strip()
    # December reports are attributed to the previous year
    num_anno = anno_actual
    if mes_actual == 12:
        num_anno = anno_actual - 1
    response = HttpResponse(
        content_type=
        'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
    if categoria_usuario == 'interrupto':
        response[
            'Content-Disposition'] = "attachment; filename=Interruptos_por_actividades_(%s)_(%s_%s).xlsx" % (
                str(nombre_organismo).replace(
                    " ", "_"), obtener_mes(mes_actual - 1), num_anno)
    else:
        response[
            'Content-Disposition'] = "attachment; filename=Interruptos_por_actividades_(%s_%s).xlsx" % (
                obtener_mes(mes_actual - 1), num_anno)
    book = Workbook(response, {'in_memory': True})
    worksheet_data = book.add_worksheet("Interruptos por Provincias")
    formato = book.add_format({'bold': True, 'border': 1})
    formato1 = book.add_format({
        'align': 'center',
        'valign': 'vcenter',
        'bold': True,
        'border': 1
    })
    formato5 = book.add_format({'border': 1, 'text_wrap': True})
    formato_organismos = book.add_format({'bold': True, 'border': 1})
    worksheet_data.write("A1", "OACE-OSDE / Actividades", formato1)
    worksheet_data.set_column("A:A", 24)
    worksheet_data.set_column("B:B", 19)
    worksheet_data.set_column("C:C", 21)
    worksheet_data.set_column("D:D", 21)
    # activity id=3 is excluded from the report (reason not visible here)
    actividades = ActividadInterrupto.objects.filter(activo=True).exclude(id=3)
    posicion_total_derecho = actividades.count() + 1
    # per-activity running totals, one slot per activity column
    totales = 0
    lista_totales = []
    for indice, a in enumerate(actividades):
        lista_totales.insert(indice, totales)
        worksheet_data.write(0, indice + 1, a.actividad, formato)
    worksheet_data.write(0, posicion_total_derecho, "Totales", formato)
    if categoria_usuario == 'interrupto':
        if es_oace(organismo):
            osdes = obtener_osdes_de_un_oace(organismo)
            if len(osdes) > 0:
                interruptos = Interruptos.objects.filter(
                    fecha_registro__year=anno_actual,
                    organismo__id__in=[osde.id for osde in osdes]) | \
                    Interruptos.objects.filter(organismo=organismo)
            else:
                interruptos = Interruptos.objects.filter(
                    fecha_registro__year=anno_actual, organismo=organismo)
            organismos = Organismo.objects.filter(id=organismo.id)
        else:
            # raw query: current month/year only (ints interpolated, no
            # injection risk); the filter below re-selects by id
            query = """SELECT id FROM public."SGMGU_interruptos" t where date_part('month',t.fecha_registro)=""" + unicode(
                mes_actual
            ) + """ AND date_part('year',t.fecha_registro)=""" + unicode(
                anno_actual) + """;"""
            resultado_query_interruptos = Interruptos.objects.raw(query)
            ids_interruptos = [
                interr.id for interr in resultado_query_interruptos
            ]
            # (removed a dead store: `interruptos` was first assigned a
            # year/organismo filter and then unconditionally overwritten here)
            interruptos = Interruptos.objects.filter(id__in=ids_interruptos)
    elif categoria_usuario == 'administrador':
        query = """SELECT id FROM public."SGMGU_interruptos" t where date_part('month',t.fecha_registro)=""" + unicode(
            mes_actual
        ) + """ AND date_part('year',t.fecha_registro)=""" + unicode(
            anno_actual) + """;"""
        resultado_query_interruptos = Interruptos.objects.raw(query)
        ids_interruptos = [interr.id for interr in resultado_query_interruptos]
        interruptos = Interruptos.objects.filter(id__in=ids_interruptos)
        organismos = obtener_oaces()
    inicio_fila = 1
    i = 0
    if es_oace(organismo) or categoria_usuario == 'administrador':
        for oace in organismos:
            # summary row for the OACE itself
            worksheet_data.write(inicio_fila + i, 0, oace.siglas,
                                 formato_organismos)  # organismo
            totales_a = 0
            for indice, actividad in enumerate(actividades):
                # hoisted: this total used to be recomputed three times
                valor = totales_interruptos_organismo_actividades(
                    interruptos, oace, actividad)
                worksheet_data.write(inicio_fila + i, indice + 1, valor,
                                     formato_organismos)
                lista_totales[indice] = lista_totales[indice] + valor
                totales_a = totales_a + valor
            worksheet_data.write(inicio_fila + i, posicion_total_derecho,
                                 totales_a, formato)
            i = i + 1
            # one row per entity belonging to this OACE
            l_interruptos = interruptos.filter(organismo__id=oace.id)
            for interrupto in l_interruptos:
                worksheet_data.write(inicio_fila + i, 0,
                                     interrupto.entidad.e_nombre,
                                     formato5)  # entidad
                totales_a = 0
                for indice, actividad in enumerate(actividades):
                    valor = totales_interruptos_actividades(
                        interrupto, actividad)
                    worksheet_data.write(inicio_fila + i, indice + 1, valor,
                                         formato5)
                    totales_a = totales_a + valor
                worksheet_data.write(inicio_fila + i, posicion_total_derecho,
                                     totales_a, formato)
                i = i + 1
            # then the OSDEs hanging from this OACE, same layout
            osdes = obtener_osdes_de_un_oace(oace)
            if len(osdes) > 0:
                for osde in osdes:
                    worksheet_data.write(inicio_fila + i, 0, osde.siglas,
                                         formato_organismos)  # osde
                    totales_a = 0
                    for indice, actividad in enumerate(actividades):
                        valor = totales_interruptos_organismo_actividades(
                            interruptos, osde, actividad)
                        worksheet_data.write(inicio_fila + i, indice + 1,
                                             valor, formato_organismos)
                        lista_totales[indice] = lista_totales[indice] + valor
                        totales_a = totales_a + valor
                    worksheet_data.write(inicio_fila + i,
                                         posicion_total_derecho, totales_a,
                                         formato)
                    i = i + 1
                    l_interruptos = interruptos.filter(organismo_id=osde.id)
                    for interrupto in l_interruptos:
                        worksheet_data.write(inicio_fila + i, 0,
                                             interrupto.entidad.e_nombre,
                                             formato5)  # entidad
                        totales_a = 0
                        for indice, actividad in enumerate(actividades):
                            valor = totales_interruptos_actividades(
                                interrupto, actividad)
                            worksheet_data.write(inicio_fila + i, indice + 1,
                                                 valor, formato5)
                            totales_a = totales_a + valor
                        worksheet_data.write(inicio_fila + i,
                                             posicion_total_derecho,
                                             totales_a, formato)
                        i = i + 1
    if not es_oace(organismo):
        # single-organismo layout: summary row then its entities
        worksheet_data.write(inicio_fila + i, 0, organismo.siglas,
                             formato_organismos)  # organismo
        totales_a = 0
        for indice, actividad in enumerate(actividades):
            valor = totales_interruptos_organismo_actividades(
                interruptos, organismo, actividad)
            worksheet_data.write(inicio_fila + i, indice + 1, valor,
                                 formato_organismos)
            lista_totales[indice] = lista_totales[indice] + valor
            totales_a = totales_a + valor
        worksheet_data.write(inicio_fila + i, posicion_total_derecho,
                             totales_a, formato)
        i = i + 1
        l_interruptos = interruptos.filter(organismo__nombre=organismo)
        for interrupto in l_interruptos:
            worksheet_data.write(inicio_fila + i, 0,
                                 interrupto.entidad.e_nombre,
                                 formato5)  # entidad
            totales_a = 0
            # BUG FIX: the loop variable was `a` while the body used the
            # stale `actividad` left over from the previous loop, so every
            # column of an entity row showed the LAST activity's total.
            for indice, actividad in enumerate(actividades):
                valor = totales_interruptos_actividades(interrupto, actividad)
                worksheet_data.write(inicio_fila + i, indice + 1, valor,
                                     formato5)
                totales_a = totales_a + valor
            worksheet_data.write(inicio_fila + i, posicion_total_derecho,
                                 totales_a, formato)
            i = i + 1
    # grand-total row
    total = 0
    worksheet_data.write(inicio_fila + i, 0, "TOTALES", formato_organismos)
    for indice, actividad in enumerate(actividades):
        worksheet_data.write(inicio_fila + i, indice + 1,
                             lista_totales[indice], formato_organismos)
        total = total + lista_totales[indice]
    worksheet_data.write(inicio_fila + i, posicion_total_derecho, total,
                         formato)
    book.close()
    return response
def create_excel(self, submission_list, is_single_sheet):
    """Write the submissions to a temporary xlsx file and return the file.

    Python 2 code (uses dict.iterkeys). Rows are streamed with XlsxWriter's
    constant_memory mode; the export is capped at 20000 rows.

    Args:
        submission_list: iterable of elasticsearch hits (dicts with '_source')
        is_single_sheet: True -> everything on one sheet with a leading index
            column; False -> one sheet per visible-header group

    Returns:
        the NamedTemporaryFile holding the closed workbook (delete=False, so
        the caller owns cleanup of the file on disk).
    """
    workbook_file = tempfile.NamedTemporaryFile(suffix=".xlsx", delete=False)
    # constant_memory streams rows to disk instead of keeping them in RAM
    workbook = Workbook(workbook_file, options={'constant_memory': True})
    headers = self.get_visible_headers(is_single_sheet)
    # normalise a None header set to an empty mapping
    if isinstance(headers, NoneType):
        headers = {}
    visible_headers = headers
    if is_single_sheet:
        create_single_sheet_excel_headers(visible_headers, workbook)
        row_count_dict = {'main': 0}
        formatter = AdvanceSubmissionFormatter(self.columns, self.form_model,
                                               self.local_time_delta,
                                               self.preferences,
                                               is_single_sheet)
        for row_number, row_dict in enumerate(submission_list):
            formatted_values = []
            formatted_repeats = {}
            if row_number == 20000:
                # export limit set to 20K after performance exercise
                #since scan & scroll API does not support result set size the workaround is to handle it this way
                break
            row = row_dict['_source']
            result = formatter.format_row(row, row_number, formatted_repeats)
            if self.form_model.has_nested_fields and not is_single_sheet:
                result.append(row_number + 1)
            if is_single_sheet:
                # prepend a 1-based running index column
                result.insert(0, row_number + 1)
            formatted_values.append(result)
            formatted_repeats.update({'main': formatted_values})
            create_single_sheet_entries(formatted_repeats, workbook,
                                        row_count_dict)
    else:
        create_multi_sheet_excel_headers(visible_headers, workbook)
        # map each sheet name to its index and a per-sheet row counter
        sheet_names_index_map = dict([
            (sheet_name, index)
            for index, sheet_name in enumerate(visible_headers.iterkeys())
        ])
        sheet_name_row_count_map = dict([
            (sheet_name, 0) for sheet_name in sheet_names_index_map.iterkeys()
        ])
        formatter = AdvanceSubmissionFormatter(self.columns, self.form_model,
                                               self.local_time_delta,
                                               self.preferences)
        for row_number, row_dict in enumerate(submission_list):
            formatted_values, formatted_repeats = [], {}
            if row_number == 20000:
                # export limit set to 20K after performance exercise
                #since scan & scroll API does not support result set size the workaround is to handle it this way
                break
            row = row_dict['_source']
            result = formatter.format_row(row, row_number, formatted_repeats)
            if self.form_model.has_nested_fields:
                result.append(row_number + 1)
            formatted_values.append(result)
            formatted_repeats.update({'main': formatted_values})
            create_multi_sheet_entries(formatted_repeats, workbook,
                                       sheet_names_index_map,
                                       sheet_name_row_count_map)
    workbook.close()
    return workbook_file
class SaveProductsInExcel():
    """Export the product lines of fiscal notes ("notas") from MongoDB to a
    detailed xlsx spreadsheet, one row per product, for a company
    (inscricaoFederal) over a month/year range.
    """

    def __init__(self, inscricaoFederal: str, monthStart: int, yearStart: int,
                 monthEnd: int, yearEnd: int):
        """Open the Mongo connection and the target workbook, and write the
        header row. Call saveData() afterwards to fill and close the file.
        """
        self._inscricaoFederal = inscricaoFederal
        # report period: monthStart/yearStart .. monthEnd/yearEnd (inclusive)
        self._monthStart = monthStart
        self._yearStart = yearStart
        self._monthEnd = monthEnd
        self._yearEnd = yearEnd
        self._connectMongo = ConnectMongoDB()
        self._database = self._connectMongo.getConnetion()
        self._workbook = Workbook(
            os.path.join(
                foderToSaveResult,
                f'detalhado_{self._inscricaoFederal}_{getDateTimeNowInFormatStr()}.xlsx'
            ))
        self._sheet = self._workbook.add_worksheet('Produtos')
        self.__setSettingsOfWorkbook()
        self.__writeReader()
        # current data row; __writeRows pre-increments, so data starts at 1
        self._row = 0

    def __setSettingsOfWorkbook(self):
        """Create the cell formats shared by header and data writers."""
        self._cell_format_header = self._workbook.add_format({
            'bold': True,
            'font_color': 'black',
            'bg_color': 'yellow',
            'text_wrap': True
        })
        self._cell_format_money = self._workbook.add_format(
            {'num_format': '##0.00'})
        self._cell_format_date = self._workbook.add_format(
            {'num_format': 'dd/mm/yyyy'})

    def __writeReader(self):
        """Write the header row (row 0, columns 0..24)."""
        self._sheet.write(0, 0, "CNPJ Emitente", self._cell_format_header)
        self._sheet.write(0, 1, "Nome Emitente", self._cell_format_header)
        self._sheet.write(0, 2, "Numero NF", self._cell_format_header)
        self._sheet.write(0, 3, "Data Emissao", self._cell_format_header)
        self._sheet.write(0, 4, "Modelo NF", self._cell_format_header)
        self._sheet.write(0, 5, "Serie", self._cell_format_header)
        self._sheet.write(0, 6, "CNPJ Destinatario", self._cell_format_header)
        self._sheet.write(0, 7, "Nome Destinatario", self._cell_format_header)
        self._sheet.write(0, 8, "Chave Nota", self._cell_format_header)
        self._sheet.write(0, 9, "Num. Item Produto", self._cell_format_header)
        self._sheet.write(0, 10, "Cod. Produto", self._cell_format_header)
        self._sheet.write(0, 11, "Nome Produto", self._cell_format_header)
        self._sheet.write(0, 12, "NCM", self._cell_format_header)
        self._sheet.write(0, 13, "Nome NCM", self._cell_format_header)
        self._sheet.write(0, 14, "Regra aplicada pra este NCM",
                          self._cell_format_header)
        self._sheet.write(0, 15, "Unidade", self._cell_format_header)
        self._sheet.write(0, 16, "CFOP", self._cell_format_header)
        self._sheet.write(0, 17, "Quantidade", self._cell_format_header)
        self._sheet.write(0, 18, "Valor Unitario", self._cell_format_header)
        self._sheet.write(0, 19, "Valor Produto", self._cell_format_header)
        self._sheet.write(0, 20, "Valor Desconto", self._cell_format_header)
        self._sheet.write(0, 21, "Valor Frete", self._cell_format_header)
        self._sheet.write(0, 22, "Valor Outros", self._cell_format_header)
        self._sheet.write(0, 23, "Valor VSeg", self._cell_format_header)
        self._sheet.write(0, 24, "Valor Total", self._cell_format_header)

    def __writeRows(self, product: Dict[str, str]) -> None:
        """Write one product document into the current row (self._row).

        The treatsFieldAs* helpers normalise missing/typed fields from the
        Mongo document into text, numbers, decimals or dates.
        """
        emitenteInscricaoFederal = treatsFieldAsTextInDictOrArray(
            product, ['emitente_inscricao_federal'])
        emitenteRazaoSocial = treatsFieldAsTextInDictOrArray(
            product, ['emitente_razao_social'])
        numeroNF = treatsFieldAsNumberInDictOrArray(
            product, ['identificao_nfe_numero_nf'])
        dataEmissaoNF = treatsFieldAsDateInDictOrArray(
            product, ['identificao_nfe_data_emissao'], formatoData=2)
        modeloNF = treatsFieldAsTextInDictOrArray(
            product, ['identificao_nfe_modelo_nf'])
        serieNF = treatsFieldAsTextInDictOrArray(product,
                                                 ['identificao_nfe_serie_nf'])
        destinatarioInscricaoFederal = treatsFieldAsTextInDictOrArray(
            product, ['destinatario_inscricao_federal'])
        destinatarioRazaoSocial = treatsFieldAsTextInDictOrArray(
            product, ['destinatario_razao_social'])
        chaveNota = treatsFieldAsTextInDictOrArray(product, ['chave_nota'])
        numeroItemProduto = treatsFieldAsTextInDictOrArray(
            product, ['prod_numero_item'])
        codigoProduto = treatsFieldAsTextInDictOrArray(product,
                                                       ['prod_codigo_produto'])
        nomeProduto = treatsFieldAsTextInDictOrArray(product,
                                                     ['prod_nome_produto'])
        ncm = treatsFieldAsTextInDictOrArray(product, ['prod_ncm'])
        nomeNCM = treatsFieldAsTextInDictOrArray(product, ['prod_name_ncm'])
        ruleNCM = treatsFieldAsTextInDictOrArray(product, ['prod_ncm_rule'])
        unidade = treatsFieldAsTextInDictOrArray(product, ['prod_unidade'])
        cfop = treatsFieldAsNumberInDictOrArray(product, ['prod_cfop'])
        quantidade = treatsFieldAsDecimalInDictOrArray(product,
                                                       ['prod_quantidade'])
        valorUnitario = treatsFieldAsDecimalInDictOrArray(
            product, ['prod_valor_unitario'])
        valorProduto = treatsFieldAsDecimalInDictOrArray(
            product, ['prod_valor_produto'])
        valorDesconto = treatsFieldAsDecimalInDictOrArray(
            product, ['prod_valor_desconto'])
        valorFrete = treatsFieldAsDecimalInDictOrArray(product,
                                                       ['prod_valor_frete'])
        valorOutros = treatsFieldAsDecimalInDictOrArray(
            product, ['prod_valor_outros'])
        valorVSeg = treatsFieldAsDecimalInDictOrArray(product, ['prod_vseg'])
        valorTotal = treatsFieldAsDecimalInDictOrArray(product,
                                                       ['prod_valor_total'])
        # columns follow the header layout written by __writeReader
        self._sheet.write(self._row, 0, emitenteInscricaoFederal)
        self._sheet.write(self._row, 1, emitenteRazaoSocial)
        self._sheet.write(self._row, 2, numeroNF)
        self._sheet.write(self._row, 3, dataEmissaoNF, self._cell_format_date)
        self._sheet.write(self._row, 4, modeloNF)
        self._sheet.write(self._row, 5, serieNF)
        self._sheet.write(self._row, 6, destinatarioInscricaoFederal)
        self._sheet.write(self._row, 7, destinatarioRazaoSocial)
        self._sheet.write(self._row, 8, chaveNota)
        self._sheet.write(self._row, 9, numeroItemProduto)
        self._sheet.write(self._row, 10, codigoProduto)
        self._sheet.write(self._row, 11, nomeProduto)
        self._sheet.write(self._row, 12, ncm)
        self._sheet.write(self._row, 13, nomeNCM)
        self._sheet.write(self._row, 14, ruleNCM)
        self._sheet.write(self._row, 15, unidade)
        self._sheet.write(self._row, 16, cfop)
        self._sheet.write(self._row, 17, quantidade)
        self._sheet.write(self._row, 18, valorUnitario,
                          self._cell_format_money)
        self._sheet.write(self._row, 19, valorProduto,
                          self._cell_format_money)
        self._sheet.write(self._row, 20, valorDesconto,
                          self._cell_format_money)
        self._sheet.write(self._row, 21, valorFrete, self._cell_format_money)
        self._sheet.write(self._row, 22, valorOutros, self._cell_format_money)
        self._sheet.write(self._row, 23, valorVSeg, self._cell_format_money)
        self._sheet.write(self._row, 24, valorTotal, self._cell_format_money)

    def __closeWorkbook(self):
        """Finalise and flush the workbook to disk."""
        self._workbook.close()

    def saveData(self):
        """Iterate the year/month range, fetch each month's products from the
        `notas_<inscricaoFederal>` collection and write them; finally close
        the workbook. Progress is printed month by month.
        """
        year = self._yearStart
        while year <= self._yearEnd:
            # months of `year` that fall inside the requested range
            months = returnMonthsOfYear(year, self._monthStart,
                                        self._yearStart, self._monthEnd,
                                        self._yearEnd)
            print('\t\t\t- ', end='')
            for month in months:
                monthYearStr = f'{month:0>2}/{year}'
                print(monthYearStr, ' ', end='')
                getListProduct = GetListProduct(
                    self._database, f"notas_{self._inscricaoFederal}", year,
                    month)
                listProducts: List[Dict[str, str]] = getListProduct.getList()
                for product in listProducts:
                    self._row += 1
                    self.__writeRows(product)
            print('')
            year += 1
        self.__closeWorkbook()
def download_blacklisted_member_file(request):
    """Django view (Python 2): xlsx report of blacklisted members between
    from_date and to_date (GET parameters, dd/mm/YYYY).

    NOTE(review): this function was mangled by a secret-scrubbing tool —
    '******' replaced source text in two places, fusing a logging `print`
    with the `if request.GET.get('from_date') and ...` guard, and swallowing
    the success `return response` / `except` header of the error path.
    The original control flow cannot be recovered from this view; the mangled
    statements are kept byte-for-byte below. TODO: restore from VCS.
    """
    try:
        # mangled: print + `if request.GET.get('from_date') and ...:` fused
        print '\nRequest IN | hall_booking_report.py | download_blacklisted_member_report_file | User = '******'from_date') and request.GET.get('to_date'):
        from_date = datetime.strptime(str(request.GET.get('from_date')),
                                      '%d/%m/%Y').date()
        to_date = datetime.strptime(str(request.GET.get('to_date')),
                                    '%d/%m/%Y').date()
        output = io.BytesIO()
        workbook = Workbook(output, {'in_memory': True})
        worksheet1 = workbook.add_worksheet('Blacklisted Member Report')
        # tweak the default format (index 0) used for unformatted cells
        workbook.formats[0].set_font_size(10)
        workbook.formats[0].set_border(1)
        workbook.formats[0].set_text_wrap()
        merge_format = workbook.add_format({
            'bold': 1,
            'align': 'center',
            'valign': 'vcenter'
        })
        cell_header_format = workbook.add_format({
            'font_size': 8,
            'border': 1,
            'text_wrap': True,
            'bold': 1,
            'align': 'center',
            'valign': 'vcenter',
            'border_color': '#000000'
        })
        cell_format = workbook.add_format({
            'font_size': 10,
            'border': 1,
            'text_wrap': True,
            'border_color': '#000000'
        })
        worksheet1.set_column('A:A', 2)
        worksheet1.set_column('B:B', 25)
        worksheet1.set_column('C:C', 6)
        worksheet1.set_column('D:D', 11)
        worksheet1.set_column('E:E', 20)
        worksheet1.set_column('F:F', 20)
        title_text = 'Blacklisted Member Details Between ' + str(
            from_date.strftime('%d/%m/%Y')) + ' to ' + str(
                to_date.strftime('%d/%m/%Y'))
        worksheet1.merge_range('A1:F1', title_text, merge_format)
        column_name = [
            'Sr No', 'Name', 'Member', 'Date', 'Blacklisted by', 'Remark'
        ]
        # header row is row 1 (row 0 holds the merged title)
        for i in range(len(column_name)):
            worksheet1.write_string(1, int(i), column_name[i],
                                    cell_header_format)
        # i: worksheet row cursor, j: 1-based serial number
        i = 2
        j = 1
        blacklist_obj = None
        blacklist_obj = UserTrackDetail.objects.filter(
            is_deleted=False,
            is_blacklisted=True,
            blacklisted_date__range=[from_date, to_date])
        for blacklist_members in blacklist_obj:
            if blacklist_members.blacklisted_date:
                blaclist_date = blacklist_members.blacklisted_date.strftime(
                    '%d/%m/%Y')
            else:
                blaclist_date = ''
            if blacklist_members.member:
                status = 'YES'
            else:
                status = 'NO'
            worksheet1.write_number(i, 0, int(j), cell_format)
            worksheet1.write_string(
                i, 1,
                str(blacklist_members.company if blacklist_members.
                    company else ''), cell_format)
            worksheet1.write_string(i, 2, str(str(status)), cell_format)
            worksheet1.write_string(i, 3, str(str(blaclist_date)), cell_format)
            worksheet1.write_string(
                i, 4,
                str(blacklist_members.blacklisted_by if blacklist_members.
                    blacklisted_by else ''), cell_format)
            worksheet1.write_string(
                i, 5,
                str(blacklist_members.blacklist_remark if blacklist_members
                    .blacklist_remark else ''), cell_format)
            i = i + 1
            j = j + 1
        workbook.close()
        output.seek(0)
        response = HttpResponse(
            output.read(),
            content_type=
            "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
        )
        response[
            'Content-Disposition'] = 'attachment; filename=Blacklisted_Member_Report' + str(
                datetime.now().date().strftime('%d_%m_%Y')) + '.xlsx'
        # mangled: success log, `return response`, and the `except` header
        # were swallowed by the scrubber here
        print '\nResponse OUT hall_booking_report.py | download_blacklisted_member_report_file | User = '******'\nException IN hall_booking_report.py | download_blacklisted_member_report_file | EXCP = ', str(
            traceback.print_exc())
        data = {'success': 'false'}
        return HttpResponse(json.dumps(data), content_type='application/json')
def relacion_no_ubicados_nominal_sma_diciembre(request):
    """Django view: nominal xlsx listing of SMA licentiates NOT incorporated
    (incorporado_id=2) whose interview month is December, scoped by the
    requesting user's category: 'dmt' -> own municipality, 'dpt_ee' -> own
    province, anything else -> country-wide.

    Returns:
        HttpResponse streaming the xlsx attachment.
    """
    # start_time = time.time()
    anno_actual = datetime.today().year
    categoria_usuario = request.user.perfil_usuario.categoria.nombre
    municipio_usuario = request.user.perfil_usuario.municipio
    provincia_usuario = request.user.perfil_usuario.provincia
    response = HttpResponse(
        content_type=
        'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
    if categoria_usuario == 'dmt':
        response[
            'Content-Disposition'] = "attachment; filename=Relacion_nominal_no_incorporados_sma_Diciembre(%s)_(%s).xlsx" % (
                municipio_usuario, anno_actual)
        # union (|) of: registered this year, OR still active — both December
        LICENCIADOS_SMA = LicenciadosSMA.objects.filter(municipio_residencia=municipio_usuario, mes_entrevista='Diciembre',
                                                        fecha_registro__year=anno_actual,
                                                        incorporado_id=2).values("municipio_residencia__provincia__nombre",
                                                                                 "nombre_apellidos", "ci",
                                                                                 "municipio_residencia__nombre",
                                                                                 "mes_entrevista",
                                                                                 "causa_no_ubicado__causa") | \
            LicenciadosSMA.objects.filter(municipio_residencia=municipio_usuario, mes_entrevista='Diciembre',
                                          activo=True,
                                          incorporado_id=2).values("municipio_residencia__provincia__nombre",
                                                                   "nombre_apellidos", "ci",
                                                                   "municipio_residencia__nombre",
                                                                   "mes_entrevista",
                                                                   "causa_no_ubicado__causa")
    elif categoria_usuario == 'dpt_ee':
        response[
            'Content-Disposition'] = "attachment; filename=Relacion_nominal_no_incorporados_sma_Diciembre(%s)_(%s).xlsx" % (
                provincia_usuario, anno_actual)
        LICENCIADOS_SMA = LicenciadosSMA.objects.filter(municipio_residencia__provincia=provincia_usuario,
                                                        mes_entrevista='Diciembre',
                                                        fecha_registro__year=anno_actual,
                                                        incorporado_id=2).values("municipio_residencia__provincia__nombre",
                                                                                 "nombre_apellidos", "ci",
                                                                                 "municipio_residencia__nombre",
                                                                                 "mes_entrevista",
                                                                                 "causa_no_ubicado__causa") | \
            LicenciadosSMA.objects.filter(municipio_residencia__provincia=provincia_usuario,
                                          mes_entrevista='Diciembre',
                                          activo=True,
                                          incorporado_id=2).values("municipio_residencia__provincia__nombre",
                                                                   "nombre_apellidos", "ci",
                                                                   "municipio_residencia__nombre",
                                                                   "mes_entrevista",
                                                                   "causa_no_ubicado__causa")
    else:
        response[
            'Content-Disposition'] = "attachment; filename=Relacion_nominal_no_incorporados_sma_Diciembre(%s).xlsx" % anno_actual
        LICENCIADOS_SMA = LicenciadosSMA.objects.filter(fecha_registro__year=anno_actual, mes_entrevista='Diciembre',
                                                        incorporado_id=2).values("municipio_residencia__provincia__nombre",
                                                                                 "nombre_apellidos", "ci",
                                                                                 "municipio_residencia__nombre",
                                                                                 "mes_entrevista",
                                                                                 "causa_no_ubicado__causa") | \
            LicenciadosSMA.objects.filter(activo=True, mes_entrevista='Diciembre',
                                          incorporado_id=2).values("municipio_residencia__provincia__nombre",
                                                                   "nombre_apellidos", "ci",
                                                                   "municipio_residencia__nombre",
                                                                   "mes_entrevista",
                                                                   "causa_no_ubicado__causa")
    book = Workbook(response, {'in_memory': True})
    worksheet_data = book.add_worksheet('No_incorporados')
    formato = book.add_format({'bold': True, 'border': 1})
    formato2 = book.add_format({'border': 1})
    # NOTE(review): for 'dmt' the A1 header says "Municipio" while column 0
    # below is always filled with nombre_apellidos — looks like a mislabel;
    # confirm intent before changing.
    if categoria_usuario == 'dmt':
        worksheet_data.write("A1", "Municipio", formato)
    else:
        worksheet_data.write("A1", "Nombre", formato)
    worksheet_data.write("B1", "CI", formato)
    worksheet_data.write("C1", "Municipio", formato)
    worksheet_data.write("D1", "Provincia", formato)
    worksheet_data.write("E1", "Fecha de la entrevista", formato)
    worksheet_data.write("F1", "Causa de no Incorporacion", formato)
    worksheet_data.set_column("A:A", 31)
    worksheet_data.set_column("B:B", 14)
    worksheet_data.set_column("C:C", 20.60)
    worksheet_data.set_column("D:D", 17)
    worksheet_data.set_column("E:E", 20)
    worksheet_data.set_column("F:F", 28.30)
    # data rows start right under the header
    INICIO = 1
    for persona in LICENCIADOS_SMA:
        worksheet_data.write(INICIO, 0, persona['nombre_apellidos'], formato2)
        worksheet_data.write(INICIO, 1, persona['ci'], formato2)
        worksheet_data.write(INICIO, 2,
                             persona['municipio_residencia__nombre'], formato2)
        worksheet_data.write(
            INICIO, 3, persona['municipio_residencia__provincia__nombre'],
            formato2)
        worksheet_data.write(INICIO, 4, persona["mes_entrevista"], formato2)
        worksheet_data.write(INICIO, 5, persona["causa_no_ubicado__causa"],
                             formato2)
        INICIO += 1
    book.close()
    # elapsed_time = time.time() - start_time
    # print("Tiempo transcurrido: %.10f segundos." % elapsed_time)
    return response
def download_utilization_revenue_report_file(request): try: print '\nRequest IN | hall_booking_report.py | download_utilization_revenue_report_file | User = '******'from_date') and request.GET.get('to_date'): from_date = datetime.strptime(str(request.GET.get('from_date')), '%d/%m/%Y').date() to_date = datetime.strptime(str(request.GET.get('to_date')), '%d/%m/%Y').date() location = request.GET.get('location') total_days = to_date - from_date no_of_days = total_days.days date_list = [] temp_booking_details = [] i = 1 j = 2 k = 3 mem_total_sum = 0 nmem_total_sum = 0 output = io.BytesIO() workbook = Workbook(output, {'in_memory': True}) worksheet1 = workbook.add_worksheet('Hall_Utilization_Revenue') merge_format = workbook.add_format({ 'bold': 1, 'align': 'center', 'valign': 'vcenter', }) if location != 'All': location_name = HallLocation.objects.filter(id=int(location)) for name in location_name: title_text = 'Hall Utilization and Revenue Details for ' + str( name) + ' Between ' + str( from_date.strftime('%d/%m/%Y')) + ' to ' + str( to_date.strftime('%d/%m/%Y')) else: title_text = 'Hall Utilization and Revenue Details Between ' + str( from_date.strftime('%d/%m/%Y')) + ' to ' + str( to_date.strftime('%d/%m/%Y')) worksheet1.merge_range('A1:L1', title_text, merge_format) worksheet1.set_column('A:A', 3) worksheet1.set_column('B:B', 10) while from_date <= to_date: date_list.append(from_date) from_date = from_date + timedelta(days=1) temp_booking_details_list = HallBookingDetail.objects.all( ).exclude(booking_status__in=[0, 10]) for item in temp_booking_details_list: if item.booking_from_date.date() in date_list: temp_booking_details.append(item) booking_detail_obj_list = HallBookingDetail.objects.filter( id__in=[item.id for item in temp_booking_details]) if location != 'All': booking_details = HallBookingDetail.objects.filter( id__in=[item.id for item in temp_booking_details], hall_location_id=int(location)).values( 'hall_detail_id').distinct() else: booking_details = 
HallBookingDetail.objects.filter( id__in=[item.id for item in temp_booking_details]).values( 'hall_detail_id').distinct().order_by('hall_location') merge_cell_format = workbook.add_format({ 'text_wrap': True, 'font_size': 10, 'border': 1, 'align': 'center', 'valign': 'vcenter', 'bold': 1 }) cell_format = workbook.add_format({ 'text_wrap': True, 'font_size': 10, 'border': 1 }) worksheet1.merge_range('A2:A3', 'Sr No.', merge_cell_format) worksheet1.merge_range('B2:B3', 'Hall', merge_cell_format) worksheet1.merge_range('C2:C3', 'Location', merge_cell_format) worksheet1.merge_range('D2:E2', 'Revenue', merge_cell_format) worksheet1.merge_range('F2:H2', 'No. of Booking', merge_cell_format) worksheet1.merge_range('I2:K2', 'Hours Used', merge_cell_format) worksheet1.merge_range('L2:L3', 'Utilization %', merge_cell_format) worksheet1.write_string(j, 3, 'M', merge_cell_format) worksheet1.write_string(j, 4, 'NM', merge_cell_format) worksheet1.write_string(j, 5, 'I', merge_cell_format) worksheet1.write_string(j, 6, 'M', merge_cell_format) worksheet1.write_string(j, 7, 'NM', merge_cell_format) worksheet1.write_string(j, 8, 'I', merge_cell_format) worksheet1.write_string(j, 9, 'M', merge_cell_format) worksheet1.write_string(j, 10, 'NM', merge_cell_format) for b in booking_details: mem_rev = 0 nmem_rev = 0 h_i = 0 h_m = 0 h_nm = 0 total_hours_i = timedelta(hours=0) total_hours_m = timedelta(hours=0) total_hours_nm = timedelta(hours=0) if b['hall_detail_id']: hall_detail_obj = HallDetail.objects.get( id=int(b['hall_detail_id'])) internal_booking_count = booking_detail_obj_list.filter( hall_detail=hall_detail_obj, hall_booking__booking_for=0, is_deleted=False).count() member_booking_count = booking_detail_obj_list.filter( hall_detail=hall_detail_obj, hall_booking__booking_for=1, is_deleted=False).count() non_member_booking_count = booking_detail_obj_list.filter( hall_detail=hall_detail_obj, hall_booking__booking_for=2, is_deleted=False).count() for hall in 
booking_detail_obj_list.filter( hall_detail=hall_detail_obj, is_deleted=False): if hall.hall_booking.booking_for == 0: total_hours_i = total_hours_i + hall.booking_to_date - hall.booking_from_date total_sec = total_hours_i.total_seconds() h_i = total_sec // 3600 elif hall.hall_booking.booking_for == 1: total = hall.total_rent mem_rev = mem_rev + total total_hours_m = total_hours_m + hall.booking_to_date - hall.booking_from_date total_sec = total_hours_m.total_seconds() h_m = total_sec // 3600 elif hall.hall_booking.booking_for == 2: total = hall.total_rent nmem_rev = nmem_rev + total total_hours_nm = total_hours_nm + hall.booking_to_date - hall.booking_from_date total_sec = total_hours_nm.total_seconds() h_nm = total_sec // 3600 total_hrs = h_i + h_m + h_nm utilization = (total_hrs / 8) / no_of_days * 100 worksheet1.write_number(k, 0, int(i), cell_format) worksheet1.write_string(k, 1, str(hall_detail_obj.hall_name), cell_format) worksheet1.write_string( k, 2, str(hall_detail_obj.hall_location.location), cell_format) worksheet1.write_number(k, 3, int(mem_rev), cell_format) worksheet1.write_number(k, 4, int(nmem_rev), cell_format) worksheet1.write_number(k, 5, int(internal_booking_count), cell_format) worksheet1.write_number(k, 6, int(member_booking_count), cell_format) worksheet1.write_number(k, 7, int(non_member_booking_count), cell_format) worksheet1.write_number(k, 8, int(h_i), cell_format) worksheet1.write_number(k, 9, int(h_m), cell_format) worksheet1.write_number(k, 10, int(h_nm), cell_format) worksheet1.write_number(k, 11, int(utilization), cell_format) i = i + 1 k = k + 1 mem_total_sum = mem_total_sum + mem_rev nmem_total_sum = nmem_total_sum + nmem_rev l = k worksheet1.write_string(l, 2, str('Total'), merge_cell_format) worksheet1.write_number(l, 3, int(mem_total_sum), merge_cell_format) worksheet1.write_number(l, 4, int(nmem_total_sum), merge_cell_format) workbook.close() output.seek(0) response = HttpResponse( output.read(), content_type= 
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet" ) response[ 'Content-Disposition'] = 'attachment; filename=Hall_Utilization_Revenue_Report.xlsx' print '\nResponse OUT | hall_booking_report.py | download_utilization_revenue_report_file | User = '******'\nException IN | hall_booking_report.py | download_utilization_revenue_report_file | EXCP = ', str( traceback.print_exc()) return False
if read.cell_type(x, readToColumn) == 2: loc2 = str(int(temp2)) else: loc2 = temp2 except: print("Could not get Location 2!") break ## Find miles between them, print, write to new sheet miles = calcDist(loc1, loc2) numCalc += 1 numSuccess += 1 print1(x, miles, loc1, loc2, numSuccess, numCalc) write.write(x, writeColumn, miles) success = True break ## If something failed after 10 tries, give up and do manually if not success: write.write(x, writeColumn, "Failed!") print(str(x + 1) + " failed!") numCalc += 1 ## Save the sheet print("\nSuccess Rate: " + str(100 * numSuccess / numCalc) + "%") wbWrite.close()
def events(frame,put):
    """Dispatch one voice/text command *put* to the matching action.

    ``frame`` is accepted but never read in this function (presumably a
    GUI handle passed by the caller — TODO confirm).  The body is two
    successive if/elif chains keyed on prefixes/keywords of *put*:
    notes, ffmpeg recording/conversion, YouTube playback/download,
    web shortcuts, news, shutdown control, and file/folder creation.
    Between the chains, ``put`` is lowercased/stripped and ``link`` is
    re-split, so the second chain sees normalized input.

    Windows-only: relies on C:\\Users paths, dshow/ffmpeg devices,
    ``os.startfile`` and ``shutdown.exe``.  Uses module-level globals
    (speak, regex, wikipedia, requests, youtube_dl, YouTube,
    Presentation, Workbook, BeautifulSoup, urllib, ctypes, subprocess).

    NOTE(review): original indentation was lost upstream; layout below
    is reconstructed — verify branch nesting against VCS history.
    """
    identity_keywords = ["who are you", "who r u", "what is your name"]
    youtube_keywords = ("play ", "stream ", "queue ")
    launch_keywords = ["open ", "launch "]
    search_keywords = ["search "]
    wikipedia_keywords = ["wikipedia ", "wiki "]
    location_keywords = ["locate","spot"]
    check_keywords = ["what","when","was","how","has","had","should","would","can","could","cool","good"] #could or cool or good
    download_music=("download ","download music ")
    search_pc= ("find ","lookfor ")
    close_keywords=("close ","over ","stop ","exit ")
    link = put.split()
    #Add note
    # NOTE(review): the "not" prefix also matches many unrelated words
    # ("note"/"not"/"node" are speech-recognition variants, presumably).
    if put.startswith("note") or put.startswith("not") or put.startswith("node"):
        try:
            check = link[1]
            username = os.getlogin()
            filename = "Notes.txt"
            f1 = open(r'''C:\Users\{0}\Desktop\{1}'''.format(username,filename),'a')
            link = '+'.join(link[1:])
            text = link.replace('+',' ')
            text = text[0].capitalize() + text[1:]
            # Question words get a trailing '?', everything else a '.'.
            if check in check_keywords:
                text += "?"
            else:
                text += "."
            f1.write(text)
            f1.write("\n")
            f1.close()
            speak.say("Note added successfully!")
            speak.runAndWait()
        except:
            print("Could not add the specified note!")
    #Screen Recorder
    elif link[0] == "recorder":
        try:
            # Optional args: link[1] = video device, link[2] = audio device
            # (underscores stand in for spaces); defaults below.
            if len(link) < 2:
                video = '"UScreenCapture"'
                audio = '"Microphone (Realtek High Definition Audio)"'
            elif len(link) < 3:
                video = link[1]
                video = video.replace('_',' ')
                video = '"' + video + '"'
                audio = '"Microphone (Realtek High Definition Audio)"'
            else:
                video = link[1]
                video = video.replace('_',' ')
                video = '"' + video + '"'
                audio = link[2]
                audio = audio.replace('_',' ')
                audio = '"' + audio + '"'
            username = os.getlogin()
            speak.say("Recording started!")
            speak.runAndWait()
            os.chdir(r'''C:\Users\{}\Desktop'''.format(username))
            subprocess.call(r'''ffmpeg -rtbufsize 1500M -f dshow -i video={0}:audio={1} -vcodec mpeg4 -vtag xvid -qscale:v 0 -crf 0 -acodec libmp3lame -ab 320k -ac 1 -ar 44100 video.avi'''.format(video,audio),shell=True)
            #video = UScreenCapture , audio = Microphone (Realtek High Definition Audio)
        except:
            print("Unable to start requested service!")
    #Voice Recorder
    # NOTE(review): link[1] raises IndexError (uncaught) for a bare
    # one-word command here and in the next branch.
    elif link[0] == "audio" and link[1] == "recorder":
        try:
            if len(link) < 3:
                audio = '"Microphone (Realtek High Definition Audio)"'
            else:
                audio = link[2]
                audio = audio.replace('_',' ')
                audio = '"' + audio + '"'
            username = os.getlogin()
            speak.say("Recording started!")
            speak.runAndWait()
            os.chdir(r'''C:\Users\{}\Desktop'''.format(username))
            subprocess.call(r'''ffmpeg -rtbufsize 1500M -f dshow -i audio={0} -acodec libmp3lame -ab 320k -ac 1 -ar 44100 audio.mp3'''.format(audio),shell=True)
        except:
            print("Unable to start requested service!")
    #Video Recorder
    elif link[0] == "video" and link[1] == "recorder":
        try:
            if len(link) < 3:
                video = '"UScreenCapture"'
            else:
                video = link[2]
                video = video.replace('_',' ')
                video = '"' + video + '"'
            username = os.getlogin()
            speak.say("Recording started!")
            speak.runAndWait()
            os.chdir(r'''C:\Users\{}\Desktop'''.format(username))
            subprocess.call(r'''ffmpeg -rtbufsize 1500M -f dshow -i video={0} -vcodec mpeg4 -vtag xvid -qscale:v 0 -crf 0 video.avi'''.format(video),shell=True)
        except:
            print("Unable to start requested service!")
    #Merge audio and video
    elif link[0] == "merge":
        try:
            username = os.getlogin()
            os.chdir(r'''C:\Users\{}\Desktop'''.format(username))
            video = link[1]
            audio = link[2]
            output = link[3]
            # Stream-copy both tracks into the output container.
            subprocess.call(r'''ffmpeg -i {} -i {} -c:v copy -c:a copy {}'''.format(video,audio,output),shell=True)
        except:
            print("Unable to process requested service!")
    #Convert video
    elif link[0] == "convert":
        try:
            username = os.getlogin()
            os.chdir(r'''C:\Users\{}\Desktop'''.format(username))
            # "na" flavor: convert without audio (-an); otherwise keep audio.
            if link[1] == "na":
                form_in = link[2]
                video1 = link[3]
                form_out = link[4]
                video2 = link[5]
                if (form_in == "avi" or form_in == "webm" or form_in == "mp4" or form_in == "mkv") and (form_out == "mp4" or form_out == "mkv"):
                    subprocess.call(r'''ffmpeg -i {} -c:v libx264 -an {}'''.format(video1,video2), shell = True)
                elif (form_in == "avi" or form_in == "mp4" or form_in == "mkv") and form_out == "webm":
                    subprocess.call(r'''ffmpeg -i {} -c:v libvpx-vp9 -b:v 2M -an {}'''.format(video1,video2),shell=True)
            else:
                form_in = link[1]
                video1 = link[2]
                form_out = link[3]
                video2 = link[4]
                if (form_in == "avi" or form_in == "webm" or form_in == "mp4" or form_in == "mkv") and (form_out == "mp4" or form_out == "mkv"):
                    subprocess.call(r'''ffmpeg -i {} -c:v libx264 -acodec aac {}'''.format(video1,video2), shell = True)
                elif (form_in == "avi" or form_in == "mp4" or form_in == "mkv") and form_out == "webm":
                    subprocess.call(r'''ffmpeg -i {} -c:v libvpx-vp9 -b:v 2M -cpu-used -5 -deadline realtime -c:a libvorbis {}'''.format(video1,video2), shell = True)
                elif (form_in == "mp4" or form_in == "mkv" or form_in == "webm") and form_out == "avi":
                    subprocess.call(r'''ffmpeg -i {} -c:v mpeg4 -vtag xvid -qscale:v 0 -acodec libmp3lame {}'''.format(video1,video2), shell = True)
                elif (form_in == "avi" or form_in == "webm" or form_in == "mp4" or form_in == "mkv" or form_in == "mp3" or form_in == "m4a") and (form_out == "m4a" or form_out == "mp3"):
                    subprocess.call(r'''ffmpeg -i {} {}'''.format(video1,video2), shell = True)
        except:
            print("Unable to process requested service!")
    #Closing Benji
    elif put.startswith(close_keywords):
        os._exit(0)
    #Images to video
    elif put.startswith("images to video "):
        try:
            framerate = link[3]
            username = os.getlogin()
            os.chdir(r'''C:\Users\{}\Desktop\Images'''.format(username))
            subprocess.call(r'''ffmpeg -framerate 1/{} -i img%03d.jpg -vcodec mpeg4 -vtag xvid -qscale:v 0 -crf 0 output.avi'''.format(framerate),shell=True)
            speak.say("Video created!")
            speak.runAndWait()
        except:
            print("Unable to create video file!")
    elif put.startswith(search_pc):
        try:
            # Walk link[2] for file names matching the pattern link[1];
            # open the last match (or the directory itself if none).
            name=link[1]
            rex=regex.compile(name)
            filepath=link[2]
            realpath=filepath
            for root,dirs,files in os.walk(os.path.normpath(filepath)):
                for f in files:
                    result = rex.search(f)
                    if result:
                        realpath=os.path.join(root, f)
                        print (realpath+"\n")
            os.startfile(realpath)
        except:
            print("Error")
    # Normalize input for the second dispatch chain below.
    put = put.lower()
    put = put.strip()
    link = put.split()
    # elif put.startswith(search_pc):
    #     process=subprocess.Popen("dir /b/s "+link[1],shell=True,stdout=subprocess.PIPE)
    #     while True:
    #         output = process.stdout.readline()
    #         if output == '' and process.poll() is not None:
    #             break
    #         if output:
    #             print (output.strip()+"\n")
    #             outp=output
    #     try:
    #         os.startfile(outp)
    #     except:
    #         speak.say("Sorry,couldn't open")
    #Play song on youtube
    if put.startswith(youtube_keywords):
        try:
            link = '+'.join(link[1:])
            # print(link)
            say = link.replace('+', ' ')
            url = 'https://www.youtube.com/results?search_query='+link
            # webbrowser.open('https://www.youtube.com'+link)
            # Scrape the first search result's href and open it.
            fhand=urllib.request.urlopen(url).read()
            soup = BeautifulSoup(fhand, "html.parser")
            songs = soup.findAll('div', {'class': 'yt-lockup-video'})
            hit = songs[0].find('a')['href']
            # print(hit)
            speak.say("playing "+say)
            speak.runAndWait()
            webbrowser.open('https://www.youtube.com'+hit)
        except:
            print('Sorry Ethan. Looks like its not working!')
    #Download video
    if put.startswith("download video "):
        try:
            link = '+'.join(link[2:])
            say = link.replace('+', ' ')
            url = 'https://www.youtube.com/results?search_query='+link
            fhand=urllib.request.urlopen(url).read()
            soup = BeautifulSoup(fhand, "html.parser")
            songs = soup.findAll('div', {'class': 'yt-lockup-video'})
            hit = songs[0].find('a')['href']
            speak.say("downloading video "+say)
            speak.runAndWait()
            username = os.getlogin()
            os.chdir(r'''C:\Users\{}\Desktop'''.format(username))
            YouTube('https://www.youtube.com' + hit).streams.first().download()
            speak.say("download complete!")
            speak.runAndWait()
        except:
            print('Sorry Ethan. Looks like its not working!')
    #Download music
    # "download video ..." is handled by the `if` above, so this elif only
    # sees plain "download ..." commands.
    elif put.startswith(download_music):
        try:
            link = '+'.join(link[1:])
            # print(link)
            say = link.replace('+', ' ')
            url = 'https://www.youtube.com/results?search_query='+link
            # webbrowser.open('https://www.youtube.com'+link)
            fhand=urllib.request.urlopen(url).read()
            soup = BeautifulSoup(fhand, "html.parser")
            songs = soup.findAll('div', {'class': 'yt-lockup-video'})
            hit = songs[0].find('a')['href']
            # print(hit)
            speak.say("downloading "+say)
            speak.runAndWait()
            # youtube_dl options: extract best audio and transcode to mp3
            # on the user's desktop.
            ydl_opts = {
                'format': 'bestaudio/best',
                'postprocessors': [{
                    'key': 'FFmpegExtractAudio',
                    'preferredcodec': 'mp3',
                    'preferredquality': '192',
                }],
                'quiet': True,
                'restrictfilenames': True,
                'outtmpl': 'C:\\Users\\'+os.environ['USERNAME']+'\\Desktop\\%(title)s.%(ext)s'
            }
            ydl = youtube_dl.YoutubeDL(ydl_opts)
            ydl.download(['https://www.youtube.com'+hit])
            speak.say("download completed Check your desktop for the song")
            speak.runAndWait()
        except:
            print("Unable to download requested music!")
    #Location
    elif any(word in put for word in location_keywords):
        try:
            link='+'.join(link[1:])
            say=link.replace('+',' ')
            speak.say("locating "+ say)
            speak.runAndWait()
            webbrowser.open('https://www.google.nl/maps/place/'+link)
        except:
            print('The place seems to be sequestered.')
    #Who are you?
    elif any(word in put for word in identity_keywords):
        try:
            speak.say("I am BENJI, a digital assistant declassified for civilian use. Previously I was used by the Impossible Missions Force")
            speak.runAndWait()
        except:
            print('Error. Try reading the ReadMe to know about me!')
    #Open a webpage
    elif any(word in put for word in launch_keywords):
        try:
            link = '+'.join(link[1:])
            speak.say("opening "+link)
            speak.runAndWait()
            webbrowser.open('http://www.'+ link)
        except:
            print('Sorry Ethan,unable to access it. Cannot hack either-IMF protocol!')
    #Google search
    elif any(word in put for word in search_keywords):
        try:
            link='+'.join(link[1:])
            say=link.replace('+',' ')
            speak.say("searching google for "+say)
            speak.runAndWait()
            webbrowser.open('https://www.google.com/search?q='+link)
        except:
            print('Nope, this is not working.')
    #Google Images
    elif put.startswith("images of "):
        try:
            link='+'.join(link[2:])
            say=link.replace('+',' ')
            speak.say("searching images of " + say)
            speak.runAndWait()
            webbrowser.open('https://www.google.co.in/search?q=' + link + '&source=lnms&tbm=isch')
        except:
            print('Could not search for images!')
    #Gmail
    elif put.startswith("gmail"):
        try:
            speak.say("Opening Gmail!")
            speak.runAndWait()
            webbrowser.open('https://www.google.com/gmail')
        except:
            print("Could not open Gmail!")
    #Google Cloud Print
    elif put.startswith("google cloud print"):
        try:
            speak.say("Opening google cloud print!")
            speak.runAndWait()
            webbrowser.open('https://www.google.com/cloudprint')
        except:
            print("Could not open Google Cloud Print!")
    #Google Others
    elif put.startswith("google "):
        try:
            say = link[1]
            speak.say("Opening google " + say)
            speak.runAndWait()
            webbrowser.open('https://'+ say +'.google.com')
        except:
            print("Could not open Google " + say.capitalize() + "!")
    #Blogger
    elif put.startswith("blogger"):
        try:
            speak.say("Opening blogger!")
            speak.runAndWait()
            webbrowser.open('https://www.blogger.com')
        except:
            print("Could not open Blogger!")
    #Wikipedia
    elif any(word in put for word in wikipedia_keywords):
        try:
            link = '+'.join(link[1:])
            say = link.replace('+', ' ')
            wikisearch = wikipedia.page(say)
            speak.say("Opening wikipedia page for" + say)
            speak.runAndWait()
            webbrowser.open(wikisearch.url)
        except:
            print('Wikipedia could not either find the article or your Third-world connection is unstable')
    #Podcast
    elif put.startswith("podcast"):
        try:
            speak.say("Opening podcast!")
            speak.runAndWait()
            webbrowser.open('https://castbox.fm/home')
        except:
            print("Could not open podcast!")
    #Lock the device
    elif put.startswith('secure ') or put.startswith('lock '):
        try:
            speak.say("locking the device")
            speak.runAndWait()
            ctypes.windll.user32.LockWorkStation()
        except :
            print('Cannot lock device')
    #News of various press agencies
    elif put.startswith('news '):
        try:
            # Map spoken source names to newsapi.org source slugs.
            say = '+'.join(link[1:])
            say = say.replace('+','-')
            if link[1] == "al" and link[2] == "jazeera":
                say += "-english"
            elif link[1] == "bbc":
                say += "-news"
            elif link[1] == "espn" and link[2] == "cric":
                say += "-info"
            # NOTE(review): hard-coded third-party API key committed to
            # source — should be moved to configuration and rotated.
            url = ('https://newsapi.org/v1/articles?source=' + say + '&sortBy=latest&apiKey=571863193daf421082a8666fe4b666f3')
            newsresponce = requests.get(url)
            newsjson = newsresponce.json()
            speak.say('Our agents from ' + say + ' report this')
            speak.runAndWait()
            print(' ====='+ say.upper() +'===== \n')
            i = 1
            for item in newsjson['articles']:
                print(str(i) + '. ' + item['title'] + '\n')
                print(item['description'] + '\n')
                i += 1
        except:
            print('Unable to retrieve data!')
    #shutdown after specific time
    elif put.startswith('shutdown after '):
        try:
            # Expected shape: "shutdown after H hours M minutes".
            if link[2].isdigit() and link[4].isdigit():
                # NOTE(review): "zero" can never pass isdigit(), so both
                # normalizations below are dead code.
                if link[2] == "zero":
                    link[2] = "0"
                if link[4] == "zero":
                    link[4] = "0"
                hours = int(link[2])
                minutes = int(link[4])
                time_seconds = 60 * minutes
                time_seconds = time_seconds + hours * 3600
                subprocess.call("shutdown /s /t {0}".format(str(time_seconds)), shell = True)
                speak.say("Shutdown initialized!")
                speak.runAndWait()
        except:
            print("Please shutdown manually!")
    #shutdown now
    elif put.startswith("shutdown now"):
        try:
            subprocess.call("shutdown /s /t 0", shell = True)
        except:
            print("Please shutdown manually!")
    #abort shutdown
    elif put.startswith("cancel shutdown"):
        try:
            subprocess.call("shutdown /a", shell = True)
            speak.say("Shutdown cancelled!")
            speak.runAndWait()
        except:
            print("Unable do cancel shutdown!")
    #restart
    elif put.startswith("restart now"):
        try:
            subprocess.call("shutdown /r /t 0", shell = True)
        except:
            print("Unable do restart device!")
    #Folder
    elif put.startswith('create ') and link[-1] == "folder":
        try:
            username = os.getlogin()
            filename = '+'.join(link[1:-1])
            filename = filename.replace('+','_').capitalize()
            path = r'''C:\Users\{0}\Desktop\{1}'''.format(username,filename)
            os.mkdir(path)
            speak.say("Folder created!")
            speak.runAndWait()
        except:
            print("Couldn't create specified folder!")
    #create file
    elif put.startswith('create ') and link[-1] == "document":
        try:
            # link[-2] selects the document type; several entries accept
            # speech-recognition near-misses ("world", "reach", "vizio").
            username = os.getlogin()
            filename = '+'.join(link[1:-2])
            filename = filename.replace('+','_').capitalize()
            if link[-2] == "text":
                filename += ".txt"
                f1 = open(r'''C:\Users\{0}\Desktop\{1}'''.format(username,filename),'a')
                f1.close()
            elif link[-2] == "word" or link[-2] == "world":
                filename += ".docx"
                f1 = open(r'''C:\Users\{0}\Desktop\{1}'''.format(username,filename),'a')
                f1.close()
            elif link[-2] == "powerpoint" or link[-2] =="presentation":
                filename += ".pptx"
                prs = Presentation()
                title_slide_layout = prs.slide_layouts[0]
                slide = prs.slides.add_slide(title_slide_layout)
                os.chdir(r'''C:\Users\{0}\Desktop'''.format(username))
                prs.save(filename)
            elif link[-2] == "excel" or link[-2] == "Excel":
                filename += ".xlsx"
                wb = Workbook(filename)
                ws = wb.add_worksheet()
                os.chdir(r'''C:\Users\{0}\Desktop'''.format(username))
                wb.close()
            elif link[-2] == "visio" or link[-2] == "vizio":
                filename += ".vsdx"
                f1 = open(r'''C:\Users\{0}\Desktop\{1}'''.format(username,filename),'a')
                f1.close()
            elif link[-2] == "rich" or link[-2] == "reach":
                filename += ".rtf"
                f1 = open(r'''C:\Users\{0}\Desktop\{1}'''.format(username,filename),'a')
                f1.close()
            speak.say("Created" + filename)
            speak.runAndWait()
        except:
            print("Unable to create a file.")
    #Calculator
    elif put.startswith('calculator'):
        try:
            subprocess.call('calc',shell=True)
        except:
            print("Unable to open calculator!")
    #Exit/Quit
    elif put.startswith('exit') or put.startswith('quit'):
        sys.exit()
def testYieldCurveFwdsGeneration():
    """Generate forward rates from a linear-forward yield curve and dump
    them to tests/test_YieldCurve.xlsx for inspection.

    Builds a curve from hard-coded USD_IRS_LIBOR_3M zero rates, computes
    forwards for each tenor at each month-end base date, then writes two
    sheets: a "Forwards" matrix (tenor x base date) and a long-format
    "Comparision" sheet (sheet-name typo preserved — it is part of the
    output file).  Relies on module-level project helpers (FYC, Cal,
    SortedList, unfoldGen, ZERO_TIMES, TENORS, NUMBER_FWD_MONTHS,
    dtFmtLog, dt, np, Workbook) — semantics assumed, not visible here.
    """
    # ==========================================================================
    # A_Auto: USD_IRS_LIBOR_3M
    # Test based on Auto Zero rates
    # zeroRates=[0.00233531255143132,
    #            0.00233531255143297,
    #            0.00233531255143352,
    #            0.00233531255143333,
    #            0.00297455023730465,
    #            0.00319723236979883,
    #            0.00330765830399476,
    #            0.00691586090809845,
    #            0.01085698247648050,
    #            0.01394073189902450,
    #            0.01633917521905550,
    #            0.02000659612723410,
    #            0.02346660204636610,
    #            0.02685361531988290,
    #            0.02854781579990930,
    #            0.02975553109452430]
    # Test based on Manual Zero rates
    zeroRates = [
        0.00236843676021893, 0.00236821394576394, 0.00236797582662841,
        0.00236773777132554, 0.00290042675432344, 0.00308558288679761,
        0.00330592988104989, 0.00691281016439806, 0.01085462048320750,
        0.01393891008113780, 0.01633775879077160, 0.02000467532244330,
        0.02346505198474200, 0.02685243199737300, 0.02854687951971270,
        0.02975479160795850
    ]
    yieldCurve = FYC.yieldCurveFactory(times=ZERO_TIMES, zeros=zeroRates,
                                       type='linearFwd')
    runTime = dt.strptime('20141128.230000', dtFmtLog)
    runDate = runTime.date()  # same as outDates
    # make sure the first out date is the end of the following month
    nextDate = Cal.NextEndOfMonth(runDate)
    startDate = nextDate if nextDate.month == runDate.month else runDate
    # One base date per month-end for NUMBER_FWD_MONTHS months.
    dfltFwdDates = SortedList(unfoldGen(func = Cal.NextEndOfMonth,
                                        initArg = startDate,
                                        numSteps = NUMBER_FWD_MONTHS))
    cal = Cal()
    # Per base date: the base date itself ([0,'D']) followed by each tenor date.
    fwdDates = [[cal.AddSplitTenor(baseDate = baseDate,
                                   tenorSize = x[0],
                                   tenorUnit = x[1])
                 for x in [[0,'D']] + TENORS]
                for baseDate in dfltFwdDates]
    fwdTimes = [cal.DateTimesToFloats(dateTimes = fwdDateList,
                                      baseDate = runDate)
                for fwdDateList in fwdDates]
    # Column per base date, row per tenor (first time is the base itself).
    outFwds = np.empty((len(fwdTimes[0])-1,NUMBER_FWD_MONTHS))
    for idx, fwdTimesAtDate in enumerate(fwdTimes):
        outFwds[:,idx] \
            = yieldCurve.getFwds( times = fwdTimesAtDate[1:],
                                  baseTime = fwdTimesAtDate[0])
    rateTable = np.column_stack((zeroRates, outFwds))
    # outFwds = np.maximum(0., outFwds)
    workbook = Workbook('tests/test_YieldCurve.xlsx',{'constant_memory': True})
    headerFormat = workbook.add_format({'bold' : True,
                                        'font_name': 'Arial',
                                        'font_size': 8})
    rateFormat = workbook.add_format({'num_format': '0.000%',
                                      'font_name' : 'Arial',
                                      'font_size' : 8,
                                      'border' : 1})
    stringFormat = workbook.add_format({'font_name' : 'Arial',
                                        'font_size' : 8,
                                        'border' : 1})
    dateFormat = workbook.add_format({'bold' : False,
                                      'num_format': 'mm/dd/yyyy',
                                      'font_name' : 'Arial',
                                      'font_size' : 8,
                                      'border' : 1})
    headerDateFormat = workbook.add_format({'bold' : True,
                                            'num_format': 'mm/dd/yyyy',
                                            'font_name' : 'Arial',
                                            'font_size' : 8,
                                            'border' : 1})
    # ==========================================================================
    # Sheet 1: tenor-by-date matrix of zero rates + forwards.
    wks = workbook.add_worksheet("Forwards")
    row = 1
    initCol = 2
    col = initCol
    wks.write_string(row, col, 'Maturities/Dates', headerFormat)
    col +=1
    wks.write_datetime(row, col, runDate, headerDateFormat)
    for outDate in fwdDates:
        col +=1
        wks.write_datetime(row, col, outDate[0], headerDateFormat)
    # write values
    for tenor, rateRow in zip(TENORS, rateTable):
        row += 1
        col = initCol
        # print row, col, tenor
        wks.write_string(row, col, "%d%s"%(tenor[0],tenor[1]), headerFormat)
        col += 1
        # print rateRow
        for rate in rateRow:
            # print row, col, rate
            wks.write_number(row, col, rate,rateFormat)
            col += 1
    # ==========================================================================
    # Format desired output
    crvName = "USD_IRS_LIBOR_3M"
    scenarioName = "A_Base"
    # for baseDate in dfltFwdDates
    # for tenor in TENORS
    # Sheet 2: long format, one row per (tenor, base date) pair.
    wks = workbook.add_worksheet("Comparision")
    initRow = 0
    initCol = 0
    headerString = ['Curve','Scenario','Base Date', 'Tenor', 'Rate']
    wks.write_row(initRow, initCol, headerString, headerFormat)
    col = initCol
    row = initRow + 1
    for tenor, rateRow in zip(TENORS, outFwds):
        for baseDate, rate in zip (dfltFwdDates, rateRow):
            col=initCol
            wks.write_string(row, col, crvName, stringFormat)
            col +=1
            wks.write_string(row, col, scenarioName, stringFormat)
            col +=1
            wks.write_datetime(row, col, baseDate, dateFormat)
            col +=1
            wks.write_string(row, col, "%d%s"%(tenor[0],tenor[1]), stringFormat)
            col +=1
            wks.write_number(row, col, rate, rateFormat)
            row +=1
    workbook.close()
class XlsxReport():
    '''Excel report exporter (thin wrapper around xlsxwriter).

    Typical usage:
        data=(('data11','data12'),
              ('data21','data22'))
        columns=[{'header':'Title1', 'width':11,},
                 {'header':'Title2', 'width':13, 'format':'currency'}]
        with XlsxReport('xlsx_name')as rpt:
            rpt.add_table('A1','sheetname',columns=columns,data=data)

    Named formats registered via add_formats() can be referenced by name
    in every write method; string format arguments are resolved through
    self.formats (unknown names fall through unchanged).
    '''
    def __init__(self,*args,**kwargs):
        # *args/**kwargs are forwarded verbatim to xlsxwriter.Workbook.
        self.book=Workbook(*args,**kwargs)
        self.formats={}          # name -> xlsxwriter Format object
        self.add_formats(DefaultFormats)  # DefaultFormats: module-level registry
        self.sheet=None          # current default worksheet
        self.sheets={}           # name -> Sheet wrapper (see ensure_sheet)
    def __enter__(self):
        return self
    def __exit__(self,_type,value,trace):
        # Always close the workbook; exceptions are not suppressed.
        self.close()
    def ensure_sheet(self,sheet):
        '''Return the Sheet wrapper for *sheet*, creating both the
        worksheet and the wrapper on first use.  (Sheet is a project
        class defined elsewhere — assumed to wrap this report.)'''
        self.get_sheet(sheet)
        if not sheet in self.sheets:
            self.sheets[sheet]=Sheet(self)
        return self.sheets.get(sheet)
    def get_sheet(self,sheetname):
        '''Return the worksheet named *sheetname* (creating it if
        missing) and make it the default worksheet.  A Worksheet
        instance is returned as-is without changing the default.'''
        if isinstance(sheetname,Worksheet):
            return sheetname
        # for/else: fall through to creation only when no name matched.
        for sheet in self.book.worksheets():
            if sheet.name==sheetname:break
        else:
            sheet=self.book.add_worksheet(sheetname)
        self.sheet=sheet
        return sheet
    # NOTE(review): the mutable default `options={}` below is shared
    # across calls — harmless while only read, but fragile.
    def set_row(self,row,height=None,format=None,options={}):
        # `format` may be a registered name or a Format object.
        if format:
            format=self.formats.get(format,format)
        self.sheet.set_row(row,height,format,options)
    @convert_column_args
    def set_column(self,firstcol,lastcol,width=None, format=None,options={}):
        if format:
            format=self.formats.get(format,format)
        self.sheet.set_column(firstcol,lastcol,width,format,options)
    @convert_range_args
    def mwrite(self,first_row,first_col,last_row,last_col,\
               value,format=None):
        '''Merged-range write into the default worksheet.'''
        if format:
            format=self.formats.get(format,format)
        self.sheet.merge_range(first_row,first_col,last_row,last_col,
                               value,format)
    @convert_range_args
    def write_formulas(self,*args,**kwargs):
        # Placeholder — not implemented.
        pass
    @convert_cell_args
    def write_formula(self,row,col,formula,format=None,value=0):
        if format:
            format=self.formats.get(format,format)
        self.sheet.write_formula(row,col,formula,format,value)
    @convert_cell_args
    def write(self,row,col,value,format=None):
        '''Write a single cell into the default worksheet.'''
        if format:
            format=self.formats.get(format,format)
        self.sheet.write(row,col,value,format)
    @convert_cell_args
    def rwrite(self,row,col,values,format=None):
        '''Write a row of values starting at (row, col).'''
        if format:
            format=self.formats.get(format,format)
        self.sheet.write_row(row,col,values,format)
    def close(self):
        '''Close the workbook file (finalizes the .xlsx on disk).'''
        self.book.close()
    def add_formats(self,args):
        '''Register named formats from a dict or an iterable of
        (name, format-spec) pairs.'''
        if isinstance(args,dict):
            args=args.items()
        self.formats.update({name:self.book.add_format(format) for \
            name,format in args})
    @convert_range_args
    def add_table(self,first_row,first_col,last_row,last_col,\
                  sheet=None,**kwargs):
        '''Add a worksheet table; when *sheet* is empty the default
        worksheet is used.  Column widths and named formats in the
        `columns` option are applied; `last_row`/`last_col` are
        recomputed from `columns`/`data` so callers may pass a
        single-cell anchor like "A1".'''
        sheet=self.get_sheet(sheet)if sheet else self.sheet
        columns=kwargs.get('columns')
        if columns:
            new_columns=[]
            for idx,column in enumerate(columns):
                if 'width' in column:
                    sheet.set_column("{0}:{0}".format(
                        xl_col_to_name(idx+first_col)),\
                        column.get('width'))
                # Resolve string format names to Format objects, copying
                # the column spec so the caller's dict stays untouched.
                format=column.get("format")
                if format and isinstance(format,str):
                    new_column=column.copy()
                    new_column['format']=self.formats.get(format)
                    new_columns.append(new_column)
                else:
                    new_columns.append(column)
            kwargs['columns']=new_columns
            last_col=first_col+len(columns)-1
        if 'data' in kwargs:
            last_row=first_row+len(kwargs['data'])
            if kwargs.get('total_row',False):
                last_row+=1
        sheet.add_table(first_row,first_col,last_row,last_col,kwargs)
def events(frame, put):
    """Dispatch one voice/text command *put* to the matching action.

    *put* is lowercased and split into tokens (``link``); a long if/elif
    chain matches keyword patterns and performs the corresponding side
    effect: webcam capture, Twitter/Google Calendar API calls, screenshots,
    ffmpeg recording/conversion, web searches, Windows shutdown, file
    creation, etc. Returns None — everything is a side effect.

    NOTE(review): *frame* is never used in this body. The function is
    Windows-specific (C:\\ paths, ``shutdown``, ``os.startfile``,
    LockWorkStation) and relies on many module-level names defined
    elsewhere (cv2, tweepy, speak, tk, np, plt, requests, BeautifulSoup,
    youtube_dl, Presentation, Workbook, ...).
    """
    identity_keywords = ["who are you", "who r u", "what is your name"]
    youtube_keywords = ("play ", "stream ", "queue ")
    launch_keywords = ["open ", "launch "]
    search_keywords = ["search "]
    wikipedia_keywords = ["wikipedia ", "wiki "]
    location_keywords = ["locate", "spot"]
    check_keywords = [
        "what", "when", "was", "how", "has", "had", "should", "would", "can",
        "could", "cool", "good"
    ]  #could or cool or good
    download_music = ("download ", "download music ")
    search_pc = ("find ", "lookfor ")
    # NOTE(review): parentheses without a comma do NOT make a tuple — this is
    # just a plain string (str.startswith still accepts it, so it works).
    graph_generation = ("draw graph for ")
    close_keywords = ("close ", "over ", "stop ", "exit ")
    pc_locations = ("desktop", "documents", "downloads")
    put = put.lower()
    link = put.split()
    #Add user for face detection
    if link[0] == "face" or link[0] == "phase":
        # Capture a single webcam frame and save it as <name>.jpg for the
        # face-recognition dataset.
        name = link[1]
        path = 'C:/dataset'
        cam = cv2.VideoCapture(0)
        ret, img = cam.read()
        cv2.imwrite(path + "/" + str(name) + ".jpg", img)
        cam.release()
        cv2.destroyAllWindows()
    #Get top 10 tweets
    elif link[0] == "get" and link[-1] == "tweets":
        auth = OAuthHandler(twitterCredentials.consumer_key,
                            twitterCredentials.consumer_secret)
        auth.set_access_token(twitterCredentials.access_token,
                              twitterCredentials.access_secret)
        api = tweepy.API(auth)
        if link[-2] == "my":
            # "get my tweets" -> the authenticated user's own timeline.
            for tweet in tweepy.Cursor(api.user_timeline).items(10):
                print("\n", json.dumps(tweet.text))
                print("on ", tweet.created_at)
        elif link[1] == "tweets":
            # "get tweets ..." -> home timeline (tweets from followed users).
            for status in tweepy.Cursor(api.home_timeline).items(10):
                print("\n", status.text)
                print("By ", status.user.screen_name, " at ",
                      status.user.created_at)
    #Get friends from twitter
    elif link[-1] == "twitter":
        if link[-3] == "follow" and link[-1] == "twitter":
            auth = OAuthHandler(twitterCredentials.consumer_key,
                                twitterCredentials.consumer_secret)
            auth.set_access_token(twitterCredentials.access_token,
                                  twitterCredentials.access_secret)
            api = tweepy.API(auth)
            for friend in tweepy.Cursor(api.friends).items():
                # NOTE(review): the remainder of this statement plus the next
                # branch header were destroyed by an automated secret-scrubber
                # ("******"). The original most likely printed the friend's
                # screen name and then opened an
                # `elif put.startswith("screenshot"):` branch. As written this
                # is NOT valid Python — restore from version history.
                print("\nName: ", json.dumps(friend.name), " Username: "******"screenshot"):
        try:
            # Save a full-screen screenshot to the user's Desktop.
            pic = pyautogui.screenshot()
            spath = os.path.expanduser('~') + '/Desktop/screenshot.jpg'
            pic.save(spath)
        except:
            print("Unable to take screenshot.")
    #Upcoming events
    elif put.startswith("upcoming events") or put.startswith(
            "coming events") or put.startswith("events"):
        try:
            # Read-only Google Calendar access; shows the next 10 events in a
            # small Tk window.
            SCOPES = 'https://www.googleapis.com/auth/calendar.readonly'
            store = file.Storage('credentials.json')
            creds = store.get()
            if not creds or creds.invalid:
                flow = client.flow_from_clientsecrets('client_secret.json',
                                                      SCOPES)
                creds = tools.run_flow(flow, store)
            service = build('calendar', 'v3', http=creds.authorize(Http()))
            now = datetime.datetime.utcnow().isoformat(
            ) + 'z'  # 'Z' indicates UTC time
            root = tk.Tk()
            root.title("Top 10 Upcoming Events")
            events_result = service.events().list(
                calendarId='primary',
                timeMin=now,
                maxResults=10,
                singleEvents=True,
                orderBy='startTime').execute()
            events = events_result.get('items', [])
            if not events:
                w = tk.Label(root, text="No upcoming events found.")
                w.pack()
            # NOTE(review): nesting below reconstructed from a collapsed
            # source line — header labels are assumed to be written even when
            # the event list is empty; confirm against upstream.
            w = tk.Label(root, text="Event Title")
            w.grid(row=0, column=1)
            w = tk.Label(root, text="Time And Date Of Event")
            w.grid(row=0, column=2)
            i = 1
            for event in events:
                # All-day events carry 'date' instead of 'dateTime'.
                start = event['start'].get('dateTime',
                                           event['start'].get('date'))
                w = tk.Label(root, text=event['summary'])
                w.grid(row=i, column=1)
                w = tk.Label(root, text=start)
                w.grid(row=i, column=2)
                i = i + 1
            root.geometry("400x400")
            root.mainloop()
        except:
            print("Unable to take upcoming events")
    #Add note
    elif put.startswith("note") or put.startswith("not") or put.startswith(
            "node"):
        try:
            check = link[1]
            username = os.getlogin()
            filename = "Notes.txt"
            f1 = open(
                r'''C:\Users\{0}\Desktop\{1}'''.format(username, filename),
                'a')
            link = '+'.join(link[1:])
            text = link.replace('+', ' ')
            text = text[0].capitalize() + text[1:]
            # Question-like first word -> punctuate as a question.
            if check in check_keywords:
                text += "?"
            else:
                text += "."
            f1.write(text)
            f1.write("\n")
            f1.close()
            speak.say("Note added successfully!")
            speak.runAndWait()
        except:
            print("Could not add the specified note!")
    #adding an event in google calendar
    elif link[0] == "add" and link[1] == "event":
        try:
            try:
                import argparse
                flags = argparse.ArgumentParser(
                    parents=[tools.argparser]).parse_args()
            except ImportError:
                flags = None
            SCOPES = 'https://www.googleapis.com/auth/calendar'
            CLIENT_SECRET_FILE = 'Client_Secret.json'
            APPLICATION_NAME = 'GSSOC '

            def get_credentials():
                # OAuth2 flow cached under ~/.credentials.
                home_dir = os.path.expanduser('~')
                credential_dir = os.path.join(home_dir, '.credentials')
                if not os.path.exists(credential_dir):
                    os.makedirs(credential_dir)
                credential_path = os.path.join(
                    credential_dir, 'calendar-python-quickstart.json')
                store = Storage(credential_path)
                credentials = store.get()
                if not credentials or credentials.invalid:
                    flow = client.flow_from_clientsecrets(
                        CLIENT_SECRET_FILE, SCOPES)
                    flow.user_agent = APPLICATION_NAME
                    if flags:
                        credentials = tools.run_flow(flow, store, flags)
                    else:
                        credentials = tools.run(flow, store)
                    print('Storing credentials to ' + credential_path)
                return credentials

            def main():
                # Build an all-day-ish event from command tokens:
                # summary = link[2], start date = link[-3], end date = link[-1].
                credentials = get_credentials()
                http = credentials.authorize(httplib2.Http())
                service = discovery.build('calendar', 'v3', http=http)
                summary = link[2]
                d = link[-3]
                e = link[-1]
                date = d + "T00:00:00-07:00"
                end = e + "T00:00:00-07:00"
                event = {
                    'summary': summary,
                    'start': {
                        'dateTime': date,
                    },
                    'end': {
                        'dateTime': end,
                    },
                    'reminders': {
                        'useDefault': False,
                        'overrides': [
                            {
                                'method': 'email',
                                'minutes': 24 * 60
                            },
                            {
                                'method': 'popup',
                                'minutes': 15
                            },
                        ],
                    },
                }
                event = service.events().insert(calendarId='primary',
                                                body=event).execute()
                #print('Event created: %s' % (event.get('htmlLink')))
                #webbrowser.open('https://calendar.google.com/calendar/r')

            if __name__ == '__main__':
                main()
        except Exception as e:
            print(e)
    #Open a existing folder
    elif put.startswith(search_pc):
        try:
            if any(word in put for word in pc_locations):
                # e.g. "find <file> ... desktop" -> open under the profile dir.
                username = getpass.getuser()
                location = link[-1]
                file_name = link[1]
                path = r"C:\Users\%s\%s\%s" % (username, location, file_name)
                os.system("start " + path)
            elif link[-1] == "drive" and link[-3] == "in":
                drive = link[-2]
                file_name1 = link[1]
                if link[2] == link[-3]:
                    file_name2 = ''
                else:
                    file_name2 = link[2]
                path = r"%s:\%s %s " % (drive, file_name1, file_name2)
                os.system("start " + path)
            elif link[-1] == "drive":
                drive = link[-2]
                path = r"%s:\ " % (drive)
                os.system("start " + path)
        except Exception as e:
            print(e)
    #Screen Recorder
    elif link[0] == "recorder":
        try:
            # Optional tokens: link[1]=video device, link[2]=audio device
            # (underscores stand in for spaces in device names).
            if len(link) < 2:
                video = '"UScreenCapture"'
                audio = '"Microphone (Realtek High Definition Audio)"'
            elif len(link) < 3:
                video = link[1]
                video = video.replace('_', ' ')
                video = '"' + video + '"'
                audio = '"Microphone (Realtek High Definition Audio)"'
            else:
                video = link[1]
                video = video.replace('_', ' ')
                video = '"' + video + '"'
                audio = link[2]
                audio = audio.replace('_', ' ')
                audio = '"' + audio + '"'
            username = os.getlogin()
            speak.say("Recording started!")
            speak.runAndWait()
            os.chdir(r'''C:\Users\{}\Desktop'''.format(username))
            subprocess.call(
                r'''ffmpeg -rtbufsize 1500M -f dshow -i video={0}:audio={1} -vcodec mpeg4 -vtag xvid -qscale:v 0 -crf 0 -acodec libmp3lame -ab 320k -ac 1 -ar 44100 video.avi'''
                .format(video, audio),
                shell=True
            )  #video = UScreenCapture , audio = Microphone (Realtek High Definition Audio)
        except:
            print("Unable to start requested service!")
    #Voice Recorder
    elif link[0] == "audio" and link[1] == "recorder":
        try:
            if len(link) < 3:
                audio = '"Microphone (Realtek High Definition Audio)"'
            else:
                audio = link[2]
                audio = audio.replace('_', ' ')
                audio = '"' + audio + '"'
            username = os.getlogin()
            speak.say("Recording started!")
            speak.runAndWait()
            os.chdir(r'''C:\Users\{}\Desktop'''.format(username))
            subprocess.call(
                r'''ffmpeg -rtbufsize 1500M -f dshow -i audio={0} -acodec libmp3lame -ab 320k -ac 1 -ar 44100 audio.mp3'''
                .format(audio),
                shell=True)
        except:
            print("Unable to start requested service!")
    #Video Recorder
    elif link[0] == "video" and link[1] == "recorder":
        try:
            if len(link) < 3:
                video = '"UScreenCapture"'
            else:
                video = link[2]
                video = video.replace('_', ' ')
                video = '"' + video + '"'
            username = os.getlogin()
            speak.say("Recording started!")
            speak.runAndWait()
            os.chdir(r'''C:\Users\{}\Desktop'''.format(username))
            subprocess.call(
                r'''ffmpeg -rtbufsize 1500M -f dshow -i video={0} -vcodec mpeg4 -vtag xvid -qscale:v 0 -crf 0 video.avi'''
                .format(video),
                shell=True)
        except:
            print("Unable to start requested service!")
    #Merge audio and video
    elif link[0] == "merge":
        try:
            username = os.getlogin()
            os.chdir(r'''C:\Users\{}\Desktop'''.format(username))
            video = link[1]
            audio = link[2]
            output = link[3]
            subprocess.call(
                r'''ffmpeg -i {} -i {} -c:v copy -c:a copy {}'''.format(
                    video, audio, output),
                shell=True)
        except:
            print("Unable to process requested service!")
    #Convert video
    elif link[0] == "convert":
        try:
            username = os.getlogin()
            os.chdir(r'''C:\Users\{}\Desktop'''.format(username))
            if link[1] == "na":
                # "na" variant: convert WITHOUT audio (-an).
                form_in = link[2]
                video1 = link[3]
                form_out = link[4]
                video2 = link[5]
                if (form_in == "avi" or form_in == "webm" or form_in == "mp4"
                        or form_in == "mkv") and (form_out == "mp4"
                                                  or form_out == "mkv"):
                    subprocess.call(
                        r'''ffmpeg -i {} -c:v libx264 -an {}'''.format(
                            video1, video2),
                        shell=True)
                elif (form_in == "avi" or form_in == "mp4"
                      or form_in == "mkv") and form_out == "webm":
                    subprocess.call(
                        r'''ffmpeg -i {} -c:v libvpx-vp9 -b:v 2M -an {}'''.
                        format(video1, video2),
                        shell=True)
            else:
                # Normal variant: keep/transcode audio too.
                form_in = link[1]
                video1 = link[2]
                form_out = link[3]
                video2 = link[4]
                if (form_in == "avi" or form_in == "webm" or form_in == "mp4"
                        or form_in == "mkv") and (form_out == "mp4"
                                                  or form_out == "mkv"):
                    subprocess.call(
                        r'''ffmpeg -i {} -c:v libx264 -acodec aac {}'''.format(
                            video1, video2),
                        shell=True)
                elif (form_in == "avi" or form_in == "mp4"
                      or form_in == "mkv") and form_out == "webm":
                    subprocess.call(
                        r'''ffmpeg -i {} -c:v libvpx-vp9 -b:v 2M -cpu-used -5 -deadline realtime -c:a libvorbis {}'''
                        .format(video1, video2),
                        shell=True)
                elif (form_in == "mp4" or form_in == "mkv"
                      or form_in == "webm") and form_out == "avi":
                    subprocess.call(
                        r'''ffmpeg -i {} -c:v mpeg4 -vtag xvid -qscale:v 0 -acodec libmp3lame {}'''
                        .format(video1, video2),
                        shell=True)
                elif (form_in == "avi" or form_in == "webm" or form_in == "mp4"
                      or form_in == "mkv" or form_in == "mp3"
                      or form_in == "m4a") and (form_out == "m4a"
                                                or form_out == "mp3"):
                    subprocess.call(r'''ffmpeg -i {} {}'''.format(
                        video1, video2),
                                    shell=True)
        except:
            print("Unable to process requested service!")
    #Closing Benji
    elif put.startswith(close_keywords):
        # Hard-exit the whole process, skipping cleanup handlers.
        os._exit(0)
    #Images to video
    elif put.startswith("images to video "):
        try:
            framerate = link[3]
            username = os.getlogin()
            os.chdir(r'''C:\Users\{}\Desktop\Images'''.format(username))
            subprocess.call(
                r'''ffmpeg -framerate 1/{} -i img%03d.jpg -vcodec mpeg4 -vtag xvid -qscale:v 0 -crf 0 output.avi'''
                .format(framerate),
                shell=True)
            speak.say("Video created!")
            speak.runAndWait()
        except:
            print("Unable to create video file!")
    #Open Files
    # NOTE(review): this branch tests the same `search_pc` prefix as the
    # "Open a existing folder" branch above, so it is UNREACHABLE in this
    # if/elif chain.
    elif put.startswith(search_pc):
        try:
            name = link[1]
            rex = regex.compile(name)
            filepath = link[2]
            realpath = filepath
            for root, dirs, files in os.walk(os.path.normpath(filepath)):
                for f in files:
                    result = rex.search(f)
                    if result:
                        realpath = os.path.join(root, f)
                        print(realpath + "\n")
                        os.startfile(realpath)
        except:
            print("Error")
    #Plotting of graph
    elif put.startswith(graph_generation):
        try:
            formula = link[3]
            lower_limit = int(link[5])
            upper_limit = int(link[7])
            x = np.array(range(lower_limit, upper_limit))
            # SECURITY NOTE(review): eval() of a user-spoken formula executes
            # arbitrary Python — acceptable only for a local assistant, never
            # for untrusted input.
            y = eval(formula)
            speak.say("Plotting The Graph")
            speak.runAndWait()
            plt.plot(x, y)
            plt.show()
        except:
            print("Error")
            speak.say("Sorry Graph can not be Plotted")
            speak.runAndWait()
    #Box Office Status
    elif link[-1] == "boxoffice":
        try:
            # Scrape BookMyShow listings for the city in link[0], then visit
            # each movie page for its genre tags and name.
            url = "https://in.bookmyshow.com/" + link[0] + "/movies/nowshowing"
            r = requests.get(url)
            soup = BeautifulSoup(r.content, 'html.parser')
            soup_level2 = []
            show_status_list = []
            shows_list = soup.find_all(
                'div',
                attrs={
                    'class': 'card-container wow fadeIn movie-card-container'
                })
            for i in shows_list:
                # Slice the href out of the raw tag text (fragile; depends on
                # the site's exact markup).
                start = str(i).index("href=")
                end = str(i).index("title=")
                soup_level2.append("https://in.bookmyshow.com" +
                                   str(i)[start + 6:end - 2])
            show_status_raw = soup.find_all(
                'div', attrs={'class': 'popularity sa-data-plugin'})
            for i in show_status_raw:
                start = str(i).index("data-coming-soon=")
                end = str(i).index('data-event-code')
                data = str(i)[start + 18:end - 2]
                if data == "false":
                    show_status_list.append("In Cinemas Now...")
                if data == "true":
                    show_status_list.append("Coming Soon...")
            Tags_list = []
            Name_list = []
            for url in soup_level2:
                r = requests.get(url)
                tags = BeautifulSoup(r.content, 'html.parser')
                Tags_raw = tags.find_all('span',
                                         attrs={'class': '__genre-tag'})
                tmp_tags = ""
                for i in Tags_raw:
                    tmp_tags = tmp_tags + str(i)[str(i).index('">') +
                                                 2:str(i).
                                                 index("</span>")] + " - "
                Tags_list.append(tmp_tags[:-3])
                Names_raw = tags.find_all('h1', attrs={'class': '__name'})
                for i in Names_raw:
                    Name_list.append(
                        str(i)[str(i).index('">') + 2:str(i).index("</h1>")])
            speak.say("Preparing List")
            speak.runAndWait()
            cntr = len(Name_list)
            print("----------------------------------------------")
            print(link[0].capitalize())
            print("----------------------------------------------")
            print("")
            for i in range(cntr):
                print("Name : " + Name_list[i])
                print("Tags : " + Tags_list[i])
                print("Status : " + show_status_list[i])
                print("")
            print("----------------------------------------------")
            print("")
        except:
            print("Sorry, List Cannot be Prepared...")
            speak.say("Sorry, List Cannot be Prepared...")
            speak.runAndWait()
    # elif put.startswith(search_pc):
    #     process=subprocess.Popen("dir /b/s "+link[1],shell=True,stdout=subprocess.PIPE)
    #     while True:
    #         output = process.stdout.readline()
    #         if output == '' and process.poll() is not None:
    #             break
    #         if output:
    #             print (output.strip()+"\n")
    #             outp=output
    #     try:
    #         os.startfile(outp)
    #     except:
    #         speak.say("Sorry,couldn't open")
    #Play song on youtube
    # NOTE(review): `if`, not `elif` — this deliberately(?) starts a second
    # dispatch chain, so a command could match both chains.
    if put.startswith(youtube_keywords):
        try:
            link = '+'.join(link[1:])
            # print(link)
            say = link.replace('+', ' ')
            url = 'https://www.youtube.com/results?search_query=' + link
            # webbrowser.open('https://www.youtube.com'+link)
            fhand = urllib.request.urlopen(url).read()
            soup = BeautifulSoup(fhand, "html.parser")
            songs = soup.findAll('div', {'class': 'yt-lockup-video'})
            hit = songs[0].find('a')['href']
            # print(hit)
            speak.say("playing " + say)
            speak.runAndWait()
            webbrowser.open('https://www.youtube.com' + hit)
        except:
            print('Sorry Ethan. Looks like its not working!')
    #Download video
    if put.startswith("download video "):
        try:
            link = '+'.join(link[2:])
            say = link.replace('+', ' ')
            url = 'https://www.youtube.com/results?search_query=' + link
            fhand = urllib.request.urlopen(url).read()
            soup = BeautifulSoup(fhand, "html.parser")
            songs = soup.findAll('div', {'class': 'yt-lockup-video'})
            hit = songs[0].find('a')['href']
            speak.say("downloading video " + say)
            speak.runAndWait()
            username = os.getlogin()
            os.chdir(r'''C:\Users\{}\Desktop'''.format(username))
            YouTube('https://www.youtube.com' +
                    hit).streams.first().download()
            speak.say("download complete!")
            speak.runAndWait()
        except:
            print('Sorry Ethan. Looks like its not working!')
    #Download music
    elif put.startswith(download_music):
        try:
            link = '+'.join(link[1:])
            # print(link)
            say = link.replace('+', ' ')
            url = 'https://www.youtube.com/results?search_query=' + link
            # webbrowser.open('https://www.youtube.com'+link)
            fhand = urllib.request.urlopen(url).read()
            soup = BeautifulSoup(fhand, "html.parser")
            songs = soup.findAll('div', {'class': 'yt-lockup-video'})
            hit = songs[0].find('a')['href']
            # print(hit)
            speak.say("downloading " + say)
            speak.runAndWait()
            # youtube-dl: grab best audio and transcode to mp3 on the Desktop.
            ydl_opts = {
                'format': 'bestaudio/best',
                'postprocessors': [{
                    'key': 'FFmpegExtractAudio',
                    'preferredcodec': 'mp3',
                    'preferredquality': '192',
                }],
                'quiet': True,
                'restrictfilenames': True,
                'outtmpl': 'C:\\Users\\' + os.environ['USERNAME'] +
                           '\\Desktop\\%(title)s.%(ext)s'
            }
            ydl = youtube_dl.YoutubeDL(ydl_opts)
            ydl.download(['https://www.youtube.com' + hit])
            speak.say("download completed Check your desktop for the song")
            speak.runAndWait()
        except:
            print("Unable to download requested music!")
    #Location
    elif any(word in put for word in location_keywords):
        try:
            link = '+'.join(link[1:])
            say = link.replace('+', ' ')
            speak.say("locating " + say)
            speak.runAndWait()
            webbrowser.open('https://www.google.nl/maps/place/' + link)
        except:
            print('The place seems to be sequestered.')
    #Who are you?
    elif any(word in put for word in identity_keywords):
        try:
            speak.say(
                "I am BENJI, a digital assistant declassified for civilian use. Previously I was used by the Impossible Missions Force"
            )
            speak.runAndWait()
        except:
            print('Error. Try reading the ReadMe to know about me!')
    #Open a webpage
    elif any(word in put for word in launch_keywords):
        try:
            link = '+'.join(link[1:])
            speak.say("opening " + link)
            speak.runAndWait()
            webbrowser.open('http://www.' + link)
        except:
            print(
                'Sorry Ethan,unable to access it. Cannot hack either-IMF protocol!'
            )
    #Google search
    elif any(word in put for word in search_keywords):
        try:
            link = '+'.join(link[1:])
            say = link.replace('+', ' ')
            speak.say("searching google for " + say)
            speak.runAndWait()
            webbrowser.open('https://www.google.com/search?q=' + link)
        except:
            print('Nope, this is not working.')
    #Google Images
    elif put.startswith("images of "):
        try:
            link = '+'.join(link[2:])
            say = link.replace('+', ' ')
            speak.say("searching images of " + say)
            speak.runAndWait()
            webbrowser.open('https://www.google.co.in/search?q=' + link +
                            '&source=lnms&tbm=isch')
        except:
            print('Could not search for images!')
    #Gmail
    elif put.startswith("gmail"):
        try:
            speak.say("Opening Gmail!")
            speak.runAndWait()
            webbrowser.open('https://www.google.com/gmail')
        except:
            print("Could not open Gmail!")
    #Google Cloud Print
    elif put.startswith("google cloud print"):
        try:
            speak.say("Opening google cloud print!")
            speak.runAndWait()
            webbrowser.open('https://www.google.com/cloudprint')
        except:
            print("Could not open Google Cloud Print!")
    #Google Others
    elif put.startswith("google "):
        try:
            # "google docs" -> https://docs.google.com, etc.
            say = link[1]
            speak.say("Opening google " + say)
            speak.runAndWait()
            webbrowser.open('https://' + say + '.google.com')
        except:
            print("Could not open Google " + say.capitalize() + "!")
    #Blogger
    elif put.startswith("blogger"):
        try:
            speak.say("Opening blogger!")
            speak.runAndWait()
            webbrowser.open('https://www.blogger.com')
        except:
            print("Could not open Blogger!")
    #Wikipedia
    elif any(word in put for word in wikipedia_keywords):
        try:
            link = '+'.join(link[1:])
            say = link.replace('+', ' ')
            wikisearch = wikipedia.page(say)
            speak.say("Opening wikipedia page for" + say)
            speak.runAndWait()
            webbrowser.open(wikisearch.url)
        except:
            print(
                'Wikipedia could not either find the article or your Third-world connection is unstable'
            )
    #Podcast
    elif put.startswith("podcast"):
        try:
            speak.say("Opening podcast!")
            speak.runAndWait()
            webbrowser.open('https://castbox.fm/home')
        except:
            print("Could not open podcast!")
    #Lock the device
    elif put.startswith('secure ') or put.startswith('lock '):
        try:
            speak.say("locking the device")
            speak.runAndWait()
            ctypes.windll.user32.LockWorkStation()
        except:
            print('Cannot lock device')
    #News of various press agencies
    elif put.startswith('news '):
        try:
            say = '+'.join(link[1:])
            say = say.replace('+', '-')
            # Map spoken names onto NewsAPI source ids.
            if link[1] == "al" and link[2] == "jazeera":
                say += "-english"
            elif link[1] == "bbc":
                say += "-news"
            elif link[1] == "espn" and link[2] == "cric":
                say += "-info"
            # SECURITY NOTE(review): API key is hard-coded here — move to
            # config/environment and rotate the exposed key.
            url = ('https://newsapi.org/v1/articles?source=' + say +
                   '&sortBy=latest&apiKey=571863193daf421082a8666fe4b666f3')
            newsresponce = requests.get(url)
            newsjson = newsresponce.json()
            speak.say('Our agents from ' + say + ' report this')
            speak.runAndWait()
            print(' =====' + say.upper() + '===== \n')
            i = 1
            for item in newsjson['articles']:
                print(str(i) + '. ' + item['title'] + '\n')
                print(item['description'] + '\n')
                i += 1
        except:
            print('Unable to retrieve data!')
    #shutdown after specific time
    elif put.startswith('shutdown after '):
        try:
            if link[2].isdigit() and link[4].isdigit():
                # NOTE(review): the "zero" checks below can never be true once
                # isdigit() has passed — dead code kept for fidelity.
                if link[2] == "zero":
                    link[2] = "0"
                if link[4] == "zero":
                    link[4] = "0"
                hours = int(link[2])
                minutes = int(link[4])
                time_seconds = 60 * minutes
                time_seconds = time_seconds + hours * 3600
                subprocess.call("shutdown /s /t {0}".format(
                    str(time_seconds)),
                                shell=True)
                speak.say("Shutdown initialized!")
                speak.runAndWait()
        except:
            print("Please shutdown manually!")
    #shutdown now
    elif put.startswith("shutdown now"):
        try:
            subprocess.call("shutdown /s /t 0", shell=True)
        except:
            print("Please shutdown manually!")
    #abort shutdown
    elif put.startswith("cancel shutdown"):
        try:
            subprocess.call("shutdown /a", shell=True)
            speak.say("Shutdown cancelled!")
            speak.runAndWait()
        except:
            print("Unable do cancel shutdown!")
    #restart
    elif put.startswith("restart now"):
        try:
            subprocess.call("shutdown /r /t 0", shell=True)
        except:
            print("Unable do restart device!")
    #Folder
    elif put.startswith('create ') and link[-1] == "folder":
        try:
            username = os.getlogin()
            filename = '+'.join(link[1:-1])
            filename = filename.replace('+', '_').capitalize()
            path = r'''C:\Users\{0}\Desktop\{1}'''.format(username, filename)
            os.mkdir(path)
            speak.say("Folder created!")
            speak.runAndWait()
        except:
            print("Couldn't create specified folder!")
    #create file
    elif put.startswith('create ') and link[-1] == "document":
        try:
            username = os.getlogin()
            filename = '+'.join(link[1:-2])
            filename = filename.replace('+', '_').capitalize()
            # link[-2] selects the document type; homophones ("world",
            # "vizio", "reach") are accepted for speech-recognition slips.
            if link[-2] == "text":
                filename += ".txt"
                f1 = open(
                    r'''C:\Users\{0}\Desktop\{1}'''.format(
                        username, filename), 'a')
                f1.close()
            elif link[-2] == "word" or link[-2] == "world":
                filename += ".docx"
                f1 = open(
                    r'''C:\Users\{0}\Desktop\{1}'''.format(
                        username, filename), 'a')
                f1.close()
            elif link[-2] == "powerpoint" or link[-2] == "presentation":
                filename += ".pptx"
                prs = Presentation()
                title_slide_layout = prs.slide_layouts[0]
                slide = prs.slides.add_slide(title_slide_layout)
                os.chdir(r'''C:\Users\{0}\Desktop'''.format(username))
                prs.save(filename)
            elif link[-2] == "excel" or link[-2] == "Excel":
                filename += ".xlsx"
                wb = Workbook(filename)
                ws = wb.add_worksheet()
                os.chdir(r'''C:\Users\{0}\Desktop'''.format(username))
                wb.close()
            elif link[-2] == "visio" or link[-2] == "vizio":
                filename += ".vsdx"
                f1 = open(
                    r'''C:\Users\{0}\Desktop\{1}'''.format(
                        username, filename), 'a')
                f1.close()
            elif link[-2] == "rich" or link[-2] == "reach":
                filename += ".rtf"
                f1 = open(
                    r'''C:\Users\{0}\Desktop\{1}'''.format(
                        username, filename), 'a')
                f1.close()
            speak.say("Created" + filename)
            speak.runAndWait()
        except:
            print("Unable to create a file.")
    #Calculator
    elif put.startswith('calculator'):
        try:
            subprocess.call('calc', shell=True)
        except:
            print("Unable to open calculator!")
    #Exit/Quit
    elif put.startswith('exit') or put.startswith('quit'):
        sys.exit()
def getcomponies(): """ Get Companies from web and write to excel file :return: """ _bases.kill_web_driver_edge() driver = Edge() componies = [] driver.get('https://www.dosab.org.tr/Alfabetik-Firmalar-Listesi') # Get links # links = [] # datalinks = driver.find_elements(By.XPATH, '/html/body/div[2]/div/ul/li/div/a') # for link in datalinks: # linkobj = { # 'link': link.get_attribute('href'), # 'name': link.text # } # links.append(linkobj) # Downlaod Mail Images # for complink in componies: # parsedlink = str(complink['link']).split('/') # mailimg = f'https://www.dosab.org.tr/dosyalar/emailler/{parsedlink[4]}_EMail.jpg' # wget.download(mailimg, "imgs") # OCR Image to text pytesseract.pytesseract.tesseract_cmd = r'C:\Users\abdul\AppData\Local\Tesseract-OCR\tesseract.exe' imgfiles = os.listdir('imgs') imgfiles.sort() for imgfile in imgfiles: compid = imgfile.split('_EMail.jpg')[0] driver.get(f'https://www.dosab.org.tr/Firma/{compid}') compname = driver.find_element(By.XPATH, '/html/body/div[2]/div/div[2]/h4').text img = cv2.imread(f'imgs/{imgfile}') emailtext = str(pytesseract.image_to_string(img, lang='eng')).replace( '\n\f', '') if '@' not in emailtext: emailtext = '' company = {'mail': emailtext, 'name': compname} componies.append(company) workbook = Workbook(excel_file_name) worksheet = workbook.add_worksheet('dosab') row = 0 hformat = workbook.add_format() hformat.set_bold() worksheet.write(row, 0, "Firma Adi", hformat) worksheet.write(row, 1, 'Mailler', hformat) row += 1 for comp in componies: worksheet.write(row, 0, comp["name"]) if '@' in comp['mail']: worksheet.write(row, 1, comp['mail']) row += 1 workbook.close() driver.close()
# -*- coding: utf-8 -*- from qiniu import Auth, put_data import StringIO from xlsxwriter import Workbook import datetime import time ak = "" sk = '' file_type = "office" output = StringIO.StringIO() office = Workbook(output) worksheet_detail = office.add_worksheet(u"明细") worksheet_all = office.add_worksheet(u"总表") worksheet_all.set_column('A:H', 20) worksheet_all.write_row('A1', [ u"被测评人工号", u"被测评人姓名", u"被测评人手机号", u"被测评人职级", u"被测评人部门", u"上对下评分", u"平级平均分", u"下对上评分", u"总平均分" ]) office.close() prefix = 'upload/{0}/'.format(file_type) q = Auth(ak, sk) bucket = "demo" url = "pmr318jrz.bkt.clouddn.com" file_name = '测评信息' + datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S') full_filename = prefix + file_name + '.xlsx' token = q.upload_token(bucket, full_filename, 36000) ret, info = put_data(token, full_filename, output.getvalue()) print '{}{}?t={}'.format(url, ret['key'].encode("utf-8"), 111)
class XLSX(Report):
    """
    Report writer for XLSX.
    """

    # Stop words to use for generating a clean sheet name
    STOP_WORDS = set(stopwords.words("english"))

    def __init__(self, embeddings, db):
        super(XLSX, self).__init__(embeddings, db)

        # Workbook handles
        self.workbook = None
        self.worksheet = None

        # Workbook styles
        self.styles = {}

        # Row index
        self.row = 0

    def mode(self):
        """Returns the file mode to open output with."""
        # xlsxwriter writes binary output
        return "wb"

    def open(self, output):
        """Creates the workbook on *output* and registers all cell styles."""
        self.workbook = Workbook(output)

        # Header styles
        self.styles["query"] = self.workbook.add_format({
            "bold": True,
            "font_size": 14
        })
        self.styles["section"] = self.workbook.add_format({
            "bold": True,
            "font_size": 12
        })
        self.styles["highlight"] = self.workbook.add_format({
            "underline": True,
            "font_color": "blue",
            "font_size": 10.5
        })
        self.styles["highlight-noref"] = self.workbook.add_format(
            {"font_size": 10.5})
        self.styles["header"] = self.workbook.add_format({
            "bold": True,
            "underline": True,
            "bg_color": "#cccccc"
        })

        # Common style definitions
        common = {"text_wrap": True, "align": "vcenter", "font_size": 10.5}
        url = {"font_color": "blue", "underline": True}
        even = {"bg_color": "#f5f5f5"}

        # Column styles
        self.styles["default"] = self.workbook.add_format(common)
        self.styles["url"] = self.workbook.add_format({**common, **url})

        # Even row column styles ("-even" suffix convention, see write())
        self.styles["default-even"] = self.workbook.add_format({
            **common,
            **even
        })
        self.styles["url-even"] = self.workbook.add_format({
            **common,
            **url,
            **even
        })

    def close(self):
        # Free xlsx resources and close file handle
        if self.workbook:
            self.workbook.close()

    def style(self, name):
        """
        Looks up a style by name.

        Args:
            name: style name
        """
        return self.styles[name]

    def sheetname(self, query):
        """
        Builds a sheet name from a query.

        Args:
            query: query

        Returns:
            sheet name
        """

        # Build sheet name up to 30 chars, prevent breaking on word
        tokens = [
            token.strip(string.punctuation) for token in query.split()
            if token.lower() not in XLSX.STOP_WORDS
        ]

        name = ""
        for token in tokens:
            if len(name) + len(token) > 30:
                break
            name += token + " "

        return name

    def write(self, columns, style=None, altrows=False):
        """
        Writes a row to excel.

        Args:
            columns: list of columns to write for row
            style: optional style to use for row
            altrows: if every other row should be color shaded
        """

        # Use altstyle if enabled and it exists
        altstyle = ""
        if altrows and self.row % 2 == 0:
            altstyle = "-even"

            if style and (style + altstyle) in self.styles:
                style += altstyle

        # Write out row column by column
        for x, value in enumerate(columns):
            if isinstance(value, tuple):
                if value[0]:
                    # Tuples are URLs
                    url = "url" + altstyle if altrows else "highlight"
                    self.worksheet.write_url(self.row,
                                             x,
                                             value[0],
                                             self.styles[url],
                                             string=value[1])
                else:
                    # URL link empty, write text with no wrap
                    default = "default" + altstyle if altrows else "highlight-noref"
                    self.worksheet.write(self.row, x, value[1],
                                         self.styles[default])
            else:
                # Default write method
                self.worksheet.write(
                    self.row, x, value,
                    self.styles[style if style else "default" + altstyle])

        # Increment row count
        self.row += 1

    def query(self, output, query):
        """Starts a new worksheet for *query* and writes the query header."""
        # Build sheet name
        name = self.sheetname(query)

        # Create new worksheet, reset rows index
        self.worksheet = self.workbook.add_worksheet(name.title().strip())
        self.row = 0

        # Column widths
        # NOTE(review): self.names is set by the base Report class — the
        # wider layout presumably corresponds to reports with a "Severe"
        # column; confirm against the base class.
        widths = None
        if "Severe" in self.names:
            widths = [10, 40, 15, 15, 15, 40, 40, 60, 10]
        else:
            widths = [10, 50, 15, 50, 50, 60, 10]

        # Format size of columns
        for column, width in enumerate(widths):
            self.worksheet.set_column(column, column, width)

        # Write query to file
        self.write(["%s" % query.strip()], "query")

    def section(self, output, name):
        """Writes a section heading row."""
        self.write([name], "section")

    def highlight(self, output, article, highlight):
        """Writes a highlight row linking back to the source article."""
        # Extract fields
        author, reference = article

        # Build text and citation name
        text = "%s (%s)" % (Query.text(highlight),
                            Query.authors(author if author else "Source"))

        # Write out highlight
        self.write([(reference, text)])

    def headers(self, output, names):
        """Writes the column header row."""
        self.write(names, "header")

    def buildRow(self, article, stat, sections):
        """Builds a column-name -> value dict for one article row.

        NOTE(review): assumes *article* is an indexable record with date,
        title, reference, publication, source, design, sample fields at
        fixed positions — verify against the Report base class schema.
        """
        columns = {}

        # Date
        columns["Date"] = Query.date(article[0]) if article[0] else ""

        # Title
        title = article[1]

        # Append Publication if available. Assume preprint otherwise and show preprint source.
        title += " [%s]" % (article[3] if article[3] else article[4])

        # Title + Publication if available
        columns["Title"] = (article[2], title)

        # Severe
        columns["Severe"] = stat

        # Fatality
        columns["Fatality"] = ""

        # Design
        columns["Design"] = Query.design(article[5])

        # Sample Size
        columns["Sample"] = Query.sample(article[6], article[7])

        # Sampling Method
        columns["Sampling Method"] = Query.text(article[8])

        # Top Matches
        columns["Matches"] = "\n\n".join(
            [Query.text(text) for _, text in sections])

        # Entry Date
        columns["Entry"] = article[9] if article[9] else ""

        return columns

    def writeRow(self, output, row):
        # Write row, use alternate color shading for rows
        self.write(row, None, True)

    def separator(self, output):
        # Section separator
        self.row += 1
    def write_excel(self, title, filename, datas):
        """Renders a transaction listing to an .xlsx file under self.dirname.

        Args:
            title: sheet title, merged/centered across rows 0-2
            filename: output file name (joined with self.dirname)
            datas: iterable of transaction dicts; each record is read for
                'trans_name', 'desc', 'flow', 'trans_amt', 'trans_time',
                'trans_id' and 'balance'
        """
        filepath = os.path.join(self.dirname, filename)
        # Log: "start generating excel: <path>"
        debug(encode_info(u"开始生成 excel: %r" % filepath))
        workbook = Workbook(filepath)
        try:
            worksheet = workbook.add_worksheet('sheet1')
            # Large bold centered format for the merged title block
            title_format = workbook.add_format({
                'valign': 'vcenter',
                'align': 'center',
                'locked': True,
                'bold': True,
                'font_size': '20',
            })
            # Grey bold format for the column header row
            top_format = workbook.add_format({
                'text_wrap': 1,
                'valign': 'vcenter',
                'locked': True,
                'bold': True,
                'bg_color': '#D7D7D7',
                'font_size': '12',
                'border': True
            })
            # Plain body cell format
            body_format = workbook.add_format({
                'text_wrap': 1,
                'valign': 'vcenter',
                'font_size': '12',
                'align': 'left',
                'border': True
            })
            # Shaded body cell format, alternated with body_format per row
            body_format_t = workbook.add_format({
                'text_wrap': 1,
                'valign': 'vcenter',
                'font_size': '12',
                'align': 'left',
                'border': True,
                'bg_color': '#e7e7e7'
            })
            # Title spans rows 0-2, columns 0-6
            worksheet.merge_range(0, 0, 2, 6, title, title_format)
            width = 4
            # NOTE(review): panes freeze below row 6 / right of col 1, but the
            # header row is written at row 3 — confirm the intended split.
            worksheet.freeze_panes(6, 1)
            worksheet.set_column(0, 0, width * 1.5)
            worksheet.set_column(1, 1, width * 5)
            worksheet.set_column(2, 2, width * 4)
            worksheet.set_column(3, 3, width * 15)
            worksheet.set_column(4, 4, width * 7)
            worksheet.set_column(5, 5, width * 5)
            worksheet.set_column(6, 6, width * 5)
            # Header row (row 3): No. / transaction time / type / related id /
            # payment method or product name / income-expense / balance
            worksheet.write(3, 0, u'No.', top_format)
            worksheet.write(3, 1, u'交易时间', top_format)
            worksheet.write(3, 2, u'类型', top_format)
            worksheet.write(3, 3, u'相关单号', top_format)
            worksheet.write(3, 4, u'充值方式/商品名称', top_format)
            worksheet.write(3, 5, u'收入/支出', top_format)
            worksheet.write(3, 6, u'余额', top_format)
            set_row = 3
            index = 0
            for data in datas:
                set_row += 1
                index += 1
                # Alternate row shading
                if index % 2 == 0:
                    tmp_body_format = body_format
                else:
                    tmp_body_format = body_format_t
                trans_name = data['trans_name']
                desc = data['desc']
                # NOTE(review): Python 2 idiom — str here is a byte string and
                # decode() un-escapes \uXXXX sequences. On Python 3, str has no
                # decode() and this raises; confirm the target interpreter.
                if isinstance(trans_name, str):
                    trans_name = trans_name.decode('unicode_escape')
                if isinstance(desc, str):
                    desc = desc.decode('unicode_escape')
                flow = data['flow']
                flow_out = True if flow.lower() == 'out' else False
                # Prefix outgoing amounts with '-'
                payment = ''.join(('-' if flow_out else '', data['trans_amt']))
                worksheet.write(set_row, 2, trans_name, tmp_body_format)
                worksheet.write(set_row, 0, index, tmp_body_format)
                worksheet.write(set_row, 1, data['trans_time'], tmp_body_format)
                worksheet.write(set_row, 3, data['trans_id'], tmp_body_format)
                worksheet.write(set_row, 4, desc, tmp_body_format)
                worksheet.write(set_row, 5, payment, tmp_body_format)
                worksheet.write(set_row, 6, data['balance'], tmp_body_format)
        except Exception as e:
            # Best-effort: log and still close the workbook below
            debug(e)
        finally:
            workbook.close()
            # Log: "saved excel: <path>"
            debug(encode_info(u"保存excel: %r" % filepath))
planilha.set_column('A:A', 20) planilha.set_column('B:B', 20) bold = excel_file.add_format({'bold': True}) # Creating DataSet to create chart header = ['CATEGORY', 'VALUES'] data = [['Maca', 'Uva', 'Pera', 'Morango'], [30, 13, 17, 40]] planilha.write_row('A1', header, bold) planilha.write_column('A2', data[0]) planilha.write_column('B2', data[1]) # Creating Chart chart = excel_file.add_chart({'type': 'pie'}) chart.add_series({ 'name': 'Fruits liked', 'categories': ['Sheet1', 1, 0, 4, 0], 'values': ['Sheet1', 1, 1, 4, 1] }) chart.set_title({'name': 'My fruits most liked'}) chart.set_style(10) planilha.insert_chart('D1', chart, {'x_offset': 20, 'y_offset': 25}) # Adding image to plan planilha.insert_image("A20", "logo.png") excel_file.close()
def crea_excel_oferta(queryset):
    '''
    Creates the Excel file for an offer. Called from the "Crea Excel"
    action of ofertas_list. Streams an .xlsx workbook directly into an
    HttpResponse and returns it (only the first offer in queryset is used).
    '''
    #queryset = queryset[0]
    for obj in queryset[:1]:
        _fileName = "SIGEC_OFE0{}_{}.xlsx".format(
            obj.id, re.sub(' ', '', obj.cliente.nombre.title()))
        # NOTE(review): MIME prefix looks doubled —
        # 'application/application/vnd...' — confirm and fix to
        # 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'.
        response = HttpResponse(
            content_type=
            'application/application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
            charset='iso-8859-1')
        response['Content-Disposition'] = f'attachment; filename={_fileName}'
        # Workbook writes straight into the HTTP response body
        book = Workbook(response)
        sheet = book.add_worksheet('Grupo SPEC')
        ################# WIDTH AND HEIGHT #################
        format_filas_columnas(sheet)
        ################# HEADERS #################
        #row = 0
        row = imp_cabeceras(book, sheet, obj, obj.user_names())
        row += 2
        col = 2
        ################# SUBJECT #################
        sheet.merge_range(row, col, row, col + 7, obj.asunto.upper(),
                          book.add_format(FORMATOS.get("headerGrupos")))
        row += 2
        col = 2
        ################# COLUMN TITLES #################
        # group titles
        # label and number of columns each one spans
        var_columnas = (
            ("CÓDIGO", 1),
            ("CANTIDAD", 1),
            ("DESCRIPCIÓN", 1),
            ("PRECIO UNITARIO", 2),
            ("TOTAL", 3),
        )
        for var, posic in var_columnas:
            if posic == 1:
                #sheet.write(row, col, var, book.add_format(dict(**FORMATOS.get("center"), **FORMATOS.get("medio"))))
                sheet.write(row, col, var,
                            book.add_format(FORMATOS.get("headerGrupos")))
            else:
                #sheet.merge_range(row, col, row, col+posic-1, var, book.add_format(dict(**FORMATOS.get("center"), **FORMATOS.get("wrap"), **FORMATOS.get("medio"))))
                sheet.merge_range(row, col, row, col + posic - 1, var,
                                  book.add_format(FORMATOS.get("headerGrupos")))
            col += posic
        ################# END COLUMN TITLES #################
        ################# BLANK LINE #################
        col = 2
        row += 1
        imp_linea_vacia(book, sheet, row, col)
        ################# ITEMS #################
        # Offer lines, grouped by product category order
        lineas = LineaOferta.objects.filter(oferta=obj.id).order_by(
            'producto__categoria__orden', 'pk')
        if lineas:
            row += 1
            initialRow = row
            parcial = row
            categAnt = ""
            # NOTE(review): this inner loop rebinds obj, shadowing the outer
            # `for obj in queryset[:1]` variable — confirm it is intentional.
            for obj in queryset:
                for linea in lineas[:1]:
                    ################# INITIAL GROUP HEADER #################
                    categAnt = linea.producto.categoria.nombre
                    imp_linea_categoria(book, sheet, row, col, grupo=categAnt)
                    row += 1
                for linea in lineas:
                    categAnt = linea.producto.categoria.nombre if not categAnt else categAnt
                    categAct = linea.producto.categoria.nombre
                    #print(f'{categAnt != categAct} - |{categAnt}| vs |{categAct}|')
                    # Category changed: close the previous group with a
                    # subtotal, add a blank line and open the new group
                    if categAnt != categAct:
                        ################# TOTAL LINE #################
                        imp_linea_total(book, sheet, row, col,
                                        obj.moneda.codigo, parcial,
                                        final=False, grupo=categAnt)
                        row += 1
                        ################# BLANK LINE #################
                        imp_linea_vacia(book, sheet, row, col)
                        row += 1
                        ################# GROUP HEADER #################
                        imp_linea_categoria(book, sheet, row, col, grupo=categAct)
                        row += 1
                        categAnt = ""
                        parcial = row
                    sheet.write(row, col, linea.producto.codigo,
                                book.add_format(FORMATOS.get("item_BI")))
                    sheet.write(row, col + 1, linea.cantidad,
                                book.add_format(FORMATOS.get("item_BI")))
                    sheet.write(row, col + 2, linea.producto.descripcion,
                                book.add_format(
                                    FORMATOS.get("item_BI")))  # description
                    sheet.write(row, col + 3, obj.moneda.codigo,
                                book.add_format(FORMATOS.get("item_BI")))
                    '''
                    if obj.tasa_cambio < 1: #hay descuento
                        #imprimo el costo real de cada producto para luego hacer notorio el descuento
                        sheet.write(row, col+4, linea.costo_custom if linea.costo_custom else linea.producto.costo, book.add_format(FORMATOS.get("item_SB")))
                    else:
                    '''
                    # Unit price: custom cost wins; otherwise list cost
                    # converted with the offer's exchange rate
                    sheet.write(
                        row, col + 4,
                        linea.costo_custom if linea.costo_custom else
                        linea.producto.costo * obj.tasa_cambio,
                        book.add_format(FORMATOS.get("item_SB")))
                    sheet.write(row, col + 5, obj.moneda.codigo,
                                book.add_format(FORMATOS.get("item_BI")))
                    # Line total = quantity (col D) * unit price (col G)
                    sheet.write_formula(row, col + 6, f'D{row+1}*G{row+1}',
                                        book.add_format(FORMATOS.get("item_SB")))
                    sheet.write(row, col + 7, "+ IVA",
                                book.add_format(FORMATOS.get("item_BD")))
                    row += 1
                ################# END ITEMS #################
                # Subtotal for the last open category
                imp_linea_total(book, sheet, row, col, obj.moneda.codigo,
                                parcial, final=False, grupo=categAnt)
                row += 1
                ################# BLANK LINE #################
                imp_linea_vacia(book, sheet, row, col)
                row += 1
                ################# DISCOUNTS #################
                descuentos = obj.get_descuentos()
                if descuentos:
                    imp_linea_categoria(book, sheet, row, col,
                                        grupo="Bonificaciones")
                    row += 1
                    # NOTE(review): desc_total is assigned but never used
                    desc_total = False
                    if "TOTAL" in descuentos:
                        # Single global discount over the whole offer
                        imp_linea_total(
                            book, sheet, row, col, obj.moneda.codigo,
                            initialRow,
                            monto=obj.get_categoria_sin_descuento("TOTAL") -
                            obj.get_costo_categoria("TOTAL"),
                            grupo=f'{"TOTAL"} ({descuentos.get("TOTAL")}% OFF)',
                            descuento=True)
                        row += 1
                    else:
                        # Per-category discounts
                        for k, v in descuentos.items():
                            if obj.existe_categoria(k):
                                imp_linea_total(
                                    book, sheet, row, col, obj.moneda.codigo,
                                    initialRow,
                                    monto=obj.get_categoria_sin_descuento(k) -
                                    obj.get_costo_categoria(k),
                                    grupo=f'{k} ({v}% OFF)',
                                    descuento=True)
                                row += 1
                if descuentos:
                    # Grand total of all discounts
                    imp_linea_total(book, sheet, row, col, obj.moneda.codigo,
                                    initialRow,
                                    monto=obj.get_total_descuentos(),
                                    grupo=f'.....................TOTAL BONIFICADO',
                                    descuento=True)
                    row += 1
                    imp_linea_vacia(book, sheet, row, col)
                    row += 1
                ################# TOTAL LINE #################
                imp_linea_total(book, sheet, row, col, obj.moneda.codigo,
                                initialRow, monto=obj.costo_total())
                ################# FINAL LINE #################
                row += 1
                imp_linea_vacia(book, sheet, row, col, final=True)
                ################# COMMERCIAL TERMS #################
                row += 2
                col = 2
                condic_custom = Condiciones_Custom.objects.filter(oferta=obj.id)
                imp_condiciones(
                    book, sheet, row, col, tipo="oferta",
                    condiciones_custom=condic_custom[0] if condic_custom else None)
        book.close()
        return response
def excel_writer(arr):
    """Writes the scraped product survey workbook.

    Builds two sheets: a product-information grid (one product per column,
    with images and a rating bar chart rendered via matplotlib) and a review
    detail sheet with translated reviews and a word-frequency pie chart.

    Args:
        arr: list of (word, frequency) pairs; the top 20 are written and the
             top 10 feed the pie chart. Reads module-global pro_infos.
    """
    # NOTE(review): '%m' appears twice — the second one is month again, not
    # minutes ('%M') — confirm the intended timestamp format.
    book_name = 'Products Survey%s.xlsx' % strftime('%Y%m%d%H%m%S', localtime())
    sheet_name = 'Product Information'
    review_detail_name = 'Review Detail'
    wb = Workbook(book_name, options={'strings_to_urls': 0})
    ws = wb.add_worksheet(sheet_name)
    ws2 = wb.add_worksheet(review_detail_name)
    ncol = len(pro_infos)
    # Base cell format plus four colored bold variants
    style = wb.add_format({
        'font_name': 'Times New Roman',
        'align': 'center',
        'valign': 'vcenter',
        'border': 1,
    })
    red = wb.add_format({
        'color': 'red',
        'bold': 1,
        'align': 'center',
        'valign': 'vcenter',
        'border': 1,
        'font_name': 'Times New Roman'
    })
    green = wb.add_format({
        'color': 'green',
        'bold': 1,
        'align': 'center',
        'valign': 'vcenter',
        'border': 1,
        'font_name': 'Times New Roman'
    })
    blue = wb.add_format({
        'color': 'blue',
        'bold': 1,
        'align': 'center',
        'valign': 'vcenter',
        'border': 1,
        'font_name': 'Times New Roman'
    })
    orange = wb.add_format({
        'color': 'orange',
        'bold': 1,
        'align': 'center',
        'valign': 'vcenter',
        'border': 1,
        'font_name': 'Times New Roman'
    })
    style_arr = [red, green, orange, blue]
    # NOTE(review): this appends the list object itself 16 times (nested
    # lists), not 16 more formats — confirm whether style_arr * 4 or
    # extending with the four formats was intended.
    style_arr.extend([style_arr] * 16)
    style.set_text_wrap()
    ws.set_column(1, 21, 20)
    ws.set_column(0, 0, 10)
    # (row index, row height) pairs for the product sheet
    row_high = [(0, 20), (1, 120), (2, 50), (3, 20), (4, 20), (5, 20),
                (6, 20), (7, 20), (8, 20), (9, 20), (10, 120), (11, 20),
                (12, 20), (13, 20), (14, 120)]
    for item in row_high:
        ws.set_row(item[0], item[1])
    # Row labels (column 0): product info, picture, category, size, size(cm),
    # weight, weight(kg), seller, shipping, review count, rating, price,
    # first-available date, sales, sales trend
    first_col = [
        '产品信息', '产品图片', '类目', '尺寸', '尺寸(cm)', '重量', '重量(kg)', '卖家',
        '发货方式', '评论数', '评分', '售价', '上架日期', '销量', '销量趋势'
    ]
    for j in range(len(first_col)):
        ws.write(j, 0, first_col[j], style)
    # Product index header across the top row
    for j in range(20):
        ws.write(0, j + 1, j + 1, style)
    for j in range(1, 602):
        ws2.set_row(j + 1, 23)
    # Review sheet left column: picture block, then repeating
    # title/rating/style/review-time label groups every 5 rows
    ws2.merge_range(1, 0, 6, 0, '产品图片', style)
    nn = 7
    for j in range(102):
        ws2.merge_range(5 * j + nn, 0, 5 * j + nn + 1, 0, '标题', style)
        ws2.write(5 * j + nn + 2, 0, '评分', style)
        ws2.write(5 * j + nn + 3, 0, '款式', style)
        ws2.write(5 * j + nn + 4, 0, '评论时间', style)
    ws2.set_column(1, 3 * ncol, 20)
    ws2.set_column(0, 0, 10)
    # Three columns per product on the review sheet
    for j in range(ncol):
        ws2.merge_range(0, 3 * j + 1, 0, 3 * j + 3, j + 1, style)
        ws2.merge_range(1, 3 * j + 1, 6, 3 * j + 3, '', style)
    for I in range(ncol):
        try:
            url = pro_infos[I][1]['Url']
            col_num = 1
            asin = pro_infos[I][1]['ASIN']
            pic_link = pro_infos[I][1]['Picture']
            # Downloads the product picture to images/<asin>.jpg
            get_img(pic_link, asin)
            rank = pro_infos[I][1]['Best Sellers Rank']
            size = pro_infos[I][1]['Package Dimensions']
            weight = pro_infos[I][1]['Shipping Weight']
            cm_size = pro_infos[I][1]['Package Dimensions(cm)']
            kg_weight = pro_infos[I][1]['Shipping Weight(kg)']
            price = pro_infos[I][1]['Price']
            review_num = pro_infos[I][1]['Customer Reviews']
            saler = pro_infos[I][1]['Saler']
            delivery = pro_infos[I][1]['Delivery Way']
            first_date = pro_infos[I][1]['Date First Available']
            review_rate = pro_infos[I][1]['Review Rate']
            one_star = pro_infos[I][1]['Review Details']['1 star']
            two_star = pro_infos[I][1]['Review Details']['2 star']
            three_star = pro_infos[I][1]['Review Details']['3 star']
            four_star = pro_infos[I][1]['Review Details']['4 star']
            five_star = pro_infos[I][1]['Review Details']['5 star']
            stars = [one_star, two_star, three_star, four_star, five_star]
            # Product picture cell, hyperlinked to the product page
            ws.insert_image(
                col_num, I + 1, 'images/%s.jpg' % asin, {
                    'x_scale': 0.2,
                    'y_scale': 0.2,
                    'x_offset': 10,
                    'y_offset': 10,
                    'url': url
                })
            ws.write(col_num, I + 1, '', style)
            col_num += 1
            # Render the star-distribution as a horizontal bar chart image
            name_list = ['1 star', '2 star', '3 star', '4 star', '5 star']
            num_list = [int(stars[j].strip().strip('%')) for j in range(5)]
            grey_list = [100 - j for j in num_list]
            rects = plt.barh(range(5), num_list, tick_label=name_list,
                             color='orange')
            plt.barh(range(5), grey_list, color='grey',
                     left=[rect.get_width() for rect in rects])
            k = 0
            for rect in rects:
                plt.text(102, rect.get_y() + rect.get_height() / 2,
                         str(num_list[k]) + '%',
                         ha='left', va='center',
                         fontdict={'fontsize': 'xx-large'})
                plt.text(0, rect.get_y() + rect.get_height() / 2,
                         name_list[k],
                         ha='right', va='center',
                         fontdict={
                             'color': 'blue',
                             'fontsize': 'xx-large'
                         })
                k += 1
            plt.text(10, -1, str(review_rate), fontdict={'fontsize': 30})
            plt.axis('off')
            if not exists('reviews_shot'):
                mkdir('reviews_shot')
            plt.savefig('reviews_shot/%s.jpg' % asin)
            plt.close()
            row_value = [
                str(rank), str(size), str(cm_size), str(weight),
                str(kg_weight), str(saler), str(delivery),
                int(review_num[:-16].replace(',', ''))
            ]
            for j in range(len(row_value)):
                ws.write(j + col_num, I + 1, row_value[j], style)
            col_num += len(row_value)
            # Star-distribution chart image
            ws.insert_image(col_num, I + 1, 'reviews_shot/%s.jpg' % asin, {
                'x_scale': 0.2325,
                'y_scale': 0.3,
                'x_offset': 3,
                'y_offset': 13
            })
            ws.write(col_num, I + 1, '', style)
            col_num += 1
            row_value2 = [float(price[1:]), str(first_date)]
            for j in range(len(row_value2)):
                ws.write(j + col_num, I + 1, row_value2[j], style)
            col_num += len(row_value2)
            # Sales / sales-trend cells left blank (filled in manually)
            for j in range(2):
                ws.write(j + col_num, I + 1, '', style)
            col_num += 2
            ws.write(100, I + 1, str(url))
            ws2.insert_image(
                1, 3 * I + 1, 'images/%s.jpg' % asin, {
                    'x_scale': 0.2,
                    'y_scale': 0.2,
                    'x_offset': 180,
                    'y_offset': 10,
                    'url': url
                })
            # Log: "about to translate reviews -- ASIN: ..."
            logging.info('准备翻译评论--ASIN: %s ......\n' % asin)
            # Ten worker threads translate and write this product's reviews
            threads = [
                TranslaterThread(wb, ws2, pro_infos[I][1]['Reviews info'],
                                 style, arr) for j in range(10)
            ]
            for thread in threads:
                thread.start()
            for thread in threads:
                thread.join()
        except KeyError as e:
            logging.info(e, url)
    # Word-frequency table for the top 20 words (headers rewritten each pass)
    for I in range(len(arr[:20])):
        ws2.write(0, 3 * ncol + 1, 'Words', style)
        ws2.write(0, 3 * ncol + 2, 'Translation', style)
        ws2.write(0, 3 * ncol + 3, 'Frequency', style)
        ws2.write(I + 1, 3 * ncol + 1, str(arr[I][0]), style)
        try:
            ws2.write(I + 1, 3 * ncol + 2, translate_format(arr[I][0]), style)
        except KeyError:
            logging.info(arr[I][0])
        ws2.write(I + 1, 3 * ncol + 3, int(arr[I][1]), style)
    # Insert the word-frequency pie chart (top 10 rows)
    chart = wb.add_chart({'type': 'pie'})
    chart.add_series({
        'name': 'Word Frequency Data',
        'categories':
        ['%s' % review_detail_name, 1, 3 * ncol + 1, 11, 3 * ncol + 1],
        'values':
        ['%s' % review_detail_name, 1, 3 * ncol + 3, 11, 3 * ncol + 3],
        'data_labels': {
            'value': 1,
            'category': 1
        }
    })
    chart.set_title({'name': 'Word Frequency Top 10'})
    chart.set_style(10)
    ws2.insert_chart(4, 3 * ncol + 4, chart, {'x_offset': 25, 'y_offset': 20})
    # Log: "Excel writing finished"
    logging.info('\nExcel写入完成......')
    wb.close()
'ITW', 'ITV', 'IRR', 'IMF', 'AHT' ] # print(sample_all_zeo_with_certain_T_sites(topo, t_count=1)) wb = Workbook('/Users/jiaweiguo/Box/temp_T_count.xlsx') sheet1 = wb.add_worksheet('Sheet1') output = [] for count, zeolite in enumerate(topo): try: EFzeolite = ExtraFrameworkMaker(iza_code=zeolite) EFzeolite.get_t_sites() output.append([ zeolite, len(EFzeolite.t_site_indices), len(EFzeolite.EFzeolite) ]) sheet1.write(count, 0, zeolite) sheet1.write(count, 1, len(EFzeolite.t_site_indices)) sheet1.write(count, 2, len(EFzeolite.EFzeolite)) print([ zeolite, len(EFzeolite.t_site_indices), len(EFzeolite.EFzeolite) ]) except: print([zeolite, 0, 0]) wb.close()
def write_xlsx(filename, groups, col_fields, head_names, cfg):
    """
    Write BoM out to a XLSX file
    filename = path to output file (must be a .xlsx file)
    groups = [list of ComponentGroup groups]
    col_fields = [list of headings to search for data in the BoM file]
    head_names = [list of headings to display in the BoM file]
    cfg = BoMOptions object with all the configuration
    Returns True on success, False when xlsxwriter is unavailable.
    """
    if not XLSX_SUPPORT:
        logger.error(
            'Python xlsxwriter module not installed (Debian: python3-xlsxwriter)'
        )
        return False
    # Index of the column rendered as a datasheet hyperlink (-1 = disabled)
    link_datasheet = -1
    if cfg.xlsx.datasheet_as_link and cfg.xlsx.datasheet_as_link in col_fields:
        link_datasheet = col_fields.index(cfg.xlsx.datasheet_as_link)
    link_digikey = cfg.xlsx.digikey_link
    hl_empty = cfg.xlsx.highlight_empty
    workbook = Workbook(filename)
    ws_names = ['BoM', 'DNF']
    row_headings = head_names
    # Leave space for the logo, title and info
    head_size = compute_head_size(cfg)
    # First row for the information
    r_info_start = 1 if cfg.xlsx.title else 0
    max_width = cfg.xlsx.max_col_width
    # #######################
    # Create all the formats
    # #######################
    # Headings
    # Column names format
    fmt_head = create_fmt_head(workbook, cfg.xlsx.style)
    # Column formats
    fmt_cols = create_fmt_cols(workbook, cfg.xlsx.col_colors)
    col_fmt = create_col_fmt(col_fields, cfg.xlsx.col_colors, fmt_cols)
    # Page head
    # Logo
    image_data = get_logo_data(cfg.xlsx.logo)
    # Title
    fmt_title = create_fmt_title(workbook, cfg.xlsx.title)
    fmt_subtitle = create_fmt_subtitle(workbook)
    # Info
    fmt_info = create_fmt_info(workbook, cfg)
    # #######################
    # Fill the cells
    # #######################
    # Normal BoM & DNF
    for ws in range(2):
        # Second pass is DNF
        dnf = ws == 1
        # Should we generate the DNF?
        if dnf and (not cfg.xlsx.generate_dnf or cfg.n_total == cfg.n_fitted):
            break
        worksheet = workbook.add_worksheet(ws_names[ws])
        row_count = head_size
        # Headings
        # Create the head titles
        column_widths = [0] * max(len(col_fields), 6)
        rows = [row_headings]
        for i in range(len(row_headings)):
            # Title for this column
            column_widths[i] = len(row_headings[i]) + 10
            worksheet.write_string(row_count, i, row_headings[i], fmt_head)
            if cfg.column_comments[i]:
                worksheet.write_comment(row_count, i, cfg.column_comments[i])
        # Body
        row_count += 1
        # NOTE(review): the inner cell loop below rebinds i, shadowing this
        # group index — harmless here since the group index is unused after,
        # but worth renaming.
        for i, group in enumerate(groups):
            # Skip groups that belong to the other sheet (fitted vs DNF)
            if (cfg.ignore_dnf and not group.is_fitted()) != dnf:
                continue
            # Get the data row
            row = group.get_row(col_fields)
            rows.append(row)
            # datasheet is only bound when the link column is enabled; its
            # sole use below is guarded by link_datasheet == i
            if link_datasheet != -1:
                datasheet = group.get_field(ColumnList.COL_DATASHEET_L)
            # Fill the row
            for i in range(len(row)):
                cell = row[i]
                # Empty/placeholder cells get the highlight format
                if hl_empty and (len(cell) == 0 or cell.strip() == "~"):
                    fmt = col_fmt[-1][row_count % 2]
                else:
                    fmt = col_fmt[i][row_count % 2]
                # Link this column to the datasheet?
                if link_datasheet == i and datasheet.startswith('http'):
                    worksheet.write_url(row_count, i, datasheet, fmt, cell)
                # A link to Digi-Key?
                elif link_digikey and col_fields[i] in link_digikey:
                    url = 'http://search.digikey.com/scripts/DkSearch/dksus.dll?Detail&name=' + cell
                    worksheet.write_url(row_count, i, url, fmt, cell)
                else:
                    worksheet.write_string(row_count, i, cell, fmt)
                # Grow the column to fit the widest cell (+5 padding)
                if len(cell) > column_widths[i] - 5:
                    column_widths[i] = len(cell) + 5
            row_count += 1
        # Page head
        # Logo
        col1 = insert_logo(worksheet, image_data)
        # Title
        if cfg.xlsx.title:
            worksheet.set_row(0, 32)
            worksheet.merge_range(0, col1, 0,
                                  len(column_widths) - 1, cfg.xlsx.title,
                                  fmt_title)
        # PCB & Stats Info
        if not (cfg.xlsx.hide_pcb_info and cfg.xlsx.hide_stats_info):
            write_info(cfg, r_info_start, worksheet, column_widths, col1,
                       fmt_info, fmt_subtitle)
        # Adjust cols and rows
        adjust_widths(worksheet, column_widths, max_width, cfg.column_levels)
        adjust_heights(worksheet, rows, max_width, head_size)
        worksheet.freeze_panes(head_size + 1, 0)
        worksheet.repeat_rows(head_size + 1)
        worksheet.set_landscape()
    # Optionally add KiCost information
    kicost_colors = None
    if cfg.xlsx.kicost:
        kicost_colors = create_kicost_sheet(workbook, groups, image_data,
                                            fmt_title, fmt_info, fmt_subtitle,
                                            cfg)
    # Add a sheet for the color references
    create_color_ref(workbook, cfg.xlsx.col_colors, hl_empty, fmt_cols,
                     cfg.xlsx.kicost and KICOST_SUPPORT, kicost_colors)
    workbook.close()
    return True