def get_msg_part(request, folder, uid, part_number, inline=False):
    '''Gets a message part.'''
    folder_name = base64.urlsafe_b64decode(str(folder))
    M = serverLogin(request)
    folder = M[folder_name]
    message = folder[int(uid)]
    part = message.bodystructure.find_part(part_number)

    response = HttpResponse(content_type='%s/%s' % (part.media, part.media_subtype))

    if part.filename():
        filename = part.filename()
    else:
        filename = _('Unknown')

    if inline:
        response['Content-Disposition'] = 'inline; filename=%s' % filename
    else:
        response['Content-Disposition'] = 'attachment; filename=%s' % filename

    if part.media.upper() == 'TEXT':
        response['Content-Type'] = '%s/%s; charset=%s' % (part.media, part.media_subtype, part.charset())
    else:
        response['Content-Type'] = '%s/%s' % (part.media, part.media_subtype)

    response.write(message.part(part))
    response.close()
    return response
def get_csv_from_search(request):
    systems, data, name, modes, size, metrics, sort, order = get_search_results(request)
    response = HttpResponse(mimetype="text/csv")
    response['Content-Disposition'] = 'attachment; filename=transit_search_results.csv'
    writer = csv.writer(response)
    # headers
    writer.writerow([
        "System Name", "Mode", "City", "State", "Urbanized Area",
        "Avg Capital Expenses", "Avg Operating Expenses",
        "Passenger Miles Travelled (PMT)", "Unlinked Passenger Trips (UPT)",
        "Recovery Ratio", "Operating Expense per PMT", "Capital Expense per PMT",
        "Operating Expense per UPT", "Capital Expense per UPT"
    ])
    # write the data out
    for sys in systems:
        writer.writerow([
            sys.name, sys.mode, sys.city, sys.state.name, sys.urbanized_area.name,
            sys.avg_capital_expenses, sys.avg_operating_expenses,
            sys.total_PMT, sys.total_UPT, sys.recovery_ratio,
            sys.avg_operating_PMT, sys.avg_capital_PMT,
            sys.avg_operating_UPT, sys.avg_capital_UPT
        ])
    response.close()
    return response
def archive_download(request): try: id = request.GET.get("id","") if id: id = int(id) archive = Archive.objects.get(pk=id) UserLog.objects.log_action(request,UserLog.DOWN_OPT,UserLog.ARCH,archive) context = Context({ "archive":archive, }) src_pathname = archive.path # src_pathname = r"D:\project_now\OurTomorrowLikeTheSun\SVN\leadtel_audio\trunk\src\bocm_audio\audio\static\js\audiojs\audiofinal.mp3" if os.path.exists(src_pathname) and os.path.isfile(src_pathname): path,filename = os.path.split(src_pathname) # now = datetime.datetime.now() # nowstr = now.strftime("%Y%m%d%H%M%S") # newfilename = nowstr+".mp3" newfilename = filename #下载文件名直接取spx文件的名称 response = HttpResponse(mimetype="audio/x-mpeg") response['Content-Disposition'] = 'attachment; filename='+newfilename response.write(open(src_pathname, "rb").read()) response.close() return response else: return HttpResponse(u"下载失败,文件可能不存在或者被删除") else: return HttpResponse(u"下载失败,文件可能不存在或者被删除") except Exception,e: traceback.print_exc() return HttpResponse(u"下载失败,文件可能不存在或者被删除")
def archive_download(request): try: id = request.GET.get("id", "") if id: id = int(id) archive = Archive.objects.get(pk=id) UserLog.objects.log_action(request, UserLog.DOWN_OPT, UserLog.ARCH, archive) context = Context({ "archive": archive, }) src_pathname = archive.path # src_pathname = r"D:\project_now\OurTomorrowLikeTheSun\SVN\leadtel_audio\trunk\src\bocm_audio\audio\static\js\audiojs\audiofinal.mp3" if os.path.exists(src_pathname) and os.path.isfile(src_pathname): path, filename = os.path.split(src_pathname) # now = datetime.datetime.now() # nowstr = now.strftime("%Y%m%d%H%M%S") # newfilename = nowstr+".mp3" newfilename = filename #下载文件名直接取spx文件的名称 response = HttpResponse(mimetype="audio/x-mpeg") response[ 'Content-Disposition'] = 'attachment; filename=' + newfilename response.write(open(src_pathname, "rb").read()) response.close() return response else: return HttpResponse(u"下载失败,文件可能不存在或者被删除") else: return HttpResponse(u"下载失败,文件可能不存在或者被删除") except Exception, e: traceback.print_exc() return HttpResponse(u"下载失败,文件可能不存在或者被删除")
def __call__(self, message): # Set script prefix from message root_path set_script_prefix(message.get("root_path", "")) signals.request_started.send(sender=self.__class__, message=message) # Run request through view system try: request = self.request_class(message) except UnicodeDecodeError: logger.warning("Bad Request (UnicodeDecodeError)", exc_info=sys.exc_info(), extra={"status_code": 400}) response = http.HttpResponseBadRequest() except RequestTimeout: # Parsing the rquest failed, so the response is a Request Timeout error response = HttpResponse("408 Request Timeout (upload too slow)", status_code=408) except RequestAborted: # Client closed connection on us mid request. Abort! return else: try: response = self.get_response(request) # Fix chunk size on file responses if isinstance(response, FileResponse): response.block_size = 1024 * 512 except AsgiRequest.ResponseLater: # The view has promised something else # will send a response at a later time return # Transform response into messages, which we yield back to caller for message in self.encode_response(response): # TODO: file_to_stream yield message # Close the response now we're done with it response.close()
def imprimir_liquidacion(request, pk):
    try:
        liquidacion = Liquidacion.objects.get(id=pk)
    except ValueError:
        # If it does not exist, fall back to "page not found".
        raise Http404()
    response = HttpResponse(content_type='application/pdf')
    response['Content-Disposition'] = "attachment; filename=" + str(liquidacion.mes) + "_" + str(liquidacion.ano) + ".pdf"
    Q = SimpleDocTemplate(response, rightMargin=72, leftMargin=72, topMargin=72, bottomMargin=18)
    Story = []
    styles = getSampleStyleSheet()
    ptext = 'Liquidacion de Sueldo.'
    Story.append(Paragraph(ptext, styles["Normal"]))
    ptext = 'Rut Trabajador: ' + str(liquidacion.Usuario_rut)
    Story.append(Paragraph(ptext, styles["Normal"]))
    ptext = 'mes: ' + str(liquidacion.mes)
    Story.append(Paragraph(ptext, styles["Normal"]))
    ptext = 'año: ' + str(liquidacion.ano)
    Story.append(Paragraph(ptext, styles["Normal"]))
    ptext = 'zonal: ' + str(liquidacion.zonal)
    Story.append(Paragraph(ptext, styles["Normal"]))
    ptext = 'centro costo: ' + str(liquidacion.c_costo)
    Story.append(Paragraph(ptext, styles["Normal"]))
    ptext = 'dias: ' + str(liquidacion.dias)
    Story.append(Paragraph(ptext, styles["Normal"]))
    ptext = 'Sueldo: ' + str(liquidacion.sueldo)
    Story.append(Paragraph(ptext, styles["Normal"]))
    ptext = 'Horas extras: ' + str(liquidacion.h_extras)
    Story.append(Paragraph(ptext, styles["Normal"]))
    ptext = 'Bonos imponibles: ' + str(liquidacion.bonos_impon)
    Story.append(Paragraph(ptext, styles["Normal"]))
    Q.build(Story)
    response.close()
    return response
def lists_to_csv(lists, as_response=False, filename='report.csv'):
    """
    lists: list of lists

    The reader is hard-coded to recognise either '\r' or '\n' as end-of-line,
    and ignores lineterminator. This behavior may change in the future.
    """
    if as_response:
        target = HttpResponse(mimetype='text/csv')
        target['Content-Disposition'] = 'attachment; filename=%s' % filename
    else:
        target = open(filename, 'w')
    new_lists = []
    for row in lists:
        # strip embedded newlines so each logical row stays on one CSV line
        new_row = [smart_str(i).replace('\n', '').replace('\r', '') for i in row]
        new_lists.append(new_row)
    writer = csv.writer(target)
    writer.writerows(new_lists)
    if as_response:
        return target
    else:
        target.close()
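A minimal usage sketch for lists_to_csv, assuming it is wired into a Django view; the view name and row data below are hypothetical:

def counts_report_view(request):
    # hypothetical example data; any list of lists works
    rows = [['name', 'count'], ['alpha', 3], ['beta', 5]]
    # as_response=True returns an HttpResponse ready to hand back to Django
    return lists_to_csv(rows, as_response=True, filename='counts.csv')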
def transform_to_pdf(response, pdfname, return_stringIO=False):
    """Call xhtml2pdf.pisa to convert an HTML response to PDF."""
    # TODO: on-the-fly filename from url
    content = response.content
    if not return_stringIO:
        new_response = HttpResponse(content="", mimetype="application/pdf")
        new_response["Content-Disposition"] = "attachment; filename=%s.pdf" % pdfname
    else:
        new_response = StringIO.StringIO()
    pdf = pisa.pisaDocument(StringIO.StringIO(content), new_response, link_callback=fetch_resources)
    if not pdf.err:
        if return_stringIO:
            pdf = new_response.getvalue()
            new_response.close()
            return pdf
        else:
            return new_response
    else:
        # TODO: return error and redirect to default view
        return HttpResponse("We had some errors in pdfMiddleWare : <br/><pre>%s</pre>" % pdf)
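A hedged usage sketch for transform_to_pdf: render any HTML response first, then convert it. The view and template names here are hypothetical:

from django.shortcuts import render

def invoice_pdf_view(request):
    # hypothetical template; any view producing an HTML HttpResponse will do
    html_response = render(request, 'invoice.html', {'number': 42})
    # returns an HttpResponse with Content-Disposition: attachment; filename=invoice-42.pdf
    return transform_to_pdf(html_response, 'invoice-42')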
def handle(self, body):
    """
    Synchronous message processing.
    """
    # Set script prefix from message root_path, turning None into empty string
    set_script_prefix(self.scope.get("root_path", "") or "")
    signals.request_started.send(sender=self.__class__, scope=self.scope)
    # Run request through view system
    try:
        request = self.request_class(self.scope, body)
    except UnicodeDecodeError:
        logger.warning(
            "Bad Request (UnicodeDecodeError)",
            exc_info=sys.exc_info(),
            extra={"status_code": 400},
        )
        response = http.HttpResponseBadRequest()
    except RequestTimeout:
        # Parsing the request failed, so the response is a Request Timeout error
        response = HttpResponse("408 Request Timeout (upload too slow)", status=408)
    except RequestAborted:
        # Client closed connection on us mid request. Abort!
        return
    else:
        response = self.get_response(request)
        # Fix chunk size on file responses
        if isinstance(response, FileResponse):
            response.block_size = 1024 * 512
    # Transform response into messages, which we send back to the caller
    for response_message in self.encode_response(response):
        self.send(response_message)
    # Close the response now we're done with it
    response.close()
def map_data_csv(request, sector_name=None):
    # need to translate the state_id back to FIPS codes for the map and normalize by population
    # grabbing a complete list of state objects and building a table for translation
    states = {}
    for state in State.objects.all():
        states[state.id] = state

    if request.method == 'GET':
        if 'q' in request.GET:
            (sector, form, faads_search_query) = construct_form_and_query_from_querydict(sector_name, request.GET['q'])
            faads_results = faads_search_query.aggregate('recipient_state')
            max_state_total = 0
            max_per_capita_total = 0
            per_capita_totals = {}
            for state_id in faads_results:
                if state_id in states and states[state_id].population and faads_results[state_id] > 0:
                    per_capita_totals[state_id] = faads_results[state_id] / states[state_id].population
                    if per_capita_totals[state_id] > max_per_capita_total:
                        max_per_capita_total = per_capita_totals[state_id]
                    if faads_results[state_id] > max_state_total:
                        max_state_total = faads_results[state_id]

            results = []
            response = HttpResponse(mimetype="text/csv")
            response['Content-Disposition'] = "attachment; filename=%s-per-capita.csv" % (request.GET['q'])
            writer = csv.writer(response)
            writer.writerow(['state', 'Total Spending', 'Per capita spending'])
            for state_id in per_capita_totals:
                if state_id in states:
                    writer.writerow([
                        states[state_id].name,
                        faads_results[state_id],
                        per_capita_totals[state_id]
                    ])
            response.close()
            return response
    raise Http404()
def machinelearningCurl(csv_file):
    # parse CSV
    d = {}
    d['date'] = []
    d['radiation'] = []
    d['humidity'] = []
    d['temperature'] = []
    d['wind'] = []
    d['demand'] = []
    dictreader = csv.DictReader(
        csv_file,
        fieldnames=['date', 'radiation', 'humidity', 'temperature', 'wind', 'demand'],
        delimiter=',')
    next(dictreader)  # skip the header row
    for row in dictreader:
        for key in row:
            d[key].append(row[key])

    # interpolate weather data
    interpolate(d['radiation'])
    interpolate(d['humidity'])
    interpolate(d['temperature'])
    interpolate(d['wind'])

    # train machine learning algorithm (list() keeps the zip sliceable on Python 3)
    training_x = np.array(list(zip(d['radiation'], d['humidity'], d['temperature'], d['wind']))[:32])
    training_y = np.array(d['demand'][:32])
    poly_svr = SVR(kernel='poly', degree=2)
    poly_svr.fit(training_x, training_y)
    prediction_x = np.array(list(zip(d['radiation'], d['humidity'], d['temperature'], d['wind']))[32:])
    demand_predictions = poly_svr.predict(prediction_x)
    energy = np.array(demand_predictions).tolist()

    csvOut = HttpResponse(content_type='text/csv')
    writer = csv.writer(csvOut)
    writer.writerow(["spam", ""])
    writer.writerow(["spam", ""])
    writer.writerow(["", ""])
    writer.writerow(["date", "energy"])
    for i in range(0, len(energy)):
        writer.writerow([d['date'][i], energy[i]])
    csvOut.close()
    return csvOut
def createBatchfileFromTestList(request):
    """
    Create a batch file from the test list showing how the tests can be run
    from the command line. If a test does not have a test_script specified,
    fortyTwo does not know about it, and it cannot be added to this file.
    Assume python runs the tests. Add \r\n as the end line since that is what
    Windows/Notepad like.

    See 60293: Create a Mechanism to output the command line for running
    tests from fortyTwo.
    """
    end = "\r\n"
    team, release = getTeamAndReleaseObjectsFromSession(request.session)
    testList = Planned_Exec.active.filter(team=team, release=release) \
        .exclude(test_script__isnull=True) \
        .exclude(test_script__exact='') \
        .order_by('protocol__name', 'case__test__number', 'case__number', 'plm__pg_model') \
        .select_related('commandlineargs', 'country')
    fileName = '%s_%s_%s_BatchFile.txt' % (date.today().strftime('%Y%m%d'), team.team_name, release.release_name)
    response = HttpResponse(mimetype="text/plain")
    response['Content-Disposition'] = 'attachment; filename="%s"' % fileName
    try:
        # Write Header information
        response.write("REM - Batch file output from fortyTwo generated at %s" % datetime.now())
        response.write(end)
        response.write("REM - WARNING: The output in this file is not validated for fortyTwo's intended use." + end)
        response.write("REM " + end)
        for test in testList:
            try:
                response.write('python ' + test.test_script + ' ' + test.commandlineargs.printCommandLine() + end)
            except CommandLineArgs.DoesNotExist:
                # tests without command line args do not need to be added to the batch file
                pass
    except:
        response.write("REM - An unknown error occurred during the creation of this file. Contact a fortyTwo administrator." + end)
        raise
    finally:
        response.close()
    return response
def test_response(self):
    filename = os.path.join(os.path.dirname(__file__), 'abc.txt')
    # the file is consumed and closed as soon as it becomes the response content.
    file1 = open(filename)
    r = HttpResponse(file1)
    self.assertTrue(file1.closed)
    r.close()

    # when multiple files are assigned as content, make sure they are all
    # closed with the response.
    file1 = open(filename)
    file2 = open(filename)
    r = HttpResponse(file1)
    r.content = file2
    self.assertTrue(file1.closed)
    self.assertTrue(file2.closed)
def test_response(self):
    filename = os.path.join(os.path.dirname(upath(__file__)), 'abc.txt')
    # the file is consumed and closed as soon as it becomes the response content.
    file1 = open(filename)
    r = HttpResponse(file1)
    self.assertTrue(file1.closed)
    r.close()

    # when multiple files are assigned as content, make sure they are all
    # closed with the response.
    file1 = open(filename)
    file2 = open(filename)
    r = HttpResponse(file1)
    r.content = file2
    self.assertTrue(file1.closed)
    self.assertTrue(file2.closed)
def download_csv(req):
    dataCorrected = req.POST['new_data']
    # Create the HttpResponse object with the appropriate CSV header.
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="CorrectedData.csv"'
    # This is really dumb: round-trip the posted text through csv.reader/csv.writer
    # so it comes back out quoted consistently.
    dataCorrected = str(dataCorrected).splitlines()
    usgincsvreader = csv.reader(dataCorrected, delimiter=',', quotechar='"')
    usgincsvwriter = csv.writer(response, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
    usgincsvwriter.writerows(usgincsvreader)
    response.close()
    return response
def summary_statistics_csv(request, sector_name=None, first_column_label='', data_fetcher=''):
    # we take a string instead of the function itself so that the urlconf can call it directly
    data_fetcher = globals().get(data_fetcher)
    # column must have a label ('state' or 'program')
    assert len(str(first_column_label)) > 0
    # a data-fetching function must be passed (returns list of lists containing
    # either state- or program-indexed numbers)
    assert callable(data_fetcher)
    if request.method == 'GET':
        if 'q' in request.GET:
            (sector, form, faads_search_query) = construct_form_and_query_from_querydict(sector_name, request.GET['q'])
            results = faads_search_query.get_summary_statistics()
            year_range = faads_search_query.get_year_range()
            data = data_fetcher(results, year_range, sector_name)
            response = HttpResponse(mimetype="text/csv")
            response['Content-Disposition'] = "attachment; filename=%s-%s.csv" % (request.GET['q'], first_column_label.replace(" ", "_").lower())
            writer = csv.writer(response)
            writer.writerow([str(first_column_label)] + year_range)
            for row in data[0]:
                stripped_row = map(lambda x: strip_clean_tags(x), row)
                writer.writerow(stripped_row)
            writer.writerow(['Total'] + map(lambda x: strip_clean_tags(x), data[1]))
            response.close()
            return response
    raise Http404()
def obtener_certificado(request):
    now = datetime.datetime.now()
    try:
        usuario = Usuario.objects.get(rut=request.user.first_name)
    except ValueError:
        raise Http404()
    respuesta = HttpResponse(content_type='application/pdf')
    respuesta['Content-Disposition'] = 'filename="certificado.pdf"'
    Q = SimpleDocTemplate(respuesta, rightMargin=72, leftMargin=72, topMargin=72, bottomMargin=18)
    Story = []
    styles = getSampleStyleSheet()
    styles.add(ParagraphStyle(name='Header', alignment=1, spaceBefore=15, fontSize=20, leading=22))
    styles.add(ParagraphStyle(name='Estilo01', alignment=2))
    styles.add(ParagraphStyle(name='Estilo02', alignment=4, firstLineIndent=100, spaceBefore=30, fontSize=18, leading=20))
    styles.add(ParagraphStyle(name='Pie', spaceBefore=120, alignment=2))
    ptext = 'Servicios e Ingenieria Ltda.'
    ptext2 = 'Valdivia, Chile, ' + str(now.day) + '/' + str(now.month) + '/' + str(now.year)
    pa = Paragraph(ptext, styles['Estilo01'])
    pa2 = Paragraph(ptext2, styles['Estilo01'])
    im = Image("/var/www/redtel/static/Redtel_logo.gif")
    im.halign = "LEFT"
    data = [[im, pa], ['', pa2]]
    TTemp = Table(data, colWidths=200)
    Story.append(TTemp)
    HText = "CERTIFICADO DE ANTIGUEDAD LABORAL"
    Header = Paragraph(HText, styles['Header'])
    Story.append(Header)
    ptext = (u'<b>JULIO GUIDILFREDO ZARECHT ORTEGA</b>, Rut 7.385.055-K representante legal de '
             u'<b>Servicios e Ingeniería Limitada</b>, Rut: 77.869.650-9 por medio de la presente, '
             u'certifica que don: ' + usuario.nombre + u', RUT: ' + usuario.rut +
             u', es trabajador de esta empresa, se desempeña como Encargado RRHH, con contrato '
             u'vigente desde el <b>' + usuario.fecha_ingreso + u'</b> y es de carácter <b>Indefinido</b>, '
             u'y registra domicilio según contrato en <b>' + usuario.direccion + u'</b>, de Valdivia.')
    TTemp = Paragraph(ptext, styles['Estilo02'])
    Story.append(TTemp)
    ptext = 'Se emite el presente certificado a petición del interesado para ser presentado en <b>Caja de Compensación</b>'
    TTemp = Paragraph(ptext, styles['Estilo02'])
    Story.append(TTemp)
    im2 = Image("/var/www/redtel/static/firma.png")
    ptext = "JULIO GUIDILFREDO ZARECHT ORTEGA <br/> Representante Legal"
    TTemp = Paragraph(ptext, styles['Pie'])
    data = [["", im2], ["", TTemp]]
    TTemp = Table(data, colWidths=200)
    TTemp.setStyle(TableStyle([('ALIGN', (0, 0), (1, 1), 'RIGHT')]))
    Story.append(TTemp)
    Q.build(Story)
    respuesta.close()
    return respuesta
def obtener_certificado(request):
    try:
        usuario = get_object_or_404(Usuario, id=request.user.id)
    except ValueError:
        raise Http404()
    respuesta = HttpResponse(content_type='application/pdf')
    respuesta['Content-Disposition'] = 'filename="Certificado_antiguedad_laboral.pdf"'
    Q = SimpleDocTemplate(respuesta, rightMargin=72, leftMargin=72, topMargin=72, bottomMargin=18)
    Story = []
    styles = getSampleStyleSheet()
    ptext = 'Texto de prueba.'
    Story.append(Paragraph(ptext, styles["Normal"]))
    ptext = 'Rut usuario: ' + str(usuario.rut)
    Story.append(Paragraph(ptext, styles["Normal"]))
    ptext = 'Fecha Ingreso: ' + str(usuario.fecha_ingreso)
    Story.append(Paragraph(ptext, styles["Normal"]))
    ptext = 'Vencimiento Licencia de Conducir: ' + str(usuario.vencimiento_licencia_conducir)
    Story.append(Paragraph(ptext, styles["Normal"]))
    Q.build(Story)
    respuesta.close()
    return respuesta
def admin_course(request):
    response = HttpResponse()
    response['Content-Type'] = "text/javascript"
    page = request.POST.get('page', '1')
    rp = request.POST.get('rp', '10')
    sortName = request.POST.get('sortname', 'createTime')
    sortOrder = request.POST.get('sortorder', 'ASC')
    query = request.POST.get('query', '')
    query = urllib2.quote(query.encode("utf8"))
    json_result = bussness.findRoomByPage(rp, page, sortName, sortOrder)
    response.write(json_result)
    response.flush()
    response.close()
    return response
def test_response(self):
    filename = os.path.join(os.path.dirname(__file__), 'abc.txt')
    # file isn't closed until we close the response.
    file1 = open(filename)
    r = HttpResponse(file1)
    self.assertFalse(file1.closed)
    r.close()
    self.assertTrue(file1.closed)

    # don't automatically close file when we finish iterating the response.
    file1 = open(filename)
    r = HttpResponse(file1)
    self.assertFalse(file1.closed)
    list(r)
    self.assertFalse(file1.closed)
    r.close()
    self.assertTrue(file1.closed)

    # when multiple files are assigned as content, make sure they are all
    # closed with the response.
    file1 = open(filename)
    file2 = open(filename)
    r = HttpResponse(file1)
    r.content = file2
    self.assertFalse(file1.closed)
    self.assertFalse(file2.closed)
    r.close()
    self.assertTrue(file1.closed)
    self.assertTrue(file2.closed)
def test_response(self):
    filename = os.path.join(os.path.dirname(upath(__file__)), 'abc.txt')
    # file isn't closed until we close the response.
    file1 = open(filename)
    r = HttpResponse(file1)
    self.assertFalse(file1.closed)
    r.close()
    self.assertTrue(file1.closed)

    # don't automatically close file when we finish iterating the response.
    file1 = open(filename)
    r = HttpResponse(file1)
    self.assertFalse(file1.closed)
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", DeprecationWarning)
        list(r)
    self.assertFalse(file1.closed)
    r.close()
    self.assertTrue(file1.closed)

    # when multiple files are assigned as content, make sure they are all
    # closed with the response.
    file1 = open(filename)
    file2 = open(filename)
    r = HttpResponse(file1)
    r.content = file2
    self.assertFalse(file1.closed)
    self.assertFalse(file2.closed)
    r.close()
    self.assertTrue(file1.closed)
    self.assertTrue(file2.closed)
def test_response(self):
    filename = os.path.join(os.path.dirname(__file__), "abc.txt")
    # file isn't closed until we close the response.
    file1 = open(filename)
    r = HttpResponse(file1)
    self.assertFalse(file1.closed)
    r.close()
    self.assertTrue(file1.closed)

    # don't automatically close file when we finish iterating the response.
    file1 = open(filename)
    r = HttpResponse(file1)
    self.assertFalse(file1.closed)
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", PendingDeprecationWarning)
        list(r)
    self.assertFalse(file1.closed)
    r.close()
    self.assertTrue(file1.closed)

    # when multiple files are assigned as content, make sure they are all
    # closed with the response.
    file1 = open(filename)
    file2 = open(filename)
    r = HttpResponse(file1)
    r.content = file2
    self.assertFalse(file1.closed)
    self.assertFalse(file2.closed)
    r.close()
    self.assertTrue(file1.closed)
    self.assertTrue(file2.closed)
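The test_response variants above track the same invariant across Django versions: file-like content is closed no later than the response itself. A self-contained sketch of that invariant, using io.BytesIO so no fixture file is needed:

import io
from django.http import HttpResponse

def demo_close_semantics():
    buf = io.BytesIO(b'payload')
    response = HttpResponse(buf)
    response.close()
    # depending on the Django version, buf was closed either when its content
    # was consumed or here at close(); after close() it is closed either way
    assert buf.closed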
def __call__(self, message):
    # Set script prefix from message root_path, turning None into empty string
    set_script_prefix(message.get('root_path', '') or '')
    signals.request_started.send(sender=self.__class__, message=message)
    # Run request through view system
    try:
        request = self.request_class(message)
    except UnicodeDecodeError:
        logger.warning('Bad Request (UnicodeDecodeError)',
                       exc_info=sys.exc_info(),
                       extra={'status_code': 400})
        response = http.HttpResponseBadRequest()
    except RequestTimeout:
        # Parsing the request failed, so the response is a Request Timeout error
        response = HttpResponse("408 Request Timeout (upload too slow)", status=408)
    except RequestAborted:
        # Client closed connection on us mid request. Abort!
        return
    else:
        try:
            response = self.get_response(request)
            # Fix chunk size on file responses
            if isinstance(response, FileResponse):
                response.block_size = 1024 * 512
        except AsgiRequest.ResponseLater:
            # The view has promised something else will send a response at a later time
            return
        if response.status_code == HttpResponseLater.status_code:
            # equal to ResponseLater exception above
            return
    # Transform response into messages, which we yield back to caller
    for message in self.encode_response(response):
        # TODO: file_to_stream
        yield message
    # Close the response now we're done with it
    response.close()
def get_response(request):
    """Return information about HttpResponse object."""
    a_dict = {}
    m_dict = {}
    context = {}
    response = HttpResponse()

    # Attributes:
    response.content = "some content"
    a_dict["content"] = response.content
    a_dict["charset"] = response.charset
    a_dict["status_code"] = response.status_code
    a_dict["reason_phrase"] = response.reason_phrase
    a_dict["streaming"] = response.streaming
    a_dict["closed"] = response.closed

    # Methods:
    m_dict["__setitem__(header, value)"] = response.__setitem__("test", "Test")
    m_dict["__getitem__(header)"] = response.__getitem__("test")
    m_dict["__delitem__(header)"] = response.__delitem__("test")
    m_dict["has_header(header)"] = response.has_header("test")
    m_dict["setdefault(header, value)"] = response.setdefault("t", "test")
    m_dict["set_cookie(key, value='', max_age=None, expires=None, path='/', "
           "domain=None, secure=False, httponly=False, "
           "samesite=None)"] = response.set_cookie("some", "foo")
    m_dict["set_signed_cookie(key, value='', max_age=None, expires=None, path='/', "
           "domain=None, secure=False, httponly=False, "
           "samesite=None)"] = response.set_signed_cookie("foo", "foo")
    m_dict["delete_cookie(key, path='/', domain=None)"] = response.delete_cookie("foo")
    m_dict["close()"] = response.close()
    m_dict["write(content)"] = response.write("<p>CONTENT</p>")
    m_dict["flush()"] = response.flush()
    m_dict["tell()"] = response.tell()
    m_dict["getvalue()"] = response.getvalue()
    m_dict["readable()"] = response.readable()
    m_dict["seekable()"] = response.seekable()
    m_dict["writable()"] = response.writable()
    m_dict["writelines(lines)"] = response.writelines([" one", " two", " three"])
    m_dict["lines"] = response.getvalue()

    context["a_dict"] = a_dict
    context["m_dict"] = m_dict
    return render(request, "response_object/response.html", context)
def get_system_ridership_csv(request, trs_id):
    system = TransitSystem.objects.get(trs_id=trs_id)
    operations = OperationStats.objects.filter(transit_system=system).order_by('mode')
    response = HttpResponse(mimetype="text/csv")
    response['Content-Disposition'] = 'attachment; filename=%s_ridership_stats.csv' % system.name.replace(' ', '_')
    writer = csv.writer(response)
    writer.writerow([
        'mode', 'year', 'passenger miles travelled', 'unlinked passenger trips',
        'vehicle revenue miles', 'vehicle revenue hours', 'directional route miles',
        'fares', 'operating expense'
    ])
    for o in operations:
        writer.writerow((o.get_mode_display(), o.year, o.passenger_miles_traveled,
                         o.unlinked_passenger_trips, o.vehicle_revenue_miles,
                         o.vehicle_revenue_hours, o.directional_route_miles,
                         o.fares, o.operating_expense))
    response.close()
    return response
def api_report(
    self,
    request,
    reporttype=None,
    from_date=None,
    to_date=None,
    object_profile=None,
    filter_default=None,
    exclude_zero=None,
    interface_profile=None,
    selector=None,
    administrative_domain=None,
    columns=None,
    o_format=None,
    enable_autowidth=False,
    exclude_serial_change=False,
    **kwargs,
):
    def translate_row(row, cmap):
        return [row[i] for i in cmap]

    cols = [
        "object_name", "object_address", "object_adm_domain", "event_type",
        "sn_changed", "vendor_mac", "mac", "migrate_ts",
        "from_iface_name", "from_iface_down", "to_iface_name", "to_iface_down",
    ]
    header_row = [
        "OBJECT_NAME", "OBJECT_ADDRESS", "OBJECT_ADM_DOMAIN", "EVENT_TYPE",
        "SN_CHANGED", "VENDOR_MAC", "MAC", "MIGRATE_TS",
        "FROM_IFACE_NAME", "FROM_IFACE_DOWN", "TO_IFACE_NAME", "TO_IFACE_DOWN",
    ]
    if columns:
        cmap = []
        for c in columns.split(","):
            try:
                cmap += [cols.index(c)]
            except ValueError:
                continue
    else:
        cmap = list(range(len(cols)))
    r = [translate_row(header_row, cmap)]
    # Date Time Block
    if not from_date:
        from_date = datetime.datetime.now() - datetime.timedelta(days=1)
    else:
        from_date = datetime.datetime.strptime(from_date, "%d.%m.%Y")
    if not to_date or from_date == to_date:
        to_date = from_date + datetime.timedelta(days=1)
    else:
        to_date = datetime.datetime.strptime(to_date, "%d.%m.%Y") + datetime.timedelta(days=1)
    mos = self.get_report_object(user=request.user, adm=administrative_domain, selector=selector)
    mos_id = set(mos.order_by("bi_id").values_list("bi_id", flat=True))
    if interface_profile:
        interface_profile = InterfaceProfile.objects.get(id=interface_profile)
        iface_filter = (
            "dictGetString('interfaceattributes', 'profile', (managed_object, interface)) == '%s'"
            % interface_profile.name
        )
    else:
        iface_filter = "is_uni = 1"
    serials_changed = {}
    ch = connection()
    for row in ch.execute(
        DEVICE_MOVED_QUERY
        % (
            from_date.date().isoformat(),
            (to_date.date() - datetime.timedelta(days=1)).isoformat(),
        )
    ):
        serials_changed[int(row[0])] = row[1]
    for (
        mo,
        mac,
        mo_name,
        mo_address,
        mo_adm_domain,
        ifaces,
        migrate_ifaces,
        migrate_count,
    ) in ch.execute(
        MAC_MOVED_QUERY % (iface_filter, from_date.date().isoformat(), to_date.date().isoformat())
    ):
        if int(mo) not in mos_id:
            continue
        if exclude_serial_change and int(mo) in serials_changed:
            continue
        iface_from, iface_to, migrate = get_interface(ifaces)
        event_type = _("Migrate")
        if (
            rx_port_num.search(iface_from).group() == rx_port_num.search(iface_to).group()
            and iface_from != iface_to
        ):
            event_type = _("Migrate (Device Changed)")
        r += [
            translate_row(
                [
                    mo_name,
                    mo_address,
                    mo_adm_domain,
                    event_type,
                    _("Yes") if int(mo) in serials_changed else _("No"),
                    MACVendor.get_vendor(mac),
                    mac,
                    datetime.datetime.fromtimestamp(migrate[1]).isoformat(sep=" "),  # TS
                    iface_from,
                    "--",
                    iface_to,
                    "--",
                ],
                cmap,
            )
        ]
    filename = "macs_move_report_%s" % datetime.datetime.now().strftime("%Y%m%d")
    if o_format == "csv":
        response = HttpResponse(content_type="text/csv")
        response["Content-Disposition"] = 'attachment; filename="%s.csv"' % filename
        writer = csv.writer(response, dialect="excel", delimiter=",", quoting=csv.QUOTE_MINIMAL)
        writer.writerows(r)
        return response
    elif o_format == "csv_zip":
        response = BytesIO()
        f = TextIOWrapper(TemporaryFile(mode="w+b"), encoding="utf-8")
        writer = csv.writer(f, dialect="excel", delimiter=";", quotechar='"')
        writer.writerows(r)
        f.seek(0)
        with ZipFile(response, "w", compression=ZIP_DEFLATED) as zf:
            zf.writestr("%s.csv" % filename, f.read())
            zf.filename = "%s.csv.zip" % filename
        response.seek(0)
        response = HttpResponse(response.getvalue(), content_type="application/zip")
        response["Content-Disposition"] = 'attachment; filename="%s.csv.zip"' % filename
        return response
    elif o_format == "xlsx":
        response = BytesIO()
        wb = xlsxwriter.Workbook(response)
        cf1 = wb.add_format({"bottom": 1, "left": 1, "right": 1, "top": 1})
        ws = wb.add_worksheet("Alarms")
        max_column_data_length = {}
        for rn, x in enumerate(r):
            for cn, c in enumerate(x):
                if rn and (
                    r[0][cn] not in max_column_data_length
                    or len(str(c)) > max_column_data_length[r[0][cn]]
                ):
                    max_column_data_length[r[0][cn]] = len(str(c))
                ws.write(rn, cn, c, cf1)
        ws.autofilter(0, 0, rn, cn)
        ws.freeze_panes(1, 0)
        for cn, c in enumerate(r[0]):
            # Set column width
            width = get_column_width(c)
            if enable_autowidth and width < max_column_data_length[c]:
                width = max_column_data_length[c]
            ws.set_column(cn, cn, width=width)
        wb.close()
        response.seek(0)
        response = HttpResponse(response.getvalue(), content_type="application/vnd.ms-excel")
        response["Content-Disposition"] = 'attachment; filename="%s.xlsx"' % filename
        response.close()
        return response
def imprimir_liquidacion(request, pk):
    try:
        liquidacion = Liquidacion.objects.get(id=pk)
        usuario = Usuario.objects.get(rut=request.user.first_name)
    except ValueError:
        # If it does not exist, fall back to "page not found".
        raise Http404()
    response = HttpResponse(content_type='application/pdf')
    response['Content-Disposition'] = "attachment; filename=" + str(liquidacion.mes) + ".pdf"
    Q = SimpleDocTemplate(response, rightMargin=30, leftMargin=30, topMargin=20, bottomMargin=5)
    im = Image("/var/www/redtel/static/Redtel_logo.gif")
    Story = []
    styles = getSampleStyleSheet()
    t = Table([
        ['Empleador', 'SERVICIOS E INGENIERIA LTDA', '', '', '', '', ''],
        ['R.U.T.', '77.869.650-9', '', '', '', '', ''],
        ['Dirección', 'AVDA. PICARTE 3644, INTERIOR, VALDIVIA', '', '', '', '', ''],
        ['', '', '', '', im, '', ''],
        ['', '', 'LIQUIDACION DE SUELDO MES ' + liquidacion.mes.upper(), '', '', '', ''],
        ['', '', '', '', '', '', ''],
        ['NOMBRE', usuario.nombre, '', '', '', 'RUT', usuario.rut],
        ['C.COSTO', usuario.ccosto, '', '', '', 'AREA', usuario.zonal],
        ['CARGO', usuario.cargo, '', '', '', 'FECHA ING', usuario.fecha_ingreso],
        ['AFP', usuario.afp, '', '', '', 'SALUD', usuario.salud],
        ['DIAS TRABAJADOS', liquidacion.dias, 'LICENCIA', '', 'AUSENTE', '', ''],
        ['', '', '', '', '', '', ''],
        ['', '', '', 'HABERES', '', '', ''],
        ['', '', '', '', '', '', ''],
        ['SUELDO DEL MES', '', '', '', '', '', '$' + '{:,}'.format(liquidacion.sueldo)],
        ['GRATIFICACION', '', '', '', '', '', '$' + '{:,}'.format(liquidacion.gratificacion)],
        ['COMISION PRODUCCION', '', '', '', '', '', '$' + '{:,}'.format(liquidacion.bonos_impon)],
        ['HORAS EXTRAS', '', '', '', '', '', '$' + '{:,}'.format(liquidacion.h_extras)],
        ['TOTAL HABERES IMPONIBLES', '', '', '', '', '', '$' + '{:,}'.format(liquidacion.total_impon)],
        ['ASIGNACION VIATICOS', '', '', '', '', '', '$' + '{:,}'.format(liquidacion.colacion)],
        ['MOVILIZACION COMBUSTIBLE', '', '', '', '', '', '$' + '{:,}'.format(liquidacion.movilizacion)],
        ['TOTAL NO IMPONIBLE', '', '', '', '', '', '$' + '{:,}'.format(liquidacion.total_no_impon)],
        ['TOTAL HABERES', '', '', '', '', '', '$' + '{:,}'.format(liquidacion.total_haberes)],
        ['', '', '', '', '', '', ''],
        ['', '', '', 'DESCUENTOS', '', '', ''],
        ['', '', '', '', '', '', ''],
        ['AFP', '', '', '', '', '', '$' + '{:,}'.format(liquidacion.afp)],
        ['SALUD', '', '', '', '', '', '$' + '{:,}'.format(liquidacion.salud)],
        ['SEGURO CESANTIA', '', '', '', '', '', '$' + '{:,}'.format(liquidacion.seg_cesantia)],
        ['TOTAL DESCUENTOS LEGALES', '', '', '', '', '', ''],
        ['ANTICIPOS', '', '', '', '', '', ''],
        ['TOTAL OTROS DESCUENTOS', '', '', '', '', '', '$' + '{:,}'.format(liquidacion.otros_dsctos)],
        ['TOTAL DESCUENTOS', '', '', '', '', '', '$' + '{:,}'.format(liquidacion.total_dsctos)],
        ['', '', '', '', '', '', ''],
        ['', '', '', '', '', '', ''],
        ['LIQUIDO A PAGAR', '', '', '', '', '', '$' + '{:,}'.format(liquidacion.liquido_pago)],
        ['', '', '', '', '', '', ''],
        ['', '', '', '', '', '', ''],
        ['-------------', '', '', '', '', '', '--------------'],
        ['Firma Representante Legal', '', '', '', '', 'Recibí Conforme(Firma)', ''],
        ['JULIO ZARECHT ORTEGA', '', '', '', '', usuario.nombre, ''],
        ['R.U.T.:7.385.055-K', '', '', '', '', 'R.U.T.:' + usuario.rut, ''],
    ], colWidths=80, rowHeights=10)
    Story.append(t)
    Q.build(Story)
    response.close()
    return response
def api_report(
    self,
    request,
    o_format,
    is_managed=None,
    administrative_domain=None,
    selector=None,
    pool=None,
    segment=None,
    avail_status=False,
    columns=None,
    ids=None,
    enable_autowidth=False,
):
    def row(row):
        def qe(v):
            if v is None:
                return ""
            if isinstance(v, str):
                return smart_text(v)
            elif isinstance(v, datetime.datetime):
                return v.strftime("%Y-%m-%d %H:%M:%S")
            elif not isinstance(v, str):
                return str(v)
            else:
                return v

        return [qe(x) for x in row]

    def translate_row(row, cmap):
        return [row[i] for i in cmap]

    type_columns = ["Up/10G", "Up/1G", "Up/100M", "Down/-", "-"]
    cols = [
        "object1_admin_domain",
        # "id",
        "object1_name", "object1_address", "object1_platform", "object1_segment",
        "object1_tags", "object1_iface", "object1_descr", "object1_speed",
        "object2_admin_domain", "object2_name", "object2_address", "object2_platform",
        "object2_segment", "object2_tags", "object2_iface", "object2_descr",
        "object2_speed", "link_proto", "last_seen",
    ]
    header_row = [
        "OBJECT1_ADMIN_DOMAIN", "OBJECT1_NAME", "OBJECT1_ADDRESS", "OBJECT1_PLATFORM",
        "OBJECT1_SEGMENT", "OBJECT1_TAGS", "OBJECT1_IFACE", "OBJECT1_DESCR",
        "OBJECT1_SPEED", "OBJECT2_ADMIN_DOMAIN", "OBJECT2_NAME", "OBJECT2_ADDRESS",
        "OBJECT2_PLATFORM", "OBJECT2_SEGMENT", "OBJECT2_TAGS", "OBJECT2_IFACE",
        "OBJECT2_DESCR", "OBJECT2_SPEED", "LINK_PROTO", "LAST_SEEN",
    ]
    if columns:
        cmap = []
        for c in columns.split(","):
            try:
                cmap += [cols.index(c)]
            except ValueError:
                continue
    else:
        cmap = list(range(len(cols)))
    r = [translate_row(header_row, cmap)]
    if "interface_type_count" in columns.split(","):
        r[-1].extend(type_columns)
    p = Pool.get_by_name(pool or "default")
    mos = ManagedObject.objects.filter()
    if request.user.is_superuser and not administrative_domain and not selector and not segment:
        mos = ManagedObject.objects.filter(pool=p)
    if ids:
        mos = ManagedObject.objects.filter(id__in=[ids])
    if is_managed is not None:
        mos = ManagedObject.objects.filter(is_managed=is_managed)
    if pool:
        mos = mos.filter(pool=p)
    if not request.user.is_superuser:
        mos = mos.filter(administrative_domain__in=UserAccess.get_domains(request.user))
    if administrative_domain:
        ads = AdministrativeDomain.get_nested_ids(int(administrative_domain))
        mos = mos.filter(administrative_domain__in=ads)
    if selector:
        selector = ManagedObjectSelector.get_by_id(int(selector))
        mos = mos.filter(selector.Q)
    if segment:
        segment = NetworkSegment.objects.filter(id=segment).first()
        if segment:
            mos = mos.filter(segment__in=segment.get_nested_ids())
    mos_id = list(mos.values_list("id", flat=True))
    rld = ReportLinksDetail(mos_id)
    mo_resolv = {
        mo[0]: mo[1:]
        for mo in ManagedObject.objects.filter().values_list(
            "id",
            "administrative_domain__name",
            "name",
            "address",
            "segment",
            "platform",
            "labels",
        )
    }
    for link in rld.out:
        if len(rld.out[link]) != 2:
            # Multilink or bad link
            continue
        s1, s2 = rld.out[link]
        seg1, seg2 = None, None
        if "object1_segment" in columns.split(",") or "object2_segment" in columns.split(","):
            seg1, seg2 = mo_resolv[s1["mo"][0]][3], mo_resolv[s2["mo"][0]][3]
        plat1, plat2 = None, None
        if "object1_platform" in columns.split(",") or "object2_platform" in columns.split(","):
            plat1, plat2 = mo_resolv[s1["mo"][0]][4], mo_resolv[s2["mo"][0]][4]
        r += [
            translate_row(
                row([
                    mo_resolv[s1["mo"][0]][0],
                    mo_resolv[s1["mo"][0]][1],
                    mo_resolv[s1["mo"][0]][2],
                    "" if not plat1 else Platform.get_by_id(plat1),
                    "" if not seg1 else NetworkSegment.get_by_id(seg1),
                    ";".join(mo_resolv[s1["mo"][0]][5] or []),
                    s1["iface_n"][0],
                    s1.get("iface_descr")[0] if s1.get("iface_descr") else "",
                    s1.get("iface_speed")[0] if s1.get("iface_speed") else 0,
                    mo_resolv[s2["mo"][0]][0],
                    mo_resolv[s2["mo"][0]][1],
                    mo_resolv[s2["mo"][0]][2],
                    "" if not plat2 else Platform.get_by_id(plat2),
                    "" if not seg2 else NetworkSegment.get_by_id(seg2),
                    ";".join(mo_resolv[s2["mo"][0]][5] or []),
                    s2["iface_n"][0],
                    s2.get("iface_descr")[0] if s2.get("iface_descr") else "",
                    s2.get("iface_speed")[0] if s2.get("iface_speed") else 0,
                    s2.get("dis_method", ""),
                    s2.get("last_seen", ""),
                ]),
                cmap,
            )
        ]
    filename = "links_detail_report_%s" % datetime.datetime.now().strftime("%Y%m%d")
    if o_format == "csv":
        response = HttpResponse(content_type="text/csv")
        response["Content-Disposition"] = 'attachment; filename="%s.csv"' % filename
        writer = csv.writer(response, dialect="excel", delimiter=",", quoting=csv.QUOTE_MINIMAL)
        writer.writerows(r)
        return response
    elif o_format == "csv_zip":
        response = BytesIO()
        f = TextIOWrapper(TemporaryFile(mode="w+b"), encoding="utf-8")
        writer = csv.writer(f, dialect="excel", delimiter=";", quotechar='"')
        writer.writerows(r)
        f.seek(0)
        with ZipFile(response, "w", compression=ZIP_DEFLATED) as zf:
            zf.writestr("%s.csv" % filename, f.read())
            zf.filename = "%s.csv.zip" % filename
        response.seek(0)
        response = HttpResponse(response.getvalue(), content_type="application/zip")
        response["Content-Disposition"] = 'attachment; filename="%s.csv.zip"' % filename
        return response
    elif o_format == "xlsx":
        response = BytesIO()
        wb = xlsxwriter.Workbook(response)
        cf1 = wb.add_format({"bottom": 1, "left": 1, "right": 1, "top": 1})
        ws = wb.add_worksheet("Objects")
        max_column_data_length = {}
        for rn, x in enumerate(r):
            for cn, c in enumerate(x):
                if rn and (
                    r[0][cn] not in max_column_data_length
                    or len(str(c)) > max_column_data_length[r[0][cn]]
                ):
                    max_column_data_length[r[0][cn]] = len(str(c))
                ws.write(rn, cn, c, cf1)
        ws.autofilter(0, 0, rn, cn)
        ws.freeze_panes(1, 0)
        for cn, c in enumerate(r[0]):
            # Set column width
            width = get_column_width(c)
            if enable_autowidth and width < max_column_data_length[c]:
                width = max_column_data_length[c]
            ws.set_column(cn, cn, width=width)
        wb.close()
        response.seek(0)
        response = HttpResponse(response.getvalue(), content_type="application/vnd.ms-excel")
        response["Content-Disposition"] = 'attachment; filename="%s.xlsx"' % filename
        response.close()
        return response
def api_report(
    self,
    request,
    reporttype=None,
    from_date=None,
    to_date=None,
    object_profile=None,
    filter_default=None,
    exclude_zero=None,
    interface_profile=None,
    selector=None,
    administrative_domain=None,
    columns=None,
    o_format=None,
    enable_autowidth=False,
    **kwargs
):
    def translate_row(row, cmap):
        return [row[i] for i in cmap]

    map_table = {
        "load_interfaces": r"/Interface\s\|\sLoad\s\|\s[In|Out]/",
        "load_cpu": r"/[CPU|Memory]\s\|\sUsage/",
        "errors": r"/Interface\s\|\s[Errors|Discards]\s\|\s[In|Out]/",
        "ping": r"/Ping\s\|\sRTT/",
    }
    cols = [
        "id", "object_name", "object_address", "object_platform",
        "object_adm_domain", "object_segment",
        # "object_hostname",
        # "object_status",
        # "profile_name",
        # "object_profile",
        # "object_vendor",
        "iface_name", "iface_description", "iface_speed",
        "load_in", "load_in_p", "load_out", "load_out_p",
        "errors_in", "errors_out",
        "slot", "cpu_usage", "memory_usage",
        "ping_rtt", "ping_attempts",
        "interface_flap", "interface_load_url",
    ]
    header_row = [
        "ID", "OBJECT_NAME", "OBJECT_ADDRESS", "OBJECT_PLATFORM",
        "OBJECT_ADM_DOMAIN", "OBJECT_SEGMENT",
        "IFACE_NAME", "IFACE_DESCRIPTION", "IFACE_SPEED",
        "LOAD_IN", "LOAD_IN_P", "LOAD_OUT", "LOAD_OUT_P",
        "ERRORS_IN", "ERRORS_OUT",
        "CPU_USAGE", "MEMORY_USAGE",
        "PING_RTT", "PING_ATTEMPTS",
        "INTERFACE_FLAP", "INTERFACE_LOAD_URL",
    ]
    if columns:
        cmap = []
        for c in columns.split(","):
            try:
                cmap += [cols.index(c)]
            except ValueError:
                continue
    else:
        cmap = list(range(len(cols)))
    columns_order = columns.split(",")
    columns_filter = set(columns_order)
    r = [translate_row(header_row, cmap)]
    object_columns = [c for c in columns_order if c.startswith("object")]
    # Date Time Block
    if not from_date:
        from_date = datetime.datetime.now() - datetime.timedelta(days=1)
    else:
        from_date = datetime.datetime.strptime(from_date, "%d.%m.%Y")
    if not to_date or from_date == to_date:
        to_date = from_date + datetime.timedelta(days=1)
    else:
        to_date = datetime.datetime.strptime(to_date, "%d.%m.%Y") + datetime.timedelta(days=1)
    ts_from_date = time.mktime(from_date.timetuple())
    ts_to_date = time.mktime(to_date.timetuple())
    # Load managed objects
    mos = ManagedObject.objects.filter(is_managed=True)
    if not request.user.is_superuser:
        mos = mos.filter(administrative_domain__in=UserAccess.get_domains(request.user))
    if selector:
        mos = mos.filter(ManagedObjectSelector.objects.get(id=int(selector)).Q)
    if administrative_domain:
        mos = mos.filter(
            administrative_domain__in=AdministrativeDomain.get_nested_ids(int(administrative_domain))
        )
    if object_profile:
        mos = mos.filter(object_profile=object_profile)
    d_url = {
        "path": "/ui/grafana/dashboard/script/report.js",
        "rname": map_table[reporttype],
        "from": str(int(ts_from_date * 1000)),
        "to": str(int(ts_to_date * 1000)),
        # o.name.replace("#", "%23")
        "biid": "",
        "oname": "",
        "iname": "",
    }
    report_map = {
        "load_interfaces": {
            "url": "%(path)s?title=interface&biid=%(biid)s"
                   "&obj=%(oname)s&iface=%(iname)s&from=%(from)s&to=%(to)s",
            "q_group": ["interface"],
            "q_select": {
                (0, "managed_object", "id"): "managed_object",
                (1, "path", "iface_name"): "arrayStringConcat(path)",
            },
        },
        "errors": {
            "url": "%(path)s?title=errors&biid=%(biid)s&obj=%(oname)s&iface=%(iname)s&from=%(from)s&to=%(to)s",
            "q_group": ["interface"],
        },
        "load_cpu": {
            "url": "%(path)s?title=cpu&biid=%(biid)s&obj=%(oname)s&from=%(from)s&to=%(to)s",
            "q_select": {
                (0, "managed_object", "id"): "managed_object",
                (1, "path", "slot"): "arrayStringConcat(path)",
            },
        },
        "ping": {
            "url": "%(path)s?title=ping&biid=%(biid)s&obj=%(oname)s&from=%(from)s&to=%(to)s",
            "q_select": {(0, "managed_object", "id"): "managed_object"},
        },
    }
    query_map = {
        "iface_description": (
            "",
            "iface_description",
            "dictGetString('interfaceattributes','description' , (managed_object, arrayStringConcat(path)))",
        ),
        "iface_speed": (
            "speed",
            "iface_speed",
            "if(max(speed) = 0, dictGetUInt64('interfaceattributes', 'in_speed', "
            "(managed_object, arrayStringConcat(path))), max(speed))",
        ),
        "load_in": ("load_in", "l_in", "round(quantile(0.90)(load_in), 0)"),
        "load_in_p": (
            "load_in",
            "l_in_p",
            "replaceOne(toString(round(quantile(0.90)(load_in) / "
            "if(max(speed) = 0, dictGetUInt64('interfaceattributes', 'in_speed', "
            "(managed_object, arrayStringConcat(path))), max(speed)), 4) * 100), '.', ',')",
        ),
        "load_out": ("load_out", "l_out", "round(quantile(0.90)(load_out), 0)"),
        "load_out_p": (
            "load_out",
            "l_out_p",
            "replaceOne(toString(round(quantile(0.90)(load_out) / "
            "if(max(speed) = 0, dictGetUInt64('interfaceattributes', 'in_speed', "
            "(managed_object, arrayStringConcat(path))), max(speed)), 4) * 100), '.', ',')",
        ),
        "errors_in": ("errors_in", "err_in", "quantile(0.90)(errors_in)"),
        "errors_out": ("errors_out", "err_out", "quantile(0.90)(errors_out)"),
        "cpu_usage": ("usage", "cpu_usage", "quantile(0.90)(usage)"),
        "ping_rtt": ("rtt", "ping_rtt", "round(quantile(0.90)(rtt) / 1000, 2)"),
        "ping_attempts": ("attempts", "ping_attempts", "avg(attempts)"),
    }
    query_fields = []
    for c in report_map[reporttype]["q_select"]:
        query_fields += [c[2]]
    field_shift = len(query_fields)  # deny replacing field
    for c in columns.split(","):
        if c not in query_map:
            continue
        field, alias, func = query_map[c]
        report_map[reporttype]["q_select"][(columns_order.index(c) + field_shift, field, alias)] = func
        query_fields += [c]
    metrics_attrs = namedtuple("METRICSATTRs", query_fields)
    mo_attrs = namedtuple("MOATTRs", [c for c in cols if c.startswith("object")])
    moss = {}
    for row in mos.values_list(
        "bi_id", "name", "address", "platform", "administrative_domain__name", "segment"
    ):
        moss[row[0]] = mo_attrs(
            *[
                row[1],
                row[2],
                str(Platform.get_by_id(row[3]) if row[3] else ""),
                row[4],
                str(NetworkSegment.get_by_id(row[5])) if row[5] else "",
            ]
        )
    url = report_map[reporttype].get("url", "")
    report_metric = self.metric_source[reporttype](tuple(sorted(moss)), from_date, to_date, columns=None)
    report_metric.SELECT_QUERY_MAP = report_map[reporttype]["q_select"]
    if exclude_zero and reporttype == "load_interfaces":
        report_metric.CUSTOM_FILTER["having"] += ["max(load_in) != 0 AND max(load_out) != 0"]
    if interface_profile:
        interface_profile = InterfaceProfile.objects.filter(id=interface_profile).first()
        report_metric.CUSTOM_FILTER["having"] += [
            "dictGetString('interfaceattributes', 'profile', "
            "(managed_object, arrayStringConcat(path))) = '%s'" % interface_profile.name
        ]
    # OBJECT_PLATFORM, ADMIN_DOMAIN, SEGMENT, OBJECT_HOSTNAME
    for row in report_metric.do_query():
        mm = metrics_attrs(*row)
        mo = moss[int(mm.id)]
        res = []
        for y in columns_order:
            if y in object_columns:
                res += [getattr(mo, y)]
            else:
                res += [getattr(mm, y)]
        if "interface_load_url" in columns_filter:
            d_url["biid"] = mm.id
            d_url["oname"] = mo[2].replace("#", "%23")
            res.insert(columns_order.index("interface_load_url"), url % d_url)
        r += [res]
    filename = "metrics_detail_report_%s" % datetime.datetime.now().strftime("%Y%m%d")
    if o_format == "csv":
        response = HttpResponse(content_type="text/csv")
        response["Content-Disposition"] = 'attachment; filename="%s.csv"' % filename
        writer = csv.writer(response, dialect="excel", delimiter=",", quoting=csv.QUOTE_MINIMAL)
        writer.writerows(r)
        return response
    elif o_format == "xlsx":
        response = StringIO()
        wb = xlsxwriter.Workbook(response)
        cf1 = wb.add_format({"bottom": 1, "left": 1, "right": 1, "top": 1})
        ws = wb.add_worksheet("Alarms")
        max_column_data_length = {}
        for rn, x in enumerate(r):
            for cn, c in enumerate(x):
                if rn and (
                    r[0][cn] not in max_column_data_length
                    or len(str(c)) > max_column_data_length[r[0][cn]]
                ):
                    max_column_data_length[r[0][cn]] = len(str(c))
                ws.write(rn, cn, c, cf1)
        ws.autofilter(0, 0, rn, cn)
        ws.freeze_panes(1, 0)
        for cn, c in enumerate(r[0]):
            # Set column width
            width = get_column_width(c)
            if enable_autowidth and width < max_column_data_length[c]:
                width = max_column_data_length[c]
            ws.set_column(cn, cn, width=width)
        wb.close()
        response.seek(0)
        response = HttpResponse(response.getvalue(), content_type="application/vnd.ms-excel")
        response["Content-Disposition"] = 'attachment; filename="%s.xlsx"' % filename
        response.close()
        return response
def api_report(
    self,
    request,
    from_date,
    to_date,
    o_format,
    min_duration=0,
    max_duration=0,
    min_objects=0,
    min_subscribers=0,
    segment=None,
    administrative_domain=None,
    selector=None,
    ex_selector=None,
    columns=None,
    source="both",
    alarm_class=None,
    subscribers=None,
    enable_autowidth=False,
):
    def row(row, container_path, segment_path):
        def qe(v):
            # Normalize every cell to text; datetimes get a fixed format
            if v is None:
                return ""
            if isinstance(v, datetime.datetime):
                return v.strftime("%Y-%m-%d %H:%M:%S")
            return smart_text(v)

        r = [qe(x) for x in row]
        # Pad or trim the path columns to their fixed depths
        if len(container_path) < self.CONTAINER_PATH_DEPTH:
            container_path += [""] * (self.CONTAINER_PATH_DEPTH - len(container_path))
        else:
            container_path = container_path[: self.CONTAINER_PATH_DEPTH]
        if len(segment_path) < self.SEGMENT_PATH_DEPTH:
            segment_path += [""] * (self.SEGMENT_PATH_DEPTH - len(segment_path))
        else:
            segment_path = segment_path[: self.SEGMENT_PATH_DEPTH]
        return r + container_path + segment_path

    def translate_row(row, cmap):
        return [row[i] for i in cmap]

    cols = (
        [
            "id", "root_id", "from_ts", "to_ts", "duration_sec",
            "object_name", "object_address", "object_hostname",
            "object_profile", "object_admdomain", "object_platform",
            "object_version", "alarm_class", "alarm_subject", "maintenance",
            "objects", "subscribers", "tt", "escalation_ts", "location",
            "container_address",
        ]
        + ["container_%d" % i for i in range(self.CONTAINER_PATH_DEPTH)]
        + ["segment_%d" % i for i in range(self.SEGMENT_PATH_DEPTH)]
    )
    header_row = (
        [
            "ID", _("ROOT_ID"), _("FROM_TS"), _("TO_TS"), _("DURATION_SEC"),
            _("OBJECT_NAME"), _("OBJECT_ADDRESS"), _("OBJECT_HOSTNAME"),
            _("OBJECT_PROFILE"), _("OBJECT_ADMDOMAIN"), _("OBJECT_PLATFORM"),
            _("OBJECT_VERSION"), _("ALARM_CLASS"), _("ALARM_SUBJECT"),
            _("MAINTENANCE"), _("OBJECTS"), _("SUBSCRIBERS"), _("TT"),
            _("ESCALATION_TS"), _("LOCATION"), _("CONTAINER_ADDRESS"),
        ]
        + ["CONTAINER_%d" % i for i in range(self.CONTAINER_PATH_DEPTH)]
        + ["SEGMENT_%d" % i for i in range(self.SEGMENT_PATH_DEPTH)]
    )
    if columns:
        cmap = []
        for c in columns.split(","):
            try:
                cmap += [cols.index(c)]
            except ValueError:
                continue
    else:
        cmap = list(range(len(cols)))
    subscribers_profile = self.default_subscribers_profile
    if subscribers:
        subscribers_profile = set(
            SubscriberProfile.objects.filter(id__in=subscribers.split(",")).scalar("id")
        )
    r = [translate_row(header_row, cmap)]
    fd = datetime.datetime.strptime(to_date, "%d.%m.%Y") + datetime.timedelta(days=1)
    match = {
        "timestamp": {
            "$gte": datetime.datetime.strptime(from_date, "%d.%m.%Y"),
            "$lte": fd,
        }
    }
    match_duration = {"duration": {"$gte": min_duration}}
    if max_duration:
        match_duration = {"duration": {"$gte": min_duration, "$lte": max_duration}}
    mos = ManagedObject.objects.filter(is_managed=True)
    if segment:
        try:
            match["segment_path"] = bson.ObjectId(segment)
        except bson.errors.InvalidId:
            pass
    ads = []
    if administrative_domain:
        if administrative_domain.isdigit():
            administrative_domain = [int(administrative_domain)]
            ads = AdministrativeDomain.get_nested_ids(administrative_domain[0])
    if not request.user.is_superuser:
        user_ads = UserAccess.get_domains(request.user)
        if administrative_domain and ads:
            if administrative_domain[0] not in user_ads:
                ads = list(set(ads) & set(user_ads))
                if not ads:
                    return HttpResponse(
                        "<html><body>Permission denied: Invalid Administrative Domain</body></html>"
                    )
        else:
            ads = user_ads
    if ads:
        mos = mos.filter(administrative_domain__in=ads)
    if selector:
        selector = ManagedObjectSelector.get_by_id(int(selector))
        mos = mos.filter(selector.Q)
    if ex_selector:
        ex_selector = ManagedObjectSelector.get_by_id(int(ex_selector))
        mos = mos.exclude(ex_selector.Q)
    # Working if Administrative domain is set
    if ads:
        try:
            match["adm_path"] = {"$in": ads}
            # @todo Support more than two hierarchy levels
        except bson.errors.InvalidId:
            pass
    mos_id = list(mos.order_by("id").values_list("id", flat=True))
    mo_hostname = {}
    maintenance = []
    if mos_id and (selector or ex_selector):
        match["managed_object"] = {"$in": mos_id}
    if "maintenance" in columns.split(","):
        maintenance = Maintenance.currently_affected()
    if "object_hostname" in columns.split(","):
        mo_hostname = ReportObjectsHostname1(sync_ids=mos_id)
        mo_hostname = mo_hostname.get_dictionary()
    moss = ReportAlarmObjects(mos_id).get_all()
    # container_lookup = ReportContainer(mos_id)
    container_lookup = None
    subject = "alarm_subject" in columns
    loc = AlarmApplication([])
    if source in ["archive", "both"]:
        # Archived Alarms
        for a in (
            ArchivedAlarm._get_collection()
            .with_options(read_preference=ReadPreference.SECONDARY_PREFERRED)
            .aggregate([
                {"$match": match},
                {
                    "$addFields": {
                        "duration": {
                            "$divide": [
                                {"$subtract": ["$clear_timestamp", "$timestamp"]},
                                1000,
                            ]
                        }
                    }
                },
                {"$match": match_duration},
                # {"$sort": {"timestamp": 1}},
            ])
        ):
            if int(a["managed_object"]) not in moss:
                continue
            dt = a["clear_timestamp"] - a["timestamp"]
            duration = int(dt.total_seconds())
            total_objects = sum(ss["summary"] for ss in a["total_objects"])
            if min_objects and total_objects < min_objects:
                continue
            total_subscribers = sum(
                ss["summary"]
                for ss in a["total_subscribers"]
                if subscribers_profile and ss["profile"] in subscribers_profile
            )
            if min_subscribers and total_subscribers < min_subscribers:
                continue
            if "segment_" in columns.split(",") or "container_" in columns.split(","):
                path = ObjectPath.get_path(a["managed_object"])
                if path:
                    segment_path = [
                        NetworkSegment.get_by_id(s).name
                        for s in path.segment_path
                        if NetworkSegment.get_by_id(s)
                    ]
                    container_path = [
                        Object.get_by_id(s).name
                        for s in path.container_path
                        if Object.get_by_id(s)
                    ]
                else:
                    segment_path, container_path = [], []
            else:
                segment_path, container_path = [], []
            mo = moss[a["managed_object"]]
            r += [
                translate_row(
                    row(
                        [
                            smart_text(a["_id"]),
                            smart_text(a["root"]) if a.get("root") else "",
                            a["timestamp"],
                            a["clear_timestamp"],
                            smart_text(duration),
                            mo[0],
                            mo[1],
                            smart_text(mo_hostname.get(a["managed_object"], "")),
                            Profile.get_by_id(mo[3]).name if mo[5] else "",
                            mo[6],
                            Platform.get_by_id(mo[9]) if mo[9] else "",
                            smart_text(Firmware.get_by_id(mo[10]).version) if mo[10] else "",
                            AlarmClass.get_by_id(a["alarm_class"]).name,
                            ArchivedAlarm.objects.get(id=a["_id"]).subject if subject else "",
                            "",
                            total_objects,
                            total_subscribers,
                            a.get("escalation_tt"),
                            a.get("escalation_ts"),
                            ", ".join(
                                ll
                                for ll in (loc.location(mo[5]) if mo[5] is not None else "")
                                if ll
                            ),
                            container_lookup[a["managed_object"]].get("text", "")
                            if container_lookup
                            else "",
                        ],
                        container_path,
                        segment_path,
                    ),
                    cmap,
                )
            ]
    # Active Alarms
    if source in ["active", "both"]:
        datenow = datetime.datetime.now()
        for a in (
            ActiveAlarm._get_collection()
            .with_options(read_preference=ReadPreference.SECONDARY_PREFERRED)
            .aggregate([
                {"$match": match},
                {
                    "$addFields": {
                        "duration": {"$divide": [{"$subtract": [fd, "$timestamp"]}, 1000]}
                    }
                },
                {"$match": match_duration},
                # {"$sort": {"timestamp": 1}},
            ])
        ):
            dt = datenow - a["timestamp"]
            duration = int(dt.total_seconds())
            total_objects = sum(ss["summary"] for ss in a["total_objects"])
            if min_objects and total_objects < min_objects:
                continue
            total_subscribers = sum(
                ss["summary"]
                for ss in a["total_subscribers"]
                if subscribers_profile and ss["profile"] in subscribers_profile
            )
            if min_subscribers and total_subscribers < min_subscribers:
                continue
            if "segment_" in columns.split(",") or "container_" in columns.split(","):
                path = ObjectPath.get_path(a["managed_object"])
                if path:
                    segment_path = [
                        NetworkSegment.get_by_id(s).name
                        for s in path.segment_path
                        if NetworkSegment.get_by_id(s)
                    ]
                    container_path = [
                        Object.get_by_id(s).name
                        for s in path.container_path
                        if Object.get_by_id(s)
                    ]
                else:
                    segment_path, container_path = [], []
            else:
                segment_path, container_path = [], []
            mo = moss[a["managed_object"]]
            r += [
                translate_row(
                    row(
                        [
                            smart_text(a["_id"]),
                            smart_text(a["root"]) if a.get("root") else "",
                            a["timestamp"],
                            "",  # active alarms have no clear_timestamp
                            smart_text(duration),
                            mo[0],
                            mo[1],
                            smart_text(mo_hostname.get(a["managed_object"], "")),
                            Profile.get_by_id(mo[3]).name if mo[5] else "",
                            mo[6],
                            Platform.get_by_id(mo[9]) if mo[9] else "",
                            smart_text(Firmware.get_by_id(mo[10]).version) if mo[10] else "",
                            AlarmClass.get_by_id(a["alarm_class"]).name,
                            ActiveAlarm.objects.get(id=a["_id"]).subject if subject else "",
                            "Yes" if a["managed_object"] in maintenance else "No",
                            total_objects,
                            total_subscribers,
                            a.get("escalation_tt"),
                            a.get("escalation_ts"),
                            ", ".join(
                                ll
                                for ll in (loc.location(mo[5]) if mo[5] is not None else "")
                                if ll
                            ),
                            container_lookup[a["managed_object"]].get("text", "")
                            if container_lookup
                            else "",
                        ],
                        container_path,
                        segment_path,
                    ),
                    cmap,
                )
            ]
    if source in ["long_archive"]:
        o_format = "csv_zip"
        columns = [
            "ALARM_ID", "MO_ID", "OBJECT_PROFILE", "VENDOR", "PLATFORM", "VERSION",
            "OPEN_TIMESTAMP", "CLOSE_TIMESTAMP", "LOCATION", "", "POOL", "ADM_DOMAIN",
            "MO_NAME", "IP", "ESCALATION_TT", "DURATION", "SEVERITY", "REBOOTS",
        ]
        from noc.core.clickhouse.connect import connection

        ch = connection()
        fd = datetime.datetime.strptime(from_date, "%d.%m.%Y")
        td = datetime.datetime.strptime(to_date, "%d.%m.%Y") + datetime.timedelta(days=1)
        if td - fd > datetime.timedelta(days=390):
            return HttpResponseBadRequest(
                _("Reports over one year are not allowed. If needed, request it from the Administrator")
            )
        ac = AlarmClass.objects.get(name="NOC | Managed Object | Ping Failed")
        subs = ", ".join(
            "subscribers.summary[indexOf(subscribers.profile, '%s')] as `%s`" % (sp.bi_id, sp.name)
            for sp in SubscriberProfile.objects.filter().order_by("name")
        )
        if subs:
            columns += [sp.name for sp in SubscriberProfile.objects.filter().order_by("name")]
        r = ch.execute(
            LONG_ARCHIVE_QUERY
            % (
                ", %s" % subs if subs else "",
                fd.date().isoformat(),
                td.date().isoformat(),
                ac.bi_id,
            )
        )
    filename = "alarms.csv"
    if o_format == "csv":
        response = HttpResponse(content_type="text/csv")
        response["Content-Disposition"] = 'attachment; filename="%s"' % filename
        writer = csv.writer(response)
        writer.writerows(r)
        return response
    elif o_format == "csv_zip":
        response = BytesIO()
        f = TextIOWrapper(TemporaryFile(mode="w+b"), encoding="utf-8")
        writer = csv.writer(f, dialect="excel", delimiter=";", quotechar='"')
        writer.writerow(columns)
        writer.writerows(r)
        f.seek(0)
        with ZipFile(response, "w", compression=ZIP_DEFLATED) as zf:
            zf.writestr(filename, f.read())
            zf.filename = "%s.zip" % filename
        response.seek(0)
        response = HttpResponse(response.getvalue(), content_type="application/zip")
        response["Content-Disposition"] = 'attachment; filename="%s.zip"' % filename
        return response
    elif o_format == "xlsx":
        response = BytesIO()
        wb = xlsxwriter.Workbook(response)
        cf1 = wb.add_format({"bottom": 1, "left": 1, "right": 1, "top": 1})
        ws = wb.add_worksheet("Alarms")
        max_column_data_length = {}
        for rn, x in enumerate(r):
            for cn, c in enumerate(x):
                if rn and (
                    r[0][cn] not in max_column_data_length
                    or len(str(c)) > max_column_data_length[r[0][cn]]
                ):
                    max_column_data_length[r[0][cn]] = len(str(c))
                ws.write(rn, cn, c, cf1)
        ws.autofilter(0, 0, rn, cn)
        ws.freeze_panes(1, 0)
        for cn, c in enumerate(r[0]):
            # Set column width
            width = get_column_width(c)
            if enable_autowidth and width < max_column_data_length[c]:
                width = max_column_data_length[c]
            ws.set_column(cn, cn, width=width)
        wb.close()
        response.seek(0)
        response = HttpResponse(response.getvalue(), content_type="application/vnd.ms-excel")
        response["Content-Disposition"] = 'attachment; filename="alarms.xlsx"'
        response.close()
        return response
def api_report(
    self,
    request,
    o_format,
    administrative_domain=None,
    selector=None,
    interface_profile=None,
    zero=None,
    def_profile=None,
    columns=None,
    enable_autowidth=False,
):
    def humanize_speed(speed):
        # Render a speed value as a short human-readable label
        if not speed:
            return "-"
        for t, n in [(1000000, "G"), (1000, "M"), (1, "k")]:
            if speed >= t:
                if speed // t * t == speed:
                    return "%d%s" % (speed // t, n)
                else:
                    return "%.2f%s" % (float(speed) / t, n)
        return str(speed)

    def row(row):
        def qe(v):
            if v is None:
                return ""
            if isinstance(v, unicode):
                return v.encode("utf-8")
            elif isinstance(v, datetime.datetime):
                return v.strftime("%Y-%m-%d %H:%M:%S")
            elif not isinstance(v, str):
                return str(v)
            else:
                return v

        return [qe(x) for x in row]

    def translate_row(row, cmap):
        return [row[i] for i in cmap]

    cols = [
        "object_name", "object_address", "object_model", "object_software",
        "object_port_name", "object_port_profile_name", "object_port_status",
        "object_link_status", "object_port_speed", "object_port_duplex",
        "object_port_untagged_vlan", "object_port_tagged_vlans",
    ]
    header_row = [
        "MANAGED_OBJECT", "OBJECT_ADDRESS", "OBJECT_MODEL", "OBJECT_SOFTWARE",
        "PORT_NAME", "PORT_PROFILE_NAME", "PORT_STATUS", "LINK_STATUS",
        "PORT_SPEED", "PORT_DUPLEX", "PORT_UNTAGGED_VLAN", "PORT_TAGGED_VLANS",
    ]
    if columns:
        cmap = []
        for c in columns.split(","):
            try:
                cmap += [cols.index(c)]
            except ValueError:
                continue
    else:
        cmap = list(range(len(cols)))
    r = [translate_row(header_row, cmap)]
    mo = {}
    if_p = {}
    DUPLEX = {True: "Full", False: "Half"}
    for ifp in InterfaceProfile.objects.filter():
        if_p[ifp.id] = {"name": ifp.name}
    mos = ManagedObject.objects.filter(is_managed=True)
    if not request.user.is_superuser:
        mos = mos.filter(administrative_domain__in=UserAccess.get_domains(request.user))
    if administrative_domain:
        ads = AdministrativeDomain.get_nested_ids(int(administrative_domain))
        mos = mos.filter(administrative_domain__in=ads)
    if selector:
        selector = ManagedObjectSelector.get_by_id(int(selector))
        mos = mos.filter(selector.Q)
    for o in mos:
        mo[o.id] = {
            "type": "managedobject",
            "id": str(o.id),
            "name": o.name,
            "status": o.is_managed,
            "address": o.address,
            "vendor": o.vendor,
            "version": o.version,
            "platform": o.platform,
        }
    mos_id = list(mos.values_list("id", flat=True))
    rld = ReportInterfaceStatus(mos_id, zero, def_profile, interface_profile)
    for i in rld.out:
        untag, tagged = "", ""
        if i["subs"]:
            untag = i["subs"][0].get("untagged_vlan", "")
            tagged = list_to_ranges(i["subs"][0].get("tagged_vlans", []))
        mo_ = mo[i["managed_object"]]
        r += [
            translate_row(
                row([
                    mo_["name"],
                    mo_["address"],
                    "%s %s" % (str(mo_["vendor"]), str(mo_["platform"])),
                    str(mo_["version"]),
                    i["name"],
                    if_p[i["profile"]]["name"],
                    "UP" if i["admin_status"] is True else "Down",
                    "UP" if "oper_status" in i and i["oper_status"] is True else "Down",
                    humanize_speed(i["in_speed"]) if "in_speed" in i else "-",
                    DUPLEX.get(i["full_duplex"])
                    if "full_duplex" in i and "in_speed" in i
                    else "-",
                    untag,
                    tagged,
                ]),
                cmap,
            )
        ]
    filename = "interface_status_report_%s" % datetime.datetime.now().strftime("%Y%m%d")
    if o_format == "csv":
        response = HttpResponse(content_type="text/csv")
        response["Content-Disposition"] = 'attachment; filename="%s.csv"' % filename
        writer = csv.writer(response, dialect="excel", delimiter=";")
        writer.writerows(r)
        return response
    elif o_format == "xlsx":
        response = StringIO()
        wb = xlsxwriter.Workbook(response)
        cf1 = wb.add_format({"bottom": 1, "left": 1, "right": 1, "top": 1})
        ws = wb.add_worksheet("Objects")
        max_column_data_length = {}
        for rn, x in enumerate(r):
            for cn, c in enumerate(x):
                if rn and (
                    r[0][cn] not in max_column_data_length
                    or len(str(c)) > max_column_data_length[r[0][cn]]
                ):
                    max_column_data_length[r[0][cn]] = len(str(c))
                ws.write(rn, cn, c, cf1)
        ws.autofilter(0, 0, rn, cn)
        ws.freeze_panes(1, 0)
        for cn, c in enumerate(r[0]):
            # Set column width
            width = get_column_width(c)
            if enable_autowidth and width < max_column_data_length[c]:
                width = max_column_data_length[c]
            ws.set_column(cn, cn, width=width)
        wb.close()
        response.seek(0)
        response = HttpResponse(response.getvalue(), content_type="application/vnd.ms-excel")
        response["Content-Disposition"] = 'attachment; filename="%s.xlsx"' % filename
        response.close()
        return response
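
The XLSX branches in these reports follow one reusable pattern: render the workbook into an in-memory buffer, track the widest cell per column, then apply an autofilter, a frozen header row, and column widths. A standalone sketch of the same pattern; the rows and output file name are placeholders for illustration.

from io import BytesIO
import xlsxwriter

rows = [["PORT_NAME", "PORT_SPEED"], ["Gi0/1", "1G"], ["Te1/0/1", "10G"]]
buf = BytesIO()
wb = xlsxwriter.Workbook(buf)
ws = wb.add_worksheet("Objects")
widths = {}
for rn, row in enumerate(rows):
    for cn, c in enumerate(row):
        widths[cn] = max(widths.get(cn, 0), len(str(c)))
        ws.write(rn, cn, c)
ws.autofilter(0, 0, len(rows) - 1, len(rows[0]) - 1)  # filter over the data range
ws.freeze_panes(1, 0)  # keep the header row visible while scrolling
for cn, w in widths.items():
    ws.set_column(cn, cn, width=w + 2)
wb.close()
buf.seek(0)
with open("report.xlsx", "wb") as out:  # a view would return these bytes as the response body
    out.write(buf.getvalue())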
def api_report(
    self,
    request,
    from_date,
    to_date,
    o_format,
    min_duration=0,
    max_duration=0,
    min_objects=0,
    min_subscribers=0,
    segment=None,
    administrative_domain=None,
    selector=None,
    ex_selector=None,
    columns=None,
    source="both",
    alarm_class=None,
    subscribers=None,
    enable_autowidth=False,
):
    def row(row, container_path, segment_path):
        def qe(v):
            if v is None:
                return ""
            if isinstance(v, unicode):
                return v.encode("utf-8")
            elif isinstance(v, datetime.datetime):
                return v.strftime("%Y-%m-%d %H:%M:%S")
            elif not isinstance(v, str):
                return str(v)
            else:
                return v

        r = [qe(x) for x in row]
        # Pad or trim the path columns to their fixed depths
        if len(container_path) < self.CONTAINER_PATH_DEPTH:
            container_path += [""] * (self.CONTAINER_PATH_DEPTH - len(container_path))
        else:
            container_path = container_path[: self.CONTAINER_PATH_DEPTH]
        if len(segment_path) < self.SEGMENT_PATH_DEPTH:
            segment_path += [""] * (self.SEGMENT_PATH_DEPTH - len(segment_path))
        else:
            segment_path = segment_path[: self.SEGMENT_PATH_DEPTH]
        return r + container_path + segment_path

    def translate_row(row, cmap):
        return [row[i] for i in cmap]

    cols = (
        [
            "id", "root_id", "from_ts", "to_ts", "duration_sec",
            "object_name", "object_address", "object_hostname",
            "object_profile", "object_admdomain", "object_platform",
            "object_version", "alarm_class", "alarm_subject", "maintenance",
            "objects", "subscribers", "tt", "escalation_ts", "location",
            "container_address",
        ]
        + ["container_%d" % i for i in range(self.CONTAINER_PATH_DEPTH)]
        + ["segment_%d" % i for i in range(self.SEGMENT_PATH_DEPTH)]
    )
    header_row = (
        [
            "ID", _("ROOT_ID"), _("FROM_TS"), _("TO_TS"), _("DURATION_SEC"),
            _("OBJECT_NAME"), _("OBJECT_ADDRESS"), _("OBJECT_HOSTNAME"),
            _("OBJECT_PROFILE"), _("OBJECT_ADMDOMAIN"), _("OBJECT_PLATFORM"),
            _("OBJECT_VERSION"), _("ALARM_CLASS"), _("ALARM_SUBJECT"),
            _("MAINTENANCE"), _("OBJECTS"), _("SUBSCRIBERS"), _("TT"),
            _("ESCALATION_TS"), _("LOCATION"), _("CONTAINER_ADDRESS"),
        ]
        + ["CONTAINER_%d" % i for i in range(self.CONTAINER_PATH_DEPTH)]
        + ["SEGMENT_%d" % i for i in range(self.SEGMENT_PATH_DEPTH)]
    )
    if columns:
        cmap = []
        for c in columns.split(","):
            try:
                cmap += [cols.index(c)]
            except ValueError:
                continue
    else:
        cmap = list(range(len(cols)))
    subscribers_profile = self.default_subscribers_profile
    if subscribers:
        subscribers_profile = set(
            SubscriberProfile.objects.filter(id__in=subscribers.split(",")).scalar("id")
        )
    r = [translate_row(header_row, cmap)]
    fd = datetime.datetime.strptime(to_date, "%d.%m.%Y") + datetime.timedelta(days=1)
    match = {
        "timestamp": {
            "$gte": datetime.datetime.strptime(from_date, "%d.%m.%Y"),
            "$lte": fd,
        }
    }
    match_duration = {"duration": {"$gte": min_duration}}
    if max_duration:
        match_duration = {"duration": {"$gte": min_duration, "$lte": max_duration}}
    mos = ManagedObject.objects.filter(is_managed=True)
    if segment:
        try:
            match["segment_path"] = bson.ObjectId(segment)
        except bson.errors.InvalidId:
            pass
    ads = []
    if administrative_domain:
        if administrative_domain.isdigit():
            administrative_domain = [int(administrative_domain)]
            ads = AdministrativeDomain.get_nested_ids(administrative_domain[0])
    if not request.user.is_superuser:
        user_ads = UserAccess.get_domains(request.user)
        if administrative_domain and ads:
            if administrative_domain[0] not in user_ads:
                ads = list(set(ads) & set(user_ads))
            else:
                ads = administrative_domain
        else:
            ads = user_ads
    if ads:
        mos = mos.filter(administrative_domain__in=ads)
    if selector:
        selector = ManagedObjectSelector.get_by_id(int(selector))
        mos = mos.filter(selector.Q)
    if ex_selector:
        ex_selector = ManagedObjectSelector.get_by_id(int(ex_selector))
        mos = mos.exclude(ex_selector.Q)
    # Working if Administrative domain is set
    if ads:
        try:
            match["adm_path"] = {"$in": ads}
            # @todo Support more than two hierarchy levels
        except bson.errors.InvalidId:
            pass
    mos_id = list(mos.order_by("id").values_list("id", flat=True))
    mo_hostname = {}
    maintenance = []
    if mos_id and (selector or ex_selector):
        match["managed_object"] = {"$in": mos_id}
    if "maintenance" in columns.split(","):
        maintenance = Maintenance.currently_affected()
    if "object_hostname" in columns.split(","):
        mo_hostname = ReportObjectsHostname1(sync_ids=mos_id)
        mo_hostname = mo_hostname.get_dictionary()
    moss = ReportAlarmObjects(mos_id).get_all()
    # container_lookup = ReportContainer(mos_id)
    container_lookup = None
    subject = "alarm_subject" in columns
    loc = AlarmApplication([])
    if source in ["archive", "both"]:
        # Archived Alarms
        for a in (
            ArchivedAlarm._get_collection()
            .with_options(read_preference=ReadPreference.SECONDARY_PREFERRED)
            .aggregate([
                {"$match": match},
                {
                    "$addFields": {
                        "duration": {
                            "$divide": [
                                {"$subtract": ["$clear_timestamp", "$timestamp"]},
                                1000,
                            ]
                        }
                    }
                },
                {"$match": match_duration},
                # {"$sort": {"timestamp": 1}},
            ])
        ):
            if int(a["managed_object"]) not in moss:
                continue
            dt = a["clear_timestamp"] - a["timestamp"]
            duration = int(dt.total_seconds())
            total_objects = sum(ss["summary"] for ss in a["total_objects"])
            if min_objects and total_objects < min_objects:
                continue
            total_subscribers = sum(
                ss["summary"]
                for ss in a["total_subscribers"]
                if subscribers_profile and ss["profile"] in subscribers_profile
            )
            if min_subscribers and total_subscribers < min_subscribers:
                continue
            if "segment_" in columns.split(",") or "container_" in columns.split(","):
                path = ObjectPath.get_path(a["managed_object"])
                if path:
                    segment_path = [
                        NetworkSegment.get_by_id(s).name
                        for s in path.segment_path
                        if NetworkSegment.get_by_id(s)
                    ]
                    container_path = [
                        Object.get_by_id(s).name
                        for s in path.container_path
                        if Object.get_by_id(s)
                    ]
                else:
                    segment_path, container_path = [], []
            else:
                segment_path, container_path = [], []
            mo = moss[a["managed_object"]]
            r += [
                translate_row(
                    row(
                        [
                            str(a["_id"]),
                            str(a["root"]) if a.get("root") else "",
                            a["timestamp"],
                            a["clear_timestamp"],
                            str(duration),
                            mo[0],
                            mo[1],
                            mo_hostname.get(a["managed_object"], ""),
                            Profile.get_by_id(mo[3]).name if mo[5] else "",
                            mo[6],
                            Platform.get_by_id(mo[9]) if mo[9] else "",
                            Firmware.get_by_id(mo[10]) if mo[10] else "",
                            AlarmClass.get_by_id(a["alarm_class"]).name,
                            ArchivedAlarm.objects.get(id=a["_id"]).subject if subject else "",
                            "",
                            total_objects,
                            total_subscribers,
                            a.get("escalation_tt"),
                            a.get("escalation_ts"),
                            ", ".join(
                                ll
                                for ll in (loc.location(mo[5]) if mo[5] is not None else "")
                                if ll
                            ),
                            container_lookup[a["managed_object"]].get("text", "")
                            if container_lookup
                            else "",
                        ],
                        container_path,
                        segment_path,
                    ),
                    cmap,
                )
            ]
    # Active Alarms
    if source in ["active", "both"]:
        for a in (
            ActiveAlarm._get_collection()
            .with_options(read_preference=ReadPreference.SECONDARY_PREFERRED)
            .aggregate([
                {"$match": match},
                {
                    "$addFields": {
                        "duration": {"$divide": [{"$subtract": [fd, "$timestamp"]}, 1000]}
                    }
                },
                {"$match": match_duration},
                # {"$sort": {"timestamp": 1}},
            ])
        ):
            dt = fd - a["timestamp"]
            duration = int(dt.total_seconds())
            total_objects = sum(ss["summary"] for ss in a["total_objects"])
            if min_objects and total_objects < min_objects:
                continue
            total_subscribers = sum(
                ss["summary"]
                for ss in a["total_subscribers"]
                if subscribers_profile and ss["profile"] in subscribers_profile
            )
            if min_subscribers and total_subscribers < min_subscribers:
                continue
            if "segment_" in columns.split(",") or "container_" in columns.split(","):
                path = ObjectPath.get_path(a["managed_object"])
                if path:
                    segment_path = [
                        NetworkSegment.get_by_id(s).name
                        for s in path.segment_path
                        if NetworkSegment.get_by_id(s)
                    ]
                    container_path = [
                        Object.get_by_id(s).name
                        for s in path.container_path
                        if Object.get_by_id(s)
                    ]
                else:
                    segment_path, container_path = [], []
            else:
                segment_path, container_path = [], []
            mo = moss[a["managed_object"]]
            r += [
                translate_row(
                    row(
                        [
                            str(a["_id"]),
                            str(a["root"]) if a.get("root") else "",
                            a["timestamp"],
                            "",  # active alarms have no clear_timestamp
                            str(duration),
                            mo[0],
                            mo[1],
                            mo_hostname.get(a["managed_object"], ""),
                            Profile.get_by_id(mo[3]).name if mo[5] else "",
                            mo[6],
                            Platform.get_by_id(mo[9]) if mo[9] else "",
                            Firmware.get_by_id(mo[10]) if mo[10] else "",
                            AlarmClass.get_by_id(a["alarm_class"]).name,
                            ActiveAlarm.objects.get(id=a["_id"]).subject if subject else None,
                            "Yes" if a["managed_object"] in maintenance else "No",
                            total_objects,
                            total_subscribers,
                            a.get("escalation_tt"),
                            a.get("escalation_ts"),
                            ", ".join(
                                ll
                                for ll in (loc.location(mo[5]) if mo[5] is not None else "")
                                if ll
                            ),
                            container_lookup[a["managed_object"]].get("text", "")
                            if container_lookup
                            else "",
                        ],
                        container_path,
                        segment_path,
                    ),
                    cmap,
                )
            ]
    if o_format == "csv":
        response = HttpResponse(content_type="text/csv")
        response["Content-Disposition"] = 'attachment; filename="alarms.csv"'
        writer = csv.writer(response)
        writer.writerows(r)
        return response
    elif o_format == "xlsx":
        response = StringIO()
        wb = xlsxwriter.Workbook(response)
        cf1 = wb.add_format({"bottom": 1, "left": 1, "right": 1, "top": 1})
        ws = wb.add_worksheet("Alarms")
        max_column_data_length = {}
        for rn, x in enumerate(r):
            for cn, c in enumerate(x):
                if rn and (
                    r[0][cn] not in max_column_data_length
                    or len(str(c)) > max_column_data_length[r[0][cn]]
                ):
                    max_column_data_length[r[0][cn]] = len(str(c))
                ws.write(rn, cn, c, cf1)
        ws.autofilter(0, 0, rn, cn)
        ws.freeze_panes(1, 0)
        for cn, c in enumerate(r[0]):
            # Set column width
            width = get_column_width(c)
            if enable_autowidth and width < max_column_data_length[c]:
                width = max_column_data_length[c]
            ws.set_column(cn, cn, width=width)
        wb.close()
        response.seek(0)
        response = HttpResponse(response.getvalue(), content_type="application/vnd.ms-excel")
        response["Content-Disposition"] = 'attachment; filename="alarms.xlsx"'
        response.close()
        return response
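
The two alarm-report variants compute duration differently per collection: archived alarms have a clear_timestamp, while active alarms are measured against a reference point (datetime.now() in the first variant, the report's upper bound fd in the one above). A small sketch of the two conventions with made-up timestamps:

import datetime

fd = datetime.datetime(2020, 1, 2)              # report upper bound: to_date + 1 day
opened = datetime.datetime(2020, 1, 1, 12, 0)   # the alarm's "timestamp"
cleared = datetime.datetime(2020, 1, 1, 18, 0)  # only archived alarms have this

archived_duration = int((cleared - opened).total_seconds())
active_duration = int((fd - opened).total_seconds())
assert archived_duration == 6 * 3600   # closed after six hours
assert active_duration == 12 * 3600    # still open at the window's end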
def api_report(
    self,
    request,
    from_date,
    to_date,
    o_format,
    administrative_domain=None,
    columns=None,
    source="both",
    alarm_class=None,
    enable_autowidth=False,
):
    def row(row):
        def qe(v):
            # Normalize every cell to text; datetimes get a fixed format
            if v is None:
                return ""
            if isinstance(v, datetime.datetime):
                return v.strftime("%Y-%m-%d %H:%M:%S")
            return smart_text(v)

        return [qe(x) for x in row]

    def translate_row(row, cmap):
        return [row[i] for i in cmap]

    cols = [
        "id", "alarm_class", "alarm_from_ts", "alarm_to_ts", "alarm_tt",
        "object_name", "object_address", "object_admdomain",
        "log_timestamp", "log_source", "log_message",
        # "tt", "escalation_ts",
    ]
    header_row = [
        "ID", _("ALARM_CLASS"), _("ALARM_FROM_TS"), _("ALARM_TO_TS"),
        _("ALARM_TT"), _("OBJECT_NAME"), _("OBJECT_ADDRESS"),
        _("OBJECT_ADMDOMAIN"), _("LOG_TIMESTAMP"), _("LOG_SOURCE"),
        _("LOG_MESSAGE"),
    ]
    if columns:
        cmap = []
        for c in columns.split(","):
            try:
                cmap += [cols.index(c)]
            except ValueError:
                continue
    else:
        cmap = list(range(len(cols)))
    r = [translate_row(header_row, cmap)]
    fd = datetime.datetime.strptime(to_date, "%d.%m.%Y") + datetime.timedelta(days=1)
    match = {
        "timestamp": {
            "$gte": datetime.datetime.strptime(from_date, "%d.%m.%Y"),
            "$lte": fd,
        }
    }
    mos = ManagedObject.objects.filter()
    ads = []
    if administrative_domain:
        if administrative_domain.isdigit():
            administrative_domain = [int(administrative_domain)]
            ads = AdministrativeDomain.get_nested_ids(administrative_domain[0])
    if not request.user.is_superuser:
        user_ads = UserAccess.get_domains(request.user)
        if administrative_domain and ads:
            if administrative_domain[0] not in user_ads:
                ads = list(set(ads) & set(user_ads))
                if not ads:
                    return HttpResponse(
                        "<html><body>Permission denied: Invalid Administrative Domain</body></html>"
                    )
        else:
            ads = user_ads
    if ads:
        mos = mos.filter(administrative_domain__in=ads)
    # Working if Administrative domain is set
    if ads:
        try:
            match["adm_path"] = {"$in": ads}
            # @todo Support more than two hierarchy levels
        except bson.errors.InvalidId:
            pass
    addr_map = {mo[0]: (mo[1], mo[2]) for mo in mos.values_list("id", "name", "address")}
    # Active Alarms
    coll = ActiveAlarm._get_collection()
    for aa in coll.aggregate([
        {"$match": match},
        {"$unwind": "$log"},
        {"$match": {"log.source": {"$exists": True, "$ne": None}}},
        {
            "$project": {
                "timestamp": 1, "managed_object": 1, "alarm_class": 1,
                "escalation_tt": 1, "adm_path": 1, "log": 1,
            }
        },
        {"$sort": {"_id": 1, "log.timestamp": 1}},
    ]):
        r += [
            translate_row(
                row([
                    smart_text(aa["_id"]),
                    AlarmClass.get_by_id(aa["alarm_class"]).name,
                    aa["timestamp"],
                    "",
                    aa.get("escalation_tt", ""),
                    addr_map[aa["managed_object"]][0],
                    addr_map[aa["managed_object"]][1],
                    AdministrativeDomain.get_by_id(aa["adm_path"][-1]).name,
                    aa["log"]["timestamp"],
                    aa["log"]["source"],
                    aa["log"]["message"],
                ]),
                cmap,
            )
        ]
    # Archived Alarms
    coll = ArchivedAlarm._get_collection()
    for aa in coll.aggregate([
        {"$match": match},
        {"$unwind": "$log"},
        {"$match": {"log.source": {"$exists": True}}},
        {
            "$project": {
                "timestamp": 1, "clear_timestamp": 1, "managed_object": 1,
                "alarm_class": 1, "escalation_tt": 1, "adm_path": 1, "log": 1,
            }
        },
        {"$sort": {"_id": 1, "log.timestamp": 1}},
    ]):
        r += [
            translate_row(
                row([
                    smart_text(aa["_id"]),
                    AlarmClass.get_by_id(aa["alarm_class"]).name,
                    aa["timestamp"],
                    aa["clear_timestamp"],
                    aa.get("escalation_tt", ""),
                    addr_map[aa["managed_object"]][0],
                    addr_map[aa["managed_object"]][1],
                    AdministrativeDomain.get_by_id(aa["adm_path"][-1]).name,
                    aa["log"]["timestamp"],
                    aa["log"]["source"],
                    aa["log"]["message"],
                ]),
                cmap,
            )
        ]
    filename = "alarm_comments.csv"
    if o_format == "csv":
        response = HttpResponse(content_type="text/csv")
        response["Content-Disposition"] = 'attachment; filename="%s"' % filename
        writer = csv.writer(response)
        writer.writerows(r)
        return response
    elif o_format == "csv_zip":
        response = BytesIO()
        f = TextIOWrapper(TemporaryFile(mode="w+b"), encoding="utf-8")
        writer = csv.writer(f, dialect="excel", delimiter=";", quotechar='"')
        writer.writerow(columns)
        writer.writerows(r)
        f.seek(0)
        with ZipFile(response, "w", compression=ZIP_DEFLATED) as zf:
            zf.writestr(filename, f.read())
            zf.filename = "%s.zip" % filename
        response.seek(0)
        response = HttpResponse(response.getvalue(), content_type="application/zip")
        response["Content-Disposition"] = 'attachment; filename="%s.zip"' % filename
        return response
    elif o_format == "xlsx":
        response = BytesIO()
        wb = xlsxwriter.Workbook(response)
        cf1 = wb.add_format({"bottom": 1, "left": 1, "right": 1, "top": 1})
        ws = wb.add_worksheet("Alarms")
        max_column_data_length = {}
        for rn, x in enumerate(r):
            for cn, c in enumerate(x):
                if rn and (
                    r[0][cn] not in max_column_data_length
                    or len(str(c)) > max_column_data_length[r[0][cn]]
                ):
                    max_column_data_length[r[0][cn]] = len(str(c))
                ws.write(rn, cn, c, cf1)
        ws.autofilter(0, 0, rn, cn)
        ws.freeze_panes(1, 0)
        for cn, c in enumerate(r[0]):
            # Set column width
            width = get_column_width(c)
            if enable_autowidth and width < max_column_data_length[c]:
                width = max_column_data_length[c]
            ws.set_column(cn, cn, width=width)
        wb.close()
        response.seek(0)
        response = HttpResponse(response.getvalue(), content_type="application/vnd.ms-excel")
        response["Content-Disposition"] = 'attachment; filename="alarm_comments.xlsx"'
        response.close()
        return response
def api_report(
    self,
    request,
    o_format,
    is_managed=None,
    administrative_domain=None,
    selector=None,
    pool=None,
    segment=None,
    avail_status=False,
    columns=None,
    ids=None,
    detail_stat=None,
    enable_autowidth=False,
):
    def row(row):
        def qe(v):
            if v is None:
                return ""
            if isinstance(v, unicode):
                return v.encode("utf-8")
            elif isinstance(v, datetime.datetime):
                return v.strftime("%Y-%m-%d %H:%M:%S")
            elif not isinstance(v, str):
                return str(v)
            else:
                return v

        return [qe(x) for x in row]

    def translate_row(row, cmap):
        return [row[i] for i in cmap]

    type_columns = ["Up/10G", "Up/1G", "Up/100M", "Up/10M", "Down/-", "-"]
    cols = [
        "id", "object_name", "object_address", "object_hostname",
        "object_status", "profile_name", "object_profile", "object_vendor",
        "object_platform", "object_attr_hwversion", "object_version",
        "object_attr_bootprom", "object_serial", "object_attr_patch",
        "auth_profile", "avail", "admin_domain", "container", "segment",
        "phys_interface_count", "link_count", "last_config_ts",
        # "discovery_problem", "object_tags", "sorted_tags",
        # "object_caps", "interface_type_count",
    ]
    header_row = [
        "ID", "OBJECT_NAME", "OBJECT_ADDRESS", "OBJECT_HOSTNAME",
        "OBJECT_STATUS", "PROFILE_NAME", "OBJECT_PROFILE", "OBJECT_VENDOR",
        "OBJECT_PLATFORM", "OBJECT_HWVERSION", "OBJECT_VERSION",
        "OBJECT_BOOTPROM", "OBJECT_SERIAL", "OBJECT_ATTR_PATCH",
        "AUTH_PROFILE", "AVAIL", "ADMIN_DOMAIN", "CONTAINER", "SEGMENT",
        "PHYS_INTERFACE_COUNT", "LINK_COUNT", "LAST_CONFIG_TS",
        # "DISCOVERY_PROBLEM", "ADM_PATH", "OBJECT_TAGS", "SORTED_TAGS",
        # "OBJECT_CAPS", "INTERFACE_TYPE_COUNT",
    ]
    if columns:
        cmap = []
        for c in columns.split(","):
            try:
                cmap += [cols.index(c)]
            except ValueError:
                continue
    else:
        cmap = list(range(len(cols)))
    r = [translate_row(header_row, cmap)]
    mos = self.get_report_object(
        request.user, is_managed, administrative_domain, selector, pool, segment, ids
    )
    columns_filter = set(columns.split(","))
    mos_id = tuple(mos.order_by("id").values_list("id", flat=True))
    mos_filter = None
    if detail_stat:
        ref = ReportModelFilter()
        ids = list(six.itervalues(ref.proccessed(detail_stat)))
        mos_filter = set(mos_id).intersection(ids[0])
        mos_id = sorted(mos_filter)
    avail = {}
    if "avail" in columns_filter:
        avail = ObjectStatus.get_statuses(mos_id)
    # Optional per-object data streams, consumed in lockstep with the
    # main queryset below (all are ordered by the same id sequence)
    link_count = iter(ReportObjectLinkCount(mos_id))
    iface_count = iter(ReportObjectIfacesTypeStat(mos_id))
    if "container" in columns_filter:
        container_lookup = iter(ReportContainerData(mos_id))
    else:
        container_lookup = None
    if "object_serial" in columns_filter:
        container_serials = iter(ReportContainer(mos_id))
    else:
        container_serials = None
    if "interface_type_count" in columns_filter:
        iss = iter(ReportObjectIfacesStatusStat(mos_id))
    else:
        iss = None
    if "object_attr_patch" in columns_filter or "object_serial" in columns_filter:
        roa = iter(ReportObjectAttributes(mos_id))
    else:
        roa = None
    hn = iter(ReportObjectsHostname1(mos_id))
    rc = iter(ReportObjectConfig(mos_id))
    # ccc = iter(ReportObjectCaps(mos_id))
    if "adm_path" in columns_filter:
        ad_path = ReportAdPath()
        r[-1].extend([_("ADM_PATH1"), _("ADM_PATH1"), _("ADM_PATH1")])
    if "interface_type_count" in columns_filter:
        r[-1].extend(type_columns)
    if "object_caps" in columns_filter:
        object_caps = ReportObjectCaps(mos_id)
        caps_columns = list(six.itervalues(object_caps.ATTRS))
        ccc = iter(object_caps)
        r[-1].extend(caps_columns)
    if "object_tags" in columns_filter:
        r[-1].extend([_("OBJECT_TAGS")])
    if "sorted_tags" in columns_filter:
        tags = set()
        for s in (
            ManagedObject.objects.filter()
            .exclude(tags=None)
            .values_list("tags", flat=True)
            .distinct()
        ):
            tags.update(set(s))
        tags_o = sorted([t for t in tags if "{" not in t])
        r[-1].extend(tags_o)
    if "discovery_problem" in columns_filter:
        discovery_result = ReportDiscoveryResult(mos_id)
        discovery_result.safe_output = True
        discovery_result.unknown_value = ([""] * len(discovery_result.ATTRS),)
        dp_columns = discovery_result.ATTRS
        dp = iter(discovery_result)
        r[-1].extend(dp_columns)
    for (
        mo_id, name, address, is_managed, sa_profile, o_profile, auth_profile,
        ad, m_segment, vendor, platform, version, tags,
    ) in (
        mos.values_list(
            "id", "name", "address", "is_managed", "profile",
            "object_profile__name", "auth_profile__name",
            "administrative_domain__name", "segment", "vendor", "platform",
            "version", "tags",
        )
        .order_by("id")
        .iterator()
    ):
        if (mos_filter and mo_id not in mos_filter) or not mos_id:
            continue
        if container_serials:
            mo_serials = next(container_serials)
        else:
            mo_serials = [{}]
        if container_lookup:
            mo_container = next(container_lookup)
        else:
            mo_container = ("",)
        if roa:
            serial, hw_ver, boot_prom, patch = next(roa)[0]  # noqa
        else:
            serial, hw_ver, boot_prom, patch = "", "", "", ""  # noqa
        r.append(
            translate_row(
                row([
                    mo_id,
                    name,
                    address,
                    next(hn)[0],
                    "managed" if is_managed else "unmanaged",
                    Profile.get_by_id(sa_profile),
                    o_profile,
                    Vendor.get_by_id(vendor) if vendor else "",
                    Platform.get_by_id(platform) if platform else "",
                    hw_ver,
                    Firmware.get_by_id(version) if version else "",
                    boot_prom,
                    # Serial: prefer the container-provided one
                    mo_serials[0].get("serial", "") or serial,
                    patch or "",
                    auth_profile,
                    _("Yes") if avail.get(mo_id, None) else _("No"),
                    ad,
                    mo_container[0],
                    NetworkSegment.get_by_id(m_segment) if m_segment else "",
                    next(iface_count)[0],
                    next(link_count)[0],
                    next(rc)[0],
                ]),
                cmap,
            )
        )
        if "adm_path" in columns_filter:
            r[-1].extend([ad] + list(ad_path[ad]))
        if "interface_type_count" in columns_filter:
            r[-1].extend(next(iss)[0])
        if "object_caps" in columns_filter:
            r[-1].extend(next(ccc)[0])
        if "object_tags" in columns_filter:
            r[-1].append(",".join(tags if tags else []))
        if "sorted_tags" in columns_filter:
            out_tags = [""] * len(tags_o)
            try:
                if tags:
                    for m in tags:
                        out_tags[tags_o.index(m)] = m
            except ValueError:
                logger.warning("Bad value for tag: %s", m)
            r[-1].extend(out_tags)
        if "discovery_problem" in columns_filter:
            r[-1].extend(next(dp)[0])
    filename = "mo_detail_report_%s" % datetime.datetime.now().strftime("%Y%m%d")
    if o_format == "csv":
        response = HttpResponse(content_type="text/csv")
        response["Content-Disposition"] = 'attachment; filename="%s.csv"' % filename
        writer = csv.writer(response, dialect="excel", delimiter=";", quotechar='"')
        writer.writerows(r)
        return response
    elif o_format == "xlsx":
        response = StringIO()
        wb = xlsxwriter.Workbook(response)
        cf1 = wb.add_format({"bottom": 1, "left": 1, "right": 1, "top": 1})
        ws = wb.add_worksheet("Objects")
        max_column_data_length = {}
        for rn, x in enumerate(r):
            for cn, c in enumerate(x):
                if rn and (
                    r[0][cn] not in max_column_data_length
                    or len(str(c)) > max_column_data_length[r[0][cn]]
                ):
                    max_column_data_length[r[0][cn]] = len(str(c))
                ws.write(rn, cn, c, cf1)
        ws.autofilter(0, 0, rn, cn)
        ws.freeze_panes(1, 0)
        for cn, c in enumerate(r[0]):
            # Set column width
            width = get_column_width(c)
            if enable_autowidth and width < max_column_data_length[c]:
                width = max_column_data_length[c]
            ws.set_column(cn, cn, width=width)
        wb.close()
        response.seek(0)
        response = HttpResponse(response.getvalue(), content_type="application/vnd.ms-excel")
        response["Content-Disposition"] = 'attachment; filename="%s.xlsx"' % filename
        response.close()
        return response
def api_report(
    self,
    request,
    reporttype=None,
    from_date=None,
    to_date=None,
    object_profile=None,
    filter_default=None,
    exclude_zero=True,
    interface_profile=None,
    selector=None,
    administrative_domain=None,
    columns=None,
    description=None,
    o_format=None,
    enable_autowidth=False,
    **kwargs,
):
    def load(mo_ids):
        # Resolve, for every managed object, which local interface leads
        # to each of its link neighbors: res[mo][neighbor] -> iface name
        # match = {"links.mo": {"$in": mo_ids}}
        match = {"int.managed_object": {"$in": mo_ids}}
        group = {
            "_id": "$_id",
            "links": {
                "$push": {
                    "iface_n": "$int.name",
                    # "iface_id": "$int._id",
                    # "iface_descr": "$int.description",
                    # "iface_speed": "$int.in_speed",
                    # "dis_method": "$discovery_method",
                    # "last_seen": "$last_seen",
                    "mo": "$int.managed_object",
                    "linked_obj": "$linked_objects",
                }
            },
        }
        value = (
            get_db()["noc.links"]
            .with_options(read_preference=ReadPreference.SECONDARY_PREFERRED)
            .aggregate(
                [
                    {"$unwind": "$interfaces"},
                    {
                        "$lookup": {
                            "from": "noc.interfaces",
                            "localField": "interfaces",
                            "foreignField": "_id",
                            "as": "int",
                        }
                    },
                    {"$match": match},
                    {"$group": group},
                ],
                allowDiskUse=True,
            )
        )
        res = defaultdict(dict)
        for v in value:
            if v["_id"]:
                for vv in v["links"]:
                    if len(vv["linked_obj"]) == 2:
                        mo = vv["mo"][0]
                        iface = vv["iface_n"]
                        for i in vv["linked_obj"]:
                            if mo != i:
                                res[mo][i] = iface[0]
        return res

    def translate_row(row, cmap):
        return [row[i] for i in cmap]

    def str_to_float(s):
        # Parse a metric value and round it to three decimal places
        return float("{0:.3f}".format(float(s)))

    cols = [
        "object_id", "object_name", "object_address", "object_platform",
        "object_adm_domain", "object_segment", "object_container",
        # "object_hostname", "object_status", "profile_name",
        # "object_profile", "object_vendor",
        "iface_name", "iface_description", "iface_speed",
        "max_load_in", "max_load_in_time", "max_load_out", "max_load_out_time",
        "avg_load_in", "avg_load_out", "total_in", "total_out",
        "uplink_iface_name", "uplink_iface_description", "uplink_iface_speed",
        "uplink_max_load_in", "uplink_max_load_in_time",
        "uplink_max_load_out", "uplink_max_load_out_time",
        "uplink_avg_load_in", "uplink_avg_load_out",
        "uplink_total_in", "uplink_total_out",
    ]
    header_row = [
        "ID", _("OBJECT_NAME"), _("OBJECT_ADDRESS"), _("OBJECT_PLATFORM"),
        _("OBJECT_ADMDOMAIN"), _("OBJECT_SEGMENT"), _("CONTAINER_ADDRESS"),
        _("IFACE_NAME"), _("IFACE_DESCRIPTION"), _("IFACE_SPEED"),
        _("MAX_LOAD_IN, Mbps"), _("MAX_LOAD_IN_TIME"),
        _("MAX_LOAD_OUT, Mbps"), _("MAX_LOAD_OUT_TIME"),
        _("AVG_LOAD_IN, Mbps"), _("AVG_LOAD_OUT, Mbps"),
        _("TOTAL_IN, Mbyte"), _("TOTAL_OUT, Mbyte"),
        _("UPLINK_IFACE_NAME"), _("UPLINK_IFACE_DESCRIPTION"),
        _("UPLINK_IFACE_SPEED"), _("UPLINK_MAX_LOAD_IN, Mbps"),
        _("UPLINK_MAX_TIME_IN"), _("UPLINK_MAX_LOAD_OUT, Mbps"),
        _("UPLINK_MAX_TIME_OUT"), _("UPLINK_AVG_LOAD_IN, Mbps"),
        _("UPLINK_AVG_LOAD_OUT, Mbps"), _("UPLINK_TOTAL_IN, Mbyte"),
        _("UPLINK_TOTAL_OUT, Mbyte"),
    ]
    if columns:
        cmap = []
        for c in columns.split(","):
            try:
                cmap += [cols.index(c)]
            except ValueError:
                continue
    else:
        cmap = list(range(len(cols)))
    columns_order = columns.split(",")
    columns_filter = set(columns_order)
    r = [translate_row(header_row, cmap)]
    # Date Time Block: a missing to_date yields a one-day window
    if not from_date:
        from_date = datetime.datetime.now() - datetime.timedelta(days=1)
    else:
        from_date = datetime.datetime.strptime(from_date, "%d.%m.%Y")
    if not to_date:
        to_date = from_date + datetime.timedelta(days=1)
    else:
        to_date = datetime.datetime.strptime(to_date, "%d.%m.%Y") + datetime.timedelta(days=1)
    diff = to_date - from_date
    # Load managed objects
    mos = ManagedObject.objects.filter(is_managed=True)
    if not request.user.is_superuser:
        mos = mos.filter(administrative_domain__in=UserAccess.get_domains(request.user))
    if selector:
        mos = mos.filter(ManagedObjectSelector.objects.get(id=int(selector)).Q)
    if administrative_domain:
        mos = mos.filter(
            administrative_domain__in=AdministrativeDomain.get_nested_ids(
                int(administrative_domain)
            )
        )
    if object_profile:
        mos = mos.filter(object_profile=object_profile)
    if interface_profile:
        interface_profile = InterfaceProfile.objects.filter(id=interface_profile).first()
    mo_attrs = namedtuple("MOATTRs", [c for c in cols if c.startswith("object")])
    containers_address = {}
    if "object_container" in columns_filter:
        containers_address = ReportContainerData(set(mos.values_list("id", flat=True)))
        containers_address = dict(list(containers_address.extract()))
    moss = {}
    for row in mos.values_list(
        "bi_id", "name", "address", "platform", "administrative_domain__name", "segment", "id"
    ):
        moss[row[0]] = mo_attrs(
            row[6],
            row[1],
            row[2],
            smart_text(Platform.get_by_id(row[3]) if row[3] else ""),
            row[4],
            smart_text(NetworkSegment.get_by_id(row[5])) if row[5] else "",
            containers_address.get(row[6], "") if containers_address and row[6] else "",
        )
    report_metric = ReportInterfaceMetrics(tuple(sorted(moss)), from_date, to_date, columns=None)
    report_metric.SELECT_QUERY_MAP = {
        (0, "managed_object", "id"): "managed_object",
        (1, "path", "iface_name"): "arrayStringConcat(path)",
        (2, "", "iface_description"): (
            "dictGetString('interfaceattributes', 'description', "
            "(managed_object, arrayStringConcat(path)))"
        ),
        (3, "", "profile"): (
            "dictGetString('interfaceattributes', 'profile', "
            "(managed_object, arrayStringConcat(path)))"
        ),
        (4, "speed", "iface_speed"): (
            "dictGetUInt64('interfaceattributes', 'in_speed', "
            "(managed_object, arrayStringConcat(path)))"
        ),
        (5, "load_in_max", "load_in_max"): "divide(max(load_in),1048576)",
        (6, "load_out_max", "load_out_max"): "divide(max(load_out),1048576)",
        (7, "max_load_in_time", "max_load_in_time"): "argMax(ts,load_in)",
        (8, "max_load_out_time", "max_load_out_time"): "argMax(ts,load_out)",
        (9, "avg_load_in", "avg_load_in"): "divide(avg(load_in),1048576)",
        (10, "avg_load_out", "avg_load_out"): "divide(avg(load_out),1048576)",
    }
    ifaces_metrics = defaultdict(dict)
    for row in report_metric.do_query():
        avg_in = str_to_float(row[9])
        avg_out = str_to_float(row[10])
        total_in = avg_in * diff.total_seconds() / 8
        total_out = avg_out * diff.total_seconds() / 8
        ifaces_metrics[row[0]][row[1]] = {
            "description": row[2],
            "profile": row[3],
            "bandwidth": row[4],
            "max_load_in": str_to_float(row[5]),
            "max_load_out": str_to_float(row[6]),
            "max_load_in_time": row[7],
            "max_load_out_time": row[8],
            "avg_load_in": avg_in,
            "avg_load_out": avg_out,
            "total_in": float("{0:.1f}".format(total_in)),
            "total_out": float("{0:.1f}".format(total_out)),
        }
    # Find uplinks (only if any uplink_* column is requested)
    links = {}
    if cmap[-1] > 17:
        mos_id = list(mos.values_list("id", flat=True))
        uplinks = {obj: [] for obj in mos_id}
        for d in ObjectData._get_collection().find(
            {"_id": {"$in": mos_id}}, {"_id": 1, "uplinks": 1}
        ):
            uplinks[d["_id"]] = d.get("uplinks", [])
        rld = load(mos_id)
        for mo in uplinks:
            for uplink in uplinks[mo]:
                if rld[mo]:
                    if mo in links:
                        links[mo] += [rld[mo][uplink]]
                    else:
                        links[mo] = [rld[mo][uplink]]
    for mo_bi in ifaces_metrics:
        mo_attr = moss[int(mo_bi)]
        mo_id = mo_attr.object_id
        for i in ifaces_metrics[mo_bi]:
            im = ifaces_metrics[mo_bi][i]
            if exclude_zero:
                # Skip idle interfaces when zero loads are excluded
                if im["max_load_in"] == 0 and im["max_load_out"] == 0:
                    continue
            if description and description not in im["description"]:
                continue
            if interface_profile and interface_profile.name not in im["profile"]:
                continue
            row2 = [
                mo_id,
                mo_attr.object_name,
                mo_attr.object_address,
                mo_attr.object_platform,
                mo_attr.object_adm_domain,
                mo_attr.object_segment,
                mo_attr.object_container,
                i,
                im["description"],
                im["bandwidth"],
                im["max_load_in"],
                im["max_load_in_time"],
                im["max_load_out"],
                im["max_load_out_time"],
                im["avg_load_in"],
                im["avg_load_out"],
                im["total_in"],
                im["total_out"],
                "", "", "", "", "", "", "", "", "", "", "",
            ]
            ss = True
            if mo_id in links:
                for ifname_uplink in links[mo_id]:
                    if ifname_uplink in ifaces_metrics[mo_bi]:
                        um = ifaces_metrics[mo_bi][ifname_uplink]
                        # Fill the eleven uplink_* slots (columns 18-28)
                        row2[18:29] = [
                            ifname_uplink, um["description"], um["bandwidth"],
                            um["max_load_in"], um["max_load_in_time"],
                            um["max_load_out"], um["max_load_out_time"],
                            um["avg_load_in"], um["avg_load_out"],
                            um["total_in"], um["total_out"],
                        ]
                        r += [translate_row(row2, cmap)]
                        ss = False
            if ss:
                r += [translate_row(row2, cmap)]
    filename = "metrics_detail_report_%s" % datetime.datetime.now().strftime("%Y%m%d")
    if o_format == "csv":
        response = HttpResponse(content_type="text/csv")
        response["Content-Disposition"] = 'attachment; filename="%s.csv"' % filename
        writer = csv.writer(response, dialect="excel", delimiter=",", quoting=csv.QUOTE_MINIMAL)
        writer.writerows(r)
        return response
    elif o_format == "csv_zip":
        response = BytesIO()
        f = TextIOWrapper(TemporaryFile(mode="w+b"), encoding="utf-8")
        writer = csv.writer(f, dialect="excel", delimiter=";", quotechar='"')
        writer.writerows(r)
        f.seek(0)
        with ZipFile(response, "w", compression=ZIP_DEFLATED) as zf:
            zf.writestr("%s.csv" % filename, f.read())
            zf.filename = "%s.csv.zip" % filename
        response.seek(0)
        response = HttpResponse(response.getvalue(), content_type="application/zip")
        response["Content-Disposition"] = 'attachment; filename="%s.csv.zip"' % filename
        return response
    elif o_format == "xlsx":
        response = BytesIO()
        wb = xlsxwriter.Workbook(response)
        cf1 = wb.add_format({"bottom": 1, "left": 1, "right": 1, "top": 1})
        ws = wb.add_worksheet("Metrics")
        max_column_data_length = {}
        for rn, x in enumerate(r):
            for cn, c in enumerate(x):
                if rn and (
                    r[0][cn] not in max_column_data_length
                    or len(str(c)) > max_column_data_length[r[0][cn]]
                ):
                    max_column_data_length[r[0][cn]] = len(str(c))
                ws.write(rn, cn, c, cf1)
        ws.autofilter(0, 0, rn, cn)
        ws.freeze_panes(1, 0)
        for cn, c in enumerate(r[0]):
            # Set column width
            width = get_column_width(c)
            if enable_autowidth and width < max_column_data_length[c]:
                width = max_column_data_length[c]
            ws.set_column(cn, cn, width=width)
        wb.close()
        response.seek(0)
        response = HttpResponse(response.getvalue(), content_type="application/vnd.ms-excel")
        response["Content-Disposition"] = 'attachment; filename="%s.xlsx"' % filename
        response.close()
        return response
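
The csv_zip branch shared by these reports compresses the CSV in memory and hands the bytes to an HttpResponse. The same flow reduced to a standalone sketch; the rows and archive member name are placeholders.

import csv
from io import BytesIO, TextIOWrapper
from tempfile import TemporaryFile
from zipfile import ZIP_DEFLATED, ZipFile

rows = [["ID", "NAME"], [1, "sw1"], [2, "sw2"]]
buf = BytesIO()
f = TextIOWrapper(TemporaryFile(mode="w+b"), encoding="utf-8")
csv.writer(f, dialect="excel", delimiter=";", quotechar='"').writerows(rows)
f.seek(0)  # flush and rewind before reading the CSV text back
with ZipFile(buf, "w", compression=ZIP_DEFLATED) as zf:
    zf.writestr("report.csv", f.read())  # the CSV becomes one archive member
payload = buf.getvalue()
# In a view: HttpResponse(payload, content_type="application/zip") plus a
# Content-Disposition: attachment header, as in the branches above.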