def save(self, commit=True):
    if not self.edit:
        # New persona: create a Django user with a random password and notify by email.
        persona = super(PersonaForm, self).save(commit=False)
        nUsuario = User()
        nUsuario.username = persona.identificacion
        nUsuario.first_name = persona.nombre
        nUsuario.last_name = persona.apellido
        nUsuario.email = persona.email
        contrase = Util.contrasena_aleatoria()
        nUsuario.set_password(contrase)
        nUsuario.save()
        persona.usuario = nUsuario
        persona.edad = Util.getEdad(persona.fechaNacimiento)
        self.personaServicio.insert(persona)
        correoUtil = CorreoUtil()
        correoUtil.correoCreacionEmpleado(persona.identificacion, contrase, persona)
    else:
        # Existing persona: refresh the derived age and the linked user's name/email.
        persona = super(PersonaForm, self).save(commit=False)
        persona.edad = Util.getEdad(persona.fechaNacimiento)
        nUsuario = persona.usuario
        nUsuario.first_name = persona.nombre
        nUsuario.last_name = persona.apellido
        nUsuario.email = persona.email
        self.personaServicio.update(persona)
def save(self, commit=True):
    if not self.edit:
        activo = super(ActivoForm, self).save(commit=False)
        # Collect the rating of each impact criterion and derive the asset's relevance.
        impLegal = activo.impacto_legal
        impImagen = activo.impacto_imagen
        impConfianza = activo.impacto_confianza
        impInteres = activo.impacto_interes
        impactos = [Criterio.get(impLegal).calificacion,
                    Criterio.get(impImagen).calificacion,
                    Criterio.get(impConfianza).calificacion,
                    Criterio.get(impInteres).calificacion]
        activo.relevancia = Util.calcularaRelevancia(impactos)
        self.activoServicios.insert(activo)
        self.save_m2m()
    else:
        activo = super(ActivoForm, self).save(commit=False)
        # Recompute the relevance from the (possibly edited) impact criteria.
        impLegal = activo.impacto_legal
        impImagen = activo.impacto_imagen
        impConfianza = activo.impacto_confianza
        impInteres = activo.impacto_interes
        impactos = [Criterio.get(impLegal).calificacion,
                    Criterio.get(impImagen).calificacion,
                    Criterio.get(impConfianza).calificacion,
                    Criterio.get(impInteres).calificacion]
        self.save_m2m()
        activo.relevancia = Util.calcularaRelevancia(impactos)
        self.activoServicios.update(activo)
def get(self):
    utileria = Util()
    listadoDePuntos = utileria.obtenerPuntosArea(self.request, 0.03)
    # Serialize the points once and return them as a JSON object.
    mapaRespuesta = {'puntosClave': [punto.to_dict() for punto in listadoDePuntos]}
    self.response.headers['Content-Type'] = 'application/json; charset=utf-8'
    self.response.write(json.dumps(mapaRespuesta))
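# A sketch of the JSON payload the handler above produces, assuming each punto.to_dict()
# yields latitude/longitude style keys (hypothetical field names; the real ones depend on
# the point model). The 0.03 argument is passed straight to Util.obtenerPuntosArea and its
# meaning is defined by that helper.
sample_response = {
    'puntosClave': [
        {'lat': 4.6097, 'lng': -74.0817},
        {'lat': 4.6105, 'lng': -74.0820},
    ],
}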
def realize_avaliation(self, fc_number, fbc_number, with_context=False):
    Util.write_result("relatorio_50", "user;avaliacao_real;avaliacao_gerada;diferenca")
    # Load the ratings base
    if with_context:
        database_ratings = MoviesDao.get_movies(True)
    else:
        database_ratings = MoviesDao.get_movies(False)
    # Initialize the result accumulators
    result_total_hyb = Result(0, 0, 0, 0)
    result_total_fc = Result(0, 0, 0, 0)
    result_total_fbc = Result(0, 0, 0, 0)
    # Iterate over each user to compute the evaluation metrics
    for user_id in range(1, 11):
        print("AVALIANDO USUARIO: " + str(user_id))
        # Generate the collaborative-filtering recommendation for this user
        result_fc = Recommender.recommender_collaborative(database_ratings, str(user_id))
        # Start the hybrid recommendation list
        result_hybrid = result_fc[0:fc_number]
        # Fetch the user's real ratings
        data_ratings = MoviesDao.get_user_ratings(str(user_id))
        # Generate the content-based recommendation
        data_movies = list(MoviesDao.get_all_movies())
        result_fbc = Recommender.recommender_content(result_hybrid, data_ratings, fbc_number, data_movies)
        # Merge the CF and CB results
        result_hybrid.extend(result_fbc)
        # Limit the CF recommendation list to the requested number of recommendations
        result_fc_analyze = self.incrementFC(result_hybrid, result_fc, fc_number)
        # Compute the evaluation metrics for each model
        avaliation_hybrid = self.calculate_result(database_ratings, user_id, result_hybrid, "HIB")
        avaliation_fbc = self.calculate_result(database_ratings, user_id, result_fbc, "FBC")
        avaliation_fc = self.calculate_result(database_ratings, user_id, result_fc_analyze, "FC")
        # Accumulate the overall totals
        result_total_hyb.increment(avaliation_hybrid)
        result_total_fc.increment(avaliation_fc)
        result_total_fbc.increment(avaliation_fbc)
    # Print the results
    self.toString(result_total_hyb, "RSME")
    self.toString(result_total_fc, "FC")
    self.toString(result_total_fbc, "FBC")
def decorador(request, *args, **kwargs):
    url_r = request.get_full_path()
    permisoServicio = PermisoServicios()
    path = f.func_globals['__package__'] + "/" + f.__name__
    user = request.user
    if user.username:
        perm = permisoServicio.getPermisoByUserAndUrl(user, path)
        if perm:
            return f(request, *args, **kwargs)
        else:
            return Util.redireccionar('inicio')
    return Util.redireccionar('inicio')
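# A minimal sketch of the decorator factory an inner wrapper like `decorador` usually
# lives in, and how it would guard a Django view. The outer name `permiso_requerido` is
# an assumption for illustration only; the permission check itself mirrors the wrapper
# above and assumes PermisoServicios and Util are imported as in that module.
def permiso_requerido(f):
    def decorador(request, *args, **kwargs):
        permisoServicio = PermisoServicios()
        path = f.func_globals['__package__'] + "/" + f.__name__  # Python 2 style, as above
        if request.user.username and permisoServicio.getPermisoByUserAndUrl(request.user, path):
            return f(request, *args, **kwargs)
        return Util.redireccionar('inicio')
    return decorador

# Hypothetical usage on one of the views in this module:
# @permiso_requerido
# def activeAllRender(request):
#     ...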
def cambiarEstadoDocumento(self, idDocumento, idEstadoDoc):
    documento = self.get(int(idDocumento))
    estado = EstadoDocumento.get(int(idEstadoDoc))
    if estado.getValor() == EstadoDocumento.PUBLICADO.getValor():
        documento.version = Util.versionDocumento(documento.version, False)
        documento.estadoDocumento = estado
    elif estado.getValor() == EstadoDocumento.REVISION.getValor():
        documento.estadoDocumento = estado
        documento.version = Util.versionDocumento(documento.version, True)
    else:
        documento.estadoDocumento = estado
    # TODO: review how the objects are updated
    self.documentoCrud.update(documento)
def whereObject(self, *args, **kwargs):
    queryset = Util._get_queryset(PersonaPermiso)
    try:
        personaPermiso = queryset.get(*args, **kwargs)
        personaPermiso = self.__changeEnumerated(personaPermiso)
        return personaPermiso
    except queryset.model.DoesNotExist:
        return None
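# A minimal usage sketch for the whereObject/whereList service helpers in this section.
# The service class name PersonaPermisoServicios and the lookup field `persona` are
# assumptions for illustration; any Django field lookup accepted by QuerySet.get() or
# QuerySet.filter() can be passed through *args/**kwargs.
servicio = PersonaPermisoServicios()              # hypothetical concrete service class
permiso = servicio.whereObject(persona=persona)   # single object, or None when no row matches
permisos = servicio.whereList(persona=persona)    # possibly empty list, never None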
def whereList(self, *args, **kwargs):
    queryset = Util._get_queryset(Actividad)
    obj_list = list(queryset.filter(*args, **kwargs))
    if not obj_list:
        obj_list = list()
    for actividad in obj_list:
        actividad = self.changeEnumerated(actividad)
    return obj_list
def whereList(self, *args, **kwargs):
    queryset = Util._get_queryset(Permiso)
    obj_list = list(queryset.filter(*args, **kwargs))
    if not obj_list:
        obj_list = list()
    for permiso in obj_list:
        permiso = self.__changeEnumerated(permiso)
    return obj_list
def whereObject(self, *args, **kwargs):
    queryset = Util._get_queryset(Activo)
    try:
        activo = queryset.get(*args, **kwargs)
        activo = self.__changeEnumerated(activo)
        return activo
    except queryset.model.DoesNotExist:
        return None
def whereObject(self, *args, **kwargs):
    queryset = Util._get_queryset(Cargo)
    try:
        cargo = queryset.get(*args, **kwargs)
        cargo = self.__changeEnumerated(cargo)
        return cargo
    except queryset.model.DoesNotExist:
        return None
def whereList(self, *args, **kwargs):
    queryset = Util._get_queryset(Grupo)
    obj_list = list(queryset.filter(*args, **kwargs))
    if not obj_list:
        obj_list = list()
    for grupo in obj_list:
        grupo = self.__changeEnumerated(grupo)
    return obj_list
def whereObject(self, *args, **kwargs):
    queryset = Util._get_queryset(Grupo)
    try:
        grupo = queryset.get(*args, **kwargs)
        grupo = self.__changeEnumerated(grupo)
        return grupo
    except queryset.model.DoesNotExist:
        return None
def whereObject(self, *args, **kwargs):
    queryset = Util._get_queryset(Departamento)
    try:
        departamento = queryset.get(*args, **kwargs)
        departamento = self.__changeEnumerated(departamento)
        return departamento
    except queryset.model.DoesNotExist:
        return None
def whereList(self, *args, **kwargs):
    queryset = Util._get_queryset(Observacion)
    obj_list = list(queryset.filter(*args, **kwargs))
    if not obj_list:
        obj_list = list()
    for observacion in obj_list:
        observacion = self.__changeEnumerated(observacion)
    return obj_list
def whereObject(self, *args, **kwargs):
    queryset = Util._get_queryset(Observacion)
    try:
        observacion = queryset.get(*args, **kwargs)
        observacion = self.__changeEnumerated(observacion)
        return observacion
    except queryset.model.DoesNotExist:
        return None
def whereList(self, *args, **kwargs):
    queryset = Util._get_queryset(Categoria)
    obj_list = list(queryset.filter(*args, **kwargs))
    if not obj_list:
        obj_list = list()
    for categoria in obj_list:
        categoria = self.__changeEnumerated(categoria)
    return obj_list
def whereList(self, *args, **kwargs):
    queryset = Util._get_queryset(Departamento)
    obj_list = list(queryset.filter(*args, **kwargs))
    if not obj_list:
        obj_list = list()
    for departamento in obj_list:
        departamento = self.__changeEnumerated(departamento)
    return obj_list
def categoryNewRender(request):
    if request.method == 'POST':
        formulario = CategoriaForm(request.POST)
        if formulario.is_valid():
            formulario.save()
            return HttpResponseRedirect(reverse('lCategoriaActivo'))
    else:
        formulario = CategoriaForm()
    return Util.pagina(request, "gestion_activos/categoria_form.html", {'formulario': formulario})
def activeNewRender(request):
    if request.method == 'POST':
        formulario = ActivoForm(request.POST)
        if formulario.is_valid():
            formulario.save()
            return HttpResponseRedirect(reverse('lActivo'))
    else:
        formulario = ActivoForm()
    return Util.pagina(request, "gestion_activos/activo_form.html", {'formulario': formulario})
def getWeeksStartEnd(numWeeks):
    actvWeeks = []
    actvWeeks.append({})
    actvWeeks[0]['end'] = Util.getPreviousSunday(datetime.date.today())
    actvWeeks[0]['start'] = actvWeeks[0]['end'] - datetime.timedelta(days=6)
    for i in range(1, numWeeks):
        actvWeeks.append({})
        actvWeeks[i]['end'] = actvWeeks[i - 1]['start'] - datetime.timedelta(days=1)
        actvWeeks[i]['start'] = actvWeeks[i]['end'] - datetime.timedelta(days=6)
    return actvWeeks
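# A minimal sketch of what getWeeksStartEnd(2) returns, assuming Util.getPreviousSunday
# resolves to the most recent Sunday (an assumption; only the 6-day window arithmetic is
# taken from the function above). Each entry covers a Monday..Sunday window.
import datetime

previous_sunday = datetime.date(2020, 1, 12)   # stand-in for Util.getPreviousSunday(...)
weeks = [
    {'end': previous_sunday,
     'start': previous_sunday - datetime.timedelta(days=6)},    # 2020-01-06 .. 2020-01-12
    {'end': previous_sunday - datetime.timedelta(days=7),
     'start': previous_sunday - datetime.timedelta(days=13)},   # 2019-12-30 .. 2020-01-05
]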
def activeEditRender(request, id):
    activo = activoServicios.get(id)
    if request.method == 'POST':
        formulario = ActivoForm(request.POST, edit=True, instance=activo)
        if formulario.is_valid():
            formulario.save()
            return HttpResponseRedirect(reverse('lActivo'))
    else:
        activoServicios.saveEnumerated(activo)
        formulario = ActivoForm(instance=activo)
    return Util.pagina(request, 'gestion_activos/activo_form.html', {'formulario': formulario})
def categoryEditRender(request, id):
    if request.method == 'POST':
        categoria = categoriaServicios.get(id)
        formulario = CategoriaForm(request.POST, edit=True, instance=categoria)
        if formulario.is_valid():
            formulario.save()
            return HttpResponseRedirect(reverse('lCategoriaActivo'))
    else:
        categoria = categoriaServicios.get(id)
        formulario = CategoriaForm(instance=categoria)
    return Util.pagina(request, 'gestion_activos/categoria_form.html', {'formulario': formulario})
def calculate_result(self, database_ratings, user_id, database_movies, log=None):
    # Overall result accumulators
    sum_rsme = 0
    sum_mae = 0
    sum_mse = 0
    count = 0
    # Iterate over the generated recommendations
    for movie in database_movies:
        # Check whether the recommended movie was actually rated by the user
        if movie[1] in database_ratings[str(user_id)]:
            # Difference between the real rating and the generated one
            dif = database_ratings[str(user_id)][movie[1]] - movie[0]
            # Log of the difference between the real and the generated rating
            '''if log is not None:
                print(log + ": " + str(database_ratings[str(user_id)][movie[1]]) + " - " + str(
                    movie[0]) + " -> " + str(dif))'''
            Util.write_result(
                "relatorio_50",
                str(user_id) + ";" + log + ";" + str(database_ratings[str(user_id)][movie[1]]) + ";" +
                str(movie[0]) + ";" + str(dif))
            # Accumulate the error terms
            sum_rsme += pow(dif, 2)
            sum_mae += abs(dif)
            sum_mse += pow(dif, 2)
            # Count of rated recommendations
            count += 1
    # Return the accumulated result
    return Result(sum_rsme, sum_mae, sum_mse, count)
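# A minimal sketch of how the sums accumulated above are typically turned into the final
# metrics. The helper name `finalize_metrics` is an assumption for illustration; only the
# standard RMSE/MAE/MSE formulas are shown.
import math

def finalize_metrics(sum_rsme, sum_mae, sum_mse, count):
    if count == 0:
        return {'rmse': 0.0, 'mae': 0.0, 'mse': 0.0}
    return {
        'rmse': math.sqrt(sum_rsme / count),   # root mean squared error
        'mae': sum_mae / count,                # mean absolute error
        'mse': sum_mse / count,                # mean squared error
    }

# Example with two rated recommendations whose differences are -0.5 and 1.0:
print(finalize_metrics(1.25, 1.5, 1.25, 2))    # {'rmse': 0.7905..., 'mae': 0.75, 'mse': 0.625}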
def __isValidBox(deviceIdentifier):
    try:
        LogService.logMessage(
            "api.__isValidBox : deviceIdentifier {0}".format(deviceIdentifier),
            LogService.INFO)
        decodedMessage = Util.decrypt(deviceIdentifier)
        LogService.logMessage(
            "api.__isValidBox : decodedMessage {0}".format(decodedMessage),
            LogService.INFO)
        deviceInformation = Util.parseDecodedMessage(decodedMessage)
        LogService.logMessage(
            "api.__isValidBox : device information parsed {0} {1}".format(
                deviceInformation[0], deviceInformation[1]),
            LogService.INFO)
        devices = QueryService.getDevice(deviceInformation[0], deviceInformation[1])
        if len(devices) > 0:
            device = devices[0]
            LogService.logMessage(
                "api.__isValidBox : device found {0}".format(device.device_id),
                LogService.INFO)
            if (datetime.now() - datetime.strptime(
                    device.device_deactivationdate,
                    "%d/%m/%Y %H:%M:%S")).total_seconds() > 0:
                LogService.logMessage(
                    "api.__isValidBox : device deactivated {0}".format(device.device_id),
                    LogService.INFO)
                return False
            LogService.logMessage(
                "api.__isValidBox : device active {0}".format(device.device_id),
                LogService.INFO)
            return True
        else:
            LogService.logMessage("api.__isValidBox : device not found",
                                  LogService.INFO)
            return False
    except Error as e:
        LogService.logMessage("api.__isValidBox : {0} {1}".format(e.errno, e.strerror))
        return False
def test_crypt(self):
    print("test_crypt\n")
    # message is CLIENT_ID + # + CPU_ID
    clientID = "1111"
    cpuID = "52EFD2D148F17AF3AC967EE8F4E736D7"
    separator = settings.SEPARATOR * (48 - (len(clientID) + len(cpuID)))
    message = clientID + separator + cpuID
    print("MESSAGE : \n" + message)
    encoded = Util.encrypt(message)
    print("ENCODED : \n" + str(encoded))
    decoded = Util.decrypt(encoded)
    print("DECODED : \n" + str(decoded))
    self.assertEqual(message, decoded, "MESSAGE SUCCESSFULLY DECRYPTED")
    valuesAfterDecrypt = Util.parseDecodedMessage(decoded)
    self.assertEqual(clientID, valuesAfterDecrypt[0], "CLIENT ID MUST EQUAL AFTER DECRYPT")
    self.assertEqual(cpuID, valuesAfterDecrypt[1], "CPU ID MUST EQUAL AFTER DECRYPT")
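# A small worked example of the fixed-width padding used in the test above: the plaintext
# is always 48 characters, so the separator run fills whatever the client id and CPU id
# leave over. SEPARATOR is assumed here to be a single character such as "#"; the real
# value comes from the project's settings module.
clientID = "1111"                                  # 4 characters
cpuID = "52EFD2D148F17AF3AC967EE8F4E736D7"         # 32 characters
padding = 48 - (len(clientID) + len(cpuID))        # 12 separator characters
message = clientID + "#" * padding + cpuID
assert len(message) == 48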
def generateCycleSummary(ex):
    strLst = []
    strLst.append('\tBike ride Count: ')
    strLst.append(str(ex.ct))
    strLst.append('\n')
    strLst.append('\tBiked ')
    strLst.append(str(ex.distTot))
    strLst.append(' Miles')
    strLst.append('\n')
    strLst.append('\tBiked for ')
    strLst.append(Util.convertTimeFromSeconds(ex.durTot))
    strLst.append('\n')
    return ''.join(strLst)
def generateSwimSummary(ex):
    strLst = []
    strLst.append('\tSwim Count: ')
    strLst.append(str(ex.ct))
    strLst.append('\n')
    strLst.append('\tSwam ')
    strLst.append(str(ex.distTot))
    strLst.append(' yards')
    strLst.append('\n')
    strLst.append('\tSwam for ')
    strLst.append(Util.convertTimeFromSeconds(ex.durTot))
    strLst.append('\n')
    return ''.join(strLst)
def get_movies(with_context):
    # Ratings collection
    col_ratings = Connection.db()["ratings"]
    # Build the ratings base
    base_ratings = {}
    for data in col_ratings.find():
        base_ratings.setdefault(data['userId'], {})
        # Context filter -> keep only ratings matching the context (weekend or not)
        if with_context and not Util.is_context_dayweek(
                datetime.fromtimestamp(float(data['timestamp'])).weekday()):
            continue
        base_ratings[data['userId']][data['movieId']] = float(data['rating'])
    return base_ratings
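# A minimal sketch of the nested dictionary get_movies() builds: user id -> movie id ->
# rating. The ids and ratings below are made-up sample values for illustration only; the
# key types are whatever the ratings collection stores (strings here).
base_ratings = {
    '1': {'31': 2.5, '1029': 3.0},
    '2': {'31': 4.0},
}
# Lookups then mirror how calculate_result() reads it:
real_rating = base_ratings['1'].get('31')   # 2.5, or None if the user never rated the movie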
def generateHtmlSwimSummary(ex):
    strLst = []
    strLst.append(r'<tr>')
    strLst.append(r'<td align="center">')
    # strLst.append(r'<b>')
    strLst.append(str(ex.ct))
    strLst.append(' swims')
    # strLst.append(r'</b>')
    strLst.append(r'</td>')
    strLst.append(r'<td align="center">')
    strLst.append(str(ex.distTot))
    strLst.append(' yards swam')
    strLst.append(r'</td>')
    strLst.append(r'<td align="center">')
    strLst.append('Swam for ')
    strLst.append(Util.convertTimeFromSeconds(ex.durTot))
    strLst.append(r'</td>')
    strLst.append(r'</tr>')
    return ''.join(strLst)
def generateHtmlCycleSummary(ex):
    strLst = []
    strLst.append(r'<tr>')
    strLst.append(r'<td align="center">')
    # strLst.append(r'<b>')
    strLst.append(str(ex.ct))
    strLst.append(' bike rides')
    # strLst.append(r'</b>')
    strLst.append('</td>')
    strLst.append('<td align="center">')
    strLst.append(str(ex.distTot))
    strLst.append(' Miles Biked')
    strLst.append('</td>')
    strLst.append('<td align="center">')
    strLst.append('Biked for ')
    strLst.append(Util.convertTimeFromSeconds(ex.durTot))
    strLst.append(r'</td>')
    strLst.append(r'</tr>')
    return ''.join(strLst)
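# A minimal sketch of how the row generators above might be combined into one HTML table
# for a weekly summary. The wrapper function and the header labels are assumptions for
# illustration; only the <tr> strings come from the generators above.
def buildSummaryTable(rows):
    parts = ['<table border="1" cellpadding="4">',
             '<tr><th>Count</th><th>Distance</th><th>Duration</th></tr>']
    parts.extend(rows)   # e.g. generateHtmlSwimSummary(ex), generateHtmlCycleSummary(ex)
    parts.append('</table>')
    return ''.join(parts)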
def generateHtmlRunSummary(currWk, prevWk):
    strLst = []
    currEx = currWk.exRun
    prevEx = ''
    if 'exRun' in vars(prevWk):
        prevEx = prevWk.exRun
    else:
        prevEx = ExerciseInfo('Running')

    strLst.append(r'<tr>')

    # Count of Runs
    wkDiff = currEx.ct - prevEx.ct
    diffInd = ''
    if (wkDiff < 0):
        diffInd = 'down'
        wkDiff = abs(wkDiff)
    else:
        diffInd = 'up'
    strLst.append(r'<td align="center" width="33%">')
    strLst.append('<b>')
    strLst.append('{0:.{1}f}'.format(currEx.ct, 0))
    strLst.append(r' runs')
    strLst.append('</b>')
    strLst.append(r'<br>')
    strLst.append(' (')
    strLst.append('{0:.{1}f} '.format(wkDiff, 0))
    strLst.append(diffInd)
    strLst.append(')')
    strLst.append(r'</td>')

    # Run Distances
    wkDiff = currEx.distTot - prevEx.distTot
    if (wkDiff < 0):
        diffInd = 'down'
        wkDiff = abs(wkDiff)
    else:
        diffInd = 'up'
    strLst.append(r'<td align="center" width="33%">')
    strLst.append('<b>')
    strLst.append('{0:.{1}f}'.format(currEx.distTot, 2))
    strLst.append(' Miles Run')
    strLst.append(r'</b><br>')
    strLst.append(' (')
    strLst.append('{0:.{1}f} '.format(wkDiff, 2))
    strLst.append(diffInd)
    strLst.append(')')
    strLst.append(r'</td>')

    # Run Times
    wkDiff = currEx.durTot - prevEx.durTot
    if (wkDiff < 0):
        diffInd = ' down'
        wkDiff = abs(wkDiff)
    else:
        diffInd = ' up'
    strLst.append(r'<td align="center" width="34%">')
    strLst.append('<b>')
    strLst.append('Ran for ')
    strLst.append(Util.convertTimeFromSeconds(currEx.durTot))
    strLst.append(r'</b><br>')
    strLst.append(' (')
    strLst.append(Util.convertTimeFromSeconds(wkDiff))
    strLst.append(diffInd)
    strLst.append(')')
    strLst.append(r'</td>')
    strLst.append(r'</tr>')

    strLst.append(r'<tr>')
    strLst.append(r'<td>')
    strLst.append(r'<b>Easy Runs: </b><br>')
    strLst.append(Util.convertTimeFromSeconds(currEx.avgEasyPace))
    strLst.append(' avg pace <br>')
    strLst.append(str(round(currEx.avgEasyHr, 2)))
    strLst.append(' avg heart rate')
    strLst.append(r'</td>')
    strLst.append(r'<td colspan="2">')
    if ('avgLongDur' in vars(currEx)):
        strLst.append(r'<b>Long Run: </b><br>')
        strLst.append(Util.convertTimeFromSeconds(currEx.avgLongDur))
        strLst.append(' for ')
        strLst.append(str(currEx.avgLongDist))
        strLst.append(' miles <br>')
        strLst.append('Avg pace: ')
        strLst.append(Util.convertTimeFromSeconds(currEx.avgLongPace))
        strLst.append('<br>')
        strLst.append('Avg heart rate: ')
        strLst.append(str(currEx.avgLongHr))
    else:
        strLst.append('No Long Run')
    strLst.append(r'</td>')
    strLst.append(r'</tr>')
    return ''.join(strLst)
def main():
    global device_list, verbose
    start_time = time.time()
    # example:
    # - manually set the in/out directories, experiment_list, device_list below
    # - call: python3 process_traces.py --override True -p 1
    print("Processing pcaps")
    print("Start time: %s\n" % time.strftime("%A %d %B %Y %H:%M:%S %Z", time.localtime(start_time)))

    # Options
    parser = argparse.ArgumentParser(
        usage="Look in process_traces.py to see supported arguments", add_help=False)
    # parser.add_argument("-i", dest="input_dir", default="/home/robin/datasets/moniotr")
    # parser.add_argument("-i", dest="input_dir", default="/home/robin/datasets/yourthings")
    parser.add_argument(
        "-o", dest="output_dir",
        default="/home/robin/datasets/scratch/test/moniotr_test5")
    # parser.add_argument("-o", dest="output_dir", default="/home/robin/datasets/scratch/test/yourthings_test1")
    # parser.add_argument("-d", dest="device_list_path", default="/home/robin/datasets/moniotr/devices.json")
    # parser.add_argument("-d", dest="device_list_path", default="/home/robin/datasets/yourthings/devices.json")
    # experiment_list must be a comma-separated list of directories that are recursively traversed, looking for pcaps to process
    # parser.add_argument("-l", dest="experiment_list", default="iot-data/uk/t-philips-hub,iot-data/uk/smarter-coffee-mach,iot-data/uk/echoplus")
    # parser.add_argument("-l", dest="experiment_list", default="12")
    # parser.add_argument("-l", dest="experiment_list", default="iot-data/uk/smarter-coffee-mach")
    # parser.add_argument("-l", dest="experiment_list", default="iot-data/uk/echoplus/volume")
    # parser.add_argument("-l", dest="experiment_list", default="../scratch/TEMP/tls_1_3.pcapng")  # TLS 1.3 example trace
    # parser.add_argument("-l", dest="experiment_list", default="iot-data/uk/appletv/power/2019-04-26_12:59:01.247s.pcap")  # has TLS 1.3 data
    # parser.add_argument("-l", dest="experiment_list", default="../scratch/TEMP/2019-04-26_12_23_35.222s.pcap")  # spurious retransmits
    # parser.add_argument("-l", dest="experiment_list", default="iot-data/uk/echoplus")  # interval tester 1
    # parser.add_argument("-l", dest="experiment_list", default="iot-data/uk/allure-speaker")  # interval tester 2
    # parser.add_argument("-l", dest="experiment_list", default="iot-data/uk/echoplus")
    # parser.add_argument("-l", dest="experiment_list", default="iot-data/uk/samsungtv-wired")  # local_menu
    # parser.add_argument("-l", dest="experiment_list", default="iot-data/uk/blink-camera/alexa_watch")
    # parser.add_argument("-l", dest="experiment_list", default="iot-data/uk/ring-doorbell/android_wan_watch")
    # parser.add_argument("-l", dest="experiment_list", default="iot-data/uk/bosiwo-camera-wired")
    # parser.add_argument("-l", dest="experiment_list", default="iot-data/uk/yi-camera")
    # parser.add_argument("-l", dest="experiment_list", default="iot-data/uk/smarter-coffee-mach")
    # parser.add_argument("-l", dest="experiment_list", default="iot-data/uk/appletv")
    # parser.add_argument("-l", dest="experiment_list", default="iot-data/uk/blink-security-hub")
    # parser.add_argument("-l", dest="experiment_list", default="iot-data/uk/charger-camera")
    # parser.add_argument("-l", dest="experiment_list", default="iot-data/uk/dlink-camera")
    # parser.add_argument("-l", dest="experiment_list", default="iot-data/uk/echodot")
    # parser.add_argument("-l", dest="experiment_list", default="iot-data/uk/echospot")
    # parser.add_argument("-l", dest="experiment_list", default="iot-data/uk/firetv")
    # parser.add_argument("-l", dest="experiment_list", default="iot-data/uk/smarter-coffee-mach/alexa_on")
    # parser.add_argument("-l", dest="experiment_list", default="iot-data/uk/smarter-coffee-mach/power")
    # parser.add_argument("-l", dest="experiment_list", default="iot-data/uk/smarter-coffee-mach,iot-data/uk/ring-doorbell")
    # parser.add_argument("-l", dest="experiment_list", default="../scratch")
    # parser.add_argument("-l", dest="experiment_list", default="iot-data/uk/smarter-coffee-mach/power,iot-data/uk/smarter-coffee-mach/android_wan_on")
    # parser.add_argument("-l", dest="experiment_list", default="iot-data/uk/smarter-coffee-mach/")
    # parser.add_argument("-l", dest="experiment_list", default="iot-data/uk/smarter-coffee-mach/power,iot-data/uk/smarter-coffee-mach/android_wan_on,iot-data/uk/smarter-coffee-mach/android_lan_on")
    # parser.add_argument("-l", dest="experiment_list", default="iot-data/uk/magichome-strip/power,iot-data/uk/magichome-strip/android_wan_on,iot-data/uk/magichome-strip/android_lan_on")
    # parser.add_argument("-l", dest="experiment_list", default="iot-data/us/philips-bulb/power,iot-data/us/philips-bulb/android_wan_on,iot-data/us/philips-bulb/android_lan_on")
    # parser.add_argument("-l", dest="experiment_list", default="../thebasement/wemo_initial")
    #
    # parser.add_argument("-l", dest="experiment_list", default="iot-data/uk/smarter-coffee-mach/power,iot-idle/uk/smarter-coffee-mach,iot-data/uk/smarter-coffee-mach/android_wan_on,iot-data/uk/smarter-coffee-mach/android_lan_on")
    parser.add_argument("-i", dest="input_dir", default="/home/robin/datasets/yourthings")
    parser.add_argument("-d", dest="device_list_path",
                        default="/home/robin/datasets/yourthings/devices.json")
    parser.add_argument("-l", dest="experiment_list",
                        default="11/eth1-20180411.0000.1523422800")  # small one at 76MB # takes about 40s
    # parser.add_argument("-l", dest="experiment_list", default="11/eth1-20180411.2020.1523496000")  # medium one at 181 MB # takes about 1 minute
    # parser.add_argument("-l", dest="experiment_list", default="11/eth1-20180411.0055.1523426100")  # big one at 227MB # takes about 1.5 minutes
    # parser.add_argument("-l", dest="experiment_list", default="11/eth1-20180411.0410.1523437800")  # biggest one at 345 MB # takes about 2 minutes
    parser.add_argument("-v", dest="verbose", default=True)
    parser.add_argument("-p", dest="process_count", default=4)
    parser.add_argument("--override", dest="override_results", default=False)
    parser.add_argument("-h", dest="help", action="store_true", default=False)

    # Parse Arguments
    args = parser.parse_args()
    if args.help:
        print_usage(0)

    errors = False
    experiment_list = []
    if args.experiment_list is not None:
        experiment_list = args.experiment_list.split(",")
    if len(experiment_list) == 0:
        errors = True
        print("Error: experiment list was empty!", file=sys.stderr)

    # check -i input_dir
    if args.input_dir == "":
        errors = True
        print("Error: Pcap input directory (-i) required.", file=sys.stderr)
    elif Util.check_dir(args.input_dir, "Input pcap directory"):
        errors = True

    # check -o output_dir
    if args.output_dir == "":
        errors = True
        print("Error: output directory (-o) required.", file=sys.stderr)
    elif Util.check_dir(args.output_dir, "Output directory"):
        errors = True

    device_list = []
    if args.device_list_path == "":
        errors = True
        print("Error: device list (-d) required.", file=sys.stderr)
    else:
        if not os.path.isfile(args.device_list_path):
            errors = True
            print("Error: specified device list file does not exist. " + str(args.device_list_path),
                  file=sys.stderr)
        else:
            with open(args.device_list_path, "r") as f:
                device_list = json.loads(f.read())
            if len(device_list) == 0:
                errors = True
                print("Error: device list file was empty or not JSON. " + str(args.device_list_path),
                      file=sys.stderr)
            else:
                device_list = Device.DeviceList(device_list)

    if errors:
        print_usage(1)

    process_count = int(args.process_count)
    override_results = args.override_results
    verbose = args.verbose

    # Create the groups to run analysis with processes
    raw_files = [[] for _ in range(process_count)]

    # Split the pcap files into num_proc groups
    # TODO: adjust this logic for when we start processing actual batches
    def walk_directory(dir_path, output_list, output_index):
        print("Walk dir " + dir_path)
        if os.path.isfile(dir_path):
            raw_files[output_index].append(dir_path)
            output_index += 1
            if output_index >= process_count:
                output_index = 0
            return output_index
        for root, dirs, files in os.walk(dir_path):
            for filename in files:
                if (filename.endswith(".pcap") and not filename.startswith(".")
                        ) or filename.endswith(".pcapng") or filename.startswith(
                            "eth"):  # one of the datasets has pcaps without extensions but starts with "eth"
                    raw_files[output_index].append(os.path.join(root, filename))
                    output_index += 1
                    if output_index >= process_count:
                        output_index = 0
            for dir in dirs:
                output_index = walk_directory(os.path.join(root, dir), raw_files, output_index)
        return output_index

    for experiment in experiment_list:
        walk_directory(args.input_dir + os.path.sep + experiment, raw_files, 0)

    if override_results:
        if verbose:
            print("Clearing output directory %s" % args.output_dir)
        for root, dirs, files in os.walk(args.output_dir):
            for f in files:
                os.unlink(os.path.join(root, f))
            for d in dirs:
                shutil.rmtree(os.path.join(root, d))

    gc.collect()
    if verbose:
        print("Analyzing input pcap files...")

    # TODO: refactor this into a proper class
    params = {
        "device_list": device_list,
        "output_dir": args.output_dir,
        "verbose": verbose
    }

    # run analysis with num_proc processes
    procs = []
    for pid, files in enumerate(raw_files):
        p = Process(target=run, args=(pid, files, params))
        procs.append(p)
        p.start()

    for p in procs:
        p.join()

    end_time = time.time()
    print("\nEnd time: %s" % time.strftime("%A %d %B %Y %H:%M:%S %Z", time.localtime(end_time)))

    # Calculate elapsed time
    sec = round(end_time - start_time)
    hrs = sec // 3600
    if hrs != 0:
        sec = sec - hrs * 3600
    minute = sec // 60
    if minute != 0:
        sec = sec - minute * 60
    print("Elapsed time: %s hours %s minutes %s seconds" % (hrs, minute, sec))

    if verbose:
        print("\nAnalysis finished.")
def categoryAllRender(request):
    categorias = categoriaServicios.getAll()
    return Util.pagina(request, 'gestion_activos/categoria_listar.html', {'categorias': categorias})
def activeRender(request, id):
    if request.method == 'GET':
        activo = activoServicios.get(id)
        return Util.pagina(request, "gestion_activos/activo_detalle.html", {'activo': activo})
# To change this license header, choose License Headers in Project Properties.
# To change this template file, choose Tools | Templates
# and open the template in the editor.

__author__ = "asim"
__date__ = "$19.Ara.2015 23:11:46$"

import uuid

import settings
from util.Util import Util

if __name__ == "__main__":
    print("test_crypt\n")
    # message is CLIENT_ID + # + CPU_ID
    clientID = "1"
    cpuID = uuid.uuid4().hex
    separator = settings.SEPARATOR * (48 - (len(clientID) + len(cpuID)))
    message = clientID + separator + cpuID
    print("MESSAGE : \n" + message)
    encoded = Util.encrypt(message)
    print("ENCODED : \n" + str(encoded))
    decoded = Util.decrypt(encoded)
    print("DECODED : \n" + str(decoded))
def activeAllRender(request):
    return Util.pagina(request, 'gestion_activos/activo_listar.html', {})
def generateRunSummary(currWk, prevWk):
    strLst = []
    currEx = currWk.exRun
    prevEx = ''
    if 'exRun' in vars(prevWk):
        prevEx = prevWk.exRun
    else:
        prevEx = ExerciseInfo('Running')

    # Count of Runs
    wkDiff = currEx.ct - prevEx.ct
    diffInd = ''
    if (wkDiff < 0):
        diffInd = 'down'
        wkDiff = abs(wkDiff)
    else:
        diffInd = 'up'
    strLst.append('\tRuns Count: ')
    strLst.append('{0:.{1}f}'.format(currEx.ct, 0))
    strLst.append(' (')
    strLst.append('{0:.{1}f} '.format(wkDiff, 0))
    strLst.append(diffInd)
    strLst.append(')')
    strLst.append('\n')

    # Run Distances
    wkDiff = currEx.distTot - prevEx.distTot
    if (wkDiff < 0):
        diffInd = 'down'
        wkDiff = abs(wkDiff)
    else:
        diffInd = 'up'
    strLst.append('\tRan ')
    strLst.append('{0:.{1}f}'.format(currEx.distTot, 2))
    strLst.append(' Miles')
    strLst.append(' (')
    strLst.append('{0:.{1}f} '.format(wkDiff, 2))
    strLst.append(diffInd)
    strLst.append(')')
    strLst.append('\n')

    # Run Times
    wkDiff = currEx.durTot - prevEx.durTot
    if (wkDiff < 0):
        diffInd = ' down'
        wkDiff = abs(wkDiff)
    else:
        diffInd = ' up'
    strLst.append('\tRan for ')
    strLst.append(Util.convertTimeFromSeconds(currEx.durTot))
    strLst.append(' (')
    strLst.append(Util.convertTimeFromSeconds(wkDiff))
    strLst.append(diffInd)
    strLst.append(')')
    strLst.append('\n')

    strLst.append('\tEasy Runs avg pace was ')
    strLst.append(Util.convertTimeFromSeconds(currEx.avgEasyPace))
    strLst.append(' with avg heart rate of ')
    strLst.append(str(round(currEx.avgEasyHr, 2)))
    strLst.append('\n')

    if ('avgLongDur' in vars(currEx)):
        strLst.append('\tLong run: ')
        strLst.append(Util.convertTimeFromSeconds(currEx.avgLongDur))
        strLst.append(' for ')
        strLst.append(str(currEx.avgLongDist))
        strLst.append(' miles, with avg pace ')
        strLst.append(Util.convertTimeFromSeconds(currEx.avgLongPace))
        strLst.append(', and avg heart rate ')
        strLst.append(str(currEx.avgLongHr))
        strLst.append('\n')
    return ''.join(strLst)
def main(): global device_list, verbose start_time = time.time() print("Creating intermediate JSON files for faster processing later") print("Start time: %s\n" % time.strftime("%A %d %B %Y %H:%M:%S %Z", time.localtime(start_time))) #Options parser = argparse.ArgumentParser( usage="Look in create_json.py to see supported arguments", add_help=False) parser.add_argument("-i", dest="input_dir", default="/home/robin/datasets/yourthings/11") parser.add_argument("-o", dest="output_dir", default="/home/robin/datasets/yourthings/11_json") parser.add_argument("-v", dest="verbose", default=True) parser.add_argument("--override", dest="override_results", default=True) parser.add_argument("-h", dest="help", action="store_true", default=False) #Parse Arguments args = parser.parse_args() if args.help: print_usage(0) errors = False #check -i input_dir if args.input_dir == "": errors = True print("Error: Results input directory (-i) required.", file=sys.stderr) elif Util.check_dir(args.input_dir, "Results directory"): errors = True #check -i output_dir if args.output_dir == "": errors = True print("Error: output directory (-o) required.", file=sys.stderr) elif Util.check_dir(args.output_dir, "Output directory"): errors = True if errors: print_usage(1) process_count = 1 # args.process_count override_results = args.override_results verbose = args.verbose #Create the groups to run analysis with processes raw_files = [[] for _ in range(process_count)] # Split the pcap files into num_proc groups # TODO: adjust this logic for when we start processing actual batches def walk_directory(dir_path, output_list, output_index): print("Walk dir " + dir_path + "," + str(output_index)) for root, dirs, files in os.walk(dir_path): for filename in files: if ( filename.endswith(".pcap") and not filename.startswith(".") ) or filename.endswith(".pcapng") or filename.startswith( "eth" ): # one of the datasets has pcaps without extensions but starts with "eth" raw_files[output_index].append(os.path.join( root, filename)) output_index += 1 if output_index >= process_count: output_index = 0 for dir in dirs: output_index = walk_directory(os.path.join(root, dir), raw_files, output_index) return output_index walk_directory(args.input_dir, raw_files, 0) if override_results: if verbose: print("Clearing output directory %s" % args.output_dir) for root, dirs, files in os.walk(args.output_dir): for f in files: os.unlink(os.path.join(root, f)) for d in dirs: shutil.rmtree(os.path.join(root, d)) gc.collect() if verbose: print("Summarizing device result files...") # TODO: refactor this into a proper class params = {"output_dir": args.output_dir, "verbose": verbose} # run analysis with num_proc processes procs = [] for pid, files in enumerate(raw_files): p = Process(target=run, args=(pid, files, params)) procs.append(p) p.start() for p in procs: p.join() end_time = time.time() print("\nEnd time: %s" % time.strftime("%A %d %B %Y %H:%M:%S %Z", time.localtime(end_time))) #Calculate elapsed time sec = round(end_time - start_time) hrs = sec // 3600 if hrs != 0: sec = sec - hrs * 3600 minute = sec // 60 if minute != 0: sec = sec - minute * 60 print("Elapsed time: %s hours %s minutes %s seconds" % (hrs, minute, sec)) if verbose: print("\nTransformation finished.")