def get_config(cfg_name, log_path=None, debug=False):
    """Open and return a file handle for ../conf.d/<cfg_name>.yml.

    For the special 'targets' config the file is opened read/write and an
    exclusive non-blocking flock is taken, retrying once per second while
    another process holds the lock.  If opening/locking fails for any reason
    other than the lock being held, a fresh empty file is created instead.
    Any other config name is opened read-only.

    NOTE: the caller owns the returned handle (and the lock, for 'targets')
    and is responsible for closing it.
    """
    cfg_path = os.path.dirname(os.path.realpath(__file__)) + "/../conf.d/" + cfg_name + ".yml"
    if cfg_name == 'targets':
        while True:
            try:
                ymlfile = open(cfg_path, 'r+')
                # Non-blocking exclusive lock: raises IOError(EAGAIN) when
                # another process already holds it.
                fcntl.flock(ymlfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
                break
            except IOError as e:
                if e.errno != errno.EAGAIN:
                    # Not a lock conflict (e.g. the file is missing): start
                    # over with a brand-new file.
                    logger.write(str(datetime.now()) + " ERROR " + str(e) + ". New file will be created.", log_path)
                    ymlfile = open(cfg_path, 'w')
                    break
                else:
                    # Lock held elsewhere: close our handle and retry.
                    if debug is True:
                        logger.write(str(datetime.now()) + " DEBUG " + str(e) + ". File targets.yml is locked. Waiting ...", log_path)
                    ymlfile.close()
                    time.sleep(1)
    else:
        ymlfile = open(cfg_path, 'r')
    return ymlfile
def connect(self, _localMACAddress):
    """Bind an RFCOMM server socket on the given local MAC address and
    advertise the service over SDP.

    Returns True on success, False on a bluetooth error; the outcome is also
    recorded in self.successfulConnection either way.
    """
    self.localMACAddress = _localMACAddress
    try:
        # Create a new Bluetooth socket using the RFCOMM transport protocol
        self.serverSocketRFCOMM = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
        # Bind some available port on the local adapter
        self.serverSocketRFCOMM.bind((self.localMACAddress, bluetooth.PORT_ANY))
        # Number of queued (not yet accepted) connections allowed before
        # refusing new incoming ones
        self.serverSocketRFCOMM.listen(CONNECTIONS)
        # Timeout applied to the 'accept' call
        self.serverSocketRFCOMM.settimeout(TIMEOUT)
        # Use SDP to advertise our service
        bluetooth.advertise_service(self.serverSocketRFCOMM, self.localServiceName, service_id = self.localUUID, service_classes = [self.localUUID, bluetooth.SERIAL_PORT_CLASS], profiles = [bluetooth.SERIAL_PORT_PROFILE])
        # Store the port assigned by the 'bind'
        self.localPortRFCOMM = self.serverSocketRFCOMM.getsockname()[1]
        self.bluetoothTransmitter = bluetoothTransmitter.BluetoothTransmitter()
        self.successfulConnection = True
        return True
    except bluetooth._bluetooth.error as bluetoothError:
        # Python 2 style: the bluetooth error is indexable as (code, message)
        logger.write('ERROR', '[BLUETOOTH] Código de error %s - %s.' % (bluetoothError[0], bluetoothError[1]))
        self.successfulConnection = False
        return False
def _endTurn(self, pid, post_turn_data):
    """Apply one player's end-of-turn choices to the game state.

    post_turn_data keys used: 'yaniv' (bool), 'pick_up_idx' (0 = draw from
    deck, otherwise an index into the discard pile), 'discards' (cards the
    player gave up).  Updates yaniv state, last_pick_up, lucky_draw, the
    deck, and the player's hand.
    """
    if post_turn_data['yaniv']:
        # Round ends immediately; remember who called it.
        self.yaniv = True
        self.yaniv_pid = pid
        return
    # if chosen, draw card from deck
    self.lucky_draw = False
    if post_turn_data['pick_up_idx'] == 0:
        new_card = self.deck.drawCard()
        # check for lucky draw
        self.lucky_draw = self._isLuckyDraw(post_turn_data['discards'], new_card)
        if self.lucky_draw:
            self.last_pick_up = new_card
        else:
            # Placeholder shown to other players when the draw stays hidden
            self.last_pick_up = ["D", "D", 0]
    # else draw top discard card
    else:
        new_card = self.deck.drawDiscard(post_turn_data['pick_up_idx'])
        self.last_pick_up = new_card
    logger.write("Player discards:\n" + self._cardsString(post_turn_data['discards']))
    logger.write("Player picks up:\n" + new_card[0] + new_card[1])
    # A lucky draw goes straight back to the discard pile, not the hand.
    if not self.lucky_draw:
        self._insertCard(pid, new_card)
    # get rid of client's discards
    self.deck.discardCards(post_turn_data['discards'])
    for discard in post_turn_data['discards']:
        if self.players[pid]['hand'].count(discard):
            self.players[pid]['hand'].remove(discard)
def load_schedule(config: configparser.ConfigParser, check_config):
    '''Load a schedule from a configuration file and register it with the scheduler.

    Each option in the [Schedule] section maps a weekday name to a
    comma-separated list of "[start stop]" time windows; each window is
    registered via the matching weekday helper.  Finally the config watcher
    is registered, a daily log-clear job is scheduled, and check_config()
    is invoked once.
    '''
    schedule_map = {
        'monday': monday,
        'tuesday': tuesday,
        'wednesday': wednesday,
        'thursday': thursday,
        'friday': friday,
        'saturday': saturday,
        'sunday': sunday
    }
    logger.write('Loading schedule...')
    for option in config.options('Schedule'):
        raw_value = config.get('Schedule', option).replace('"', '')
        entries = raw_value.split(',')
        if entries[0] == '':
            # Day has no scheduled windows
            continue
        for entry in entries:
            # Fix: the original named this local 'time', shadowing the
            # stdlib 'time' module for the rest of the loop body.
            window = entry.strip(' []')
            parts = window.split(' ')
            start = parts[0]
            stop = parts[1]
            schedule_map[option](start, stop)
    register_config(check_config, config)
    schedule.every(1).days.do(logger.clear)
    check_config()
def forward(self, pids, endpoints, **kwargs):
    """Compute the combined batch-hard triplet + cross-entropy loss.

    endpoints["triplet"] holds embeddings fed through batch-hard mining;
    endpoints["soft"] holds softmax logits for cross-entropy.  Per-endpoint
    losses plus the overall values are logged, and the summed (still
    differentiable) total loss is returned.
    """
    batch_hard_loss = 0.0
    bh_losses = []
    for triplet in endpoints["triplet"]:
        # Pairwise distances within the batch, then hardest pos/neg mining
        dist = calc_cdist(triplet, triplet)
        bh = self.batch_hard(dist, pids)
        batch_hard_loss += bh
        f_bh = float(var2num(torch.mean(bh)))
        bh_losses.append(f_bh)
    bh_loss_overall = float(var2num(torch.mean(batch_hard_loss)))
    cross_entropy_loss = 0.0
    ce_losses = []
    for softmax in endpoints["soft"]:
        ce = self.cross_entropy(softmax, pids)
        cross_entropy_loss += ce
        f_ce = float(var2num(ce))
        ce_losses.append(f_ce)
    ce_loss_overall = float(var2num(cross_entropy_loss))
    print("bh loss {:.3f} ce loss: {:.3f}".format(bh_loss_overall, ce_loss_overall))
    # Log layout: [overall bh, per-endpoint bh..., overall ce, per-endpoint ce...]
    loss_info = [bh_loss_overall] + bh_losses + [ce_loss_overall] + ce_losses
    log.write("loss", loss_info, dtype=np.float32)
    return batch_hard_loss + cross_entropy_loss
def _displayYaniv(self, stdscr, update_data):
    """Show the end-of-round 'Yaniv' screen.

    Displays who called Yaniv, every player's final hand and point total,
    then a live countdown (round_break seconds) until the next round.
    """
    # get rid of all cards and messages
    self._eraseCards(True)
    self._eraseCards(False)
    self.renderMessage("")
    cur_pid = update_data['cur_pid']
    yaniv_name = update_data['players'][cur_pid]['name']
    self.yaniv_win.addstr(1, 0, yaniv_name + " has called Yaniv!")
    logger.write(yaniv_name + " has called Yaniv!")
    points_str = ""
    for pid, player in enumerate(update_data['players']):
        points_str += player['name'] + " finished with: "
        for card in player['hand']:
            points_str += card[0] + " "
        points_str += "for a total of " + str(update_data['hand_sums'][pid]) + " points\n"
    logger.write(points_str)
    self.yaniv_win.addstr(3, 0, points_str)
    self.yaniv_win.refresh()
    # display a countdown timer until the next round
    timer = self.round_break
    self.yaniv_win.addstr(self.yaniv_height - 1, 1, "Continue in: ")
    while timer > 0:
        # Blank the previous digit before drawing the new one
        self.yaniv_win.addstr(self.yaniv_height - 1, 14, " ")
        self.yaniv_win.addstr(self.yaniv_height - 1, 14, str(timer))
        self.yaniv_win.refresh()
        time.sleep(1)
        timer -= 1
    self.yaniv_win.erase()
    self.yaniv_win.refresh()
def verifyGsmConnection(self):
    """Probe /dev for ttyUSB modems and (re)establish the GSM receiver.

    Returns True when the modem is connected and receiving, False otherwise.
    When no device remains and a previous connection existed, the instance
    state is cleared and the port closed.
    """
    # Build the regular expression for serial USB device names
    ttyUSBPattern = re.compile('ttyUSB[0-9]+')
    lsDevProcess = subprocess.Popen(['ls', '/dev/'], stdout = subprocess.PIPE, stderr = subprocess.PIPE)
    lsDevOutput, lsDevError = lsDevProcess.communicate()
    ttyUSBDevices = ttyUSBPattern.findall(lsDevOutput)
    # USB devices were detected
    for ttyUSBx in reversed(ttyUSBDevices):
        # If the serial port was never set, the instance is not in use yet
        if self.gsmInstance.serialPort is None:
            # If configuration succeeds, start receiving SMS and calls
            if self.gsmInstance.connect('/dev/' + ttyUSBx):
                gsmThread = threading.Thread(target = self.gsmInstance.receive, name = gsmThreadName)
                logger.write('INFO', '[GSM] Listo para usarse (' + ttyUSBx + ').')
                gsmThread.start()
                return True
            # Configuration error: report failure
            else:
                return False
        # Modem already active (working)
        elif self.gsmInstance.isActive:
            return True
        # A previous 'connect' failed while the device is still plugged in
        else:
            return False
    # If there was a previous connect() attempt (successful or not), clean up
    if self.gsmInstance.serialPort is not None:
        self.gsmInstance.successfulConnection = None
        self.gsmInstance.serialPort = None
        self.gsmInstance.isActive = False
        self.gsmInstance.closePort()
    return False
def receiveTCP(self):
    """Accept loop for incoming TCP connections while the instance is active.

    Each accepted client is optionally checked against the reception filter
    (allowedHosts); accepted clients are handed to a receiveFile thread,
    rejected ones are closed.  The listening socket is closed on exit.
    """
    while self.isActive:
        try:
            # Wait for an incoming connection; returns a new socket for it
            # plus the client's address
            remoteSocket, addr = self.tcpReceptionSocket.accept()
            enabledFilter = False
            ipAddress = addr[0]
            # Apply the reception filter when it is enabled in the config
            if JSON_CONFIG["COMMUNICATOR"]["RECEPTION_FILTER"]:
                enabledFilter = True
                for valueList in contactList.allowedHosts.values():
                    if ipAddress in valueList:
                        # Client is registered: disable the filter
                        enabledFilter = False
                        break
            # Filter on and client found, or filter not enabled
            if not enabledFilter:
                #logger.write('DEBUG', '[NETWORK-TCP] Conexion desde \'%s\' aceptada.' % ipAddress)
                receptorThread = threading.Thread(target = self.receiveFile, args = (remoteSocket, ))
                receptorThread.start()
            # Client not found: reject its message
            else:
                logger.write('WARNING', '[%s-TCP] Mensaje de \'%s\' rechazado!' % (self.MEDIA_NAME, ipAddress))
                remoteSocket.close()
        # Keep accept() from blocking forever: its timeout re-checks isActive
        except socket.timeout as errorMessage:
            pass
    self.tcpReceptionSocket.close()
    logger.write('WARNING','[%s-TCP] Función \'%s\' terminada.' % (self.MEDIA_NAME, inspect.stack()[0][3]))
def answerVoiceCall(self):
    """Answer the incoming voice call via the ATA command.

    Returns True when the command was issued, False on any failure
    (the bare except deliberately makes this best-effort).
    """
    try:
        self.sendAT('ATA')  # Answer the incoming call
        logger.write('INFO', '[GSM] Conectado con el número %s.' % self.callerID)
        return True
    except:
        return False
def verifyEmailConnection(self):
    """Check Internet reachability and (re)establish the email receiver.

    Returns True when the email instance is connected and receiving, False
    on configuration errors or when there is no Internet connectivity (in
    which case the instance state is reset so a later call retries).
    """
    TEST_REMOTE_SERVER = 'www.gmail.com'
    try:
        remoteHost = socket.gethostbyname(TEST_REMOTE_SERVER)
        # Reachability probe with a 2 second timeout.
        testSocket = socket.create_connection((remoteHost, 80), 2)
        # Bug fix: the probe socket was never closed, leaking one file
        # descriptor per call.
        testSocket.close()
        # 'None' means we have not yet tried to reach the GMAIL servers
        if self.emailInstance.successfulConnection is None:
            # If configuration succeeds, start receiving emails
            if self.emailInstance.connect():
                emailThread = threading.Thread(target = self.emailInstance.receive, name = emailThreadName)
                emailThread.start()
                logger.write('INFO', '[EMAIL] Listo para usarse (' + self.emailInstance.emailAccount + ').')
                return True
            # Configuration error
            else:
                return False
        # Email already active (working)
        elif self.emailInstance.isActive:
            return True
        # A previous 'connect' failed (bad servers or ports)
        else:
            return False
    # No Internet (TEST_REMOTE_SERVER unreachable): reset and retry later
    except socket.error as DNSError:
        if self.emailInstance.isActive:
            self.emailInstance.successfulConnection = None
            self.emailInstance.emailAccount = None
            self.emailInstance.isActive = False
        return False
def createMenubar(self, items):
    """Build and return the application menubar.

    Creates File/plugin/Help menus, appends the standard items and binds
    their handlers.  'items' is passed through to buildMenus to populate the
    plugin menus.  Note: the Rescan item is created and bound but its
    AppendItem call is intentionally commented out.
    """
    logger.write("Creating Menubar:")
    logger.write(items)
    menuBar = wx.MenuBar(0)
    fileMenu = wx.Menu()
    self.toolsMenu = wx.Menu()
    helpMenu = wx.Menu()
    menuBar.Append(fileMenu, "File")
    self.buildMenus(items, menuBar)
    menuBar.Append(helpMenu, "Help")
    # create menu items
    # Fix: renamed the local from 'exit' to avoid shadowing the builtin.
    rescan = wx.MenuItem(fileMenu, wx.ID_ANY, "Rescan", "Rescans for any attached devices", wx.ITEM_NORMAL)
    reloadtools = wx.MenuItem(fileMenu, wx.ID_ANY, "Reload Plugins", "Rescans plugin directory and loads changes", wx.ITEM_NORMAL)
    exitItem = wx.MenuItem(fileMenu, wx.ID_ANY, "Exit", "Closes the application", wx.ITEM_NORMAL)
    about = wx.MenuItem(helpMenu, wx.ID_ANY, "About", "About Box", wx.ITEM_NORMAL)
    # append menu items
    #fileMenu.AppendItem(rescan)
    fileMenu.AppendItem(reloadtools)
    fileMenu.AppendItem(exitItem)
    helpMenu.AppendItem(about)
    # bind items
    self.Bind(wx.EVT_MENU, self.OnRescan, id=rescan.GetId())
    self.Bind(wx.EVT_MENU, self.OnReload, id=reloadtools.GetId())
    self.Bind(wx.EVT_MENU, self.OnExit, id=exitItem.GetId())
    self.Bind(wx.EVT_MENU, self.OnAbout, id=about.GetId())
    return menuBar
def killJob(param_sPGIDFILE):
    """Deactivate the screensaver job whose PGID is stored in param_sPGIDFILE.

    Blocks until startJob has created the PGID file (the screensaver must be
    active before it can be deactivated), marks it deactivated, removes the
    file and runs the resource stop script, logging its exit status.
    """
    # Wait for startJob to create the file.
    while not os.path.exists(param_sPGIDFILE):
        logger.write("Waiting for file: '" + param_sPGIDFILE + "' to be created")
        time.sleep(10)
    # Read the process-group id recorded by startJob.
    pgid_file = open(param_sPGIDFILE, 'r')
    pid = int(pgid_file.readline().strip().strip('\n'))
    pgid_file.close()
    writeScreensaverStatus('deactivated', param_sPGIDFILE, pid)
    if os.path.exists(param_sPGIDFILE):
        os.remove(param_sPGIDFILE)
    # Run the stop script and capture its one-line status.
    stop_pipe = os.popen('./mig_xsss_stop_resource_exe.sh ')
    exit_code = stop_pipe.readline().strip().strip('\n')
    stop_pipe.close()
    logger.write('PID: ' + str(pid) + ' deactivated, resource stop status: ' + exit_code)
def load(self, filename):
    """Load translation rules from `filename`, one rule per line.

    Lines that fail to parse (AssertionError from Rule.fromstr) are logged
    and counted in self.nbadrules.  Arity-0 rules go to the lexical grammar,
    all others to the ITG.  Progress is printed when logger.level >= 1.
    """
    if logger.level >= 1:
        logger.writeln('loading rules from %s...' % filename)
        percent_counter = PercentCounter(input=filename, file=logger.file)
    f = open(filename)
    for i, line in enumerate(f):
        if logger.level >= 1:
            percent_counter.print_percent(i)
        try:
            rule = Rule()
            rule.fromstr(line)
        except AssertionError:
            # Malformed rule: log it, count it, and keep going.
            logger.write('bad rule: %s %s: %s\n' % (filename, i, line))
            self.nbadrules += 1
            continue
        rule.grammar = self  # used in computing features scores
        self.features.score_rule(rule)
        if rule.arity == 0:
            self.lexgrammar.add(rule)
        else:
            self.itg.add(rule)
    f.close()
    if logger.level >= 1:
        logger.writeln()
        logger.writeln(self.stats())
def receiveRFCOMM(self): while self.isActive: try: # Espera por una conexión entrante y devuelve un nuevo socket que representa la conexión, como así también la dirección del cliente remoteSocket, addr = self.serverSocketRFCOMM.accept() remoteSocket.settimeout(TIMEOUT) enabledFilter = False macAddress = addr[0] # Aplicamos el filtro de recepción en caso de estar activado if JSON_CONFIG["COMMUNICATOR"]["RECEPTION_FILTER"]: enabledFilter = True for valueList in contactList.allowedBtAddress.values(): if ipAddress in valueList: # Deshabilitamos el filtro ya que el cliente estaba registrado enabledFilter = False break # El filtro está activado y el cliente fue encontrado, o el filtro no está habilitado if not enabledFilter: logger.write('DEBUG', '[BLUETOOTH] Conexión desde \'%s\' aceptada.' % macAddress) receptorThread = bluetoothReceptor.BluetoothReceptor('Thread-Receptor', remoteSocket, self.receptionQueue) receptorThread.start() # El cliente no fue encontrado, por lo que debemos rechazar su mensaje else: logger.write('WARNING', '[BLUETOOTH] Mensaje de \'%s\' rechazado!' % macAddress) remoteSocket.close() # Para que el bloque 'try' (en la funcion 'accept') no se quede esperando indefinidamente except bluetooth.BluetoothError, msg: pass
def add_sister_prefixes_helper(a, ephrases, enode, i):
    """if a phrase comprises one or more (but not all) leftmost children of a
    constituent, then add it and give it a fake label

    `i` is the absolute start position of `enode`; spans (i, j1) covering a
    strict prefix of the children are given a markov-style label X/NEXT and
    recorded in a.espans.  Recurses into every child.
    """
    j = i + enode.length
    if logger.level >= 3:
        logger.write("(i,j) = %s\n" % ((i, j), ))
    x = enode.label
    j1 = i
    for ci in range(len(enode.children)):
        child = enode.children[ci]
        j1 += child.length
        if logger.level >= 3:
            logger.write("(i,j1) = %s\n" % ((i, j1), ))
        # Strict prefix (j1 < j) that is also a known phrase span
        if j1 < j and (i, j1) in ephrases:
            # constprefix3:
            #x1 = sym.fromtag("%s*" % x)
            # subcat-lr2:
            #subcat = [sister.label for sister in enode.children[ci+1:] if sister.required]
            #x1 = sym.fromtag("/".join(["%s*"%x]+subcat))
            # markov1: label is parent plus the next sibling's label
            x1 = sym.fromtag("%s/%s" % (x, enode.children[ci + 1].label))
            # markov2:
            #x1 = sym.fromtag("%s(%s)" % (x, enode.children[ci].label))
            a.espans.setdefault((i, j1), []).append(x1)
            prefix_labels.add(x1)
    # Recurse into children, advancing the absolute offset as we go
    for child in enode.children:
        add_sister_prefixes_helper(a, ephrases, child, i)
        i += child.length
def socket_remote_access_connection_handler(self, conn, s_key):
    """Handle one remote-access client: authenticate, then forward each
    decrypted payload to socket_remote_access_command_handler."""
    import logger
    import cryptographer as cr
    import main
    loggerloc = "File: main.py | Class: Handler | Function: socket_remote_access_connection_handler | "
    logger.write("i", loggerloc+"Started new connection handler!")
    logger.write("i", loggerloc+"Waiting for client to authenticate!")
    authenticated = False
    text = "[send];[Please authenticate with username:password !];[info]"
    text = cr.encrypt(s_key, text)
    conn.send(text)
    while authenticated is False:
        edata = conn.recv(15360)
        # Bug fix: the original used `edata is "123"`, which compares object
        # identity, not string equality, so authentication could never succeed.
        if edata == "123":
            authenticated = True
    while True:
        edata = conn.recv(15360)
        logger.write("i", loggerloc+"Received data! Decrypting it! Data: "+str(edata))
        try:
            ddata = cr.decrypt(s_key, edata)
        except:
            logger.write("e", loggerloc+"Could not decrypt data: "+str(edata))
            conn.send("[send];[Server was not able to decrypt sent data!];[error]")
            # Bug fix: without this, 'ddata' was undefined (or stale) below
            # after a failed decrypt.
            continue
        logger.write("i", loggerloc+"Handing over decrypted data to socket_remote_access_command_handler! Data: "+str(ddata))
        Handler.socket_remote_access_command_handler(ddata, conn, VarKeeper.s_key)
def realTime(self, tStamp, streamID=-1):
    """Convert a rolling hardware tick timestamp into seconds.

    Handles counter rollover at MAXTICK relative to the last sync point
    (self.lastTime), keeps per-stream history to detect time going
    backwards, and returns the computed real time in seconds.
    """
    if tStamp >= self.lastTime and tStamp - self.lastTime < self.MAXTICK / 2:
        # this timestamp is after last sync, and no rollovers
        elapsedTicks = tStamp - self.lastTime
    elif self.lastTime - tStamp > self.MAXTICK / 2:
        # this timestamp is new, but rolled over since last sync
        #logger.write( "!!" + str(self.lastTime) + "->" + str(tStamp))
        elapsedTicks = tStamp + (self.MAXTICK - self.lastTime)
    elif tStamp < self.lastTime:
        # this timestamp is slightly old, assuming it is not garbage data.
        elapsedTicks = tStamp - self.lastTime  #returns a negative elapsed time.
    elif tStamp > self.lastTime and tStamp - self.lastTime > self.MAXTICK / 2:
        #this timestamp is in the past, but clock recently rolled over.
        logger.write("!!@" + str(self.lastTime) + "->" + str(tStamp))
        elapsedTicks = tStamp - self.MAXTICK - self.lastTime
    else:
        logger.log("No condition matched for 'realTime()'", "Propellor.py", logger.ERROR)
        elapsedTicks = 0
    try:
        lastRTime = self.lastRTime[streamID]
        lastTStamp = self.lastTStamp[streamID]
    except KeyError as e:
        # First sample seen for this stream
        lastRTime = 0
        lastTStamp = 0
    rTime = (self.cnt + elapsedTicks) / float(self.CLOCKPERSEC)
    # Warn if this stream's clock appears to have moved backwards >0.5s
    if lastRTime > rTime + 0.5:
        logger.log("Went back in time??? [" + str(streamID) + "] (" + str(lastTStamp) + "->" + str(tStamp) + ") Dif=" + str(lastTStamp - tStamp) + "ticks, " + str(lastRTime - rTime) + "seconds", rTime, logger.WARNING)
    self.lastTStamp[streamID] = tStamp
    self.lastRTime[streamID] = rTime
    return rTime
def failover_hdf5(hdf5_file):
    """Read an Eiger HDF5 master file via dxtbx and build an image header dict.

    Returns a dict with detector class/size, oscillation, exposure, beam and
    geometry values plus directory/template/start/end/matching entries for
    the image series.  Detector class is inferred from the slow image
    dimension (in thousands of pixels).
    """
    from dxtbx.serialize import xds
    from dxtbx.datablock import DataBlockFactory
    import time
    t0 = time.time()
    db = DataBlockFactory.from_filenames([hdf5_file])[0]
    sweep = db.extract_sweeps()[0]
    t1 = time.time()
    # Logging is best-effort on Python 2
    if version == 2:
        try:
            write('Reading %s took %.2fs' % (hdf5_file, t1 - t0))
        except:
            pass
    else:
        write('Reading {} took {:.2f}s'.format(hdf5_file, t1 - t0))
    d = sweep.get_detector()
    s = sweep.get_scan()
    g = sweep.get_goniometer()
    b = sweep.get_beam()
    # returns slow, fast, convention here is reverse
    size = tuple(reversed(d[0].get_image_size()))
    size0k_to_class = {
        1: 'eiger 1M',
        2: 'eiger 4M',
        3: 'eiger 9M',
        4: 'eiger 16M'
    }
    header = {}
    header['detector_class'] = size0k_to_class[int(size[0] / 1000)]
    header['detector'] = size0k_to_class[int(size[0] / 1000)].upper().replace(' ', '_')
    header['size'] = size
    header['serial_number'] = 0
    header['extra_text'] = find_hdf5_lib()
    # Oscillation derived from the angles of the first two images
    header['phi_start'] = s.get_angle_from_image_index(1.0, deg=True)
    header['phi_end'] = s.get_angle_from_image_index(2.0, deg=True)
    header['phi_width'] = header['phi_end'] - header['phi_start']
    header['oscillation'] = header['phi_start'], header['phi_width']
    header['exposure_time'] = s.get_exposure_times()[0]
    header['oscillation_axis'] = 'Omega_I_guess'
    header['distance'] = d[0].get_distance()
    header['wavelength'] = b.get_wavelength()
    header['pixel'] = d[0].get_pixel_size()
    header['saturation'] = d[0].get_trusted_range()[1]
    header['sensor'] = d[0].get_thickness()
    header['beam'] = d[0].get_beam_centre(b.get_s0())
    images = s.get_image_range()
    directory, template = os.path.split(hdf5_file)
    header['directory'] = directory
    # Template uses '??????' as the image-number placeholder
    header['template'] = template.replace('master', '??????')
    header['start'] = images[0]
    header['end'] = images[1]
    header['matching'] = range(images[0], images[1] + 1)
    return header
def add(self, Val, tStamp, rTime):
    """Append one (timestamp, realtime, value) sample to the queue,
    flushing once the buffer grows past the configured size."""
    sample = (tStamp, rTime, Val)
    if logger.options["log_points"]:
        logger.write(self.name + " + (" + str(sample) + ")")
    self.values.append(sample)
    # Flush when the buffered sample count exceeds the configured limit.
    if len(self.values) > logger.options["buffer_size"]:
        self.flush()
def setupdb(self):
    """Create the 'items' table on first use of the database.

    Fixes: the original assigned a dead local `CREATED = True` (it never
    updated the module/global flag) and never committed, so the DDL could be
    lost if the connection was discarded without a later commit.
    """
    write("Database not created, creating...", logging.WARN)
    c = self.conn.cursor()
    c.execute("""
    CREATE TABLE IF NOT EXISTS items(id NOT_NULL AUTO_INCREMENT,product TEXT, price REAL, location TEXT, store TEXT, date TEXT, PRIMARY KEY(id));
    """)
    c.close()
    # Persist the schema change explicitly.
    self.conn.commit()
def buildMenus(self, items, menuBar):
    """items is a list of tuples"""
    logger.write("Building Menus")
    for entry in items:
        # entry[0] is the menu title; the rest describes its contents.
        title = entry[0]
        logger.write("Building " + title + " Menu")
        submenu = wx.Menu()
        menuBar.Append(self.buildSubMenu(entry, submenu), title)
def buildMenus(self, items, menuBar):
    """items is a list of tuples

    Fix: the original created a fresh wx.Menu but then passed None to
    buildSubMenu, leaving the menu unused — the twin implementation of this
    method passes the menu through, and this one now matches it.
    """
    logger.write("Building Menus")
    for i in items:
        logger.write("Building " + i[0] + " Menu")
        name = i[0]
        menu = wx.Menu()
        menuBar.Append(self.buildSubMenu(i, menu), name)
def sendVoiceCall(self, telephoneNumber):
    """Dial `telephoneNumber` with the ATD command.

    Returns True when the dial command was issued, False on any failure.
    """
    try:
        self.sendAT('ATD' + str(telephoneNumber) + ';')  # Number to call
        logger.write('INFO', '[GSM] Llamando al número %s...' % str(telephoneNumber))
        return True
    except:
        logger.write('ERROR', '[GSM] Se produjo un error al intentar realizar la llamada!')
        return False
def cancel(self, notify=True):
    """Cancel this alarm and its reminder alarm, optionally notifying the user.

    Fix: AlarmStore.get returns False when the alarm is unknown, but the
    original unconditionally indexed timer['reminder'], raising a TypeError
    for unknown alarms before the existing `if timer` guard was reached.
    """
    logger.write('Alarm.cancel: Cancelling {}'.format(self.name))
    timer = AlarmStore.get(self.name)
    xbmc.executebuiltin('XBMC.CancelAlarm({}, silent)'.format(self.name))
    if timer:
        # Also cancel the companion reminder alarm recorded with this timer.
        xbmc.executebuiltin('XBMC.CancelAlarm({}, silent)'.format(timer['reminder']))
    if notify:
        if timer and self.settings('notifications.cancel') == 'true':
            xbmcgui.Dialog().notification(timer['friendly'], '{} {}'.format(timer['friendly'], self.language(32076)))
    AlarmStore.unset(self.name)
def verifyEthernetConnection(self):
    """Probe 'ip link show' for an up ethX interface and (re)establish the
    ethernet receiver.

    Interfaces already claimed (recorded in /tmp/activeInterfaces) are
    skipped.  Returns True when the instance is connected and receiving,
    False otherwise; when the interface disappears, the instance state is
    cleared and its entry removed from the shared file.
    """
    # Build the regular expression for ethernet interface names
    ethPattern = re.compile('eth[0-9]+')
    activeInterfacesList = open('/tmp/activeInterfaces', 'a+').read()
    for networkInterface in os.popen('ip link show').readlines():
        # Look for an 'eth' pattern match in this line
        matchedPattern = ethPattern.search(networkInterface)
        # Current interface matches an 'eth' pattern and is UP
        if matchedPattern is not None and networkInterface.find("state UP") > 0:
            # Interface not claimed yet and this instance is idle: take it
            if matchedPattern.group() not in activeInterfacesList and self.ethernetInstance.localInterface is None:
                self.ethernetInstance.localInterface = matchedPattern.group()
                # Record the interface in the shared file to mark it busy
                activeInterfacesFile = open('/tmp/activeInterfaces', 'a+')
                activeInterfacesFile.write(self.ethernetInstance.localInterface + '\n')
                activeInterfacesFile.close()
                # Get the local IP address (static or DHCP-assigned)
                commandToExecute = 'ip addr show ' + self.ethernetInstance.localInterface + ' | grep inet'
                localIPAddress = os.popen(commandToExecute).readline().split()[1].split('/')[0]
                # If configuration succeeds, start listening on that IP
                if self.ethernetInstance.connect(localIPAddress):
                    ethernetThread = threading.Thread(target = self.ethernetInstance.receive, name = ethernetThreadName)
                    ethernetInfo = self.ethernetInstance.localInterface + ' - ' + self.ethernetInstance.localIPAddress
                    logger.write('INFO', '[ETHERNET] Listo para usarse (' + ethernetInfo + ').')
                    ethernetThread.start()
                    return True
                # Configuration error
                else:
                    return False
            # Matched interface is the one this instance already uses
            elif matchedPattern.group() == self.ethernetInstance.localInterface:
                # Previous configuration succeeded
                if self.ethernetInstance.successfulConnection:
                    return True
                # Previous configuration failed
                else:
                    return False
            # Interface busy but owned by someone else: keep searching
            else:
                continue
        # No match on this line: keep searching
        else:
            continue
    # A previous connect() attempt (successful or not) needs cleanup
    if self.ethernetInstance.localInterface is not None:
        localInterface = self.ethernetInstance.localInterface
        # Clear all fields of the NETWORK object
        self.ethernetInstance.successfulConnection = None
        self.ethernetInstance.localInterface = None
        self.ethernetInstance.localIPAddress = None
        self.ethernetInstance.isActive = False
        # Remove the used interface from the shared file
        dataToWrite = open('/tmp/activeInterfaces').read().replace(localInterface + '\n', '')
        activeInterfacesFile = open('/tmp/activeInterfaces', 'w')
        activeInterfacesFile.write(dataToWrite)
        activeInterfacesFile.close()
    return False
def getTimeLeft(self):
    """Return the seconds remaining on this alarm, or None when it is not set."""
    logger.write('Alarm.getTimeLeft: Checking time left on {}'.format(self.name))
    remaining = None
    if self.isSet(log=False):
        record = AlarmStore.get(self.name)
        if record:
            # Remaining = (start + timeout) - now
            remaining = (record['start'] + record['timeout']) - int(time.time())
            logger.write('Alarm.getTimeLeft: {} has {} seconds left'.format(self.name, remaining))
    return remaining
def PrintDatabase(voicedb):
    """Return the speaker dictionary from `voicedb`, or False on failure.

    Fixes: removed the unused function-local voiceid imports, and narrowed
    the bare except (which also caught KeyboardInterrupt/SystemExit) to
    Exception.
    """
    import logger
    try:
        return voicedb.get_speakers()
    except Exception:
        logger.write("e", "File: voicehandler.py | Function: PrintDatabase | Error: Could not return or get voicedb dictionary!")
        return False
def log(self, jid):
    """read and write result of one job"""
    if logger.level >= 1:
        # Job logs are named log_00001, log_00002, ... under the run dir.
        fname = '%s/%s_%s' % (FLAGS.run_dir, 'log', str(jid).rjust(5, '0'))
        handle = open(fname)
        for record in handle:
            logger.write(record)
        logger.writeln()
        handle.close()
def hangUpVoiceCall(self):
    """Hang up the current voice call with the ATH command.

    Clears callerID when one was recorded.  Returns True on success,
    False on any failure.
    """
    try:
        self.sendAT('ATH')  # Hang up the call in progress
        if self.callerID is not None:
            logger.write('INFO', '[GSM] Conexión con el número %s finalizada.' % self.callerID)
            self.callerID = None
        return True
    except:
        return False
def process_cycle(monograph, graph):
    """Detect a negative cycle in `graph` and, when found, execute the
    corresponding trade and log it; always pauses before returning.

    Fix: compare against None with `is not None` instead of `!= None`.
    """
    cycle = collect_negative_cycle(graph)
    if cycle is not None:
        logs = trade(graph, monograph, cycle)
        logger.write(logs)
        #json_io.save(graph)
        #csv_io.save(graph)
        time.sleep(1)
    time.sleep(2)
def toggle_fan():
    """Flip the module-level fan state between 'on' and 'off' and log it."""
    global state
    # Ternary toggle: anything other than 'off' flips back to 'off'.
    state = 'on' if state == 'off' else 'off'
    logger.write(MODULE, 'toggling fan {}'.format(state))
def set_plugin_library(self, plugin_library):
    """Record the plugin library path in self._plugin_library.

    Logging is best-effort: on Python 2 the %-formatted write is wrapped in
    a bare try/except so a logging failure never blocks configuration.
    """
    if version == 2:
        try:
            write('set_plugin_library %s' % plugin_library)
        except:
            pass
    else:
        write('set_plugin_library {}'.format(plugin_library))
    self._plugin_library = plugin_library
def length():
    """Return the number of messages waiting in the reception queue, or
    False when the communicator is not open.

    Fix: the original compared `qsize() == None`, which can never be true
    (Queue.qsize() always returns an int), so the branch returning 0 was
    dead code.
    """
    if alreadyOpen:
        return receptionQueue.qsize()
    else:
        logger.write('WARNING', 'El Comunicador no se encuentra abierto!')
        return False
def infoHook(propCom, cIdx, pVal, dirs):
    """Callback invoked with a new pin value and direction mask.

    Logs the raw value, pushes it to the widget, and reconfigures pin
    directions when the mask changed.

    NOTE(review): 'self' is not a parameter here — this function only works
    when defined inside a method whose closure provides 'self'; confirm at
    the definition site.
    """
    # bitmask = 1 << 31
    # pVal = (pVal | bitmask) ^ bitmask
    logger.write(pVal)
    self.setValue(int(pVal))
    # Only rebuild widgets when the direction mask actually changed
    if self.pinDirs != dirs:
        self.pinDirs = dirs
        self.setDir(dirs)
        self.resetWidgets()
def request(flow: mitmproxy.http.HTTPFlow):
    """
    The full HTTP request has been read.

    Logs the request URL (tagged [TLS]) when both the host check and the
    TLS check pass.
    """
    global log_file
    # Short-circuit: check_TLS only runs when the host check passed.
    if checker.check_host(flow) and checker.check_TLS(flow):
        logger.write(log_file, "[TLS] " + flow.request.pretty_url)
def extend(self, extendby):
    """Lengthen a running alarm by `extendby` seconds.

    No-op when extendby is falsy or the alarm has no time left.  The alarm
    is cancelled silently and re-set with the extended timeout; on success
    an optional notification is shown and the extension logged.
    """
    if not extendby:
        return
    logger.write('Alarm.extend: Extending {}'.format(self.friendly))
    remaining = self.getTimeLeft()
    if not remaining:
        return
    # Silent cancel, then re-arm with the extended timeout.
    self.cancel(notify=False)
    if not self.set(timeout=remaining + extendby, extend=True):
        return
    if self.settings('notifications.extend') == 'true':
        xbmcgui.Dialog().notification(self.friendly, '{} {} {}'.format(self.language(32079), divmod(extendby, 60)[0], self.language(32073)))
    logger.write('Alarm.extend: Extended {} by {}'.format(self.friendly, extendby))
def get(name, log=True):
    """Look up the stored alarm record by name.

    Returns the matching record dict, or False when the store is empty or
    no record matches.  Set log=False to suppress the trace messages.
    """
    if log:
        logger.write('AlarmStore.get: Retrieving details for {}'.format(name))
    result = False
    raw = AlarmStore.Location.getProperty(AlarmStore.PropertyName)
    if raw:
        matches = [entry for entry in json.loads(raw) if entry['name'] == name]
        if matches:
            result = matches[0]
    if log:
        logger.write('AlarmStore.get: {}: {}'.format(name, result))
    return result
def __init__(self, filename):
    """Open (or create) the sqlite database at `filename`, building the
    schema on first use."""
    already_exists = os.path.exists(filename)
    self.conn = sqlite3.connect(filename)
    if already_exists:
        write("Database exists at:" + filename, logging.INFO)
    else:
        # Fresh database file: create the schema.
        self.setupdb()
def AddWaveFile(voicedb, Speaker_Name, WavFile_Path):
    """Add a speaker model built from WavFile_Path to `voicedb`.

    Returns True on success, False on failure.  Fixes: removed the unused
    function-local voiceid imports and narrowed the bare except (which also
    caught KeyboardInterrupt/SystemExit) to Exception.
    """
    import logger
    try:
        voicedb.add_model(WavFile_Path, Speaker_Name)
    except Exception:
        logger.write("e", "File: voicehandler.py | Function: AddWaveFile | Error: Could not add speaker to database!")
        return False
    return True
def set_h5toxds(self, h5toxds):
    """Record the h5toxds binary path and export it via H5TOXDS_PATH.

    Logging is best-effort: on Python 2 the %-formatted write is wrapped in
    a bare try/except so a logging failure never blocks configuration.
    """
    if version == 2:
        try:
            write('set_h5toxds %s' % h5toxds)
        except:
            pass
    else:
        write('set_h5toxds {}'.format(h5toxds))
    self._h5toxds = h5toxds
    # Make the path visible to child processes as well
    os.environ['H5TOXDS_PATH'] = h5toxds
def savefile(self, inputfile, location, store):
    """Save an uploaded file under UPLOADS with a timestamp name, then parse it.

    Fix: the output handle was leaked if a write raised; it is now closed
    via try/finally before logging and parsing.
    """
    filename = datetime.datetime.now().strftime("%Y-%m-%d-%H%M%S")
    save = open(constants.UPLOADS + filename, "w")
    try:
        for line in inputfile.file:
            save.write(line)
    finally:
        save.close()
    write("File saved to: " + constants.UPLOADS + filename, logging.INFO)
    parsefile.parsefile(filename, location, store)
def __del__(self):
    """Destructor: best-effort removal of this instance's interface entry
    from the shared /tmp/activeInterfaces file, then log the teardown."""
    try:
        # Remove the MAC/interface used by this instance from the file
        dataToWrite = open('/tmp/activeInterfaces').read().replace(self.localInterface + '\n', '')
        activeInterfacesFile = open('/tmp/activeInterfaces', 'w')
        activeInterfacesFile.write(dataToWrite)
        activeInterfacesFile.close()
    except Exception as errorMessage:
        # Best effort: the file or the attribute may be gone at teardown
        pass
    finally:
        logger.write('INFO', '[BLUETOOTH] Objeto destruido.')
def newconsole(user, host, title, debug=0):
    """Run an interactive console loop, feeding each input line to uinsplit().

    Python 2 only (uses raw_input).  The surrounding bare except means any
    error — including Ctrl-C/EOF — silently ends the loop.
    """
    try:
        import logger
        import sys
        lloc = "File: console.py | Function: newconsole | Message: "
        logger.write("i", "Trying create new console with arguments: user=["+str(user)+"], host=["+str(host)+"], title=["+str(title)+"], debug=["+str(debug)+"]!", lloc=lloc)
        while True:
            userinput = raw_input(user+"$ ")
            uinsplit(userinput, debug=debug)
    except:
        pass
def send(message, receiver = None, media = None):
    """Queue a message for transmission with priority handling.

    Plain strings/paths are wrapped in a messageClass.Message (requires an
    explicit `receiver`); file paths become 'fileName' payloads, other text
    becomes 'plainText'.  The receiver must appear in one of the contact
    dictionaries.  Returns True when queued, False otherwise.
    """
    if alreadyOpen:
        if not transmissionQueue.full():
            # If the message is not an instance, create one so transmission
            # can be handled with priority
            if not isinstance(message, messageClass.Message):
                # Without an instance we cannot know the destination unless
                # the user specifies it
                if receiver is not None:
                    tmpMessage = message
                    # Create the generic message instance (priority 10)
                    message = messageClass.Message('', receiver, 10)
                    # Is the message a path to a file (relative or absolute)?
                    if os.path.isfile(tmpMessage):
                        # Insert the 'fileName' field
                        setattr(message, 'fileName', tmpMessage)
                    # Otherwise it is a plain-text message
                    else:
                        # Insert the 'plainText' field
                        setattr(message, 'plainText', tmpMessage)
                else:
                    logger.write('ERROR', '[COMMUNICATOR] No se especificó un destino para el mensaje!')
                    return False
            ################################## VERIFICACIÓN DE CONTACTO ##################################
            # Before queueing, check that the client appears in some
            # contact dictionary
            clientList = list() + contactList.allowedHosts.keys()
            clientList += contactList.allowedBtAddress.keys()
            clientList += contactList.allowedEmails.keys()
            clientList += contactList.allowedNumbers.keys()
            # Drop duplicate clients
            clientList = list(set(clientList))
            # Reject the message when the receiver is not registered anywhere
            if message.receiver not in clientList:
                logger.write('WARNING', '[COMMUNICATOR] \'%s\' no registrado! Mensaje descartado...' % message.receiver)
                return False
            ################################ FIN VERIFICACIÓN DE CONTACTO ################################
            # Upper-case the preferred medium, if one was given
            if media is not None:
                media = media.upper()
            # Give priority to the medium referenced by 'media' (if any)
            setattr(message, 'media', media)
            # Timestamp the moment the message entered the transmission queue
            setattr(message, 'timeStamp', time.time())
            # How long the message may wait in the queue before being dropped
            setattr(message, 'timeToLive', JSON_CONFIG["COMMUNICATOR"]["TIME_TO_LIVE"])
            # Store the message in the transmission queue with its priority
            transmissionQueue.put((message.priority, message))
            logger.write('INFO', '[COMMUNICATOR] Mensaje almacenado en la cola esperando ser enviado...')
            return True
        else:
            logger.write('WARNING', '[COMMUNICATOR] La cola de transmisión esta llena, imposible enviar!')
            return False
    else:
        logger.write('WARNING', 'El Comunicador no se encuentra abierto!')
        return False
def newkey(self):
    """Generate and return a fresh security key, or False on failure."""
    import logger as logger
    import cryptographer as cr
    loggerloc = "File: main.py | Class: Security | Function: newkey | "
    logger.write("i", loggerloc+"Generating new security key!")
    try:
        # Delegate key generation to the cryptographer module.
        return cr.newkey()
    except:
        logger.write("e", loggerloc+"Could not generate new key!")
        return False
def join(self, proc_dict):
    """Block until the process wrapped in proc_dict["proc_object"] finishes.

    Returns True on a successful join, False when the join raised.
    """
    import logger as logger
    proc = proc_dict["proc_object"]
    try:
        proc.join()
    # Narrowed from a bare 'except:' which also swallowed
    # KeyboardInterrupt/SystemExit; unused 'import multiprocessing' removed.
    except Exception:
        logger.write("e", "File: main.py | Class: Multiprocessing | Function: join | Could not join proc object!")
        return False
    logger.write("i", "File: main.py | Class: Multiprocessing | Function: join | Successfully joined proc object!")
    return True
def receive():
    """Return the next received message, if any.

    Returns the message at the head of the reception queue, None when the
    queue is empty, or False when the communicator is not open.
    """
    if not alreadyOpen:
        logger.write('WARNING', 'El Comunicador no se encuentra abierto!')
        return False
    if receptionQueue.qsize() <= 0:
        logger.write('INFO', '[COMMUNICATOR] La cola de mensajes esta vacía!')
        return None
    # Queue entries are (priority, message) tuples; element 1 is the message
    return receptionQueue.get_nowait()[1]
def sendMessage(self, plainText, clientSocket):
    """Send plainText over clientSocket.

    The client socket is always closed afterwards, whether or not the
    send succeeded. Returns True on success, False on failure.
    """
    delivered = False
    try:
        clientSocket.send(plainText)
        logger.write('INFO', '[BLUETOOTH] Mensaje enviado correctamente!')
        delivered = True
    except Exception as sendError:
        logger.write('WARNING', '[BLUETOOTH] Mensaje no enviado: %s' % str(sendError))
    finally:
        # Close the client socket's connection unconditionally
        clientSocket.close()
    return delivered
def _aiTurn(self):
    """Play one AI turn: pause to simulate thinking, log the state, decide, end turn."""
    # thinking.....
    time.sleep(self.ai_think_secs)
    hand = self.players[self.cur_pid]['hand']
    logger.write("Last discards:\n" + self._cardsString(self.deck.getLastDiscards()))
    logger.write("Pre-turn hand:\n" + self._cardsString(hand))
    decision = ai.makeDecision(self.deck, self.players, self.cur_pid)
    self._endTurn(self.cur_pid, decision)
def handle(self):
    """Handles the request and calls the appropriate method handler
    """
    logger.write('handling request\n{}'.format(self.request))
    method = self.request.preamble.http_method
    if method == 'GET':
        self.do_GET()
    elif method == 'POST':
        self.do_POST()
    else:
        # Anything other than GET/POST is rejected
        self.do_invalid_method()
def uinsplit(userinput, debug=0):
    """Split raw console input into a command and argument list and dispatch it.

    The first whitespace-separated token is the command; the rest are its
    arguments. Best-effort: any failure is swallowed so the console loop
    never crashes.
    """
    try:
        import logger
        lloc = "File: console.py | Function: uinsplit | Message: "
        logger.write("i", "Trying to split userinput with arguments: userinput=["+str(userinput)+"], debug=["+str(debug)+"]!", lloc=lloc)
        parts = userinput.split(" ")
        command = parts[0]
        args = parts[1:]
        # BUG FIX: the old `print "args: "+args` concatenated str + list,
        # which always raised TypeError and (because of the blanket except)
        # silently prevented commandanalyzer from ever running.
        print("args: " + str(args))
        commandanalyzer(command, args, debug=debug)
    except Exception:
        # Deliberate best-effort swallow: console input must not crash the shell
        pass
def forward(self, dist, pids, endpoints, **kwargs):
    """Batch-hard triplet loss, optionally combined with softmax cross-entropy.

    dist      -- pairwise distance matrix fed to the batch-hard miner
    pids      -- person/class ids for each batch element
    endpoints -- dict; endpoints["soft"] holds the softmax logits per head

    Returns batch_hard_loss + a * cross_entropy_loss when self.a > 0,
    otherwise the plain batch-hard loss.
    """
    batch_hard_loss = self.batch_hard(dist, pids)
    if self.a > 0:
        cross_entropy_loss = 0.0
        # Sum the cross-entropy over every softmax head
        for softmax in endpoints["soft"]:
            cross_entropy_loss += self.cross_entropy(softmax, pids)
        # Detach scalar values for printing/recording only.
        # (Removed an accidental duplicate of this assignment.)
        ce_loss = float(var2num(cross_entropy_loss))
        bh_loss = float(var2num(torch.mean(batch_hard_loss)))
        print("bh loss {:.3f} ce loss: {:.3f}".format(bh_loss, ce_loss))
        log.write("loss", (bh_loss, ce_loss), dtype=np.float32)
        return batch_hard_loss + self.a * cross_entropy_loss
    else:
        return batch_hard_loss
def add_constituent_prefixes(a, ephrase_index):
    """if a phrase is a prefix of a constituent, give it a fake label"""
    # Verbose logging: dump every current (i, j, label) span before changes
    if logger.level >= 3:
        logger.write(
            str([(i, j, sym.tostring(x))
                 for ((i, j), l) in a.espans.iteritems() for x in l]))
        logger.write("\n")
    # Index constituent spans by start position: ei -> [(ej, label), ...]
    ei_index = {}
    for ((ei, ej), labels) in a.espans.iteritems():
        ei_index.setdefault(ei, []).extend([(ej, x) for x in reversed(labels)])
    # Sort each start-position bucket by end position so the shortest
    # containing constituent is found first below
    for ei in ei_index.iterkeys():
        ei_index[ei].sort()  # stable
    for (ei, ej) in ephrase_index:
        # NOTE(review): 'True or ...' makes this condition always true, so a
        # starred prefix label is added even when the span already carries
        # labels -- looks like deliberately disabled gating; confirm intent.
        if True or not (a.espans.has_key(
                (ei, ej)) and len(a.espans[ei, ej]) > 0):
            # First constituent starting at ei that strictly extends past ej:
            # label the phrase with that constituent's tag plus a '*' marker
            for (ej1, x) in ei_index.get(ei, []):
                if ej1 > ej:
                    x1 = sym.fromtag(sym.totag(x) + "*")
                    a.espans.setdefault((ei, ej), []).append(x1)
                    prefix_labels.add(x1)
                    break
    # Verbose logging: dump the spans again after adding prefix labels
    if logger.level >= 3:
        logger.write(
            str([(i, j, sym.tostring(x))
                 for ((i, j), l) in a.espans.iteritems() for x in l]))
        logger.write("\n---\n")
def add(self, item):
    """Try to insert *item* into its bin; return True if it was actually added.

    A chain of pruning filters runs first (preprune by rank cost, a ghkm
    unary-negative-deduction ban, an optional negative-deduction ban, a
    unary-cycle check, and duplicate merging); each filter bumps its own
    statistics counter when it fires.
    """
    added = False
    bin_idx = self.key(item)
    if bin_idx:  # discard items with None key
        # Fetch (or lazily create) the bin for this key
        bin = self.bins.setdefault(bin_idx, self.binclass(FLAGS.bin_size, self))
        # preprune: the item's rank cost already exceeds the bin's cutoff
        if not FLAGS.use_simple_bin and item.rank_cost() > bin.cutoff:
            if logger.level >= 4:
                logger.writeln('prepruned: %s' % item)
            self.prepruned += 1
        # TODO: a hack: ban unary negative deduction,
        # only for ghkm rules
        elif item.incoming[0].rule.arity == 1 and len(item.incoming[0].rule.f) == 1 and \
                item.incoming[0].cost <= 0 and \
                item.incoming[0].rule.grammar is not None and \
                'ghkm' in item.incoming[0].rule.grammar.name:
            if logger.level >= 4:
                logger.write(
                    'negative unary deduction for ghkm banned: %s' % item)
            self.neg_unary_pruned += 1
        # ban negative deduction
        elif FLAGS.ban_negative_deduction and item.incoming[0].cost <= 0:
            if logger.level >= 4:
                logger.writeln('negative deduction banned: %s' % item.incoming[0])
            self.negcost_pruned += 1
        # unary cycle banned
        elif item.unary_cycle():
            if logger.level >= 4:
                logger.writeln('unary cycle broken: %s' % item)
            self.unary_cycle_broken += 1
        # merging needed: an equivalent item is already in the index
        elif (not FLAGS.use_simple_bin) and item in self.index:
            oldcost, olditem = self.index[item]
            item_merged = item.merge(olditem)
            if item_merged:  # old item better
                if logger.level >= 4:
                    logger.writeln('merged: %s' % item)
            else:  # new item better
                bin.add(item)
                if not FLAGS.use_simple_bin:
                    bin.ndead += 1
                added = True
            self.merged += 1
        # no need to merge
        else:
            bin.add(item)
            added = True
    return added
def do_GET(self):
    """Serve a GET request: JSON directory listing at '/', file contents otherwise."""
    url = self.request.preamble.url
    logger.write('Handler GET requestURL: {}'.format(url))
    if url == "/":
        listing = json.dumps({'files': list_dir()})
        self.server.send_response(listing, {"Content-Type": "application/json"})
        return
    if not url.startswith("/"):
        self.server.send_error("400", "Bad Request")
        return
    try:
        body = get_file(url.lstrip("/"))
        self.server.send_response(body, {"Content-Type": "text/*"})
    except Exception as err:
        logger.write(err)
        self.server.send_error("404", "Not Found")
def setDevice(deviceId):
    """Select the configured device whose "id" equals deviceId.

    Sets the module-global selDev, updates tls.retain from the device's
    error distribution, and returns the device dict. Returns None when
    no device matches.
    """
    global jdata
    global selDev
    global tls
    # Reset before searching: previously a failed lookup left selDev holding
    # the device from an earlier call and silently proceeded with it.
    selDev = None
    for dev in jdata['devices']:
        if dev["id"] == deviceId:
            selDev = dev
            log.write("device selected: " + selDev["id"])
            break  # first match wins; no need to scan further
    if selDev is None:
        log.write("No device with device name: " + deviceId)
        return
    errDist = selDev["errDist"]
    tls.retain = retainErr(selDev["id"], errDist)
    return selDev
def forward(self, pids, endpoints, **kwargs):
    """Batch-hard triplet loss combined with a junk-image cross-entropy term.

    Returns the sum of the mean batch-hard loss and the mean junk
    cross-entropy loss; also prints and records the scalar values.
    """
    # only one triplet embedding is passed
    embedding = endpoints["triplet"][0]
    pairwise = calc_cdist(embedding, embedding)
    triplet_loss = self.batch_hard(pairwise, pids)
    # here is no data parallel anymore; the last num_junk_images batch
    # entries are labelled as junk (target 1.0)
    junk_targets = torch.zeros_like(pids, dtype=torch.float).unsqueeze(1)
    junk_targets[-self.num_junk_images:] = 1.0
    junk_loss = self.cross_entropy(endpoints["junk"], junk_targets)
    # Scalar copies for printing/logging only
    ce_val = float(var2num(torch.mean(junk_loss)))
    bh_val = float(var2num(torch.mean(triplet_loss)))
    junk_acc = self._calc_junk_acc(endpoints["junk"], junk_targets)
    print("bh loss {:.3f} ce loss: {:.3f} acc: {:.3f}".format(bh_val, ce_val, junk_acc))
    log.write("loss", (ce_val, bh_val), dtype=np.float32)
    return torch.mean(triplet_loss) + torch.mean(junk_loss)
def setHandler(propCom, cIdx, pVal):
    """Validate a channel index for a 'set' request and log the outcome.

    NOTE(review): the actual channel update (setValue / widget refresh) is
    currently commented out in the original; only validation and logging
    remain live.
    """
    try:
        if cIdx in device.channels:
            logger.write(".")
        else:
            logger.log("invalid channel index", str(cIdx), logger.WARNING)
    except IndexError:
        # Left over from when the request values were unpacked by index here
        logger.log("not enough values", "set", logger.ERROR)