def handle_file_failed(dongle, remote, pending, channel, files, ret, err,
                       services):
    logger.info("handle file failed %s[%s]: %s" % (remote, channel, files))
    logger.debug(err)
    try:
        try_again = []
        rules = get_campaign_rule(files)
        if rules is None or len(rules) == 0:
            raise Exception("Couldn't find rule")

        for rule in rules:
            record = save_file_failed(rule, dongle, ret, remote)
            # from here we try again either on timeout or if rejected count is
            # smaller than filter
            if rule.tryAgain(record=record):
                try_again.append(rule)

        # if we can't try again this method will raise an exception
        # and the try/catch will make sure the remote device gets out
        # of our pending list
        if upload_after_rejected(try_again, services, dongle=dongle,
                                 remote=remote):
            return
    except Exception, err:
        logger.error("OOOPS!!!")
        logger.exception(err)
def get_dongles(dongles):
    out = list()
    for address in dongles:
        print address
        try:
            if not is_known_dongle(address, UploaderBluetoothDongle) and \
                    isAIRcable(address):
                logger.info('not known uploader %s' % address)
                settings = SET.getSettingsByAddress(address)
                if 'uploader' not in settings:
                    logger.info('no settings for uploaders')
                    continue
                logger.info('default settings were found')
                logger.debug(settings['uploader'])
                max_conn = settings['uploader'].get('max_conn', 1)
                enabled = settings['uploader'].get('enable', True)
                name = settings['uploader'].get(
                    'name', _("Autodiscovered Bluetooth dongle"))
                UploaderBluetoothDongle.objects.get_or_create(
                    address=address,
                    defaults={
                        'name': name,
                        'enabled': enabled,
                        'max_conn': max_conn
                    })
            dongle = UploaderBluetoothDongle.objects.get(address=address,
                                                         enabled=True)
            out.append((address, dongle.max_conn, dongle.name))
        except Exception, err:
            logger.exception(err)
    return out
def force_disconnect(self):
    try:
        if self.client is not None:
            self.client.disconnect(force=True)
            # in case client died without closing connection
            #os.system('hcitool dc %s' % self.remote)
    except Exception, err:
        logger.exception(err)
def post_init():
    try:
        for i in ALERT_INFO:
            info = ALERT_INFO[i]
            notification.create_notice_type(info['name'], info['short'],
                                            info['long'])
    except Exception, err:
        logger.exception(err)
def db_ready():
    try:
        from openproximity.models import BluetoothDongle
        BluetoothDongle.objects.count()
        return True
    except Exception, err:
        logger.info("Database not ready")
        logger.exception(err)
        return False
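# Hedged usage sketch, not part of the original sources: db_ready() returns
# False instead of raising, so a caller can poll it while the database or
# syncdb is still coming up. The helper name and the retry/delay values below
# are illustrative assumptions.
import time

def wait_for_db(retries=30, delay=1):
    for _ in range(retries):
        if db_ready():
            return True
        time.sleep(delay)
    return False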
def __sanitize(self):
    try:
        if self.tree is None:
            self.__getXmlTree()
    except Exception, err:
        logger.info("failed while loading file settings, trying to simulate"
                    " config file")
        logger.exception(err)
        self.tree = etree.fromstring(DEFAULT)
def __import_plugin(self):
    '''
    Try to import the plugin, if it fails then it wasn't available.
    '''
    plugin = __import__(self.name, {}, {}, [], 0)
    try:
        plugin = getattr(plugins, self.name.split('.', 1)[-1])
    except Exception, err:
        logger.error('plugin was not part of plugins.*')
        logger.exception(err)
def connect(address, port):
    '''
    This method wraps rpyc server connection, so when no server is available
    the pairing manager will still work.
    '''
    logger.info("Connecting to %s:%s" % (address, port))
    try:
        s = rpyc.connect(address, int(port))
        return s
    except Exception, err:
        logger.error("can't connect to server")
        logger.exception(err)
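# Hedged usage sketch, not part of the original sources: connect() logs and
# swallows the exception, returning None on failure, so callers only need a
# None check to keep working without a server. localhost:8010 mirrors the
# rpyc port used elsewhere in this code, but is an assumption here, as is the
# helper name.
def try_server(address='localhost', port=8010):
    s = connect(address, port)
    if s is None:
        logger.info("no rpyc server available, running standalone")
    return s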
def exposed_getPIN(self, remote, local):
    logger.info("getPIN request for %s->%s" % (local, remote))
    remote = RemoteDevice.getRemoteDevice(address=remote)
    try:
        camps = getMatchingCampaigns(remote=remote, enabled=True)
        for camp in camps:
            if camp.pin_code:
                logger.debug("pin code: %s" % camp.pin_code)
                return camp.pin_code
    except Exception, err:
        logger.error(err)
        logger.exception(err)
def stats_restart(request):
    '''
    Delete statistics, we do DROP TABLE, not the recommended way but damn
    effective.
    '''
    from django.core import management
    from django.db import connection, models
    from django.core.management.color import no_style
    from django.core.management import sql

    cursor = connection.cursor()
    logger.info("stats restart")

    # these tables are not going to be deleted
    tables = [
        'openproximity_bluetoothdongle',
        'openproximity_campaignfile',
        'openproximity_marketingcampaign',
        'openproximity_remotescannerbluetoothdongle',
        'openproximity_scannerbluetoothdongle',
        'openproximity_uploaderbluetoothdongle',
        'openproximity_generalsetting',
        'openproximity_userprofile'
    ]

    model = models.get_app('openproximity')
    drop = ""
    drop_table = sql.sql_delete(model, no_style())
    for line in drop_table:
        table_name = line.split()[2].replace('"', '').replace(';', '')
        if line.startswith('DROP TABLE'):
            # we don't want to lose settings
            if table_name not in tables:
                drop += "DROP TABLE %s;\n" % table_name
        elif line.find('CREATE INDEX') > -1:
            drop += "DROP INDEX %s;\n" % table_name

    try:
        server = rpyc.connect('localhost', 8010)
        server.root.Lock()
        logger.info("database locked")
    except:
        pass

    logger.info("about to drop")
    for line in drop.splitlines():
        try:
            connection.cursor().execute(line)
        except Exception, err:
            logger.error("%s failed" % line)
            logger.exception(err)
def initAgent():
    '''
    Initialize the agent, register with every available path if possible.
    '''
    try:
        manager = dbus.Interface(bus.get_object("org.bluez", "/"),
                                 "org.bluez.Manager")
        for path in manager.ListAdapters():
            registerAgent(path)
        logger.info("Agent registered on all paths")
    except Exception, err:
        logger.error("Something went wrong on the agent application")
        logger.exception(err)
def get_campaign_rule(files):
    logger.info('get_campaign_rule %s' % files)
    out = set()
    for file, camp_id in files:
        logger.debug(file)
        try:
            camp = MarketingCampaign.objects.get(pk=camp_id)
            logger.debug(camp)
            if len(out) > 0 and camp not in out:
                logger.error("multiple return values")
            out.add(camp)
        except Exception, err:
            logger.exception(err)
    # callers expect the set of matched campaigns back
    return out
def _find_plugins_for_egg(self, egg_name):
    '''
    Find plugin in egg file
    '''
    b = zipfile.PyZipFile(egg_name)
    for a in b.namelist():
        if not a.startswith('EGG-INFO') and a.endswith('__init__.py'):
            try:
                if egg_name not in sys.path:
                    sys.path.append(egg_name)
                self.load_info(egg_name, a.split('/')[0], egg=True)
                return
            except Exception, err:
                logger.error("Failed to load info from egg file: %s" %
                             egg_name)
                logger.exception(err)
def parsereading(device=None, seconds=None, battery=None, reading=None,
                 dongle=None):
    '''
    This method expects to get a valid reading, generating a record out of it.
    '''
    logger.info("linkthrecord parsereading: %s" % reading)

    # find the ambient device, or create it if there's none yet created
    device, created = LinkTHDevice.objects.get_or_create(
        address=device,
        defaults={
            'friendly_name': _('Auto Discovered LinkTH Sensor'),
            'sensor': _('Linkth'),
            'mode': _('Monitor'),
        })

    record = LinkTHRecord()
    record.remote = device
    record.dongle = dongle
    record.time = datetime.fromtimestamp(seconds)
    record.battery = int(battery) / 1000.0  # assume we get battery * 1000

    while len(reading) > 0:
        m = LINE.match(reading)
        if not m:
            logger.error("monitorlinkth NO MATCH: %s" % reading)
            break
        m = m.groupdict()
        reading = m['rest']
        val = m['val']
        typ = val.split(',')[0].strip()
        m['id'] = m['id'].strip()
        try:
            sen_id = device.findSensorForId(m['id'])
        except Exception, err:
            # if we got here then we have more than MAXSENSORS
            # registered on this linkth
            logger.exception(err)
            break
        if not getattr(device, 'sensor%s_id' % sen_id, None):
            setattr(device, 'sensor%s_id' % sen_id, m['id'])
            setattr(device, 'sensor%s_name' % sen_id,
                    _("Auto discovered 1wire sensor"))
            setattr(device, 'sensor%s_family' % sen_id, SENSOR_FAMILY[typ])
            device.save()
        val = SENSOR_EXTRACTION[typ](val)
        setattr(record, 'value%s' % sen_id, val['temperature'])
        if typ in ['19', '1A', '1B']:
            setattr(record, 'value%sa' % sen_id, val['extra'])
def python_reloader(main_func, args, kwargs):
    '''
    Internal method that will setup the forking process. If it's the parent
    then it will do the forks, otherwise it will start a new worker thread
    and setup a reloader thread.
    '''
    if os.environ.get("RUN_MAIN") == "true":
        signal.signal(signal.SIGUSR1, handle_child)
        t = threading.Thread(target=main_func, args=args, kwargs=kwargs)
        t.start()
        try:
            reloader_thread(t)
        except KeyboardInterrupt:
            pass
        except Exception, err:
            logger.exception(err)
def found_action(services, address, record, pending, dongle):
    line = LogLine()
    line.content = "Found action for: %s" % address
    try:
        for plugin in pluginsystem.get_plugins('found_action'):
            logger.info("found action trying with %s" % plugin.name)
            service = plugin.rpc['found_action'](services=services,
                                                 record=record)
            if service:
                logger.info("plugin has handled")
                line.content += " %s is handling" % getattr(plugin, 'name',
                                                            'plugin')
                line.save()
                pending[record.remote.address] = service
                return True
    except Exception, err:
        logger.error("plugin do_action")
        logger.exception(err)
def getPIN(address, dongle):
    '''
    This method will try to ask the server which PIN code to use, otherwise
    fallback to the default PIN.
    '''
    global server
    if len(sys.argv) > 2:
        logger.info("server available")
        if not server:
            logger.info("connecting to server")
            server = connect(sys.argv[1], sys.argv[2])
        try:
            out = server.root.getPIN(address, dongle)
            return str(out)
        except Exception, err:
            logger.error("couldn't get PIN from server")
            logger.exception(err)
def readBuffer(self, honnor_eol=False, timeout=1, log=False):
    '''
    Reads all the data inside the buffer
    '''
    out = buffer("")
    # timeout__ = self.socket.gettimeout()
    # self.socket.settimeout(timeout)
    start = time.time()
    try:
        while True:
            out += self.read(bytes=1, log=False, timeout=timeout)
            if honnor_eol and self.__pattern.match(out):
                break
            if time.time() - start > timeout:
                break
    except SPPException, err:
        if log:
            logger.exception(err)
    return out
def handle_picture(picture=None, target=None, dongle=None, pending=None,
                   *args, **kwargs):
    try:
        dongle = CameraBluetoothDongle.objects.get(address=dongle)
        remote, created = CameraRemoteDevice.objects.get_or_create(
            address=target,
            defaults={'name': _('Generic Camera')})
        remote.save()
        record = CameraRecord()
        record.dongle = dongle
        record.remote = remote
        record.picture.save(
            "%s_%s.jpg" % (target.replace(':', ''), time.time()),
            ContentFile(picture))
        record.save()
    except Exception, err:
        logger.exception(err)
def __scan(self, times):
    try:
        self.client.socket.settimeout(30)
        self.client.sendLine("r%i" % times)
        logger.debug(self.client.readBuffer())
        flag = True
        while flag:
            lines = self.client.readBuffer(honnor_eol=True, timeout=1)
            for line in lines.splitlines():
                if self.record_pattern.match(line.strip()):
                    address, rssi = line.split('RSSI')
                    self.DeviceFound(AddDots(address), {'RSSI': int(rssi)})
                elif line.find('COMMAND') > -1:
                    logger.info("RSSI completed")
                    flag = False
                    break
    except (SPPException, SPPNotConnectedException, TypeError), e:
        logger.error("error while scanning, could be that we lost connection")
        logger.exception(e)
def found_action(record, services):
    dongle = record.dongle.address
    logger.info("camera device_found %s: %s[%s]" % (
        dongle, record.remote.address, record.remote.name))
    camps = getMatchingCampaigns(record.remote, enabled=True,
                                 classes=[CameraCampaign, ])
    if len(camps) == 0:
        return False
    if len(camps) > 1:
        e = Exception("There's more than one campaign that matches, "
                      "check settings")
        logger.exception(e)
        raise e

    logger.debug("found campaign")
    camp = camps[0]

    global clients
    if clients.get(dongle, None) is None:
        logger.debug("dongle not registered as client")
        logger.debug(clients)
        logger.debug(dongle)
        return False  # there's no registered service, I can't do a thing

    address = record.remote.address
    if not check_if_service(address):
        return False

    latest = CameraRemoteDevice.objects.filter(address=address)
    if latest.count() > 0:
        for k in latest.all():
            k.save()

    # mark elements as served, so timeout can exist
    service[address] = time.time()

    logger.info("handling device %s" % address)
    client = clients[dongle]
    client.setZoneMinderBackend(camp.enable_zm)
    client.setOpenProximityBackend(camp.use_backend)
    client.connect(record.remote.address)
    logger.debug("connecting")
    return client
def get_dongles(dongles):
    def create_new_discovered_dongle(address, settings):
        logger.info("going to setup as scanner")
        priority = settings['scanner'].get('priority', 1)
        enabled = settings['scanner'].get('enable', True)
        name = settings['scanner'].get('name', _("Auto Discovered Dongle"))
        obj, created = ScannerBluetoothDongle.objects.get_or_create(
            address=address,
            defaults={
                'priority': priority,
                'enabled': enabled,
                'name': name
            })
        logger.debug("%s %s[%s]" % (address, name, priority))

    def internal_get_dongles(address):
        dongle = ScannerBluetoothDongle.objects.get(address=address)
        logger.info("%s is a scanner dongle" % address)
        if dongle.enabled:
            yield (address, dongle.priority, dongle.name)
        if dongle.remote_dongles.count() > 0:
            logger.info("We have remote dongles available")
            for remote in dongle.remote_dongles.all():
                if remote.enabled:
                    yield (remote.address, remote.priority, dongle.address)

    out = list()
    for address in dongles:
        try:
            if not is_known_dongle(address, ScannerBluetoothDongle):
                logger.info("dongle not known yet %s" % address)
                settings = SET.getSettingsByAddress(address)
                if 'scanner' in settings:
                    create_new_discovered_dongle(address, settings)
            out.extend(internal_get_dongles(address))
        except Exception, err:
            logger.exception(err)
    return out
def connect(self):
    try:
        self.client = sppClient(target=self.remote,
                                channel=1,
                                service="spp",
                                device=self.local)
        self.client.connect()
        time.sleep(3)
        # empty all buffers, get it to a clean state
        self.client.socket.settimeout(30)
        logger.debug(self.client.readLine())
        logger.debug(self.client.readBuffer())
        self.ScannerConnected(self.local, self.remote)
    except Exception, err:
        logger.exception(err)
        self.client = None
        self.ScannerDisconnected(self.local, self.remote)
def exposed_generic_register(self, remote_quit=None, dongles=None,
                             client=None):
    logger.info("generic register")
    all_dongles.update(dongles)
    if not enabled:
        return
    if not db_ready():
        return
    try:
        for plugin in pluginsystem.get_plugins('rpc_register'):
            logger.debug("plugin %s provides rpc register" % plugin.name)
            if plugin.rpc['register'](dongles=dongles, client=client):
                # wrap all calls as async, to avoid collisions
                self.remote_quit = async(remote_quit)
                logger.info("plugin %s handled rpc_register" % plugin.name)
                return
    except Exception, err:
        logger.exception(err)
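# Hedged sketch, not from the original plugins: the call above implies that a
# plugin advertising 'rpc_register' exposes an rpc dict whose 'register'
# callable takes dongles and client keyword arguments and returns True when it
# claims the client. The function body below is a hypothetical example.
def register(dongles=None, client=None, **kwargs):
    # claim the client only when it actually announced some dongles
    return bool(dongles)

rpc = {'register': register}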
def scan(self, times=1):
    try:
        self.PropertyChanged('Discovering', 1)
        threading.Thread(target=self.__scan, args=(times, )).start()
    except Exception, err:
        logger.exception(err)
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

from net.aircable.utils import logger, logmain

if __name__ == '__main__':
    logmain("manage")

from django.core.management import execute_manager, setup_environ

try:
    import settings  # Assumed to be in the same directory.
    setup_environ(settings)
except ImportError, e:
    import sys, traceback
    logger.error(
        "Error: Can't find the file 'settings.py' in the directory "
        "containing %r. It appears you've customized things.\nYou'll have "
        "to run django-admin.py, passing it your settings module.\n(If the "
        "file settings.py does indeed exist, it's causing an ImportError "
        "somehow.)\n" % __file__)
    logger.exception(e)
    sys.exit(1)

from net.aircable.openproximity.pluginsystem import pluginsystem
pluginsystem.post_environ()

if __name__ == "__main__":
    execute_manager(settings)
def poll(fd, condition):
    '''
    This function gets called whenever there's data waiting in the incoming
    socket, so we can flush the data from the server.
    '''
    try:
        server.poll()
        return True
    except EOFError, eof:
        logger.error("EOF while polling %s" % eof)
        logger.exception(eof)
        stop()
        return False
    except Exception, err:
        logger.error("error during poll %s" % err)
        logger.exception(err)
        return True


def stop():
    '''
    Safe stop function
    '''
    global manager
    from uploader import UploadManager
    if getattr(manager, 'exposed_stop', None):
        manager.exposed_stop()
    loop.quit()


def handle_name_owner_changed(own, old, new):
    '''
    Will get called whenever a name owner changes in dbus.
    '''
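# Hedged sketch, an assumption rather than the original wiring: poll(fd,
# condition) has the signature of a glib io-watch callback, so the rpyc
# connection would typically be hooked into the main loop roughly like this.
# The gobject calls and the run_loop name are illustrative; server and loop
# are the module-level globals the functions above already rely on.
import gobject

def run_loop():
    gobject.io_add_watch(server.fileno(), gobject.IO_IN, poll)
    loop.run()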
def disconnect(self):
    try:
        self.client.sendLine('c')
        self.client.disconnect(True)
    except Exception, err:
        logger.exception(err)
class OpenProximityService(Service):
    dongles = None
    remote_quit = None

    def on_connect(self):
        services.add(self)

    def on_disconnect(self):
        a = [p for p in pending if pending[p] == self]
        if len(a) > 0:
            logger.info(
                "a client disconnected, clearing %s pending transactions" %
                len(a))
            for p in a:
                pending.pop(p)
        services.remove(self)

    def exit(self, exit):
        for ser in services:
            if ser.remote_quit is not None:
                try:
                    ser.remote_quit()
                except:
                    pass
        pending = set()
        if exit:
            sys.exit(3)  # restart me please

    @transaction.commit_manually
    def exposed_listener(self, signal, *args, **kwargs):
        global enabled
        if not enabled:
            logger.debug("rpc is locked, dropping signal %s" % signal)
            return
        logger.debug("exposed_listener %s %s %s" % (signal, args, kwargs))
        kwargs['pending'] = pending
        try:
            for plugin in pluginsystem.get_plugins('rpc'):
                plugin.rpc['handle'](signal=signal, services=services,
                                     manager=self, *args, **kwargs)
            # commit only after all the plugins have handled
            transaction.commit()
        except Exception, err:
            logger.error("rpc listener while doing plugins")
            logger.exception(err)
            transaction.rollback()  # oops, rollback
        try:
            if signals.isScannerSignal(signal):
                rpc.scanner.handle(services, signal, self, *args, **kwargs)
            elif signals.isUploaderSignal(signal):
                rpc.uploader.handle(services, signal, self, *args, **kwargs)
            # commit only after the scanner and uploader have done their work
            transaction.commit()
        except Exception, err:
            logger.error("rpc listener while doing scanner or uploader")
            logger.exception(err)
            transaction.rollback()  # oops, rollback
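# Hedged sketch, not from the original sources: OpenProximityService
# subclasses rpyc's Service, so the manager side would typically expose it
# through rpyc's ThreadedServer. Port 8010 matches the port used elsewhere in
# this code but is still an assumption, as is the function name.
from rpyc.utils.server import ThreadedServer

def serve_forever():
    ThreadedServer(OpenProximityService, port=8010).start()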
logger.info("about to drop") for line in drop.splitlines(): try: connection.cursor().execute(line) except Exception, err: logger.error("%s failed" % line) logger.exception(err) logger.info("allowing plugins to drop statistic it's tables") for plugin in pluginsystem.get_plugins('statistics_reset'): try: getattr(plugin.module, 'statistics_reset')(connection) except Exception, err: logger.error("plugin failed to reset statistics %s" % plugin) logger.exception(err) logger.info("calling syncdb") management.call_command('syncdb', migrate_all=True) try: server = rpyc.connect('localhost', 8010) server.root.Unlock() server.root.restart() except: pass logger.info("database unlocked") return HttpResponse("DELETE COMPLETE")