def handle_file_failed(dongle, remote, pending, channel, files, ret, err,
                       services):
    """Record a failed upload and schedule retries for campaigns that allow it."""
    logger.info("handle file failed %s[%s]: %s" % (remote, channel, files))
    logger.debug(err)
    try:
        try_again = []
        rules = get_campaign_rule(files)
        if rules is None or len(rules) == 0:
            raise Exception("Couldn't find rule")
        for rule in rules:
            record = save_file_failed(rule, dongle, ret, remote)
            # from here we try again either on timeout or if rejected count is
            # smaller than filter
            if rule.tryAgain(record=record):
                try_again.append(rule)
        # if we can't try again this method will raise an exception
        # and the try/catch will make sure the remote device gets out
        # of our pending list
        if upload_after_rejected(try_again, services, dongle=dongle,
                                 remote=remote):
            return
    except Exception as err:
        logger.error("OOOPS!!!")
        logger.exception(err)
def handle_file_failed(dongle, remote, pending, channel, files, ret, err,
                       services):
    """Record a failed upload and schedule retries for campaigns that allow it."""
    logger.info("handle file failed %s[%s]: %s" % (remote, channel, files))
    logger.debug(err)
    try:
        try_again = []
        rules = get_campaign_rule(files)
        if rules is None or len(rules) == 0:
            raise Exception("Couldn't find rule")
        for rule in rules:
            record = save_file_failed(rule, dongle, ret, remote)
            # from here we try again either on timeout or if rejected count is
            # smaller than filter
            if rule.tryAgain(record=record):
                try_again.append(rule)
        # if we can't try again this method will raise an exception
        # and the try/catch will make sure the remote device gets out
        # of our pending list
        if upload_after_rejected(try_again, services, dongle=dongle,
                                 remote=remote):
            return
    except Exception as err:
        logger.error("OOOPS!!!")
        logger.exception(err)
def handle(services, signal, uploader, *args, **kwargs):
    """Dispatch one uploader signal to its specific handler and persist a log line."""
    logger.info("uploader signal: %s" % signals.TEXT[signal])
    logl = LogLine()
    logl.content += signals.TEXT[signal]
    if signal == signals.SDP_RESOLVED:
        logl.content += ' %s:%s' % (kwargs['address'], kwargs['port'])
        handle_sdp_resolved(kwargs['dongle'], kwargs['address'],
                            kwargs['port'])
    elif signal == signals.SDP_NORECORD:
        logl.content += ' %s' % (kwargs['address'])
        handle_sdp_norecord(kwargs['dongle'], kwargs['address'],
                            kwargs['pending'])
    elif signal == signals.SDP_TIMEOUT:
        logl.content += ' %s' % (kwargs['address'])
        handle_sdp_timeout(kwargs['dongle'], kwargs['address'],
                           kwargs['pending'])
    elif signal == signals.FILE_UPLOADED:
        logl.content += ' %s' % (kwargs['address'])
        handle_file_uploaded(kwargs['dongle'], kwargs['address'],
                             kwargs['pending'], kwargs['port'],
                             kwargs['files'])
    elif signal == signals.FILE_FAILED:
        logl.content += ' %s, ret:%s' % (kwargs['address'], kwargs['ret'])
        handle_file_failed(kwargs['dongle'], kwargs['address'],
                           kwargs['pending'], kwargs['port'],
                           kwargs['files'], kwargs['ret'],
                           kwargs['stderr'], services)
    else:
        logger.error("signal ignored")
    logl.save()
def parsereading(device=None, seconds=None, battery=None, reading=None,
                 dongle=None):
    """Parse one ambient-sensor reading string and persist an AmbientRecord."""
    # extract parameters from reading string
    m = tamb.match(reading)
    if not m:
        logger.error("NO MATCH %s" % reading)
        return
    temp = m.groupdict()['temperature']
    # find ambient device, or create if there's none yet created
    device, created = AmbientDevice.objects.get_or_create(
        address=device,
        defaults={
            'friendly_name': _('Auto Discovered Ambient Sensor'),
            'sensor': _('Ambient'),
            'mode': _('Monitor'),
        })
    temp = device.getTemperature(float(temp))
    record = AmbientRecord()
    record.remote = device
    record.dongle = dongle
    record.temperature = temp
    record.time = datetime.fromtimestamp(seconds)
    record.battery = battery
    record.save()
def __import_plugin(self):
    '''
    Try to import the plugin, if it fails then it wasn't available.
    '''
    # the dotted import registers the module under the plugins package;
    # the binding is then refreshed from plugins.* itself
    plugin = __import__(self.name, {}, {}, [], 0)
    try:
        plugin = getattr(plugins, self.name.split('.', 1)[-1])
    except Exception as err:
        logger.error('plugin was not part of plugins.*')
        logger.exception(err)
def handle(signal, services, manager, *args, **kwargs):
    """Route a camera signal to its registered handler, if one exists."""
    if not signals.isCameraSignal(signal):
        return
    global handlers
    logger.info("Camera HANDLE %s %s %s" % (signals.TEXT[signal], args,
                                            kwargs))
    if signal in handlers:
        return handlers[signal](manager=manager, *args, **kwargs)
    logger.error("Camera, no handler %s" % signals.TEXT[signal])
def connect(address, port):
    '''
    This method wraps rpyc server connection, so when no server is
    available the pairing manager will still work.
    '''
    logger.info("Connecting to %s:%s" % (address, port))
    try:
        # returns None implicitly when the connection fails
        return rpyc.connect(address, int(port))
    except Exception as err:
        logger.error("can't connect to server")
        logger.exception(err)
def exposed_getPIN(self, remote, local):
    """Return the PIN code of the first matching enabled campaign defining one."""
    logger.info("getPIN request for %s->%s" % (local, remote))
    remote = RemoteDevice.getRemoteDevice(address=remote)
    try:
        camps = getMatchingCampaigns(remote=remote, enabled=True)
        for camp in camps:
            if camp.pin_code:
                logger.debug("pin code: %s" % camp.pin_code)
                return camp.pin_code
    except Exception as err:
        logger.error(err)
        logger.exception(err)
def exposed_getPIN(self, remote, local):
    """Return the PIN code of the first matching enabled campaign defining one."""
    logger.info("getPIN request for %s->%s" % (local, remote))
    remote = RemoteDevice.getRemoteDevice(address=remote)
    try:
        camps = getMatchingCampaigns(remote=remote, enabled=True)
        for camp in camps:
            if camp.pin_code:
                logger.debug("pin code: %s" % camp.pin_code)
                return camp.pin_code
    except Exception as err:
        logger.error(err)
        logger.exception(err)
def stats_restart(request):
    '''
    Delete statistics, we do drop table, not the recommended way but damn
    effective.

    NOTE(review): this chunk appears truncated; the function body continues
    beyond this view (plugin resets, syncdb, unlock) — confirm in the file.
    '''
    from django.core import management
    from django.db import connection, models
    from django.core.management.color import no_style
    from django.core.management import sql
    cursor = connection.cursor()
    logger.info("stats restart")
    # this tables are not going to be deleted
    tables = [
        'openproximity_bluetoothdongle',
        'openproximity_campaignfile',
        'openproximity_marketingcampaign',
        'openproximity_remotescannerbluetoothdongle',
        'openproximity_scannerbluetoothdongle',
        'openproximity_uploaderbluetoothdongle',
        'openproximity_generalsetting',
        'openproximity_userprofile',
    ]
    model = models.get_app('openproximity')
    drop = ""
    drop_table = sql.sql_delete(model, no_style())
    for line in drop_table:
        table_name = line.split()[2].replace('"', '').replace(';', '')
        if line.startswith('DROP TABLE'):
            # we don't want to loose settings
            if table_name not in tables:
                drop += "DROP TABLE %s;\n" % table_name
        elif line.find('CREATE INDEX') > -1:
            drop += "DROP INDEX %s;\n" % table_name
    try:
        server = rpyc.connect('localhost', 8010)
        server.root.Lock()
        logger.info("database locked")
    except:
        pass
    logger.info("about to drop")
    for line in drop.splitlines():
        try:
            connection.cursor().execute(line)
        except Exception as err:
            logger.error("%s failed" % line)
            logger.exception(err)
def connection_ready(self, sock, condition):
    """Accept every queued connection; keep the IO watch alive on EWOULDBLOCK."""
    while True:
        try:
            logger.info("connection ready")
            conn, remote = self.socket.accept()
            # sock.listen(1)
            logger.info("accepted")
            self.handle_connection(conn, remote)
        except sk.error as e:
            logger.error(e)
            if e[0] not in (errno.EWOULDBLOCK, errno.EAGAIN):
                raise
            return True
def poll(fd, condition):
    '''
    This function gets called whenever there's data waiting in the
    incoming socket, so we can flush the data from the server.
    '''
    try:
        server.poll()
        return True
    except EOFError as eof:
        logger.error("EOF while polling %s" % eof)
        logger.exception(eof)
        stop()
        return False
def connection_ready(self, sock, condition):
    """Accept every queued connection; keep the IO watch alive on EWOULDBLOCK."""
    while True:
        try:
            logger.info("connection ready")
            conn, remote = self.socket.accept()
            # sock.listen(1)
            logger.info("accepted")
            self.handle_connection(conn, remote)
        except sk.error as e:
            logger.error(e)
            if e[0] not in (errno.EWOULDBLOCK, errno.EAGAIN):
                raise
            return True
def initAgent():
    '''
    Initialize the agent, register with every available path if possible.
    '''
    try:
        manager = dbus.Interface(bus.get_object("org.bluez", "/"),
                                 "org.bluez.Manager")
        for path in manager.ListAdapters():
            registerAgent(path)
        logger.info("Agent registered on all paths")
    except Exception as err:
        logger.error("Something went wrong on the agent application")
        logger.exception(err)
def stats_restart(request):
    '''
    Delete statistics, we do drop table, not the recommended way but damn
    effective.

    NOTE(review): this chunk appears truncated; the function body continues
    beyond this view (plugin resets, syncdb, unlock) — confirm in the file.
    '''
    from django.core import management
    from django.db import connection, models
    from django.core.management.color import no_style
    from django.core.management import sql
    cursor = connection.cursor()
    logger.info("stats restart")
    # this tables are not going to be deleted
    tables = [
        'openproximity_bluetoothdongle',
        'openproximity_campaignfile',
        'openproximity_marketingcampaign',
        'openproximity_remotescannerbluetoothdongle',
        'openproximity_scannerbluetoothdongle',
        'openproximity_uploaderbluetoothdongle',
        'openproximity_generalsetting',
        'openproximity_userprofile',
    ]
    model = models.get_app('openproximity')
    drop = ""
    drop_table = sql.sql_delete(model, no_style())
    for line in drop_table:
        table_name = line.split()[2].replace('"', '').replace(';', '')
        if line.startswith('DROP TABLE'):
            # we don't want to loose settings
            if table_name not in tables:
                drop += "DROP TABLE %s;\n" % table_name
        elif line.find('CREATE INDEX') > -1:
            drop += "DROP INDEX %s;\n" % table_name
    try:
        server = rpyc.connect('localhost', 8010)
        server.root.Lock()
        logger.info("database locked")
    except:
        pass
    logger.info("about to drop")
    for line in drop.splitlines():
        try:
            connection.cursor().execute(line)
        except Exception as err:
            logger.error("%s failed" % line)
            logger.exception(err)
def get_campaign_rule(files):
    """Resolve the MarketingCampaign for each (file, campaign-id) pair.

    Returns the set of distinct campaigns found. Multiple distinct
    campaigns are logged as an error but all are still collected;
    unresolvable ids are logged and skipped.
    """
    logger.info('get_campaign_rule %s' % files)
    out = set()
    for file, camp_id in files:
        logger.debug(file)
        try:
            camp = MarketingCampaign.objects.get(pk=camp_id)
            logger.debug(camp)
            if len(out) > 0 and camp not in out:
                logger.error("multiple return values")
            out.add(camp)
        except Exception as err:
            logger.exception(err)
    # BUG FIX: the collected campaigns were never returned, so callers such
    # as handle_file_failed always saw None and raised "Couldn't find rule".
    return out
def handle(services, signal, uploader, *args, **kwargs):
    """Dispatch one uploader signal to its specific handler and persist a log line."""
    logger.info("uploader signal: %s" % signals.TEXT[signal])
    logl = LogLine()
    logl.content += signals.TEXT[signal]
    if signal == signals.SDP_RESOLVED:
        logl.content += ' %s:%s' % (kwargs['address'], kwargs['port'])
        handle_sdp_resolved(kwargs['dongle'], kwargs['address'],
                            kwargs['port'])
    elif signal == signals.SDP_NORECORD:
        logl.content += ' %s' % (kwargs['address'])
        handle_sdp_norecord(kwargs['dongle'], kwargs['address'],
                            kwargs['pending'])
    elif signal == signals.SDP_TIMEOUT:
        logl.content += ' %s' % (kwargs['address'])
        handle_sdp_timeout(kwargs['dongle'], kwargs['address'],
                           kwargs['pending'])
    elif signal == signals.FILE_UPLOADED:
        logl.content += ' %s' % (kwargs['address'])
        handle_file_uploaded(kwargs['dongle'], kwargs['address'],
                             kwargs['pending'], kwargs['port'],
                             kwargs['files'])
    elif signal == signals.FILE_FAILED:
        logl.content += ' %s, ret:%s' % (kwargs['address'], kwargs['ret'])
        handle_file_failed(kwargs['dongle'], kwargs['address'],
                           kwargs['pending'], kwargs['port'],
                           kwargs['files'], kwargs['ret'],
                           kwargs['stderr'], services)
    else:
        logger.error("signal ignored")
    logl.save()
def _find_plugins_for_egg(self, egg_name):
    '''
    Find plugin in egg file
    '''
    zf = zipfile.PyZipFile(egg_name)
    for entry in zf.namelist():
        # only top-level packages count, skip egg metadata
        if not entry.startswith('EGG-INFO') and entry.endswith('__init__.py'):
            try:
                if not egg_name in sys.path:
                    sys.path.append(egg_name)
                self.load_info(egg_name, entry.split('/')[0], egg=True)
                return
            except Exception as err:
                logger.error("Failed to load info from egg file: %s"
                             % egg_name)
                logger.exception(err)
def parsereading(device=None, seconds=None, battery=None, reading=None,
                 dongle=None):
    """Parse a LinkTH multi-sensor reading string and fill a LinkTHRecord.

    BUG FIX: the extra value for sensor families 19/1A/1B was stored via the
    undefined name ``send_id`` (a NameError at runtime); it now uses
    ``sen_id`` like the surrounding code.
    """
    logger.info("linkthrecord parsereading: %s" % reading)
    # find linkth device, or create if there's none yet created
    device, created = LinkTHDevice.objects.get_or_create(
        address=device,
        defaults={
            'friendly_name': _('Auto Discovered LinkTH Sensor'),
            'sensor': _('Linkth'),
            'mode': _('Monitor'),
        })
    record = LinkTHRecord()
    record.remote = device
    record.dongle = dongle
    record.time = datetime.fromtimestamp(seconds)
    record.battery = int(battery) / 1000.0  # asume we get battery * 1000
    while len(reading) > 0:
        m = LINE.match(reading)
        if not m:
            logger.error("monitorlinkth NO MATCH: %s" % reading)
            break
        m = m.groupdict()
        reading = m['rest']
        val = m['val']
        typ = val.split(',')[0].strip()
        m['id'] = m['id'].strip()
        try:
            sen_id = device.findSensorForId(m['id'])
        except Exception as err:
            # if we got here then we have more than MAXSENSORS
            # registered on this linkth
            logger.exception(err)
            break
        if not getattr(device, 'sensor%s_id' % sen_id, None):
            setattr(device, 'sensor%s_id' % sen_id, m['id'])
            setattr(device, 'sensor%s_name' % sen_id,
                    _("Auto discovered 1wire sensor"))
            setattr(device, 'sensor%s_family' % sen_id, SENSOR_FAMILY[typ])
            device.save()
        val = SENSOR_EXTRACTION[typ](val)
        setattr(record, 'value%s' % sen_id, val['temperature'])
        if typ in ['19', '1A', '1B']:
            setattr(record, 'value%sa' % sen_id, val['extra'])
    # NOTE(review): record is never saved in this chunk — likely truncated;
    # confirm a record.save() follows in the original file.
def parsereading(device=None, seconds=None, battery=None, reading=None,
                 dongle=None):
    """Parse one solar-controller reading and persist it as a SolarRecord."""
    logger.info("parsereading %s: %s" % (device, reading))
    # extract parameters from reading string
    m = READING.match(reading)
    if not m:
        logger.error("NO MATCH %s" % reading)
        return
    vals = m.groupdict()
    # find solar device, or create if there's none yet created
    device, created = SolarDevice.objects.get_or_create(
        address=device,
        defaults={
            'friendly_name': _('Autodiscovered Solar Sensor'),
            'sensor': _('Solar'),
            'mode': _('Monitor'),
        })
    record = SolarRecord()
    record.remote = device
    record.dongle = dongle
    # NOTE(review): assumes 'flow' is always a capture group of READING;
    # the power math below would raise NameError otherwise — confirm.
    for key in vals.keys():
        if key in ['solar', 'pool', 'tank']:
            setattr(record, '%s_v' % key, vals[key])
            setattr(record, key, SolarDevice.ntc_to_temperature(vals[key]))
        elif key == 'flow':
            setattr(record, '%s_v' % key, vals[key])
            flow = SolarDevice.flow(vals[key])
            record.flow = SolarDevice.gpm2lpm(flow)
        elif key == 'wattm':
            record.wattm = int(vals[key])
        elif key == 'day':
            record.day = vals['day'][0] == '1'
    record.watt_in = SolarDevice.power(flow, record.solar, record.pool)
    record.watt_out = SolarDevice.power(flow, record.tank, record.pool)
    record.watt_delta = record.watt_out - record.watt_in
    record.time = datetime.fromtimestamp(seconds)
    record.battery = int(battery) / 1000.0
    record.save()
    logger.info("work done")
def send_to_all(self, text, mimetype="text/plain"):
    """Push one multipart chunk to every client, collecting dead sockets."""
    delete = []
    for client in self.clientsockets:
        try:
            s = self.clientsockets[client]
            s.sendall("--myboundary\r\n")
            s.sendall("Content-type: %s\r\n" % mimetype)
            s.sendall("Content-size: %s\r\n" % len(text))
            s.sendall("\r\n")
            s.sendall(text)
            s.sendall("\r\n")
        except sk.error as err:
            logger.info("%s:%s got disconnected" % client)
            logger.error(err)
            s.close()
            delete.append(client)
def send_to_all(self, text, mimetype='text/plain'):
    """Push one multipart chunk to every client, collecting dead sockets."""
    delete = []
    for client in self.clientsockets:
        try:
            s = self.clientsockets[client]
            s.sendall('--myboundary\r\n')
            s.sendall('Content-type: %s\r\n' % mimetype)
            s.sendall('Content-size: %s\r\n' % len(text))
            s.sendall('\r\n')
            s.sendall(text)
            s.sendall('\r\n')
        except sk.error as err:
            logger.info("%s:%s got disconnected" % client)
            logger.error(err)
            s.close()
            delete.append(client)
def found_action(services, address, record, pending, dongle):
    """Offer a discovered device to each 'found_action' plugin until one takes it."""
    line = LogLine()
    line.content = "Found action for: %s" % address
    try:
        for plugin in pluginsystem.get_plugins('found_action'):
            logger.info("found action trying with %s" % plugin.name)
            service = plugin.rpc['found_action'](services=services,
                                                 record=record)
            if service:
                logger.info("plugin has handled")
                line.content += " %s is handling" % getattr(plugin, 'name',
                                                            'plugin')
                line.save()
                pending[record.remote.address] = service
                return True
    except Exception as err:
        logger.error("plugin do_action")
        logger.exception(err)
def getPIN(address, dongle):
    '''
    This method will try to ask the server which PIN code to use,
    otherwise fallback to the default PIN.
    '''
    global server
    if len(sys.argv) > 2:
        logger.info("server available")
        if not server:
            # NOTE(review): the original logs the same message twice here;
            # presumably this one meant "connecting to server" — confirm.
            logger.info("server available")
            server = connect(sys.argv[1], sys.argv[2])
    try:
        out = server.root.getPIN(address, dongle)
        return str(out)
    except Exception as err:
        logger.error("couldn't get PIN from server")
        logger.exception(err)
def parsereading(device=None, seconds=None, battery=None, reading=None,
                 dongle=None):
    """Parse a generic linear sensor reading and persist a GenericLinearRecord."""
    # extract parameters from reading string
    m = lin.match(reading)
    if not m:
        logger.error("NO MATCH %s" % reading)
        return
    m = m.groupdict()
    value = int(m['value'])
    mode = m['mode']
    slope = int(m['slope'])
    offset = int(m['offset'])
    if mode == 'A':
        # mode A encodes the slope as a divisor
        slope = 1.0 / slope
    logger.debug("reading %s, slope %s, offset %s, mode %s"
                 % (value, slope, offset, mode))
    # find device, or create if there's none yet created
    device, created = GenericLinearDevice.objects.get_or_create(
        address=device,
        defaults={
            'friendly_name': _('Auto Discovered Generic Linear Sensor'),
            'sensor': _('Temperature'),
            'mode': _('Monitor'),
            'slope': slope,
            'offset': offset,
        })
    reading = device.getValue(value)
    record = GenericLinearRecord()
    record.slope = slope
    record.offset = offset
    record.remote = device
    record.dongle = dongle
    record.reading = reading
    record.reading_mv = value
    record.time = datetime.fromtimestamp(seconds)
    record.battery = battery
    record.save()
def exposed_listener(self, signal, *args, **kwargs):
    """Fan a signal out to every 'rpc' plugin inside one DB transaction."""
    global enabled
    if not enabled:
        logger.debug("rpc is locked, dropping signal %s" % signal)
        return
    logger.debug("exposed_listener %s %s %s" % (signal, args, kwargs))
    kwargs['pending'] = pending
    try:
        for plugin in pluginsystem.get_plugins('rpc'):
            plugin.rpc['handle'](signal=signal, services=services,
                                 manager=self, *args, **kwargs)
        transaction.commit()  # commit only after all the plugins have handled
    except Exception as err:
        logger.error("rpc listener while doing plugins")
        logger.exception(err)
        transaction.rollback()  # oops rollback
def found_action(services, address, record, pending, dongle):
    """Offer a discovered device to each 'found_action' plugin until one takes it."""
    line = LogLine()
    line.content = "Found action for: %s" % address
    try:
        for plugin in pluginsystem.get_plugins('found_action'):
            logger.info("found action trying with %s" % plugin.name)
            service = plugin.rpc['found_action'](services=services,
                                                 record=record)
            if service:
                logger.info("plugin has handled")
                line.content += " %s is handling" % getattr(plugin, 'name',
                                                            'plugin')
                line.save()
                pending[record.remote.address] = service
                return True
    except Exception as err:
        logger.error("plugin do_action")
        logger.exception(err)
def __scan(self, times):
    """Run an RSSI scan for 'times' rounds, emitting DeviceFound per match."""
    try:
        self.client.socket.settimeout(30)
        self.client.sendLine("r%i" % times)
        logger.debug(self.client.readBuffer())
        flag = True
        while flag:
            lines = self.client.readBuffer(honnor_eol=True, timeout=1)
            for line in lines.splitlines():
                if self.record_pattern.match(line.strip()):
                    address, rssi = line.split('RSSI')
                    self.DeviceFound(AddDots(address), {'RSSI': int(rssi)})
                elif line.find('COMMAND') > -1:
                    # the device echoes COMMAND when the scan is done
                    logger.info("RSSI completed")
                    flag = False
                    break
    except (SPPException, SPPNotConnectedException, TypeError) as e:
        logger.error("error while scanning, could be that we lost connection")
        logger.exception(e)
def find_plugins(self):
    '''
    Init the plugin system, look for available plugins.
    This can only be done once.
    '''
    if self.plugin_infos is not None:
        return
    logger.info("looking for plugins")
    self.plugin_infos = dict()
    for path in plugins.__path__:
        if not os.path.isdir(path):
            continue
        for entry in os.listdir(path):
            if entry.startswith('_'):
                continue  # __init__.py etc.
            if entry.endswith('.py') or \
                    os.path.isdir(os.path.join(path, entry)):
                try:
                    self.load_info(path, entry.split('.')[0])
                except Exception as err:
                    logger.error("Failed to load info %s" % entry)
            if entry.endswith('.egg'):
                self._find_plugins_for_egg(os.path.join(path, entry))
def exposed_listener(self, signal, *args, **kwargs):
    """Fan a signal out to every 'rpc' plugin inside one DB transaction."""
    global enabled
    if not enabled:
        logger.debug("rpc is locked, dropping signal %s" % signal)
        return
    logger.debug("exposed_listener %s %s %s" % (signal, args, kwargs))
    kwargs['pending'] = pending
    try:
        for plugin in pluginsystem.get_plugins('rpc'):
            plugin.rpc['handle'](signal=signal, services=services,
                                 manager=self, *args, **kwargs)
        transaction.commit()  # commit only after all the plugins have handled
    except Exception as err:
        logger.error("rpc listener while doing plugins")
        logger.exception(err)
        transaction.rollback()  # oops rollback
def handle(services, signal, scanner, *args, **kwargs):
    """Dispatch one scanner signal to its specific handler and persist a log line."""
    logger.info("scanner signal: %s" % signals.TEXT[signal])
    logl = LogLine()
    logl.content += signals.TEXT[signal]
    if signal == signals.DONGLES_ADDED:
        logger.info("Dongles initializated")
        cycle_completed(scanner)
    elif signal == signals.NO_DONGLES:
        logger.error("NO SCANNER DONGLES!!!")
    elif signal == signals.DONGLE_NOT_AVAILABLE:
        logger.error("DONGLE NOT AVAILABLE %s" % kwargs['address'])
        logl.content += " " + kwargs['address']
        do_scan(scanner)
    elif signal == signals.CYCLE_SCAN_DONGLE_COMPLETED:
        logger.info("DONGLE DONE WITH SCAN %s" % kwargs['address'])
        logl.content += " " + kwargs['address']
        do_scan(scanner)
    elif signal == signals.CYCLE_COMPLETE:
        cycle_completed(scanner)
    elif signal == signals.CYCLE_START:
        pass
    elif signal == signals.CYCLE_SCAN_DONGLE:
        logl.content += " " + kwargs['address']
        started(scanner, kwargs['address'])
    elif signal == signals.FOUND_DEVICE:
        logl.content += " " + kwargs['address']
        addrecords(services, kwargs['address'], kwargs['records'],
                   kwargs['pending'])
    else:
        logger.error("unknown signal")
        raise Exception("Not known signal")
    logl.save()
logger.info("about to drop") for line in drop.splitlines(): try: connection.cursor().execute(line) except Exception, err: logger.error("%s failed" %line) logger.exception(err) logger.info("allowing plugins to drop statistic it's tables") for plugin in pluginsystem.get_plugins('statistics_reset'): try: getattr(plugin.module,'statistics_reset')(connection) except Exception, err: logger.error("plugin failed to reset statistics %s" % plugin) logger.exception(err) logger.info("calling syncdb") management.call_command('syncdb', migrate_all=True) try: server=rpyc.connect('localhost', 8010) server.root.Unlock() server.root.restart() except: pass logger.info("database unlocked") return HttpResponse("DELETE COMPLETE")
def poll(fd, condition): ''' This function gets called whenever there's data waiting in the incomming socket, so we can flush the data from the server. ''' try: server.poll() return True except EOFError, eof: logger.error("EOF while polling %s" % eof) logger.exception(eof) stop() return False except Exception, err: logger.error("error during poll %s" % err) logger.exception(err) return True def stop(): ''' Safe stop function ''' global manager from uploader import UploadManager if getattr(manager, 'exposed_stop', None): manager.exposed_stop() loop.quit() def handle_name_owner_changed(own, old, new): '''
''' # Plugin system base import os, re, StringIO, zipfile import ConfigParser, pkgutil, traceback import sys,functools from net.aircable.utils import logger __all__=['pluginsystem'] # try getting the plugins dir from the PYTHON_PATH otherwise create a fake one try: import plugins except Exception, err: import new logger.error("no plugins dir found") plugins = new.module('plugins') plugins.__path__=[] def find_plugin_dirs(): ''' Add external plugins dirs. ''' return [os.path.expanduser('~/.openproximity/plugins'), '/usr/lib/openproximity/plugins'] # add dirs from sys.path: plugins.__path__ = pkgutil.extend_path(plugins.__path__, plugins.__name__) # add dirs specific to sensorsdk: plugins.__path__ = find_plugin_dirs() + plugins.__path__
def poll(fd, condition):
    '''
    This function gets called whenever there's data waiting in the
    incoming socket, so we can flush the data from the server.
    '''
    try:
        server.poll()
        return True
    except EOFError as eof:
        logger.error("EOF while polling %s" % eof)
        logger.exception(eof)
        stop()
        return False
    except Exception as err:
        logger.error("error during poll %s" % err)
        logger.exception(err)
        return True


def stop():
    '''
    Safe stop function
    '''
    global manager
    from uploader import UploadManager
    if getattr(manager, 'exposed_stop', None):
        manager.exposed_stop()
    loop.quit()
from net.aircable.utils import logger, logmain if __name__ == '__main__': logmain('rpc.py') # setup Django ORM try: import settings # Assumed to be in the same directory. setattr(settings, "DEBUG", False) logger.info("RPC-DEBUG %s" % getattr(settings, "DEBUG")) from django.core.management import setup_environ setup_environ(settings) except ImportError: logger.error( "Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__) sys.exit(1) from net.aircable.openproximity.pluginsystem import pluginsystem pluginsystem.post_environ() # now we can safely import the rest import net.aircable.openproximity.signals as signals import openproximity.rpc as rpc import openproximity.rpc.scanner, openproximity.rpc.uploader import threading, time, traceback, sys from django.db import transaction, models, close_connection, reset_queries from openproximity.models import CampaignFile, Setting, getMatchingCampaigns, RemoteDevice from rpyc import Service, async
class OpenProximityService(Service):
    """rpyc service wiring remote scanners/uploaders into the RPC plugin layer."""

    dongles = None
    remote_quit = None

    def on_connect(self):
        services.add(self)

    def on_disconnect(self):
        # drop every pending transaction owned by this client
        mine = [p for p in pending if pending[p] == self]
        if len(mine) > 0:
            logger.info(
                "a client disconnected, clearing %s pending transactions"
                % len(mine))
            for p in mine:
                pending.pop(p)
        services.remove(self)

    def exit(self, exit):
        for ser in services:
            if ser.remote_quit is not None:
                try:
                    ser.remote_quit()
                except:
                    pass
        # NOTE(review): this rebinds a *local* name — the module-level
        # 'pending' set is not cleared; looks like a bug, confirm intent.
        pending = set()
        if exit:
            sys.exit(3)  # restart me please

    @transaction.commit_manually
    def exposed_listener(self, signal, *args, **kwargs):
        global enabled
        if not enabled:
            logger.debug("rpc is locked, dropping signal %s" % signal)
            return
        logger.debug("exposed_listener %s %s %s" % (signal, args, kwargs))
        kwargs['pending'] = pending
        try:
            for plugin in pluginsystem.get_plugins('rpc'):
                plugin.rpc['handle'](signal=signal, services=services,
                                     manager=self, *args, **kwargs)
            transaction.commit()  # commit only after all the plugins have handled
        except Exception as err:
            logger.error("rpc listener while doing plugins")
            logger.exception(err)
            transaction.rollback()  # oops rollback
        try:
            if signals.isScannerSignal(signal):
                rpc.scanner.handle(services, signal, self, *args, **kwargs)
            elif signals.isUploaderSignal(signal):
                rpc.uploader.handle(services, signal, self, *args, **kwargs)
            transaction.commit()  # commit only after scanner and upload has done it's work
        except Exception as err:
            logger.error("rpc listener while doing scanner or uploader")
            logger.exception(err)
            transaction.rollback()  # oops rollback
#gc.set_debug(gc.DEBUG_STATS) from net.aircable.utils import logger, logmain if __name__ == '__main__': logmain('rpc.py') # setup Django ORM try: import settings # Assumed to be in the same directory. setattr(settings, "DEBUG", False) logger.info("RPC-DEBUG %s" % getattr(settings, "DEBUG")) from django.core.management import setup_environ setup_environ(settings) except ImportError: logger.error("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__) sys.exit(1) from net.aircable.openproximity.pluginsystem import pluginsystem pluginsystem.post_environ() # now we can safely import the rest import net.aircable.openproximity.signals as signals import openproximity.rpc as rpc import openproximity.rpc.scanner, openproximity.rpc.uploader import threading, time, traceback, sys from django.db import transaction, models, close_connection, reset_queries from openproximity.models import CampaignFile, Setting, getMatchingCampaigns, RemoteDevice from rpyc import Service, async from rpyc.utils.server import ThreadedServer, ForkingServer
def handle_failed(pending, target, *args, **kwargs):
    """Log an upload failure and remove the target from the pending map."""
    logger.error("handle failed %s" % target)
    pending.pop(target)
logger.info("about to drop") for line in drop.splitlines(): try: connection.cursor().execute(line) except Exception, err: logger.error("%s failed" % line) logger.exception(err) logger.info("allowing plugins to drop statistic it's tables") for plugin in pluginsystem.get_plugins('statistics_reset'): try: getattr(plugin.module, 'statistics_reset')(connection) except Exception, err: logger.error("plugin failed to reset statistics %s" % plugin) logger.exception(err) logger.info("calling syncdb") management.call_command('syncdb', migrate_all=True) try: server = rpyc.connect('localhost', 8010) server.root.Unlock() server.root.restart() except: pass logger.info("database unlocked") return HttpResponse("DELETE COMPLETE")