def start(self):
    """Start the schedule configurator.

    Ensures TimeZone nodes can act as schedule-holder parents, loads the
    pickled holder and schedule dumps from the PDO under the lock, and then
    reinstantiates each persisted node after the superclass has started.
    Load failures are logged and skipped so one bad pickle cannot prevent
    the rest from loading.
    """
    self.nodes = []
    # Imported here (not at module level) to avoid a circular import.
    from mpx.service.time.time_zone import TimeZone
    if not IScheduleHolderParent.implementedBy(TimeZone):
        class_implements(TimeZone, IScheduleHolderParent)
    self.security_manager = self.nodespace.as_node(self.security_manager)
    self._pdo_lock.acquire()
    try:
        if not self._pdo:
            self._pdo = PersistentDataObject(self, dmtype=GC_NEVER)
            self._pdo.holders = {}
            self._pdo.schedules = {}
            self._pdo.load()
        # holders = {'manager-name': {'Holder1': HolderDump, ...}, ...}
        holderdumps = []
        for holderdict in self._pdo.holders.values():
            holderdumps.extend(holderdict.items())
        # schedules = {'local': {'Holder1': {'Sched1': Sched1Dump},
        #                        'Holder2': ...}, 'UTC': ...}
        scheduledumps = []
        for holderdict in self._pdo.schedules.values():
            for scheduledict in holderdict.values():
                scheduledumps.extend(scheduledict.items())
    finally:
        self._pdo_lock.release()
    super(ScheduleConfigurator, self).start()
    tstart = time.time()
    for holdername, holderdump in holderdumps:
        try:
            self.nodes.append(IPickles(cPickle.loads(holderdump))())
        except:
            message = self.LOADFAILURE % (self.name, 'Schedule Holder',
                                          holdername)
            msglog.log('broadway', msglog.types.ERR, message)
            msglog.exception(prefix='Handled')
    tlapse = time.time() - tstart
    msglog.log(
        'broadway', msglog.types.INFO,
        'Schedule Configurator loaded '
        '%s nodes in %s seconds.' % (len(holderdumps), tlapse))
    tstart = time.time()
    for schedulename, scheduledump in scheduledumps:
        try:
            self.nodes.append(IPickles(cPickle.loads(scheduledump))())
        except:
            # Fixed: failures here are schedules, not schedule holders;
            # the previous label made the error log misleading.
            message = self.LOADFAILURE % (self.name, 'Schedule',
                                          schedulename)
            msglog.log('broadway', msglog.types.ERR, message)
            msglog.exception(prefix='Handled')
    tlapse = time.time() - tstart
    msglog.log(
        'broadway', msglog.types.INFO,
        'Schedule Configurator loaded '
        '%s nodes in %s seconds.' % (len(scheduledumps), tlapse))
    self.template = self.parent.read_resource(self.page_template)
def start(self):
    """Start the driver configurator.

    Resolves the security manager, (re)creates and loads the value-driver
    PDO under the lock, starts the superclass, then reinstantiates every
    persisted value driver.  A driver that fails to unpickle is logged and
    skipped rather than aborting the load.
    """
    self.security_manager = self.nodespace.as_node(self.security_manager)
    self._pdo_lock.acquire()
    try:
        self._pdo = PersistentDataObject(self, dmtype=GC_NEVER)
        self._pdo.valuedrivers = {}
        self._pdo.load()
        driverdumps = self._pdo.valuedrivers.items()
    finally:
        self._pdo_lock.release()
    super(DriverConfigurator, self).start()
    begin = time.time()
    for name, dump in driverdumps:
        try:
            # Unpickle the saved state and invoke the adapter to
            # reinstantiate the driver node.
            IPickles(cPickle.loads(dump))()
        except:
            failure = self.LOADFAILURE % (self.name, 'Value Driver', name)
            msglog.log('broadway', msglog.types.ERR, failure)
            msglog.exception(prefix='Handled')
    elapsed = time.time() - begin
    msglog.log(
        'broadway', msglog.types.INFO,
        'Value Driver Configurator loaded '
        '%s nodes in %s seconds.' % (len(driverdumps), elapsed))
    return
def start(self):
    """Start the cloud configurator, performing a one-time PDO migration.

    If a legacy PersistentDataObject file exists on disk, its pickled
    formation is loaded, pushed into the cloud manager, and the PDO file
    is destroyed so the migration never runs again.
    """
    self.manager = self.nodespace.as_node(self.manager)
    self.security_manager = as_node(self.security_manager)
    self._pdo = PersistentDataObject(self)
    msg='The CloudConfigurator Persistent Object is in the file :%s' %str(self._pdo.filename())
    msglog.log('CloudConfigurator', msglog.types.INFO,msg)
    # Migration only applies when a PDO file was left behind by an
    # earlier version; a fresh install has no file and skips this block.
    if os.path.exists(self._pdo.filename()):
        # Migration
        msglog.log('CloudConfigurator', msglog.types.INFO, 'PDO Migration in Progress')
        # Seed the attribute with the manager's current formation, then
        # load(); presumably load() overwrites it with the on-disk value
        # when one exists, leaving the seed as the default — TODO confirm
        # against PersistentDataObject.load semantics.
        self._pdo.formation = cPickle.dumps(IPickles(self.manager.formation))
        self._pdo.load()
        formation = IPickles(cPickle.loads(self._pdo.formation))()
        msglog.log('CloudConfigurator', msglog.types.INFO, 'PDO Migration for the Formation:%s' %str(formation))
        self.manager.update_formation(formation,None)
        # Remove the PDO file and drop the reference: formation state now
        # lives with the manager, not in this configurator's PDO.
        self._pdo.destroy()
        del(self._pdo)
        msglog.log('CloudConfigurator', msglog.types.INFO, 'PDO Migration is Complete')
    return super(CloudConfigurator, self).start()
def updatepdo(self):
    """Re-pickle every exporter and persist the snapshot.

    Builds a fresh name->dump mapping from the manager's current
    exporters and saves it to the PDO, all under the PDO lock.
    """
    self._pdo_lock.acquire()
    try:
        snapshot = {}
        for node in self.manager.get_exporters():
            snapshot[node.name] = cPickle.dumps(IPickles(node))
        self._pdo.exporters = snapshot
        self._pdo.save()
    finally:
        self._pdo_lock.release()
def decode(self, data):
    """Unpickle *data* and invoke its IPickles adapter to rebuild the object.

    On any failure the decoder and raw state are logged with a traceback
    before the exception is re-raised to the caller.
    """
    state = cPickle.loads(data)
    pickler = IPickles(state)
    try:
        return pickler(True)
    except:
        msglog.warn(
            "Decoder %s failed to decode state: %r." % (pickler, state))
        msglog.exception()
        raise
def handle_remote_event(self, data):
    """Dispatch a pickled cloud event received from a peer.

    Invalid events are dropped; valid ones are dispatched locally, and
    CloudFormation events are propagated onward unless this host is
    receiving them as the portal.
    """
    # Rebuild the event object from the pickled payload.
    cloudevent = IPickles(cPickle.loads(data))()
    self.message('Handling remote event from : %s topic=%s ' %(cloudevent.origin,cloudevent.topics))
    # Keep the original wire payload on the event for re-propagation.
    cloudevent.set_data(data)
    if(self.is_event_valid(cloudevent) == False ):
        self.message('Dropping the remote event from : %s topic=%s ' %(cloudevent.origin,cloudevent.topics),msglog.types.WARN)
        return
    self.dispatcher.dispatch(cloudevent, cloudevent.topics)
    # Only CloudFormation events travel further than this host.
    if(not ('CloudFormation' in cloudevent.topics) ):
        return
    ''' Dont propogate an event if we are Portal '''
    # NOTE(review): when the event names a portal and that portal is this
    # peer, we are the terminus — do not forward it back into the cloud.
    if((cloudevent.portal != None )and (utils.same_host(self.peer,cloudevent.portal)) ):
        self.message('Not Propagating remote event, since I am getting it as a portal:')
        return
    self.propogate(cloudevent)
def updatepdo(self, managernode):
    """Rebuild the persisted holder/schedule pickles for one manager.

    Walks up from *managernode* to the nearest IScheduleHolderParent,
    then, under the PDO lock, re-pickles every holder and schedule whose
    ``source`` is 'ScheduleConfigurator' into the nested PDO dictionaries
    and saves the PDO.
    """
    self.debugout("updatepdo(%s)", as_node_url(managernode))
    tstart = time.time()
    # Climb to the owning manager; only holder-parents key the PDO dicts.
    while not IScheduleHolderParent.providedBy(managernode):
        managernode = managernode.parent
    managername = managernode.name
    self.message('updating PDO for %s.', as_node_url(managernode))
    self._pdo_lock.acquire()
    try:
        # Clear in place (not reassign) so the PDO keeps the same dict
        # objects it will serialize on save().
        holderdict = self._pdo.holders.setdefault(managername, {})
        holderdict.clear()
        scheduledicts = self._pdo.schedules.setdefault(managername, {})
        scheduledicts.clear()
        children = managernode.children_nodes()
        holders = filter(IScheduleHolder.providedBy, children)
        for holder in holders:
            # Only persist nodes this configurator created; others are
            # owned elsewhere and must not be duplicated on restart.
            if holder.source == 'ScheduleConfigurator':
                self.debugout('storing holder %s.', as_node_url(holder))
                holderdict[holder.name] = cPickle.dumps(IPickles(holder))
            else:
                self.debugout('skipping holder %s.', as_node_url(holder))
            # Schedules are stored per-holder regardless of whether the
            # holder itself was persisted above.
            scheduledict = scheduledicts.setdefault(holder.name, {})
            scheduledict.clear()
            schedules = filter(ISchedule.providedBy, holder.children_nodes())
            for schedule in schedules:
                if schedule.source == 'ScheduleConfigurator':
                    self.debugout('storing schedule %s.', as_node_url(schedule))
                    scheduledata = cPickle.dumps(IPickles(schedule))
                    scheduledict[schedule.name] = scheduledata
                else:
                    self.debugout('skipping schedule %s.', as_node_url(schedule))
        self._pdo.save()
    finally:
        self._pdo_lock.release()
    tend = time.time()
    self.debugout('took %0.3f secs to update PDO.', tend - tstart)
def __getstate__(self):
    """Capture the wrapped event as a plain dict for pickling.

    Raw (non-IPickles) sources are wrapped in EventSource so the pickled
    state remains adaptable when reloaded.
    """
    event = self.event
    source = event.source
    if not IPickles.providedBy(source):
        source = EventSource(source)
    return {
        'class': type(event),
        'source': source,
        'origin': event.origin,
        'targets': event.targets,
        'topics': event.topics,
        'event': event.event,
        'guid': event.GUID,
        'portal': event.portal,
    }
def start(self):
    """Start the exporters configurator.

    Loads the exporter PDO under the lock, starts the superclass, then
    reinstantiates every persisted exporter and records how long the
    load took.
    """
    self.container = self.nodespace.as_node(self.container)
    self._pdo_lock.acquire()
    try:
        self._pdo = PersistentDataObject(self, dmtype=GC_NEVER)
        self._pdo.exporters = {}
        self._pdo.load()
        dumps = self._pdo.exporters.values()
    finally:
        self._pdo_lock.release()
    super(ExportersConfigurator, self).start()
    begin = time.time()
    for dump in dumps:
        IPickles(cPickle.loads(dump))()
    elapsed = time.time() - begin
    msglog.log(
        'broadway', msglog.types.INFO,
        'Exporter Configurator loaded '
        '%s exporters in %s seconds.' % (len(dumps), elapsed))
    self.manager = self.container
def __handle_by_context(self, context, request, data):
    """Handle a form POST against the manager node named by *context*.

    Supports add/remove/configure/edit operations plus a generic action
    invocation, updates the value-driver PDO when node state changed, and
    returns the node to adapt for the response (security-wrapped when
    ``self.secured`` is set).
    """
    node_url = urllib.unquote_plus(context)
    manager, managernode = self.__as_node(node_url)
    adapt = manager
    update_pdo = False
    if data.has_key('add'):
        childnode = None
        childconfig = {}
        # First check for 'edit' request, wherein a node is created iff
        # it does not exist. The value of edit is the name of the child.
        if data.has_key('edit'):
            name = urllib.unquote_plus(data['edit'][0])
            if managernode.has_child(name):
                childnode = managernode.get_child(name)
            childconfig['name'] = name
        # Testing Authorization by getting add_child function.
        if data.has_key('configure.name'):
            # Use setdefault to leave name untouched if already provided.
            childconfig.setdefault(
                'name', urllib.unquote_plus(data['configure.name'][0]))
        if data.has_key('configure.input'):
            # Use setdefault to leave name untouched if already provided.
            childconfig.setdefault(
                'input', urllib.unquote_plus(data['configure.input'][0]))
        if childnode is None:
            # Only a genuinely new child dirties the PDO.
            childnode = self._create_child(ValueDriver, managernode, childconfig)
            update_pdo = True
        adapt = childnode
    elif data.has_key('remove'):
        # Testing Authorization by getting remove_child.
        # NOTE: bare attribute access is deliberate — a secured node
        # raises here if the caller lacks permission.
        manager.remove_child
        name = urllib.unquote_plus(data['remove'][0])
        childnode = manager.get_child(name)
        childnode.prune()
        adapt = manager
        update_pdo = True
    elif data.has_key('configure'):
        name = urllib.unquote_plus(data['configure'][0])
        childnode = manager.get_child(name)
        # Testing Authorization by getting configure function.
        childnode.configure
        # Collect 'configure.<attr>' form fields into a config dict;
        # single values are unwrapped from their one-element lists.
        config = {}
        for attrname in data.keys():
            splitname = attrname.split('.')
            if len(splitname) == 2 and splitname[0] == 'configure':
                values = map(urllib.unquote_plus, data[attrname])
                if len(values) == 1:
                    config[splitname[1]] = values[0]
                else:
                    config[splitname[1]] = values
        msglog.log('broadway', msglog.types.DB,
                   'Configuring "%s" with %s.' % (name, config))
        # Stop/reconfigure/restart; stop and start failures are logged
        # but do not abort the reconfiguration.
        try:
            childnode.stop()
        except:
            msglog.exception(prefix='Handled')
        childnode.configure(config)
        try:
            childnode.start()
        except:
            msglog.exception(prefix='Handled')
        adapt = childnode
        update_pdo = True
    elif data.has_key('edit'):
        # Pure edit view: select the child without modifying anything.
        editing = data.get('edit')
        name = urllib.unquote_plus(editing[0])
        adapt = manager.get_child(name)
        update_pdo = False
    ##
    # Generic action handling for flexibility.
    action = None
    if data.has_key('actionName'):
        action = urllib.unquote_plus(data.get('actionName')[0])
    elif data.has_key('invocation'):
        action = urllib.unquote_plus(data.get('invocation')[0])
    if action is not None:
        if data.has_key('target'):
            target = urllib.unquote_plus(data.get('target')[0])
        elif data.has_key('edit'):
            target = urllib.unquote_plus(data['edit'][0])
        else:
            raise ValueError(
                'Action must include either "target" or "edit" params.')
        params = map(urllib.unquote_plus, data.get('params', []))
        child = manager.get_child(target)
        method = getattr(child, action)
        result = method(*params)
        update_pdo = True
        print 'On %s: %s%s -> %s' % (target, action, params, result)
    # End Generic action handling.
    ##
    if update_pdo:
        # Re-pickle every current child into the PDO under the lock.
        pdodict = self._pdo.valuedrivers
        tstart = time.time()
        self._pdo_lock.acquire()
        try:
            pdodict.clear()
            children = managernode.children_nodes()
            for child in children:
                pdodict[child.name] = cPickle.dumps(IPickles(child))
            self._pdo.save()
        finally:
            self._pdo_lock.release()
        tend = time.time()
        tlapse = tend - tstart
        message = 'Driver Configurator pickled and wrote '
        message += '%s nodes in %s seconds ' % (len(children), tlapse)
        message += 'for "%s".' % managernode.name
        msglog.log('broadway', msglog.types.INFO, message)
    if self.secured:
        adapt = self.security_manager.as_secured_node(adapt)
        adapt.test_adaptability()
    return adapt
def unmarshal(dump):
    """Reconstruct a node from its pickled IPickles *dump*."""
    state = cPickle.loads(dump)
    return IPickles(state)()
def marshal(node):
    """Serialize *node* by pickling its IPickles adapter."""
    adapter = IPickles(node)
    return cPickle.dumps(adapter)
def unmarshal(trenddump):
    """Reconstruct a trend node from its pickled IPickles *trenddump*."""
    state = cPickle.loads(trenddump)
    return IPickles(state)()
def marshal(trend):
    """Serialize *trend* by pickling its IPickles adapter.

    The trend is first adapted to IConfigurableNode so that IPickles
    resolves the right adapter for it.
    """
    configurable = IConfigurableNode(trend)
    adapter = IPickles(configurable)
    return cPickle.dumps(adapter)
def encode(self, obj):
    """Pickle *obj* wrapped in its IPickles adapter."""
    pickler = IPickles(obj)
    return cPickle.dumps(pickler)
# Ad-hoc script: dump alarms from the Alarm Manager to /tmp for inspection.
from mpx.lib.node import as_node
am = as_node('/services/Alarm Manager')
import cPickle
from mpx.componentry.interfaces import IPickles

# Pickle every alarm object directly (no IPickles adaptation here).
alarms = am.get_alarms()
pickles = []
for alarm in alarms:
    pickles.append(cPickle.dumps(alarm))
# Write each pickle to /tmp/alarm<i>.
# BUG FIX: original wrote 'pickes[i]' (NameError) and shadowed the
# builtin 'file'.
for i, dump in enumerate(pickles):
    outfile = open('/tmp/alarm%s' % i, 'w')
    outfile.write(dump)
    outfile.close()

# Round-trip the first two alarms through their IPickles adapters.
a1 = alarms[0]
a1p = IPickles(a1)
a1data = cPickle.dumps(a1p)
a2 = alarms[1]
a2p = IPickles(a2)
a2data = cPickle.dumps(a2p)
f1 = open('/tmp/a1','w')
f1.write(a1data)
f1.close()
f2 = open('/tmp/a2','w')
f2.write(a2data)
f2.close()
# Read the dumps back for verification.
f1 = open('/tmp/a1','r')
f2 = open('/tmp/a2','r')
a1data = f1.read()
a2data = f2.read()