def start(self):
    """Start the dallas bus service.

    Enumerates the devices on the bus, logs their count and addresses
    (CSCtl81599), then launches the background queue thread.  Raises
    EAlreadyRunning if the service is already running.

    Fix: use identity comparison ``is None`` instead of ``== None``.
    """
    AVRNode.start(self)
    self._start_called = 1
    self.devices, self.device_addresses = self.findall()
    if self.running:
        raise EAlreadyRunning()
    # Inform in msglog the number of devices on the dallas bus and their
    # addresses (CSCtl81599).
    if self.devices is None:
        no_of_devices = 0
    else:
        no_of_devices = len(self.device_addresses)
    msglog.log('broadway', msglog.types.INFO,
               'There are %d devices found on "%s" bus'
               % (no_of_devices, self.name))
    if no_of_devices:
        addr_str = ''
        for addr in self.device_addresses:
            dallas_bus_addr = address_to_asciihex(addr)
            addr_str = addr_str + ' ' + dallas_bus_addr
        msglog.log('broadway', msglog.types.INFO,
                   'The device addresses on "%s" bus : %s\n'
                   % (self.name, addr_str))
    # Start the thread to read the dallas bus irrespective of whether the
    # devices are present or not (CSCtl81599).
    self.running = 1
    thread = Thread(name=self.name, target=self._queue_thread, args=())
    self.request(self._convert_temperature_sensor_list)
    thread.start()
def configure_node(self, nodeurl, config):
    """Reconfigure a managed node, rolling back on failure.

    The node is stopped for the duration of the change.  If the new
    configuration fails to apply, the last-known-good configuration
    cached in ``self.nodes[nodeurl]`` is restored (best effort).  The
    node is restarted in the ``finally`` clause regardless of outcome.
    Returns the node's resulting configuration.
    """
    node = self.get_managed_node(nodeurl)
    node.stop()
    try:
        node.configure(config)
    except:
        msglog.log("broadway", msglog.types.WARN,
                   "Error prevented reconfiguration of node: %s" % node)
        msglog.exception(prefix="handled")
        msglog.log("broadway", msglog.types.WARN,
                   "Rolling back configuration.")
        try:
            # Restore the previously persisted configuration.
            node.configure(self.nodes[nodeurl])
        except:
            msglog.log("broadway", msglog.types.WARN,
                       "Configuration rollback failed.")
            msglog.exception(prefix="handled")
        else:
            msglog.log("broadway", msglog.types.INFO,
                       "Rollback of configuration succeeded.")
    else:
        # Success: persist the new configuration.
        msglog.log("broadway", msglog.types.INFO,
                   "Node reconfigured: %s" % node)
        self.updatepdo(nodeurl, node)
    finally:
        # Always restart the node, whatever configuration it ended up with.
        node.start()
    return node.configuration()
def create_partition_tree(name, mount_point):
    """Create a DriveNode for *mount_point* under the storage root.

    Builds the drive node, attaches 'available', 'size' and 'used'
    DriveAttributeNode children, and starts the drive node (children are
    started implicitly by the parent's start).
    """
    global storage_root_url
    children_names = ['available', 'size', 'used', ]
    # NOTE(review): the log message says 'create_drive_tree' although this
    # function is named create_partition_tree -- looks like a stale name;
    # message left unchanged to preserve runtime output.
    msgstr = 'In create_drive_tree with name of %s ' % name
    msgstr += 'and mount_point of %s.' % mount_point
    msglog.log('Services_Storage', msglog.types.INFO, msgstr)
    pnode = as_node(storage_root_url)
    dict = {'name':name, 'mount_point':mount_point, 'debug':1, 'parent':pnode, }
    drive_node = DriveNode()
    drive_node.configure(dict)
    #
    for x in children_names:
        dict = {'name':x, 'debug':1, 'parent':drive_node, }
        drive_attr_node = DriveAttributeNode()
        drive_attr_node.configure(dict)
    #
    drive_node.start()
def set_permissions(self, role, *permissions):
    ##
    # Permissions parameter can be one of three things:
    # - A single list or tuple object, whose items will
    #   replace the permissions tuple.
    # - A variable number of permission strings.
    # - A single callable object which will be called
    #   and whose return value will be returned anytime
    #   the permissions for this role are queried.
    if not isinstance(role, str):
        # Accept role objects as well as role names.
        role = role.name
    if len(permissions) == 1:
        if type(permissions[0]) in (list, tuple):
            permissions = permissions[0][:]
        elif callable(permissions[0]):
            permissions = permissions[0]
    if not self.parent.parent.role_manager.has_role(role):
        raise ValueError('Role "%s" does not exist.' % role)
    if isinstance(permissions, (list, tuple)):
        # Explicit permission lists must only contain defined permissions.
        defined = self.parent.get_permissions()
        for permission in permissions:
            if permission not in defined:
                raise ValueError('Permission "%s" not defined.' % permission)
    self.__lock.acquire()
    try:
        # Roles whose permissions are computed by a callable are
        # "inherent" and may not be overwritten: re-applying the callable
        # subset after the assignment restores any callable entry.
        inherent = self.rolemap.callable_subset()
        self.rolemap[role] = permissions
        self.rolemap.update(inherent)
    finally:
        self.__lock.release()
    if inherent.has_key(role):
        # The assignment above was undone by the callable restore; warn.
        message = 'Permissions for role "%s" in policy "%s" '
        message += 'cannot be changed. An attempt has been ignored.'
        message = message % (role, self.name)
        msglog.log('broadway', msglog.types.WARN, message)
    return
def start(self):
    """Start the Delphi property node.

    When the parent is enabled: remove any stale local event files,
    locate the 'status' child node, normalize the interface PC address
    (strip a leading protocol prefix), and kick the poll scheduler.
    Always delegates to the superclass start.
    """
    msglog.log('Delphi', INFO, "Property Start")
    if self.parent.enabled:
        self.local_file_path = '/tmp/'+self.name+'_EVENTS.NSS'
        # Lets remove if there is any stale file
        if exists(self.local_file_path):
            remove(self.local_file_path)
        if exists(self.local_file_path+'_bak'):
            remove(self.local_file_path+'_bak')
        if exists(self.local_file_path+'_prevday'):
            remove(self.local_file_path+'_prevday')
        for child in self.children_nodes():
            if child.identity == 'status':
                self.downloadStatus = child
                break
        if self.interface_pc_addr == None or self.event_file_location == None:
            msglog.log('Delphi', WARN, "Check interface pc address and event file location configurations")
        else:
            # Strip a leading '<interface>://' prefix from the address.
            protocol = self.communication_interface+'://'
            if self.interface_pc_addr[:len(protocol)] == protocol:
                self.interface_pc_addr = self.interface_pc_addr[len(protocol):]
            if not self.scheduler.is_started():
                try:
                    self.scheduler.start()
                    self.schedulePollAfterInterval( 5 )#start scheduler in 5 second
                except:
                    msglog.exception()
    # NOTE(review): superclass start assumed to run unconditionally
    # (reconstructed placement) -- confirm against original layout.
    super(Property, self).start()
def output_callback(self, stream):
    """Stream up to 10 workout sessions per invocation into *stream*.

    Resume state lives in the stream's metadata: 'data' (list of session
    dicts), 'index' (next session to send), 'formatter' (XML formatter),
    and 'remaining' (unsent tail of a partially written chunk).  When the
    session list is exhausted the closing tag is written and the stream
    is closed.
    """
    ws_dicts = stream.get_meta_value('data')
    index = stream.get_meta_value('index')
    formatter = stream.get_meta_value('formatter')
    remaining = stream.get_meta_value('remaining')
    if remaining:
        # Finish flushing a previous partial write before new sessions.
        remaining = remaining[stream.write(remaining):]
        stream.set_meta('remaining',remaining)
        if remaining:
            # Still blocked; wait for the next callback.
            return None
    for i in range(index, index + 10):
        try:
            ws_dict = ws_dicts[i]
        except IndexError:
            # All sessions sent: close out the document and the stream.
            formatter.close_tag('workout-sessions')
            stream.write(formatter.output())
            stream.close()
            msglog.log('mpx',msglog.types.INFO,'Sent %u workout sessions to InSite server.' \
                       % len(ws_dicts))
            return None
        if not ws_dict.has_key('timestamp'):
            raise EIncompatiableFormat()
        ws = _xml.WorkoutSession(ws_dict)
        ws.get_xml(formatter)
        output = formatter.output()
        count = stream.write(output)
        stream.set_meta('index', i + 1)
        if count != len(output):
            # Short write: stash the tail and resume on the next callback.
            stream.set_meta('remaining',output[count:])
            return None
    return None
def get(self, manager, **keywords):
    """Perform a batched BACnet read-property-multiple (RPM).

    On RPM failure: single-property batches return the exception as the
    result; multi-property batches disable batching for all member
    points and raise EBadBatch.  Individual results are collected from
    each node's get_result().
    """
    if debug:
        print 'Start of Batch:%s' % (self.properties)
    now = time.time()
    self.last_get_time = now
    callback = None
    if keywords.has_key('callback'):
        keywords['callback'].callback(self.rpm_callback)
    try:
        #print 'Calling read property multiple'
        rars = rpm(self.device, self.properties, self.timeout, **keywords)
        if isinstance(rars, Callback):
            return rars #if we are in callback mode
        #print 'RPM TIME:%s' % (time.time()-now,)
    except BACnetException, e:
        if len(self.ids) == 1: #single propety rpms throw exceptions at this level
            return {self.ids[0]: e} #simply return exception as the result
        self.total_rpm_error_counter += 1
        # NOTE(review): '> 0' is always true after the increment above, so
        # every multi-property RPM failure raises EBadBatch and the
        # fallback code below is unreachable -- confirm intent.
        if self.total_rpm_error_counter > 0: #catch run away thrashing
            for id in self.ids:
                nr = self.map[id]
                nr.set_batch_manager(None) #none of these points can batch
            raise EBadBatch('bacnet', self, 'error threshold exceeded')
        #at this point this is a multiple property read that failed
        msglog.exception()
        if debug:
            print 'rpm failed, trying rp', str(self.properties)
    # Collect per-node results populated by the RPM above.
    answer = {}
    for id in self.ids:
        nr = self.map[id]
        result = nr.get_result()
        answer[id] = result
        if isinstance(result.value.value, BACnetError):
            nr.set_batch_manager(None) #turn off rpm for the offending property
            msglog.log('bacnet', nr.as_node_url(), 'cannot get multiple')
            if debug:
                msglog.exception()
            raise EBadBatch('bacnet', self, 'rpm failed')
def _setup_trigger(self):
    """Subscribe to the configured trigger node, retrying until it exists.

    If the trigger node is not yet available (not started or missing),
    logs a warning and reschedules this method in 60 seconds.

    Fix: the original ``except ENotStarted, ENoSuchNode:`` is Python 2
    syntax that catches ONLY ENotStarted and binds the instance to the
    name ENoSuchNode; both exception types are now caught via a tuple.
    """
    try:
        self._sid = SM.create_delivered(self, {1: as_node(self.trigger)})
    except (ENotStarted, ENoSuchNode):
        msg = 'TriggeredExporter trigger: %s does not exist - could be nascent' % self._trigger
        msglog.log('broadway', msglog.types.WARN, msg)
        # Retry once the rest of the node tree has had time to start.
        scheduler.seconds_from_now_do(60, self._setup_trigger)
def start(self):
    """Start the PSP request handler.

    Selects the web root based on the parent server type, locates the
    sibling FileSpace handler, and enables security mode only when both
    the 'secured' flag is set and a Security Manager is available;
    otherwise logs a warning and runs unsecured.
    """
    RequestHandler.start(self)
    self.WEB_ROOT = properties.HTTP_ROOT
    if self.parent.server_type == 'HTTPS':
        self.WEB_ROOT = properties.HTTPS_ROOT
    for handler in self.parent.children_nodes():
        if isinstance(handler, FileSpace):
            self.filespace = handler
            break
    err_message = 'PSP handler not running in secured mode because \
no %s was found. Config parameter "secured" \
being overriden to False'
    if self.secured and self.filespace:
        try:
            sm = as_node('/services/Security Manager')
        except KeyError:
            # Security Manager node absent: fall back to unsecured mode.
            msglog.log('broadway', msglog.types.WARN,
                       err_message % 'Security Manager')
            msglog.exception(prefix = 'Handled')
            self.provides_security = self._secured = False
        else:
            self.security_manager = sm
            self.provides_security = self._secured = True
    else:
        if self.secured:
            # not secured because we could not locate filespace object.
            msglog.log('broadway', msglog.types.WARN,
                       err_message % 'FileSpace manager')
        self.provides_security = self._secured = False
def _process_put(self, path, request):
    """Handle an HTTP PUT: write the request body to *path*.

    Replies 405 for directories/IO errors, 411 for an empty body,
    204 when an existing file was updated, 201 when created.
    """
    if self.debug:
        msg = "putting <%s>" % path
        msglog.log(self.name, msglog.types.DB, msg)
    try:
        if self._filesystem.isdir(path):
            request.error(405, "A PUT request must specify"
                          " a file, not a directory.")
            return
        # Remember whether the target existed so we can pick 204 vs 201.
        update = self._filesystem.isfile(path)
        file = self._openwrite(path, "wb")
    except IOError:
        request.error(405)
        return
    data = request.get_data()
    # First read uses a 60 second timeout to detect a missing body.
    read = data.read(1024, 60)
    if not read:
        request.error(411)
        return
    while read:
        file.write(read)
        read = data.read(1024)
    else:
        # while/else: runs when the loop exits normally (no break),
        # i.e. always here -- closes the destination file.
        file.close()
    rzutils.wefs_cache(os.path.join(self.server_root, path))
    if update:
        request.reply(204)
    else:
        request.reply(201)
def checkAndCreateGzipDirectory(self, absFilepath):
    """Ensure the parent directory of *absFilepath* exists.

    Creates the directory (including intermediates) when missing.
    Creation failures are logged rather than raised.
    """
    target_dir = os.path.dirname(absFilepath)
    if os.path.exists(target_dir):
        return
    try:
        os.makedirs(target_dir)
    except Exception as e:
        # Best effort: record the failure and carry on.
        msglog.log("broadway", msglog.types.INFO,
                   "EXCEPTION in creating directory: %r" % e)
def cancel(self):
    """Close this handler's channel, logging (never raising) failures."""
    msglog.log('broadway', WARN, '%r closing channel' % self)
    try:
        self.channel.close()
    except:
        # Best-effort close: swallow the error after recording it.
        msglog.exception(prefix='Handled')
    return
def _stop(self):
    """Stop the RNA service, returning 1 on success and 0 on failure.

    Waits out a PENDING state, flips to HALTING, then pokes the service
    with a bogus request over a fresh transport so its accept loop wakes
    up and notices the state change.
    """
    while self.state is self.PENDING:
        pause(.1)
    if self.state is not self.STOPPED:
        self.state = self.HALTING
        msg = 'RNA service stopping on %s.'
        msglog.log('broadway', msglog.types.INFO, msg % self.transport)
        try:
            # Hack to wake up the tread...
            t = self.transportClass(**self.configuration())
            # TODO: THIS CANNOT WORK. Neither SimpleTcpService nor
            # SrnaService has a connect() method:
            t.connect()
            i = mpx.lib.rna._InvokeCommand("BOGUS")
            i.pack(ProtocolCommand('/','no_such_method_i_hope',()))
            i.totransport(t.send)
            # TODO: THIS CANNOT WORK. Neither SimpleTcpService nor
            # SrnaService has a disconnect() method:
            t.disconnect()
            while self.state is not self.STOPPED:
                pause(.1)
            return 1
        except:
            msglog.exception()
    # NOTE(review): reconstructed placement -- reached when the wake-up
    # attempt failed, or when the service was already stopped on entry.
    return 0
def start(self):
    """Start the Energywise remote agent node.

    Validates the cpex port (1025..65535), then selects the usage
    lookup strategy: for SNMP protocol, builds the SNMP node subtree and
    a (kind, name) -> child map; otherwise uses the cpex path.
    """
    if self.debug:
        msg = 'Inside Start()'
        msglog.log('Energywise:', msglog.types.INFO, msg)
    if not self.running:
        if not (1024 < self.cpex_port < 65536):
            raise EConfiguration(
                "Invalid port specified (%d). "
                "Please enter values between 1025 and 65535 " % self.cpex_port
            )
        self.running = 1
        self._cpex_connect = self._cpex_connect_orig
        Node.start(self)
        if self.PROTOCOL_SNMP == self.protocol:
            # Create SNMP node for this remote_agent
            self.createEnergywiseSNMPNodes()
            self.snmp_switch_agent_node = self.get_snmp_switch_agent_node()
            self.cewEntEnergyUsage_node = self.get_cewEntEnergyUsage_node()
            self.cewEntEnergyUnits_node = self.get_cewEntEnergyUnits_node()
            # Map ('usage'|'units', child name) -> SNMP child node for
            # fast lookup during polling.
            self.snmp_usage_map = {}
            for child in self.cewEntEnergyUsage_node.children_nodes():
                self.snmp_usage_map[('usage', child.name)] = child
            for child in self.cewEntEnergyUnits_node.children_nodes():
                self.snmp_usage_map[('units', child.name)] = child
            self.get_switch_usage = self.snmp_switch_usage
        else:
            self.get_switch_usage = self.cpex_switch_usage
    return
def start(self):
    """Start the triggers configurator.

    Lazily creates the persistent trigger dictionary, migrating data
    from a legacy PersistentDataObject file if one exists, then loads
    the triggers and resolves the security manager when secured.
    """
    filename = '%s (%s)' % (self.name, 'triggers')
    self.manager = self.nodespace.as_node(self.manager)
    self._pdo_lock.acquire()
    try:
        if self._triggers is None:
            self._triggers = PersistentDictionary(
                filename, encode=None, decode=None)
            if not self._triggers:
                # Empty dictionary: check for legacy PDO data to migrate.
                pdodata = PersistentDataObject(self, dmtype=GC_NEVER)
                if os.path.exists(pdodata.filename()):
                    msglog.log('broadway', msglog.types.INFO,
                               "Migrating previous trigger data.")
                    pdodata.triggers = {}
                    pdodata.load()
                    self._triggers.update(pdodata.triggers)
                    pdodata.destroy()
                del(pdodata)
        self._loadtriggers()
        if self.secured:
            self.security_manager = self.as_node("/services/Security Manager")
        else:
            self.security_manager = None
    finally:
        self._pdo_lock.release()
    return super(TriggersConfigurator, self).start()
def update(self):
    """Poll the subscribed kWh source and record a (value, timestamp) sample.

    On a failed or exceptional poll the rolling history is cleared (the
    kW calculation must not span a gap) and a single warning is logged
    until polling recovers.  Always reschedules itself.
    """
    try:
        value = ts = None
        result = SM.poll_all(self._sid)
        if result is not None:
            value = result[self._nid]['value']
            ts = result[self._nid]['timestamp']
        self._history_lock.acquire()
        try:
            if value is None or isinstance(value, MpxException):
                # there were problems collecting during this period,
                # our calculation should not proceed
                self._history.clear()
                if not self._poll_failure:
                    # log the failure, but don't spam the msglog
                    self._poll_failure = True
                    msglog.log('Kwh2Kw', msglog.types.WARN,
                               'Failed to retrieve data from %s' % self.link)
            else:
                self._poll_failure = False
                self._history.add(value, ts)
        finally:
            self._history_lock.release()
    except:
        msglog.exception()
    # Schedule the next sample regardless of this one's outcome.
    self._scheduled = scheduler.seconds_from_now_do(self.sample_period,
                                                    self.run_update)
    return
def handle_update_password(self, request):
    """Handle a password-change form submission.

    Authenticates the user with the old password (via PAM when enabled),
    then sets the new password if it matches its confirmation.  Each
    outcome is reported back through handle_form.

    Fix: the original ended with a dangling ``else:`` with no body (a
    syntax error), so a mismatched new password/confirmation pair went
    unreported; that branch now renders an error form.
    """
    parameters = request.get_query_dictionary()
    parameters.update(request.get_post_data())
    # POST value for destination overrides query-string if both provided.
    username = parameters.get("username", "")
    password = parameters.get("password", "")
    newpwd = parameters.get("newpwd", "")
    confirm_newpwd = parameters.get("confirm_newpwd", "")
    try:
        if Properties.get_boolean('PAM_ENABLE'):
            user = self.manager.user_from_pam(username, password)
        else:
            user = self.manager.user_from_cleartext(username, password)
    except (PAMError, EAuthenticationFailed):
        msglog.exception(prefix="Handled")
        msg = "Invalid old password. Enter correct old password for authentication."
        self.handle_form(request, "update_password", "Authentication Failed", msg)
    else:
        if newpwd == confirm_newpwd:
            try:
                user.set_password(newpwd)
            except EInvalidValue as e:
                msglog.exception(prefix="Handled")
                msg = "".join(e[1].splitlines())
                self.handle_form(request, "update_password",
                                 "Invalid new password. Please try again", msg)
            else:
                msg = "Password updated successfully for %s." % (username)
                msglog.log("broadway", msglog.types.INFO, msg)
                self.handle_form(request, "information", msg)
        else:
            # Reconstructed branch: new password and confirmation differ.
            msg = "New password and confirmation do not match. Please try again."
            self.handle_form(request, "update_password", "Password Mismatch", msg)
def get_cache_control_header(self, filename, user_agent=None):
    """Return the Cache-Control header value for *filename*.

    URL-accessible HTML is never cached.  Otherwise the file extension
    is looked up in self.cache_info to pick a max-age; unknown or
    NOT_CACHED extensions get "no-cache, no-store" -- except for IE,
    which gets a 1-second max-age instead (see comment below).
    """
    if self.checkIfUrlAccessibleHtml(filename):
        return "no-cache, no-store"
    file_ext = filename.split(".")[-1].lower()
    is_IE = None
    # a request over https from IE will fail if
    # response headers prevent caching. So, set
    # the cache info header to 1s for IE
    if user_agent:
        is_IE = re.search("MSIE", user_agent)
    cache_control_header = None
    for file_type in self.cache_info:
        if file_ext in self.cache_info[file_type]["extensions"]:
            if self.cache_info[file_type]["max_age"] != NOT_CACHED:
                # max_age is expressed in days; convert to seconds.
                cache_control_header = "public,max-age=%d, must - revalidate" % (
                    int(self.cache_info[file_type]["max_age"] * ONE_DAY)
                )
            else:
                if not is_IE:
                    cache_control_header = "no-cache, no-store"
                else:
                    cache_control_header = "public,max-age=1, must - revalidate"
            break
    if not cache_control_header:
        # Extension not present in cache_info at all.
        msglog.log("broadway", msglog.types.INFO,
                   "HTTP File Handler: unknown file ext: %s" % file_ext)
        if not is_IE:
            cache_control_header = "no-cache, no-store"
        else:
            cache_control_header = "public,max-age=1, must - revalidate"
    return cache_control_header
def logoutput(self, message, *args, **kw):
    """Emit *message* % *args* to the message log.

    Keyword 'type' selects the msglog type (default DB); keyword
    'level' (default 1) is compared against self.debug so that DB-type
    messages above the current debug level are suppressed.  Returns
    True when logged, False when suppressed.
    """
    logtype = kw.get("type", msglog.types.DB)
    debuglevel = kw.get("level", 1)
    suppressed = (logtype == msglog.types.DB) and (debuglevel > self.debug)
    if suppressed:
        return False
    msglog.log('broadway', logtype, message % args)
    return True
def _process_securely(self, operation, path, request, *args, **kw):
    """Run *operation* with effective uid/gid switched to the requesting user.

    Only possible when the framework runs as root; otherwise a warning
    is logged and the operation runs with current credentials.  The
    original euid/egid/groups are always restored in the finally clause.
    """
    user = request.user_object()
    # Save current credentials so they can be restored afterwards.
    groups = os.getgroups()
    is_root = not os.getuid()
    uid = os.geteuid()
    gid = os.getegid()
    if not is_root:
        msg = (
            "Framework is not running as root so effective uid "
            "and gid are not being changed prior to doing %s: "
            "%s" % (request.get_command(), path)
        )
        msglog.log("FileRequestHandler", msglog.types.WARN, msg)
    else:
        if self.debug:
            msg = "%s command: file %s, user %s" % (request.get_command(), path, user.name())
            msglog.log(self.name, msglog.types.DB, msg)
        # Order matters: groups and gid must be set while still euid 0.
        os.setgroups(user.group_ids())
        os.setegid(user.gid())
        os.seteuid(user.uid())
    try:
        result = operation(path, request, *args, **kw)
    finally:
        if is_root:
            # Restore root credentials (euid first so we may set gid/groups).
            os.seteuid(uid)
            os.setegid(gid)
            os.setgroups(groups)
    return result
def cleanup_modem_configuration(self):
    """Remove 'dialin'/'dialout' sections from the mpxinit conf file.

    Runs only when no modems are mapped.  Raises EPermission when the
    file exists but is not readable/writable; silently returns when the
    file does not exist.  The file is rewritten only when a section was
    actually removed.
    """
    if not self.modem_map :
        cf=properties.MPXINIT_CONF_FILE
        if not os.access(cf, os.F_OK):
            # Nothing to clean up.
            return
        if os.access(cf, os.R_OK|os.W_OK):
            f=open(cf, 'r')
            cp = ConfigParser.ConfigParser()
            cp.readfp(f)
            f.close()
        else:
            if not os.access(cf, os.R_OK):
                raise EPermission(reason = 'Cannot read file %s' % cf)
            elif not os.access(cf, os.W_OK):
                raise EPermission(reason = 'Cannot write file %s' % cf)
        write_file = False
        if cp.has_section('dialin'):
            cp.remove_section('dialin')
            write_file = True
        if cp.has_section('dialout'):
            cp.remove_section('dialout')
            write_file = True
        if write_file == True:
            f=open(properties.MPXINIT_CONF_FILE, 'w')
            cp.write(f)
            f.close()
            msglog.log('broadway', INFO, 'Writting Modem info to mpxinit conf.')
        else:
            msglog.log('broadway', INFO, 'Not Writting modem info to mpxinit conf.')
def handle_request(self, request):
    """Dispatch an HTTP request to the appropriate file operation.

    PUT/DELETE are routed through _process_securely; directory GETs are
    redirected to a trailing-slash URL (301) or resolved to a default
    index file; everything else falls through to _process_read.
    """
    command = request.get_command().lower()
    if self.debug:
        msg = "got command:<%s>" % command
        msglog.log(self.name, msglog.types.DB, msg)
    if command not in self.valid_commands:
        request.error(400)  # bad request
        return
    self._hit_counter.increment()
    path, params, query, fragment = request.split_uri()
    if "%" in path:
        path = unquote(path)
    # Strip all leading slashes so the path is filesystem-relative.
    while path and path[0] == "/":
        path = path[1:]
    if command in ("put", "delete"):
        if command == "put":
            operation = self._process_put
        else:
            operation = self._process_delete
        return self._process_securely(operation, path, request)
    # implicitly 'elif'...
    if self._filesystem.isdir(path):
        if path and path[-1] != "/":
            # Directory without trailing slash: redirect so relative
            # links inside resolve correctly.
            name = request.server_name()
            # Try to get the real name from the HTTP header
            if request._headers[0].find("Host: ") == 0:
                name = request._headers[0][6:]
            request["Location"] = "http://%s/%s/" % (name, path)
            request.error(301)
            return
        for default in self.directory_defaults:
            if self._filesystem.isfile(path + default):
                path = path + default
                break
    return self._process_read(path, request)
def updateMeetingSpaceDetail(self, meetingInfoLine):
    """Parse a fixed-width meeting record line into the child nodes.

    Extracts the event name, start/end times (HHMM, assumed to be for
    today's date) and agreed attendance from *meetingInfoLine* at the
    module-level field offsets, then refreshes the meeting status.
    """
    #Fetch information from meetingInfoLine and store it in child nodes
    #updating Dephi Meeting Event Name
    self.eventName.event = meetingInfoLine[EVENT_NAME_OFFSET:EVENT_NAME_OFFSET+EVENT_NAME_LEN].strip()
    #updating startDateTime and startEpochTime
    eventStartTime = meetingInfoLine[START_TIME_OFFSET:START_TIME_OFFSET+START_TIME_LEN]
    # Build a local-time tuple for today at HH:MM (dst flag -1 = unknown).
    time_tuple = (date.today().year, date.today().month, date.today().day,
                  int(eventStartTime[0:2]),
                  int(eventStartTime[2:4]), 0, 0, 0, -1)
    self.startEpochTime.sec = time.mktime( time_tuple )
    self.startDateTime.time = time.ctime( self.startEpochTime.sec )
    #updating endDateTime and endEpochTime
    eventEndTime = meetingInfoLine[END_TIME_OFFSET:END_TIME_OFFSET+END_TIME_LEN]
    time_tuple = (date.today().year, date.today().month, date.today().day,
                  int(eventEndTime[0:2]),
                  int(eventEndTime[2:4]), 0, 0, 0, -1)
    self.endEpochTime.sec = time.mktime( time_tuple )
    self.endDateTime.time = time.ctime( self.endEpochTime.sec )
    #updating Agreed Attendance
    self.agreedAttd.count = int(meetingInfoLine[AGREED_ATTD_OFFSET:AGREED_ATTD_OFFSET+AGREED_ATTD_LEN].strip())
    self.updateMeetingStatus('update')
    if self.parent.parent.parent.debug:
        msglog.log('Delphi', INFO, "%s, EventName - %s, StartTime - %s, StartEpochTime - %f, EndTime - %s, EndEpochTime - %f, AgrAtd - %d" \
                   %(self.name, self.eventName.event, self.startDateTime.time, self.startEpochTime.sec, self.endDateTime.time, self.endEpochTime.sec, \
                     self.agreedAttd.count))
def handle_error(self):
    """Log a transaction-handling error; notify the manager when the
    transaction is still incomplete and a manager is attached."""
    msglog.log('broadway', msglog.types.WARN,
               'Transaction handling error: %r' % self)
    if self.is_complete() or not self.manager:
        return
    msglog.log('broadway', msglog.types.WARN,
               'Incomplete, will notify manager')
    self.manager.handle_failed_transaction(self)
def start(self):
    """Start the Energywise manager.

    Loads the persisted trend configuration, ensures the 'trends'
    container child exists, and restarts trending for every saved
    domain (dropping entries whose nodes can no longer be resolved).

    Fix: the PDO lock is now released in a ``finally`` clause; the
    original released it only on the success path, so an exception while
    loading the persistent data object left the lock held forever.
    """
    if self.__running:
        return
    if self.debug:
        msglog.log('EnergywiseManager :', msglog.types.INFO, 'Inside start')
    CompositeNode.start(self)
    # start_node = as_node('/services/EnergywiseManager/')
    # self.configure_trend_in_switches(start_node, 60)
    self.__running = True
    self._pdo_lock.acquire()
    try:
        self._pdo = PersistentDataObject(self, dmtype=GC_NEVER)
        self._pdo.trends = {}
        self._pdo.load()
    finally:
        self._pdo_lock.release()
    if self.has_child('trends'):
        self.trends = self.get_child('trends')
    else:
        self.trends = CompositeNode()
        self.trends.configure({'parent': self, 'name': 'trends'})
        self.trends.start()
    # start trending for saved domains
    for domain, freq in self._pdo.trends.items():
        try:
            start_node = as_node(domain)
            # self.configure_trend_in_switches( start_node,freq )
            start_node.new_trend(freq)
        except:
            # Stale entry: the saved domain no longer resolves.
            self.delete_trend_configuration(domain)
    return
def _conversate(self, connection):
    """Run one GET/POST conversation loop with the remote request server.

    Each response body is expected to be an embedded HTTP request
    (GET/POST); it is forwarded locally and the local response POSTed
    back (carrying the session cookie).  The loop ends when the server
    returns an empty body; it aborts when the service stops or the
    server returns an error status or unrecognized data.
    """
    connection.request('GET', self._request_path)
    response = connection.getresponse()
    requestdata = response.read()
    while requestdata:
        if not self._running.isSet():
            error = 'Request Service not running. Exiting conversation'
            raise Exception(error)
        self.debug_message(
            'response code %s. Data included.' % (response.status))
        if response.status >= 400:
            error = '%s request returned error code %s, reason "%s"'
            error = error % (self.name, response.status, response.reason)
            msglog.log('broadway', msglog.types.ERR, error)
            raise TypeError(error)
        elif not (requestdata.startswith('GET') or
                  requestdata.startswith('POST')):
            error = 'Request service retrieved invalid command: %s'
            raise TypeError(error % requestdata)
        else:
            # Forward the embedded request locally and POST the result back.
            result = self._forward_request(requestdata).read()
            headers = {'Cookie': client_cookie(response)}
            self.debug_message('POSTing: ' + result)
            connection.request('POST', self._request_path, result, headers)
            response = connection.getresponse()
            requestdata = response.read()
            self.debug_message('response code %s' % response.status)
def configure(self, config):
    """Apply *config* to this manager node.

    Delegates to CompositeNode.configure and reads the optional
    integer 'debug' attribute (default 0).
    """
    if self.debug:
        msglog.log('EnergywiseManager:', msglog.types.INFO,
                   'Inside configure')
    CompositeNode.configure(self, config)
    set_attribute(self, 'debug', 0, config, int)
    return
def start(self):
    """Start the USAP object node.

    The object's depth in the tree depends on whether the parent is a
    room (room == 1: grandparent holds the shared handles) or not
    (great-grandparent holds them).  Builds the dotted path, copies the
    shared line/request/response handles, then caches the protocol
    child objects before delegating to CompositeNode.start.
    """
    if self.parent.room == 1:
        msglog.log('USAP', INFO, 'Object:start() - %s --> %s --> %s' %
                   (self.parent.parent.name, self.parent.name, self.name))
        self.path = string.join([self.parent.name, self.name], '.')
        self.lh = self.parent.parent.lh
        self.request_obj = self.parent.parent.request_obj
        self.response_obj = self.parent.parent.response_obj
        self.unison_v1_9_0_or_prior = self.parent.parent.unison_v1_9_0_or_prior
    else:
        msglog.log('USAP', INFO, 'Object:start() - %s --> %s --> %s --> %s' %
                   (self.parent.parent.parent.name, self.parent.parent.name,
                    self.parent.name, self.name))
        self.path = string.join([self.parent.parent.name, \
                                 self.parent.name, self.name], '.')
        self.lh = self.parent.parent.parent.lh
        self.request_obj = self.parent.parent.parent.request_obj
        self.response_obj = self.parent.parent.parent.response_obj
        self.unison_v1_9_0_or_prior = self.parent.parent.parent.unison_v1_9_0_or_prior
    # Find the relevant protocol child objects ahead of time to
    # be more efficient.
    self.req_startCode_obj = self.request_obj.findChildByName("startCode")
    self.req_vData_obj = self.request_obj.findChildByName("vData")
    self.req_crc_obj = self.request_obj.findChildByName("crc")
    #
    self.resp_startCode_obj = self.response_obj.findChildByName("startCode")
    self.resp_vData_obj = self.response_obj.findChildByName("vData")
    self.resp_crc_obj = self.response_obj.findChildByName("crc")
    #
    CompositeNode.start(self)
def get_context_policies(self, context, ascending = True):
    """Return the policy nodes applicable to *context*, ordered by rank.

    Each child policy is ranked via rank_match; zero-ranked policies and
    everything after a non-acquiring policy are excluded.  The result is
    ascending by rank unless *ascending* is False.
    """
    if not self.__started:
        self.start()
    if not isinstance(context, str):
        context = ISecurityContext(context).url
    active = []
    children = self.children_nodes()
    ranked = []
    for child in children:
        try:
            ranked.append((child.rank_match(context), child))
        except ENotRunning:
            # Only log once for consecutive failures.
            # NOTE(review): this counter attribute appears to be created
            # here on first failure; a child that has never taken the
            # 'else' path below may raise AttributeError -- verify it is
            # initialized elsewhere.
            if (child.__ENotRunning_logged % 1000) == 0:
                msglog.log('broadway', msglog.types.WARN,
                           'Policy "%s" not running.' % child.name)
            child.__ENotRunning_logged += 1
        else:
            child.__ENotRunning_logged = 0
    # Highest rank first.
    ranked.sort()
    ranked.reverse()
    for rank, child in ranked:
        if rank:
            active.insert(0, child)
        else:
            # Zero rank: this and all lower-ranked policies don't match.
            break
        if not child.acquires:
            # Non-acquiring policy masks everything below it.
            break
    if not ascending:
        active.reverse()
    return active
def save_trends(self, trend_list):
    # Traverse through _pdo.items and check if new domain is either subset
    # of any configured or superset.
    # If subset return with msg already covered and dont save this
    # If superset then configure new ones and delete subset from
    # _pdo.items
    '''Adding and saving trends'''
    for point in reversed(trend_list):
        point_period = point['frequency']
        point_domain = point['domain']
        # Drop an existing configuration for the same domain when its
        # period changed (iterate over a snapshot since we may delete).
        for saved_domain, saved_period in tuple(self._pdo.trends.items()):
            if saved_domain == point_domain:
                if saved_period != point_period:
                    self.delete_trend_configuration(saved_domain)
                break
        if not self._pdo.trends.has_key(point_domain):
            # add this trend
            try:
                domain_node = as_node(point_domain)
                if isinstance(domain_node, EnergywiseSwitch) or isinstance(domain_node, EnergywiseDomain):
                    self.add_trend_configuration(point_period, point_domain)
                    domain_node.new_trend(point_period)
            except Exception:
                msglog.exception()
                msglog.log("Energywise", msglog.types.ERR,
                           "Failed to create trend for %r every %r seconds"
                           % (point_domain, point_period))
    return
def handle_log(self, event):
    """Count log-entry events and, once log_multiple have accumulated,
    launch an export thread -- unless a previous export is still
    running, in which case the request is skipped with a warning."""
    self._event_count += 1
    self.debug_information('Log entry event caught.')
    if self._event_count < self.log_multiple:
        return
    self.debug_information('Going to start export thread.')
    # Non-blocking acquire: acts as a "one export at a time" guard.
    if not self._lock.acquire(0):
        msglog.log('broadway', msglog.types.WARN,
                   'Last export still active, skipping current request.')
        return
    try:
        exporter = Thread(name=self.name, target=self.go,
                          args=(event.values[0],))
        exporter.start()
        self._event_count = 0
    finally:
        self._lock.release()
def _run(self):
    """Main poll loop: fetch commands from the SIE server and apply them.

    Each cycle parses the server's 'get' URL (the SAX ContentHandler
    fills the module-level ``commands`` list), sets each commanded point
    locally, and acknowledges each command back to the server.  All work
    is wrapped in a catch-all so the loop never dies.
    """
    global commands
    x = xml.sax.make_parser()
    x.setContentHandler(ContentHandler(self.debug))
    while self._running:
        # all in try statement/catch-all so that
        # service continues to run indefinately.
        try:
            if self.connection.acquire(self.timeout):
                try:
                    server_url = self.server_url
                    command_url = server_url + 'get?nodeid=' + self.node
                    if self.debug:
                        print "Executing %s" % command_url
                    x.parse(command_url)
                    # Each command c is (point id, value, seq num, param).
                    for c in commands:
                        if self.debug:
                            print "Setting %s to %f with seq %s" % (
                                c[0], c[1], c[2])
                        try:
                            node = as_node(c[0])
                            node.set(_holistic_conversion(c[1]))
                        except (KeyError):
                            msglog.log('sie', msglog.types.ERR,
                                       'Point %s does not exist.' % c[0])
                        if self.debug:
                            print "Acknowledging setting point %s to %d with sequence %s" % \
                                  (c[0], c[1], c[2])
                        encoded_name = urllib.quote_plus(c[0])
                        encoded_param = urllib.quote_plus(c[3])
                        ack_url = server_url + 'ack?PointID=%s&SeqNum=%s&varParam=%s' % \
                                  (encoded_name, c[2], encoded_param)
                        if self.debug:
                            print "Acknowledging with %s" % ack_url
                        # uses the parser's ability to retrieve url content
                        # so we dont have to put http logic here.
                        x.parse(ack_url)
                finally:
                    self.connection.release()
        except:
            msglog.exception()
        pause(self.period)
def set_roles(self, *roles):
    """Replace this user's role list, enforcing mandatory roles.

    Accepts role names or role objects, as separate arguments or a
    single list/tuple.  The sysadmin user always keeps the administrator
    role and the anonymous user always keeps the 'unknown' role; an
    empty list collapses to ['unknown'].  Dispatches UserRolesModified
    when the effective role list changed.
    """
    # Allow roles to be list or tuple, or many params.
    if len(roles) == 1 and isinstance(roles[0], (list, tuple)):
        roles = roles[0][:]
    for role in roles:
        if not self.parent.parent.role_manager.has_role(role):
            raise ValueError('Role "%s" does not exist.' % role)
    # Normalize role objects to their names.
    rolenames = []
    for role in roles:
        if isinstance(role, str):
            rolenames.append(role)
        else:
            rolenames.append(role.name)
    if self.parent.anonymous is not self:
        # NOTE(review): 'authenticated' is assigned but never used.
        authenticated = self.parent.role_manager.authenticated.name
        if self.parent.sysadmin is self:
            adminrole = self.parent.role_manager.administrator.name
            if adminrole not in rolenames:
                message = 'User "%s" is system admin. Appending role "%s".'
                msglog.log('broadway', msglog.types.WARN,
                           message % (self.name, adminrole))
                rolenames.append(adminrole)
    elif self.parent.anonymous is self:
        unknownrole = self.parent.role_manager.unknown.name
        if unknownrole not in rolenames:
            message = 'User "%s" is anonymous. Appending role "%s".'
            msglog.log('broadway', msglog.types.WARN,
                       message % (self.name, unknownrole))
            rolenames.append(unknownrole)
    self._lock.acquire()
    try:
        previous = self.roles
        if len(rolenames) == 0:
            # Never leave a user with no roles at all.
            unknownrole = self.parent.role_manager.unknown.name
            rolenames.append(unknownrole)
        self.roles = rolenames
    finally:
        self._lock.release()
    if self.roles != previous:
        event = UserRolesModified(self, self.roles, previous)
        self.dispatcher.dispatch(event)
    return
def _clear_meeting_details(self):
    """Reset all meeting child-node values to their 'no meeting' state."""
    self.eventName.event = None
    self.startDateTime.time = None
    self.startEpochTime.sec = 0
    self.endDateTime.time = None
    self.endEpochTime.sec = 0
    self.agreedAttd.count = 0

def updateMeetingStatus(self, flag='clear'):
    """Recompute this meeting space's status from the current time.

    When the event file (or its backup) exists: a meeting spanning the
    current time (with grace period) is 'In Progress'; in 'update' mode
    an expired meeting becomes 'Available' (details cleared) and a
    future one 'Scheduled'; in 'clear' mode non-in-progress spaces are
    cleared to 'Available'.  With no event file the space is 'Unknown'.

    Refactor: the identical six-line detail-clearing block, repeated
    three times in the original, is extracted to _clear_meeting_details.
    """
    self.currentEpochTime = time.time()
    grace_time = self.parent.parent.grace_time * MINUTE
    #If flag is clear then except "In Progress" clear other the Meeting Spaces to update new data
    #If flag is update then update meeting space based on timestamp
    if exists(self.parent.parent.local_file_path) or exists(
            self.parent.parent.local_file_path + '_bak'):
        if (self.startEpochTime.sec < self.currentEpochTime) and (
                (self.endEpochTime.sec + grace_time) > self.currentEpochTime):
            self.status = MeetingSpaceStatus['3:In Progress']
        elif flag == 'update':
            if (self.endEpochTime.sec + grace_time) < self.currentEpochTime:
                # Meeting is over (past grace period).
                self.status = MeetingSpaceStatus['1:Available']
                self._clear_meeting_details()
            else:
                self.status = MeetingSpaceStatus['2:Scheduled']
        else:
            self.status = MeetingSpaceStatus['1:Available']
            self._clear_meeting_details()
    else:
        # No event data available at all.
        self.status = MeetingSpaceStatus['0:Unknown']
        self._clear_meeting_details()
    if self.parent.parent.parent.debug:
        msglog.log(
            'Delphi', INFO,
            "updateMeetingStatus: %s - %s" % (self.name, str(self.status)))
def create_node(self, name, config=()):
    """Add a peer or set the portal for the cloud formation.

    config['type'] selects 'peer' or portal.  The host name/address is
    validated (and resolved when a hostname) before use; duplicate or
    conflicting peer/portal assignments raise ValueError.  Returns the
    peer/portal name on success.
    """
    config = dict(config)
    type = config['type'].lower()
    manager = self.get_manager()
    # Next statements verify access to modifier permitted.
    if type == "peer":
        manager.add_peer
    else:
        manager.set_portal
    config.setdefault("parent", self.manager)
    peer_or_portal = config.setdefault("name", name).strip()
    ret = self.validate(peer_or_portal)
    if(ret != 0 ):
        msg='Add Peer/Portal failed. %s is a invalid hostname/IP Address' %(peer_or_portal)
        raise ValueError(msg)
    if(valid_hostname(peer_or_portal)):
        # Hostname form: make sure it resolves to a usable IP address.
        tmp=get_ip_addr(peer_or_portal)
        if(not valid_ip_address(tmp) ):
            raise ValueError('Cannot resolve the hostname %s. Please try with a valid Hostname' %(peer_or_portal))
    if(type == 'peer'):
        peer=peer_or_portal
        if (self.manager.is_peer_in_formation(peer) == False):
            if(self.manager.is_host_the_portal(peer) == False):
                msg='Adding %s as a Peer' %str(peer)
                msglog.log('CloudConfigurator', msglog.types.INFO,msg)
                # Use possibly secured reference for the add.
                manager.add_peer(peer)
            else:
                raise ValueError,'A Portal cannot be a Peer : "%s" is the Portal for the Cloud.' % peer
        else:
            raise ValueError,'Add peer did nothing: "%s" already in Cloud Formation.' % peer
    else:
        portal=peer_or_portal
        if(self.manager.is_host_the_portal(portal) == False):
            if (self.manager.is_peer_in_formation(portal) == False):
                msg='Setting the Portal as :%s' %str(portal)
                msglog.log('CloudConfigurator', msglog.types.INFO,msg)
                # Use possibly secured reference for the modification.
                manager.set_portal(portal)
            else:
                raise ValueError,'%s is in the formation. It cannot be added as Portal ' % portal
        else:
            raise ValueError,'Set Portal did nothing: "%s" already the Portal' % portal
    return(peer_or_portal)
def start(self):
    """Start the trend manager.

    Loads the persistent trend configuration (migrating legacy PDO data
    when present), ensures the 'trends' container child exists, then
    unmarshals each saved trend; trends that fail to load are pruned
    from both the configuration and the node tree.  Any failure resets
    the running flag and re-raises.
    """
    try:
        self._pdo_lock.acquire()
        try:
            if self.__running:
                return
            self.__running = True
            self._trendconfig = PersistentDictionary(filename(self),
                                                     encode=None,
                                                     decode=None)
            if not self._trendconfig:
                # Empty config: check for legacy PDO data to migrate.
                pdodata = PersistentDataObject(self, dmtype=GC_NEVER)
                if os.path.exists(pdodata.filename()):
                    msglog.log('broadway', msglog.types.INFO,
                               "Migrating previous trend data")
                    pdodata.trends = {}
                    pdodata.load()
                    self._trendconfig.update(pdodata.trends)
                del (pdodata)
        finally:
            self._pdo_lock.release()
        super(TrendManager, self).start()
        self.logger = node.as_internal_node(self.logger_url)
        if self.has_child('trends'):
            self.trends = self.get_child('trends')
        else:
            self.trends = CompositeNode()
            self.trends.configure({'parent': self, 'name': 'trends'})
            self.trends.start()
        # Rebuild each saved trend; remember any that fail to unmarshal.
        corrupt_trends = []
        for trendname, trenddump in self._trendconfig.items():
            msg = "Loading trend: %s" % trendname
            msglog.log('trendmanager', msglog.types.INFO, msg)
            try:
                trend = unmarshal(trenddump)
            except:
                corrupt_trends.append(trendname)
                msg = "Failed to load trend: %s" % trendname
                msglog.log('trendmanager', msglog.types.ERR, msg)
                msglog.exception(prefix='Handled')
        for trendname in corrupt_trends:
            try:
                msg = "Deleting trend information: %s" % trendname
                msglog.log('trendmanager', msglog.types.INFO, msg)
                self._delete_trend_configuration(trendname)
                if self.trends.has_child(trendname):
                    trend = self.trends.get_child(trendname)
                    trend.prune(force=True)
            except:
                msglog.exception(prefix='Handled')
    except:
        self.__running = False
        raise
    return
def write(self, method_name, params):
    """Send an XML-RPC-marshalled XCommand request and return the reply.

    Serializes *params* for *method_name*, frames it with a 4-byte
    little-endian length prefix, writes it to the device socket and
    reads back the parsed response.  Raises EConnectionError when the
    socket write fails.  The connection is always closed and the lock
    released on exit.
    """
    self.__lock.acquire()
    try:
        if not self.connection_ok():
            self.open_connection()
        # marshal data from param tuple
        data = xmlrpclib.dumps(tuple([params]), method_name)
        #payload is 4 byte, little endian, field representing the length of
        #the xml data, followed by the data
        msg = struct.pack('<I', len(data)) + data
        try:
            self._s.send(msg)
        except:
            msglog.log('Adura', ERR, 'Error writing to XCommand socket.')
            raise EConnectionError
        # Fix: the parsed reply was read into a local and silently
        # discarded; return it so callers can see the device response.
        # (Backward-compatible: callers ignoring the return are unaffected.)
        return self.read()
    finally:
        self.close_connection()
        self.__lock.release()
def _setup_request_handler(self, handler):
    """Install *handler* on this server and mirror it onto siblings.

    For a non-alias handler, each sibling server that neither has a
    child of the same name nor already handles the handler's path gets
    an alias created for it.  FileRequestHandlers are remembered and
    installed at the back of the handler chain; all others go in front.
    """
    if not isinstance(handler, HandlerAlias):
        for server in self.servers:
            if server is not self:
                if (not server.has_child(handler.name) and
                    not server.handles_path(getpath(handler))):
                    message = "%s aliasing %r handler from %s."
                    msglog.log("broadway", msglog.types.INFO,
                               message % (server, getpath(handler), self))
                    alias = server.create_alias(handler)
                    if server.is_running():
                        # NOTE(review): this installs the original
                        # 'handler' on the sibling, not the freshly
                        # created 'alias' -- confirm whether 'alias'
                        # was intended here.
                        server.server.install_request_handler(handler)
    if isinstance(handler, FileRequestHandler):
        # File handlers serve as the fallback, so they go at the back.
        self._file_request_handler = handler
        handler.setup_filesystem()
        back = True
    else:
        back = False
    self.server.install_request_handler(handler, back)
def get_override(self):
    """Return an OverrideDict for the 16 priority-array slots.

    Keys are the slot numbers '1'..'16'; entries exposing
    as_magnitude() are converted to plain magnitudes.  Failures to read
    the priority array are logged and re-raised.
    """
    try:
        override = self.priority_array.get()
    except:
        msglog.log('Entity Manager', msglog.types.WARN,
                   'Unable to get override for device %s'
                   % (self.as_node_url()))
        msglog.exception()
        raise
    slots = {}
    slot = 0
    while slot < 16:
        entry = override[slot]
        if hasattr(entry, 'as_magnitude'):
            # Best-effort conversion; keep the raw entry on failure.
            try:
                entry = entry.as_magnitude()
            except:
                pass
        slots[str(slot + 1)] = entry
        slot += 1
    return OverrideDict(slots, self.get_default())
def _find_domain(target):
    """Return the dotted domain path of *target* below its manager.

    Walks up the parent chain collecting node names until the
    EnergywiseManager is reached, then joins the names root-first
    with '.' separators.
    """
    if target.debug:
        msglog.log('Energywise:', msglog.types.INFO, 'Inside find_domain')
    from energywise_manager import EnergywiseManager
    ancestors = []
    node = target.parent
    while not isinstance(node, EnergywiseManager):
        ancestors.append(node.name)
        node = node.parent
    ancestors.reverse()
    # join() inserts '.' only between elements -- no trailing dot,
    # and an empty ancestor list yields ''.
    return '.'.join(ancestors)
def _poll_alarms(self): # thread
    """Background thread: poll CPC alarms and republish them as events.

    Every ~30 seconds, fetch alarms from the CPC UHP node, translate
    each CpcAlarm into the mediator Alarm form, and emit a
    NewAlarmsEvent to the parent when any were found.  Exits when
    self._go is cleared.
    """
    while 1:
        ideal_alarms = []
        cpc_alarms = self._cpc_uhp_node.get_alarms()
        for cpc_alarm in cpc_alarms:
            if not isinstance(cpc_alarm, CpcAlarm): # could be None or Exception
                continue
            # Source id: device:item:item_num,obj:obj_num
            src = '%s:%s:%s,%s:%s' % (cpc_alarm.device_name, str(cpc_alarm.item), \
                                      cpc_alarm.item_num, str(cpc_alarm.obj), \
                                      cpc_alarm.obj_num)
            # Build a struct_time-style tuple (secs/wday/yday zeroed,
            # DST flag -1 = unknown) and convert to epoch seconds.
            tm_tuple = (cpc_alarm.orig_date[0], cpc_alarm.orig_date[1], cpc_alarm.orig_date[2], \
                        cpc_alarm.orig_time[0], cpc_alarm.orig_time[1], 0, 0, 0, -1)
            tm_sec = time.mktime(tm_tuple)
            state = 'Not acked'
            if cpc_alarm.ack_date >= cpc_alarm.orig_date:
                state = 'Acked'
            type = 'Alarm'
            if cpc_alarm.type == 0:
                type = 'Notice'
            # Alarm text is NUL-terminated; keep only the leading part.
            i = cpc_alarm.text.find('\x00')
            data = cpc_alarm.text[:i]
            ideal_alarm = Alarm(id=cpc_alarm.id, type=type, source=src,
                                timestamp=tm_sec, data=data, state=state)
            ideal_alarms.append(ideal_alarm)
            msglog.log('mpx',msglog.types.INFO,'CPC Alarm: %s' % ideal_alarm.as_list())
        # legal protection in case CPC eqpt fails or Costco doesn't see alarm
        if len(ideal_alarms) > 0:
            ae = NewAlarmsEvent(self, ideal_alarms)
            self.parent.event_generate(ae)
        # Sleep ~30s in 1s slices so shutdown (self._go == 0) is prompt.
        for i in range(30):
            if self._go == 0:
                return
            time.sleep(1.0)
    return
def get(self, skipCache=0):
    """Read the current value of this property from the device.

    Returns the property value (tuples converted to lists).  On
    failure returns an error marker instead of raising: the Exception
    from the device layer, or a 'Response Error ...' string for a
    non-zero response status.
    """
    req = self._req_templ[:]
    resp_list = self._dev.get_values([req])
    result = None
    if isinstance(resp_list, Exception):
        msglog.log('mpx',msglog.types.ERR,'Failed to get value of Property %s of %s Object %u.' \
                   % (self._prop_type_name, self.parent._obj_type_name, self.parent._obj_inst_num))
        # Fix: resp_list IS the exception in this branch; the old code
        # indexed it as resp_list[0][0], which raised a secondary error.
        # Return the exception itself, matching the Item-level get().
        result = resp_list
    elif resp_list[0][0] != 0:
        msglog.log('mpx',msglog.types.ERR,'Recvd error response (%s) to request for ' \
                   'value of Property %s for %s Object %s' \
                   % (resp_list[0][0], self._prop_type_name, self.parent._obj_type_name, self.parent._obj_inst_num))
        result = 'Response Error %s' % str(resp_list[0][0])
    else:
        result = resp_list[0][1]
        # Tuples are normalized to lists for callers.
        if type(result) == types.TupleType:
            result = list(result)
    return result
def create_session(self, *args):
    """Create an authenticated session.

    Accepts either (user, password) -- deprecated --, a single user
    name to look up, or no arguments to use the current thread's user.
    Raises ValueError for an unknown user name.
    """
    password = None
    if len(args) == 2:
        # Deprecated explicit-credentials form.
        user, password = args
        msglog.log(
            'broadway', msglog.types.WARN,
            'Deprecated create_session with '
            'explicit password being used.')
    elif len(args) == 1:
        user_manager = self.security_manager.user_manager
        try:
            user = user_manager.get_user(args[0])
        except KeyError:
            msglog.exception(prefix='Handled')
            raise ValueError('Invalid user name: %s' % args[0])
    else:
        # No arguments: resolve the user from the calling thread.
        user = self.security_manager.user_manager.user_from_current_thread()
    return super(SecuredXmlRpcHandler, self).create_session(user, password)
def poll_for_incomming_packets( self): #return ACK responses and handle commands from CPC #if self.debug: print 'enter poll_for_incoming_packets' pkt = self.read() #block in here until data shows up pobj = find_response_type(pkt) if len( pkt ) > pobj.pkt_len: #possibly read in two packets catenated together self.read_buffer = pkt[pobj.pkt_len:] if self.debug: print CSI_RED + 'unread: ', len(self.read_buffer), CSI_Reset if type(pobj) == ScreenUpdate: #command to update screen object self.handle_screen_update(pobj) elif type(pobj) == InitScreenCmd: #command from cpc self.handle_screen_init(pobj) elif pobj: # put it into queue for return pobj else: #unknown packets type msglog.log(WARN, 'CPC', 'Unknown packet type: %s' % (repr(pkt), )) return None
def __new__(klass, covdict):
    """Convert a COV result dict into {name: _SimpleResultStructure}.

    Relies on Python 2 dict.keys() returning a list: 'names' and
    'results' are parallel lists mutated in lockstep when filtering.
    If conversion fails with AttributeError and None values are
    present, the None entries are filtered out (with a warning) and
    conversion is retried; any other AttributeError is re-raised.
    """
    names = covdict.keys()
    results = map(covdict.get, names)
    try:
        values = map(_SimpleResultStructure, results)
    except AttributeError:
        # Presumably raised when a result is None and lacks the
        # attributes _SimpleResultStructure expects -- if no None is
        # present this is a genuine error, so re-raise.
        nonecount = results.count(None)
        if nonecount == 0:
            raise
        msglog.log('broadway', msglog.types.WARN,
                   'COV Conversion object filtering NoneType values. ')
        msglog.log('broadway', msglog.types.INFO,
                   'Original COV dictionary was: %r' % covdict)
        # Drop each None from both parallel lists, keeping them aligned.
        while nonecount:
            index = results.index(None)
            results.pop(index)
            names.pop(index)
            nonecount = nonecount - 1
        values = map(_SimpleResultStructure, results)
    return dict(zip(names, values))
def configure(self, config):
    """Configure the RNA socket service.

    Reads 'security_level' ('NoSec' default; 'Auth-Only' or 'Full-Enc'
    select the secured SRNA transport) and reacts to runtime changes of
    the 'enabled' flag after the service has started.
    """
    ServiceNode.configure(self, config)
    set_attribute(self, 'security_level', 'NoSec', config)
    msglog.log(
        'broadway', msglog.types.INFO,
        'RNA_Socket.configure: security_level = %s.' % self.security_level)
    self.transportClass = SimpleTcpService
    if (self.security_level == 'Auth-Only') \
       or (self.security_level == 'Full-Enc'):
        self.transportClass = SrnaService
    # Handle changes to the enabled attribute, once we've started.
    if self.enabled != self.was_enabled:
        if self.enabled:
            # Only (re)start if we had started before; count the start.
            if self.start_count and self._start():
                self.start_count += 1
            self.was_enabled = self.enabled
        elif self.start_count > self.stop_count:
            # Disabled while running: balance the counters and stop.
            # NOTE(review): was_enabled is not updated on this path --
            # confirm whether that is intentional.
            self.stop_count += 1
            self._stop()
    return
def handle_log(self, event):
    """Kick off a background export when the trigger value is true.

    Raises the value if the event carried an Exception.  Skips (and
    logs a warning) when a previous export is still running.
    """
    self.debug_information('Log export triggered.')
    self.evt = event #dil - debug
    value = event.results()[1]['value']
    if isinstance(value, Exception):
        raise value
    if not value:
        # only export when value is true
        return
    self.debug_information('Going to start export thread.')
    # Non-blocking acquire: refuse to overlap exports.
    if not self._lock.acquire(0):
        msglog.log('broadway', msglog.types.WARN,
                   ('Last export still active, ' +
                    'skipping current request.'))
        return
    try:
        worker = Thread(name=self.name, target=self.go,
                        args=(time.time(), ))
        worker.start()
    finally:
        self._lock.release()
def __init__(self, name, password_file=PASSWD_FILE, group_file=GROUP_FILE,
             shadow_file=SHADOW_FILE):
    """Load the system account *name* and bind its persistent profile.

    Presumably load() reads the passwd/shadow/group files -- confirm in
    its definition.  Then ensures a UUID-keyed persistent profile
    exists for the user (creating one on first use) and loads it.
    """
    self.__lock = Lock()
    # Backing files consulted when loading account information.
    self.__password_file = password_file
    self.__shadow_file = shadow_file
    self.__group_file = group_file
    self.__loaded = 0
    self.load(name)
    self.meta = {}
    self.USERS.load()
    if not self.USERS.has_key(self.name()):
        msglog.log('broadway', msglog.types.INFO,
                   ('No profile for user %s found, creating'
                    ' new profile' % name))
        # Map the user name to a fresh UUID that names the profile PDO.
        self.USERS[self.name()] = str(UUID())
    PersistentDataObject.__init__(self, self.USERS[self.name()])
    PersistentDataObject.load(self)
def start(self):
    """Load persisted exporter dumps and instantiate each exporter.

    Resolves the container node, reads the pickled exporter dumps from
    the PDO under the lock, then unpickles and invokes each one,
    logging the total load time.
    """
    self.container = self.nodespace.as_node(self.container)
    self._pdo_lock.acquire()
    try:
        self._pdo = PersistentDataObject(self, dmtype=GC_NEVER)
        self._pdo.exporters = {}
        self._pdo.load()
        exporterdumps = self._pdo.exporters.values()
    finally:
        self._pdo_lock.release()
    super(ExportersConfigurator, self).start()
    started = time.time()
    for dump in exporterdumps:
        # Each dump is a pickled IPickles-able exporter; calling the
        # unpickled object re-creates the exporter.
        IPickles(cPickle.loads(dump))()
    elapsed = time.time() - started
    msglog.log(
        'broadway', msglog.types.INFO,
        'Exporter Configurator loaded '
        '%s exporters in %s seconds.' % (len(exporterdumps), elapsed))
    self.manager = self.container
def _read(self, wait_time=None, numretries=None):
    """Reads the device.
    Sends the request packet, recieves the response packet. Parses it.
    Updates the current reading value. Should be called only by
    get_value().  Retries up to numretries times (default
    self.retry_count); re-raises the last failure once retries are
    exhausted.
    """
    if numretries is None:
        numretries = self.retry_count
    while numretries:
        try:
            self._send_request(self.request_obj, self.response_obj, wait_time, 1)
            resp_addr = self.response_obj.findChildByName('addr')
            resp_cs = self.response_obj.findChildByName('cs')
            resp_data = self.response_obj.findChildByName('data')
            addr = resp_addr.getValue()
            cs = resp_cs.getValue()
            data = resp_data.getValue()
            # Low byte of 0x5D + sum(addr) + sum(data) must match the
            # checksum field reported by the device.
            checksum = (0x5D + calc_sum(addr) + calc_sum(data)) & 0xFF
            if checksum != cs:
                #some error in the response packet
                if self.debug:
                    msglog.log("omnimeter", msglog.types.WARN,
                               "Checksum didn't match %s" % self.address)
                raise EBadChecksum()
            if self.bin_addr != addr:
                #it's not me. don't think this would ever happen, but who knows
                if self.debug:
                    msglog.log(
                        'omnimeter', msglog.types.WARN,
                        "Got some other meter's response (Strange!!) %s"
                        % self.address)
                raise EInvalidMessage()
            meter_reading = format_reading(data)
            self.update_value(meter_reading)
            return
        except:
            # Fix: the raise previously sat inside the except clause,
            # re-raising on the FIRST failure and defeating the retry
            # loop.  Consume one retry; only re-raise once exhausted.
            numretries -= 1
            if not numretries:
                raise
def getapi(instance, public=True):
    """Build JSON-API method descriptors for *instance*.

    Returns a list of {'name': method_name, 'parameters': [...]}
    dictionaries.  Parameters with non-primitive defaults are declared
    optional (with a warning) instead of carrying the default value.
    When public is true, underscore-prefixed methods are excluded.
    """
    # Hack to get handle to inspectable node.
    if isinstance(instance, _ReloadableSingleton):
        instance = instance.as_node()
    # Mapping method-names to method-code objects.
    methods = inspect.getmembers(instance, inspect.ismethod)
    if public:
        methods = [(name, method) for name, method in methods
                   if not name.startswith("_")]
    methods = [(name, inspect.getargspec(method)) for name, method in methods]
    descriptors = []
    for name, argspec in methods:
        args, varargs, kwargs, defaults = argspec
        if defaults:
            defaults = list(defaults)
        parameters = []
        # Walk args right-to-left so positional defaults (which align
        # with the tail of 'args') can be popped off as we go.
        for arg in reversed(args):
            if arg == "self":
                continue
            parameter = {"name": arg}
            if defaults:
                default = defaults.pop()
                if default is not None and type(default) not in primitives:
                    message = ("JSON API ignoring default value %r for"
                               " parameter %s of method %s on object %s."
                               " Declaring parameter optional instead.")
                    msglog.log("broadway", msglog.types.WARN,
                               message % (default, arg, name, instance))
                else:
                    parameter["default"] = default
                parameter["optional"] = True
            parameters.append(parameter)
        parameters.reverse()
        if varargs:
            # Fix: getargspec() returns the *args parameter NAME as a
            # string (or None); the old 'for arg in varargs' loop
            # iterated its characters, emitting one bogus parameter per
            # letter.  Emit a single optional parameter instead.
            parameter = {"name": varargs}
            parameter["optional"] = True
            parameters.append(parameter)
        descriptors.append({"name": name, "parameters": parameters})
    return descriptors
def get(self, prop_name):
    """Query a CSAFE property and return (status, payload).

    Looks up the command/response-parser pair for *prop_name*, sends
    the command (retrying up to 3 times on timeout), and returns the
    parsed payload when a parser is configured.  Returns (None, None)
    on a malformed response; implicitly returns None when all retries
    time out.
    """
    cmd, rsp, mode = self.properties[prop_name]
    for i in range(3):
        try:
            status, msg = self.send_command(cmd) #(status, payload)
            if debug: print 'response payload: ', self.hexdump(msg)
            if rsp:
                # Parse the raw payload with the property's decoder.
                return (status, rsp(msg),)
            return (status, msg,)
        except ETimeout:
            if debug: print '@@@ csafe ate a timeout'
            continue
        except struct.error, e:
            # Decoder choked on the payload: log a hex dump of the
            # bytes for diagnosis and give up on this property.
            msg_hex_str = ''
            for c in msg:
                msg_hex_str += (str(ord(c)) + ' ')
            msglog.log('mpx:csafe',msglog.types.ERR,'Badly formatted status:msg (%s:%s) recvd from FEU %s' \
                       ', for property %s' % (status, msg_hex_str, self.port.name, prop_name))
            msglog.exception()
            return (None, None,)
    pass
def __init__(self, name='RNA_Scan_Thread'):
    """Initialize the RNA scan thread's bookkeeping structures.

    Sets up the descriptor/session/host maps and registers the trigger
    channel's descriptor before delegating to the thread base class.
    """
    prefix = 'RNA_Scan_Thread.__init__:'
    msglog.log('broadway', msglog.types.INFO, '%s Entering...' % prefix)
    self.debug = 0
    self.hosts = {}        # Maps file-descriptors to host-names.
    self.sessions = {}     # Maps file-descriptors to sessions.
    self.socketmap = {}    # Maps host-names to sets of file-descriptors.
    self.connections = {}
    self.bPollRun = False
    self.work_queue = None
    self.descriptors = set()
    # The trigger channel lets other threads wake the poll loop; its
    # descriptor is always watched.
    self.trigger_channel = Trigger(self.socketmap)
    self.descriptors.add(self.trigger_channel.fileno())
    msglog.log('broadway', msglog.types.INFO, '%s Done.' % prefix)
    super(RNA_Scan_Thread, self).__init__(name=name)
def httpDownload(self):
    """Download the event file from the interface PC over HTTP.

    Builds a credentials-in-URL address from the configured interface
    settings and copies the remote file to self.local_file_path.
    Returns True on success; on IOError logs the failure, removes any
    partial local file, and implicitly returns None.
    """
    # NOTE: embeds user_name/password in the URL -- they may appear in
    # logs or process listings.
    full_url = self.communication_interface + '://' + self.user_name + ':' + self.password + '@' + self.interface_pc_addr + self.event_file_location
    try:
        url_opener = urllib.URLopener()
        webFileHandle = url_opener.open(full_url)
        localFileHandle = open(self.local_file_path, 'wb')
        localFileHandle.write(webFileHandle.read())
        webFileHandle.close()
        localFileHandle.close()
        return True
    except IOError, e:
        # Python 2 URLopener raises IOError('http error', status,
        # reason, headers) for HTTP-level failures.
        if e[0] == "http error":
            msglog.log(
                'Delphi', ERR,
                "HTTP Download Error: status = %d, reason = %s" %
                (e[1], e[2]))
        else:
            msglog.log('Delphi', ERR, "HTTP Download Error: %s" % e)
        # Don't leave a partial download behind.
        if exists(self.local_file_path):
            remove(self.local_file_path)
def ConversionFactory(class_name, caller):
    """Return a conversion instance named by *class_name* for *caller*.

    Unknown names are logged and fall back to the generic
    XbowConversion.
    """
    # class_name is really provided by user via broadway.xml - we don't want
    # to just eval it.
    known_conversions = {
        'LightFromRaw': LightFromRaw,
        'TempFFromRaw': TempFFromRaw,
        'TempCFromRaw': TempCFromRaw,
        'SoilTempFFromRaw': SoilTempFFromRaw,
        'AccelFromRaw': AccelFromRaw,
        'ADCPrecisionFromRaw': ADCPrecisionFromRaw,
        'ADCSingleFromRaw': ADCSingleFromRaw,
        'BinaryValueFromRaw': BinaryValueFromRaw,
    }
    conversion_class = known_conversions.get(class_name)
    if conversion_class is not None:
        return conversion_class(caller)
    msg = 'could not find conversion class %s for %s' % (
        class_name, as_node_url(caller))
    msglog.log('mpx.xbow', WARN, msg)
    return XbowConversion(caller)
def _do_poll(self):
    """Poll every registered SOAP observer once, then reschedule.

    Each observer's SOAP function is invoked with its own arguments;
    SOAP failures are logged (in debug mode) and reported to the
    observer as an ETimeout value.
    """
    if self.debug:
        msglog.log('DRAS', INFO, 'Polling the demand response server')
    for soap_func, observers in self.__observers.items():
        for observer in observers:
            call_args = observer.get_args()
            try:
                if call_args:
                    value = soap_func(*call_args)
                else:
                    value = soap_func()
            except:
                # SOAP errors live here
                if self.debug:
                    msglog.log('DRAS', INFO,
                               'Error polling the demand response server')
                    msglog.exception()
                value = ETimeout()
            observer.update(value)
    self._schedule()
    return
def get(self, skipCache=0):
    """Read the Status object value for this Item from the device.

    Returns None when this item type has no 'Status' object.  On
    failure returns an error marker instead of raising: the Exception
    from the line handler, or a 'Response Error ...' string for a
    non-zero response status.  Tuple values are converted to lists.
    """
    result = None
    if self._dev.item_types[self._item_type_name].obj_types.has_key(
        'Status'):
        # Copy the request template and retarget it at 'Status'.
        reqs = [self._req_templ[:]]
        reqs[0][2] = 'Status'
        item_resp_list = self._dev._line_handler.get_values(
            self._dev._id, reqs)
        if isinstance(item_resp_list, Exception):
            msglog.log('mpx',msglog.types.ERR,'Failed to get response to request for Status for %s Item %u' \
                       % (self._item_type_name, self._item_inst_num))
            result = item_resp_list
        elif item_resp_list[0][0] != 0:
            msglog.log('mpx',msglog.types.ERR,'Recvd error response (%s) to request for Status for %s Item %u' \
                       % (item_resp_list[0][0], self._item_type_name, self._item_inst_num))
            result = 'Response Error %s' % item_resp_list[0][0]
        else:
            result = item_resp_list[0][1]
            if type(result) == types.TupleType:
                result = list(result)
    return result
def _load(cd, verbosity=properties.INIT_VERBOSITY):
    """Resolve or create the node described by config dict *cd*.

    Joins cd['parent'] and cd['name'] into a node path; an existing
    node URL is looked up, otherwise a new node is built from
    cd['module'] via the factory.  Returns the (unconfigured) node;
    logging is controlled by *verbosity*.
    """
    verbosity = int(verbosity)
    path = cd['parent']
    if path[-1] != '/':
        path = path + '/'
    path = path + cd['name']
    if verbosity > 0:
        msg = 'Looking up node: ' + str(path)
        msglog.log('broadway',msglog.types.INFO,msg)
    if mpx.lib.node.is_node_url(path):
        node = mpx.lib.node.from_path(path, 1)
    else:
        if verbosity > 0:
            msg = 'Creating node: ' + str(path)
            msglog.log('broadway',msglog.types.INFO,msg)
        node = mpx.lib.factory(cd['module'])
        if cd.has_key('node_id'):
            # Override the 'default' node id.
            node.__node_id__ = cd['node_id']
        else:
            # If there is no __node_id__, add the factory.
            # This doesn't make sense here, in 1.4 all this
            # moves into mpx.lib.factory().
            node.__factory__ = cd['module']
    if verbosity > 0:
        # Don't log actual passwords.
        if cd.has_key('password'):
            _cd = cd.copy()
            _cd['password'] = '******'
        else:
            _cd = cd
        msg = 'Configuring node %s:\n%r' % (path,_cd)
        msglog.log('broadway',msglog.types.INFO,msg)
    return node
def get_addrs():
    """Read the contact/internal address pair left by the installer.

    Reads (and deletes) prop.SRNA_IP_ADDR_FILE, expecting the contents
    'contact_addr,internal_addr'.  Returns (contact, internal); either
    may be None when the file is absent or its contents invalid, in
    which case callers fall back to default interface addresses.
    """
    contact_addr = None
    internal_addr = None
    if os.path.exists(prop.SRNA_IP_ADDR_FILE):
        ip_addr_file = open(prop.SRNA_IP_ADDR_FILE, 'r')
        addrs = ip_addr_file.read(1024)
        ip_addr_file.close()
        # One-shot file: remove it once consumed.
        os.remove(prop.SRNA_IP_ADDR_FILE)
        addr_pr = string.split(addrs, ',')
        if len(addr_pr) != 2:
            # Fix: the '%s' placeholder was never filled in (no '%'
            # applied); include the offending contents in the message.
            msglog.log('get_addrs()', msglog.types.WARN,
                       'Invalid list of addresses: %s. Using default '
                       'interface IP addresses to generate certs/keys.'
                       % addrs)
        else:
            for i in range(0, 2):
                # Dotted-quad length sanity check:
                # "0.0.0.0" (7) .. "255.255.255.255" (15).
                len_addr = len(addr_pr[i])
                if len_addr < 7 or len_addr > 15:
                    # Fix: supply the missing '%s' argument here too
                    # (and the missing space before 'interface').
                    msglog.log('get_addrs()', msglog.types.WARN,
                               'Invalid address: %s. Using default '
                               'interface IP addresses to generate '
                               'certs/keys.' % addr_pr[i])
                    break
            else:
                # for/else: both addresses passed the length check.
                contact_addr = addr_pr[0]
                internal_addr = addr_pr[1]
                msglog.log('get_addrs()', msglog.types.INFO,
                           'Contact: %s. Internal %s.' \
                           % (contact_addr, internal_addr))
    return (contact_addr, internal_addr)
def validate_materials():
    """Validate the untarred SRNA update payload.

    Checks that the untarred data directory and its 'password' file
    exist and that the password file size is within sane bounds.
    Returns the stripped password string on success, False on any
    validation failure.  Side effects: chdir into the data directory
    and removal of the password file once read.
    """
    if not os.path.exists(prop.SRNATMP_DATA):
        msglog.log('update_srna()', msglog.types.ERR,
                   'File "%s" did not contain directory "%s". '\
                   'Abort SRNA update.'\
                   % (prop.SRNA_UPDATE_TGZ, prop.SRNATMP_DATA))
        return False
    os.chdir(prop.SRNATMP_DATA) # new cwd, freshly untarred cwd
    PASSWORD_PATH = join(prop.SRNATMP_DATA, 'password')
    if not os.path.exists(PASSWORD_PATH):
        msglog.log('update_srna()', msglog.types.ERR,
                   'File "%s" did not contain file "%s". '\
                   'Abort SRNA update.'\
                   % (prop.SRNA_UPDATE_TGZ, PASSWORD_PATH))
        return False
    # Double check password params:
    MIN_PASSWORD_LEN = 5
    MAX_PASSWORD_LEN = 512
    try:
        statPassFile = os.stat(PASSWORD_PATH)
        if statPassFile.st_size < MIN_PASSWORD_LEN:
            msglog.log('update_srna()', msglog.types.ERR,
                       'Password file is too small: less than %d bytes.'\
                       'Abort SRNA update.' % MIN_PASSWORD_LEN)
            return False
        if statPassFile.st_size > MAX_PASSWORD_LEN:
            msglog.log('update_srna()', msglog.types.ERR,
                       'Password file is too large: more than %d bytes.'\
                       'Abort SRNA update.' % MAX_PASSWORD_LEN)
            return False
        filePassword = open(PASSWORD_PATH, 'r')
        password = filePassword.read(MAX_PASSWORD_LEN)
        filePassword.close()
        password = string.strip(password, string.whitespace)
        # The plaintext password file is removed as soon as it is read.
        os.remove(PASSWORD_PATH)
    except:
        msglog.exception(prefix='Unhandled')
        msglog.log('update_srna()', msglog.types.ERR, 'Abort SRNA update.')
        return False
    return password