def _Check(cls, username, logs_query):
  """Returns True iff the user's permissions satisfy the logs query.

  Args:
    username: The user whose logs access we're checking.
    logs_query: a string; see the comments for TextQuery in
      //logs/accesstools:report.py for the syntax.

  Returns:
    True or False, depending whether the user's permissions match the query.
  """
  logging.debug('Checking for logs access "%s".', logs_query)
  command = "%s --query '%s' '%s'" % (cls.LOGS_REPORT, username, logs_query)
  status, report_result = _GET_STATUS_OUTPUT(command)
  report_result = report_result.strip()
  # The report tool prints 'True' or 'False'; map that text onto a bool.
  if status == 0:
    if report_result == 'True':
      return True
    if report_result == 'False':
      return False
  # Nonzero exit or unexpected output: log it and deny access.
  logging.warning('Error running %s, got: %s', command, report_result)
  return False
def _execute(self): """ The actual execution --- executes a composed string that calles the corresponding functions """ if self.error: return try: # build up a list of arguments to use # we don't pass self.prefixes because methods aren't prepared for them. # (prefixes can be found in self.prefixes) args = [] args.extend(self.params) args.extend(self.lines) if self.expected_bytes >= 0 : args.append(self.bytes) # call the command logging.debug("calling %s %s %s" % ( str(self.prefixes), self.command, str(args))) method = self.accepted_commands[self.command].method self.data = apply(method, args, {}) except Exception, e: (t, v, tb) = sys.exc_info() exc_msg = string.join(traceback.format_exception(t, v, tb)) logging.error(exc_msg) self.error = str(e) self.data = None
def setUp(self):
  """Per-test setup: builds the graph connector and resets bookkeeping."""
  mode = FLAGS.mockmode
  if mode == 'replay':
    assert MQLTest.MOCK_DATA is not None, 'Must call SetMockPath()'
    logging.debug('mockmode: replay')
    self.conn = graph.MockReplayConnector(MQLTest.MOCK_DATA)
  elif mode == 'record':
    assert MQLTest.MOCK_DATA is not None, 'Must call SetMockPath()'
    logging.debug('mockmode: record')
    self.conn = graph.MockRecordConnector(MQLTest.MOCK_DATA,
                                          self._GetStubbyConnector())
  elif mode == 'nomock':
    self.conn = self._GetStubbyConnector()
  self.mql_service = pymql.MQLService(connector=self.conn)
  # Reset the per-test query/response bookkeeping.
  for attr in ('dateline', 'mql_result', 'json_query', '_dumped_query',
               'json_response', '_dumped_response'):
    setattr(self, attr, None)
  self.env = {}
def PrependBase(rel_path):
  """Returns the absolute path to the requested file resource.

  Args:
    rel_path: string - Relative path to file

  Returns:
    string - Absolute path to resource. If resource startswith("/") then
    resource is returned else resource will have
    GetARootDirWithAllResources() prepended to it.

  Raises:
    ValueError: When constructed path does not exist. Or is not absolute.
  """
  rel_base = os.path.join(FLAGS.test_srcdir, 'google3', 'third_party', 'py',
                          'pymql', 'test')
  resource = rel_base + '/' + rel_path
  if resource.startswith('/'):
    abs_path = resource
  else:
    # Fall back through the resource-extraction helpers to find a root.
    base = resources.ParExtractAllFiles()
    if not base:
      base = resources.GetARootDirWithAllResources()
    abs_path = os.path.join(base, resource)
  if os.path.isabs(abs_path) and os.path.exists(abs_path):
    logging.debug('static file path found: %s', abs_path)
  else:
    # Bug fix: the original passed rel_path as a second argument
    # (logging-style), which ValueError never interpolates; format the
    # path into the message explicitly.
    raise ValueError(
        'File in pymql/test does not exist. Please create it. %s' % rel_path)
  return abs_path
def DoQuery(self, query, mqlwrite=False, exp_response=None,
            exc_response=None):
  """test a query.

  Runs a mql query and asserts on the expected result or expected
  exception.

  Args:
    query: json string that is a mql query
    mqlwrite: boolean, else do mqlread
    exp_response: json string that matches the expected response
    exc_response: expected exception. tuple: exception class, msg

  Raises:
    AssertionError
  """
  exc = None
  msg = None
  try:
    self.MQLQuerier(query, mqlwrite)
  except (
      # add expected exceptions here
      error.MQLParameterizedError,
      error.MQLError,
      error.MQLParseError,
      error.MQLInternalError,
      error.MQLTypeError,
      error.MQLResultError,
      error.MQLInternalParseError,
      error.MQLAccessError,
      error.MQLTimeoutError,
      error.MQLGraphError,
      error.MQLDatelineInvalidError,
      error.MQLConnectionError,
      error.GraphIsSnapshottingError,
      error.MQLReadWriteError,
      error.NamespaceException):
    # Record which exception fired and its message for comparison below.
    exc = sys.exc_info()[0]
    msg = str(sys.exc_info()[1])
    exc_actual = (exc, msg)
    exc_str = 'exception encountered: %s msg: %s' % exc_actual
    logging.debug(exc_str)
    if not exc_response:
      self.fail('exception. was not expected: %s' % exc_str)
  if exc_response:
    if not msg:
      # No exception fired but one was expected.
      self.fail(
          'we expected an exception but did not get one: %s %s' % exc_response)
    # exc_actual is only bound when an exception fired; the self.fail
    # above raises first in the no-exception case, so this is safe.
    self.AssertErrorEqual(exc_actual, exc_response)
  elif exp_response:
    self.AssertMQLEqual(simplejson.loads(exp_response))
  else:
    # the calling test should do some kind of assert
    logging.debug('no expected response was given for this query')
def MQLQuerier(self, q, mqlwrite=False):
  """Issues the query through the MQL service and records both sides.

  Args:
    q: the mql query (json string)
    mqlwrite: boolean, else do mqlread
  """
  # Preserve key order so dumped queries are stable for logging/diffing.
  self.json_query = simplejson.loads(q, object_pairs_hook=OrderedDict)
  self._dumped_query = simplejson.dumps(self.json_query, indent=2)
  if self.dateline:
    logging.debug('including write_dateline %s', self.dateline)
    self.env['write_dateline'] = self.dateline
  logging.debug('mql query:\n%s', self._dumped_query)
  if mqlwrite is True:
    logging.debug('doing mqlwrite')
    self.mql_result = self.mql_service.write(self.json_query, **self.env)
    # Carry the write dateline forward into subsequent queries.
    self.dateline = self.mql_result.dateline
  else:
    logging.debug('doing mqlread')
    self.mql_result = self.mql_service.read(self.json_query, **self.env)
  self._dumped_response = simplejson.dumps(self.mql_result.result, indent=2)
  # Round-trip the response through JSON so unicode escaping matches the
  # expected object's representation -- apples-to-apples comparison.
  self.json_response = simplejson.loads(self._dumped_response)
  logging.debug('mql response:\n%s', self._dumped_response)
def _Check(cls, username, usermap):
  """Greps the given borg usermap for the given username.

  This depends on the class attribute defined above that says where the
  borg usermap directory is.

  Args:
    username: The name of the user to search for
    usermap: The borg usermap to check for existance. This should be the
      relative path starting from the production/borg/usermaps/services/
      directory.

  Returns:
    True or False depending on if the usermap contains the given user.
  """
  clean_user = _CleanForShell(username)
  # Sanitize each path component before joining it under the usermap dir.
  map_parts = [_CleanForShell(part) for part in usermap.split(os.path.sep)]
  full_path = os.path.normpath(
      os.path.join(cls.__BORG_USERMAP_DIR, *map_parts))
  logging.debug('Searching usermap %s for %s', full_path, clean_user)
  cmd = 'grep -s \'"%s"\' %s' % (clean_user, full_path)
  (status, unused_output) = _GET_STATUS_OUTPUT(cmd)
  # grep exits 0 only when a match was found.
  return status == 0
def __init__(self, node):
  """Captures the tunnel attributes from the given XML node."""
  get = node.getAttribute
  self.__id = get('id')
  self.__tunnel_ip = get('tunnel-ip')
  self.__tunnel_port = get('tunnel-port')
  self.__ppp_ip = get('ppp-ip')
  self.__secret = get('secret')
  logging.debug('%s' % self.GetConfigString())
def ExecuteCmd(self, cmd, timeout=120, cmdstrip=0):
  """Executes the given command on admin-runner.

  Args:
    cmd: the legacy command string to execute.
    timeout: seconds to allow the command to run.
    cmdstrip: if true, strip surrounding whitespace from cmd first.

  Returns:
    A (succeeded_flag, response) tuple from ExecuteCmdWithHandler.
  """
  if cmdstrip:
    # str.strip() replaces the long-deprecated string.strip() helper.
    cmd = cmd.strip()
  # Lazy %-args avoid formatting when debug logging is off.
  logging.debug('Executing legacy command: %s', cmd)
  # The legacy protocol is newline-terminated.
  cmd += '\n'
  return self.ExecuteCmdWithHandler('legacyCommand', cmd, timeout)
def DoQueryException(self, query, expected, **kwargs):
  """Runs DoQuery expecting an AssertionError containing `expected`."""
  try:
    self.DoQuery(query, **kwargs)
  except AssertionError:
    actual_msg = str(sys.exc_info()[1])
    if expected not in actual_msg:
      self.fail('expected: %s\ngot: %s' % (expected, actual_msg))
    else:
      logging.debug('assertion raised, as expected! got: %s', expected)
def _Check(cls, username, owners_file):
  """Returns True iff the user is explicitly mentioned in an OWNERS file.

  Args:
    username: The user whose access we're checking.
    owners_file: The owners file to check, relative to /home/build.

  Returns:
    True or False depending if the user is present.
  """
  logging.debug('Searching OWNERS file %s for %s', owners_file, username)
  # Delegate the actual membership test to the shared helper.
  return cls.UserInOwnersFile(username, owners_file)
def GetClientCommand(self, appliance_id):
  """Generate the command that will start the pppd and stunnel.

  Args:
    appliance_id: The appliance id of the corpus root to connect to

  Returns:
    a string containing the command to execute. None on failure.
  """
  try:
    logging.debug('Fetch client command for %s' % appliance_id)
    corpus_root_config = self.__super_root_config.GetCorpusRoot(appliance_id)
  except fed_network_config.FederationConfigException, ex:
    # Unknown appliance id: signal failure to the caller.
    logging.error('Exception in creating client tunnel conf %s' % ex.message)
    return None
  # NOTE(review): as visible here the success path falls through and
  # returns None without using corpus_root_config -- presumably the
  # command-building code follows in the full file; confirm this block
  # is complete before relying on it.
def HandleCompletion(req):
  """Frees a finished request's scheduling info.

  Called when a request completes, with failure or success; removes each
  of its scheduling-info entries from the global RUNNING_INFO list so
  that conflicting requests can be scheduled again.
  """
  logging.debug("Handling END %s" % req.GetFilename())
  scheduling_info = req.GetSchedulingInfo()
  if not scheduling_info:
    return
  global RUNNING_INFO
  for entry in scheduling_info:
    if entry in RUNNING_INFO:
      RUNNING_INFO.remove(entry)
def ParseJavaFlags(self, cnt=0): """Parse Java style flags (com.google.common.flags).""" # The java flags prints starts with a "Standard flags" "module" # that doesn't follow the standard module syntax. modname = 'Standard flags' # name of current module self.module_list.append(modname) self.modules.setdefault(modname, []) modlist = self.modules[modname] flag = None for cnt in range(cnt, len(self.output)): # collect flags line = self.output[cnt].rstrip() logging.vlog(2, 'Line: "%s"' % line) if not line: # blank lines terminate module if flag: # save last flag modlist.append(flag) flag = None continue mobj = self.module_java_re.match(line) if mobj: # start of a new module modname = mobj.group(1) logging.debug('Module: %s' % line) if flag: modlist.append(flag) self.module_list.append(modname) self.modules.setdefault(modname, []) modlist = self.modules[modname] flag = None continue mobj = self.flag_java_re.match(line) if mobj: # start of a new flag if flag: # save last flag modlist.append(flag) logging.debug('Flag: %s' % line) flag = Flag(mobj.group(1), mobj.group(2)) continue # append to flag help. type and default are part of the main text if flag: flag.help += ' ' + line.strip() else: logging.info('Extra: %s' % line) if flag: modlist.append(flag)
def GetClientCommand(self, appliance_id):
  """Generate the command that will start the pppd and stunnel.

  Args:
    appliance_id: The appliance id of the corpus root to connect to

  Returns:
    a string containing the command to execute. None on failure.
  """
  try:
    logging.debug('Fetch client command for %s' % appliance_id)
    corpus_root_config = self.__super_root_config.GetCorpusRoot(
        appliance_id)
  except fed_network_config.FederationConfigException, ex:
    # Unknown appliance id: report failure with None.
    logging.error('Exception in creating client tunnel conf %s' % ex.message)
    return None
  # NOTE(review): corpus_root_config is fetched but unused and the success
  # path returns None as shown -- this function appears truncated here;
  # verify against the full file.
def GetCorpusRoot(self, appliance_id):
  """Get the corpus root for the specified id.

  Args:
    appliance_id: The appliance id of the corpus root.

  Returns:
    The CorpusRootConfig object of the specified id, None if invalid.

  Raises:
    FederationConfigException: Invalid Corpus Root Id.
  """
  for candidate in self.__corpus_roots:
    logging.debug('Match id %s with corpus %s' % (appliance_id,
                                                  candidate.GetId()))
    if appliance_id == candidate.GetId():
      return candidate
  # Exhausted the configured corpus roots without a match.
  raise FederationConfigException('Invalid Corpus Root Id')
def tearDown(self):
  """Per-test cleanup: persists mock data (record mode) and sums costs."""
  logging.debug('teardown!')
  if FLAGS.mockmode == 'record':
    # It would be nice to only do this after the last testcase
    # but I'm not aware of a way to do that.
    logging.info('writing mockdata to %s', MQLTest.MOCKFILE_PATH)
    fl = open(MQLTest.MOCKFILE_PATH, 'w')
    # Bug fix: close the file even if yaml.dump or write raises, so the
    # handle isn't leaked on a failing teardown.
    try:
      fl.write(yaml.dump(self.conn.mockdata))
    finally:
      fl.close()
  # Accumulate this test's service costs into the class-wide totals.
  cost = self.mql_service.get_cost()
  if cost:
    for c in cost:
      if c in MQLTest.COSTS:
        MQLTest.COSTS[c] += cost[c]
      else:
        MQLTest.COSTS[c] = cost[c]
def Stop(self):
  """Stop the Stunnel CorpusRoot service.

  Returns:
    (-1, message) on failure and (0, message) on success.
  """
  pid = self.__stunnel_config.GetStunnelPid()
  if pid == -1:
    # stunnel does not appear to be running; still tear down the jail.
    logging.error('No such process - possible that stunnel is not running')
    (status_jail_destroy, message) = self.__jail.Destroy()
    return (-1, 'No such process - possible that stunnel is not running')
  # Ask the known pid to terminate, then sweep stragglers by name.
  cmd = '%s -15 %d' % ('kill', pid)
  logging.debug('Executing command %s' % cmd)
  (status_kill, message) = self.__os.Execute(cmd)
  (status_kill, message) = self.__os.Execute('killall -15 stunnel')
  (status_jail_destroy, message) = self.__jail.Destroy()
  return (status_kill, message)
def ParseDesc(self, cnt=0):
  """Parse the initial description.

  This could be Python or C++.

  Returns:
    (line_count, lang_type)
      line_count  Line to start parsing flags on (int)
      lang_type   Either 'python' or 'c'
    (-1, '') if the flags start could not be found
  """
  exec_mod_start = self.executable + ':'
  after_blank = False
  # NOTE(review): this overrides the cnt argument, so parsing always
  # starts from line 0 -- confirm that is intended.
  cnt = 0
  for cnt in range(cnt, len(self.output)):  # collect top description
    line = self.output[cnt].rstrip()
    # Python flags start with 'flags:\n'
    if ('flags:' == line
        and len(self.output) > cnt+1
        and '' == self.output[cnt+1].rstrip()):
      cnt += 2
      logging.debug('Flags start (python): %s' % line)
      return (cnt, 'python')
    # SWIG flags just have the module name followed by colon.
    if exec_mod_start == line:
      logging.debug('Flags start (swig): %s' % line)
      return (cnt, 'python')
    # C++ flags begin after a blank line and with a constant string
    if after_blank and line.startswith(' Flags from '):
      logging.debug('Flags start (c): %s' % line)
      return (cnt, 'c')
    # java flags begin with a constant string
    if line == 'where flags are':
      logging.debug('Flags start (java): %s' % line)
      cnt += 2                              # skip "Standard flags:"
      return (cnt, 'java')
    logging.debug('Desc: %s' % line)
    self.desc.append(line)
    after_blank = (line == '')
  else:
    # The loop exhausted the output without returning: no flags section.
    logging.warn('Never found the start of the flags section for "%s"!'
                 % self.long_name)
    return (-1, '')
def increaseCountInternal(self, count, startCounting):
  """it tries to increase the counter.

  ** to prevent from corruption, it writes to two files
  ** it also checks to enforce the serving time limit

  Args:
    count: amount to add to the license counter.
    startCounting: if true, start the counter when it hasn't started yet.

  Returns:
    true on success; false when the counter could not be started or
    incremented.  (true/false are presumably the legacy module-level
    int constants -- confirm against the module header.)
  """
  # Serialize all counter/license mutation under the lock.
  self.counter_lock.acquire()
  try:
    counter = self.getLicenseCounter()
    license = self.getCurrentLicense()
    # only mess with the counter if the license has started. make sure
    # that we still enforce the license to avoid setting the time back to
    # pre license.
    if not license.hasStarted():
      logging.debug("license hasn't started; not incrementing")
    else:
      # if the counter hasn't started and we _want_ to start it, do so
      if not counter.hasStarted() and startCounting:
        logging.debug('starting counter')
        if not counter.startCounter():
          return false
      # increment and save (if the counter hasn't started, it will ignore)
      if not counter.incrementCount(count):
        # log that we couldn't increment counter
        logging.info('Could not increment license counter')
        return false
      # update license to reflect the new counter value
      license.updateTimeLimit(counter.getCount())
      # save changes to the license
      self.cfg.setGlobalParam('ENT_LICENSE_INFORMATION', license.license)
    # make sure that we enforce any changes to the license (even if the
    # license hasn't started)
    self.enforceLicense(license, counter)
  finally:
    # Early returns above still release the lock via this finally.
    self.counter_lock.release()
  return true
def Stop(self):
  """Stop the Stunnel CorpusRoot service.

  Returns:
    (-1, message) on failure and (0, message) on success.
  """
  pid = self.__stunnel_config.GetStunnelPid()
  if pid == -1:
    logging.error(
        'No such process - possible that stunnel is not running')
    (status_jail_destroy, message) = self.__jail.Destroy()
    return (-1, 'No such process - possible that stunnel is not running')
  else:
    kill_cmd = '%s -15 %d' % ('kill', pid)
    logging.debug('Executing command %s' % kill_cmd)
    (status_kill, message) = self.__os.Execute(kill_cmd)
    # Also sweep by name in case other stunnel processes linger.
    (status_kill, message) = self.__os.Execute('killall -15 stunnel')
    (status_jail_destroy, message) = self.__jail.Destroy()
    return (status_kill, message)
def GetStunnelConfigurationInFile(self, appliance_id, file_name):
  """Generate and write stunnel configuration into file.

  Args:
    appliance_id: The id of the appliance to generate the config for.
    file_name: the file that the generated config is to be written to.

  Returns:
    A (status, message) tuple: (0, ...) on success, (-1, ...) on failure.
  """
  file_object = self.__os.OpenWrite(file_name)
  if not file_object:
    return (-1, 'Invalid file object')
  tunnel_config = self.GetStunnelConfiguration(appliance_id)
  logging.debug('Print the config %s in %s' % (tunnel_config, file_name))
  if tunnel_config is None:
    # Bug fix: close the handle before bailing out so it isn't leaked on
    # the invalid-configuration path.
    file_object.close()
    return (-1, 'Invalid Stunnel configuration')
  file_object.write(tunnel_config)
  file_object.close()
  return (0, 'Successful configuration')
def AssertErrorEqual(self, exc_actual, exc_response):
  """Asserts the actual exception matches the expected one, with context.

  Args:
    exc_actual: actual Exception class and msg string (tuple)
    exc_response: expected Exception class and msg string (tuple)

  Raises:
    AssertionError
  """
  logging.debug('expected exception: %s %s',
                exc_response[0], exc_response[1])
  try:
    self.assertEqual(exc_actual[0], exc_response[0])
    self.assertEqual(exc_actual[1], exc_response[1])
  except AssertionError:
    # Log the query and both exceptions before re-raising the failure.
    logging.error('expected a different error\nquery:\n'
                  '%s\nexception:\n%s\nexpected:\n%s'
                  % (self._dumped_query, exc_actual, exc_response))
    raise
def __init__(self, federation_config, sys_abstraction, ec=None):
  """Initialize the Superroot Stunnel config creation object.

  Args:
    federation_config: The federation network for which stunnel conf is reqd.
    sys_abstraction: The base system methods that we use.
    ec: The enterprise configuration.

  Raises:
    FederationConfigException: if the ID of the appliance does not have a
      valid superroot configuration.
  """
  self.__os = sys_abstraction
  self.__federation_config = federation_config
  # Fall back to the live enterprise configuration when none is injected.
  self.__ec = ec if ec is not None else GetEnterpriseConfiguration()
  logging.debug('Name - %s' % self.__ec.ENT_CONFIG_NAME)
  self.__super_root_config = self.__federation_config.GetSuperRootConfig(
      self.__ec.ENT_CONFIG_NAME)
def _Check(cls, username, p4_group):
  """Checks if the user is in the p4 group, returning True or False.

  This only works if a .p4config can be found somewhere above your current
  working directory. If there, this will use the p4 command and grep to
  see if the given user is a member of the correct group.

  Args:
    username: The username to check for membership in the following group.
    p4_group: The p4 group the user should be a member of.

  Returns:
    True or False depending on whether the p4 group contains the given user.
  """
  clean_user = _CleanForShell(username)
  logging.debug('Searching %s for %s', p4_group, clean_user)
  # catch error output as well, to see if we're not in a client.
  (status, p4_output) = _GET_STATUS_OUTPUT('p4 groups ' + clean_user)
  logging.debug('P4 command: p4 groups %s', clean_user)
  logging.debug('P4 output: %s', p4_output)
  if status != 0:
    logging.info('Error running p4 groups: %s. Try running from your home '
                 'directory.', p4_output)
    return False
  # Each output line is one group the user belongs to.
  return p4_group in p4_output.splitlines()
def AllowedFilter(req, _, dummy):
  """Decides whether req may run given the currently-running info.

  Returns 0 (allowed) after registering req's scheduling info in
  RUNNING_INFO, or 1 when it conflicts with something already running.
  Requests without scheduling info are always allowed.
  """
  logging.debug("Filtering %s" % req.GetFilename())
  scheduling_info = req.GetSchedulingInfo()
  if not scheduling_info:
    return 0
  # Globals does not allow anything
  if GLOBAL in RUNNING_INFO:
    return 1
  # Reject when anything with the same scheduling info is already running.
  for info in scheduling_info:
    if info in RUNNING_INFO:
      return 1
  RUNNING_INFO.extend(scheduling_info)
  logging.debug("Allowed %s" % req.GetFilename())
  return 0
def __init__(self, federation_config, sys_abstraction, ec=None):
  """Initialize the Superroot Stunnel config creation object.

  Args:
    federation_config: The federation network for which stunnel conf is reqd.
    sys_abstraction: The base system methods that we use.
    ec: The enterprise configuration.

  Raises:
    FederationConfigException: if the ID of the appliance does not have a
      valid superroot configuration.
  """
  self.__federation_config = federation_config
  self.__os = sys_abstraction
  if ec is None:
    # No injected configuration: read the live one.
    ec = GetEnterpriseConfiguration()
  self.__ec = ec
  logging.debug('Name - %s' % self.__ec.ENT_CONFIG_NAME)
  self.__super_root_config = self.__federation_config.GetSuperRootConfig(
      self.__ec.ENT_CONFIG_NAME)
def ParseCFlags(self, cnt=0):
  """Parse C style flags.

  Args:
    cnt: index into self.output to start parsing from.

  Populates self.module_list and self.modules with Flag objects.
  """
  modname = None                           # name of current module
  flag = None
  # Bug fix: 'modlist' was unbound until the first module header matched,
  # so a flag or help line appearing before any module header raised
  # UnboundLocalError (ParseJavaFlags avoids this by seeding a default
  # module).  Seed a throwaway list instead.
  modlist = []
  for cnt in range(cnt, len(self.output)):  # collect flags
    line = self.output[cnt].rstrip()
    if not line:                            # blank lines terminate flags
      if flag:                              # save last flag
        modlist.append(flag)
        flag = None
      continue
    mobj = self.module_c_re.match(line)
    if mobj:                                # start of a new module
      modname = mobj.group(1)
      logging.debug('Module: %s' % line)
      if flag:
        modlist.append(flag)
      self.module_list.append(modname)
      self.modules.setdefault(modname, [])
      modlist = self.modules[modname]
      flag = None
      continue
    mobj = self.flag_c_re.match(line)
    if mobj:                                # start of a new flag
      if flag:                              # save last flag
        modlist.append(flag)
      logging.debug('Flag: %s' % line)
      flag = Flag(mobj.group(1), mobj.group(2))
      continue
    # append to flag help. type and default are part of the main text
    if flag:
      flag.help += ' ' + line.strip()
    else:
      logging.info('Extra: %s' % line)
  # Save the final flag once the output is exhausted.
  if flag:
    modlist.append(flag)
def AllowedFilter(req, _, dummy):
  """Admission filter based on scheduling info.

  Returns 0 when the request may run (registering its scheduling info in
  RUNNING_INFO), or 1 when it conflicts with something already running.
  """
  logging.debug("Filtering %s" % req.GetFilename())
  scheduling_info = req.GetSchedulingInfo()
  if not scheduling_info:
    return 0
  # Globals does not allow anything
  if GLOBAL in RUNNING_INFO:
    return 1
  # Conflict when any of the request's scheduling info is already running.
  if any(info in RUNNING_INFO for info in scheduling_info):
    return 1
  RUNNING_INFO.extend(scheduling_info)
  logging.debug("Allowed %s" % req.GetFilename())
  return 0
def __init__(self, node, tunnel_config):
  """Initialize the Superrootconfig with all the attributes of a Superroot.

  Args:
    node: The XML Node that has configuration information about the
      superroot.
    tunnel_config: The federation configuration for the tunnels.

  Raises:
    FederationConfigException: Invalid federation configuration. Missing Id.
  """
  self.__id = node.getAttribute('id')
  # The id attribute is mandatory; the rest are optional.
  if self.__id is None:
    raise FederationConfigException('Missing Id')
  self.__user = node.getAttribute('user')
  self.__secret = node.getAttribute('secret')
  self.__ppp_ip = node.getAttribute('ppp-ip')
  self.__tunnel_config = tunnel_config
  self.__corpus_roots = []
  self._ParseCorpusRoots(node)
  logging.debug('ID - %s, User %s, secret %s, Corpus %s'
                % (self.__id, self.__user, self.__secret,
                   str(self.__corpus_roots)))
def _Check(cls, username, mdb_group):
  """Checks the given mdb group for the given username.

  Args:
    username: The user that should be a member of the mdb group.
    mdb_group: The group to check for the user in.

  Returns:
    True or False depending on if the mdb group contains the given user.
  """
  clean_user = _CleanForShell(username)
  logging.debug('Searching %s for %s', mdb_group, clean_user)
  # Build the command once so the two error logs can't drift out of sync
  # with what was actually run (the literal was repeated three times).
  cmd = ('ganpati --noheaders lsmem --mdb --include_indirect -m %s'
         % clean_user)
  # Expect lines like:
  # ^hurstdog build-admin@prod$
  (status, mdb_out) = _GET_STATUS_OUTPUT(cmd)
  if status != 0:
    # Only output 1000 characters of mdb_out because of limits in logging.py
    logging.info('Error running %s, got: %s', cmd, mdb_out[0:1000])
    return False
  mdb_lines = mdb_out.splitlines()
  try:
    # Second whitespace-separated field is group@realm; keep the group.
    groups = [line.split()[1].split('@')[0] for line in mdb_lines]
  except IndexError:
    # Only output 1000 characters of mdb_out because of limits in logging.py
    logging.info('Error running %s, got: %s', cmd, mdb_out[0:1000])
    return False
  return mdb_group in groups
def _Check(cls, username, unix_group):
  """Checks the given unix group for the given username.

  Args:
    username: The username to check for membership in the group.
    unix_group: The unix group the user should be a member of.

  Returns:
    True or False depending on whether the unix group contains the given
    user.
  """
  clean_user = _CleanForShell(username)
  logging.debug('Searching %s for %s', unix_group, clean_user)
  (status, id_output) = _GET_STATUS_OUTPUT('id -Gn %s' % clean_user)
  if status != 0:
    logging.info('Error running id, got: %s', id_output)
    return False
  logging.debug('Output from id: %s', id_output)
  # id -Gn prints the user's groups space-separated on one line.
  return unix_group in id_output.split()
def GetStunnelConfiguration(self):
  """Generate Stunnel Configuration for an appliance.

  Returns:
    the stunnel conf file contents as a string, None on failure.
  """
  # NOTE(review): despite the docstring, this function never returns None
  # itself; failures would surface as exceptions from GetCorpusRootConfig.
  logging.debug('Appliance ID %s' % self.__ec.ENT_CONFIG_NAME)
  corpus_root_config = (self.__federation_config.GetCorpusRootConfig(
      self.__ec.ENT_CONFIG_NAME))
  # Server certificate and key live under the versioned install root.
  tunnel_config = 'cert=/export/hda3/%s%s\n' % (self.__ec.VERSION,
                                                STUNNEL_SERVER_CERT)
  key = 'key=/export/hda3/%s%s\n' % (self.__ec.VERSION, STUNNEL_SERVER_KEY)
  tunnel_config += key
  pid = 'pid=/stunnel_federation.pid\n'
  tunnel_config += pid
  tunnel_config += ('chroot=%s\n' % STUNNEL_CHROOT)
  tunnel_config += ('debug=%s\n' % STUNNEL_DEBUG_LEVEL)
  tunnel_config += ('foreground=%s\n' % 'yes')
  tunnel_config += ('%s\n' % STUNNEL_SERVER)
  tunnel_config += ('output=/export/hda3/chroot%s%s\n'
                    % (STUNNEL_LOGS, self.__ec.ENT_CONFIG_NAME))
  tunnel_config += '[ppp]\n'
  # Prefer the per-corpus-root tunnel port when one is configured.
  listen_port = STUNNEL_LISTEN_PORT
  if corpus_root_config.GetTunnelPort() is not None:
    listen_port = corpus_root_config.GetTunnelPort()
  tunnel_config += ('accept=%s\n' % listen_port)
  tunnel_config += ('exec=%s\n' % STUNNEL_PPPD_EXEC)
  # Get the ip that the ppp interface will bind to on the local
  corpus_root_config_ip = corpus_root_config.GetPppIp()
  exec_args = ('execargs=%s %s %s:\n') % (STUNNEL_PPPD_EXEC_ARGS,
                                          self.__ec.ENT_CONFIG_NAME,
                                          corpus_root_config_ip)
  tunnel_config += exec_args
  tunnel_config += ('pty=%s\n' % STUNNEL_PTY)
  return tunnel_config
def ParsePythonFlags(self, cnt=0):
  """Parse python/swig style flags.

  Args:
    cnt: index into self.output to start parsing from.

  Populates self.module_list and self.modules with Flag objects.
  """
  modname = None                           # name of current module
  flag = None
  for cnt in range(cnt, len(self.output)):  # collect flags
    line = self.output[cnt].rstrip()
    if not line:                            # blank
      continue
    mobj = self.module_py_re.match(line)
    if mobj:                                # start of a new module
      modname = mobj.group(1)
      logging.debug('Module: %s' % line)
      if flag:
        modlist.append(flag)
      self.module_list.append(modname)
      self.modules.setdefault(modname, [])
      modlist = self.modules[modname]
      flag = None
      continue
    mobj = self.flag_py_re.match(line)
    if mobj:                                # start of a new flag
      if flag:
        modlist.append(flag)
      logging.debug('Flag: %s' % line)
      flag = Flag(mobj.group(1), mobj.group(2))
      continue
    # From here on the line is a continuation of the current flag.
    # NOTE(review): when flag is None this only logs and falls through;
    # a matching default/tips line would then hit AttributeError on
    # flag.default below -- confirm that can't happen in practice.
    if not flag:                            # continuation of a flag
      logging.error('Flag info, but no current flag "%s"' % line)
    mobj = self.flag_default_py_re.match(line)
    if mobj:                                # (default: '...')
      flag.default = mobj.group(1)
      logging.debug('Fdef: %s' % line)
      continue
    mobj = self.flag_tips_py_re.match(line)
    if mobj:                                # (tips)
      flag.tips = mobj.group(1)
      logging.debug('Ftip: %s' % line)
      continue
    if flag and flag.help:
      flag.help += line                     # multiflags tack on an extra line
    else:
      logging.info('Extra: %s' % line)
  # Save the final flag once the output is exhausted.
  if flag:
    modlist.append(flag)
def main(argv): FLAGS(argv) if FLAGS.deb: logging.set_verbosity(logging.DEBUG) # start a service if the command is the default specified in flags if FLAGS.command is 'DEFAULT': fed_network_client = FederationNetworkClientService() logging.debug('Launched as a service. Start the service.') fed_network_client.execute(argv) return ec = fed_stunnel_config.GetEnterpriseConfiguration() file_path = FEDERATION_NETWORK_CONFIG % ec.ENTERPRISE_HOME sys_abstraction = stunnel_jail.GetSystemAbstraction() try: fed_config = fed_network_config.FederationConfig(file_path, None, sys_abstraction) logging.info('Federation config read successfully') client = fed_network_util.SuperRootStunnelService(sys_abstraction, fed_config) except fed_network_config.FederationConfigException, ex: print ex.message logging.error('Exception in configuration %s' % ex.message) sys.exit(-1)
def GetStunnelConfiguration(self):
  """Generate Stunnel Configuration for an appliance.

  Returns:
    the stunnel conf file contents as a string, None on failure.
  """
  logging.debug('Appliance ID %s' % self.__ec.ENT_CONFIG_NAME)
  corpus_root_config = (
      self.__federation_config.GetCorpusRootConfig(self.__ec.ENT_CONFIG_NAME))
  # Per-corpus-root listen port overrides the global default when set.
  listen_port = STUNNEL_LISTEN_PORT
  if corpus_root_config.GetTunnelPort() is not None:
    listen_port = corpus_root_config.GetTunnelPort()
  # The ip that the ppp interface will bind to on the local side.
  corpus_root_config_ip = corpus_root_config.GetPppIp()
  # Assemble the conf file line by line and join once at the end.
  parts = [
      'cert=/export/hda3/%s%s\n' % (self.__ec.VERSION, STUNNEL_SERVER_CERT),
      'key=/export/hda3/%s%s\n' % (self.__ec.VERSION, STUNNEL_SERVER_KEY),
      'pid=/stunnel_federation.pid\n',
      'chroot=%s\n' % STUNNEL_CHROOT,
      'debug=%s\n' % STUNNEL_DEBUG_LEVEL,
      'foreground=%s\n' % 'yes',
      '%s\n' % STUNNEL_SERVER,
      'output=/export/hda3/chroot%s%s\n' % (STUNNEL_LOGS,
                                            self.__ec.ENT_CONFIG_NAME),
      '[ppp]\n',
      'accept=%s\n' % listen_port,
      'exec=%s\n' % STUNNEL_PPPD_EXEC,
      'execargs=%s %s %s:\n' % (STUNNEL_PPPD_EXEC_ARGS,
                                self.__ec.ENT_CONFIG_NAME,
                                corpus_root_config_ip),
      'pty=%s\n' % STUNNEL_PTY,
  ]
  return ''.join(parts)
def _CheckMembershipInSection(self, check_class, req_opt, checker):
  """Checks that the user is a member of the groups in the given section.

  For every entry in the config file section name as given by
  'check_class:req_opt:section' uses the given check_object to see if the
  instance's username is in that group. Returns a dict with the key being
  the name of the group they're missing membership in, and the value
  being a description of it. If you get an empty dict back, they're
  missing no memberships.

  Note that if the config file is incorrect, this will call sys.exit()
  and stop immediately letting the user know (through PrintUsageAndExit).

  Args:
    check_class: The name of the class of groups.
    req_opt: "required" or "optional".
    checker: A method that takes a username and group name and checks for
      membership. Generally should be a value returned from
      _SUPPORTED_SECTIONS

  Returns:
    {missing_group: group_description, ...}
  """
  missing_groups = {}
  section_name = '%s:%s:%s' % (check_class, req_opt, checker.NAME)
  for option, entry in self._GetOptions(section_name):
    # NOTE(review): 'entry' from _GetOptions is immediately overwritten by
    # the raw config lookup below -- confirm the double read is intended.
    entry = self._config.get(section_name, option)
    logging.debug('Found option %s with value %s', option, entry)
    if checker.Check(self._user, option):
      logging.debug('%s is a member of %s', self._user, option)
    else:
      # Membership check failed: record the group and its description.
      logging.debug('%s is NOT a member of %s', self._user, option)
      missing_groups[option] = entry
  return missing_groups
if status_chap_secrets: logging.error('Exception in getting the chap secret file created') return (status_chap_secrets, message) file_name = self.__stunnel_config.GetStunnelConfigFileName(appliance_id) chroot_file_name = ('%s%s') % (fed_stunnel_config.STUNNEL_CLIENT_CHROOT, file_name) (status_configure, message) = ( self.__stunnel_config.GetStunnelConfigurationInFile(appliance_id, chroot_file_name)) except fed_network_config.FederationConfigException, ex: logging.error('Exception in getting configuration %s' % ex.message) return (-1, ex.message) else: if not status_configure: cmd = self.__stunnel_config.GetClientCommand(appliance_id) logging.debug('Command Executed %s' % cmd) if cmd is None: return (-1, 'Command generation failed') (status_connect, message) = self.__os.Execute(cmd) if status_connect: logging.error('Connect to %s appliance failed %d %s' % (appliance_id, status_connect, message)) else: (status_reachable, message) = self.Status(appliance_id) if not status_reachable: logging.info('Connect to %s appliance success %d %s' % (appliance_id, status_reachable, message)) logging.error('Connect to %s appliance failed %d %s' % (appliance_id, status_reachable, message)) return (status_connect, message) logging.error('Connect to %s appliance failed - invalid id' %
'Exception in getting the chap secret file created') return (status_chap_secrets, message) file_name = self.__stunnel_config.GetStunnelConfigFileName( appliance_id) chroot_file_name = ('%s%s') % ( fed_stunnel_config.STUNNEL_CLIENT_CHROOT, file_name) (status_configure, message) = (self.__stunnel_config.GetStunnelConfigurationInFile( appliance_id, chroot_file_name)) except fed_network_config.FederationConfigException, ex: logging.error('Exception in getting configuration %s' % ex.message) return (-1, ex.message) else: if not status_configure: cmd = self.__stunnel_config.GetClientCommand(appliance_id) logging.debug('Command Executed %s' % cmd) if cmd is None: return (-1, 'Command generation failed') (status_connect, message) = self.__os.Execute(cmd) if status_connect: logging.error('Connect to %s appliance failed %d %s' % (appliance_id, status_connect, message)) else: (status_reachable, message) = self.Status(appliance_id) if not status_reachable: logging.info('Connect to %s appliance success %d %s' % (appliance_id, status_reachable, message)) logging.error('Connect to %s appliance failed %d %s' % (appliance_id, status_reachable, message)) return (status_connect, message) logging.error('Connect to %s appliance failed - invalid id' %
def _CheckForEmail(regex, email_list, max_depth=2):
  """Recursively search expn'd email addresses for a pattern.

  Runs as a breadth-first search: each expansion's @google.com members are
  collected and expanded in turn, in batches of at most MAX_EXPN_ARGS.  The
  search stops when an expansion yields no @google.com addresses, or when
  max_depth reaches 0 (protection against mail loops and very deep chains
  of mailing lists).

  Args:
    regex: A compiled Pattern object matched against the expn output.
    email_list: List of email addresses/usernames to expand and check.
    max_depth: Remaining number of recursive expansion levels allowed.

  Returns:
    True if the expn output (at any reached depth) matches regex, else False.
  """
  addresses = ' '.join([_CleanForShell(addr) for addr in email_list])
  logging.debug("Searching '%s' for /%s/", addresses, regex.pattern)
  (status, expn_result) = _GET_STATUS_OUTPUT(
      '%s %s' % (FLAGS.expn_command, addresses))
  if status != 0:
    # A "User unknown" is common (people leave); keep going in that case,
    # give up on any other expn failure.
    if '>... User unknown>' not in expn_result:
      logging.info('Error running expn %s, got: %s', addresses, expn_result)
      return False
  logging.debug('For %s found:\n%s', addresses, expn_result)
  # Match the regex directly against this level's expansion first.
  match = regex.search(expn_result)
  if match:
    logging.debug('Found %s == /%s/', match.group(0), regex)
    return True
  # No direct match; stop here if we are out of recursion budget.
  if max_depth <= 0:
    return False
  max_depth -= 1
  # Recurse on the @google.com addresses found, since only those will
  # expand further.  Expand in batches of MAX_EXPN_ARGS.
  batch = []
  for line in set(expn_result.splitlines()):
    at_pos = line.find('@google.com')
    if at_pos < 0:
      continue
    batch.append(line[:at_pos])
    if len(batch) >= MAX_EXPN_ARGS:
      if _CheckForEmail(regex, batch, max_depth):
        return True
      del batch[:]
  # Expand whatever is left over after the last full batch.
  if batch:
    return _CheckForEmail(regex, batch, max_depth)
  return False
def testHttpLib2(self):
  """Sanity check that the httplib2 module is importable and loaded."""
  # Lazy %-style logging args instead of eager string formatting;
  # the emitted message is identical.
  logging.debug('httplib2 loaded from %s', httplib2)
def RemoteCommandString(machine, commandline, **kwargs):
  """Build a command string for remote execution of a command via ssh.

  Supported keyword arguments:
    "alarm"       maximum wait time in seconds (applied remotely and locally)
    "alarmpath"   override both the local and remote alarm commands
    "lalarmpath"  override the local alarm command only
    "ralarmpath"  override the remote alarm command only
    "alarmargs"   extra arguments for the alarm command
    "sshpath"     the remote shell command (default 'ssh')
    "sshargs"     extra ssh arguments
    "user"        remote user account (prepended as 'user@' to machine)
    "cd"          working directory for the remote process
    "output"      if set, stderr joins stdout instead of both being discarded
    "console"     terminal program used to run the command (e.g. 'xterm')
    "limitcore"   adds 'ulimit -c <n>' to the remote command string
    "limitfiles"  adds 'ulimit -n <n>' to the remote command string

  By default there is no alarm and output is directed to /dev/null.

  Returns:
    The fully quoted shell command string.

  Raises:
    Exception: on an unknown keyword, or when 'alarmpath' is combined with
        'lalarmpath' or 'ralarmpath'.
  """
  user = ''
  alarmtime = 0
  alarmargs = ''
  lalarmpath = '/root/google/setup/alarm'
  ralarmpath = '/root/google/setup/alarm'
  chdir = None
  output = '>/dev/null 2>&1'
  sshpath = 'ssh'
  sshargs = '-n -P -o BatchMode=yes'
  console = 0
  limitcore = None
  limitfiles = None
  if 'alarmpath' in kwargs and ('lalarmpath' in kwargs or
                                'ralarmpath' in kwargs):
    # BUG FIX: the old message named 'alarmpath' three times; it now names
    # the keywords that actually conflict.
    raise Exception(
        "'alarmpath' and ('lalarmpath' or 'ralarmpath') kwargs are exclusive")
  for key in kwargs.keys():
    if 'alarm' == key:
      alarmtime = int(kwargs[key])
    elif 'alarmpath' == key:
      lalarmpath = kwargs[key]
      ralarmpath = kwargs[key]
    elif 'lalarmpath' == key:
      lalarmpath = kwargs[key]
    elif 'ralarmpath' == key:
      ralarmpath = kwargs[key]
    elif 'alarmargs' == key:
      alarmargs = kwargs[key]
    elif 'sshpath' == key:
      sshpath = kwargs[key]
    elif 'sshargs' == key:
      sshargs = '%s' % kwargs[key]
    elif 'cd' == key:
      chdir = kwargs[key]
    elif 'output' == key:
      output = '2>&1'
    elif 'user' == key:
      # BUG FIX: "'******' % kwargs[key]" raised TypeError (format string
      # has no conversion specifier).  The value is prepended to the machine
      # name in the ssh command below, so the intended form is 'user@'.
      user = '%s@' % kwargs[key]
    elif 'console' == key:
      console = kwargs[key]
    elif 'limitcore' == key:
      limitcore = kwargs[key]
    elif 'limitfiles' == key:
      limitfiles = kwargs[key]
    else:
      raise Exception('no such keyword argument: %s' % key)
  cmd = '. /etc/profile'
  if chdir:
    cmd = cmd + ' && cd %s' % (commands.mkarg(chdir))
  if limitcore is not None:
    cmd = cmd + ' && ulimit -c %d' % limitcore
  if limitfiles is not None:
    cmd = cmd + ' && ulimit -n %d' % limitfiles
  cmd = ('%s && %s %s' % (cmd, JoinUsingMkarg(commandline), output))
  # The innermost alarm kills the process on the remote machine; the outer
  # alarm protects against network faults.
  if alarmtime:
    cmd = '%s %s %d sh -c %s' % (ralarmpath, alarmargs, alarmtime,
                                 commands.mkarg(cmd))
  # The ssh command itself.
  cmd = '%s %s %s%s %s' % (sshpath, sshargs, user, machine,
                           commands.mkarg(cmd))
  if alarmtime:
    cmd = '%s %s %d sh -c %s' % (lalarmpath, alarmargs, alarmtime,
                                 commands.mkarg(cmd))
  if console:
    # No mkarg here: xterm/konsole pass the command straight to exec().
    cmd = '%s -T %s -e %s' % (console, machine, cmd)
  logging.debug('running: %s' % cmd)
  return cmd
def RemoteCommandString(machine, commandline, **kwargs):
  """Build a command string for remote execution of a command via ssh.

  Supported keyword arguments:
    "alarm"       maximum wait time in seconds (applied remotely and locally)
    "alarmpath"   override both the local and remote alarm commands
    "lalarmpath"  override the local alarm command only
    "ralarmpath"  override the remote alarm command only
    "alarmargs"   extra arguments for the alarm command
    "sshpath"     the remote shell command (default 'ssh')
    "sshargs"     extra ssh arguments
    "user"        remote user account (prepended as 'user@' to machine)
    "cd"          working directory for the remote process
    "output"      if set, stderr joins stdout instead of both being discarded
    "console"     terminal program used to run the command (e.g. 'xterm')
    "limitcore"   adds 'ulimit -c <n>' to the remote command string
    "limitfiles"  adds 'ulimit -n <n>' to the remote command string

  By default there is no alarm and output is directed to /dev/null.

  Returns:
    The fully quoted shell command string.

  Raises:
    Exception: on an unknown keyword, or when 'alarmpath' is combined with
        'lalarmpath' or 'ralarmpath'.
  """
  user = ''
  alarmtime = 0
  alarmargs = ''
  lalarmpath = '/root/google/setup/alarm'
  ralarmpath = '/root/google/setup/alarm'
  chdir = None
  output = '>/dev/null 2>&1'
  sshpath = 'ssh'
  sshargs = '-n -P -o BatchMode=yes'
  console = 0
  limitcore = None
  limitfiles = None
  if 'alarmpath' in kwargs and ('lalarmpath' in kwargs or
                                'ralarmpath' in kwargs):
    # BUG FIX: the old message named 'alarmpath' three times; it now names
    # the keywords that actually conflict.
    raise Exception(
        "'alarmpath' and ('lalarmpath' or 'ralarmpath') kwargs are exclusive")
  for key in kwargs.keys():
    if 'alarm' == key:
      alarmtime = int(kwargs[key])
    elif 'alarmpath' == key:
      lalarmpath = kwargs[key]
      ralarmpath = kwargs[key]
    elif 'lalarmpath' == key:
      lalarmpath = kwargs[key]
    elif 'ralarmpath' == key:
      ralarmpath = kwargs[key]
    elif 'alarmargs' == key:
      alarmargs = kwargs[key]
    elif 'sshpath' == key:
      sshpath = kwargs[key]
    elif 'sshargs' == key:
      sshargs = '%s' % kwargs[key]
    elif 'cd' == key:
      chdir = kwargs[key]
    elif 'output' == key:
      output = '2>&1'
    elif 'user' == key:
      # BUG FIX: "'******' % kwargs[key]" raised TypeError (format string
      # has no conversion specifier).  The value is prepended to the machine
      # name in the ssh command below, so the intended form is 'user@'.
      user = '%s@' % kwargs[key]
    elif 'console' == key:
      console = kwargs[key]
    elif 'limitcore' == key:
      limitcore = kwargs[key]
    elif 'limitfiles' == key:
      limitfiles = kwargs[key]
    else:
      raise Exception('no such keyword argument: %s' % key)
  cmd = '. /etc/profile'
  if chdir:
    cmd = cmd + ' && cd %s' % (commands.mkarg(chdir))
  if limitcore is not None:
    cmd = cmd + ' && ulimit -c %d' % limitcore
  if limitfiles is not None:
    cmd = cmd + ' && ulimit -n %d' % limitfiles
  cmd = ('%s && %s %s' % (cmd, JoinUsingMkarg(commandline), output))
  # The innermost alarm kills the process on the remote machine; the outer
  # alarm protects against network faults.
  if alarmtime:
    cmd = '%s %s %d sh -c %s' % (ralarmpath, alarmargs, alarmtime,
                                 commands.mkarg(cmd))
  # The ssh command itself.
  cmd = '%s %s %s%s %s' % (sshpath, sshargs, user, machine,
                           commands.mkarg(cmd))
  if alarmtime:
    cmd = '%s %s %d sh -c %s' % (lalarmpath, alarmargs, alarmtime,
                                 commands.mkarg(cmd))
  if console:
    # No mkarg here: xterm/konsole pass the command straight to exec().
    cmd = '%s -T %s -e %s' % (console, machine, cmd)
  logging.debug('running: %s' % cmd)
  return cmd