Example #1
def syscmd(command, expected_retcode=0):
	""" Execute `command` in a subshell and grab the return value to test it.
		If the test fails, an exception is raised.
		The exception must be an instance of exceptions.SystemCommandError or
		an inherited class.
	"""

	logging.progress('syscmd(): executing "%s" in a subshell.' % command)

	result = os.system(command)
	# result is a 16-bit integer, decomposed as:
	#	- a low byte: the signal number (with its high bit set if a core was dumped)
	#	- a high byte: the real exit status, if the signal is 0
	# see the os.wait() documentation for more details.

	retcode = 0
	signal  = result & 0x00FF
	if signal == 0:
		retcode = (result & 0xFF00) >> 8

	logging.progress('syscmd(): "%s" exited with code %s (%s).' % (command,
		retcode, result))

	if retcode != expected_retcode:
		raise exceptions.SystemCommandError(command, retcode)
	if signal != 0:
		raise exceptions.SystemCommandSignalError(command, signal)
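
For reference, the bit twiddling above matches what the standard os module already exposes as dedicated helpers; a minimal sketch of the same decoding, assuming a POSIX platform (decode_wait_status is an illustrative name, not part of the original code):

import os

def decode_wait_status(status):
    # os.system() returns a wait()-style 16-bit status on POSIX.
    if os.WIFSIGNALED(status):
        # terminated by a signal; WCOREDUMP() says whether a core was dumped
        return {'signal': os.WTERMSIG(status), 'core': os.WCOREDUMP(status)}
    # normal exit: recover the real exit code
    return {'retcode': os.WEXITSTATUS(status)}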
Example #2
 def _waitForAllocation(self):
     logging.info("Waiting for all nodes to be allocated...")
     INTERVAL = 5
     lastOverallPercent = 0
     lastOverallPercentChange = time.time()
     while self._allocation.dead() is None:
         try:
             self._allocation.wait(timeout=INTERVAL)
             return
         except:
             if self._overallPercent != lastOverallPercent:
                 lastOverallPercent = self._overallPercent
                 lastOverallPercentChange = time.time()
                 if lastOverallPercent < 100:
                     msg = "Allocation %(percent)s%% complete"
                 else:
                     msg = "Allocation %(percent)s%% complete, but still waiting for the 'go-ahead' " \
                           "from Rackattack..."
                 logging.progress(msg, dict(percent=lastOverallPercent))
             if time.time() > lastOverallPercentChange + self._NO_PROGRESS_TIMEOUT:
                 raise Exception(
                     "Allocation progress hanged at %(percent)s%% for %(seconds)s seconds",
                     dict(percent=lastOverallPercent,
                          seconds=self._NO_PROGRESS_TIMEOUT))
     raise Exception(self._allocation.dead())
Example #3
def use_log_file(log_file, process_name=None):
	""" replace stdout/stderr with the logfile.
		stderr becomes /dev/null.
	"""

	if process_name is None:
		my_process_name = stylize(ST_NAME, 'foundations.use_log_file')
	else:
		my_process_name = stylize(ST_NAME, process_name)

	logging.progress(_(u'{0}({1}): using {2} as log channel.').format(
					my_process_name, stylize(ST_UGID, os.getpid()),
					stylize(ST_PATH, log_file if log_file else 'stdout')))

	if log_file:
		out_log = file(log_file, 'ab+')

	else:
		out_log = sys.stdout

	dev_null = file(os.devnull, 'rw')

	# not needed.
	#sys.stdout.flush()
	#sys.stderr.flush()

	# also not needed.
	#os.close(sys.stdin.fileno())
	#os.close(sys.stdout.fileno())
	#os.close(sys.stderr.fileno())

	os.dup2(dev_null.fileno(), sys.stdin.fileno())
	os.dup2(out_log.fileno(), sys.stdout.fileno())
	os.dup2(out_log.fileno(), sys.stderr.fileno())
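
The same stream redirection can be written without the Python 2-only file() builtin; a minimal sketch using open(), assuming log_path points at the desired log file (the name is illustrative):

import os
import sys

def redirect_std_streams(log_path=None):
    # open the log file in append mode, or fall back to the current stdout
    out_log = open(log_path, 'ab+') if log_path else sys.stdout
    dev_null = open(os.devnull, 'r')

    # stdin now reads from /dev/null; stdout and stderr both go to the log
    os.dup2(dev_null.fileno(), sys.stdin.fileno())
    os.dup2(out_log.fileno(), sys.stdout.fileno())
    os.dup2(out_log.fileno(), sys.stderr.fileno())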
Example #4
 def _checkNodeStats( self ):
     logging.progress( "Verifying Node stats" )
     hostnames = [ host[ 'name' ] for host in self.vm( 'controller' ).openStackREST().nodeList() ]
     for hostname in hostnames:
         response = self._sendRequestAndUpdateCookies( requests.get, self._restApiUrl + "/nodes/%(hostname)s/stats" % { 'hostname' : hostname } )
         TS_ASSERT( response.ok )
         logging.progress( "Got response %(response)s", dict( response = response.content ) )
Example #5
def fork_licorn_daemon(pid_to_wake=None):
	""" Start the Licorn® daemon (fork it). """

	try:
		logging.progress(_(u'Forking licornd.'))

		if os.fork() == 0:
			# NOTE: we need to force a replace, in case the existing daemon is
			# in a bad posture, e.g. stuck in a restart procedure: it is not
			# responding to the calling CLI process, but is waiting for a
			# restart. Thus the currently starting daemon would fail, the
			# waiting CLI would never be awakened, and it would time out and
			# show the "connect timeout, something is bad" message to the
			# administrator, which is a false negative, because meanwhile the
			# restarting daemon will be ready and in a perfect state.
			# All of this is a timing problem, and hopefully the soft
			# :arg:`--replace` flag solves this corner case.
			args = ['licornd', '--replace']

			if pid_to_wake:
				args.extend(['--pid-to-wake1', str(pid_to_wake)])

			os.execv('/usr/sbin/licornd', args)

	except (IOError, OSError), e:
		logging.error(_(u'licornd fork failed: errno {0} ({1}).').format(
														e.errno, e.strerror))
Example #6
	def browse_callback(sdRef, flags, interfaceIndex, errorCode, serviceName,
														regtype, replyDomain):
		if errorCode != pybonjour.kDNSServiceErr_NoError:
			return

		caller = stylize(ST_NAME, current_thread().name)

		if not (flags & pybonjour.kDNSServiceFlagsAdd):
			logging.warning(_(u'{0}: service {1} removed!').format(caller,
																serviceName))
			return

		logging.progress(_(u'{0}: service {1} added; now resolving…').format(
														caller, serviceName))

		resolve_sdRef = pybonjour.DNSServiceResolve(0,
													interfaceIndex,
													serviceName,
													regtype,
													replyDomain,
													resolve_callback)

		try:
			current_wait = 0
			while current_wait < 5:
				waited.append(1)
				current_wait += 1

				ready = select.select([resolve_sdRef], [], [], timeout)

				if resolve_sdRef in ready[0]:
					pybonjour.DNSServiceProcessResult(resolve_sdRef)

		finally:
			resolve_sdRef.close()
Example #7
 def run(self):
     TS_ASSERT(self._isClientPingable())
     host.pxeclient.node.disablePXE()
     logging.progress("PXE has been disabled for %s",
                      host.pxeclient.node.primaryMACAddress())
     host.pxeclient.node.coldRestart()
     logging.progress(
         "Client is being rebooted. sleeping for %d seconds to make sure client stays down",
         self.MAX_TIME_FOR_NODE_TO_ANSWER_PING_AFTER_BOOTING_FROM_PXE)
     time.sleep(
         self.MAX_TIME_FOR_NODE_TO_ANSWER_PING_AFTER_BOOTING_FROM_PXE)
     TS_ASSERT_EQUALS(self._isClientPingable(), False)
     logging.progress(
         "Client is not reachable. looks like its PXE has been disabled successfully"
     )
     host.pxeclient.node.enablePXE()
     logging.progress("PXE has been enabled for %s",
                      host.pxeclient.node.primaryMACAddress())
     host.pxeclient.node.coldRestart()
     logging.progress(
         "Client is being rebooted. making sure that client is booting nicely"
     )
     TS_ASSERT_PREDICATE_TIMEOUT(
         self._isClientPingable,
         TS_timeout=self.MAX_TIME_FOR_NODE_TO_ANSWER_PING_AFTER_BOOTING_FROM_PXE)
Example #8
def run():
	global looper_thread
	looper_thread = RoundRobinEventLooper(tname='EventLooper')
	looper_thread.start()

	logging.progress(_(u'{0}: Licorn® Event Loop started.').format(
									stylize(ST_NAME, current_thread().name)))
Example #9
 def executeTestScenario(self):
     abortTestTimeout = getattr(self._test, 'ABORT_TEST_TIMEOUT', self.ABORT_TEST_TIMEOUT_DEFAULT)
     timeoutthread.TimeoutThread(abortTestTimeout, self._testTimedOut)
     logging.info("Test timer armed. Timeout in %(seconds)d seconds", dict(seconds=abortTestTimeout))
     discardinglogger.discardLogsOf(self.DISCARD_LOGGING_OF)
     self._hosts = dict()
     suite.findHost = self.host
     suite.hosts = self.hosts
     if not hasattr(self._test, 'host'):
         self._test.host = self.host
     if not hasattr(self._test, 'hosts'):
         self._test.hosts = self.hosts
     logging.info("Allocating Nodes")
     self._allocation = rackattackallocation.RackAttackAllocation(self._test.HOSTS)
     logging.progress("Done allocating nodes")
     try:
         self._setUp()
         try:
             self._run()
         finally:
             self._tearDown()
     finally:
         try:
             self._allocation.free()
         except:
             logging.exception("Unable to free allocation")
Example #10
	def resolve_callback(sdRef, flags, interfaceIndex, errorCode, fullname,
												hosttarget, port, txtRecord):

		if errorCode == pybonjour.kDNSServiceErr_NoError:
			logging.progress(_(u'{0}: successfully found a Licorn® server via '
								u'Bonjour at address {1}.').format(
									stylize(ST_NAME, current_thread().name),
									stylize(ST_URL, 'pyro://{0}:{1}/'.format(
										hosttarget[:-1]
											if hosttarget.endswith('.')
											else hosttarget, port))))

			txtRecord = pybonjour.TXTRecord.parse(txtRecord)

			# Store host with uuid / group, to help find our eventual favorite.
			resolved.append((txtRecord['uuid'], txtRecord['group'], hosttarget, port))

			if favorite and txtRecord['uuid'] == favorite:
				found.set()

			else:
				# We already have one server in the resolved list.
				# If we have waited long enough, go with it and take
				# the best we can.
				if len(waited) > 12:
					found.set()
Example #11
 def executeTestScenario(self):
     discardinglogger.discardLogsOf(self.DISCARD_LOGGING_OF)
     self._hosts = dict()
     suite.findHost = self.host
     suite.hosts = self.hosts
     if not hasattr(self._test, 'host'):
         self._test.host = self.host
     if not hasattr(self._test, 'hosts'):
         self._test.hosts = self.hosts
     logging.info("Allocating Nodes")
     self._allocation = rackattackallocation.RackAttackAllocation(self._test.HOSTS)
     timeoutthread.TimeoutThread(self._testTimeout, self._testTimedOut)
     logging.info("Test timer armed. Timeout in %(seconds)d seconds", dict(seconds=self._testTimeout))
     logging.progress("Done allocating nodes")
     try:
         self._setUp()
         try:
             self._run()
         finally:
             self._tearDown()
     finally:
         try:
             self._allocation.free()
         except:
             logging.exception("Unable to free allocation")
Example #12
 def beamADirectory(self):
     host.it.ssh.run.script("mkdir /tmp/aDirectory")
     host.it.ssh.run.script("echo hello > /tmp/aDirectory/bye")
     host.it.logbeam.beam("/tmp/aDirectory/")
     expectedFile = os.path.join(log.config.LOGS_DIRECTORY, "it", "bye")
     logging.progress("Expected file: %(expectedFile)s",
                      dict(expectedFile=expectedFile))
     TS_ASSERT(os.path.exists(expectedFile))
Example #13
def sleep(interval, reason):
    logging.progress(
        "Sleeping for %(interval).3f, Reason: '%(reason)s'",
        dict(interval=interval, reason=reason))
    time.sleep(interval)
    logging.progress(
        "Done sleeping for %(interval).3f, Reason: '%(reason)s'",
        dict(interval=interval, reason=reason))
Example #14
 def _checkBackendsStats( self ):
     logging.progress( "Checking backends types" )
     backends = self._sendRequestAndUpdateCookies( requests.get, "%s/%s" % ( self._restApiUrl, 'storage/backends' ) )
     TS_ASSERT( backends.ok )
     TS_ASSERT_EQUALS( len( backends.json() ), 1 )
     TS_ASSERT_EQUALS( backends.json()[ 0 ][ 'name' ], 'rack-storage' )
     rackstorage_backend = self._sendRequestAndUpdateCookies( requests.get, "%s/%s/%s" % ( self._restApiUrl, 'storage/backends', 'rack-storage' ) )
     TS_ASSERT( rackstorage_backend.ok )
     TS_ASSERT_EQUALS( rackstorage_backend.json()[ 'name' ], 'rack-storage' )
Example #15
 def _verifyDeleteNodeAgent2( self ):
     logging.progress( "Verifying agent2 node deletion" )
     hostName = self.vm( 'agent2' ).openStackAgent().hostName()
     TS_ASSERT_THROWS_ANYTHING( self.vm( 'controller' ).openStackREST().deleteNode, hostName )
     self.vm( 'agent2' ).openStackAPI().stopNovaCompute()
     self.vm( 'controller' ).openStackController().subNovaComputeUpdateTimeForHost( hostName, 60 )
     self.vm( 'controller' ).openStackAPI().waitForNovaComputeToBeDownOnHost( hostName )
     self.vm( 'controller' ).openStackREST().deleteNode( hostName )
     nodes = self.vm( 'controller' ).openStackREST().nodeList()
     TS_ASSERT_EQUALS( [ n for n in nodes if n[ 'name' ] == hostName ], [] )
Example #16
 def _verifyNodedServices( self ):
     logging.progress( "Verifying service monitoring enabling/disabling" )
     serviceName = 'dummyNode'
     him = self.vm( 'agent1' )
     TS_ASSERT_PREDICATE_TIMEOUT( lambda: him.consul().get( him.openStackAgent().hostName(), 'status' ) == 'ready', TS_timeout = 300, TS_interval = 3 )
     endpoint = "/nodes/%s/services" % him.openStackAgent().hostName()
     data = { 'service-name' : serviceName }
     self._sendRequestAndUpdateCookies( requests.post, self._restApiUrl + endpoint, data = data )
     TS_ASSERT_PREDICATE_TIMEOUT( lambda: serviceName in self._sendRequestAndUpdateCookies( requests.get, self._restApiUrl + endpoint ).json(), TS_timeout = 60, TS_interval = 3 )
     self._sendRequestAndUpdateCookies( requests.delete, self._restApiUrl + endpoint, data = data )
     TS_ASSERT_PREDICATE_TIMEOUT( lambda: serviceName not in self._sendRequestAndUpdateCookies( requests.get, self._restApiUrl + endpoint ).json(), TS_timeout = 60, TS_interval = 3 )
Example #17
 def _run(self):
     logging.progress("Running test in '%(filename)s'", dict(filename=self._filename()))
     try:
         self._test.run()
         logging.success(
             "Test completed successfully, in '%(filename)s', with %(asserts)d successfull asserts",
             dict(filename=self._filename(), asserts=suite.successfulTSAssertCount()))
         print ".:1: Test passed"
     except:
         logging.exception("Test failed, in '%(filename)s'", dict(filename=self._filename()))
         suite.outputExceptionStackTrace()
         raise
Example #18
 def _run(self):
     logging.progress("Running test in '%(filename)s'", dict(filename=self._filename()))
     assert hasattr(self._test, 'run'), "Test class must have a 'run' method"
     try:
         self._test.run()
         logging.progress(
             "Run completed successfully, in '%(filename)s', with %(asserts)d successfull asserts",
             dict(filename=self._filename(), asserts=suite.successfulTSAssertCount()))
     except:
         logging.exception("Test failed, in '%(filename)s'", dict(filename=self._filename()))
         suite.outputExceptionStackTrace()
         raise
Example #19
 def _verifyNodes( self ):
     logging.progress( "Verifying node list" )
     nodes = self.vm( 'controller' ).openStackREST().nodeList( getVms = True )
     TS_ASSERT_EQUALS( set( [ node[ 'status' ] for node in nodes ] ), set( [ 'up' ] ) )
     TS_ASSERT_EQUALS( set( [ node[ 'name' ] for node in nodes ] ),
                       set( self.HOSTNAMES ) )
     vmOnNodes = set()
     for node in nodes:
         vmsOnNode = set( node[ 'vms' ] )
         TS_ASSERT_EQUALS( vmsOnNode, set( self.vm( 'controller' ).openStackREST().getNodeResponse( node[ 'name'], getVms = True ).json()[ 'vms' ] ) )
         TS_ASSERT( vmOnNodes.isdisjoint( vmsOnNode ) )
         vmOnNodes.update( vmsOnNode )
     TS_ASSERT_EQUALS( vmOnNodes, set( self._vmNameToId.values() ) )
Example #20
def stop():
	""" Completely stop the event queue. This is meant to be done only
		one time, when the daemon stops. """
	# make sure the EventManager thread will stop when we tell
	# it to do so.
	looper_thread.stop()

	# enqueue a super high priority job containing None, which will
	# unblock the EventManager run_action_method().
	events_queue.put((-1, None))

	logging.progress(_(u'{0}: Licorn® Event Loop stopped.').format(
									stylize(ST_NAME, current_thread().name)))
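
The (-1, None) entry relies on the usual sentinel pattern for unblocking a queue consumer; a minimal, self-contained sketch of that pattern (not the actual RoundRobinEventLooper code):

import threading
try:
    import queue              # Python 3
except ImportError:
    import Queue as queue     # Python 2

events_queue = queue.PriorityQueue()

def worker():
    while True:
        priority, job = events_queue.get()    # blocks until an item arrives
        if job is None:
            break                             # sentinel received: stop looping
        job()                                 # run the real event handler

looper = threading.Thread(target=worker)
looper.start()
events_queue.put((-1, None))                  # lowest number = highest priority
looper.join()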
Example #21
def process_dir(bucket, _dir, delete_source, extf, path_move):
    
    logging.progress("Processing: %s" % _dir)
    
    parts=os.path.splitext(_dir)
    bn=os.path.basename(parts[0])
    filename="%s.%s" % (bn, extf)
    
    logging.progress("Generating: file '%s' in bucket '%s'" % (filename, bucket.name))

    k=S3Key(bucket)
    k.key=filename
    
    try:
        k.set_contents_from_string("")
    except:
        logging.warning("Can't generate file '%s' in bucket '%s'" % (filename, bucket.name))
        return
    
    if delete_source:
        logging.progress("Deleting: %s" % _dir)
        rmdir(_dir)
        return
    
    if path_move is not None:
        
        bn_src_dir=os.path.basename(_dir)
        ddir=os.path.join(path_move, bn_src_dir)
        rmdir(ddir)
        
        logging.progress("Moving: %s => %s" % (_dir, ddir))
        code, msg=move(_dir, ddir)
        if not code.startswith("ok"):
            logging.warning("Can't move '%s' to '%s': %s" % (_dir, ddir, msg))
Example #22
 def _run(self):
     logging.progress("Running test in '%(filename)s'", dict(filename=self._filename()))
     try:
         self._test.run()
         suite.anamnesis['testSucceeded'] = True
         logging.success(
             "Test completed successfully, in '%(filename)s', with %(asserts)d successfull asserts",
             dict(filename=self._filename(), asserts=suite.successfulTSAssertCount()))
         print ".:1: Test passed"
     except:
         suite.anamnesis['testFailed'] = True
         logging.exception("Test failed, in '%(filename)s'", dict(filename=self._filename()))
         suite.outputExceptionStackTrace()
         raise
Example #23
	def remove(self, key, value=None, dont_check=False):
		""" TODO. """

		if dont_check or self.has(key, value):
			if hasattr(self[key], '__iter__'):
				self[key].remove(value)
				if self[key] in ('', []):
					del self[key]
			else:
				del self[key]
			logging.progress('%s: %s configuration key %s with value %s' % (
				stylize(ST_PATH, self.name), stylize(ST_BAD, "removed"),
				stylize(ST_NAME, key), stylize(ST_NAME, value)))
			assert ltrace(TRACE_OBJECTS, "%s: removed '%s%s'" % (
				self.name, key, ' ' + value if value else ''))
Example #24
def unregister_collector(collector):

		assert ltrace_func(TRACE_EVENTS)

		with loop_lock:
			try:
				events_collectors.remove(collector)

				logging.progress(_(u'{0}: unregistered event '
									u'collector {1}.').format(
										stylize(ST_NAME, current_thread().name),
										stylize(ST_NAME, collector)))

			except ValueError:
				logging.exception(_(u'Error while trying to unregister '
												u'collector {0}'), collector)
Example #25
	def add(self, key, value, dont_check=False, replace=False):
		""" TODO. """
		if replace:
			self[key] = value
			logging.progress('%s: %s configuration key %s with value %s' % (
				stylize(ST_PATH, self.name), stylize(ST_OK, "modified"),
				stylize(ST_NAME, key), stylize(ST_NAME, value)))
			assert ltrace(TRACE_OBJECTS, "%s: overwritten '%s %s'" % (
				self.name, key, value))
		elif dont_check or not self.has(key, value):
			pyutils.add_or_dupe_enumeration(self, key, value)
			logging.progress('%s: %s configuration key %s with value %s' % (
				stylize(ST_PATH, self.name), stylize(ST_OK, "added"),
				stylize(ST_NAME, key), stylize(ST_NAME, value)))
			assert ltrace(TRACE_OBJECTS, "%s: added '%s %s'" % (
					self.name, key, value))
Example #26
def pidof(process_name):
	""" This works only on Linux...

		.. note:: the pidof feature works on /proc/%s/comm and
			matches only the exact word. There is no kind of fuzzy
			matching yet.

		.. versionadded:: 1.3
	"""

	pids = []

	if 'licornd' in process_name:
		# Licorn / Linux 3.x specific: we can match 'licornd/wmi'
		# faster than 'licornd-wmi', and in some cases the 'cmdline'
		# is empty, whereas the 'comm' is not.
		names = [ process_name, process_name.replace('/', '-') ]

	else:
		names = [ process_name ]

	for entry in os.listdir('/proc'):
		if entry.isdigit():
			try:

				if cgroup and open('/proc/%s/cpuset' % entry).read().strip() != cgroup:
					logging.progress(_(u'Skipped process @{0} which is not '
										u'in the same cgroup.').format(entry))
					continue

				try:
					# Linux 3.x only
					command_line1 = open('/proc/%s/comm' % entry).read().strip()
				except:
					command_line1 = ''

				command_line2 = open('/proc/%s/cmdline' % entry).read().strip()

				for pname in names:
					if pname == command_line1 or pname+'\0' in command_line2:
						pids.append(int(entry))

			except (IOError, OSError), e:
				# in rare cases, the process vanishes during iteration. This
				# is harmless. Any other error is not cool, raise it.
				if e.errno != errno.ENOENT:
					raise e
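
To see exactly what pidof() matches against, both /proc entries can be read for the current process; a minimal sketch (Linux only, purely illustrative):

import os

pid = os.getpid()
# 'comm' holds the bare executable name (possibly truncated), one word
comm = open('/proc/%s/comm' % pid).read().strip()
# 'cmdline' holds the full argument vector, NUL-separated
cmdline = open('/proc/%s/cmdline' % pid).read().split('\0')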
Example #27
 def _waitForAllocation(self):
     INTERVAL = 5
     lastOverallPercent = 0
     lastOverallPercentChange = time.time()
     while self._allocation.dead() is None:
         try:
             self._allocation.wait(timeout=INTERVAL)
             return
         except:
             if self._overallPercent != lastOverallPercent:
                 lastOverallPercent = self._overallPercent
                 lastOverallPercentChange = time.time()
                 logging.progress("Allocation %(percent)s%% complete", dict(percent=lastOverallPercent))
             if time.time() > lastOverallPercentChange + self._NO_PROGRESS_TIMEOUT:
                 raise Exception("Allocation progress hanged at %(percent)s%% for %(seconds)s seconds",
                                 dict(percent=lastOverallPercent, seconds=self._NO_PROGRESS_TIMEOUT))
     raise Exception(self._allocation.dead())
Example #28
 def run(self):
     shutil.rmtree(os.path.join(log.config.LOGS_DIRECTORY, "it"),
                   ignore_errors=True)
     expectedFile = os.path.join(log.config.LOGS_DIRECTORY, "it",
                                 "postmortem", 'df')
     serialLogFile = os.path.join(log.config.LOGS_DIRECTORY, "it",
                                  "postmortem", "serial.txt")
     uniqueString = 'time: ' + str(time.time())
     host.it.ssh.run.script("echo %s > /dev/console" % uniqueString)
     TS_ASSERT(not os.path.exists(expectedFile))
     TS_ASSERT(not os.path.exists(serialLogFile))
     host.it.logbeam.postMortem()
     logging.progress("Expected file: %(expectedFile)s",
                      dict(expectedFile=expectedFile))
     TS_ASSERT(os.path.exists(expectedFile))
     TS_ASSERT(os.path.exists(serialLogFile))
     contents = open(serialLogFile).read()
     TS_ASSERT(uniqueString in contents)
     self.useLogBeamFromLocal()
     self.beamADirectory()
Example #29
    def _checkNotFound( self ):
        logging.progress( "Verifying not found statuses" )
        BOGUS_ID = "THIS_IS_NOT_AN_ID_OF_ANYTHING_AND_NOT_EVEN_A_NAME_OF_A_NODE"

        response = self.vm( 'controller' ).openStackREST().getServerResponse( BOGUS_ID )
        logging.progress( "Got response %(response)s", dict( response = response.content ) )
        TS_ASSERT_EQUALS( response.status_code, ResourceNotFoundException.code )
        response = self.vm( 'controller' ).openStackREST().getNodeResponse( BOGUS_ID )
        logging.progress( "Got response %(response)s", dict( response = response.content ) )
        TS_ASSERT_EQUALS( response.status_code, ResourceNotFoundException.code )
        response = self.vm( 'controller' ).openStackREST().getVolumeResponse( BOGUS_ID )
        logging.progress( "Got response %(response)s", dict( response = response.content ) )
        TS_ASSERT_EQUALS( response.status_code, ResourceNotFoundException.code )
Example #30
def register_collector(collector):
	assert ltrace_func(TRACE_EVENTS)

	with loop_lock:
		events_collectors.append(collector)

		try:
			collector._setTimeout(3.0)

		except:
			# in case the event collector is inside the daemon, it is not
			# a pyro proxy, but just a thread. _setTimeout() will fail.
			pass

	logging.progress( _('{0}: registered event collector {1}.').format(
									stylize(ST_NAME, current_thread().name),
									stylize(ST_NAME, collector)))

	# we wait 6 seconds to send this special event, because all web clients
	# will take at most 5 seconds to reconnect to the WMI when it comes back.
	# This signal will tell them to resynchronize internal structures.
	LicornEvent('collector_reinit', collector=collector).emit(priorities.HIGH, delay=6.0)
Example #31
 def _createAllocations(self):
     rackattackToHostMap = self._createRackattackToHostMap(self._test.HOSTS)
     allocations = dict()
     for rackattack, hostsFromRackattack in rackattackToHostMap.iteritems():
         logging.progress(
             'Allocating %(_hosts)s from Rackattack %(_rackattack)s',
             dict(_hosts=hostsFromRackattack.keys(),
                  _rackattack=rackattack))
         try:
             allocations[
                 rackattack] = rackattackallocation.RackAttackAllocation(
                     hosts=hostsFromRackattack)
         except Exception:
             logging.error(
                 'failed to allocate from %(_rackattack)s, '
                 'freeing all allocations', dict(_rackattack=rackattack))
             for allocation in allocations.values():
                 self._tryFreeAllocation(allocation)
             raise
         logging.progress(
             'Finished allocating hosts from Rackattack %(_rackattack)s',
             dict(_rackattack=rackattack))
     return allocations
Example #32
def daemonize(log_file=None, close_all=False, process_name=None):
	""" UNIX double-fork magic to create a daemon.
		See Stevens' "Advanced Programming in the UNIX Environment"
		for details (ISBN 0201563177).


		.. versionadded:: 1.2.5
			this function doesn't write the pid file anymore; it's up to the
			calling process to do it. This makes things much more logical in
			the daemon.
	"""

	assert ltrace_func(TRACE_PROCESS, devel=True, level=2)

	if process_name is None:
		my_process_name = stylize(ST_NAME, 'foundations.daemonize')
	else:
		my_process_name = stylize(ST_NAME, process_name)

	logging.progress(_(u'{0}({1}): fork #1.').format(
		my_process_name, stylize(ST_UGID, os.getpid())))

	# decouple from parent environment
	os.chdir('/')
	os.chroot('/')
	os.umask(0)

	try:
		if os.fork() > 0:
			logging.progress(_(u'{0}({1}): exit parent #1.').format(
							my_process_name, stylize(ST_UGID, os.getpid())))
			sys.exit(0)

	except OSError, e:
		logging.error(_(u'{0}({1}): fork #1 failed: errno {2} ({3}).').format(
							my_process_name, stylize(ST_UGID, os.getpid()),
												e.errno, e.strerror))
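
For context, the double-fork sequence the docstring refers to looks roughly like this; a minimal sketch of the generic pattern, not the Licorn® implementation (this excerpt only shows the first fork):

import os
import sys

def daemonize_sketch():
    # first fork: give control back to the shell
    if os.fork() > 0:
        sys.exit(0)

    os.chdir('/')
    os.setsid()     # become a session leader, detached from the controlling tty
    os.umask(0)

    # second fork: the session leader exits, so the daemon can never
    # re-acquire a controlling terminal
    if os.fork() > 0:
        sys.exit(0)
    # from here on, the grandchild runs as the daemon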
Example #33
def refork_as_root_or_die(process_title='licorn-generic', prefunc=None,
															group='admins'):
	""" check if current user is root. if not, check if he/she is member of
		group "admins" and then refork ourselves with sudo, to gain root
		privileges, needed for Licorn® daemon.
		Do it with traditionnal syscalls, because the rest of Licorn® is not
		initialized if we run this function. """

	assert ltrace_func(TRACE_PROCESS)

	try:
		gmembers = getent.group(group).members

	except AttributeError:
		logging.error(_(u'group %s does not exist and we are not root, '
			u'aborting. Please manually relaunch this program with root '
			u'privileges to automatically create this group.') % group)

	if whoami() in gmembers:

		cmd = [ process_title ]
		cmd.extend(insert_ltrace())

		cmd.extend(sys.argv)

		if prefunc != None:
			prefunc()

		logging.progress(_(u'Re-exec() ourselves with sudo to gain root '
									u'privileges (execvp(%s)).') % cmd)

		os.execvp('sudo', cmd)

	else:
		raise exceptions.LicornRuntimeError(_(u'You are not a member of group '
							u'%s; cannot do anything for you, sorry!') % group)
Example #34
    def _checkStorageClusterAPI( self ):
        logging.progress( "Checking storage cluster API" )
        clusterStatus = self._sendRequestAndUpdateCookies( requests.get, "%s/storage/cluster" % self._restApiUrl )
        TS_ASSERT( clusterStatus.ok )
        TS_ASSERT_EQUALS( clusterStatus.json(), storagecluster.StatusEnum.UP )

        storageClusterNodes = self._sendRequestAndUpdateCookies( requests.get, "%s/storage/cluster/nodes" % self._restApiUrl )
        TS_ASSERT( storageClusterNodes.ok )
        for node in storageClusterNodes.json():
            nodeInfo = self._sendRequestAndUpdateCookies( requests.get, "%s/storage/cluster/nodes/%s" % ( self._restApiUrl, node[ 'name' ] ) )
            TS_ASSERT( nodeInfo.ok )
            TS_ASSERT_EQUALS( nodeInfo.json()[ 'status' ], storagecluster.StatusEnum.UP )

        storageClusterDisks = self._sendRequestAndUpdateCookies( requests.get, "%s/storage/cluster/disks" % self._restApiUrl )
        TS_ASSERT( storageClusterDisks.ok )
        for disk in storageClusterDisks.json():
            diskStatus = self._sendRequestAndUpdateCookies( requests.get, "%s/storage/cluster/disks/%s" % ( self._restApiUrl, disk[ 'id' ] ) )
            TS_ASSERT( diskStatus.ok )
            TS_ASSERT_EQUALS( diskStatus.json()[ 'status' ], storagecluster.StatusEnum.UP )

        for node in storageClusterNodes.json():
            getData = { 'node-name' : node[ 'name' ] }
            nodeDisksList = self._sendRequestAndUpdateCookies( requests.get, "%s/storage/cluster/disks" % ( self._restApiUrl ), data = getData )
            TS_ASSERT( nodeDisksList.ok )
Example #35
 def executeTestScenario(self):
     discardinglogger.discardLogsOf(self.DISCARD_LOGGING_OF)
     self._hosts = dict()
     suite.findHost = self.host
     suite.hosts = self.hosts
     if not hasattr(self._test, 'host'):
         self._test.host = self.host
     if not hasattr(self._test, 'hosts'):
         self._test.hosts = self.hosts
     if not hasattr(self._test, 'releaseHost'):
         self._test.releaseHost = self._releaseHost
     if not self.RUN_ON_DETACHED:
         logging.progress("Allocating hosts...")
         self._allocations = self._createAllocations()
         timeoutthread.TimeoutThread(self._testTimeout, self._testTimedOut)
         logging.info("Test timer armed. Timeout in %(seconds)d seconds",
                      dict(seconds=self._testTimeout))
         logging.progress("Done allocating hosts.")
     else:
         logging.progress("Attempting connection to detached nodes...")
     try:
         self._setUp()
         self._run()
     finally:
         self._tearDown()
         self._cleanUp()
         for allocation in self._allocations.values():
             wasAllocationFreedSinceAllHostsWereReleased = not bool(
                 allocation.nodes())
             if not wasAllocationFreedSinceAllHostsWereReleased:
                 try:
                     self._tryFreeAllocation(allocation)
                 except:
                     logging.exception(
                         "Unable to free allocation, hosts: "
                         "%(_nodes)s may still be allocated",
                         dict(_nodes=','.join([
                             node.id()
                             for node in allocation.nodes().values()
                         ])))
                     raise Exception('Unable to free allocation')
             else:
                 logging.info('Not freeing allocation')
Example #36
 def tearDown(self):
     logging.progress("enabling PXE for %s",
                      host.pxeclient.node.primaryMACAddress())
     host.pxeclient.node.enablePXE()
Example #37
 def _checkShutDownNotRunningServer( self ):
     logging.progress( "Check response status for shutting down a not running server" )
     response = self.vm( 'controller' ).openStackREST().sendShutDownServerRequest( self._vmNameToId[ 'vm1' ] )
     TS_ASSERT_EQUALS( response.status_code, ResourceNotFoundException.code )
Example #38
 def _checkClusterStats( self ):
     logging.progress( "Verifying Cluster stats" )
     response = self._sendRequestAndUpdateCookies( requests.get, self._restApiUrl + "/cluster/stats" )
     TS_ASSERT( response.ok )
     TS_ASSERT( any( val != 0 for val in self._getAllDictValuesRecursive( response.json() ) ) )
     logging.progress( "Got response %(response)s", dict( response = response.content ) )
Example #39
 def _checkDeleteNotShutDownServer( self ):
     logging.progress( "Check response status for deleting a not shut downed server" )
     response = self.vm( 'controller' ).openStackREST().sendDeleteServerRequest( self._vmNameToId[ 'vm1' ] )
     TS_ASSERT_EQUALS( response.status_code, BadRequestException.code )