def blocking_shutdown():
    """Simulate a server shutdown, optionally followed by a reboot.

    Runs the real shutdown sequence, then (when `simulate_shutdown` is
    set) sleeps until at least MIN_SHUTDOWN_DURATION has elapsed so the
    simulated outage is long enough for observers to notice.

    NOTE: `self`, `simulate_shutdown` and `reboot` are closed over from
    the enclosing method.
    """
    log.info("%s beginning shutdown" % self.fqdn)
    self._enter_shutdown()

    shutdown_start_time = IMLDateTime.utcnow()
    self.shutdown_agent()
    self._cluster.leave(self.nodename)
    shutdown_end_time = IMLDateTime.utcnow()

    # Elapsed wall time of the real work; .seconds is adequate since a
    # shutdown is far shorter than a day.
    shutdown_time = (shutdown_end_time - shutdown_start_time).seconds

    # Pad the elapsed time out to MIN_SHUTDOWN_DURATION, but only when a
    # realistic shutdown is being simulated.
    while shutdown_time < MIN_SHUTDOWN_DURATION:
        if not simulate_shutdown:
            break

        remaining_delay = MIN_SHUTDOWN_DURATION - shutdown_time
        log.info("%s, sleeping for %d seconds on shutdown" %
                 (self.fqdn, remaining_delay))

        shutdown_time += remaining_delay
        time.sleep(remaining_delay)

    self._exit_shutdown()

    log.info("%s shutdown complete" % self.fqdn)

    if reboot:
        log.info("%s rebooting" % self.fqdn)
        self.startup(simulate_shutdown)
def start_session(self):
    """Build the initial corosync-style status message for this fake server.

    Production code learns which corosync peers are online by shelling out
    to `crm_mon --one-shot --as-xml`.  The simulator instead inspects every
    server known to the cluster state and reports each one's online flag.
    """
    cluster_nodes = self._server._cluster.state['nodes']
    log.debug('cluster nodes: %s' % cluster_nodes)

    node_states = []
    for peer in cluster_nodes.values():
        node_states.append((peer['nodename'], peer['online']))
    log.debug('Nodes and state: %s' % node_states)

    timestamp = IMLDateTime.utcnow().isoformat()
    status_message = self.get_test_message(utc_iso_date_str=timestamp,
                                           node_status_list=node_states)
    log.debug(status_message)
    return status_message
def _acquire_token(self, url, username, password, credit_count, duration=None, preferred_profile=None):
    """
    Localised use of the REST API to acquire a server registration token.
    """
    session = self._get_authenticated_session(url, username, password)

    # Choose a server profile: the named one when requested, otherwise
    # simply the first profile the API returns.
    profiles = session.get("%sapi/server_profile/" % url).json()['objects']
    if preferred_profile:
        matches = [p for p in profiles if p['name'] == preferred_profile]
        if not matches:
            raise RuntimeError("No such profile: %s" % preferred_profile)
        profile = matches[0]
    else:
        profile = profiles[0]

    payload = {'credits': credit_count, 'profile': profile['resource_uri']}
    if duration is not None:
        payload['expiry'] = (IMLDateTime.utcnow() + duration).isoformat()

    response = session.post("%sapi/registration_token/" % url,
                            data=json.dumps(payload))
    assert response.ok
    return response.json()['secret']
def test_creation(self):
    """
    During a POST, only expiry should be settable
    """
    # A POST carrying only the mandatory profile is OK
    response = self.api_client.post(self.RESOURCE_PATH, data={'profile': self.profile['resource_uri']})
    self.assertHttpCreated(response)

    # Whitelisted attributes may be supplied at creation time and must
    # round-trip through the API unchanged.
    expiry_value = (IMLDateTime.utcnow() +
                    datetime.timedelta(seconds = 120)).replace(microsecond = 0)
    allowed = {'expiry': expiry_value.isoformat(), 'credits': 129}
    for field, value in allowed.items():
        post_data = {'profile': self.profile['resource_uri'], field: value}
        response = self.api_client.post(self.RESOURCE_PATH, data=post_data)
        self.assertHttpCreated(response)
        self.assertEqual(self.deserialize(response)[field], value)

    # Every other attribute must be rejected with a 400
    forbidden = {'secret': "X" * SECRET_LENGTH * 2, 'cancelled': True, 'id': 0}
    for field, value in forbidden.items():
        post_data = {'profile': self.profile['resource_uri'], field: value}
        response = self.api_client.post(self.RESOURCE_PATH, data=post_data)
        self.assertHttpBadRequest(response)
def _send_package_data(self, host, data):
    """Drive UpdateScan.update_packages() with `data` under the 'agent' repo key."""
    # UpdateScan is a weird class, we have to instantiate and assign a host
    # to run the function we're testing.
    self.update_scan = UpdateScan()
    self.update_scan.host = host
    self.update_scan.started_at = IMLDateTime.utcnow()
    self.update_scan.update_packages({'agent': data})
def validate_token(key, credits=1):
    """
    Validate that a token is valid to authorize a setup/register operation:
     * Check it's not expired
     * Check it has some credits

    :param credits: number of credits to decrement if valid
    :return 2-tuple (<http response if error, else None>, <registration token if valid, else None>)
    """
    try:
        # Look up and decrement inside a transaction so concurrent
        # registrations each consume credits against the committed row.
        with transaction.commit_on_success():
            token = RegistrationToken.objects.get(secret = key)
            if not token.credits:
                log.warning("Attempt to register with exhausted token %s" % key)
                return HttpForbidden(), None
            else:
                # Decrement .credits
                # NOTE(review): credits are consumed before the expiry /
                # cancelled checks below, so an expired or cancelled token
                # still loses credits here — confirm this is intentional.
                RegistrationToken.objects.filter(secret = key).update(credits = token.credits - credits)
    except RegistrationToken.DoesNotExist:
        log.warning("Attempt to register with non-existent token %s" % key)
        return HttpForbidden(), None
    else:
        now = IMLDateTime.utcnow()

        if token.expiry < now:
            log.warning("Attempt to register with expired token %s (now %s, expired at %s)" % (key, now, token.expiry))
            return HttpForbidden(), None
        elif token.cancelled:
            log.warning("Attempt to register with cancelled token %s" % key)
            return HttpForbidden(), None

        return None, token
def test_update_properties(self):
    """Smoke-test UpdateScan.update_properties with None and a simple dict."""
    update_scan = UpdateScan()
    update_scan.host = synthetic_host('test1')
    update_scan.started_at = IMLDateTime.utcnow()

    # A freshly-created synthetic host carries an empty JSON properties blob.
    self.assertEqual(update_scan.host.properties, '{}')

    # Both a None payload and a populated dict should be accepted without raising.
    update_scan.update_properties(None)
    update_scan.update_properties({'key': 'value'})
def setUp(self):
    """Ensure a ManagedHost (plus profile, LNet config and client cert)
    exists for CLIENT_NAME, creating the rows only on the first run."""
    if not ManagedHost.objects.filter(fqdn=self.CLIENT_NAME).count():
        # First run: create (or reuse) a minimal managed server profile.
        if not ServerProfile.objects.filter(name="TestAgentRpcProfile").count():
            server_profile = ServerProfile.objects.create(
                name="TestAgentRpcProfile",
                ui_name="Profile created to TestAgentRpc can work",
                managed=True,
                worker=False,
                ntp=True,
                corosync=True,
                corosync2=False,
            )
        else:
            server_profile = ServerProfile.objects.get(name="TestAgentRpcProfile")

        self.host = ManagedHost.objects.create(
            fqdn=self.CLIENT_NAME,
            nodename=self.CLIENT_NAME,
            address=self.CLIENT_NAME,
            state="lnet_down",
            state_modified_at=IMLDateTime.utcnow(),
            server_profile=server_profile,
        )
        LNetConfiguration.objects.create(host=self.host, state="lnet_down")

        ClientCertificate.objects.create(host=self.host, serial=self.CLIENT_CERT_SERIAL)
    else:
        # Subsequent runs reuse the host created earlier.
        self.host = ManagedHost.objects.get(fqdn=self.CLIENT_NAME)

    super(TestAgentRpc, self).setUp()
def _scan(self, initial=False):
    """Collect a full audit snapshot of this host for the manager.

    Returns a dict of metrics, properties, mounts and — when the
    management package is installed — HA resource locations.
    """
    scan_started = IMLDateTime.utcnow().isoformat()
    audit = local.LocalAudit()

    # resource_locations is only available with the management package;
    # fall back to None when it is absent.
    try:
        from chroma_agent.action_plugins import manage_targets
        resource_locations = manage_targets.get_resource_locations()
    except ImportError:
        resource_locations = None

    current_mounts = self._scan_mounts()

    # FIXME: HYD-1095 we should be sending a delta instead of a full dump every time
    # FIXME: At this time the 'capabilities' attribute is unused on the manager
    report = {
        "started_at": scan_started,
        "agent_version": agent_version(),
        "capabilities": plugin_manager.ActionPluginManager().capabilities,
        "metrics": audit.metrics(),
        "properties": audit.properties(),
        "mounts": current_mounts,
        "resource_locations": resource_locations,
    }
    return report
def poll(self):
    """Return current health, demoting to unhealthy on contact timeout.

    Only an already-healthy host can time out here; the transition back
    to healthy happens in update() when the agent makes contact again.
    """
    if self._healthy:
        # NOTE(review): assumes update() has run at least once so that
        # last_contact is a datetime rather than its initial None.
        time_since_contact = IMLDateTime.utcnow() - self.last_contact
        if time_since_contact > datetime.timedelta(
                seconds=self.CONTACT_TIMEOUT):
            self.update_health(False)

    return self._healthy
def update(self, boot_time, client_start_time):
    """
    :return A boolean, true if the agent should be sent a SESSION_TERMINATE_ALL: indicates
            whether a fresh client run (different start time) is seen.
    """
    self.last_contact = IMLDateTime.utcnow()

    # A changed boot time means the server rebooted since we last saw it.
    if boot_time is not None and boot_time != self._boot_time:
        if self._boot_time is not None:
            # Only raise a reboot event for reboots we actually observed,
            # not for the very first boot_time we ever learn.
            HostRebootEvent.register_event(alert_item=self._host,
                                           boot_time=boot_time,
                                           severity=logging.WARNING)
            log.warning("Server %s rebooted at %s" % (self.fqdn, boot_time))
        self._boot_time = boot_time
        job_scheduler_notify.notify(self._host, self._boot_time, {'boot_time': boot_time})

    require_reset = False
    # A changed start time means the agent process itself restarted; its
    # sessions are gone, so the caller must terminate ours as well.
    if client_start_time is not None and client_start_time != self._client_start_time:
        if self._client_start_time is not None:
            log.warning("Agent restart on server %s at %s" % (self.fqdn, client_start_time))
            require_reset = True

        self._client_start_time = client_start_time

    # Any successful contact restores health.
    if not self._healthy:
        self.update_health(True)

    return require_reset
def fake_log_message(message):
    """Persist and return a synthetic LogMessage row carrying `message`."""
    fields = {
        'datetime': IMLDateTime.utcnow(),
        'message': message,
        'severity': 0,
        'facility': 0,
        'tag': "",
        'message_class': LogMessage.get_message_class(message),
    }
    return LogMessage.objects.create(**fields)
def process_response(self, request, response):
    """Django middleware hook: log a Bunyan-style JSON line describing the
    request/response pair, for responses whose content type is in TYPES."""
    content_type = response['Content-Type']

    # Skip content types we do not log.
    if not any(x in content_type for x in TYPES.values()):
        return response

    def get_meta(prop):
        # Fetch a request.META key, defaulting to ''.
        return request.META.get(prop, '')

    def try_loads(string, default):
        # Parse JSON bodies; non-JSON content types or unparseable
        # payloads yield `default` unchanged.
        if TYPES['JSON'] not in content_type:
            return default

        try:
            return json.loads(string)
        except ValueError:
            return default

    request_data = {
        'status': response.status_code,
        'content_length': get_meta('CONTENT_LENGTH'),
        'user_agent': get_meta('HTTP_USER_AGENT').decode('utf-8', 'replace'),
        'body': try_loads(request.body, ''),
        'response': try_loads(response.content, response.content),
        'request_headers':
        dict([(key, val) for key, val in request.META.items()
              if key.isupper()]),
        'response_headers':
        dict([(key.upper().replace('-', '_'), val)
              for key, val in response.items()]),
        # The following are required by Bunyan.
        'hostname': get_meta('HTTP_X_FORWARDED_HOST'),
        'name': 'Request Log',
        'time': IMLDateTime.utcnow().isoformat(),
        'v': 0,
        'pid': os.getpid(),
        'msg': 'Request made to {0} {1}'.format(request.method,
                                                request.get_full_path()),
        # Bunyan log level is python's log level + 10
        'level': settings.LOG_LEVEL + 10,
    }

    logger.debug(json.dumps(request_data))

    return response
def inject_log_message(self, message):
    """Queue a syslog-style entry for later collection from this server."""
    log.debug("Injecting log message %s/%s" % (self.fqdn, message))

    entry = {
        'source': 'cluster_sim',
        'severity': 1,
        'facility': 1,
        'message': message,
        'datetime': IMLDateTime.utcnow().isoformat()
    }
    self._log_messages.append(entry)
def __init__(self, fqdn, boot_time, client_start_time):
    """Track contact/health state for the agent running on `fqdn`."""
    # Updated on every agent contact by update(); None until then.
    self.last_contact = None
    self.fqdn = fqdn
    self._healthy = False
    self._host = ManagedHost.objects.get(fqdn=self.fqdn)
    # NOTE(review): `_last_contact` is written here but the visible code
    # elsewhere reads `last_contact` — this assignment looks dead or
    # typo'd; confirm nothing else reads `_last_contact`.
    self._last_contact = IMLDateTime.utcnow()
    self._boot_time = boot_time
    self._client_start_time = client_start_time
class Meta:
    # Tastypie resource configuration for RegistrationToken.
    object_class = RegistrationToken
    authentication = AnonymousAuthentication()
    authorization = TokenAuthorization()
    serializer = DateSerializer()
    list_allowed_methods = ["get", "post"]
    detail_allowed_methods = ["patch", "get"]
    fields = ["id", "secret", "expiry", "credits", "cancelled", "profile", "register_command"]
    resource_name = "registration_token"
    # NOTE(review): IMLDateTime.utcnow() runs once, at class-definition
    # (import) time, so the expiry__gt comparison value is frozen at
    # process start even though the queryset itself re-executes lazily —
    # confirm expiry is also enforced per-request elsewhere.
    queryset = RegistrationToken.objects.filter(cancelled=False, expiry__gt=IMLDateTime.utcnow(), credits__gt=0)
    validation = RegistrationTokenValidation()
    always_return_data = True
def _scan(self, initial=False):
    """Gather an audit snapshot (metrics and properties) for the manager."""
    scan_started = IMLDateTime.utcnow().isoformat()
    audit = local.LocalAudit()

    # FIXME: HYD-1095 we should be sending a delta instead of a full dump every time
    # FIXME: At this time the 'capabilities' attribute is unused on the manager
    report = dict(
        started_at=scan_started,
        agent_version=agent_version(),
        capabilities=plugin_manager.ActionPluginManager().capabilities,
        metrics=audit.metrics(),
        properties=audit.properties(),
    )
    return report
def test_priorities(self):
    """
    Test that messages are consumed for POST based on the priority of the payload
    (data plane), or at the highest priority if no payload (control plane)
    """
    client = mock.Mock()
    client._fqdn = "test_server"
    client.boot_time = IMLDateTime.utcnow()
    client.start_time = IMLDateTime.utcnow()

    writer = HttpWriter(client)

    def inject_messages(*args, **kwargs):
        # A control plane message
        writer.put(
            Message("SESSION_CREATE_REQUEST", "plugin_fuz", None, None,
                    None))

        # Data plane messages at each priority, inserted lowest-first so
        # the POST ordering below demonstrates priority sorting.
        low_body = DevicePluginMessage("low", PRIO_LOW)
        normal_body = DevicePluginMessage("normal", PRIO_NORMAL)
        high_body = DevicePluginMessage("high", PRIO_HIGH)
        writer.put(Message("DATA", "plugin_foo", low_body, "foo", 0))
        writer.put(Message("DATA", "plugin_bar", normal_body, "foo", 1))
        writer.put(Message("DATA", "plugin_baz", high_body, "foo", 2))

    inject_messages()
    writer.send()

    # A single POST should carry all four messages.
    self.assertEqual(client.post.call_count, 1)
    messages = client.post.call_args[0][0]["messages"]
    self.assertEqual(len(messages), 4)

    # First two messages (of equal priority) arrive in order or insertion
    self.assertEqual(messages[0]["plugin"], "plugin_fuz")
    self.assertEqual(messages[1]["plugin"], "plugin_baz")

    # Remaining messages arrive in priority order
    self.assertEqual(messages[2]["plugin"], "plugin_bar")
    self.assertEqual(messages[3]["plugin"], "plugin_foo")
def pop_log_messages(self):
    """Drain queued log entries, padded with `log_rate` synthetic lines."""
    drained = self._log_messages
    stamp = datetime.datetime.now()

    # Append synthetic chatter so consumers observe a steady log volume.
    for seq in range(0, self.log_rate):
        drained.append({
            'source': 'cluster_sim',
            'severity': 1,
            'facility': 1,
            'message': "%s %s %s" % (self.fqdn, stamp, seq),
            'datetime': IMLDateTime.utcnow().isoformat()
        })

    self._log_messages = []
    return drained
def start_session(self):
    """Return the opening payload for a simulated systemd_journal session."""
    banner = 'Lustre: Cluster simulator systemd_journal session start %s %s' % (
        self._server.fqdn, datetime.datetime.now())

    line = {
        'source': 'cluster_sim',
        'severity': 1,
        'facility': 1,
        'message': banner,
        'datetime': IMLDateTime.utcnow().isoformat() + 'Z'
    }
    return {'log_lines': [line]}
class Meta:
    # Tastypie resource configuration for RegistrationToken.
    object_class = RegistrationToken
    authentication = AnonymousAuthentication()
    authorization = TokenAuthorization()
    serializer = DateSerializer()
    list_allowed_methods = ['get', 'post']
    detail_allowed_methods = ['patch', 'get']
    fields = [
        'id', 'secret', 'expiry', 'credits', 'cancelled', 'profile',
        'register_command'
    ]
    resource_name = 'registration_token'
    # NOTE(review): IMLDateTime.utcnow() runs once, at class-definition
    # (import) time, so the expiry__gt comparison value is frozen at
    # process start even though the queryset itself re-executes lazily —
    # confirm expiry is also enforced per-request elsewhere.
    queryset = RegistrationToken.objects.filter(
        cancelled=False, expiry__gt=IMLDateTime.utcnow(), credits__gt=0)
    validation = RegistrationTokenValidation()
    always_return_data = True
def __init__(self, url, action_plugins, device_plugins, server_properties, crypto):
    """Agent-side client: wires up the writer/reader threads and sessions.

    :param url: manager endpoint URL
    :param action_plugins: registry of action (command) plugins
    :param device_plugins: registry of device (monitoring) plugins
    :param server_properties: supplies this server's fqdn/nodename/boot_time
    :param crypto: certificate/key material passed to the base client
    """
    super(AgentClient, self).__init__(url, crypto)

    self._fqdn = server_properties.fqdn
    self._nodename = server_properties.nodename
    self.boot_time = server_properties.boot_time
    # Process start time; the manager uses start-time changes to detect
    # agent restarts.
    self.start_time = IMLDateTime.utcnow()

    self.action_plugins = action_plugins
    self.device_plugins = device_plugins
    self.writer = HttpWriter(self)
    self.reader = HttpReader(self)
    self.sessions = SessionTable(self)

    self.stopped = threading.Event()
def test_readonly_attributes(self):
    """Test that attributes which should be readonly reject PATCHes"""
    response = self.api_client.post(self.RESOURCE_PATH, data={'profile': self.profile['resource_uri']})
    self.assertHttpCreated(response)
    original_object = self.deserialize(response)
    token_uri = original_object['resource_uri']

    readonly_test_values = {
        'secret': "X" * SECRET_LENGTH * 2,
        'expiry': IMLDateTime.utcnow(),
        'credits': 666
    }
    for field, attempted in readonly_test_values.items():
        patch_response = self.api_client.patch(token_uri,
                                               data={field: attempted})
        self.assertHttpBadRequest(patch_response)

        # The stored object must be untouched by the rejected PATCH
        fetched = self.deserialize(self.api_client.get(token_uri))
        self.assertDictEqual(fetched, original_object)
def blocking_startup():
    """Simulate a server boot, padding to MIN_STARTUP_DURATION when asked.

    NOTE: `self` and `simulate_bootup` are closed over from the
    enclosing method.
    """
    log.info("%s beginning startup" % self.fqdn)
    startup_time = 0

    # Mixing shutdown/startup state here is a little weird, but
    # this only happens when it's explicitly requested by a caller
    # that wants full server simulation. The end result is that
    # simulating a reboot is easier if we deal with waiting for
    # the shutdown to finish here before moving on to startup.
    while simulate_bootup and self.shutting_down:
        log.info("%s, waiting for shutdown (%d)" % (self.fqdn, startup_time))
        startup_time += 1
        time.sleep(1)

    # This may happen to the losing thread(s) during startup after
    # a multi-PDU power cycle.
    if self.starting_up:
        log.info(
            "%s exiting startup because another thread is already doing it?"
            % self.fqdn)
        return

    self._enter_startup()

    self.boot_time = IMLDateTime.utcnow()

    # Pad the elapsed time out to MIN_STARTUP_DURATION, but only when a
    # realistic boot is being simulated.
    while startup_time < MIN_STARTUP_DURATION:
        if not simulate_bootup:
            break

        remaining_delay = MIN_STARTUP_DURATION - startup_time
        log.info("%s, sleeping for %d seconds on boot" %
                 (self.fqdn, remaining_delay))

        startup_time += remaining_delay
        time.sleep(remaining_delay)

    self.start_agent()
    self._cluster.join(self.nodename)

    self._exit_startup()
    log.info("%s startup complete" % self.fqdn)
def process_response(self, request, response):
    """Django middleware hook: log a Bunyan-style JSON line describing the
    request/response pair, for responses whose content type is in TYPES."""
    content_type = response["Content-Type"]

    # Skip content types we do not log.
    if not any(x in content_type for x in TYPES.values()):
        return response

    def get_meta(prop):
        # Fetch a request.META key, defaulting to "".
        return request.META.get(prop, "")

    def try_loads(string, default):
        # Parse JSON bodies; non-JSON content types or unparseable
        # payloads yield `default` unchanged.
        if TYPES["JSON"] not in content_type:
            return default

        try:
            return json.loads(string)
        except ValueError:
            return default

    request_data = {
        "status": response.status_code,
        "content_length": get_meta("CONTENT_LENGTH"),
        "user_agent": get_meta("HTTP_USER_AGENT").decode("utf-8", "replace"),
        "body": try_loads(request.body, ""),
        "response": try_loads(response.content, response.content),
        "request_headers":
        dict([(key, val) for key, val in request.META.items()
              if key.isupper()]),
        "response_headers":
        dict([(key.upper().replace("-", "_"), val)
              for key, val in response.items()]),
        # The following are required by Bunyan.
        "hostname": get_meta("HTTP_X_FORWARDED_HOST"),
        "name": "Request Log",
        "time": IMLDateTime.utcnow().isoformat(),
        "v": 0,
        "pid": os.getpid(),
        "msg": "Request made to {0} {1}".format(request.method,
                                                request.get_full_path()),
        # Bunyan log level is python's log level + 10
        "level": settings.LOG_LEVEL + 10,
    }

    logger.debug(json.dumps(request_data))

    return response
def update_session(self, first=False):
    """Produce a periodic plugin update payload for this fake server.

    :param first: when True, include the full package inventory (only
        sent on the first update of a session); otherwise packages is None.
    """
    mounts = [{
        'device': res['device_path'],
        'fs_uuid': res['uuid'],
        'mount_point': res['mount_point'],
        'recovery_status': {}
    } for res in self._server._cluster.get_running_resources(
        self._server.nodename)]

    packages = self._server.scan_packages() if first else None

    return {
        'resource_locations': self._server._cluster.resource_locations(),
        'capabilities': ['manage_targets'],
        'metrics': {
            'raw': {
                'node': self._server.get_node_stats(),
                'lustre': self._server.get_lustre_stats(),
                'lustre_client_mounts': self._server.lustre_client_mounts
            }
        },
        'packages': packages,
        'mounts': mounts,
        "properties": {
            'zfs_installed': False,
            'distro': 'CentOS',
            'distro_version': 6.6,
            'python_version_major_minor': 2.6,
            'python_patchlevel': 6,
            'kernel_version': '2.6.32-504.8.1.el6_lustre.x86_64'
        },
        'started_at': IMLDateTime.utcnow().isoformat() + 'Z',
        'agent_version': 'dummy'
    }
def _tzaware_future_offset(offset):
    """Return a timezone-aware UTC timestamp `offset` seconds in the future."""
    return IMLDateTime.utcnow() + datetime.timedelta(seconds=offset)
def _call(cls, host, cmd, args):
    """Dispatch a mocked agent command for `host` and return a canned result.

    Records every invocation in cls.calls / cls.host_calls, optionally
    simulates failure (cls.succeed / cls.fail_commands), then returns
    whatever a real agent would have produced for `cmd`.
    """
    cls.calls.append((cmd, args))
    cls.host_calls[host.fqdn].append((cmd, args))

    # Global and per-command simulated failures.
    if not cls.succeed:
        cls._fail(host.fqdn)

    if (cmd, args) in cls.fail_commands:
        cls._fail(host.fqdn)

    mock_server = cls.mock_servers[host.address]

    log.info("invoke_agent %s %s %s" % (host, cmd, args))

    # This isn't really accurate because lnet is scanned asynchonously, but it is as close as we can get today
    # Fixme: Also I know think this is writing to the wrong thing and should be changing the mock_server entries.
    # to lnet_up, I guess the mock_server needs an lnet state really, rather than relying on nids present.
    if cmd == "load_lnet":
        synthetic_lnet_configuration(host, mock_server["nids"])
        return
    elif cmd == "device_plugin":
        # Only returns nid info today.
        return create_synthetic_device_info(host, mock_server, args["plugin"])
    elif cmd == "format_target":
        # Honour an explicit -I inode size in mkfsoptions if present.
        inode_size = None
        if "mkfsoptions" in args:
            inode_arg = re.search("-I (\d+)", args["mkfsoptions"])
            if inode_arg:
                inode_size = int(inode_arg.group(1).__str__())

        if inode_size is None:
            # A 'foo' value
            inode_size = 777

        return {
            "uuid": uuid.uuid1().__str__(),
            "inode_count": 666,
            "inode_size": inode_size,
            "filesystem_type": "ext4",
        }
    elif cmd == "stop_target":
        ha_label = args["ha_label"]
        target = ManagedTarget.objects.get(ha_label=ha_label)
        return agent_result_ok
    elif cmd == "start_target":
        ha_label = args["ha_label"]
        target = ManagedTarget.objects.get(ha_label=ha_label)
        return agent_result(target.primary_host.nodename)
    elif cmd == "register_target":
        # Assume mount paths are "/mnt/testfs-OST0001" style
        mount_point = args["mount_point"]
        label = re.search("/mnt/([^\s]+)", mount_point).group(1)
        return {"label": label}
    elif cmd == "detect_scan":
        return mock_server["detect-scan"]
    elif cmd == "install_packages":
        return agent_result([])
    elif cmd == "register_server":
        # Drive the real registration HTTP endpoint via a test client,
        # temporarily bypassing CSRF authentication.
        api_client = TestApiClient()
        old_is_authenticated = CsrfAuthentication.is_authenticated
        try:
            CsrfAuthentication.is_authenticated = mock.Mock(
                return_value=True)
            api_client.client.login(username="******", password="******")
            fqdn = cls.mock_servers[host]["fqdn"]

            response = api_client.post(
                args["url"] + "register/%s/" % args["secret"],
                data={
                    "address": host,
                    "fqdn": fqdn,
                    "nodename": cls.mock_servers[host]["nodename"],
                    "capabilities": ["manage_targets"],
                    "version": cls.version,
                    "csr": helper.generate_csr(fqdn),
                },
            )
            assert response.status_code == 201
            registration_data = Serializer().deserialize(
                response.content, format=response["Content-Type"])
            print("MockAgent.invoke returning %s" % registration_data)
            return registration_data
        finally:
            CsrfAuthentication.is_authenticated = old_is_authenticated
    elif cmd == "kernel_status":
        return {
            "running": "fake_kernel-0.1",
            "required": "fake_kernel-0.1",
            "available": ["fake_kernel-0.1"]
        }
    elif cmd == "selinux_status":
        return {"status": "Disabled"}
    elif cmd == "reboot_server":
        now = IMLDateTime.utcnow()
        log.info("rebooting %s; updating boot_time to %s" % (host, now))
        job_scheduler_notify.notify(host, now, {"boot_time": now})
    elif cmd == "which zfs":
        return 1
    # The branches below match substrings of ad-hoc shell/python snippets
    # that real agents are asked to run during host validation.
    elif "import platform;" in cmd:
        return "0"
    elif "socket.gethostbyname(socket.gethostname())" in cmd:
        if not mock_server["tests"]["hostname_valid"]:
            return "127.0.0.1"
        else:
            return mock_server["address"]
    elif "print os.uname()[1]" in cmd:
        return "%s\n%s" % (mock_server["nodename"], mock_server["fqdn"])
    elif "socket.getfqdn()" in cmd:
        return mock_server["fqdn"]
    elif "ping" in cmd:
        result = (0 if mock_server["tests"]["reverse_resolve"] else
                  2) + (0 if mock_server["tests"]["reverse_ping"] else 1)
        return result
    elif "ElectricFence" in cmd:
        return 0 if mock_server["tests"]["yum_can_update"] else 1
    elif "openssl version -a" in cmd:
        return 0 if mock_server["tests"]["openssl"] else 1
    elif "curl -k https" in cmd:
        return json.dumps({"host_id": host.id, "command_id": 0})
    elif cmd in [
            "configure_pacemaker",
            "unconfigure_pacemaker",
            "configure_target_store",
            "unconfigure_target_store",
            "deregister_server",
            "restart_agent",
            "shutdown_server",
            "host_corosync_config",
            "check_block_device",
            "set_conf_param",
            "purge_configuration",
    ]:
        # Commands with no interesting result.
        return None
    elif cmd in [
            "configure_target_ha",
            "unconfigure_target_ha",
            "start_lnet",
            "stop_lnet",
            "unload_lnet",
            "unconfigure_lnet",
            "configure_corosync",
            "unconfigure_corosync",
            "start_corosync",
            "stop_corosync",
            "start_pacemaker",
            "stop_pacemaker",
            "configure_ntp",
            "unconfigure_ntp",
            "import_target",
            "export_target",
            "set_profile",
            "update_profile",
            "failover_target",
            "failback_target",
            "configure_network",
            "open_firewall",
            "close_firewall",
    ]:
        # Commands that report plain success.
        return agent_result_ok
    elif cmd == "get_corosync_autoconfig":
        return agent_result({
            "interfaces": {
                "eth0": {
                    "dedicated": False,
                    "ipaddr": "192.168.0.1",
                    "prefix": 24
                },
                "eth1": {
                    "dedicated": True,
                    "ipaddr": "10.10.0.01",
                    "prefix": 24
                },
            },
            "mcast_port": "666",
        })
    else:
        assert False, (
            "The %s command is not in the known list for MockAgentRpc. Please add it then when people modify it a simple text search will let them know to change it here as well."
            % cmd)
def __init__(self):
    """Open an unbuffered trace log and record the launch context."""
    # buffering=0 makes writes unbuffered so the trace survives a crash.
    # NOTE(review): unbuffered text-mode files are a Python 2 feature;
    # under Python 3, open(..., 'w', buffering=0) raises ValueError.
    self.tracefile = open('trace.log', 'w', buffering=0)

    # NOTE(review): `args` and `options` come from enclosing/global scope —
    # confirm they are defined before this is constructed.
    self.tracefile.write("Started at %s: %s %s\n" % (IMLDateTime.utcnow(), args, options))
def _call(cls, host, cmd, args):
    """Dispatch a mocked agent command for `host` and return a canned result.

    Records every invocation in cls.calls / cls.host_calls, optionally
    simulates failure (cls.succeed / cls.fail_commands), then returns
    whatever a real agent would have produced for `cmd`.

    Fixes over the previous revision of this dispatcher:
      * A missing comma caused implicit string concatenation of
        'export_target' 'set_profile' into the bogus entry
        'export_targetset_profile', so 'set_profile' was never matched and
        fell through to the assert at the bottom; the comma is restored.
      * The duplicated 'import_target'/'export_target' entries in the same
        list are removed (membership semantics are unchanged).
    """
    cls.calls.append((cmd, args))
    cls.host_calls[host].append((cmd, args))

    # Global and per-command simulated failures.
    if not cls.succeed:
        cls._fail(host.fqdn)

    if (cmd, args) in cls.fail_commands:
        cls._fail(host.fqdn)

    mock_server = cls.mock_servers[host.address]

    log.info("invoke_agent %s %s %s" % (host, cmd, args))

    # This isn't really accurate because lnet is scanned asynchonously, but it is as close as we can get today
    # Fixme: Also I know think this is writing to the wrong thing and should be changing the mock_server entries.
    # to lnet_up, I guess the mock_server needs an lnet state really, rather than relying on nids present.
    if cmd == "load_lnet":
        synthetic_lnet_configuration(host, mock_server['nids'])
        return
    elif cmd == "device_plugin":
        # Only returns nid info today.
        return create_synthetic_device_info(host, mock_server, args['plugin'])
    elif cmd == 'format_target':
        # Honour an explicit -I inode size in mkfsoptions if present.
        inode_size = None
        if 'mkfsoptions' in args:
            inode_arg = re.search(r"-I (\d+)", args['mkfsoptions'])
            if inode_arg:
                inode_size = int(inode_arg.group(1).__str__())

        if inode_size is None:
            # A 'foo' value
            inode_size = 777

        return {
            'uuid': uuid.uuid1().__str__(),
            'inode_count': 666,
            'inode_size': inode_size,
            'filesystem_type': 'ext4'
        }
    elif cmd == 'stop_target':
        ha_label = args['ha_label']
        target = ManagedTarget.objects.get(ha_label=ha_label)
        return agent_result_ok
    elif cmd == 'start_target':
        ha_label = args['ha_label']
        target = ManagedTarget.objects.get(ha_label=ha_label)
        return agent_result(target.primary_host.nodename)
    elif cmd == 'register_target':
        # Assume mount paths are "/mnt/testfs-OST0001" style
        mount_point = args['mount_point']
        label = re.search(r"/mnt/([^\s]+)", mount_point).group(1)
        return {'label': label}
    elif cmd == 'detect_scan':
        return mock_server['detect-scan']
    elif cmd == 'install_packages':
        return agent_result([])
    elif cmd == 'register_server':
        # Drive the real registration HTTP endpoint via a test client,
        # temporarily bypassing CSRF authentication.
        api_client = TestApiClient()
        old_is_authenticated = CsrfAuthentication.is_authenticated
        try:
            CsrfAuthentication.is_authenticated = mock.Mock(
                return_value=True)
            api_client.client.login(username='******', password='******')
            fqdn = cls.mock_servers[host]['fqdn']

            response = api_client.post(
                args['url'] + "register/%s/" % args['secret'],
                data={
                    'address': host,
                    'fqdn': fqdn,
                    'nodename': cls.mock_servers[host]['nodename'],
                    'capabilities': ['manage_targets'],
                    'version': cls.version,
                    'csr': helper.generate_csr(fqdn)
                })
            assert response.status_code == 201
            registration_data = Serializer().deserialize(
                response.content, format=response['Content-Type'])
            print("MockAgent.invoke returning %s" % registration_data)
            return registration_data
        finally:
            CsrfAuthentication.is_authenticated = old_is_authenticated
    elif cmd == 'kernel_status':
        return {
            'running': 'fake_kernel-0.1',
            'required': 'fake_kernel-0.1',
            'available': ['fake_kernel-0.1']
        }
    elif cmd == 'reboot_server':
        now = IMLDateTime.utcnow()
        log.info("rebooting %s; updating boot_time to %s" % (host, now))
        job_scheduler_notify.notify(host, now, {'boot_time': now})
    # The branches below match substrings of ad-hoc shell/python snippets
    # that real agents are asked to run during host validation.
    elif 'socket.gethostbyname(socket.gethostname())' in cmd:
        if not mock_server['tests']['hostname_valid']:
            return '127.0.0.1'
        else:
            return mock_server['address']
    elif 'print os.uname()[1]' in cmd:
        return '%s\n%s' % (mock_server['nodename'], mock_server['fqdn'])
    elif 'socket.getfqdn()' in cmd:
        return mock_server['fqdn']
    elif 'ping' in cmd:
        result = ((0 if mock_server['tests']['reverse_resolve'] else 2) +
                  (0 if mock_server['tests']['reverse_ping'] else 1))
        return result
    elif 'python-fedora-django' in cmd:
        return 0 if mock_server['tests']['yum_valid_repos'] else 1
    elif 'ElectricFence' in cmd:
        return 0 if mock_server['tests']['yum_can_update'] else 1
    elif 'curl -k https' in cmd:
        return json.dumps({'host_id': host.id, 'command_id': 0})
    elif cmd in [
            'configure_pacemaker', 'unconfigure_pacemaker',
            'configure_target_store', 'unconfigure_target_store',
            'deregister_server', 'restart_agent', 'shutdown_server',
            'host_corosync_config', 'check_block_device', 'set_conf_param',
            'purge_configuration'
    ]:
        # Commands with no interesting result.
        return None
    elif cmd in [
            'configure_target_ha', 'unconfigure_target_ha', 'start_lnet',
            'stop_lnet', 'unload_lnet', 'unconfigure_lnet',
            'configure_corosync', 'unconfigure_corosync', 'start_corosync',
            'stop_corosync', 'start_pacemaker', 'stop_pacemaker',
            'configure_ntp', 'unconfigure_ntp', 'import_target',
            'export_target', 'set_profile', 'update_profile',
            'failover_target', 'failback_target', 'configure_network',
            'open_firewall', 'close_firewall'
    ]:
        # Commands that report plain success.
        return agent_result_ok
    elif cmd == 'get_corosync_autoconfig':
        return agent_result({
            'interfaces': {
                'eth0': {
                    'dedicated': False,
                    'ipaddr': '192.168.0.1',
                    'prefix': 24
                },
                'eth1': {
                    'dedicated': True,
                    'ipaddr': '10.10.0.01',
                    'prefix': 24
                }
            },
            'mcast_port': '666'
        })
    else:
        assert False, "The %s command is not in the known list for MockAgentRpc. Please add it then when people modify it a simple text search will let them know to change it here as well." % cmd