def __init__(self, clientLabel, kw=None):
  """
  clientLabel: this *relatively short* string will be used to construct the
    temporary database name. It shouldn't contain any characters that would
    make it inappropriate for a database name (no spaces, etc.)

  kw: name of the keyword argument to add to the decorated function(s). Its
    value will be a reference to this instance of ManagedTempRepository.
    Ignored when this instance is used as a context manager. Defaults to
    kw=None to avoid having it added to the keyword args.
  """
  self._kw = kw

  self._unaffiliatedEngine = collectorsdb.getUnaffiliatedEngine()

  dbNameFromConfig = CollectorsDbConfig().get(
    self.REPO_SECTION_NAME, self.REPO_DATABASE_ATTR_NAME)

  self.tempDatabaseName = "{original}_{label}_{uid}".format(
    original=dbNameFromConfig, label=clientLabel, uid=uuid.uuid1().hex)

  # Create a Config patch to override the Repository database name
  self._configPatch = ConfigAttributePatch(
    self.REPO_CONFIG_NAME,
    self.REPO_BASE_CONFIG_DIR,
    values=((self.REPO_SECTION_NAME, self.REPO_DATABASE_ATTR_NAME,
             self.tempDatabaseName),))

  self._configPatchApplied = False

  self._attemptedToCreateDatabase = False
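
# Usage sketch (hypothetical test names, not verbatim from the test suite):
# per the docstring above, ManagedTempRepository may be used either as a
# decorator, where `kw` names the keyword argument that receives the patch
# instance, or as a context manager, where `kw` is ignored.

@ManagedTempRepository(clientLabel="MyTest", kw="tempRepo")
def testWithDecorator(tempRepo):
  # tempRepo.tempDatabaseName follows "<configured-db>_MyTest_<uuid1-hex>"
  pass


def testWithContextManager():
  with ManagedTempRepository(clientLabel="MyTest"):
    pass  # repository config now points at the temporary database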
def testTransientErrorRetryDecorator(self):
  # Set up the proxy. We'll patch the config later, so we need to cache the
  # values so that the original proxy may be restarted with the original
  # params.
  config = monitorsdb.MonitorsDbConfig()

  originalHost = config.get("repository", "host")
  originalPort = config.getint("repository", "port")

  def _startProxy():
    p = startProxy(originalHost, originalPort, 6033)
    p.next()
    return p

  proxy = _startProxy()
  self.addCleanup(proxy.send, "kill")

  # Patch the monitorsdb config to point at the local proxy
  with ConfigAttributePatch(
      config.CONFIG_NAME,
      config.baseConfigDir,
      (("repository", "host", "127.0.0.1"),
       ("repository", "port", "6033"))):

    # Force refresh of the engine singleton
    monitorsdb._EngineSingleton._pid = None
    engine = monitorsdb.engineFactory()

    # First, make sure a valid query returns the expected result
    res = engine.execute("select 1")
    self.assertEqual(res.scalar(), 1)

    @monitorsdb.retryOnTransientErrors
    def _killProxyTryRestartProxyAndTryAgain(n=[]):
      if not n:
        # Kill the proxy on the first attempt
        proxy.send("kill")
        proxy.next()
        try:
          engine.execute("select 1")
          self.fail("Proxy did not terminate as expected...")
        except sqlalchemy.exc.OperationalError:
          pass
        n.append(None)
      elif len(n) == 1:
        # Restore the proxy on the second attempt
        newProxy = _startProxy()
        self.addCleanup(newProxy.send, "kill")
        n.append(None)

      res = engine.execute("select 2")
      return res

    # Try again w/ the retry decorator
    result = _killProxyTryRestartProxyAndTryAgain()

    # Verify that the expected value is eventually returned
    self.assertEqual(result.scalar(), 2)
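
# The test above exercises monitorsdb.retryOnTransientErrors, which
# re-invokes the wrapped callable when a transient database error occurs. A
# minimal sketch of such a decorator is shown below, assuming
# sqlalchemy.exc.OperationalError marks the transient failures; the real
# decorator's retry policy and error classification may differ.

import functools
import time

import sqlalchemy.exc


def retryOnTransientErrorsSketch(fn):
  """Hypothetical stand-in for monitorsdb.retryOnTransientErrors."""
  @functools.wraps(fn)
  def wrapper(*args, **kwargs):
    attempts = 3
    for attempt in xrange(attempts):
      try:
        return fn(*args, **kwargs)
      except sqlalchemy.exc.OperationalError:
        if attempt == attempts - 1:
          raise  # retries exhausted; propagate the error
        time.sleep(0.5)  # fixed delay between attempts
  return wrapper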
def start(self):
  assert not self.active

  self._tempParentDir = tempfile.mkdtemp(prefix=self.__class__.__name__)

  self.tempModelCheckpointDir = os.path.join(self._tempParentDir,
                                             "tempStorageRoot")
  os.mkdir(self.tempModelCheckpointDir)

  self._configPatch = ConfigAttributePatch(
    "model-checkpoint.conf",
    os.environ.get("APPLICATION_CONFIG_PATH"),
    (("storage", "root", self.tempModelCheckpointDir),))
  self._configPatch.start()

  self.active = True

  self._logger.info("%s: redirected model checkpoint storage to %s",
                    self.__class__.__name__, self.tempModelCheckpointDir)
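
# A matching teardown would undo the config patch and delete the temporary
# directory. The following is a sketch of what such a stop() might look
# like, under the assumption that ConfigAttributePatch exposes a stop() that
# restores the original value; the actual class may differ.

import shutil


def stop(self):
  assert self.active
  self._configPatch.stop()  # assumed to restore the original "storage/root"
  shutil.rmtree(self._tempParentDir)  # delete the temporary checkpoint dir
  self.active = False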
def start(self):
  assert not self.active

  # Use the RabbitMQ Management Plugin to create the new temporary vhost
  connectionParams = amqp.connection.RabbitmqManagementConnectionParams()

  url = "http://%s:%s/api/vhosts/%s" % (
    connectionParams.host, connectionParams.port, self._vhost)

  try:
    try:
      response = requests.put(
        url,
        headers=self._RABBIT_MANAGEMENT_HEADERS,
        auth=(connectionParams.username, connectionParams.password))
      response.raise_for_status()

      self._virtualHostCreated = True

      self._logger.info("%s: created temporary rabbitmq vhost=%s",
                        self.__class__.__name__, self._vhost)
    except Exception:
      self._logger.exception(
        "Attempt to create temporary vhost=%s failed. url=%r",
        self._vhost, url)
      raise

    # Configure permissions on the new temporary vhost
    try:
      url = "http://%s:%s/api/permissions/%s/%s" % (
        connectionParams.host, connectionParams.port, self._vhost,
        connectionParams.username)

      response = requests.put(
        url,
        headers=self._RABBIT_MANAGEMENT_HEADERS,
        data=json.dumps({"configure": ".*", "write": ".*", "read": ".*"}),
        auth=(connectionParams.username, connectionParams.password))
      response.raise_for_status()

      self._logger.info(
        "%s: configured permissions on temporary rabbitmq vhost=%s",
        self.__class__.__name__, self._vhost)
    except Exception:
      self._logger.exception(
        "Attempt to configure permissions on vhost=%s failed. url=%r",
        self._vhost, url)
      raise

    # Apply a config patch to override the rabbitmq virtual host to be
    # used by message_bus_connector and others
    rabbitmqConfig = amqp.connection.RabbitmqConfig()

    self._configPatch = ConfigAttributePatch(
      rabbitmqConfig.CONFIG_NAME,
      rabbitmqConfig.baseConfigDir,
      (("connection", "virtual_host", self._vhost),))

    self._configPatch.start()

    self._logger.info("%s: overrode rabbitmq vhost=%s",
                      self.__class__.__name__, self._vhost)

    # Self-validation
    connectionParams = (
      amqp.connection.getRabbitmqConnectionParameters())
    actualVhost = connectionParams.vhost
    assert actualVhost == self._vhost, (
      "Expected vhost=%r, but got vhost=%r" % (self._vhost, actualVhost))

  except Exception:
    self._logger.exception("patch failed, deleting vhost=%s", self._vhost)
    self._removePatches()
    raise

  self.active = True

  self._logger.info("%s: applied patch", self.__class__.__name__)
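
# On failure, start() rolls back via self._removePatches(). The following
# sketch shows what that cleanup might do, assuming the RabbitMQ Management
# Plugin's DELETE /api/vhosts/<name> endpoint and a ConfigAttributePatch
# stop() method; the actual implementation may differ.

def _removePatchesSketch(self):
  if self._configPatch is not None:
    self._configPatch.stop()  # assumed to restore the original virtual_host

  if self._virtualHostCreated:
    connectionParams = amqp.connection.RabbitmqManagementConnectionParams()
    url = "http://%s:%s/api/vhosts/%s" % (
      connectionParams.host, connectionParams.port, self._vhost)
    response = requests.delete(
      url,
      headers=self._RABBIT_MANAGEMENT_HEADERS,
      auth=(connectionParams.username, connectionParams.password))
    response.raise_for_status()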
def testMetricCollectorRun(self, createAdapterMock, repoMock,
                           metricStreamerMock, multiprocessingMock):
  metricsPerChunk = 4

  # Configure the multiprocessing module mock
  def mapAsync(fn, tasks):
    class _(object):
      def wait(self):
        map(fn, tasks)
    return _()

  multiprocessingMock.Pool.return_value.map_async.side_effect = mapAsync
  multiprocessingMock.Pipe.side_effect = multiprocessing.Pipe
  multiprocessingMock.Manager = Mock(
    return_value=Mock(
      JoinableQueue=Mock(side_effect=multiprocessing.JoinableQueue)))

  metricPollInterval = 5

  now = datetime.datetime.today()

  resultsOfGetCloudwatchMetricsPendingDataCollection = [
    [],
    [_makeMetricMockInstance(metricPollInterval, now, 1)],
    [_makeMetricMockInstance(metricPollInterval, now, 2),
     _makeMetricMockInstance(metricPollInterval, now, 3)],
    KeyboardInterrupt("Fake KeyboardInterrupt to interrupt run-loop")
  ]

  repoMock.getCloudwatchMetricsPendingDataCollection.side_effect = (
    resultsOfGetCloudwatchMetricsPendingDataCollection)
  repoMock.retryOnTransientErrors.side_effect = lambda f: f

  # Configure the metric_collector.adapters module mock
  mockResults = [
    ([], now),
    ([[now, 1]] * metricsPerChunk,
     now + datetime.timedelta(seconds=metricPollInterval)),
    ([[now, 2]] * (metricsPerChunk * 5 + 1),
     now + datetime.timedelta(seconds=metricPollInterval))
  ]

  adapterInstanceMock = Mock(spec_set=_CloudwatchDatasourceAdapter)
  adapterInstanceMock.getMetricData.side_effect = mockResults
  adapterInstanceMock.getMetricResourceStatus.return_value = "status"

  createAdapterMock.return_value = adapterInstanceMock

  # Now, run MetricCollector and check results
  resultOfRunCollector = dict()

  def runCollector():
    try:
      collector = metric_collector.MetricCollector()
      resultOfRunCollector["returnCode"] = collector.run()
    except:
      resultOfRunCollector["exception"] = sys.exc_info()[1]
      raise

  with ConfigAttributePatch(
      YOMP.app.config.CONFIG_NAME,
      YOMP.app.config.baseConfigDir,
      (("metric_streamer", "chunk_size", str(metricsPerChunk)),)):

    # We run it in a thread in order to detect if MetricCollector.run fails
    # to return and to make sure that the test script will finish (in case
    # run doesn't)
    thread = threading.Thread(target=runCollector)
    thread.setDaemon(True)
    thread.start()

    thread.join(60)
    self.assertFalse(thread.isAlive())

  self.assertIn("exception", resultOfRunCollector)
  self.assertIsInstance(resultOfRunCollector["exception"], KeyboardInterrupt)
  self.assertNotIn("returnCode", resultOfRunCollector)

  self.assertEqual(adapterInstanceMock.getMetricData.call_count,
                   len(mockResults))

  # Validate that all expected data points were published

  # ... validate metricIDs
  metricIDs = [
    kwargs["metricID"]
    for (args, kwargs)
    in metricStreamerMock.return_value.streamMetricData.call_args_list
  ]

  expectedMetricIDs = []
  getDataIndex = 0
  for metrics in resultsOfGetCloudwatchMetricsPendingDataCollection:
    if not metrics or isinstance(metrics, BaseException):
      continue
    for m in metrics:
      results = mockResults[getDataIndex][0]
      if results:
        expectedMetricIDs.append(m.uid)
      getDataIndex += 1

  self.assertEqual(metricIDs, expectedMetricIDs)

  # ... validate data points
  dataPoints = list(
    itertools.chain(*[
      args[0]
      for (args, kwargs)
      in metricStreamerMock.return_value.streamMetricData.call_args_list
    ]))

  expectedDataPoints = list(
    itertools.chain(*[copy.deepcopy(r[0]) for r in mockResults if r[0]]))

  self.assertEqual(dataPoints, expectedDataPoints)

  # Assert instance status collected
  self.assertTrue(adapterInstanceMock.getMetricResourceStatus.called)

  # saveMetricInstanceStatus uses a connection, not an engine
  mockConnection = (repoMock.engineFactory.return_value.begin
                    .return_value.__enter__.return_value)

  # Assert instance status recorded
  for metricObj in resultsOfGetCloudwatchMetricsPendingDataCollection[1]:
    repoMock.saveMetricInstanceStatus.assert_any_call(
      mockConnection,
      metricObj.server,
      adapterInstanceMock.getMetricResourceStatus.return_value)

  for metricObj in resultsOfGetCloudwatchMetricsPendingDataCollection[2]:
    repoMock.saveMetricInstanceStatus.assert_any_call(
      mockConnection,
      metricObj.server,
      adapterInstanceMock.getMetricResourceStatus.return_value)
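
# The mockConnection expression above mirrors the SQLAlchemy idiom the
# production code presumably follows: Engine.begin() is a context manager
# that yields a transaction-scoped Connection (committing on success,
# rolling back on error), which is why the mock drills through
# begin.return_value.__enter__.return_value. Hypothetical production-side
# sketch:

def _saveStatusSketch(repository, server, status):
  engine = repository.engineFactory()
  with engine.begin() as connection:
    repository.saveMetricInstanceStatus(connection, server, status)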