def on_get(self, req, resp):
    """
    Handles GET requests for Clusters.

    :param req: Request instance that will be passed through.
    :type req: falcon.Request
    :param resp: Response instance that will be passed through.
    :type resp: falcon.Response
    """
    req.context['model'] = None
    try:
        store_manager = cherrypy.engine.publish('get-store-manager')[0]
        clusters = store_manager.list(Clusters.new())
        if clusters.clusters == []:
            self.logger.debug('Store returned an empty cluster list.')
            resp.status = falcon.HTTP_200
            # Actually return the "[]" the log above implies; previously
            # the body was left unset on this path.
            resp.body = '[]'
            return
    except Exception:
        # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
        # are not swallowed. Any store failure is treated as "no clusters".
        self.logger.warn(
            'Store does not have any clusters. Returning [] and 404.')
        resp.status = falcon.HTTP_404
        # Match the log message: return an empty JSON list with the 404.
        resp.body = '[]'
        return
    # HACK: Should use model instead
    resp.status = falcon.HTTP_200
    resp.body = json.dumps([cluster.name for cluster in clusters.clusters])
def on_get(self, req, resp):
    """
    Handles GET requests for Clusters.

    :param req: Request instance that will be passed through.
    :type req: falcon.Request
    :param resp: Response instance that will be passed through.
    :type resp: falcon.Response
    """
    directory, err = cherrypy.engine.publish(
        'store-get', '/commissaire/clusters/')[0]

    # Guard clause: a store error means no clusters exist at all.
    if err:
        self.logger.warn(
            'Etcd does not have any clusters. Returning [] and 404.')
        resp.status = falcon.HTTP_404
        req.context['model'] = None
        return

    # Don't let an empty clusters directory through
    if not len(directory._children):
        self.logger.debug('Etcd has a clusters directory but no content.')
        resp.status = falcon.HTTP_200
        req.context['model'] = None
        return

    # Each leaf key ends in the cluster name; collect the name segments.
    names = [leaf.key.split('/')[-1] for leaf in directory.leaves]
    resp.status = falcon.HTTP_200
    req.context['model'] = Clusters(clusters=names)
def test_watcher_failed_to_active(self):
    """
    Verify the watcher flips a stale 'failed' host back to 'active'
    once the transport reports the host available again.
    """
    with mock.patch('commissaire.transport.ansibleapi.Transport') as _tp:
        # Transport reports success (exit code 0) for the availability check.
        _tp().check_host_availability.return_value = (0, {})
        q = Queue()

        # A host whose last check is long overdue and whose status is failed.
        test_host = make_new(HOST)
        test_host.last_check = (
            datetime.datetime.now() - datetime.timedelta(days=10)).isoformat()
        test_host.status = 'failed'

        test_cluster = make_new(CLUSTER)
        test_cluster.type = C.CLUSTER_TYPE_KUBERNETES
        test_cluster.hostset = [test_host.address]

        store_manager = MagicMock(StoreHandlerManager)
        # First list() returns the hosts, second returns the clusters.
        store_manager.list.side_effect = (
            Hosts.new(hosts=[test_host]),
            Clusters.new(clusters=[test_cluster]))
        store_manager.get.return_value = test_host

        watcher(q, store_manager, run_once=True)

        # Use assertEqual: assertEquals is a deprecated alias.
        self.assertEqual(2, store_manager.list.call_count)
        store_manager.save.assert_called_once()
        self.assertEqual('active', test_host.status)
def on_get(self, req, resp):
    """
    Handles GET requests for Clusters.

    :param req: Request instance that will be passed through.
    :type req: falcon.Request
    :param resp: Response instance that will be passed through.
    :type resp: falcon.Response
    """
    req.context['model'] = None
    try:
        clusters = Clusters.retrieve()
    except Exception:
        # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
        # are not swallowed; any retrieval failure maps to 404.
        self.logger.warn(
            'Etcd does not have any clusters. Returning [] and 404.')
        resp.status = falcon.HTTP_404
        return
    if clusters.clusters == []:
        # Directory exists but holds no clusters: 200 with no model.
        self.logger.debug(
            'Etcd has a clusters directory but no content.')
        resp.status = falcon.HTTP_200
        return
    req.context['model'] = clusters
def on_delete(self, req, resp, address):
    """
    Handles the Deletion of a Host.

    :param req: Request instance that will be passed through.
    :type req: falcon.Request
    :param resp: Response instance that will be passed through.
    :type resp: falcon.Response
    :param address: The address of the Host being requested.
    :type address: str
    """
    resp.body = '{}'
    store_manager = cherrypy.engine.publish('get-store-manager')[0]
    try:
        host = Host.new(address=address)
        WATCHER_QUEUE.dequeue(host)
        store_manager.delete(host)
        self.logger.debug(
            'Deleted host {0} and dequeued it from the watcher.'.format(
                host.address))
        resp.status = falcon.HTTP_200
    except Exception:
        # Narrowed from bare "except:"; a failed delete means the host
        # was not found.
        resp.status = falcon.HTTP_404

    # Also remove the host from all clusters.
    # Note: We've done all we need to for the host deletion,
    # so if an error occurs from here just log it and
    # return.
    try:
        clusters = store_manager.list(Clusters(clusters=[]))
    except Exception:
        self.logger.warn('Store does not have any clusters')
        return
    for cluster in clusters.clusters:
        # Per-cluster try/except: one bad cluster must not stop the
        # cleanup of the rest.
        try:
            self.logger.debug('Checking cluster {0}'.format(cluster.name))
            if address in cluster.hostset:
                self.logger.info('Removing {0} from cluster {1}'.format(
                    address, cluster.name))
                cluster.hostset.remove(address)
                store_manager.save(cluster)
                self.logger.info(
                    '{0} has been removed from cluster {1}'.format(
                        address, cluster.name))
        except Exception:
            self.logger.warn(
                'Failed to remove {0} from cluster {1}'.format(
                    address, cluster.name))
def cluster_for_host(address, store_manager):
    """
    Checks to see if the the host is part of a cluster. KeyError is raised
    if the host is not part of a cluster.

    :param address: Address of the host to look for.
    :type address: str
    :param store_manager: Remote object for remote stores
    :type store_manager: commissaire.store.StoreHandlerManager
    :returns: A cluster instance that has the host
    :rtype: commissaire.model.Model
    :raises: KeyError
    """
    for cluster in store_manager.list(Clusters.new()).clusters:
        if address in cluster.hostset:
            return cluster
    # Include the address so callers see which host had no cluster.
    raise KeyError(address)
def on_delete(self, req, resp, address):
    """
    Handles the Deletion of a Host.

    :param req: Request instance that will be passed through.
    :type req: falcon.Request
    :param resp: Response instance that will be passed through.
    :type resp: falcon.Response
    :param address: The address of the Host being requested.
    :type address: str
    """
    resp.body = "{}"
    try:
        Host.delete(address)
        resp.status = falcon.HTTP_200
    except Exception:
        # Narrowed from bare "except:"; a failed delete means the host
        # was not found.
        resp.status = falcon.HTTP_404

    # Also remove the host from all clusters.
    # Note: We've done all we need to for the host deletion,
    # so if an error occurs from here just log it and
    # return.
    try:
        clusters = Clusters.retrieve()
    except Exception:
        self.logger.warn("Etcd does not have any clusters")
        return
    for cluster_name in clusters.clusters:
        # The try/except is per cluster so one bad cluster does not stop
        # cleanup of the rest (previously a single failure aborted the
        # loop, and could NameError on cluster_name in the handler).
        try:
            self.logger.debug("Checking cluster {0}".format(cluster_name))
            cluster = Cluster.retrieve(cluster_name)
            if address in cluster.hostset:
                self.logger.info("Removing {0} from cluster {1}".format(
                    address, cluster_name))
                cluster.hostset.remove(address)
                cluster.save(cluster_name)
                self.logger.info(
                    "{0} has been removed from cluster {1}".format(
                        address, cluster_name))
        except Exception:
            self.logger.warn(
                "Failed to remove {0} from cluster {1}".format(
                    address, cluster_name))
def test_host_status_retrieve_with_container_manager(self):
    """
    Verify retrieving Host status when it is in a container manager cluster.
    """
    with mock.patch('cherrypy.engine.publish') as _publish:
        manager = mock.MagicMock(StoreHandlerManager)
        kube_container_mgr = KubeContainerManager({
            'server_url': 'http://127.0.0.1:8080',
            'token': 'token'
        })
        # A dummy requests.Response
        response = requests.Response()
        response.status_code = 200
        response._content = '{"use": "kube"}'
        kube_container_mgr._get = mock.MagicMock(return_value=response)
        manager.list_container_managers.return_value = [kube_container_mgr]
        _publish.return_value = [manager]

        test_host = make_new(HOST)
        test_cluster = make_new(CLUSTER)
        test_cluster.type = C.CLUSTER_TYPE_KUBERNETES
        test_cluster.hostset = [test_host.address]

        # Verify if the host exists the data is returned
        # get() is called for the host first, then its cluster.
        manager.get.side_effect = (
            test_host, test_cluster)
        manager.list.return_value = Clusters.new(clusters=[test_cluster])

        body = self.simulate_request('/api/v0/host/10.2.0.2/status')
        self.assertEqual(self.srmock.status, falcon.HTTP_200)
        result = json.loads(body[0])
        # Use assertEqual: assertEquals is a deprecated alias.
        self.assertEqual(C.CLUSTER_TYPE_KUBERNETES, result['type'])
        self.assertEqual('available', result['host']['status'])
        self.assertEqual({'use': 'kube'}, result['container_manager'])