Code example #1
    def on_put(self, req, resp, name):
        """
        Handles the creation of a new Cluster.

        :param req: Request instance that will be passed through.
        :type req: falcon.Request
        :param resp: Response instance that will be passed through.
        :type resp: falcon.Response
        :param name: The name of the Cluster being created.
        :type name: str
        """
        # PUT is idempotent, and since there's no body to this request,
        # there's nothing to conflict with.  The request should always
        # succeed, even if we didn't actually do anything.
        try:
            store_manager = cherrypy.engine.publish('get-store-manager')[0]
            cluster = store_manager.get(Cluster.new(name=name))
            self.logger.info(
                'Creation of already existing cluster {0} requested.'.format(
                    name))
        except:
            pass

        args = {}
        data = req.stream.read()
        if data:
            try:
                args = json.loads(data.decode())
                self.logger.debug('Cluster args received: "{0}"'.format(args))
            except ValueError as error:
                self.logger.error(
                    'Unable to parse cluster arguments: {0}'.format(error))
        try:
            self.logger.debug('Looking for network {0}'.format(
                args['network']))
            network = store_manager.get(
                Network.new(name=args['network']))
        except KeyError:
            network = Network.new(**C.DEFAULT_CLUSTER_NETWORK_JSON)
        cluster = Cluster.new(
            name=name, type=args.get('type', C.CLUSTER_TYPE_DEFAULT),
            network=network.name, status='ok', hostset=[])
        self.logger.debug('Cluster to create: {0}'.format(
            cluster.to_json_with_hosts()))
        store_manager.save(cluster)
        self.logger.info(
            'Created cluster {0} per request.'.format(name))
        self.logger.debug('New Cluster: {0}'.format(
            cluster.to_json_with_hosts()))
        resp.status = falcon.HTTP_201
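
A minimal usage sketch for the handler above. The route, host, and field values are assumptions for illustration, not taken from the project:

import json
import requests  # illustrative HTTP client; any client library works

# Hypothetical: assumes the resource above is routed at /api/v0/cluster/{name}.
resp = requests.put(
    'http://127.0.0.1:8000/api/v0/cluster/mycluster',
    data=json.dumps({'network': 'default'}))
assert resp.status_code == 201  # created (or idempotently re-created)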
Code example #2
    def on_get(self, req, resp, name, address):
        """
        Handles GET requests for individual hosts in a Cluster.
        This is a membership test, returning 200 OK if the host
        address is part of the cluster, or else 404 Not Found.

        :param req: Request instance that will be passed through.
        :type req: falcon.Request
        :param resp: Response instance that will be passed through.
        :type resp: falcon.Response
        :param name: The name of the Cluster being requested.
        :type name: str
        :param address: The address of the Host being requested.
        :type address: str
        """
        try:
            store_manager = cherrypy.engine.publish('get-store-manager')[0]
            cluster = store_manager.get(Cluster.new(name=name))
        except:
            resp.status = falcon.HTTP_404
            return

        if address in cluster.hostset:
            resp.status = falcon.HTTP_200
        else:
            resp.status = falcon.HTTP_404
Code example #3
    def on_get(self, req, resp, name):
        """
        Handles retrieval of an existing Cluster.

        :param req: Request instance that will be passed through.
        :type req: falcon.Request
        :param resp: Response instance that will be passed through.
        :type resp: falcon.Response
        :param name: The name of the Cluster being requested.
        :type name: str
        """
        try:
            store_manager = cherrypy.engine.publish('get-store-manager')[0]
            cluster = store_manager.get(Cluster.new(name=name))
        except Exception as error:
            self.logger.error("{0}: {1}".format(type(error), error))
            resp.status = falcon.HTTP_404
            return

        if not cluster:
            resp.status = falcon.HTTP_404
            return

        self._calculate_hosts(cluster)
        # Have to set resp.body explicitly to include Hosts.
        resp.body = cluster.to_json_with_hosts()
        resp.status = falcon.HTTP_200
        self.logger.debug('Cluster retrieval: {0}'.format(resp.body))
Code example #4
def etcd_cluster_add_host(name, address):
    """
    Adds a host address to a cluster with the given name.
    If no such cluster exists, the function raises KeyError.

    Note the function is idempotent: if the host address is
    already in the cluster, no change occurs.

    :param name: Name of a cluster
    :type name: str
    :param address: Host address to add
    :type address: str
    """
    try:
        store_manager = cherrypy.engine.publish('get-store-manager')[0]
        cluster = store_manager.get(Cluster.new(name=name))
    except:
        raise KeyError

    # FIXME: Need input validation.
    #        - Does the host exist at /commissaire/hosts/{IP}?
    #        - Does the host already belong to another cluster?

    # FIXME: Should guard against races here, since we're fetching
    #        the cluster record and writing it back with some parts
    #        unmodified.  Use either locking or a conditional write
    #        with the etcd 'modifiedIndex'.  Deferring for now.

    if address not in cluster.hostset:
        cluster.hostset.append(address)
        cluster = store_manager.save(cluster)
Code example #5
File: util.py Project: chuanchangjia/commissaire
def etcd_cluster_add_host(name, address):
    """
    Adds a host address to a cluster with the given name.
    If no such cluster exists, the function raises KeyError.

    Note the function is idempotent: if the host address is
    already in the cluster, no change occurs.

    :param name: Name of a cluster
    :type name: str
    :param address: Host address to add
    :type address: str
    """
    try:
        store_manager = cherrypy.engine.publish('get-store-manager')[0]
        cluster = store_manager.get(Cluster.new(name=name))
    except:
        raise KeyError

    # FIXME: Need input validation.
    #        - Does the host exist at /commissaire/hosts/{IP}?
    #        - Does the host already belong to another cluster?

    # FIXME: Should guard against races here, since we're fetching
    #        the cluster record and writing it back with some parts
    #        unmodified.  Use either locking or a conditional write
    #        with the etcd 'modifiedIndex'.  Deferring for now.

    if address not in cluster.hostset:
        cluster.hostset.append(address)
        cluster = store_manager.save(cluster)
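
A short, hypothetical usage sketch for the helper above; the cluster name and address are made up:

# Assumes a cluster named 'mycluster' already exists in the store.
etcd_cluster_add_host('mycluster', '192.168.1.10')
etcd_cluster_add_host('mycluster', '192.168.1.10')  # idempotent: second call changes nothing

try:
    etcd_cluster_add_host('no-such-cluster', '192.168.1.10')
except KeyError:
    pass  # raised when the named cluster does not exist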
Code example #6
File: clusters.py Project: chuanchangjia/commissaire
    def on_get(self, req, resp, name):
        """
        Handles retrieval of an existing Cluster.

        :param req: Request instance that will be passed through.
        :type req: falcon.Request
        :param resp: Response instance that will be passed through.
        :type resp: falcon.Response
        :param name: The name of the Cluster being requested.
        :type name: str
        """
        try:
            store_manager = cherrypy.engine.publish('get-store-manager')[0]
            cluster = store_manager.get(Cluster.new(name=name))
        except:
            resp.status = falcon.HTTP_404
            return

        if not cluster:
            resp.status = falcon.HTTP_404
            return

        self._calculate_hosts(cluster)
        # Have to set resp.body explicitly to include Hosts.
        resp.body = cluster.to_json_with_hosts()
        resp.status = falcon.HTTP_200
Code example #7
File: clusters.py Project: chuanchangjia/commissaire
    def on_get(self, req, resp, name, address):
        """
        Handles GET requests for individual hosts in a Cluster.
        This is a membership test, returning 200 OK if the host
        address is part of the cluster, or else 404 Not Found.

        :param req: Request instance that will be passed through.
        :type req: falcon.Request
        :param resp: Response instance that will be passed through.
        :type resp: falcon.Response
        :param name: The name of the Cluster being requested.
        :type name: str
        :param address: The address of the Host being requested.
        :type address: str
        """
        try:
            store_manager = cherrypy.engine.publish('get-store-manager')[0]
            cluster = store_manager.get(Cluster.new(name=name))
        except:
            resp.status = falcon.HTTP_404
            return

        if address in cluster.hostset:
            resp.status = falcon.HTTP_200
        else:
            resp.status = falcon.HTTP_404
Code example #8
    def test_implicit_host_create(self):
        """
        Verify creation of a Host with an implied address.
        """
        with mock.patch('cherrypy.engine.publish') as _publish:
            manager = mock.MagicMock(StoreHandlerManager)
            _publish.return_value = [manager]

            manager.save.return_value = make_new(HOST)

            manager.get.side_effect = (
                Exception,
                make_new(CLUSTER),
                make_new(HOST),
                make_new(CLUSTER),
                make_new(HOST))
            data = ('{"ssh_priv_key": "dGVzdAo=", "remote_user": "******",'
                    ' "cluster": "cluster"}')
            body = self.simulate_request(
                '/api/v0/host', method='PUT', body=data)
            self.assertEqual(self.srmock.status, falcon.HTTP_201)
            self.assertEqual(
                json.loads(INITIAL_IMPLICIT_HOST_JSON),
                json.loads(body[0]))

            # Make sure creation fails if the cluster doesn't exist
            manager.get.side_effect = (
                make_new(HOST),
                Exception)
            body = self.simulate_request(
                '/api/v0/host', method='PUT', body=data)
            self.assertEqual(self.srmock.status, falcon.HTTP_409)
            self.assertEqual({}, json.loads(body[0]))

            # Make sure creation is idempotent if the request parameters
            # agree with an existing host.
            manager.get.side_effect = (
                make_new(HOST),
                Cluster.new(
                    name='cluster',
                    status='ok',
                    hostset=["127.0.0.1"]))

            body = self.simulate_request(
                '/api/v0/host', method='PUT', body=data)
            self.assertEqual(self.srmock.status, falcon.HTTP_200)
            self.assertEqual(json.loads(HOST_JSON), json.loads(body[0]))

            # Make sure creation fails if the request parameters conflict
            # with an existing host.
            manager.get.side_effect = (
                make_new(HOST),
                make_new(HOST))
            bad_data = '{"ssh_priv_key": "boguskey"}'
            body = self.simulate_request(
                '/api/v0/host', method='PUT', body=bad_data)
            self.assertEqual(self.srmock.status, falcon.HTTP_409)
            self.assertEqual({}, json.loads(body[0]))
Code example #9
File: clusters.py Project: chuanchangjia/commissaire
    def on_put(self, req, resp, name):
        """
        Handles the creation of a new Cluster.

        :param req: Request instance that will be passed through.
        :type req: falcon.Request
        :param resp: Response instance that will be passed through.
        :type resp: falcon.Response
        :param name: The name of the Cluster being created.
        :type name: str
        """
        # PUT is idempotent, and since there's no body to this request,
        # there's nothing to conflict with.  The request should always
        # succeed, even if we didn't actually do anything.
        try:
            store_manager = cherrypy.engine.publish('get-store-manager')[0]
            cluster = store_manager.get(Cluster.new(name=name))
            self.logger.info(
                'Creation of already existing cluster {0} requested.'.format(
                    name))
        except:
            pass

        # Honor cluster type if it is passed in
        cluster_type = C.CLUSTER_TYPE_DEFAULT
        try:
            data = req.stream.read().decode()
            args = json.loads(data)
            cluster_type = args['type']
        except KeyError:
            # Data was provided but no type was listed. Use default.
            pass
        except ValueError:
            # No request body or invalid JSON was provided. Use default.
            pass

        cluster = Cluster.new(
            name=name, cluster=cluster_type, status='ok', hostset=[])

        store_manager.save(cluster)
        self.logger.info(
            'Created cluster {0} per request.'.format(name))
        resp.status = falcon.HTTP_201
Code example #10
    def test_implicit_host_create(self):
        """
        Verify creation of a Host with an implied address.
        """
        with mock.patch('cherrypy.engine.publish') as _publish:
            manager = mock.MagicMock(StoreHandlerManager)
            _publish.return_value = [manager]

            manager.save.return_value = make_new(HOST)

            manager.get.side_effect = (
                Exception,
                make_new(HOST),
                make_new(CLUSTER),
                make_new(HOST))
            data = ('{"ssh_priv_key": "dGVzdAo=", "remote_user": "******",'
                    ' "cluster": "cluster"}')
            body = self.simulate_request(
                '/api/v0/host', method='PUT', body=data)
            self.assertEqual(self.srmock.status, falcon.HTTP_201)
            self.assertEqual(json.loads(HOST_JSON), json.loads(body[0]))

            # Make sure creation fails if the cluster doesn't exist
            manager.get.side_effect = (
                make_new(HOST),
                Exception)
            body = self.simulate_request(
                '/api/v0/host', method='PUT', body=data)
            self.assertEqual(self.srmock.status, falcon.HTTP_409)
            self.assertEqual({}, json.loads(body[0]))

            # Make sure creation is idempotent if the request parameters
            # agree with an existing host.
            manager.get.side_effect = (
                make_new(HOST),
                Cluster.new(
                    name='cluster',
                    status='ok',
                    hostset=["127.0.0.1"]))

            body = self.simulate_request(
                '/api/v0/host', method='PUT', body=data)
            self.assertEqual(self.srmock.status, falcon.HTTP_200)
            self.assertEqual(json.loads(HOST_JSON), json.loads(body[0]))

            # Make sure creation fails if the request parameters conflict
            # with an existing host.
            manager.get.side_effect = (
                make_new(HOST),
                make_new(HOST))
            bad_data = '{"ssh_priv_key": "boguskey"}'
            body = self.simulate_request(
                '/api/v0/host', method='PUT', body=bad_data)
            self.assertEqual(self.srmock.status, falcon.HTTP_409)
            self.assertEqual({}, json.loads(body[0]))
Code example #11
 def test__format_kwargs(self):
     """
     Verify keyword arguments get formatted properly.
     """
     model_instance = Cluster.new(name='test')
     annotations = {
         'commissaire-cluster-test-name': 'test',
         'commissaire-cluster-test-status': 'test',
     }
     kwargs = self.instance._format_kwargs(model_instance, annotations)
     self.assertEquals({'name': 'test', 'status': 'test'}, kwargs)
Code example #12
 def test__format_kwargs(self):
     """
     Verify keyword arguments get formatted properly.
     """
     model_instance = Cluster.new(name='test')
     annotations = {
         'commissaire-cluster-test-name': 'test',
         'commissaire-cluster-test-status': 'test',
     }
     kwargs = self.instance._format_kwargs(model_instance, annotations)
     self.assertEquals({'name': 'test', 'status': 'test'}, kwargs)
Code example #13
    def test__get_on_namespace(self):
        """
        Verify getting data from namespaces works.
        """
        model_instance = Cluster.new(name='test')
        self.instance._store.get = mock.MagicMock()
        self.instance._store.get().json().get().get.return_value = {
            'commissaire-cluster-test-name': 'test',
            'commissaire-cluster-test-status': 'ok',
        }

        self.instance._get_on_namespace(model_instance)
Code example #14
    def test__get_on_namespace(self):
        """
        Verify getting data from namespaces works.
        """
        model_instance = Cluster.new(name='test')
        self.instance._store.get = mock.MagicMock()
        self.instance._store.get().json().get().get.return_value = {
            'commissaire-cluster-test-name': 'test',
            'commissaire-cluster-test-status': 'ok',
        }

        self.instance._get_on_namespace(model_instance)
Code example #15
 def test__format_model(self):
     """
     Verify responses from Kubernetes can be turned into models.
     """
     model_instance = Cluster.new(name='test')
     resp_data = {'metadata': {'annotations': {
          'commissaire-cluster-test-name': 'test',
          'commissaire-cluster-test-status': 'test',
     }}}
     result = self.instance._format_model(resp_data, model_instance)
     self.assertEquals('test', result.name)
     self.assertEquals('test', result.status)
Code example #16
def etcd_cluster_exists(name):
    """
    Returns whether a cluster with the given name exists.

    :param name: Name of a cluster
    :type name: str
    """
    store_manager = cherrypy.engine.publish('get-store-manager')[0]
    try:
        store_manager.get(Cluster.new(name=name))
    except:
        return False
    return True
Code example #17
File: util.py Project: chuanchangjia/commissaire
def etcd_cluster_exists(name):
    """
    Returns whether a cluster with the given name exists.

    :param name: Name of a cluster
    :type name: str
    """
    store_manager = cherrypy.engine.publish('get-store-manager')[0]
    try:
        store_manager.get(Cluster.new(name=name))
    except:
        return False
    return True
Code example #18
File: clusters.py Project: chuanchangjia/commissaire
    def on_put(self, req, resp, name):
        """
        Handles PUT requests for Cluster hosts.
        This replaces the entire host list for a Cluster.

        :param req: Request instance that will be passed through.
        :type req: falcon.Request
        :param resp: Response instance that will be passed through.
        :type resp: falcon.Response
        :param name: The name of the Cluster being requested.
        :type name: str
        """
        try:
            req_body = json.loads(req.stream.read().decode())
            old_hosts = set(req_body['old'])  # Ensures no duplicates
            new_hosts = set(req_body['new'])  # Ensures no duplicates
        except (KeyError, TypeError):
            self.logger.info(
                'Bad client PUT request for cluster "{0}": {1}'.
                format(name, req_body))
            resp.status = falcon.HTTP_400
            return

        try:
            store_manager = cherrypy.engine.publish('get-store-manager')[0]
            cluster = store_manager.get(Cluster.new(name=name))
        except:
            resp.status = falcon.HTTP_404
            return

        # old_hosts must match current hosts to accept new_hosts.
        if old_hosts != set(cluster.hostset):
            self.logger.info(
                'Conflict setting hosts for cluster {0}'.format(name))
            self.logger.debug('{0} != {1}'.format(old_hosts, cluster.hostset))
            resp.status = falcon.HTTP_409
            return

        # FIXME: Need input validation.  For each new host,
        #        - Does the host exist at /commissaire/hosts/{IP}?
        #        - Does the host already belong to another cluster?

        # FIXME: Should guard against races here, since we're fetching
        #        the cluster record and writing it back with some parts
        #        unmodified.  Use either locking or a conditional write
        #        with the etcd 'modifiedIndex'.  Deferring for now.

        cluster.hostset = list(new_hosts)
        store_manager.save(cluster)
        resp.status = falcon.HTTP_200
Code example #19
    def on_put(self, req, resp, name):
        """
        Handles PUT requests for Cluster hosts.
        This replaces the entire host list for a Cluster.

        :param req: Request instance that will be passed through.
        :type req: falcon.Request
        :param resp: Response instance that will be passed through.
        :type resp: falcon.Response
        :param name: The name of the Cluster being requested.
        :type name: str
        """
        try:
            req_body = json.loads(req.stream.read().decode())
            old_hosts = set(req_body['old'])  # Ensures no duplicates
            new_hosts = set(req_body['new'])  # Ensures no duplicates
        except (KeyError, TypeError):
            self.logger.info(
                'Bad client PUT request for cluster "{0}": {1}'.
                format(name, req_body))
            resp.status = falcon.HTTP_400
            return

        try:
            store_manager = cherrypy.engine.publish('get-store-manager')[0]
            cluster = store_manager.get(Cluster.new(name=name))
        except:
            resp.status = falcon.HTTP_404
            return

        # old_hosts must match current hosts to accept new_hosts.
        if old_hosts != set(cluster.hostset):
            self.logger.info(
                'Conflict setting hosts for cluster {0}'.format(name))
            self.logger.debug('{0} != {1}'.format(old_hosts, cluster.hostset))
            resp.status = falcon.HTTP_409
            return

        # FIXME: Need input validation.  For each new host,
        #        - Does the host exist at /commissaire/hosts/{IP}?
        #        - Does the host already belong to another cluster?

        # FIXME: Should guard against races here, since we're fetching
        #        the cluster record and writing it back with some parts
        #        unmodified.  Use either locking or a conditional write
        #        with the etcd 'modifiedIndex'.  Deferring for now.

        cluster.hostset = list(new_hosts)
        store_manager.save(cluster)
        resp.status = falcon.HTTP_200
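
A sketch of a request body the handler above would accept; the addresses are hypothetical. The 'old' list acts as a compare-and-swap guard against concurrent edits:

import json

request_body = json.dumps({
    # Must exactly match the cluster's current hostset,
    # otherwise the handler answers 409 Conflict.
    'old': ['192.168.1.10'],
    # Replaces the entire host list on success (200 OK).
    'new': ['192.168.1.10', '192.168.1.11'],
})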
Code example #20
    def test__dispatch(self):
        """
        Verify dispatching of operations works properly.
        """
        # Test namespace
        self.instance._save_on_namespace = mock.MagicMock()
        self.instance._dispatch('save', Cluster.new(name='test'))
        self.instance._save_on_namespace.assert_called_once()

        self.instance._get_on_namespace = mock.MagicMock()
        self.instance._dispatch('get', Cluster.new(name='test'))
        self.instance._get_on_namespace.assert_called_once()

        self.instance._delete_on_namespace = mock.MagicMock()
        self.instance._dispatch('delete', Cluster.new(name='test'))
        self.instance._delete_on_namespace.assert_called_once()

        self.instance._list_on_namespace = mock.MagicMock()
        self.instance._dispatch('list', Cluster.new(name='test'))
        self.instance._list_on_namespace.assert_called_once()

        # Test host
        self.instance._save_host = mock.MagicMock()
        self.instance._dispatch('save', Host.new(name='test'))
        self.instance._save_host.assert_called_once()

        self.instance._get_host = mock.MagicMock()
        self.instance._dispatch('get', Host.new(name='test'))
        self.instance._get_host.assert_called_once()

        self.instance._delete_host = mock.MagicMock()
        self.instance._dispatch('delete', Host.new(name='test'))
        self.instance._delete_host.assert_called_once()

        self.instance._list_host = mock.MagicMock()
        self.instance._dispatch('list', Host.new(name='test'))
        self.instance._list_host.assert_called_once()
Code example #21
    def test__dispatch(self):
        """
        Verify dispatching of operations works properly.
        """
        # Test namespace
        self.instance._save_on_namespace = mock.MagicMock()
        self.instance._dispatch('save', Cluster.new(name='test'))
        self.instance._save_on_namespace.assert_called_once()

        self.instance._get_on_namespace = mock.MagicMock()
        self.instance._dispatch('get', Cluster.new(name='test'))
        self.instance._get_on_namespace.assert_called_once()

        self.instance._delete_on_namespace = mock.MagicMock()
        self.instance._dispatch('delete', Cluster.new(name='test'))
        self.instance._delete_on_namespace.assert_called_once()

        self.instance._list_on_namespace = mock.MagicMock()
        self.instance._dispatch('list', Cluster.new(name='test'))
        self.instance._list_on_namespace.assert_called_once()

        # Test host
        self.instance._save_host = mock.MagicMock()
        self.instance._dispatch('save', Host.new(name='test'))
        self.instance._save_host.assert_called_once()

        self.instance._get_host = mock.MagicMock()
        self.instance._dispatch('get', Host.new(name='test'))
        self.instance._get_host.assert_called_once()

        self.instance._delete_host = mock.MagicMock()
        self.instance._dispatch('delete', Host.new(name='test'))
        self.instance._delete_host.assert_called_once()

        self.instance._list_host = mock.MagicMock()
        self.instance._dispatch('list', Host.new(name='test'))
        self.instance._list_host.assert_called_once()
Code example #22
 def test__format_model(self):
     """
     Verify responses from Kubernetes can be turned into models.
     """
     model_instance = Cluster.new(name='test')
     resp_data = {
         'metadata': {
             'annotations': {
                 'commissaire-cluster-test-name': 'test',
                 'commissaire-cluster-test-status': 'test',
             }
         }
     }
     result = self.instance._format_model(resp_data, model_instance)
     self.assertEquals('test', result.name)
     self.assertEquals('test', result.status)
Code example #23
def etcd_cluster_has_host(name, address):
    """
    Checks if a host address belongs to a cluster with the given name.
    If no such cluster exists, the function raises KeyError.

    :param name: Name of a cluster
    :type name: str
    :param address: Host address
    :type address: str
    """
    try:
        store_manager = cherrypy.engine.publish('get-store-manager')[0]
        cluster = store_manager.get(Cluster.new(name=name))
    except:
        raise KeyError

    return address in cluster.hostset
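
A hypothetical call of the helper above; the name and address are made up:

# Returns True/False for a known cluster; raises KeyError if the cluster is missing.
if etcd_cluster_has_host('mycluster', '192.168.1.10'):
    print('host is a member of the cluster')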
Code example #24
File: util.py Project: chuanchangjia/commissaire
def etcd_cluster_has_host(name, address):
    """
    Checks if a host address belongs to a cluster with the given name.
    If no such cluster exists, the function raises KeyError.

    :param name: Name of a cluster
    :type name: str
    :param address: Host address
    :type address: str
    """
    try:
        store_manager = cherrypy.engine.publish('get-store-manager')[0]
        cluster = store_manager.get(Cluster.new(name=name))
    except:
        raise KeyError

    return address in cluster.hostset
Code example #25
File: util.py Project: chuanchangjia/commissaire
def get_cluster_model(name):
    """
    Returns a Cluster instance from the etcd record for the given
    cluster name, if it exists, or else None.

    For convenience, the EtcdResult is embedded in the Cluster instance
    as an 'etcd' property.

    :param name: Name of a cluster
    :type name: str
    """
    store_manager = cherrypy.engine.publish('get-store-manager')[0]
    try:
        cluster = store_manager.get(Cluster.new(name=name))
    except:
        cluster = None

    return cluster
Code example #26
def get_cluster_model(name):
    """
    Returns a Cluster instance from the etcd record for the given
    cluster name, if it exists, or else None.

    For convenience, the EtcdResult is embedded in the Cluster instance
    as an 'etcd' property.

    :param name: Name of a cluster
    :type name: str
    """
    store_manager = cherrypy.engine.publish('get-store-manager')[0]
    try:
        cluster = store_manager.get(Cluster.new(name=name))
    except:
        cluster = None

    return cluster
Code example #27
File: clusters.py Project: chuanchangjia/commissaire
    def on_get(self, req, resp, name):
        """
        Handles GET requests for Cluster hosts.

        :param req: Request instance that will be passed through.
        :type req: falcon.Request
        :param resp: Response instance that will be passed through.
        :type resp: falcon.Response
        :param name: The name of the Cluster being requested.
        :type name: str
        """
        try:
            store_manager = cherrypy.engine.publish('get-store-manager')[0]
            cluster = store_manager.get(Cluster.new(name=name))
        except:
            resp.status = falcon.HTTP_404
            return

        resp.body = json.dumps(cluster.hostset)
        resp.status = falcon.HTTP_200
Code example #28
    def on_get(self, req, resp, name):
        """
        Handles GET requests for Cluster hosts.

        :param req: Request instance that will be passed through.
        :type req: falcon.Request
        :param resp: Response instance that will be passed through.
        :type resp: falcon.Response
        :param name: The name of the Cluster being requested.
        :type name: str
        """
        try:
            store_manager = cherrypy.engine.publish('get-store-manager')[0]
            cluster = store_manager.get(Cluster.new(name=name))
        except:
            resp.status = falcon.HTTP_404
            return

        resp.body = json.dumps(cluster.hostset)
        resp.status = falcon.HTTP_200
Code example #29
    def test_investigator(self):
        """
        Verify the investigator.
        """
        with mock.patch('commissaire.transport.ansibleapi.Transport') as _tp:

            _tp().get_info.return_value = (
                0,
                {
                    'os': 'fedora',
                    'cpus': 2,
                    'memory': 11989228,
                    'space': 487652,
                }
            )

            _tp().bootstrap.return_value = (0, {})

            request_queue = Queue()
            response_queue = MagicMock(Queue)

            to_investigate = {
                'address': '10.0.0.2',
                'ssh_priv_key': 'dGVzdAo=',
                'remote_user': '******'
            }

            manager = MagicMock(StoreHandlerManager)
            manager.get.return_value = Host(**json.loads(self.etcd_host))

            request_queue.put_nowait((
                manager, to_investigate, Cluster.new().__dict__))
            investigator(request_queue, response_queue, run_once=True)

            # Investigator saves *after* bootstrapping.
            self.assertEquals(0, manager.save.call_count)

            self.assertEquals(1, response_queue.put.call_count)
            host, error = response_queue.put.call_args[0][0]
            self.assertEquals(host.status, 'inactive')
            self.assertIsNone(error)
Code example #30
File: clusters.py Project: chuanchangjia/commissaire
    def on_delete(self, req, resp, name):
        """
        Handles the deletion of a Cluster.

        :param req: Request instance that will be passed through.
        :type req: falcon.Request
        :param resp: Response instance that will be passed through.
        :type resp: falcon.Response
        :param name: The name of the Cluster being deleted.
        :type name: str
        """
        resp.body = '{}'

        try:
            store_manager = cherrypy.engine.publish('get-store-manager')[0]
            store_manager.delete(Cluster.new(name=name))
            resp.status = falcon.HTTP_200
            self.logger.info(
                'Deleted cluster {0} per request.'.format(name))
        except:
            self.logger.info(
                'Deletion of non-existent cluster {0} requested.'.format(
                    name))
            resp.status = falcon.HTTP_404
Code example #31
    def on_delete(self, req, resp, name):
        """
        Handles the deletion of a Cluster.

        :param req: Request instance that will be passed through.
        :type req: falcon.Request
        :param resp: Response instance that will be passed through.
        :type resp: falcon.Response
        :param name: The name of the Cluster being deleted.
        :type name: str
        """
        resp.body = '{}'

        try:
            store_manager = cherrypy.engine.publish('get-store-manager')[0]
            store_manager.delete(Cluster.new(name=name))
            resp.status = falcon.HTTP_200
            self.logger.info(
                'Deleted cluster {0} per request.'.format(name))
        except:
            self.logger.info(
                'Deletion of non-existent cluster {0} requested.'.format(
                    name))
            resp.status = falcon.HTTP_404
Code example #32
    ' "last_check": "2015-12-17T15:48:18.710454"}')
#: Credential JSON for tests
HOST_CREDS_JSON = '{"remote_user": "******", "ssh_priv_key": "dGVzdAo="}'
#: Host model for most tests
HOST = Host.new(
    ssh_priv_key='dGVzdAo=',
    remote_user='******',
    **json.loads(HOST_JSON))
#: Hosts model for most tests
HOSTS = Hosts.new(
    hosts=[HOST]
)
#: Cluster model for most tests
CLUSTER = Cluster.new(
    name='cluster',
    status='ok',
    hostset=[],
)
#: Cluster model with HOST for most tests
CLUSTER_WITH_HOST = Cluster.new(
    name='cluster',
    status='ok',
    hostset=[HOST],
)
#: Cluster model with flattened HOST for tests
CLUSTER_WITH_FLAT_HOST = Cluster.new(
    name='cluster',
    status='ok',
    hostset=[HOST.address],
)
#: ClusterRestart model for most tests
Code example #33
#: HostStatus JSON for tests
HOST_STATUS_JSON = (
    '{"type": "host_only", "container_manager": {}, "commissaire": '
    '{"status": "available", "last_check": "2016-07-29T20:39:50.529454"}}')
#: Host model for most tests
HOST = Host.new(ssh_priv_key='dGVzdAo=',
                remote_user='******',
                **json.loads(HOST_JSON))
#: HostStatus model for most tests
HOST_STATUS = HostStatus.new(**json.loads(HOST_STATUS_JSON))
#: Hosts model for most tests
HOSTS = Hosts.new(hosts=[HOST])
#: Cluster model for most tests
CLUSTER = Cluster.new(
    name='cluster',
    status='ok',
    hostset=[],
)
#: Cluster model with HOST for most tests
CLUSTER_WITH_HOST = Cluster.new(
    name='cluster',
    status='ok',
    hostset=[HOST],
)
#: Cluster model with flattened HOST for tests
CLUSTER_WITH_FLAT_HOST = Cluster.new(
    name='cluster',
    status='ok',
    hostset=[HOST.address],
)
#: ClusterRestart model for most tests
Code example #34
    def bootstrap(self, ip, cluster_data, key_file, store_manager, oscmd):
        """
        Bootstraps a host via ansible.

        :param ip: IP address to bootstrap.
        :type ip: str
        :param cluster_data: The data required to create a Cluster instance.
        :type cluster_data: dict or None
        :param key_file: Full path to the file holding the private SSH key.
        :type key_file: str
        :param store_manager: Remote object for remote stores
        :type store_manager: commissaire.store.storehandlermanager.
                             StoreHandlerManager
        :param oscmd: OSCmd class to use
        :type oscmd: commissaire.oscmd.OSCmdBase
        :returns: tuple -- (exitcode(int), facts(dict)).
        """
        self.logger.debug('Using {0} as the oscmd class for {1}'.format(
            oscmd.os_type, ip))

        # cluster_data can be None. If it is, change it to an empty dict.
        if cluster_data is None:
            cluster_data = {}
        cluster_type = C.CLUSTER_TYPE_HOST
        network = Network.new(**C.DEFAULT_CLUSTER_NETWORK_JSON)
        try:
            self.logger.debug(
                'Grabbing cluster type from {0}'.format(cluster_data))
            cluster = Cluster.new(**cluster_data)
            cluster_type = cluster.type
            self.logger.debug('Found network {0}'.format(cluster.network))
            network = store_manager.get(Network.new(name=cluster.network))
        except KeyError:
            # Not part of a cluster
            pass

        etcd_config = self._get_etcd_config(store_manager)
        kube_config = self._get_kube_config(store_manager)

        play_vars = {
            'commissaire_cluster_type':
            cluster_type,
            'commissaire_bootstrap_ip':
            ip,
            'commissaire_kubernetes_api_server_url':
            kube_config['server_url'],
            'commissaire_kubernetes_bearer_token':
            kube_config['token'],
            # TODO: Where do we get this?
            'commissaire_docker_registry_host':
            '127.0.0.1',
            # TODO: Where do we get this?
            'commissaire_docker_registry_port':
            8080,
            # TODO: Where do we get this?
            'commissaire_flannel_key':
            '/atomic01/network',
            'commissaire_docker_config_local':
            resource_filename('commissaire', 'data/templates/docker'),
            'commissaire_flanneld_config_local':
            resource_filename('commissaire', 'data/templates/flanneld'),
            'commissaire_kubelet_config_local':
            resource_filename('commissaire', 'data/templates/kubelet'),
            'commissaire_kubernetes_config_local':
            resource_filename('commissaire', 'data/templates/kube_config'),
            'commissaire_kubeconfig_config_local':
            resource_filename('commissaire', 'data/templates/kubeconfig'),
            'commissaire_install_libselinux_python':
            " ".join(oscmd.install_libselinux_python()),
            'commissaire_docker_config':
            oscmd.docker_config,
            'commissaire_flanneld_config':
            oscmd.flanneld_config,
            'commissaire_kubelet_config':
            oscmd.kubelet_config,
            'commissaire_kubernetes_config':
            oscmd.kubernetes_config,
            'commissaire_kubeconfig_config':
            oscmd.kubernetes_kubeconfig,
            'commissaire_install_flannel':
            " ".join(oscmd.install_flannel()),
            'commissaire_install_docker':
            " ".join(oscmd.install_docker()),
            'commissaire_install_kube':
            " ".join(oscmd.install_kube()),
            'commissaire_flannel_service':
            oscmd.flannel_service,
            'commissaire_docker_service':
            oscmd.flannel_service,
            'commissaire_kubelet_service':
            oscmd.kubelet_service,
            'commissaire_kubeproxy_service':
            oscmd.kubelet_proxy_service,
        }

        # If we are a flannel_server network then set the var
        if network.type == 'flannel_server':
            play_vars['commissaire_flanneld_server'] = network.options.get(
                'address')
        elif network.type == 'flannel_etcd':
            play_vars['commissaire_etcd_server_url'] = etcd_config[
                'server_url']

        # Provide the CA if etcd is being used over https
        if (etcd_config['server_url'].startswith('https:')
                and 'certificate_ca_path' in etcd_config):
            play_vars['commissaire_etcd_ca_path'] = oscmd.etcd_ca
            play_vars['commissaire_etcd_ca_path_local'] = (
                etcd_config['certificate_ca_path'])

        # Client Certificate additions
        if 'certificate_path' in etcd_config:
            self.logger.info('Using etcd client certs')
            play_vars['commissaire_etcd_client_cert_path'] = (
                oscmd.etcd_client_cert)
            play_vars['commissaire_etcd_client_cert_path_local'] = (
                etcd_config['certificate_path'])
            play_vars['commissaire_etcd_client_key_path'] = (
                oscmd.etcd_client_key)
            play_vars['commissaire_etcd_client_key_path_local'] = (
                etcd_config['certificate_key_path'])

        if 'certificate_path' in kube_config:
            self.logger.info('Using kubernetes client certs')
            play_vars['commissaire_kubernetes_client_cert_path'] = (
                oscmd.kube_client_cert)
            play_vars['commissaire_kubernetes_client_cert_path_local'] = (
                kube_config['certificate_path'])
            play_vars['commissaire_kubernetes_client_key_path'] = (
                oscmd.kube_client_key)
            play_vars['commissaire_kubernetes_client_key_path_local'] = (
                kube_config['certificate_key_path'])

        # XXX: Need to enable some package repositories for OS 'rhel'
        #      (or 'redhat').  This is a hack for a single corner case.
        #      We discussed how to generalize future cases where we need
        #      extra commands for a specific OS but decided to defer until
        #      more crop up.
        #
        #      See https://github.com/projectatomic/commissaire/pull/56
        #
        if oscmd.os_type in ('rhel', 'redhat'):
            play_vars['commissaire_enable_pkg_repos'] = (
                'subscription-manager repos '
                '--enable=rhel-7-server-extras-rpms '
                '--enable=rhel-7-server-optional-rpms')
        else:
            play_vars['commissaire_enable_pkg_repos'] = 'true'

        self.logger.debug('Variables for bootstrap: {0}'.format(play_vars))

        play_file = resource_filename('commissaire',
                                      'data/ansible/playbooks/bootstrap.yaml')
        results = self._run(ip, key_file, play_file, [0], play_vars)

        return results
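
A sketch of the cluster_data argument this method consumes; the field values are hypothetical. Passing None, or a dict missing these keys, falls back to the host cluster type and the default network:

cluster_data = Cluster.new(
    name='mycluster',
    type='kubernetes',   # hypothetical value; read into cluster_type above
    network='default',   # resolved via store_manager.get(Network.new(name=...))
    status='ok',
    hostset=[]).__dict__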
Code example #35
    def test_bootstrap(self):
        """
        Verify Transport().bootstrap works as expected.
        """
        with patch(
                'commissaire.transport.ansibleapi.TaskQueueManager') as _tqm:
            _tqm().run.return_value = 0

            transport = ansibleapi.Transport()
            transport.variable_manager._fact_cache = {}
            oscmd = MagicMock(OSCmdBase)

            result, facts = transport.bootstrap('10.2.0.2',
                                                Cluster.new().__dict__,
                                                'test/fake_key', MagicMock(),
                                                oscmd)
            # We should have a successful response
            self.assertEquals(0, result)
            # We should see expected calls
            self.assertEquals(1, oscmd.install_docker.call_count)
            self.assertEquals(1, oscmd.install_kube.call_count)

            # Check user-config to playbook-variable translation.
            etcd_config = {
                'server_url': 'https://192.168.1.1:1234',
                'certificate_ca_path': '/path/to/etcd/ca/cert',
                'certificate_path': '/path/to/etcd/client/cert',
                'certificate_key_path': '/path/to/etcd/client/key'
            }
            kube_config = {
                'server_url': 'https://192.168.2.2:4567',
                'certificate_path': '/path/to/kube/client/cert',
                'certificate_key_path': '/path/to/kube/client/key'
            }
            store_manager = MagicMock(StoreHandlerManager)
            store_manager.list_store_handlers.return_value = [
                (EtcdStoreHandler, etcd_config, ()),
                (KubernetesStoreHandler, kube_config, ())
            ]

            store_manager.get.return_value = Network.new(name='default',
                                                         type='flannel_etcd')

            cluster_data = Cluster.new(name='default',
                                       network='default').__dict__

            transport = ansibleapi.Transport()
            transport._run = MagicMock()
            transport._run.return_value = (0, {})
            result, facts = transport.bootstrap('10.2.0.2', cluster_data,
                                                'test/fake_key', store_manager,
                                                oscmd)
            play_vars = transport._run.call_args[0][4]
            self.assertEqual(play_vars['commissaire_etcd_server_url'],
                             'https://192.168.1.1:1234')
            self.assertEqual(play_vars['commissaire_etcd_ca_path_local'],
                             '/path/to/etcd/ca/cert')
            self.assertEqual(
                play_vars['commissaire_etcd_client_cert_path_local'],
                '/path/to/etcd/client/cert')
            self.assertEqual(
                play_vars['commissaire_etcd_client_key_path_local'],
                '/path/to/etcd/client/key')

            # Check 'commissaire_enable_pkg_repos' playbook variable
            # for various operating systems.
            transport = ansibleapi.Transport()
            transport._run = MagicMock()
            transport._run.return_value = (0, {})

            needs_enable_repos = ('redhat', 'rhel')

            for os_type in available_os_types:
                oscmd = get_oscmd(os_type)
                result, facts = transport.bootstrap('10.2.0.2.', cluster_data,
                                                    'test/fake_key',
                                                    MagicMock(), oscmd)
                play_vars = transport._run.call_args[0][4]
                command = play_vars['commissaire_enable_pkg_repos']
                if os_type in needs_enable_repos:
                    self.assertIn('subscription-manager repos', command)
                else:
                    self.assertEqual('true', command)  # no-op command
Code example #36
def clusterexec(store_manager, cluster_name, command, kwargs={}):
    """
    Remotely executes a shell command across a cluster.

    :param store_manager: Proxy object for remote stores
    :type store_manager: commissaire.store.StoreHandlerManager
    :param cluster_name: Name of the cluster to act on
    :type cluster_name: str
    :param command: Top-level command to execute
    :type command: str
    :param kwargs: Keyword arguments for the command
    :type kwargs: dict
    """
    logger = logging.getLogger('clusterexec')

    # TODO: This is a hack and should really be done elsewhere
    command_args = ()
    if command == 'upgrade':
        finished_hosts_key = 'upgraded'
        model_instance = ClusterUpgrade.new(
            name=cluster_name,
            status='in_process',
            started_at=datetime.datetime.utcnow().isoformat(),
            upgraded=[],
            in_process=[],
        )
    elif command == 'restart':
        finished_hosts_key = 'restarted'
        model_instance = ClusterRestart.new(
            name=cluster_name,
            status='in_process',
            started_at=datetime.datetime.utcnow().isoformat(),
            restarted=[],
            in_process=[],
        )
    elif command == 'deploy':
        finished_hosts_key = 'deployed'
        version = kwargs.get('version', '')
        command_args = (version,)
        model_instance = ClusterDeploy.new(
            name=cluster_name,
            status='in_process',
            started_at=datetime.datetime.utcnow().isoformat(),
            version=version,
            deployed=[],
            in_process=[],
        )

    end_status = 'finished'

    try:
        # Set the initial status in the store
        logger.info('Setting initial status.')
        logger.debug('Status={0}'.format(model_instance.to_json()))
        store_manager.save(model_instance)
    except Exception as error:
        logger.error(
            'Unable to save initial state for "{0}" clusterexec due to '
            '{1}: {2}'.format(cluster_name, type(error), error))
        return

    # Collect all host addresses in the cluster
    try:
        cluster = store_manager.get(Cluster.new(
            name=cluster_name, status='', hostset=[]))
    except Exception as error:
        logger.warn(
            'Unable to continue for cluster "{0}" due to '
            '{1}: {2}. Returning...'.format(cluster_name, type(error), error))
        return

    if cluster.hostset:
        logger.debug(
            '{0} hosts in cluster "{1}"'.format(
                len(cluster.hostset), cluster_name))
    else:
        logger.warn('No hosts in cluster "{0}"'.format(cluster_name))

    # TODO: Find better way to do this
    try:
        hosts = store_manager.list(Hosts(hosts=[]))
    except Exception as error:
        logger.warn(
            'No hosts in the cluster. Error: {0}. Exiting clusterexec'.format(
                error))
        return

    for host in hosts.hosts:
        if host.address not in cluster.hostset:
            logger.debug(
                'Skipping {0} as it is not in this cluster.'.format(
                    host.address))
            continue  # Move on to the next one
        oscmd = get_oscmd(host.os)

        # command_list is only used for logging
        command_list = getattr(oscmd, command)(*command_args)
        logger.info('Executing {0} on {1}...'.format(
            command_list, host.address))

        model_instance.in_process.append(host.address)
        try:
            store_manager.save(model_instance)
        except Exception as error:
            logger.error(
                'Unable to save in_process state for "{0}" clusterexec due to '
                '{1}: {2}'.format(cluster_name, type(error), error))
            return

        key = TemporarySSHKey(host, logger)
        key.create()

        try:
            transport = ansibleapi.Transport(host.remote_user)
            exe = getattr(transport, command)
            result, facts = exe(
                host.address, key.path, oscmd, kwargs)
        # XXX: ansibleapi explicitly raises Exception()
        except Exception as ex:
            # If there was a failure set the end_status and break out
            end_status = 'failed'
            logger.error('Clusterexec {0} for {1} failed: {2}: {3}'.format(
                command, host.address, type(ex), ex))
            break
        finally:
            try:
                key.remove()
                logger.debug('Removed temporary key file {0}'.format(key.path))
            except:
                logger.warn(
                    'Unable to remove the temporary key file: {0}'.format(
                        key.path))

        # Set the finished hosts
        new_finished_hosts = getattr(
            model_instance, finished_hosts_key) + [host.address]
        setattr(
            model_instance,
            finished_hosts_key,
            new_finished_hosts)
        try:
            idx = model_instance.in_process.index(host.address)
            model_instance.in_process.pop(idx)
        except ValueError:
            logger.warn('Host {0} was not in_process for {1} {2}'.format(
                host.address, command, cluster_name))
        try:
            store_manager.save(model_instance)
            logger.info('Finished executing {0} for {1} in {2}'.format(
                command, host.address, cluster_name))
        except Exception as error:
            logger.error(
                'Unable to save cluster state for "{0}" clusterexec due to '
                '{1}: {2}'.format(cluster_name, type(error), error))
            return

    # Final set of command result
    model_instance.finished_at = datetime.datetime.utcnow().isoformat()
    model_instance.status = end_status

    logger.info('Cluster {0} final {1} status: {2}'.format(
        cluster_name, command, model_instance.to_json()))

    try:
        store_manager.save(model_instance)
    except Exception as error:
        logger.error(
            'Unable to save final state for "{0}" clusterexec due to '
            '{1}: {2}'.format(cluster_name, type(error), error))

    logger.info('Clusterexec stopping')
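
A hypothetical invocation of the function above, assuming an already configured StoreHandlerManager instance:

# 'upgrade' and 'restart' need no extra arguments; 'deploy' reads a version from kwargs.
clusterexec(store_manager, 'mycluster', 'restart')
clusterexec(store_manager, 'mycluster', 'deploy', kwargs={'version': '7.2.6'})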