# Example #1
    def test_change_package(self, *args):
        """
        AC-1003
        If the new package lacks kube types that are used in the user's
        pods, then forbid this change (expect HTTP 400).
        """
        user = self.user
        url = self.item_url(self.user.id)
        package0 = user.package
        # Second package; the PackageKube row created below is the only
        # kube type it offers.
        package1 = Package(id=1,
                           name='New package',
                           first_deposit=0,
                           currency='USD',
                           period='hour',
                           prefix='$',
                           suffix=' USD')
        # Unpacking also asserts there are exactly three public kubes;
        # only kube0 is used afterwards.
        kube0, kube1, kube2 = Kube.public_kubes().all()
        # new package allows only standard kube
        PackageKube(package=package1, kube=kube0, kube_price=0)
        db.session.commit()

        # change package: user still doesn't have pods
        data = {'package': package1.name}
        response = self.admin_open(url=url, method='PUT', json=data)
        self.assert200(response)

        # add pod with kube type that exists in both packages (Standard kube)
        pod = Pod(id=str(uuid4()),
                  name='test_change_package1',
                  owner_id=user.id,
                  kube_id=0,
                  config='')
        db.session.add(pod)
        db.session.commit()

        # change package: both packages have this kube_type
        data = {'package': package0.name}
        self.assert200(self.admin_open(url=url, method='PUT', json=data))

        # add pod with kube type that exists only in current package
        pod = Pod(id=str(uuid4()),
                  name='test_change_package2',
                  owner_id=user.id,
                  kube_id=1,
                  config='')
        db.session.add(pod)
        db.session.commit()

        # change package: new package doesn't have kube_type of some of user's
        # pods
        data = {'package': package1.name}
        self.assert400(self.admin_open(url=url, method='PUT', json=data))
    def test_drive_can_be_deleted(self):
        """Test LocalStorage.drive_can_be_deleted method."""
        user, _ = self.fixtures.user_fixtures()
        pd = PersistentDisk(name='q', owner_id=user.id, size=1)
        db.session.add(pd)
        db.session.commit()

        # A drive referenced by no pod is deletable.
        flag, reason = pstorage.LocalStorage.drive_can_be_deleted(pd.id)
        self.assertTrue(flag)
        self.assertIsNone(reason)

        pod_id = str(uuid.uuid4())
        pdname = 'somename1'
        # The pod references a persistent disk named `pdname` that does not
        # exist yet, so drive 'q' must remain deletable.
        pod = Pod(id=pod_id,
                  name='somename',
                  owner_id=user.id,
                  kube_id=Kube.get_default_kube_type(),
                  config=json.dumps({
                      "volumes_public": [{
                          "persistentDisk": {
                              "pdSize": 1,
                              "pdName": pdname
                          },
                      }]
                  }))
        db.session.add(pod)
        db.session.commit()

        flag, reason = pstorage.LocalStorage.drive_can_be_deleted(pd.id)
        self.assertTrue(flag)
        self.assertIsNone(reason)

        # Once a disk with the referenced name exists, it is bound to the
        # pod's config and must be flagged as not deletable.
        pd = PersistentDisk(name=pdname, owner_id=user.id, size=1)
        db.session.add(pd)
        db.session.commit()
        flag, reason = pstorage.LocalStorage.drive_can_be_deleted(pd.id)
        self.assertFalse(flag)
        self.assertIsNotNone(reason)

        # delete pod, drive must become deletable
        # NOTE(review): the attribute assignment and the bulk UPDATE below
        # overlap — the bulk UPDATE (synchronize_session=False) marks every
        # pod deleted in the DB while the assignment keeps this in-session
        # object consistent. Presumably intentional for the test; verify.
        pod.status = 'deleted'
        db.session.query(Pod).update({Pod.status: 'deleted'},
                                     synchronize_session=False)
        db.session.commit()
        flag, reason = pstorage.LocalStorage.drive_can_be_deleted(pd.id)
        self.assertTrue(flag)
        self.assertIsNone(reason)
def _add_public_access_type(upd):
    """Backfill ``public_access_type`` in every pod's DB config.

    Pods written before the field existed are classified from data already
    present in their config:

    * ``public_ip`` / ``public_ip_before_freed`` -> ``PUBLIC_IP``
    * ``public_aws``                             -> ``PUBLIC_AWS``
    * ``domain``                                 -> ``DOMAIN`` (and
      ``base_domain`` is backfilled by resolving the domain against the
      BaseDomain / PodDomain tables)
    * none of the above                          -> ``PUBLIC_IP`` (default)

    :param upd: upgrade-script context used only for ``print_log``.
    :raises DomainNotFound: when a pod's domain cannot be matched to a
        known BaseDomain or PodDomain.
    """
    upd.print_log('Update pod configs')
    pods = Pod.all()
    for pod in pods:
        db_config = pod.get_dbconfig()

        # Already migrated -- leave this pod untouched.  (The original code
        # additionally called pod.set_dbconfig(db_config, save=False) before
        # this check, rewriting the still-unmodified config for every pod
        # including skipped ones; that redundant call is removed.)
        if 'public_access_type' in db_config:
            continue

        elif (db_config.get('public_ip', None)
              or db_config.get('public_ip_before_freed', None)):
            db_config['public_access_type'] = PublicAccessType.PUBLIC_IP

        elif db_config.get('public_aws', None):
            db_config['public_access_type'] = PublicAccessType.PUBLIC_AWS

        elif db_config.get('domain', None):
            db_config['public_access_type'] = PublicAccessType.DOMAIN

            if not db_config.get('base_domain', None):
                domain = db_config['domain']

                base_domain = BaseDomain.query.filter_by(name=domain).first()

                if base_domain:
                    # The configured domain IS itself a base domain.
                    db_config['base_domain'] = domain
                    db_config.pop('domain')

                else:
                    # Treat the value as <sub>.<base> and validate both
                    # parts against the DB.
                    sub_domain_part, base_domain_part = domain.split('.', 1)

                    base_domain = BaseDomain.query.filter_by(
                        name=base_domain_part).first()

                    if base_domain is None:
                        raise DomainNotFound(
                            "Can't find BaseDomain for requested domain {0}".
                            format(domain))

                    pod_domain = PodDomain.query.filter_by(
                        name=sub_domain_part,
                        domain_id=base_domain.id,
                        pod_id=pod.id).first()

                    if not pod_domain:
                        raise DomainNotFound(
                            "Can't find PodDomain for requested domain {0}".
                            format(domain))

                    db_config['base_domain'] = base_domain.name
                    # NOTE(review): this re-assigns the unchanged full
                    # domain (a no-op); verify it should not be
                    # sub_domain_part only.
                    db_config['domain'] = domain

        else:
            db_config['public_access_type'] = PublicAccessType.PUBLIC_IP

        # save=False: persisting is presumably handled by the caller /
        # upgrade framework -- TODO confirm.
        pod.set_dbconfig(db_config, save=False)
# Example #4
def _check_if_pod_exists(pod_id):
    """Raise PodNotFound unless the current user owns a pod with this id."""
    owner = KubeUtils.get_current_user()
    try:
        found = Pod.filter(Pod.owner_id == owner.id, Pod.id == pod_id).first()
    except DataError:
        # A malformed (non-uuid) pod_id is reported the same way as a
        # missing pod.
        raise PodNotFound
    if found is None:
        raise PodNotFound
 def test_update_pods_volumes(self):
     """Test pstorage.update_pods_volumes function.

     Creates a pod whose volume config points at an old drive name plus a
     new PersistentDisk with the same pdName; after update_pods_volumes the
     pod's internal volume paths must point at the new drive while the
     public volume keeps the original pdName.
     """
     user, _ = self.fixtures.user_fixtures()
     old_drive_name = 'qq11'
     new_drive_name = 'ww22'
     pdname = 'qwerty1243'
     pod_id = str(uuid.uuid4())
     storage_prefix = pstorage.NODE_LOCAL_STORAGE_PREFIX
     pod = Pod(id=pod_id,
               name='somename',
               owner_id=user.id,
               kube_id=Kube.get_default_kube_type(),
               config=json.dumps({
                   "volumes": [{
                       "hostPath": {
                           "path": storage_prefix + '/' + old_drive_name
                       },
                       "name": "var-qqq7824431125",
                       "annotation": {
                           "localStorage": {
                               "path":
                               storage_prefix + '/' + old_drive_name,
                               "size": 1
                           }
                       }
                   }],
                   "volumes_public": [{
                       "persistentDisk": {
                           "pdSize": 1,
                           "pdName": pdname
                       },
                       "name": "var-qqq7824431125"
                   }]
               }))
     db.session.add(pod)
     new_pd = PersistentDisk(name=pdname,
                             drive_name=new_drive_name,
                             owner_id=user.id,
                             size=1)
     db.session.add(new_pd)
     db.session.commit()
     pstorage.update_pods_volumes(new_pd)
     pods = db.session.query(Pod).all()
     # Bug fix: the original `assertTrue(len(pods), 1)` treated `1` as the
     # failure *message* and only checked that len(pods) was truthy;
     # assertEqual performs the intended comparison.
     self.assertEqual(len(pods), 1)
     new_pod = pods[0]
     config = new_pod.get_dbconfig()
     self.assertEqual(len(config['volumes']), len(config['volumes_public']))
     self.assertEqual(len(config['volumes']), 1)
     # Internal paths must be rewritten to the new drive name...
     new_drive_path = storage_prefix + '/' + new_drive_name
     self.assertEqual(config['volumes'][0]['hostPath']['path'],
                      new_drive_path)
     self.assertEqual(
         config['volumes'][0]['annotation']['localStorage']['path'],
         new_drive_path)
     # ...while the user-facing pdName stays unchanged.
     self.assertEqual(
         config['volumes_public'][0]['persistentDisk']['pdName'], pdname)
# Example #6
def _recreate_ingress_pod_if_needed():
    """Tear down and re-create the KuberDock ingress stack when it is in use.

    The stack is considered "in use" when the ingress controller pod exists
    or when at least one BaseDomain is configured.  In that case the ingress
    pod, its default backend pod and the ingress ConfigMap are deleted, and
    ingress.prepare_ip_sharing() re-creates everything.
    """
    kd_user = User.get_internal()
    ingress_pod = Pod.filter_by(name=KUBERDOCK_INGRESS_POD_NAME,
                                owner=kd_user).first()
    if ingress_pod or BaseDomain.query.first():
        # Bug fix: the original dereferenced ingress_pod.id unconditionally,
        # raising AttributeError when only a BaseDomain existed but no
        # ingress pod was present.
        if ingress_pod:
            PodCollection(kd_user).delete(ingress_pod.id, force=True)
        default_backend_pod = Pod.filter_by(name=KUBERDOCK_BACKEND_POD_NAME,
                                            owner=kd_user).first()
        if default_backend_pod:
            PodCollection(kd_user).delete(default_backend_pod.id, force=True)
        elif ingress_pod:
            # A controller without its backend is an inconsistent state we
            # cannot repair automatically.
            raise Exception(
                'Nginx ingress controller pod exists, but default backend pod '
                'is not found. Something wrong. Please contact support to get '
                'help.')
        c = ConfigMapClient(KubeQuery())
        try:
            c.delete(name=KUBERDOCK_INGRESS_CONFIG_MAP_NAME,
                     namespace=KUBERDOCK_INGRESS_CONFIG_MAP_NAMESPACE)
        except ConfigMapNotFound:
            # Nothing to clean up -- the map was never created.
            pass
        sleep(30)  # TODO: Workaround. Remove it when AC-5470 will be fixed
        ingress.prepare_ip_sharing()
# Example #7
 def setUp(self):
     """Build usage-state fixtures: three pods with IP states and three
     persistent-disk states; the first of each is then closed so tests see
     a mix of open and ended states."""
     # create test data
     self.another_user, _ = fixtures.user_fixtures(
         username='******', email='*****@*****.**')
     config = '{"containers":[{"kubes":1}]}'
     # (pod, ip) pairs: two pods for self.user, one for another_user.
     self.ips = [(Pod(id=str(uuid4()), owner_id=self.user.id, name='pod1',
                      kube_id=0, config=config).save(), u'192.168.43.132'),
                 (Pod(id=str(uuid4()), owner_id=self.user.id, name='pod2',
                      kube_id=0, config=config).save(), u'192.168.43.133'),
                 (Pod(id=str(uuid4()), owner_id=self.another_user.id,
                      name='pod3',
                      kube_id=0, config=config).save(), u'192.168.43.134')]
     for pod, ip in self.ips:
         IpState.start(pod.id, int(ip_address(ip)))
     # (owner_id, disk name, size) triples for persistent-disk states.
     self.pds = [(self.user.id, 'first_disk', 2),
                 (self.user.id, 'second_disk', 16),
                 (self.another_user.id, 'third_disk', 3)]
     for user_id, name, size in self.pds:
         PersistentDiskState.start(user_id, name, size)
     # Make sure end timestamps differ from start timestamps.
     sleep(1)
     # Close only the first IP state and the first PD state.
     IpState.end(self.ips[0][0].id, int(ip_address(self.ips[0][1])))
     PersistentDiskState.end(self.pds[0][0], self.pds[0][1])
     self.stop_date = datetime.utcnow()
def upgrade(upd, with_testing, *args, **kwargs):
    """Re-issue the etcd-dns certificate pair and recreate the internal
    DNS pod owned by the KuberDock internal user."""
    upd.print_log('Update dns pod...')

    # Generate, sign and install the etcd-dns certificate/key pair.
    cert_commands = (
        'etcd-ca --depot-path /root/.etcd-ca new-cert --ip "10.254.0.10" --passphrase "" etcd-dns',
        'etcd-ca --depot-path /root/.etcd-ca sign --passphrase "" etcd-dns',
        'etcd-ca --depot-path /root/.etcd-ca export etcd-dns --insecure --passphrase "" | tar -xf -',
        'mv etcd-dns.crt /etc/pki/etcd/etcd-dns.crt',
        'mv etcd-dns.key.insecure /etc/pki/etcd/etcd-dns.key',
    )
    for command in cert_commands:
        local(command)

    owner = User.filter_by(username=KUBERDOCK_INTERNAL_USER).one()

    # Drop the existing DNS pod, if any, before adding the fresh one.
    old_dns_pod = Pod.filter_by(name='kuberdock-dns', owner=owner).first()
    if old_dns_pod:
        PodCollection(owner).delete(old_dns_pod.id, force=True)

    dns_config = get_dns_pod_config()
    check_internal_pod_data(dns_config, owner)
    new_dns_pod = PodCollection(owner).add(dns_config, skip_check=True)
    PodCollection(owner).update(new_dns_pod['id'], {'command': 'start'})
    def test_execute_es_query(self, es_mock):
        """Test elasticsearch_utils.execute_es_query function."""

        # Add two log pods config + two approprate nodes
        internal_user = User.get_internal()
        pod_id1 = str(uuid4())
        service1 = 'srv1'
        namespace1 = 'ns1'
        pod_id2 = str(uuid4())
        service2 = 'srv2'
        namespace2 = 'ns2'

        host1 = 'h1'
        host2 = 'h2'

        kube_id = Kube.get_default_kube_type()

        pod1 = Pod(id=pod_id1,
                   name=get_kuberdock_logs_pod_name(host1),
                   owner_id=internal_user.id,
                   kube_id=kube_id,
                   config=json.dumps({
                       "service": service1,
                       "namespace": namespace1
                   }),
                   status='RUNNING')
        pod2 = Pod(id=pod_id2,
                   name=get_kuberdock_logs_pod_name(host2),
                   owner_id=internal_user.id,
                   kube_id=kube_id,
                   config=json.dumps({
                       "service": service2,
                       "namespace": namespace2
                   }),
                   status='RUNNING')
        db.session.add_all([pod1, pod2])
        db.session.commit()

        # Nodes matching the log pods' hostnames; presumably required for
        # execute_es_query to discover the ES endpoints -- TODO confirm.
        node1 = Node(ip='123.123.123',
                     hostname=host1,
                     kube_id=kube_id,
                     state='completed',
                     upgrade_status='applied')
        node2 = Node(ip='123.123.124',
                     hostname=host2,
                     kube_id=kube_id,
                     state='completed',
                     upgrade_status='applied')
        db.session.add_all([node1, node2])
        db.session.commit()

        size = 123
        index = '1234qwerty'
        query = None
        sort = None
        search_result = {'hits': {'total': 333, 'hits': [1, 2, 3]}}
        search_mock = es_mock.return_value.search
        search_mock.return_value = search_result
        res = elasticsearch_utils.execute_es_query(index, query, size, sort)
        # The function must unwrap the raw ES response into total/hits.
        self.assertEqual(
            res, {
                'total': search_result['hits']['total'],
                'hits': search_result['hits']['hits'],
            })
        # ES client must be built with one k8s-proxy URL per log service.
        prefix1 = elasticsearch_utils.K8S_PROXY_PREFIX + \
            '/namespaces/' + namespace1 + '/services/' + service1 + ':9200/'
        prefix2 = elasticsearch_utils.K8S_PROXY_PREFIX + \
            '/namespaces/' + namespace2 + '/services/' + service2 + ':9200/'
        es_mock.assert_called_once_with([
            {
                'host': KUBE_API_HOST,
                'port': KUBE_API_PORT,
                'url_prefix': prefix1,
            },
            {
                'host': KUBE_API_HOST,
                'port': KUBE_API_PORT,
                'url_prefix': prefix2,
            },
        ])
        # With query/sort left as None the body contains only the size.
        search_mock.assert_called_once_with(index=index, body={'size': size})

        # Non-None query and sort must be forwarded in the search body.
        query = {'a': 1}
        sort = {'b': 2}
        elasticsearch_utils.execute_es_query(index, query, size, sort)
        search_mock.assert_called_with(index=index,
                                       body={
                                           'size': size,
                                           'sort': sort,
                                           'query': query
                                       })
        # Transport failures must surface as LogsError.
        search_mock.side_effect = RequestException('!!!')
        with self.assertRaises(elasticsearch_utils.LogsError):
            elasticsearch_utils.execute_es_query(index, query, size, sort)
    def test_get_drives_from_db(self):
        """Test PersistentStorage._get_drives_from_db.

        Covers: all-users vs per-user listing, linkedPods population when a
        pod references a drive by name, and in_use/forbidDeletion flags when
        a disk is bound to a pod.
        """
        user, _ = self.fixtures.user_fixtures()
        pd1 = PersistentDisk(name='q', owner_id=user.id, size=1)
        pd2 = PersistentDisk(name='q1', owner_id=user.id, size=2)
        # Same name, different owner -- must not leak into user's listing.
        pd3 = PersistentDisk(name='q1', owner_id=1, size=2)
        db.session.add_all([pd1, pd2, pd3])
        db.session.commit()

        ps = pstorage.PersistentStorage()
        drives = ps._get_drives_from_db()
        self.assertEqual(len(drives), 3)
        self.assertEqual([[], [], []], [item['linkedPods'] for item in drives])

        ps = pstorage.PersistentStorage()
        drives = ps._get_drives_from_db(user_id=user.id)
        self.assertEqual(len(drives), 2)
        self.assertEqual([[], []], [item['linkedPods'] for item in drives])
        self.assertEqual({user.id: user.username},
                         {item['owner_id']: item['owner']
                          for item in drives})
        self.assertEqual({'q1', 'q'}, {item['name'] for item in drives})
        self.assertEqual([False, False], [item['in_use'] for item in drives])
        self.assertEqual([False, False],
                         [item['forbidDeletion'] for item in drives])

        # A pod whose volumes_public references drive 'q' by name.
        pod_id = str(uuid.uuid4())
        pod_name = 'somename1'
        pod = Pod(id=pod_id,
                  name=pod_name,
                  owner_id=user.id,
                  kube_id=Kube.get_default_kube_type(),
                  config=json.dumps({
                      "volumes_public": [{
                          "persistentDisk": {
                              "pdSize": 1,
                              "pdName": 'q'
                          },
                      }]
                  }))
        db.session.add(pod)
        db.session.commit()
        ps = pstorage.PersistentStorage()
        drives = ps._get_drives_from_db(user_id=user.id)
        self.assertEqual(len(drives), 2)
        with_pods = None
        without_pods = None
        for drive in drives:
            if drive['name'] == 'q':
                with_pods = drive
            else:
                without_pods = drive
        self.assertEqual(with_pods['linkedPods'], [{
            'podId': pod_id,
            'name': pod_name
        }])
        self.assertEqual(without_pods['linkedPods'], [])
        # Referenced by config only -- not yet bound, so still deletable.
        self.assertEqual([False, False], [item['in_use'] for item in drives])
        self.assertEqual([False, False],
                         [item['forbidDeletion'] for item in drives])

        # Binding the disk to the pod flips in_use/forbidDeletion for it.
        pd1.pod_id = pod.id
        db.session.commit()
        drives = ps._get_drives_from_db(user_id=user.id)
        # Portability fix: use the next() builtin (Python 2.6+/3) instead of
        # the Python-2-only generator .next() method.
        pd1_data = next(item for item in drives if item['id'] == pd1.id)
        pd2_data = next(item for item in drives if item['id'] == pd2.id)
        self.assertEqual(True, pd1_data['in_use'])
        self.assertEqual(False, pd2_data['in_use'])
        self.assertEqual(True, pd1_data['forbidDeletion'])
        self.assertEqual(False, pd2_data['forbidDeletion'])