Example #1
def add_kubes_and_packages():
    # Create default packages and kubes
    # Package and Kube with id=0 are default
    # and must be undeletable (always present with id=0) for fallback
    k_internal = Kube(id=Kube.get_internal_service_kube_type(),
                      name='Internal service', cpu=.02, cpu_units='Cores',
                      memory=64, memory_units='MB', disk_space=1,
                      disk_space_units='GB', included_traffic=0)
    k1 = Kube(id=0, name='Tiny', cpu=.12, cpu_units='Cores',
              memory=64, memory_units='MB', disk_space=1,
              disk_space_units='GB', included_traffic=0)
    k2 = Kube(name='Standard', cpu=.25, cpu_units='Cores',
              memory=128, memory_units='MB', disk_space=1,
              disk_space_units='GB', included_traffic=0, is_default=True)
    k3 = Kube(name='High memory', cpu=.25, cpu_units='Cores',
              memory=256, memory_units='MB', disk_space=3,
              disk_space_units='GB', included_traffic=0)

    p1 = Package(id=0, name='Standard package', first_deposit=0,
                 currency='USD', period='month', prefix='$',
                 suffix=' USD', is_default=True)
    pk1 = PackageKube(package=p1, kube=k1, kube_price=0)
    pk2 = PackageKube(package=p1, kube=k2, kube_price=0)
    pk3 = PackageKube(package=p1, kube=k3, kube_price=0)
    db.session.add_all([k1, k2, k3, p1, pk1, pk2, pk3, k_internal])

    db.session.commit()
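
A minimal usage sketch (hedged: `create_app` is an assumed Flask app factory, not shown in this snippet; the function needs an application context so that `db.session` is bound):

# Hypothetical usage; `create_app` is an assumed app factory.
app = create_app()
with app.app_context():
    add_kubes_and_packages()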
Example #2
    def run(self, hostname, kube_type, do_deploy, wait, timeout, testing,
            docker_options, ebs_volume, ls_device, verbose):

        if kube_type is None:
            kube_type_id = Kube.get_default_kube_type()
        else:
            kube_type = Kube.get_by_name(kube_type)
            if kube_type is None:
                raise InvalidCommand('Kube type with name `{0}` not '
                                     'found.'.format(kube_type))
            kube_type_id = kube_type.id

        options = None
        testing = testing or WITH_TESTING
        if docker_options is not None:
            options = {'DOCKER': docker_options}

        if get_maintenance():
            raise InvalidCommand(
                'Kuberdock is in maintenance mode. Operation canceled'
            )
        try:
            check_node_data({'hostname': hostname, 'kube_type': kube_type_id})
            if not isinstance(ls_device, (tuple, list)):
                ls_device = (ls_device,)
            res = create_node(None, hostname, kube_type_id, do_deploy, testing,
                              options=options,
                              ls_devices=ls_device, ebs_volume=ebs_volume)
            print(res.to_dict())
            if wait:
                wait_for_nodes([hostname], timeout, verbose)
        except Exception as e:
            raise InvalidCommand("Node management error: {0}".format(e))
Example #3
def check_internal_pod_data(data, user=None):
    validator = V(user=None if user is None else user.username)
    if not validator.validate(data, new_pod_schema):
        raise APIError(validator.errors)
    kube_type = data.get('kube_type', Kube.get_default_kube_type())
    if Kube.get_internal_service_kube_type() != kube_type:
        raise APIError('Internal pod must be of type {0}'.format(
            Kube.get_internal_service_kube_type()))
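
A hedged call sketch; apart from `kube_type`, the payload fields are assumptions, since the real required keys live in `new_pod_schema` (not shown here):

# Sketch only: 'name' is an assumed schema field.
data = {
    'name': 'kuberdock-dns',
    'kube_type': Kube.get_internal_service_kube_type(),
}
check_internal_pod_data(data)  # raises APIError on schema or kube-type mismatch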
Example #4
    def test_api_get_pod_states(self, sel_pod_states_mock):
        missing_podid = str(uuid4())
        endpoint = self.url + '/pod-states/'
        url = endpoint + missing_podid + '/0'
        response = self.user_open(url, auth=self.userauth)
        # pod not found
        self.assert404(response)

        # add one more user and a pod
        user2, user2_password = fixtures.user_fixtures()

        pod1 = pod_models.Pod(id=str(uuid4()),
                              name='p1',
                              owner_id=self.user.id,
                              kube_id=Kube.get_default_kube_type(),
                              config='',
                              status='RUNNING')
        pod2 = pod_models.Pod(id=str(uuid4()),
                              name='p2',
                              owner_id=user2.id,
                              kube_id=Kube.get_default_kube_type(),
                              config='',
                              status='RUNNING')
        db.session.add_all([pod1, pod2])
        db.session.commit()
        url = endpoint + pod2.id + '/0'
        response = self.user_open(url)
        # pod belongs to another user
        self.assert403(response)

        sel_pod_states_mock.return_value = {'a': [1, 2, 3]}
        response = self.open(url, auth=(user2.username, user2_password))
        self.assert200(response)
        sel_pod_states_mock.assert_called_once_with(pod2.id, 0)
        self.assertEqual(response.json, {
            u'status': 'OK',
            'data': sel_pod_states_mock.return_value
        })

        response = self.open(url, auth=self.adminauth)
        self.assert200(response)

        # check depth conversion
        url = endpoint + pod2.id + '/qwerty'
        response = self.open(url, auth=(user2.username, user2_password))
        self.assert200(response)
        sel_pod_states_mock.assert_called_with(pod2.id, 1)

        url = endpoint + pod2.id + '/123'
        response = self.open(url, auth=(user2.username, user2_password))
        self.assert200(response)
        sel_pod_states_mock.assert_called_with(pod2.id, 123)
Example #5
    def test_check_node_is_locked(self):
        """Test LocalStorage.check_node_is_locked method."""
        kube_type = Kube.get_default_kube_type()
        node1 = Node(ip='192.168.1.2', hostname='host1', kube_id=kube_type)
        node2 = Node(ip='192.168.1.3', hostname='host2', kube_id=kube_type)
        db.session.add_all([node1, node2])
        db.session.commit()
        user, _ = self.fixtures.user_fixtures()
        pd = PersistentDisk(name='q', owner_id=user.id, size=1)
        db.session.add(pd)
        db.session.commit()

        flag, reason = pstorage.LocalStorage.check_node_is_locked(node1.id)
        self.assertFalse(flag)
        self.assertIsNone(reason)
        flag, reason = pstorage.LocalStorage.check_node_is_locked(node2.id)
        self.assertFalse(flag)

        pd = PersistentDisk(name='w',
                            owner_id=user.id,
                            size=1,
                            node_id=node1.id)
        db.session.add(pd)
        db.session.commit()

        flag, reason = pstorage.LocalStorage.check_node_is_locked(node1.id)
        self.assertTrue(flag)
        self.assertIsNotNone(reason)
Example #6
    def test_api_get_node_logs(self, get_logs_mock):
        hostname = 'qwerty'
        ip1 = '192.168.1.2'
        host1 = 'host1'
        ip2 = '192.168.1.3'
        host2 = 'host2'
        kube_type = Kube.get_default_kube_type()
        node1 = Node(ip=ip1, hostname=host1, kube_id=kube_type)
        node2 = Node(ip=ip2, hostname=host2, kube_id=kube_type)
        db.session.add_all((node1, node2))
        db.session.commit()

        url = self.url + '/node/' + hostname
        # unknown hostname
        response = self.admin_open(url)
        self.assert404(response)

        url = self.url + '/node/' + host2
        get_logs_mock.return_value = {'2': 3}
        response = self.admin_open(url)
        self.assert200(response)
        self.assertEqual(response.json, {
            u'status': u'OK',
            u'data': get_logs_mock.return_value
        })
        get_logs_mock.assert_called_once_with(host2, None, 100, host=ip2)
Example #7
def no_billing_data():
    return {
        'billing': 'No billing',
        'packages': [p.to_dict(with_kubes=True) for p in Package.query.all()],
        'default': {
            'kubeType': Kube.get_default_kube().to_dict(),
            'packageId': Package.get_default().to_dict(),
        }
    }
Example #8
    def test_update_pods_volumes(self):
        """Test pstorage.update_pods_volumes function"""
        user, _ = self.fixtures.user_fixtures()
        old_drive_name = 'qq11'
        new_drive_name = 'ww22'
        pdname = 'qwerty1243'
        pod_id = str(uuid.uuid4())
        storage_prefix = pstorage.NODE_LOCAL_STORAGE_PREFIX
        pod = Pod(id=pod_id,
                  name='somename',
                  owner_id=user.id,
                  kube_id=Kube.get_default_kube_type(),
                  config=json.dumps({
                      "volumes": [{
                          "hostPath": {
                              "path": storage_prefix + '/' + old_drive_name
                          },
                          "name": "var-qqq7824431125",
                          "annotation": {
                              "localStorage": {
                                  "path": storage_prefix + '/' + old_drive_name,
                                  "size": 1
                              }
                          }
                      }],
                      "volumes_public": [{
                          "persistentDisk": {
                              "pdSize": 1,
                              "pdName": pdname
                          },
                          "name": "var-qqq7824431125"
                      }]
                  }))
        db.session.add(pod)
        new_pd = PersistentDisk(name=pdname,
                                drive_name=new_drive_name,
                                owner_id=user.id,
                                size=1)
        db.session.add(new_pd)
        db.session.commit()
        pstorage.update_pods_volumes(new_pd)
        pods = db.session.query(Pod).all()
        self.assertEqual(len(pods), 1)
        new_pod = pods[0]
        config = new_pod.get_dbconfig()
        self.assertEqual(len(config['volumes']), len(config['volumes_public']))
        self.assertEqual(len(config['volumes']), 1)
        new_drive_path = storage_prefix + '/' + new_drive_name
        self.assertEqual(config['volumes'][0]['hostPath']['path'],
                         new_drive_path)
        self.assertEqual(
            config['volumes'][0]['annotation']['localStorage']['path'],
            new_drive_path)
        self.assertEqual(
            config['volumes_public'][0]['persistentDisk']['pdName'], pdname)
Example #9
    def _validate_kube_type_in_user_package(self, exists, field, value):
        if exists and self.user:
            if self.user == KUBERDOCK_INTERNAL_USER and \
                    value == Kube.get_internal_service_kube_type():
                return
            package = User.get(self.user).package
            if value not in [k.kube_id for k in package.kubes]:
                self._error(field,
                            "Pod can't be created, because your package "
                            "\"{0}\" does not include kube type with id "
                            "\"{1}\"".format(package.name, value))
Example #10
    def test_change_package(self, *args):
        """
        AC-1003
        If the new package lacks kube types that are used in the user's
        pods, forbid the change.
        """
        user = self.user
        url = self.item_url(self.user.id)
        package0 = user.package
        package1 = Package(id=1,
                           name='New package',
                           first_deposit=0,
                           currency='USD',
                           period='hour',
                           prefix='$',
                           suffix=' USD')
        kube0, kube1, kube2 = Kube.public_kubes().all()
        # new package allows only standard kube
        PackageKube(package=package1, kube=kube0, kube_price=0)
        db.session.commit()

        # change package: user still doesn't have pods
        data = {'package': package1.name}
        response = self.admin_open(url=url, method='PUT', json=data)
        self.assert200(response)

        # add pod with kube type that exists in both packages (Standard kube)
        pod = Pod(id=str(uuid4()),
                  name='test_change_package1',
                  owner_id=user.id,
                  kube_id=0,
                  config='')
        db.session.add(pod)
        db.session.commit()

        # change package: both packages have this kube_type
        data = {'package': package0.name}
        self.assert200(self.admin_open(url=url, method='PUT', json=data))

        # add pod with kube type that exists only in current package
        pod = Pod(id=str(uuid4()),
                  name='test_change_package2',
                  owner_id=user.id,
                  kube_id=1,
                  config='')
        db.session.add(pod)
        db.session.commit()

        # change package: new package doesn't have kube_type of some of user's
        # pods
        data = {'package': package1.name}
        self.assert400(self.admin_open(url=url, method='PUT', json=data))
Example #11
    def add_two_nodes(self):
        ip1 = '192.168.1.2'
        host1 = 'host1'
        ip2 = '192.168.1.3'
        host2 = 'host2'
        kube_type = Kube.get_default_kube_type()
        node1 = Node(ip=ip1, hostname=host1, kube_id=kube_type)
        node2 = Node(ip=ip2, hostname=host2, kube_id=kube_type)
        db.session.add_all((node1, node2))
        db.session.commit()
        self.node1 = node1
        self.node2 = node2
        return (node1, node2)
Example #12
def pod(**kwargs):
    if 'owner_id' not in kwargs and 'owner' not in kwargs:
        kwargs['owner'], _ = user_fixtures()
    if 'kube_id' not in kwargs and 'kube' not in kwargs:
        kwargs['kube'] = Kube.get_default_kube()
    if 'config' in kwargs and not isinstance(kwargs['config'], basestring):
        kwargs['config'] = json.dumps(kwargs['config'])
    namespace = str(uuid4())
    kwargs.setdefault('id', namespace)
    kwargs.setdefault('name', 'pod-' + randstr())
    kwargs.setdefault(
        'config',
        json.dumps({
            'node': None,
            'replicas': 1,
            'secrets': [],
            'namespace': namespace,
            'restartPolicy': 'Never',
            'volumes': [],
            'sid': str(uuid4()),
            'containers': [{
                'kubes': 1,
                'terminationMessagePath': None,
                'name': 'curl' + randstr(),
                'workingDir': '',
                'image': 'appropriate/curl',
                'args': ['curl', 'httpbin.org/get'],
                'volumeMounts': [],
                'sourceUrl': 'hub.docker.com/r/appropriate/curl',
                'env': [{
                    'name': 'PATH',
                    'value': '/usr/local/sbin:/usr/local/bin:'
                             '/usr/sbin:/usr/bin:/sbin:/bin'
                }],
                'ports': []
            }]
        }))
    return Pod(**kwargs).save()
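
A usage sketch for this fixture; `status` is an assumed `Pod` column (it appears in the tests above), and any dict passed as `config` is serialized automatically:

# Sketch: unspecified fields fall back to the generated defaults above.
p = pod(name='my-test-pod', status='RUNNING',
        config={'volumes': [], 'containers': []})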
Example #13
def node(hostname=None, ip=None, kube_id=None, owner=None):
    if owner is None:
        owner, _ = user_fixtures()
    if kube_id is None:
        kube_id = Kube.get_default_kube()
    if ip is None:
        ip = random_ip()
    if hostname is None:
        hostname = randstr()

    return Node(ip=ip,
                hostname=hostname,
                kube_id=kube_id.id,
                state=NODE_STATUSES.pending)
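
A matching usage sketch; persistence is left to the caller, since the fixture returns an unsaved Node:

# Sketch: commit explicitly, as the fixture does not save the Node.
n = node(hostname='test-' + randstr())
db.session.add(n)
db.session.commit()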
Example #14
    def test_drive_can_be_deleted(self):
        """Test LocalStorage.drive_can_be_deleted method."""
        user, _ = self.fixtures.user_fixtures()
        pd = PersistentDisk(name='q', owner_id=user.id, size=1)
        db.session.add(pd)
        db.session.commit()

        flag, reason = pstorage.LocalStorage.drive_can_be_deleted(pd.id)
        self.assertTrue(flag)
        self.assertIsNone(reason)

        pod_id = str(uuid.uuid4())
        pdname = 'somename1'
        pod = Pod(id=pod_id,
                  name='somename',
                  owner_id=user.id,
                  kube_id=Kube.get_default_kube_type(),
                  config=json.dumps({
                      "volumes_public": [{
                          "persistentDisk": {
                              "pdSize": 1,
                              "pdName": pdname
                          },
                      }]
                  }))
        db.session.add(pod)
        db.session.commit()

        flag, reason = pstorage.LocalStorage.drive_can_be_deleted(pd.id)
        self.assertTrue(flag)
        self.assertIsNone(reason)

        pd = PersistentDisk(name=pdname, owner_id=user.id, size=1)
        db.session.add(pd)
        db.session.commit()
        flag, reason = pstorage.LocalStorage.drive_can_be_deleted(pd.id)
        self.assertFalse(flag)
        self.assertIsNotNone(reason)

        # delete the pod; the drive must become deletable
        pod.status = 'deleted'
        db.session.query(Pod).update({Pod.status: 'deleted'},
                                     synchronize_session=False)
        db.session.commit()
        flag, reason = pstorage.LocalStorage.drive_can_be_deleted(pd.id)
        self.assertTrue(flag)
        self.assertIsNone(reason)
Example #15
    def test_get_drives_from_db(self):
        """Test PersistentStorage._get_drives_from_db"""
        user, _ = self.fixtures.user_fixtures()
        pd1 = PersistentDisk(name='q', owner_id=user.id, size=1)
        pd2 = PersistentDisk(name='q1', owner_id=user.id, size=2)
        pd3 = PersistentDisk(name='q1', owner_id=1, size=2)
        db.session.add_all([pd1, pd2, pd3])
        db.session.commit()

        ps = pstorage.PersistentStorage()
        drives = ps._get_drives_from_db()
        self.assertEqual(len(drives), 3)
        self.assertEqual([[], [], []], [item['linkedPods'] for item in drives])

        ps = pstorage.PersistentStorage()
        drives = ps._get_drives_from_db(user_id=user.id)
        self.assertEqual(len(drives), 2)
        self.assertEqual([[], []], [item['linkedPods'] for item in drives])
        self.assertEqual({user.id: user.username},
                         {item['owner_id']: item['owner']
                          for item in drives})
        self.assertEqual({'q1', 'q'}, {item['name'] for item in drives})
        self.assertEqual([False, False], [item['in_use'] for item in drives])
        self.assertEqual([False, False],
                         [item['forbidDeletion'] for item in drives])

        pod_id = str(uuid.uuid4())
        pod_name = 'somename1'
        pod = Pod(id=pod_id,
                  name=pod_name,
                  owner_id=user.id,
                  kube_id=Kube.get_default_kube_type(),
                  config=json.dumps({
                      "volumes_public": [{
                          "persistentDisk": {
                              "pdSize": 1,
                              "pdName": 'q'
                          },
                      }]
                  }))
        db.session.add(pod)
        db.session.commit()
        ps = pstorage.PersistentStorage()
        drives = ps._get_drives_from_db(user_id=user.id)
        self.assertEqual(len(drives), 2)
        with_pods = None
        without_pods = None
        for drive in drives:
            if drive['name'] == 'q':
                with_pods = drive
            else:
                without_pods = drive
        self.assertEqual(with_pods['linkedPods'], [{
            'podId': pod_id,
            'name': pod_name
        }])
        self.assertEqual(without_pods['linkedPods'], [])
        self.assertEqual([False, False], [item['in_use'] for item in drives])
        self.assertEqual([False, False],
                         [item['forbidDeletion'] for item in drives])

        pd1.pod_id = pod.id
        db.session.commit()
        drives = ps._get_drives_from_db(user_id=user.id)
        pd1_data = next(item for item in drives if item['id'] == pd1.id)
        pd2_data = next(item for item in drives if item['id'] == pd2.id)
        self.assertEqual(True, pd1_data['in_use'])
        self.assertEqual(False, pd2_data['in_use'])
        self.assertEqual(True, pd1_data['forbidDeletion'])
        self.assertEqual(False, pd2_data['forbidDeletion'])
Example #16
def kube_type(**kwargs):
    return Kube(**dict(
        kwargs, name=randstr(), cpu=.25, memory=64, disk_space=1)).save()
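
Note the `dict(kwargs, name=..., ...)` construction: the explicit keywords win, so caller-supplied `name`, `cpu`, `memory`, or `disk_space` values are silently overridden; only other kwargs pass through. A sketch:

# Sketch: is_default survives, but a caller-supplied name would not.
k = kube_type(is_default=True)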
Example #17
    def test_get_nodes_collection(self, get_all_nodes_mock,
                                  fix_missed_nodes_mock, system_settings_mock):
        """Test for kapi.node_utils.get_nodes_collection function."""
        node1, node2 = self.add_two_nodes()
        ip3 = '192.168.1.4'
        host3 = 'host3'
        kube_type = Kube.get_default_kube_type()
        node3 = Node(ip=ip3,
                     hostname=host3,
                     kube_id=kube_type,
                     state=NODE_STATUSES.pending)
        db.session.add(node3)
        db.session.commit()
        get_all_nodes_mock.return_value = [{
            'metadata': {
                'name': node1.hostname
            },
            'status': {
                'conditions': [{
                    'type': 'Ready',
                    'status': 'True'
                }]
            }
        }, {
            'metadata': {
                'name': node2.hostname
            },
            'status': {
                'conditions': [{
                    'type': 'Unknown',
                    'status': 'True',
                    'reason': 'qwerty',
                    'lastTransitionTime': 'asdfg'
                }]
            }
        }]
        fix_missed_nodes_mock.return_value = (node1, node2, node3)
        system_settings_mock.get_by_name = self.get_by_name
        res = node_utils.get_nodes_collection()
        get_all_nodes_mock.assert_called_once_with()
        # AC-3349 Fix. Not needed due to fix in 'get_nodes_collection'.
        # fix_missed_nodes_mock.assert_called_once_with(
        #     [node1, node2, node3],
        #     {x['metadata']['name']: x
        #      for x in get_all_nodes_mock.return_value})
        self.assertEqual(len(res), 3)
        self.assertEqual(
            res[0], {
                'id': node1.id,
                'ip': node1.ip,
                'hostname': node1.hostname,
                'kube_type': node1.kube_id,
                'status': NODE_STATUSES.running,
                'reason': '',
                'install_log': '',
                'resources': {}
            })
        self.assertEqual(res[1]['id'], node2.id)
        self.assertEqual(res[1]['ip'], node2.ip)
        self.assertEqual(res[1]['hostname'], node2.hostname)
        self.assertEqual(res[1]['status'], NODE_STATUSES.troubles)
        self.assertTrue('qwerty' in res[1]['reason'])
        self.assertTrue('asdfg' in res[1]['reason'])

        self.assertEqual(res[2]['id'], node3.id)
        self.assertEqual(res[2]['ip'], node3.ip)
        self.assertEqual(res[2]['hostname'], node3.hostname)
        self.assertEqual(res[2]['status'], NODE_STATUSES.pending)
Example #18
def upgrade(upd, with_testing, *args, **kwargs):
    # 00085_update.py
    upd.print_log('Add default Persistent Disks size in pods config...')
    pods = Pod.query.all()
    for pod in pods:
        upd.print_log('Processing pod {0}'.format(pod.name))
        config = pod.get_dbconfig()
        config['volumes_public'] = with_size(config.get('volumes_original', []), pod.owner_id)
        pod.set_dbconfig(config, save=False)
    for pod in pods:
        config = pod.get_dbconfig()
        config.pop('volumes_original', None)
        pod.set_dbconfig(config, save=False)
    db.session.commit()

    # 00086_update.py
    upd.print_log('Update kubes to hard limits')
    internal = Kube.get_by_name('Internal service')
    if internal:
        internal.cpu = 0.02
        internal.save()
    small = Kube.get_by_name('Small')
    if small:
        small.cpu = 0.05
        small.save()
    standard = Kube.get_by_name('Standard')
    if standard:
        standard.cpu = 0.25
        standard.save()
    high = Kube.get_by_name('High memory')
    if high:
        high.cpu = 0.5
        high.save()
    upd.print_log('Setup k8s2etcd middleware service')
    upd.print_log(
        helpers.local(
            "cat > /etc/systemd/system/kuberdock-k8s2etcd.service << 'EOF' {0}"
            .format(SERVICE_FILE))
    )

    helpers.local('systemctl daemon-reload')
    upd.print_log(helpers.local('systemctl reenable kuberdock-k8s2etcd'))
    upd.print_log(helpers.local('systemctl restart kuberdock-k8s2etcd'))

    upd.print_log('Add after etcd.service to kube-apiserver service file')
    upd.print_log(
        helpers.local(
            "cat > /etc/systemd/system/kube-apiserver.service << 'EOF' {0}"
            .format(K8S_API_SERVICE_FILE))
    )
    upd.print_log('Turn off watch-cache in kube_apiserver')
    lines = []
    with open(KUBE_API_SERVER_PATH) as f:
        lines = f.readlines()
    with open(KUBE_API_SERVER_PATH, 'w+') as f:
        for line in lines:
            if (KUBE_API_SERVER_ARG in line and
                    KUBE_API_WATCHCACHE_DISABLE not in line):
                s = line.split('"')
                s[1] += KUBE_API_WATCHCACHE_DISABLE
                line = '"'.join(s)
            f.write(line)
    helpers.restart_master_kubernetes(with_enable=True)

    # 00087_update.py
    upd.print_log('Upgrade namespaces for PD...')
    config = ConfigParser.RawConfigParser()
    config.read(KUBERDOCK_SETTINGS_FILE)
    ns = MASTER_IP
    if not config.has_option('main', 'PD_NAMESPACE'):
        if CEPH:
            # Store the default CEPH pool as the namespace. It was already
            # used by the KD cluster, so we do not change it.
            ns = OLD_DEFAULT_CEPH_POOL
        config.set('main', 'PD_NAMESPACE', ns)
        with open(KUBERDOCK_SETTINGS_FILE, 'wb') as fout:
            config.write(fout)

    if CEPH:
        # Set 'rbd' for all existing CEPH drives, because it was the
        # default pool.
        PersistentDisk.query.filter(
            ~PersistentDisk.drive_name.contains(PD_NS_SEPARATOR)
        ).update(
            {PersistentDisk.drive_name:
                OLD_DEFAULT_CEPH_POOL + PD_NS_SEPARATOR +
                PersistentDisk.drive_name},
            synchronize_session=False
        )
        db.session.commit()
        try:
            pstorage.check_namespace_exists(namespace=ns)
        except pstorage.NoNodesError:
            # skip CEPH pool checking if there are no nodes with CEPH
            pass

    # Restart kuberdock to prevent loss of PD bind state, because the fix
    # for this is in the new version.
    helpers.restart_service('emperor.uwsgi')
Example #19
    def test_execute_es_query(self, es_mock):
        """Test elasticsearch_utils.execute_es_query function."""

        # Add two log pod configs plus two corresponding nodes
        internal_user = User.get_internal()
        pod_id1 = str(uuid4())
        service1 = 'srv1'
        namespace1 = 'ns1'
        pod_id2 = str(uuid4())
        service2 = 'srv2'
        namespace2 = 'ns2'

        host1 = 'h1'
        host2 = 'h2'

        kube_id = Kube.get_default_kube_type()

        pod1 = Pod(id=pod_id1,
                   name=get_kuberdock_logs_pod_name(host1),
                   owner_id=internal_user.id,
                   kube_id=kube_id,
                   config=json.dumps({
                       "service": service1,
                       "namespace": namespace1
                   }),
                   status='RUNNING')
        pod2 = Pod(id=pod_id2,
                   name=get_kuberdock_logs_pod_name(host2),
                   owner_id=internal_user.id,
                   kube_id=kube_id,
                   config=json.dumps({
                       "service": service2,
                       "namespace": namespace2
                   }),
                   status='RUNNING')
        db.session.add_all([pod1, pod2])
        db.session.commit()

        node1 = Node(ip='123.123.123',
                     hostname=host1,
                     kube_id=kube_id,
                     state='completed',
                     upgrade_status='applied')
        node2 = Node(ip='123.123.124',
                     hostname=host2,
                     kube_id=kube_id,
                     state='completed',
                     upgrade_status='applied')
        db.session.add_all([node1, node2])
        db.session.commit()

        size = 123
        index = '1234qwerty'
        query = None
        sort = None
        search_result = {'hits': {'total': 333, 'hits': [1, 2, 3]}}
        search_mock = es_mock.return_value.search
        search_mock.return_value = search_result
        res = elasticsearch_utils.execute_es_query(index, query, size, sort)
        self.assertEqual(
            res, {
                'total': search_result['hits']['total'],
                'hits': search_result['hits']['hits'],
            })
        prefix1 = elasticsearch_utils.K8S_PROXY_PREFIX + \
            '/namespaces/' + namespace1 + '/services/' + service1 + ':9200/'
        prefix2 = elasticsearch_utils.K8S_PROXY_PREFIX + \
            '/namespaces/' + namespace2 + '/services/' + service2 + ':9200/'
        es_mock.assert_called_once_with([
            {
                'host': KUBE_API_HOST,
                'port': KUBE_API_PORT,
                'url_prefix': prefix1,
            },
            {
                'host': KUBE_API_HOST,
                'port': KUBE_API_PORT,
                'url_prefix': prefix2,
            },
        ])
        search_mock.assert_called_once_with(index=index, body={'size': size})

        query = {'a': 1}
        sort = {'b': 2}
        elasticsearch_utils.execute_es_query(index, query, size, sort)
        search_mock.assert_called_with(index=index,
                                       body={
                                           'size': size,
                                           'sort': sort,
                                           'query': query
                                       })
        search_mock.side_effect = RequestException('!!!')
        with self.assertRaises(elasticsearch_utils.LogsError):
            elasticsearch_utils.execute_es_query(index, query, size, sort)
Example #20
def upgrade(upd, with_testing, *args, **kwargs):
    upgrade_db()

    # === 00124_update.py ===
    # Move index file of k8s2etcd service from / to /var/lib/kuberdock
    try:
        stop_service(u124_service_name)
        if os.path.isfile(u124_old) and not os.path.isfile(u124_new):
            shutil.move(u124_old, u124_new)
    finally:
        start_service(u124_service_name)

    # === 00126_update.py ===

    pod_collection = PodCollection()
    for pod_dict in pod_collection.get(as_json=False):
        pod = pod_collection._get_by_id(pod_dict['id'])
        db_config = get_pod_config(pod.id)
        cluster_ip = db_config.pop('clusterIP', None)
        if cluster_ip is None:
            service_name = db_config.get('service')
            if service_name is None:
                continue
            namespace = db_config.get('namespace') or pod.id
            service = KubeQuery().get(['services', service_name],
                                      ns=namespace)
            cluster_ip = service.get('spec', {}).get('clusterIP')
            if cluster_ip is not None:
                db_config['podIP'] = cluster_ip
        replace_pod_config(pod, db_config)

    # === 00127_update.py ===

    upd.print_log('Upgrading menu...')
    MenuItemRole.query.delete()
    MenuItem.query.delete()
    Menu.query.delete()
    generate_menu()

    # === 00130_update.py ===

    upd.print_log('Update permissions...')
    Permission.query.delete()
    Resource.query.delete()
    add_permissions()
    db.session.commit()

    # === 00135_update.py ===
    # upd.print_log('Changing session_data schema...')
    # upgrade_db(revision='220dacf65cba')


    # === 00137_update.py ===
    upd.print_log('Upgrading db...')
    # upgrade_db(revision='3c832810a33c')
    upd.print_log('Raise max kubes to 64')
    max_kubes = 'max_kubes_per_container'
    old_value = SystemSettings.get_by_name(max_kubes)
    if old_value == '10':
        SystemSettings.set_by_name(max_kubes, 64)
    upd.print_log('Update kubes')
    small = Kube.get_by_name('Small')
    standard = Kube.get_by_name('Standard')
    if small:
        small.cpu = 0.12
        small.name = 'Tiny'
        small.memory = 64
        if small.is_default and standard:
            small.is_default = False
            standard.is_default = True
        small.save()
    if standard:
        standard.cpu = 0.25
        standard.memory = 128
        standard.save()
    high = Kube.get_by_name('High memory')
    if high:
        high.cpu = 0.25
        high.memory = 256
        high.disk_space = 3
        high.save()

    # === 00138_update.py ===

    if not (CEPH or AWS):
        upgrade_localstorage_paths(upd)

    # === added later ===

    secret_key = SystemSettings.query.filter(
        SystemSettings.name == 'sso_secret_key').first()
    if not secret_key.value:
        secret_key.value = randstr(16)
    secret_key.description = (
        'Used for Single sign-on. Must be shared between '
        'Kuberdock and billing system or other 3rd party '
        'application.')
    db.session.commit()

    upd.print_log('Close all sessions...')
    close_all_sessions()