def upgrade(upd, with_testing, *args, **kwargs):
    # 00109_update.py
    upd.print_log('Update system settings scheme...')
    helpers.upgrade_db()

    redis = ConnectionPool.get_connection()
    old_settings = SystemSettings.get_all()

    # backup for downgrade
    if not redis.get('old_system_settings'):
        redis.set('old_system_settings', json.dumps(old_settings),
                  ex=int(timedelta(days=7).total_seconds()))

    SystemSettings.query.delete()
    add_system_settings()
    for param in old_settings:
        SystemSettings.set_by_name(
            param.get('name'), param.get('value'), commit=False)
    db.session.commit()

    # 00111_update.py
    helpers.restart_master_kubernetes()

    # 00113_update.py
    upd.print_log('Adding "count_type" column to packages...')
    helpers.upgrade_db(revision='42b36be03945')
def test_put(self, PodCollection):
    PodCollection().update.return_value = {}
    pod = self.fixtures.pod(status='unpaid', owner=self.user)
    pod_config = pod.get_dbconfig()

    response = self.user_open(PodAPIUrl.put(pod.id), 'PUT', {})
    self.assert200(response)

    # check fix-price users restrictions
    SystemSettings.set_by_name('billing_type', 'whmcs')
    self.user.count_type = 'fixed'
    self.db.session.commit()

    # only admin has permission to remove "unpaid" status
    set_paid = {'command': 'set', 'commandOptions': {'status': 'stopped'}}
    response = self.admin_open(PodAPIUrl.put(pod.id), 'PUT', set_paid)
    self.assert200(response)

    # only admin has permission to upgrade pod
    upgrade = {
        'command': 'redeploy',
        'containers': [dict(c, kubes=c['kubes'] + 1)
                       for c in pod_config['containers']],
    }
    response = self.admin_open(PodAPIUrl.put(pod.id), 'PUT', upgrade)
    self.assert200(response)
def downgrade(upd, with_testing, exception, *args, **kwargs):
    # 00090_update.py
    upd.print_log('Downgrade system_settings scheme...')

    redis = ConnectionPool.get_connection()

    SystemSettings.query.delete()
    db.session.add_all([
        SystemSettings(
            name='billing_apps_link',
            label='Link to billing system script',
            description='Link to predefined application '
                        'request processing script',
            placeholder='http://whmcs.com/script.php',
            value=redis.get('old_billing_apps_link')),
        SystemSettings(
            name='persitent_disk_max_size',
            value=redis.get('old_persitent_disk_max_size'),
            label='Persistent disk maximum size',
            description='maximum capacity of a user container '
                        'persistent disk in GB',
            placeholder='Enter value to limit PD size'),
    ])
    db.session.commit()

    helpers.downgrade_db(revision='27ac98113841')

    # 00094_update.py
    try:
        from kubedock.nodes.models import NodeMissedAction
    except ImportError:
        upd.print_log('Cannot find NodeMissedAction model')
    else:
        upd.print_log('Create table for NodeMissedAction model if not exists')
        NodeMissedAction.__table__.create(bind=db.engine, checkfirst=True)
        db.session.commit()

    # 00099_update.py
    helpers.downgrade_db(revision='46bba639e6fb')  # first of rc4
def _validate_max_kubes_per_container(self, exists, field, value):
    if exists:
        max_size = SystemSettings.get_by_name('max_kubes_per_container')
        if max_size and int(value) > int(max_size):
            self._error(field, (
                'Container cannot have more than {0} kubes.'.format(
                    max_size)))
def _validate_pd_size_max(self, exists, field, value):
    if exists:
        max_size = SystemSettings.get_by_name('persitent_disk_max_size')
        if max_size and int(value) > int(max_size):
            self._error(field, (
                'Persistent disk size must be less than or equal '
                'to "{0}" GB').format(max_size))
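# The two validators above follow the Cerberus custom-rule convention: a
# `_validate_<rule>` method on a Validator subclass receives the rule's
# constraint, the field name and the value. Below is a minimal, hypothetical
# sketch of how such a rule can be declared and exercised on its own; the
# class name, schema and the hard-coded limit of 10 GB are illustrative
# assumptions that stand in for the SystemSettings lookup used above.

import cerberus


class ExampleValidator(cerberus.Validator):
    def _validate_pd_size_max(self, exists, field, value):
        """ Limit persistent disk size.

        The rule's arguments are validated against this schema:
        {'type': 'boolean'}
        """
        # Hypothetical fixed limit instead of the SystemSettings lookup.
        if exists and int(value) > 10:
            self._error(field, 'Persistent disk size must be less than '
                               'or equal to "10" GB')


schema = {'pdSize': {'type': 'integer', 'pd_size_max': True}}
print(ExampleValidator(schema).validate({'pdSize': 42}))  # False
print(ExampleValidator(schema).validate({'pdSize': 5}))   # True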
def _update_00186_upgrade():
    for setting_name in NAMES:
        SystemSettings.query.filter_by(name=setting_name).delete()
    db.session.add_all([
        SystemSettings(
            name=keys.DNS_MANAGEMENT_CLOUDFLARE_EMAIL,
            label='CloudFlare Email',
            description='Email for CloudFlare DNS management',
            placeholder='Enter CloudFlare Email',
            setting_group='domain'),
        SystemSettings(
            name=keys.DNS_MANAGEMENT_CLOUDFLARE_TOKEN,
            label='CloudFlare Global API Key',
            description='Global API Key for CloudFlare DNS management',
            placeholder='Enter CloudFlare Global API Key',
            setting_group='domain'),
    ])
    db.session.commit()
def inner(*args, **kwargs):
    billing_type = SystemSettings.get_by_name('billing_type')
    if billing_type == 'No billing':
        if raise_:
            raise WithoutBilling()
        driver = None
    else:
        driver = current_app.billing_factory.get_billing(billing_type)
    return func(driver, *args, **kwargs)
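# `inner` closes over `func` and `raise_`, so it presumably lives inside a
# decorator factory that resolves the billing driver and injects it as the
# first argument of the wrapped function. A minimal sketch of that assumed
# enclosing shape follows; the decorator name `with_billing` is hypothetical
# (not confirmed by the source), and the sketch requires the same application
# context (SystemSettings, current_app, WithoutBilling) as the snippet above.

from functools import wraps


def with_billing(raise_=True):
    def decorator(func):
        @wraps(func)
        def inner(*args, **kwargs):
            billing_type = SystemSettings.get_by_name('billing_type')
            if billing_type == 'No billing':
                if raise_:
                    raise WithoutBilling()
                driver = None
            else:
                driver = current_app.billing_factory.get_billing(billing_type)
            return func(driver, *args, **kwargs)
        return inner
    return decorator

# Hypothetical usage: the wrapped view receives the driver first.
#
#     @with_billing(raise_=True)
#     def order_product(billing_driver, package_id):
#         ...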
def upgrade(upd, with_testing, *args, **kwargs):
    upd.print_log('Upgrading DB...')
    helpers.upgrade_db()

    # 00116
    upd.print_log('Add system settings for CPU and Memory multipliers')
    db.session.add_all([
        SystemSettings(name='cpu_multiplier', value=CPU_MULTIPLIER,
                       label='CPU multiplier',
                       description='Cluster CPU multiplier',
                       placeholder='Enter value for CPU multiplier'),
        SystemSettings(name='memory_multiplier', value=MEMORY_MULTIPLIER,
                       label='Memory multiplier',
                       description='Cluster Memory multiplier',
                       placeholder='Enter value for Memory multiplier'),
    ])
    upd.print_log('Create table for NodeAction model if not exists')
    NodeAction.__table__.create(bind=db.engine, checkfirst=True)
    db.session.commit()

    # 00117
    upd.print_log('Update permissions...')
    Permission.query.delete()
    Resource.query.delete()
    add_permissions()
    db.session.commit()

    # Fix wrong pd_states if exists.
    wrong_states = db.session.query(PersistentDiskState).join(
        PersistentDisk,
        db.and_(
            PersistentDisk.name == PersistentDiskState.pd_name,
            PersistentDisk.owner_id == PersistentDiskState.user_id,
            PersistentDisk.state == PersistentDiskStatuses.DELETED)
    ).filter(PersistentDiskState.end_time == None)
    for state in wrong_states:
        state.end_time = datetime.datetime.utcnow()
    db.session.commit()

    helpers.close_all_sessions()
def _update_00185_upgrade():
    SystemSettings.query.filter_by(name=keys.MAX_KUBES_TRIAL_USER).delete()
    db.session.add_all([
        SystemSettings(name=keys.MAX_KUBES_TRIAL_USER, value='5',
                       label='Kubes limit for Trial user',
                       placeholder='Enter Kubes limit for Trial user',
                       setting_group='general'),
    ])
    db.session.commit()
def setUp(self):
    self.setting = SystemSettings(name='test_setting',
                                  value='test_setting_value',
                                  label='test_setting_label',
                                  options='["foo", "abc"]',
                                  description='test_setting_description',
                                  setting_group='test_setting_group',
                                  placeholder='test_setting_placeholder')
    self.db.session.add(self.setting)
    self.db.session.commit()
def prepare_system_settings(data):
    """
    Process system settings and put them into data.
    :param data: dict -> data to be fed to template
    """
    keys = ('billing_type', 'persitent_disk_max_size')
    n = namedtuple('N', 'billing maxsize')._make(keys)
    data.update({k: SystemSettings.get_by_name(k) for k in keys})
    if not data[n.maxsize]:
        data[n.maxsize] = 10
    if data[n.billing].lower() == 'no billing':
        return
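# A hedged usage sketch for prepare_system_settings(); it needs the running
# application context, so it is shown as comments only. The input dict and
# the stored setting values are illustrative assumptions. Note that the key
# keeps the project's original "persitent" spelling.
#
#     data = {'package': 'Standard package'}
#     prepare_system_settings(data)
#     # With no PD-size setting stored, data now contains, e.g.:
#     # {'package': 'Standard package',
#     #  'billing_type': 'No billing',
#     #  'persitent_disk_max_size': 10}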
def downgrade(upd, with_testing, exception, *args, **kwargs):
    # 00109_update.py
    upd.print_log('Downgrade system_settings scheme...')

    redis = ConnectionPool.get_connection()
    old_settings = redis.get('old_system_settings')
    if old_settings:
        # restore old settings
        SystemSettings.query.delete()
        for param in json.loads(old_settings):
            db.session.add(
                SystemSettings(name=param.get('name'),
                               label=param.get('label'),
                               description=param.get('description'),
                               placeholder=param.get('placeholder'),
                               options=json.dumps(param.get('options')),
                               value=param.get('value')))
        db.session.commit()

    # 00113_update.py
    upd.print_log('Removing "count_type" column from packages...')
    helpers.downgrade_db(revision='27c8f4c5f242')
def upgrade(upd, with_testing, *args, **kwargs):
    # 00090_update.py
    upd.print_log('Update system settings scheme...')
    helpers.upgrade_db()

    redis = ConnectionPool.get_connection()
    billing_apps_link = SystemSettings.get_by_name('billing_apps_link')
    persitent_disk_max_size = SystemSettings.get_by_name(
        'persitent_disk_max_size')

    # backup for downgrade
    if not redis.get('old_billing_apps_link'):
        redis.set('old_billing_apps_link', billing_apps_link or '',
                  ex=int(timedelta(days=7).total_seconds()))
    if not redis.get('old_persitent_disk_max_size'):
        redis.set('old_persitent_disk_max_size', persitent_disk_max_size,
                  ex=int(timedelta(days=7).total_seconds()))

    if billing_apps_link:
        billing_url = urlparse(billing_apps_link)._replace(
            path='', query='', params='').geturl()
    else:
        billing_url = None

    SystemSettings.query.delete()
    add_system_settings()
    SystemSettings.set_by_name(
        'persitent_disk_max_size', persitent_disk_max_size, commit=False)
    SystemSettings.set_by_name('billing_url', billing_url, commit=False)
    db.session.commit()

    # 00094_update.py
    upd.print_log('Drop table "node_missed_actions" if exists')
    table = Table('node_missed_actions', db.metadata)
    table.drop(bind=db.engine, checkfirst=True)
    db.session.commit()

    # 00095_update.py
    upd.print_log('Restart k8s2etcd service')
    upd.print_log(helpers.local('systemctl restart kuberdock-k8s2etcd'))

    # 00098_update.py
    copyfile('/var/opt/kuberdock/conf/sudoers-nginx.conf',
             '/etc/sudoers.d/nginx')
    local('chown nginx:nginx /etc/nginx/conf.d/shared-kubernetes.conf')
    local('chown nginx:nginx /etc/nginx/conf.d/shared-etcd.conf')

    helpers.close_all_sessions()
def has_billing():
    billing_type = SystemSettings.get_by_name('billing_type').lower()
    if billing_type == 'no billing':
        return False
    return True
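# A hypothetical guard built on has_billing(); the function name and error
# message below are illustrative assumptions, not taken from the project.
#
#     def order_app(package_id):
#         if not has_billing():
#             raise APIError('No billing system is configured')
#         ...  # proceed with the billing-backed order flow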
def upgrade(upd, with_testing, *args, **kwargs):
    upgrade_db()

    # === 00124_update.py ===
    # Move index file of k8s2etcd service from / to /var/lib/kuberdock
    try:
        stop_service(u124_service_name)
        if os.path.isfile(u124_old) and not os.path.isfile(u124_new):
            shutil.move(u124_old, u124_new)
    finally:
        start_service(u124_service_name)

    # === 00126_update.py ===
    pod_collection = PodCollection()
    for pod_dict in pod_collection.get(as_json=False):
        pod = pod_collection._get_by_id(pod_dict['id'])
        db_config = get_pod_config(pod.id)
        cluster_ip = db_config.pop('clusterIP', None)
        if cluster_ip is None:
            service_name = db_config.get('service')
            if service_name is None:
                continue
            namespace = db_config.get('namespace') or pod.id
            service = KubeQuery().get(['services', service_name],
                                      ns=namespace)
            cluster_ip = service.get('spec', {}).get('clusterIP')
        if cluster_ip is not None:
            db_config['podIP'] = cluster_ip
        replace_pod_config(pod, db_config)

    # === 00127_update.py ===
    upd.print_log('Upgrading menu...')
    MenuItemRole.query.delete()
    MenuItem.query.delete()
    Menu.query.delete()
    generate_menu()

    # === 00130_update.py ===
    upd.print_log('Update permissions...')
    Permission.query.delete()
    Resource.query.delete()
    add_permissions()
    db.session.commit()

    # === 00135_update.py ===
    # upd.print_log('Changing session_data schema...')
    # upgrade_db(revision='220dacf65cba')

    # === 00137_update.py ===
    upd.print_log('Upgrading db...')
    # upgrade_db(revision='3c832810a33c')
    upd.print_log('Raise max kubes to 64')
    max_kubes = 'max_kubes_per_container'
    old_value = SystemSettings.get_by_name(max_kubes)
    if old_value == '10':
        SystemSettings.set_by_name(max_kubes, 64)

    upd.print_log('Update kubes')
    small = Kube.get_by_name('Small')
    standard = Kube.get_by_name('Standard')
    if small:
        small.cpu = 0.12
        small.name = 'Tiny'
        small.memory = 64
        if small.is_default and standard:
            small.is_default = False
            standard.is_default = True
        small.save()
    if standard:
        standard.cpu = 0.25
        standard.memory = 128
        standard.save()
    high = Kube.get_by_name('High memory')
    if high:
        high.cpu = 0.25
        high.memory = 256
        high.disk_space = 3
        high.save()

    # === 00138_update.py ===
    if not (CEPH or AWS):
        upgrade_localstorage_paths(upd)

    # === added later ===
    secret_key = SystemSettings.query.filter(
        SystemSettings.name == 'sso_secret_key').first()
    if not secret_key.value:
        secret_key.value = randstr(16)
        secret_key.description = (
            'Used for Single sign-on. Must be shared between '
            'Kuberdock and billing system or other 3rd party '
            'application.')
    db.session.commit()

    upd.print_log('Close all sessions...')
    close_all_sessions()