def delete_nodes():
    """
    Delete all nodes from the restored cluster.

    When restoring a cluster after a full crash it makes sense to drop
    nodes from the restored db because they will be added again
    (re-deployed). This also implies purging of the pods - in this case
    they also need to be re-deployed (or restored from pod backups).
    """
    from kubedock.core import db
    from kubedock.api import create_app
    from kubedock.pods.models import PersistentDisk, Pod, PodIP
    from kubedock.usage.models import IpState, ContainerState, PodState
    from kubedock.domains.models import PodDomain
    from kubedock.nodes.models import Node
    from kubedock.kapi.nodes import delete_node_from_db

    create_app(fake_sessions=True).app_context().push()

    # Purge all pod-related records from the database.
    IpState.query.delete()
    PodIP.query.delete()
    ContainerState.query.delete()
    PodDomain.query.delete()
    PersistentDisk.query.delete()
    PodState.query.delete()
    Pod.query.delete()
    logger.info("All pod data purged.")
    db.session.commit()

    # Drop all namespaces and nodes from etcd via kubectl, discarding
    # kubectl's output.
    devnull = open(os.devnull, 'w')
    subprocess.call(['kubectl', 'delete', '--all', 'namespaces'],
                    stderr=devnull, stdout=devnull)
    subprocess.call(['kubectl', 'delete', '--all', 'nodes'],
                    stderr=devnull, stdout=devnull)
    logger.info("Etcd data purged.")

    for node in Node.get_all():
        delete_node_from_db(node)
        logger.info("Node `{0}` purged.".format(node.hostname))
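# A minimal sketch of how delete_nodes() could be exposed as a Flask-Script
# command alongside the other commands in this file; the DeleteNodes class
# and the 'delete-nodes' command name are illustrative assumptions, not part
# of the original code.
from flask_script import Command


class DeleteNodes(Command):
    """Purge all node and pod data from a restored cluster (hypothetical)."""

    def run(self):
        delete_nodes()

# Registered the same way as the commands below, e.g.:
# manager.add_command('delete-nodes', DeleteNodes())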
"""Checks if current storage backend supports persistent volume resizing""" def run(self): storage = STORAGE_CLASS() print storage.is_pv_resizable() pv_manager.add_command('resize', PVResize) pv_manager.add_command('is-resizable', PVIsResizable) class NetworkPoliciesCreator(Command): def run(self): create_network_policies() app = create_app(fake_sessions=True) manager = Manager(app, with_default_commands=False) directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'kubedock', 'updates', 'kdmigrations') migrate = Migrate(app, db, directory) def make_shell_context(): return dict(app=app, db=db, User=User, Pod=Pod, Package=Package, Kube=Kube) manager.add_command('shell', Shell(make_context=make_shell_context)) manager.add_command('db', MigrateCommand) manager.add_command('createdb', Creator()) manager.add_command('updatedb', Updater())
def interact(locals=None):
    import code
    import inspect
    # Start an interactive console in the caller's scope.
    code.interact(local=locals or inspect.currentframe().f_back.f_locals)

try:
    __builtins__.__dict__['INTERACT'] = interact
    __builtins__['INTERACT'] = interact
except (TypeError, AttributeError):
    pass

# ==============================================================================
from kubedock import frontend, api, listeners
from kubedock.settings import PRE_START_HOOK_ENABLED, SENTRY_ENABLE
from kubedock.core import ExclusiveLock

front_app = frontend.create_app()
back_app = api.create_app()

# Serve the frontend at the root URL and the API under /api.
application = DispatcherMiddleware(
    front_app,
    {'/api': back_app}
)

# NOTE: Sentry reporting is hard-disabled; the intended condition
# (`if SENTRY_ENABLE:`) is kept commented out for reference.
# if SENTRY_ENABLE:
if False:
    import socket
    from kubedock.settings import MASTER_IP
    from kubedock.settings import SENTRY_DSN, SENTRY_PROCESSORS
    from kubedock.settings import SENTRY_EXCLUDE_PATHS
    from raven.contrib.flask import Sentry
    from kubedock.utils import get_version
    from kubedock.kapi.licensing import get_license_info

    authkey = get_license_info().get('auth_key', 'no installation id')
    hostname = "{}({})".format(socket.gethostname(), MASTER_IP)
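# A minimal sketch of serving the combined WSGI application with werkzeug's
# development server; the host and port are illustrative assumptions.
# Requests to / are handled by front_app, anything under /api by back_app.
if __name__ == '__main__':
    from werkzeug.serving import run_simple
    run_simple('127.0.0.1', 5000, application, use_reloader=True)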
        ]
        if stopped:
            msg.append(MESSAGES['pods'].format(', '.join(stopped)))
    except (SystemExit, Exception) as e:
        msg.append("Can't get internal pod states because of {}".format(
            e.message))
    return os.linesep.join(msg)


def check_cluster():
    msg = []
    master = check_master()
    if master:
        msg.append("Master errors:")
        msg.append(master)
    nodes = check_nodes()
    if nodes:
        msg.append("Node errors:")
        msg.append(nodes)

    # Temporarily disable status output while closing all ssh connections.
    store = output.status
    output.status = False
    disconnect_all()
    output.status = store

    return os.linesep.join(msg)


if __name__ == '__main__':
    app = create_app()
    with app.app_context():
        print check_cluster() or "All OK"
def create_app(self):
    # Flask-Testing hook: builds the app under test via the module-level
    # create_app factory (the method does not call itself).
    return create_app(self)
def create_app(self):
    from kubedock.api import create_app
    return create_app(self, fake_sessions=True)
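# A minimal sketch of how such a create_app() hook is consumed by a
# Flask-Testing test case; the ExampleTestCase name and the assertion are
# illustrative assumptions.
from flask_testing import TestCase


class ExampleTestCase(TestCase):

    def create_app(self):
        # Called by Flask-Testing before each test to obtain the app
        # under test, which it then exposes as self.app.
        from kubedock.api import create_app
        return create_app(self, fake_sessions=True)

    def test_app_exists(self):
        self.assertIsNotNone(self.app)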