def backup(  # type: ignore
        self, client: DCOSClient, **kwargs) -> BackupList:
    """Fetch every Marathon app from the cluster and back each one up."""
    backups = BackupList()
    response = client.get("{}/marathon/v2/apps".format(client.dcos_url))
    for app_definition in response.json()['apps']:
        backups.append(self.createBackup(app_definition))
    return backups
def backup(  # type: ignore
        self, client: DCOSClient, **kwargs) -> BackupList:
    """Back up every Metronome job, schedules included."""
    url = f"{client.dcos_url}/service/metronome/v1/jobs?embed=schedules"
    job_definitions = client.get(url).json()
    result = BackupList()
    for job_definition in job_definitions:
        result.append(self.createBackup(job_definition))
    return result
def test_secret_migrate():
    """A single backed-up secret migrates into exactly one manifest."""
    with open('tests/examples/simpleSecret.json') as json_file:
        secret_data = json.load(json_file)
    assert secret_data['key'] is not None

    backups = BackupList()
    backups.append(
        Backup(pluginName='secret', backupName='foo.bar', data=secret_data))

    manifests = SecretPlugin().migrate(backupList=backups,
                                       manifestList=ManifestList())
    assert len(manifests) == 1
    assert manifests[0][0].data['foo.bar'] == 'Rk9PQkFS'
def migrate(self, backupList: BackupList, manifestList: ManifestList,
            **kwargs: Any) -> ManifestList:
    """Convert every 'secret' backup into a Kubernetes Secret manifest.

    Each DC/OS secret becomes a V1Secret named with a DNS-safe subdomain
    derived from the secret key and annotated with the original DC/OS
    secret path plus the cluster annotations.

    Returns a ManifestList with one manifest per backed-up secret.
    """
    ml = ManifestList()
    # The cluster annotations are the same for every secret; look them
    # up once instead of once per backup.
    clusterMeta = manifestList.clusterMeta()
    for ba in backupList.backups(pluginName='secret'):
        assert isinstance(ba, Backup)
        metadata = V1ObjectMeta()
        # COPY the cluster annotations. Assigning the dict directly would
        # alias it, so the per-secret "secret-path" key added below would
        # leak into the shared cluster metadata and every other secret
        # manifest built by this loop would end up sharing (and
        # clobbering) one annotations dict. `or {}` guards a None
        # annotations attribute.
        metadata.annotations = dict(
            clusterMeta.annotations or {}) if clusterMeta else {}
        logging.debug("Found backup {}".format(ba))
        b = ba.data
        # A secret is addressed by an optional folder path plus its key.
        fullPath = "/".join(filter(None, [b["path"], b["key"]]))
        name = b["key"]
        metadata.annotations[utils.namespace_path("secret-path")] = fullPath
        metadata.name = utils.make_subdomain(name.split('/'))

        sec = V1Secret(metadata=metadata)
        # The generated models do not fill in kind/apiVersion defaults.
        sec.api_version = 'v1'
        sec.kind = 'Secret'
        # K8s requires secret values to be base64-encoded. The secret value
        # is base64-encoded during backup so it can be passed as-is here.
        sec.data = {utils.dnsify(name): b['value']}

        manifest = Manifest(pluginName=self.plugin_name,
                            manifestName=utils.dnsify(fullPath))
        manifest.append(sec)
        ml.append(manifest)
    return ml
def backup(  # type: ignore
        self, client: DCOSClient, **kwargs) -> BackupList:
    """Download every DC/OS secret and wrap each one in a Backup."""
    result = BackupList()
    service = DCOSSecretsService(client)
    root = ""
    # list() may return a falsy value when there are no secrets.
    for key in (service.list(root) or []):
        secret_data = service.get(root, key)
        result.append(
            Backup(self.plugin_name,
                   Backup.renderBackupName(root + key),
                   data=secret_data))
    return result
def migrate(self, backupList: BackupList, manifestList: ManifestList,
            **kwargs: Any) -> ManifestList:
    """Translate Marathon app backups into Kubernetes manifests.

    Apps that fail to migrate are logged as warnings and skipped.
    Afterwards the operator is reminded about any node labels the
    generated deployments rely on.
    """
    label_tracker = NodeLabelTracker()
    manifests = ManifestList()
    for app_backup in backupList.backups(pluginName=self.plugin_name):
        migrator = MarathonMigrator(node_label_tracker=label_tracker,
                                    backup=app_backup,
                                    backup_list=backupList,
                                    manifest_list=manifestList)
        try:
            migrated = migrator.migrate()
            if migrated:
                manifests.append(migrated)
        except Exception as e:
            # Best effort: one broken app must not abort the whole run.
            logging.warning("Cannot migrate: {}".format(e))

    labels_in_use = label_tracker.get_apps_by_label()
    if labels_in_use:
        logging.info(
            'Node labels used by deployments generated from Marathon apps:\n{}\n'
            'Please make sure that these labels are properly set on nodes\nof the'
            ' target Kubernetes cluster!'.format(
                json.dumps(list(labels_in_use))))
    return manifests
def test_jenkins_backup():
    """The Jenkins plugin extracts package config from a Marathon backup."""
    with open('tests/examples/jenkins.json') as json_file:
        app_definition = json.load(json_file)

    marathon_backups = BackupList()
    marathon_backups.append(MarathonPlugin().createBackup(app_definition))

    jenkins_backups = JenkinsPlugin().backup(client=DCOSClient(),
                                             backupList=marathon_backups)
    assert len(jenkins_backups) == 1
    extracted = jenkins_backups[0].data
    assert extracted['version'] == "3.6.1-2.190.1"
    assert extracted['options']['service']['mem'] == 4096
    assert extracted['options']['service']['cpus'] == 1
def __init__(self) -> None:
    """Wire up client, plugins, result lists and CLI parsing for a run."""
    super(DCOSMigrate, self).__init__()
    self.client = DCOSClient()
    self.pm = PluginManager()
    self.manifest_list = ManifestList()
    self.backup_list = BackupList()

    # The CLI surface is the plugin-provided options plus the globals.
    options = self.pm.config_options
    options.extend(self.config_defaults)
    self.argparse = ArgParse(
        options,
        prog='dcos-migrate',
        usage=
        'Does a backup of your DC/OS cluster and migrates everything into K8s Manifests'
    )

    # Ordered phase pipeline; indices line up with phases_choices.
    self.phases: List[Callable[[Optional[str], bool], None]] = [
        self.initPhase, self.backup, self.backup_data, self.migrate,
        self.migrate_data
    ]
def backup(self, client: DCOSClient, backupList: BackupList,
           **kwargs: Any) -> BackupList:
    """Derive Jenkins package backups from existing Marathon app backups.

    Scans the Marathon backups for apps installed from the 'jenkins'
    package and captures package name, version and the base64-decoded
    install options.
    """
    result = BackupList()
    for marathon_backup in backupList.backups(pluginName="marathon"):
        assert isinstance(marathon_backup, Backup)
        if not marathon_backup.data or 'labels' not in marathon_backup.data:
            continue
        labels = marathon_backup.data['labels']
        if labels.get("DCOS_PACKAGE_NAME") != "jenkins":
            continue
        # we found a jenkins package lets extract the config
        if 'DCOS_PACKAGE_OPTIONS' not in labels:
            continue
        decoded_options = b64decode(labels['DCOS_PACKAGE_OPTIONS'])
        result.append(
            Backup(pluginName=self.plugin_name,
                   backupName=Backup.renderBackupName(
                       labels['DCOS_SERVICE_NAME']),
                   data={
                       "packageName": labels['DCOS_PACKAGE_NAME'],
                       "version": labels['DCOS_PACKAGE_VERSION'],
                       "options": json.loads(decoded_options)
                   }))
    return result
def backup(self, client: DCOSClient, backupList: BackupList,
           **kwargs: Any) -> BackupList:
    """Snapshot cluster identity and the Mesos master state summary."""
    metadata = client.get(client.full_dcos_url('/metadata')).json()
    state = client.get(
        client.full_dcos_url('/mesos/master/state-summary')).json()

    cluster_info = {
        "CLUSTER_ID": metadata['CLUSTER_ID'],
        "CLUSTER": state['cluster'],
        "MESOS_MASTER_STATE-SUMMARY": state,
        "BACKUP_DATE": str(datetime.date.today())
    }

    result = BackupList()
    result.append(
        Backup(pluginName=self.plugin_name,
               backupName="default",
               data=cluster_info))
    return result
def migrate(self, backupList: BackupList, manifestList: ManifestList,
            **kwargs: T.Any) -> ManifestList:
    """Turn each Metronome job backup into a Kubernetes manifest."""
    result = ManifestList()
    for job_backup in backupList.backups(pluginName=self.plugin_name):
        migrated = MetronomeMigrator(backup=job_backup,
                                     backup_list=backupList,
                                     manifest_list=manifestList).migrate()
        if migrated:
            result.append(migrated)
    return result
def migrate(self, backupList: BackupList, manifestList: ManifestList,
            **kwargs: Any) -> ManifestList:
    """Create a ConfigMap manifest carrying cluster-wide DC/OS metadata.

    The cluster backup produced by this plugin's backup step becomes a
    ConfigMap annotated with the cluster id, name and backup date; the
    full Mesos state summary travels along base64-encoded.

    Returns an empty ManifestList (after a critical log) when no cluster
    backup exists.
    """
    ml = ManifestList()
    clusterBackup = backupList.backup(pluginName=self.plugin_name,
                                      backupName='default')
    if not clusterBackup:
        logging.critical(
            "Cluster backup not found. Cannot provide DC/OS annotations")
        return ml

    metadata = V1ObjectMeta(
        name="dcos-{}".format(clusterBackup.data['CLUSTER_ID']))
    metadata.annotations = {
        utils.namespace_path("cluster-id"):
        clusterBackup.data['CLUSTER_ID'],
        utils.namespace_path("cluster-name"):
        clusterBackup.data['CLUSTER'],
        utils.namespace_path("backup-date"):
        clusterBackup.data['BACKUP_DATE'],
    }

    cfgmap = V1ConfigMap(metadata=metadata)
    # models do not set defaults -.-
    cfgmap.kind = "ConfigMap"
    cfgmap.api_version = "v1"
    # ConfigMap `data` values must be strings; b64encode() returns bytes,
    # so decode the result before storing it.
    cfgmap.data = {
        'MESOS_MASTER_STATE_SUMMARY_BASE64':
        b64encode(
            json.dumps(
                clusterBackup.data['MESOS_MASTER_STATE-SUMMARY']).encode(
                    'ascii')).decode('ascii')
    }

    manifest = Manifest(pluginName=self.plugin_name,
                        manifestName="dcos-cluster")
    manifest.append(cfgmap)
    ml.append(manifest)
    return ml
class DCOSMigrate(object):
    """Top-level driver: backs up a DC/OS cluster and migrates the result
    into Kubernetes manifests, executed as an ordered pipeline of phases."""

    # Valid values for the positional `phase` CLI argument. Order matters:
    # the index of the chosen value is compared against the phase pipeline
    # built in __init__ (see run()).
    phases_choices = [
        "all", "backup", "backup_data", "migrate", "migrate_data"
    ]

    # Global CLI options; merged with the plugin-provided options in
    # __init__ to form the full argument parser.
    config_defaults = [
        Arg(name="phase",
            nargs="?",
            choices=phases_choices,
            default="all",
            positional=True,
            help="phase to start with."),
        Arg(name="verbose",
            alternatives=["-v"],
            action="count",
            default=1,
            help="log verbosity. Default to critical and warnings")
    ]

    def __init__(self) -> None:
        """Wire up the DC/OS client, plugin manager, result lists, CLI
        parser and the ordered phase pipeline."""
        super(DCOSMigrate, self).__init__()
        self.client = DCOSClient()
        self.pm = PluginManager()
        self.manifest_list = ManifestList()
        self.backup_list = BackupList()

        # CLI surface: plugin options first, then the global defaults.
        config = self.pm.config_options
        config.extend(self.config_defaults)
        self.argparse = ArgParse(
            config,
            prog='dcos-migrate',
            usage=
            'Does a backup of your DC/OS cluster and migrates everything into K8s Manifests'
        )

        # Phase callables; indices line up with phases_choices.
        self.phases: List[Callable[[Optional[str], bool], None]] = [
            self.initPhase, self.backup, self.backup_data, self.migrate,
            self.migrate_data
        ]

    @property
    def selected_phase(self) -> int:
        """returns the int(index) of the selected phase or 0"""
        return self.phases_choices.index(self.pm.config['global'].get(
            'phase', "all"))

    def _end_process(self, message: str, exit_code: int = 0) -> int:
        """Announce why the run stops and hand back the process exit code."""
        print("Ending DC/OS migration - {}".format(message))
        return exit_code

    def run(self, args: Optional[List[str]] = None) -> int:
        """main entrypoint to start the migration script.

        Returns exit code as int"""
        self.handleArgparse(args)
        self.handleGlobal()

        for i, p in enumerate(self.phases):
            # Phases before the selected one run in "skip" mode, which
            # loads their previous results from disk instead of
            # recomputing them.
            if self.selected_phase > i:
                p(None, True)
                continue
            p(None, False)
            # Stop right after the explicitly selected phase. "all" is
            # index 0 (falsy), so it never ends early and every phase runs.
            if self.selected_phase and self.selected_phase == i:
                return self._end_process("selected phase {} reached".format(
                    self.phases_choices[i]))

        return 0

    def handleGlobal(self) -> None:
        """handle global config before starting the process"""
        levels = [
            logging.CRITICAL, logging.WARNING, logging.INFO, logging.DEBUG
        ]
        # The -v count selects the level; clamp so any count beyond the
        # list still yields DEBUG.
        v = self.pm.config['global'].get('verbose', 1)
        level = levels[min(len(levels) - 1, v)]

        logging.basicConfig(level=level, force=True)

    def handleArgparse(self, args: Optional[List[str]] = None) -> None:
        """Parse CLI args into the plugin manager's config.

        NOTE(review): None becomes an empty argv (defaults only) —
        sys.argv is not consulted here; callers pass argv explicitly.
        """
        if args is None:
            args = []
        self.pm.config = self.argparse.parse_args(args)

    def initPhase(self,
                  pluginName: Optional[str] = None,
                  skip: bool = False) -> None:
        """currently unused and empty method to cover all choice"""
        pass

    def backup(self,
               pluginName: Optional[str] = None,
               skip: bool = False) -> None:
        """Run every plugin's backup step, or reload backups from disk."""
        if skip:
            logging.info("skipping backup - trying to load from disk.")
            self.backup_list.load()
            return

        logging.info("Calling {} Backup Batches".format(
            len(self.pm.backup_batch)))
        for batch in self.pm.backup_batch:
            # each batch could also be executed in parallel.
            # But for now just start sequential
            for plugin in batch:
                logging.info("Calling backup for plugin {}".format(
                    plugin.plugin_name))
                blist = plugin.backup(client=self.client,
                                      backupList=self.backup_list)
                if blist:
                    self.backup_list.extend(blist)

        # Persist so later phases (or re-runs) can load from disk.
        self.backup_list.store()

    def backup_data(self,
                    pluginName: Optional[str] = None,
                    skip: bool = False) -> None:
        """Placeholder phase; the data backup loop is not implemented yet."""
        if skip:
            logging.info("skipping backup data")
            return
        # for batch in self.pm.backup_batch:
        #     # each batch could also be executed in parallel.
        #     # But for not just start sequencial
        #     for plugin in batch:
        #         blist = plugin.backup_data(DCOSClient=self.client)
        #         self.backup_data_list.extend(blist)

    def migrate(self,
                pluginName: Optional[str] = None,
                skip: bool = False) -> None:
        """Run every plugin's migrate step, or reload manifests from disk."""
        if skip:
            logging.info("skipping migrate - trying to load from disk.")
            self.manifest_list.load()
            return
        for batch in self.pm.migrate_batch:
            # each batch could also be executed in parallel.
            # But for now just start sequential
            for plugin in batch:
                mlist = plugin.migrate(backupList=self.backup_list,
                                       manifestList=self.manifest_list)
                if mlist:
                    self.manifest_list.extend(mlist)

        # Persist the generated manifests for later phases / re-runs.
        self.manifest_list.store()

    def migrate_data(self,
                     pluginName: Optional[str] = None,
                     skip: bool = False) -> None:
        """Placeholder phase; the data migration loop is not implemented yet."""
        if skip:
            logging.info("skipping migrate data")
            return
        # for batch in self.pm.migrate_batch:
        #     # each batch could also be executed in parallel.
        #     # But for not just start sequencial
        #     for plugin in batch:
        #         mlist = plugin.migrate(
        #             backupList=self.backup_list, manifestList=self.manifest_list)
        #         self.manifest_list.extend(mlist)

    def get_plugin_names(self) -> Iterable[str]:
        """Names of all loaded plugins (keys of the plugin registry)."""
        return self.pm.plugins.keys()