def export_docker(graph, export_location):
    """
    Wrapper for exporting docker images, containers and volumes
    into a filesystem directory.

    :param graph: path to the docker graph (storage) directory
    :param export_location: directory the export is written to;
        created if it does not already exist
    """
    if os.geteuid() != 0:
        sys.exit("You need to have root privileges to run atomic export.")

    if not os.path.isdir(export_location):
        os.makedirs(export_location)

    try:
        # Save the docker storage driver so a later import can check it.
        storage_driver = DOCKER_CLIENT.info()["Driver"]
        # 'with' guarantees the file is closed even if a later step fails;
        # the original leaked the handle on any exception before close().
        with open(export_location + "/info.txt", "w") as filed:
            filed.write(storage_driver)

        # export docker images
        export_images(export_location)
        # export docker containers
        export_containers(graph, export_location)
        # export docker volumes
        export_volumes(graph, export_location)
    except Exception as e:
        # BUG FIX: the bare 'except:' with sys.exc_info()[0] exited with
        # the exception *class* instead of its message, and also trapped
        # SystemExit/KeyboardInterrupt.
        sys.exit(e)
    util.writeOut("atomic export completed successfully")
def import_docker(graph, import_location):
    """
    Wrapper for importing docker images, containers and volumes
    from a filesystem directory created by a previous export.

    :param graph: path to the docker graph (storage) directory
    :param import_location: directory holding the exported data; must
        already exist
    """
    if os.geteuid() != 0:
        sys.exit("You need to have root privileges to run atomic import.")

    if not os.path.isdir(import_location):
        sys.exit("{0} does not exist".format(import_location))

    try:
        # import docker images
        import_images(import_location)
        # import docker containers
        import_containers(graph, import_location)
        # import docker volumes
        import_volumes(graph, import_location)
    except Exception as e:
        # BUG FIX: the bare 'except:' with sys.exc_info()[0] exited with
        # the exception *class* instead of its message.
        sys.exit(e)
    util.writeOut("atomic import completed successfully")
    util.writeOut("Would you like to cleanup (rm -rf {0}) the temporary directory [y/N]"
                  .format(import_location))
    choice = sys.stdin.read(1)
    if choice in ('y', 'Y'):
        util.writeOut("Deleting {0}".format(import_location))
        # Argv-list form avoids shell word-splitting/globbing of the path
        # (the original interpolated it into a shell command string).
        subprocess.check_call(["rm", "-rf", import_location])
    else:
        util.writeOut("Cleanup operation aborted")
    util.writeOut("Please restart docker daemon for the changes to take effect")
def export_docker(graph, export_location):
    """
    Wrapper for exporting docker images, containers and volumes
    into a filesystem directory.

    :param graph: path to the docker graph (storage) directory
    :param export_location: directory the export is written to;
        created if it does not already exist
    """
    if os.geteuid() != 0:
        sys.exit("You need to have root privileges to run atomic export.")

    if not os.path.isdir(export_location):
        os.makedirs(export_location)

    try:
        # Save the docker storage driver so a later import can check it.
        storage_driver = DOCKER_CLIENT.info()["Driver"]
        # 'with' guarantees the file is closed even if a later step fails;
        # the original leaked the handle on any exception before close().
        with open(export_location + "/info.txt", "w") as filed:
            filed.write(storage_driver)

        # export docker images
        export_images(export_location)
        # export docker containers
        export_containers(graph, export_location)
        # export docker volumes
        export_volumes(graph, export_location)
    except Exception as e:
        # BUG FIX: the bare 'except:' with sys.exc_info()[0] exited with
        # the exception *class* instead of its message, and also trapped
        # SystemExit/KeyboardInterrupt.
        sys.exit(e)
    util.writeOut("atomic export completed successfully")
def scan(self):
    """
    Scan docker images and/or containers for vulnerabilities through
    the openscap-daemon dbus service and report the results.

    Exits with status 1 when no targets were given, the daemon is
    unreachable, or the scan result is not clean.
    """
    # Require explicit targets unless one of the bulk flags is set.
    if (not self.args.images and not self.args.containers and
            not self.args.all) and len(self.args.scan_targets) == 0:
        sys.stderr.write("\nYou must provide a list of containers or images to scan\n")
        sys.exit(1)
    self.ping()
    BUS_NAME = "org.OpenSCAP.daemon"
    OBJECT_PATH = "/OpenSCAP/daemon"
    INTERFACE = "org.OpenSCAP.daemon.Interface"
    # Maps resolved docker ids back to the name the user typed so the
    # summary can be printed in the user's terms.
    input_resolve = {}
    if self.args.images:
        scan_list = self._get_all_image_ids()
    elif self.args.containers:
        scan_list = self._get_all_container_ids()
    elif self.args.all:
        cids = self._get_all_container_ids()
        iids = self._get_all_image_ids()
        scan_list = cids + iids
    else:
        scan_list = []
        for scan_input in self.args.scan_targets:
            docker_id = self.get_input_id(scan_input)
            input_resolve[docker_id] = scan_input
            scan_list.append(docker_id)
    util.writeOut("\nScanning...\n")
    bus = dbus.SystemBus()
    try:
        oscap_d = bus.get_object(BUS_NAME, OBJECT_PATH)
        oscap_i = dbus.Interface(oscap_d, INTERFACE)
        # Check if the user has asked to override the behaviour of fetching the
        # latest CVE input data, as defined in the openscap-daemon conf file
        # oscap-daemon a byte of 0 (False), 1 (True), and 2 (no change)
        if self.args.fetch_cves is None:
            fetch = 2
        elif self.args.fetch_cves:
            fetch = 1
        else:
            fetch = 0
        # The second argument (4) is passed straight to the daemon --
        # presumably a scan-type selector; TODO confirm against the
        # openscap-daemon dbus API. Large timeout: scans can be slow.
        scan_return = json.loads(oscap_i.scan_list(scan_list, 4, fetch,
                                                   timeout=99999))
    except dbus.exceptions.DBusException as e:
        message = "The openscap-daemon returned: {0}".format(e.get_dbus_message())
        if e.get_dbus_name() == 'org.freedesktop.DBus.Error.ServiceUnknown':
            # ServiceUnknown means the daemon is not on the bus at all.
            message = "Unable to find the openscap-daemon dbus service. "\
                      "Either start the openscap-daemon service or pull " \
                      "and run the openscap-daemon image"
        sys.stderr.write("\n{0}\n\n".format(message))
        sys.exit(1)
    if self.args.json:
        util.output_json(scan_return)
    else:
        if not self.args.detail:
            clean = util.print_scan_summary(scan_return, input_resolve)
        else:
            clean = util.print_detail_scan_summary(scan_return, input_resolve)
        if not clean:
            sys.exit(1)
def push_image_to_satellite(image, server_url, username, password,
                            verify_ssl, docker_client, repo_name,
                            organization_name, activation_key_name,
                            debug=False):
    """
    Upload a local docker image to a Satellite server and publish the
    associated content view.

    :param image: image name; a leading host component (contains a dot)
        overrides server_url and is stripped from the name
    :param server_url: Satellite base URL ("https://" prepended if bare)
    :param repo_name: name of the target Satellite repository
    :param organization_name: Satellite organization owning the repo
    :param activation_key_name: activation key used to look up repo data
    :raises ValueError: when image or server url is missing
    :raises IOError: on any failure talking to the Satellite server
    """
    if not image:
        raise ValueError("Image required")
    parts = image.split("/")
    # BUG FIX: the original tested "parts > 1", comparing the list object
    # itself to an int instead of its length.
    if len(parts) > 1:
        if parts[0].find(".") != -1:
            # First component looks like a hostname: use it as the server.
            server_url = parts[0]
            image = "/".join(parts[1:])
    if not server_url:
        raise ValueError("Satellite server url required")
    if not server_url.startswith("http"):
        server_url = "https://" + server_url
    try:
        sat = SatelliteServer(server_url=server_url, username=username,
                              password=password, verify_ssl=verify_ssl,
                              docker_client=docker_client, debug=debug)
    except Exception as e:
        raise IOError('Failed to initialize Satellite: {0}'.format(e))
    try:
        org_id = sat.getOrgID(organization_name)
    except Exception:
        raise IOError("Failed to get organization ID")
    try:
        repo_id = sat.getRepoId(repo_name, org_id)
    except Exception:
        raise IOError("""Invalid Repository Name: {0}. Please verify that you typed the name correctly, or create the repository """.format(repo_name).replace('\n', ' '))
    try:
        # BUG FIX: the original referenced the undefined name
        # 'organization_id'; the id resolved above is 'org_id'.
        activation_key = sat.getActivationKey(activation_key_name, org_id)
    except Exception:
        raise IOError("""Invalid Activation Key Name: {0}""".format(activation_key_name).replace('\n', ' '))
    keyData = sat.get_data(repo_id, activation_key)
    content_view_id = keyData.get("content_view_id")
    # note: keyData's product_id is not needed by the publish step.
    try:
        util.writeOut('Uploading image "{0}" to server "{1}"'.format(
            image, server_url))
        sat.upload_docker_image(image, repo_id)
        util.writeOut("")
    except Exception as e:
        raise IOError('Failed to upload image: {0}'.format(e))
    sat.publish_view(content_view_id, repo_id)
    print("Push Complete")
def import_images(import_location):
    """
    Import docker images from the images/ subdirectory of a filesystem
    export created by atomic export.

    Each tarball is named <image-id>.tar; the ".tar" suffix is stripped
    for the progress message.

    :param import_location: directory holding the images/ subdirectory
    """
    # os.listdir avoids spawning a shell just to enumerate files and is
    # immune to word-splitting; sorted() preserves "ls"-style ordering.
    for tarball in sorted(os.listdir("{0}/images".format(import_location))):
        util.writeOut("Importing image with id: {0}".format(tarball[:-4]))
        subprocess.check_call("docker load < {0}/images/{1}"
                              .format(import_location, tarball), shell=True)
def _print_release(self):
    """
    Print the two images' release strings side by side, wrapping each
    one to the column width when it is longer than a single column.

    :return: None
    """
    width = self._max - 2
    wrapped = []
    for info in (self.i1, self.i2):
        text = info.release.strip()
        # Wrap on the rstrip()ed length, slicing the strip()ed text,
        # exactly as the columns were originally computed.
        limit = len(info.release.rstrip())
        wrapped.append([text[start:start + width]
                        for start in range(0, limit, width)])
    left, right = wrapped
    for row in range(max(len(left), len(right))):
        first = left[row] if row < len(left) else ""
        second = right[row] if row < len(right) else ""
        util.writeOut(self.two_col.format(first, second))
def import_volumes(graph, import_location):
    """
    Restore docker volume data from a filesystem export into the
    docker graph directory.

    :param graph: docker graph (storage) directory
    :param import_location: directory holding the volumes/ subdirectory
    """
    util.writeOut("Importing Volumes")
    volume_cmd = ("/usr/bin/tar --selinux -xzvf {0}/volumes/volumeData.tar.gz"
                  " -C {1}/volumes".format(import_location, graph))
    subprocess.check_call(volume_cmd, stdout=DEVNULL, shell=True)
    # Only vfs-backed storage has a graph/vfs directory to restore.
    if os.path.isdir(graph + "/vfs"):
        vfs_cmd = ("/usr/bin/tar --selinux -xzvf {0}/volumes/vfsData.tar.gz"
                   " -C {1}/vfs".format(import_location, graph))
        subprocess.check_call(vfs_cmd, stdout=DEVNULL, shell=True)
def export_volumes(graph, export_location):
    """
    Archive docker volume data from the graph directory into the
    volumes/ subdirectory of a filesystem export.

    :param graph: docker graph (storage) directory
    :param export_location: export directory; volumes/ is created
        when missing
    """
    volumes_dir = export_location + "/volumes"
    if not os.path.isdir(volumes_dir):
        os.makedirs(volumes_dir)
    util.writeOut("Exporting Volumes")
    volume_cmd = ("/usr/bin/tar --selinux -zcvf {0}/volumes/volumeData.tar.gz"
                  " -C {1}/volumes .".format(export_location, graph))
    subprocess.check_call(volume_cmd, stdout=DEVNULL, shell=True)
    # Only vfs-backed storage keeps extra data under graph/vfs.
    if os.path.isdir(graph + "/vfs"):
        vfs_cmd = ("/usr/bin/tar --selinux -zcvf {0}/volumes/vfsData.tar.gz"
                   " -C {1}/vfs .".format(export_location, graph))
        subprocess.check_call(vfs_cmd, stdout=DEVNULL, shell=True)
def import_containers(graph, import_location):
    """
    Import docker containers from the containers/ subdirectory of a
    filesystem export, via the migrate.sh helper.

    :param graph: docker graph (storage) directory
    :param import_location: directory holding the containers/ subdirectory
    """
    if not os.path.isdir(import_location + "/containers"):
        sys.exit("{0} does not exist. No containers to import."
                 .format(import_location + "/containers"))
    # os.listdir avoids shelling out to "ls"; sorted() keeps "ls" ordering.
    for entry in sorted(os.listdir("{0}/containers".format(import_location))):
        # The first 8 characters are a fixed prefix added at export time
        # (presumably "migrate-" -- TODO confirm against migrate.sh);
        # the remainder is the container id.
        container_id = entry[8:]
        util.writeOut("Importing container ID:{0}".format(container_id))
        subprocess.check_call(
            "/usr/libexec/atomic/migrate.sh import --container-id={0}"
            " --graph={1} --import-location={2}"
            .format(container_id, graph, import_location), shell=True)
def output_files(self, images, image_list):
    """
    Report filesystem differences between the two images: always folded
    into self.json_out, and pretty-printed unless --json was requested.

    :param images: the two image names as given on the command line
    :param image_list: the two resolved image objects (.name / .chroot)
    :return: None
    """
    diff = DiffFS(image_list[0].chroot, image_list[1].chroot)
    for img in image_list:
        only_key = '{}_only'.format(img.name)
        self.json_out[img.name] = {only_key: diff._get_only(img.chroot)}
    self.json_out['files_differ'] = diff.common_diff
    if not self.args.json:
        diff.print_results(images[0], images[1])
        util.writeOut("\n")
def push_image_to_satellite(image, server_url, username, password,
                            verify_ssl, docker_client, activation_key,
                            repo_id, debug=False):
    """
    Upload a local docker image to an existing Satellite repository and
    publish the associated content view.

    :param image: image name; a leading host component (contains a dot)
        overrides server_url and is stripped from the name
    :param server_url: Satellite base URL ("https://" prepended if bare)
    :param activation_key: activation key used to look up repo data
    :param repo_id: id of an existing Satellite repository
    :raises ValueError: when image or server url is missing
    :raises IOError: when the repo is invalid or the upload fails
    """
    if not image:
        raise ValueError("Image required")
    parts = image.split("/")
    # BUG FIX: the original tested "parts > 1", comparing the list object
    # itself to an int instead of its length.
    if len(parts) > 1:
        if parts[0].find(".") != -1:
            # First component looks like a hostname: use it as the server.
            server_url = parts[0]
            image = "/".join(parts[1:])
    if not server_url:
        raise ValueError("Satellite server url required")
    if not server_url.startswith("http"):
        server_url = "https://" + server_url
    try:
        sat = SatelliteServer(server_url=server_url, username=username,
                              password=password, verify_ssl=verify_ssl,
                              docker_client=docker_client, debug=debug)
    except Exception as e:
        raise IOError('Failed to initialize Satellite: {0}'.format(e))
    if not sat.is_repo(repo_id):
        raise IOError("""Invalid Repository ID: {0}. Please create that repository and try again, or input a different ID.""".format(repo_id).replace('\n', ' '))
    keyData = sat.get_data(repo_id, activation_key)
    content_view_id = keyData.get("content_view_id")
    # note: keyData's org_id / product_id are not needed by this flow.
    try:
        util.writeOut('Uploading image "{0}" to server "{1}"'.format(
            image, server_url))
        sat.upload_docker_image(image, repo_id)
        util.writeOut("")
    except Exception as e:
        raise IOError('Failed to upload image: {0}'.format(e))
    sat.publish_view(content_view_id, repo_id)
    print("Push Complete")
def push_image_to_satellite(
    image,
    server_url,
    username,
    password,
    verify_ssl,
    docker_client,
    activation_key,
    repo_id,
    debug=False,
):
    """
    Upload a local docker image to an existing Satellite repository and
    publish the associated content view.

    :param image: image name; a leading host component (contains a dot)
        overrides server_url and is stripped from the name
    :param server_url: Satellite base URL ("https://" prepended if bare)
    :param activation_key: activation key used to look up repo data
    :param repo_id: id of an existing Satellite repository
    :raises ValueError: when image or server url is missing
    :raises IOError: when the repo is invalid or the upload fails
    """
    if not image:
        raise ValueError("Image required")
    parts = image.split("/")
    # BUG FIX: the original tested "parts > 1", comparing the list object
    # itself to an int instead of its length.
    if len(parts) > 1:
        if parts[0].find(".") != -1:
            # First component looks like a hostname: use it as the server.
            server_url = parts[0]
            image = "/".join(parts[1:])
    if not server_url:
        raise ValueError("Satellite server url required")
    if not server_url.startswith("http"):
        server_url = "https://" + server_url
    try:
        sat = SatelliteServer(
            server_url=server_url,
            username=username,
            password=password,
            verify_ssl=verify_ssl,
            docker_client=docker_client,
            debug=debug,
        )
    except Exception as e:
        raise IOError("Failed to initialize Satellite: {0}".format(e))
    if not sat.is_repo(repo_id):
        raise IOError("""Invalid Repository ID: {0}. Please create that repository and try again, or input a different ID.""".format(repo_id).replace("\n", " "))
    keyData = sat.get_data(repo_id, activation_key)
    content_view_id = keyData.get("content_view_id")
    # note: keyData's org_id / product_id are not needed by this flow.
    try:
        util.writeOut('Uploading image "{0}" to server "{1}"'.format(image, server_url))
        sat.upload_docker_image(image, repo_id)
        util.writeOut("")
    except Exception as e:
        raise IOError("Failed to upload image: {0}".format(e))
    sat.publish_view(content_view_id, repo_id)
    print("Push Complete")
def export_containers(graph, export_location):
    """
    Export all docker containers (running or not) into the containers/
    subdirectory of a filesystem export, via the migrate.sh helper.

    :param graph: docker graph (storage) directory
    :param export_location: export directory; containers/ is created
        when missing
    """
    if not os.path.isdir(export_location + "/containers"):
        os.makedirs(export_location + "/containers")
    # Iterate the container list directly instead of building a parallel
    # id list and indexing it with range(len(...)).
    for container in DOCKER_CLIENT.containers(all=True):
        short_id = container["Id"][:12]
        util.writeOut("Exporting container ID:{0}".format(short_id))
        subprocess.check_call(
            "/usr/libexec/atomic/migrate.sh export --container-id={0}"
            " --graph={1} --export-location={2}"
            .format(short_id, graph, export_location), shell=True)
def scan(self):
    """
    Scan docker images and/or containers for vulnerabilities through
    the openscap-daemon dbus service and report the results.

    Exits with status 1 when the daemon is unreachable or the scan
    result is not clean.
    """
    self.ping()
    BUS_NAME = "org.OpenSCAP.daemon"
    OBJECT_PATH = "/OpenSCAP/daemon"
    INTERFACE = "org.OpenSCAP.daemon.Interface"
    # Maps resolved docker ids back to the name the user typed so the
    # summary can be printed in the user's terms.
    input_resolve = {}
    if self.args.images:
        scan_list = self._get_all_image_ids()
    elif self.args.containers:
        scan_list = self._get_all_container_ids()
    elif self.args.all:
        cids = self._get_all_container_ids()
        iids = self._get_all_image_ids()
        scan_list = cids + iids
    else:
        scan_list = []
        for scan_input in self.args.scan_targets:
            docker_id = self.get_input_id(scan_input)
            input_resolve[docker_id] = scan_input
            scan_list.append(docker_id)
    util.writeOut("\nScanning...\n")
    bus = dbus.SystemBus()
    try:
        oscap_d = bus.get_object(BUS_NAME, OBJECT_PATH)
        oscap_i = dbus.Interface(oscap_d, INTERFACE)
        # The second argument (4) is passed straight to the daemon --
        # presumably a scan-type selector; TODO confirm against the
        # openscap-daemon dbus API.
        scan_return = json.loads(oscap_i.scan_list(scan_list, 4))
    except dbus.exceptions.DBusException:
        error = "Unable to find the openscap-daemon dbus service. "\
                "Either start the openscap-daemon service or pull and run"\
                " the openscap-daemon image"
        sys.stderr.write("\n{0}\n\n".format(error))
        sys.exit(1)
    if self.args.json:
        util.output_json(scan_return)
    else:
        if not self.args.detail:
            clean = util.print_scan_summary(scan_return, input_resolve)
        else:
            clean = util.print_detail_scan_summary(scan_return, input_resolve)
        if not clean:
            sys.exit(1)
def export_volumes(graph, export_location):
    """
    Archive docker volume data from the graph directory into the
    volumes/ subdirectory of a filesystem export.

    :param graph: docker graph (storage) directory
    :param export_location: export directory; volumes/ is created
        when missing
    """
    target = export_location + "/volumes"
    if not os.path.isdir(target):
        os.makedirs(target)
    util.writeOut("Exporting Volumes")
    subprocess.check_call(
        "/usr/bin/tar --selinux -zcvf {0}/volumes/volumeData.tar.gz"
        " -C {1}/volumes .".format(export_location, graph),
        stdout=DEVNULL,
        shell=True)
    # Only vfs-backed storage keeps extra data under graph/vfs.
    if os.path.isdir(graph + "/vfs"):
        subprocess.check_call(
            "/usr/bin/tar --selinux -zcvf {0}/volumes/vfsData.tar.gz"
            " -C {1}/vfs .".format(export_location, graph),
            stdout=DEVNULL,
            shell=True)
def export_containers(graph, export_location):
    """
    Export all docker containers (running or not) into the containers/
    subdirectory of a filesystem export, via the migrate.sh helper.

    :param graph: docker graph (storage) directory
    :param export_location: export directory; containers/ is created
        when missing
    """
    if not os.path.isdir(export_location + "/containers"):
        os.makedirs(export_location + "/containers")
    # Iterate the container list directly instead of building a parallel
    # id list and indexing it with range(len(...)).
    for container in DOCKER_CLIENT.containers(all=True):
        short_id = container["Id"][:12]
        util.writeOut("Exporting container ID:{0}".format(short_id))
        subprocess.check_call(
            "/usr/libexec/atomic/migrate.sh export --container-id={0}"
            " --graph={1} --export-location={2}".format(
                short_id, graph, export_location),
            shell=True)
def push_image_to_pulp(image, server_url, username, password,
                       verify_ssl, docker_client):
    """
    Upload a local docker image to a Pulp server, creating the
    repository when needed, then publish and export it.

    :param image: image name; a leading host component (contains a dot)
        overrides server_url and is stripped from the name
    :param server_url: Pulp base URL ("https://" prepended if bare)
    :raises ValueError: when image or server url is missing
    :raises IOError: when Pulp setup, repo creation, or upload fails
    """
    if not image:
        raise ValueError("Image required")
    parts = image.split("/")
    # BUG FIX: the original tested "parts > 1", comparing the list object
    # itself to an int instead of its length.
    if len(parts) > 1:
        if parts[0].find(".") != -1:
            # First component looks like a hostname: use it as the server.
            server_url = parts[0]
            image = "/".join(parts[1:])
    # Flatten '/' to '-' to form the repo identifier.
    repo = image.replace("/", "-")
    if not server_url:
        raise ValueError("Pulp server url required")
    if not server_url.startswith("http"):
        server_url = "https://" + server_url
    try:
        pulp = PulpServer(server_url=server_url, username=username,
                          password=password, verify_ssl=verify_ssl,
                          docker_client=docker_client)
    except Exception as e:
        raise IOError('Failed to initialize Pulp: {0}'.format(e))
    try:
        if not pulp.is_repo(repo):
            pulp.create_repo(image, repo)
    except Exception as e:
        raise IOError('Failed to create repository: {0}'.format(e))
    try:
        util.writeOut('Uploading image "{0}" to server "{1}"'.format(
            image, server_url))
        pulp.upload_docker_image(image, repo)
        util.writeOut("")
    except Exception as e:
        raise IOError('Failed to upload image: {0}'.format(e))
    pulp.publish_repo(repo)
    pulp.export_repo(repo)
def stop(self):
    """
    Stop the container named by self.name.

    Resolves the name to an active container id, runs the image-defined
    STOP command (with any extra user arguments shell-quoted) when one
    exists, then asks docker to stop the container if it reports as
    still running.
    """
    try:
        self.name = self._is_container(self.name, active=True)
    except AtomicError as err:
        util.writeOut(err)
        sys.exit(1)

    stop_args = self._get_args("STOP")
    if stop_args:
        extra = list(map(pipes.quote, self.args.args))
        command = self.gen_cmd(stop_args + extra)
        self.display(command)
        subprocess.check_call(command, env=self.cmd_env, shell=True)

    # Container exists; stop it through the docker client when the
    # inspect data says it is running.
    try:
        if self.inspect["State"]["Running"]:
            self.d.stop(self.name)
    except KeyError:
        pass
def stop(self):
    """
    Stop the container named by self.name: resolve it to an active
    container id, run the image's STOP command when one is defined,
    then stop it through the docker client if still running.
    """
    try:
        container_id = self._is_container(self.name, active=True)
    except AtomicError as err:
        util.writeOut(err)
        sys.exit(1)
    self.name = container_id

    stop_args = self._get_args("STOP")
    if stop_args:
        command = self.gen_cmd(stop_args)
        self.display(command)
        subprocess.check_call(command, env=self.cmd_env, shell=True)

    # Container exists; stop it through the docker client when the
    # inspect data says it is running.
    try:
        if self.inspect["State"]["Running"]:
            self.d.stop(self.name)
    except KeyError:
        pass
def export_images(export_location):
    """
    Export all tagged docker images into the images/ subdirectory of a
    filesystem export, one "docker save" tarball per image id.

    :param export_location: export directory; images/ is created when
        missing
    """
    if not os.path.isdir(export_location + "/images"):
        os.makedirs(export_location + "/images")
    # Collect every image's tag list keyed by image id in one pass,
    # replacing the original parallel-list + ad-hoc nesting scheme.
    tags_by_id = {}
    for image in DOCKER_CLIENT.images():
        tags = image["RepoTags"]
        # BUG FIX: RepoTags is a *list* of tag strings, so the original
        # equality test against the string '<none>:<none>' never matched
        # and untagged images were not skipped. Use a membership test.
        if '<none>:<none>' in tags:
            continue
        tags_by_id.setdefault(image["Id"], []).extend(tags)
    # .items() works on both python 2 and 3; iteritems() is py2-only.
    for image_id, tags in tags_by_id.items():
        util.writeOut("Exporting image with id: {0}".format(image_id[:12]))
        subprocess.check_call(
            "docker save {0} > {1}/images/{2}.tar".format(
                " ".join(tags), export_location, image_id[:12]),
            shell=True)
def output_rpms(self, rpm_image_list):
    """
    Print RPM differences between the two images, or fold them into
    self.json_out when --json was requested.

    :param rpm_image_list: the two image objects to compare
    :return: None
    """
    ip = RpmPrint(rpm_image_list)
    if not self.args.json:
        if ip.has_diff:
            ip._print_diff(self.args.verbose)
        else:
            util.writeOut("\n{} and {} have no different RPMs".format(
                ip.i1.name, ip.i2.name))
    # Output JSON content
    else:
        # Iterate items() instead of keys() + repeated lookups, and merge
        # in place: the original's temp-variable shuffle rebound the same
        # dict object it had just mutated.
        for image, data in ip._rpm_json().items():
            if image not in self.json_out:
                self.json_out[image] = data
            else:
                self.json_out[image].update(data)
def export_images(export_location):
    """
    Export all tagged docker images into the images/ subdirectory of a
    filesystem export, one "docker save" tarball per image id.

    :param export_location: export directory; images/ is created when
        missing
    """
    if not os.path.isdir(export_location + "/images"):
        os.makedirs(export_location + "/images")
    # Collect every image's tag list keyed by image id in one pass,
    # replacing the original parallel-list + ad-hoc nesting scheme.
    tags_by_id = {}
    for image in DOCKER_CLIENT.images():
        tags = image["RepoTags"]
        # BUG FIX: RepoTags is a *list* of tag strings, so the original
        # equality test against the string '<none>:<none>' never matched
        # and untagged images were not skipped. Use a membership test.
        if '<none>:<none>' in tags:
            continue
        tags_by_id.setdefault(image["Id"], []).extend(tags)
    # .items() works on both python 2 and 3; iteritems() is py2-only.
    for image_id, tags in tags_by_id.items():
        util.writeOut("Exporting image with id: {0}".format(image_id[:12]))
        subprocess.check_call(
            "docker save {0} > {1}/images/{2}.tar".format(
                " ".join(tags), export_location, image_id[:12]),
            shell=True)
def print_results(self, left_docker_obj, right_docker_obj):
    """
    Pretty-print the filesystem diff results.

    :param left_docker_obj: display name of the first image/container
    :param right_docker_obj: display name of the second image/container
    :return: None
    """
    def _print_diff(file_list):
        # Indent each entry five spaces under its section header.
        for _file in file_list:
            util.writeOut("{0}{1}".format(5 * " ", _file))

    # Container truthiness replaces the explicit len() comparisons.
    if not (self.left or self.right or self.common_diff):
        util.writeOut("\nThere are no file differences between {0} "
                      "and {1}".format(left_docker_obj, right_docker_obj))
    if self.left:
        util.writeOut("\nFiles only in {}:".format(left_docker_obj))
        _print_diff(self.left)
    if self.right:
        util.writeOut("\nFiles only in {}:".format(right_docker_obj))
        _print_diff(self.right)
    if self.common_diff:
        util.writeOut("\nCommon files that are different:")
        _print_diff(self.common_diff)
def _print_diff(self, be_verbose):
    """
    Write the RPM comparison as two aligned columns, one per image.

    :param be_verbose: when True, also list RPMs common to both images
    :return: None
    """
    separator = self.two_col.format("-" * self._max, "-" * self._max)
    util.writeOut("")
    util.writeOut(self.two_col.format(self.i1.name, self.i2.name))
    util.writeOut(separator)
    self._print_release()
    util.writeOut(separator)
    for rpm in self.all_rpms:
        in_first = rpm in self.i1.rpms
        in_second = rpm in self.i2.rpms
        if in_first and in_second:
            # Common RPMs are only shown in verbose mode.
            if be_verbose:
                util.writeOut(self.two_col.format(rpm, rpm))
        elif in_first:
            util.writeOut(self.two_col.format(rpm, ""))
        elif in_second:
            util.writeOut(self.two_col.format("", rpm))
def _print_diff(file_list):
    """Write each entry of file_list indented by five spaces."""
    indent = " " * 5
    for entry in file_list:
        util.writeOut("{0}{1}".format(indent, entry))
def scan(self):
    """
    Scan docker images and/or containers for vulnerabilities through
    the openscap-daemon dbus service and report the results.

    Refuses to scan objects that are already mounted. Exits with
    status 1 when no targets were given, a target is in use, the
    daemon is unreachable, or the scan result is not clean.
    """
    # Require explicit targets unless one of the bulk flags is set.
    if (not self.args.images and not self.args.containers and
            not self.args.all) and len(self.args.scan_targets) == 0:
        sys.stderr.write(
            "\nYou must provide a list of containers or images to scan\n")
        sys.exit(1)
    self.ping()
    BUS_NAME = "org.OpenSCAP.daemon"
    OBJECT_PATH = "/OpenSCAP/daemon"
    INTERFACE = "org.OpenSCAP.daemon.Interface"
    # Maps resolved docker ids back to the name the user typed so the
    # summary can be printed in the user's terms.
    input_resolve = {}
    if self.args.images:
        scan_list = self._get_all_image_ids()
    elif self.args.containers:
        scan_list = self._get_all_container_ids()
    elif self.args.all:
        cids = self._get_all_container_ids()
        iids = self._get_all_image_ids()
        scan_list = cids + iids
    else:
        scan_list = []
        for scan_input in self.args.scan_targets:
            docker_id = self.get_input_id(scan_input)
            input_resolve[docker_id] = scan_input
            scan_list.append(docker_id)
    # Check to make sure none of the docker objects we need to
    # scan are already mounted.
    for docker_obj in scan_list:
        if util.is_dock_obj_mounted(docker_obj):
            sys.stderr.write(
                "\nThe object {0} is already mounted (in "
                "use) and therefore cannot be scanned.\n".format(docker_obj))
            sys.exit(1)
    util.writeOut("\nScanning...\n")
    bus = dbus.SystemBus()
    try:
        oscap_d = bus.get_object(BUS_NAME, OBJECT_PATH)
        oscap_i = dbus.Interface(oscap_d, INTERFACE)
        # Check if the user has asked to override the behaviour of fetching
        # the latest CVE input data, as defined in the openscap-daemon conf
        # file; the daemon takes a byte of 0 (False), 1 (True), 2 (no change)
        if self.args.fetch_cves is None:
            fetch = 2
        elif self.args.fetch_cves:
            fetch = 1
        else:
            fetch = 0
        scan_return = json.loads(
            oscap_i.scan_list(scan_list, 4, fetch, timeout=99999))
    except dbus.exceptions.DBusException as e:
        message = "The openscap-daemon returned: {0}".format(
            e.get_dbus_message())
        if e.get_dbus_name() == 'org.freedesktop.DBus.Error.ServiceUnknown':
            # BUG FIX: this literal was truncated/garbled in the source;
            # reconstructed from the identical sibling scan() implementation.
            message = "Unable to find the openscap-daemon dbus service. "\
                      "Either start the openscap-daemon service or pull " \
                      "and run the openscap-daemon image"
        sys.stderr.write("\n{0}\n\n".format(message))
        sys.exit(1)
    if self.args.json:
        util.output_json(scan_return)
    else:
        if not self.args.detail:
            clean = util.print_scan_summary(scan_return, input_resolve)
        else:
            clean = util.print_detail_scan_summary(scan_return, input_resolve)
        if not clean:
            sys.exit(1)