def delete(self):
    """Delete the selected containers (or all of them with --all).

    Prompts for confirmation before deleting.  Returns None; failures to
    delete individual containers are reported but do not abort the loop.
    """
    if self.args.debug:
        util.write_out(str(self.args))
    if len(self.args.containers) > 0 and self.args.all:
        raise ValueError("You must select --all or provide a list of containers to delete.")
    beu = BackendUtils()
    if self.args.all:
        container_objects = beu.get_containers()
    else:
        # Resolve each named container up front so a bad name fails
        # before anything is deleted.
        container_objects = []
        for con in self.args.containers:
            _, con_obj = beu.get_backend_and_container_obj(con, str_preferred_backend=storage)
            container_objects.append(con_obj)
    four_col = "   {0:12} {1:20} {2:25} {3:10}"
    util.write_out(four_col.format("ID", "NAME", 'IMAGE_NAME', "STORAGE"))
    for con in container_objects:
        util.write_out(four_col.format(con.id[0:12], con.name[0:20], con.image_name[0:25], con.backend.backend))
    if not util.confirm_input("\nDo you wish to delete the following containers?\n"):
        util.write_out("Aborting...")
        return
    for del_con in container_objects:
        try:
            del_con.backend.delete_container(del_con.id, force=self.args.force)
        except APIError as e:
            # BUGFIX: previously reported con.id (stale loop variable from
            # the display loop above) instead of the container that failed.
            util.write_err("Failed to delete container {}: {}".format(del_con.id, e))
def __init__(self, policy_filename=None):
    """
    :param policy_filename: override policy filename
    """
    super(Pull, self).__init__()
    # Keep the override (or None) and a backend-utils helper on the instance.
    self.policy_filename = policy_filename
    self.be_utils = BackendUtils()
def pull_image(self):
    """Pull self.args.image into the requested (or default) storage backend.

    May transparently redirect a docker pull to the ostree backend when the
    remote image declares itself a system image and no storage was forced.
    :return: 0 on success; raises ValueError on failure.
    """
    storage_set = self.args.storage is not None
    storage = self.args.storage if storage_set else _storage
    check_storage_is_available(storage)
    if self.args.debug:
        write_out(str(self.args))
    src_creds = getattr(self.args, 'src_creds', None)
    if src_creds == "":
        # Treat an explicit empty credential string as "no credentials".
        src_creds = None
    be_utils = BackendUtils()
    be = be_utils.get_backend_from_string(storage)
    self.args.policy_filename = self.policy_filename
    try:
        remote_image_obj = None
        if be.backend == 'docker':
            remote_image_obj = be.make_remote_image(self.args.image)
            # System-typed images land in ostree unless --storage was given.
            if remote_image_obj.is_system_type and not storage_set:
                be = be_utils.get_backend_from_string('ostree')
                be_utils.message_backend_change('docker', 'ostree')
        elif be.backend == "containers-storage":
            remote_image_obj = be.make_remote_image(self.args.image)
        be.pull_image(self.args.image, remote_image_obj, debug=self.args.debug,
                      assumeyes=self.args.assumeyes, src_creds=src_creds)
    except ValueError as e:
        raise ValueError("Failed: {}".format(e))
    return 0
def tag_image(self):
    """
    Tag an image with a different name

    :return: 0 if the tag was created
    """
    if self.args.debug:
        util.write_out(str(self.args))
    beu = BackendUtils()
    if self.args.storage:
        backend = beu.get_backend_from_string(self.args.storage)
        image = backend.has_image(self.args.src)
    else:
        backend, image = beu.get_backend_and_image_obj(self.args.src, required=False)
    if not backend or not image:
        raise ValueError("Cannot find image {}.".format(self.args.src))
    backend.tag_image(self.args.src, self.args.target)
    # We need to return something here for dbus
    return 0
def update(self):
    """Update one image (or every image in a storage with --all).

    :return: 0 on success; raises ValueError on option conflicts or when
             the image cannot be found locally.
    """
    if self.args.debug:
        write_out(str(self.args))
    # --all excludes an explicit image, --force, and requires --storage.
    if self.args.all and self.args.image is not None:
        raise ValueError("Cannot specify both --all and an image name")
    if self.args.all and self.args.force:
        raise ValueError("Cannot specify both --all and --force")
    if self.args.all and self.args.storage is None:
        raise ValueError("Please specify --storage")
    beu = BackendUtils()
    if self.args.all:
        be = beu.get_backend_from_string(self.args.storage)
        return self.update_all_images(be, self.args.debug)
    try:
        be, img_obj = beu.get_backend_and_image_obj(
            self.image,
            str_preferred_backend=self.args.storage or storage,
            required=bool(self.args.storage))
        input_name = img_obj.input_name
    except ValueError:
        raise ValueError("{} not found locally. Unable to update".format(self.image))
    be.update(input_name, debug=self.args.debug, force=self.args.force,
              image_object=img_obj)
    return 0
def delete_image(self):
    """
    Mark given image(s) for deletion from registry
    :return: 0 if all images marked for deletion, otherwise 2 on any failure
    """
    if self.args.debug:
        util.write_out(str(self.args))
    if len(self.args.delete_targets) > 0 and self.args.all:
        raise ValueError("You must select --all or provide a list of images to delete.")

    # BUGFIX: remote deletion needs no local image objects; previously the
    # local lookup ran first and a remote-only image name aborted the delete.
    if self.args.remote:
        return self._delete_remote(self.args.delete_targets)

    beu = BackendUtils()
    # We need to decide on new returns for dbus because we now check image
    # validity prior to executing the delete. If there is going to be a
    # failure, it will be here.
    #
    # The failure here is basically that it couldnt verify/find the image.
    if self.args.all:
        delete_objects = beu.get_images(get_all=True)
    else:
        delete_objects = []
        for image in self.args.delete_targets:
            _, img_obj = beu.get_backend_and_image_obj(image, str_preferred_backend=self.args.storage)
            delete_objects.append(img_obj)

    if not delete_objects:
        # BUGFIX: max() over an empty list used to raise a confusing
        # ValueError here; fail with an explicit message instead.
        raise ValueError("No images to delete.")

    # Width of the IMAGE column: longest repotag (or id) plus padding.
    _image_names = [len(x.repotags[0]) if x.repotags else len(x.id)
                    for x in delete_objects]
    max_img_name = max(_image_names) + 2
    if not self.args.assumeyes:
        util.write_out("Do you wish to delete the following images?\n")
        two_col = "   {0:" + str(max_img_name) + "} {1}"
        util.write_out(two_col.format("IMAGE", "STORAGE"))
        for del_obj in delete_objects:
            image = None if not del_obj.repotags else del_obj.repotags[0]
            if image is None or "<none>" in image:
                image = del_obj.id[0:12]
            util.write_out(two_col.format(image, del_obj.backend.backend))
        confirm = util.input("\nConfirm (y/N) ")
        confirm = confirm.strip().lower()
        if confirm not in ['y', 'yes']:
            util.write_err("User aborted delete operation for {}".format(self.args.delete_targets))
            sys.exit(2)

    # Perform the delete
    for del_obj in delete_objects:
        del_obj.backend.delete_image(del_obj.input_name, force=self.args.force)
    # BUGFIX: return 0 as documented (was a bare return -> None) for dbus.
    return 0
def stop(self):
    """Stop the container named by self.args.container via its backend."""
    if self.args.debug:
        util.write_out(str(self.args))
    backend_utils = BackendUtils()
    backend, container = backend_utils.get_backend_and_container_obj(self.args.container, storage)
    backend.stop_container(container, atomic=self, args=self.args)
    return 0
def update(self):
    """Update the image named by self.image in its (preferred) backend."""
    if self.args.debug:
        write_out(str(self.args))
    backend_utils = BackendUtils()
    preferred = self.args.storage or storage
    try:
        be, img_obj = backend_utils.get_backend_and_image_obj(
            self.image,
            str_preferred_backend=preferred,
            required=bool(self.args.storage))
        input_name = img_obj.input_name
    except ValueError:
        raise ValueError("{} not found locally. Unable to update".format(self.image))
    be.update(input_name, self.args)
def update(self):
    """Update the image named by self.image; returns 0 on success."""
    if self.args.debug:
        write_out(str(self.args))
    beu = BackendUtils()
    try:
        be, img_obj = beu.get_backend_and_image_obj(
            self.image,
            str_preferred_backend=self.args.storage or storage,
            required=bool(self.args.storage))
    except ValueError:
        raise ValueError("{} not found locally. Unable to update".format(self.image))
    be.update(img_obj.input_name, debug=self.args.debug, force=self.args.force,
              image_object=img_obj)
    return 0
def pull_image(self):
    """Pull self.args.image into the backend selected by --storage.

    NOTE(review): a pull failure is printed but 0 is still returned —
    presumably deliberate for dbus; confirm before changing.
    """
    if self.args.debug:
        write_out(str(self.args))
    backend = BackendUtils().get_backend_from_string(self.args.storage)
    self.args.policy_filename = self.policy_filename
    try:
        backend.pull_image(self.args.image, debug=self.args.debug)
    except ValueError as e:
        write_out(str(e))
    return 0
def _get_scan_list(self):
    """Resolve the set of objects to scan: all images, all containers,
    both, or the explicitly named targets."""
    beu = BackendUtils()
    if self.args.images:
        return beu.get_images()
    if self.args.containers:
        return beu.get_containers()
    if self.args.all:
        return beu.get_images() + beu.get_containers()
    scan_list = []
    for scan_target in self.args.scan_targets:
        try:
            # get_backend_and_container throws a ValueError when it cannot find anything
            _, scan_obj = beu.get_backend_and_container_obj(scan_target)
        except ValueError:
            try:
                # get_backend_and_image throws a ValueError when it cannot find anything
                _, scan_obj = beu.get_backend_and_image_obj(scan_target)
            except ValueError:
                raise ValueError(
                    "Unable to locate the container or image '{}'".format(scan_target))
        scan_list.append(scan_obj)
    return scan_list
def _ps(self):
    """Return filtered container objects; all of them with --all, otherwise
    only the running ones.  Raises ValueError on an unknown filter key."""
    def _check_filters():
        if not self.args.filters:
            return True
        for f in self.args.filters:
            _filter, _ = f.split('=', 1)
            if _filter not in list(self.FILTER_KEYWORDS):
                raise ValueError("The filter {} is not valid. "
                                 "Please choose from {}".format(_filter,
                                                                list(self.FILTER_KEYWORDS)))

    _check_filters()
    beu = BackendUtils()
    containers = self.filter_container_objects(beu.get_containers())
    self._mark_vulnerable(containers)
    if self.args.all:
        return containers
    return [c for c in containers if c.running]
def uninstall(self):
    """Uninstall an image/container, falling back to ostree system
    containers when the normal lookup fails.

    :return: 0 on success
    """
    if self.args.debug:
        util.write_out(str(self.args))
    beu = BackendUtils()
    try:
        be, img_obj = beu.get_backend_and_image_obj(self.args.image, str_preferred_backend=self.args.storage)
    except ValueError as e:
        # BUGFIX: when the ostree backend is unavailable there is no
        # fallback; previously execution fell through with 'be'/'img_obj'
        # unbound and crashed with NameError.  Re-raise the lookup error.
        if 'ostree' not in [x().backend for x in beu.available_backends]:
            raise
        ost = OSTreeBackend()
        img_obj = ost.has_container(self.args.image)
        if not img_obj:
            raise ValueError(e)
        be = ost
    be.uninstall(img_obj, name=self.args.name, atomic=self, ignore=self.args.ignore)
    return 0
def delete_image(self):
    """
    Mark given image(s) for deletion from registry
    :return: 0 if all images marked for deletion, otherwise 2 on any failure
    """
    if self.args.debug:
        util.write_out(str(self.args))
    beu = BackendUtils()
    # Ensure the input values match up first
    # We need to decide on new returns for dbus because we now check image
    # validity prior to executing the delete. If there is going to be a
    # failure, it will be here.
    #
    # The failure here is basically that it couldnt verify/find the image.
    delete_objects = []
    for image in self.args.delete_targets:
        be, img_obj = beu.get_backend_and_image(image, str_preferred_backend=self.args.storage)
        delete_objects.append((be, img_obj))
    if self.args.remote:
        return self._delete_remote(self.args.delete_targets)
    max_img_name = max(len(img.input_name) for _, img in delete_objects) + 2
    if not self.args.assumeyes:
        util.write_out("Do you wish to delete the following images?\n")
        two_col = "   {0:" + str(max_img_name) + "} {1}"
        util.write_out(two_col.format("IMAGE", "STORAGE"))
        for be, img_obj in delete_objects:
            util.write_out(two_col.format(img_obj.input_name, be.backend))
        confirm = util.input("\nConfirm (y/N) ").strip().lower()
        if confirm not in ['y', 'yes']:
            util.write_err("User aborted delete operation for {}".format(self.args.delete_targets))
            sys.exit(2)
    # Perform the delete
    for be, img_obj in delete_objects:
        be.delete_image(img_obj.input_name, force=self.args.force)
    # We need to return something here for dbus
    return
def uninstall(self):
    """Uninstall an image/container, falling back to ostree system
    containers when the normal lookup fails.

    :return: 0 on success
    """
    if self.args.debug:
        util.write_out(str(self.args))
    beu = BackendUtils()
    try:
        be, img_obj = beu.get_backend_and_image_obj(
            self.args.image, str_preferred_backend=self.args.storage)
    except ValueError as e:
        # BUGFIX: when the ostree backend is unavailable there is no
        # fallback; previously execution fell through with 'be'/'img_obj'
        # unbound and crashed with NameError.  Re-raise the lookup error.
        if 'ostree' not in [x().backend for x in beu.available_backends]:
            raise
        ost = OSTreeBackend()
        img_obj = ost.has_container(self.args.image)
        if not img_obj:
            raise ValueError(e)
        be = ost
    be.uninstall(img_obj, name=self.args.name, atomic=self)
    return 0
def stop(self):
    """Stop a container, first running its 'stop' label command when the
    backend is docker, then asking the backend to stop it."""
    if self.args.debug:
        util.write_out(str(self.args))
    beu = BackendUtils()
    be, con_obj = beu.get_backend_and_container_obj(self.args.container, storage)
    con_obj.stop_args = con_obj.get_label('stop')
    if con_obj.stop_args and be.backend == 'docker':
        cmd = self.sub_env_strings(self.gen_cmd(con_obj.stop_args + self.quote(self.args.args)))
        self.display(cmd)
        # There should be some error handling around this
        # in case it fails. And what should then be done?
        util.check_call(cmd, env=self.cmd_env())
    be.stop_container(con_obj)
def run(self):
    """Run an existing container by name, or pull/install the image and
    run it.  System-typed images are redirected to the ostree backend
    unless --storage was given explicitly."""
    storage_set = self.args.storage is not None
    storage = self.args.storage if storage_set else _storage
    be_utils = BackendUtils()
    if self.name:
        try:
            be, con_obj = be_utils.get_backend_and_container_obj(self.name)
            return be.run(con_obj, atomic=self, args=self.args)
        except ValueError:
            # No container with that name; fall through and run the image.
            pass
    be = be_utils.get_backend_from_string(storage)
    db = DockerBackend()
    img_object = be.has_image(self.image)
    if img_object is None and storage == 'docker':
        self.display("Need to pull %s" % self.image)
        remote_image_obj = db.make_remote_image(self.args.image)
        # If the image has a atomic.type of system, then we need to land
        # this in the ostree backend. Install it and then start it
        # because this is run
        if remote_image_obj.is_system_type and not storage_set:
            be = be_utils.get_backend_from_string('ostree')
            be_utils.message_backend_change('docker', 'ostree')
            be.install(self.image, self.name)
            con_obj = be.has_container(self.name)
            return be.run(con_obj)
        if self.args.display:
            return 0
        try:
            db.pull_image(self.image, remote_image_obj)
            img_object = db.has_image(self.image)
        except RegistryInspectError:
            raise ValueError("Unable to find image {}".format(self.image))
    return be.run(img_object, atomic=self, args=self.args)
def stop(self):
    """Stop a container; docker containers with a 'stop' label run that
    command first, then the backend stop is issued."""
    if self.args.debug:
        util.write_out(str(self.args))
    backend_utils = BackendUtils()
    backend, container = backend_utils.get_backend_and_container_obj(
        self.args.container, storage)
    container.stop_args = container.get_label('stop')
    if container.stop_args and backend.backend == 'docker':
        cmd = self.gen_cmd(container.stop_args + self.quote(self.args.args))
        cmd = self.sub_env_strings(cmd)
        self.display(cmd)
        # There should be some error handling around this
        # in case it fails. And what should then be done?
        util.check_call(cmd, env=self.cmd_env())
    backend.stop_container(container)
class TestBackendUtils(unittest.TestCase):
    """Placeholder test cases for BackendUtils backend resolution."""

    # Shared helper instance created once at class-definition time.
    bu = BackendUtils()

    def test_get_backend_for_image_preferred(self):
        pass

    def test_get_backend_for_image(self):
        pass
def pull_image(self):
    """Pull self.args.image into the requested (or default) backend,
    redirecting system-typed docker images to ostree when no storage
    was forced.  Returns 0; raises ValueError on failure."""
    storage_set = self.args.storage is not None
    storage = self.args.storage if storage_set else _storage
    check_storage_is_available(storage)
    if self.args.debug:
        write_out(str(self.args))
    be_utils = BackendUtils()
    be = be_utils.get_backend_from_string(storage)
    self.args.policy_filename = self.policy_filename
    try:
        remote_image_obj = None
        if be.backend == 'docker':
            remote_image_obj = be.make_remote_image(self.args.image)
            if remote_image_obj.is_system_type and not storage_set:
                be = be_utils.get_backend_from_string('ostree')
                be_utils.message_backend_change('docker', 'ostree')
        be.pull_image(self.args.image, remote_image_obj, debug=self.args.debug,
                      assumeyes=self.args.assumeyes)
    except ValueError as e:
        raise ValueError("Failed: {}".format(e))
    return 0
def run(self):
    """Run an existing container by name, or pull the docker image and
    run it.  Raises ValueError when the image cannot be found."""
    if self.name:
        be_utils = BackendUtils()
        try:
            be, con_obj = be_utils.get_backend_and_container_obj(self.name)
            return be.run(con_obj, atomic=self, args=self.args)
        except ValueError:
            # No container by that name; fall through to the image path.
            pass
    db = DockerBackend()
    img_object = db.has_image(self.image)
    if img_object is None:
        self.display("Need to pull %s" % self.image)
        if self.args.display:
            return 0
        try:
            db.pull_image(self.image)
            img_object = db.has_image(self.image)
        except RegistryInspectError:
            raise ValueError("Unable to find image {}".format(self.image))
    db.run(img_object, atomic=self, args=self.args)
def install(self):
    """Install an image as a user/system container or, by default, run the
    image's INSTALL label command via the docker backend."""
    debug = self.args.debug
    if self.args.debug:
        util.write_out(str(self.args))
    be_utils = BackendUtils()
    # Check to see if the container already exists.
    # BUGFIX: the 'already present' ValueError used to be raised inside the
    # try whose 'except ValueError: pass' immediately swallowed it, so the
    # duplicate-name guard never fired.  Use try/except/else instead.
    try:
        _, _ = be_utils.get_backend_and_container_obj(self.name)
    except ValueError:
        pass
    else:
        raise ValueError("A container '%s' is already present" % self.name)
    if self.user:
        if not util.is_user_mode():
            raise ValueError("--user does not work for privileged user")
        return self.syscontainers.install_user_container(self.image, self.name)
    elif self.system:
        return self.syscontainers.install(self.image, self.name)
    elif OSTREE_PRESENT and self.args.setvalues:
        raise ValueError("--set is valid only when used with --system or --user")
    # Assumed backend now is docker
    be = be_utils.get_backend_from_string('docker')
    # If the image is already present,
    img_obj = be.has_image(self.image)
    if img_obj is None:
        be.pull_image(self.image, debug=debug)
        img_obj = be.has_image(self.image)
    install_args = img_obj.get_label('INSTALL')
    if not install_args:
        return 0
    install_args = install_args.split()
    cmd = self.sub_env_strings(self.gen_cmd(install_args + self.quote(self.args.args)))
    self.display(cmd)
    if not self.args.display:
        return util.check_call(cmd)
def install(self):
    """Install an image: user/system containers go through syscontainers,
    everything else runs the image's INSTALL label via docker, recording
    install metadata on success."""
    if self.args.debug:
        util.write_out(str(self.args))
    storage_set = self.args.storage is not None
    storage = self.args.storage if storage_set else _storage
    args_system = getattr(self.args, 'system', None)
    args_user = getattr(self.args, 'user', None)
    if (args_system or args_user) and storage != 'ostree' and storage_set:
        raise ValueError("The --system and --user options are only available for the 'ostree' storage.")
    be_utils = BackendUtils()
    # Check to see if the container already exists.
    # BUGFIX: the 'already present' ValueError used to be raised inside the
    # try whose 'except ValueError: pass' immediately swallowed it, so the
    # duplicate-name guard never fired.  Use try/except/else instead.
    try:
        _, _ = be_utils.get_backend_and_container_obj(self.name)
    except ValueError:
        pass
    else:
        raise ValueError("A container '%s' is already present" % self.name)
    if self.user:
        if not util.is_user_mode():
            raise ValueError("--user does not work for privileged user")
        return self.syscontainers.install_user_container(self.image, self.name)
    elif self.system or storage == 'ostree':
        return self.syscontainers.install(self.image, self.name)
    elif OSTREE_PRESENT and self.args.setvalues:
        raise ValueError("--set is valid only when used with --system or --user")
    # Assumed backend now is docker
    be = be_utils.get_backend_from_string('docker')
    # If the image is already present,
    img_obj = be.has_image(self.image)
    if img_obj is None:
        remote_image_obj = be.make_remote_image(self.args.image)
        # We found an atomic.type of system, therefore install it onto the ostree
        # backend
        if remote_image_obj.is_system_type and not storage_set:
            be_utils.message_backend_change('docker', 'ostree')
            return self.syscontainers.install(self.image, self.name)
        be.pull_image(self.args.image, remote_image_obj, debug=self.args.debug)
        img_obj = be.has_image(self.image)
    install_args = img_obj.get_label('INSTALL')
    if not install_args:
        return 0
    install_args = install_args.split()
    cmd = self.sub_env_strings(self.gen_cmd(install_args + self.quote(self.args.args)))
    self.display(cmd)
    if not self.args.display:
        # Record what was installed (fully-qualified name when resolvable).
        try:
            name = img_obj.fq_name
        except RegistryInspectError:
            name = img_obj.input_name
        install_data = {}
        install_data[name] = {'id': img_obj.id,
                              'install_date': strftime("%Y-%m-%d %H:%M:%S", gmtime())}
        util.InstallData.write_install_data(install_data)
        return util.check_call(cmd)
def run(self):
    """Run an existing container by name, or pull the docker image and
    run it.

    NOTE(review): on RegistryInspectError this variant only writes the
    error and still calls db.run with a None image object — presumably
    intentional best-effort behavior; confirm before changing.
    """
    if self.name:
        be_utils = BackendUtils()
        try:
            be, con_obj = be_utils.get_backend_and_container_obj(self.name)
            return be.run(con_obj, atomic=self, args=self.args)
        except ValueError:
            # No container by that name; fall through to the image path.
            pass
    db = DockerBackend()
    img_object = db.has_image(self.image)
    if img_object is None:
        self.display("Need to pull %s" % self.image)
        if self.args.display:
            return 0
        try:
            db.pull_image(self.image)
            img_object = db.has_image(self.image)
        except RegistryInspectError:
            util.write_err("Unable to find image {}".format(self.image))
    db.run(img_object, atomic=self, args=self.args)
def verify(self):
    """Verify one image, or with --all every fully-qualified image in the
    given storage (unqualified images are skipped with a warning)."""
    if self.args.image and self.args.all:
        raise ValueError("Incompatible options specified. --all doesn't support an image name")
    if not self.args.all and not self.args.image:
        raise ValueError("Please specify the image name")
    if self.args.all and not self.args.storage:
        raise ValueError("Please specify --storage")
    if not self.args.all:
        return self._verify_one_image(self.args.image)
    be = BackendUtils().get_backend_from_string(self.args.storage)
    for img in be.get_images():
        if img.repotags is None:
            continue
        img_name = img.repotags[0]
        if util.Decompose(img_name).registry == "":
            util.write_err("Image {} not fully qualified: skipping".format(img_name))
            continue
        self._verify_one_image(img_name)
def prune_images(self):
    """
    Remove dangling images from registry
    :return: 0 if all images deleted or no dangling images found
    """
    if self.args.debug:
        util.write_out(str(self.args))
    # Ask each available backend in turn to prune its dangling images.
    for backend_cls in BackendUtils().available_backends:
        backend_cls().prune()
    return 0
class Pull(Atomic):
    """Pull an image into the storage backend selected by --storage."""

    def __init__(self, policy_filename=None):
        """
        :param policy_filename: override policy filename
        """
        super(Pull, self).__init__()
        self.policy_filename = policy_filename
        self.be_utils = BackendUtils()

    def pull_image(self):
        """Pull self.args.image; return 0 on success, 1 on failure."""
        if self.args.debug:
            write_out(str(self.args))
        backend = self.be_utils.get_backend_from_string(self.args.storage)
        self.args.policy_filename = self.policy_filename
        try:
            backend.pull_image(self.args.image, debug=self.args.debug)
        except ValueError as e:
            write_out("Failed: {}".format(e))
            return 1
        return 0
def _get_scan_list(self):
    """Resolve the set of objects to scan: all images, all containers,
    both, or the explicitly named targets."""
    beu = BackendUtils()
    if self.args.images:
        return beu.get_images()
    if self.args.containers:
        return beu.get_containers()
    if self.args.all:
        return beu.get_images() + beu.get_containers()
    scan_list = []
    for scan_target in self.args.scan_targets:
        try:
            # get_backend_and_container throws a ValueError when it cannot find anything
            _, scan_obj = beu.get_backend_and_container_obj(scan_target)
        except ValueError:
            try:
                # get_backend_and_image throws a ValueError when it cannot find anything
                _, scan_obj = beu.get_backend_and_image_obj(scan_target)
            except ValueError:
                raise ValueError("Unable to locate the container or image '{}' locally. Check the "
                                 "input name for typos or pull the image first.".format(scan_target))
        scan_list.append(scan_obj)
    return scan_list
def run(self):
    """Run an existing container by name, or pull/install the image and
    run it; handles both the docker and ostree storage paths."""
    storage_set = self.args.storage is not None
    storage = self.args.storage if storage_set else _storage
    be_utils = BackendUtils()
    if self.name:
        try:
            be, con_obj = be_utils.get_backend_and_container_obj(self.name)
            return be.run(con_obj, atomic=self, args=self.args)
        except ValueError:
            # No container by that name; fall through and run the image.
            pass
    be = be_utils.get_backend_from_string(storage)
    db = DockerBackend()
    img_object = be.has_image(self.image)
    if img_object is None and storage == 'docker':
        self.display("Need to pull %s" % self.image)
        remote_image_obj = db.make_remote_image(self.args.image)
        # If the image has a atomic.type of system, then we need to land
        # this in the ostree backend. Install it and then start it
        # because this is run
        if remote_image_obj.is_system_type and not storage_set:
            be = be_utils.get_backend_from_string('ostree')
            be_utils.message_backend_change('docker', 'ostree')
            be.install(self.image, self.name)
            con_obj = be.has_container(self.name)
            return be.run(con_obj)
        if self.args.display:
            return 0
        try:
            db.pull_image(self.image, remote_image_obj)
            img_object = db.has_image(self.image)
        except RegistryInspectError:
            raise ValueError("Unable to find image {}".format(self.image))
    if storage == 'ostree':
        if img_object is None:
            be.pull_image(self.args.image, None)
        # For system containers, the run method really needs a container obj
        con_obj = be.has_container(self.name)
        if con_obj is None:
            be.install(self.image, self.name)
            # NOTE(review): reconstructed from whitespace-mangled source; the
            # re-fetch appears to belong to the just-installed case — confirm.
            img_object = be.has_container(self.name)
    return be.run(img_object, atomic=self, args=self.args)
class Containers(Atomic):
    """List, filter, and delete containers across all available backends."""

    # Maps user-facing filter keys to (dotted) container attribute paths.
    FILTER_KEYWORDS = {"container": "id", "image": "image_name",
                       "command": "command", "created": "created",
                       "state": "state", "runtime": "runtime",
                       "backend": "backend.backend"}

    def __init__(self):
        super(Containers, self).__init__()
        self.beu = BackendUtils()

    def fstrim(self):
        """Run fstrim on every docker container's root filesystem."""
        with AtomicDocker() as client:
            for container in client.containers():
                containerId = container["Id"]
                ret = self._inspect_container(name=containerId)
                pid = ret["State"]["Pid"]
                mp = "/proc/%d/root" % (pid)
                util.write_out("Trimming container id {0}".format(containerId[0:12]))
                util.check_call(["/usr/sbin/fstrim", "-v", mp], stdout=DEVNULL)
        return

    def filter_container_objects(self, con_objs):
        """Apply each --filter key=value pair to the container objects,
        returning a new (deep-copied) filtered list."""
        def _walk(_filter_objs, _filter, _value):
            _filtered = []
            for con_obj in _filter_objs:
                it = con_obj
                # Follow the dotted attribute path (e.g. backend.backend).
                for i in _filter.split("."):
                    it = getattr(it, i, None)
                if _value.lower() in it.lower():
                    _filtered.append(con_obj)
            return _filtered

        if not self.args.filters:
            return con_objs
        filtered_objs = copy.deepcopy(con_objs)
        for f in self.args.filters:
            cfilter, value = f.split('=', 1)
            cfilter = self.FILTER_KEYWORDS[cfilter]
            filtered_objs = _walk(filtered_objs, cfilter, value)
        return filtered_objs

    def ps_tty(self):
        """Render the container list for a terminal (or quiet/json)."""
        if self.args.debug:
            util.write_out(str(self.args))
        self.beu.dump_backends()
        container_objects = self._ps()
        if not any([x.running for x in container_objects]) and not self.args.all:
            return 0
        if self.args.quiet:
            for con_obj in container_objects:
                util.write_out(con_obj.id[:12])
            return 0
        if self.args.json:
            util.output_json(self._to_json(container_objects))
            return 0
        if len(container_objects) == 0:
            return 0
        # Column widths: fixed when truncating, widest value otherwise.
        max_container_id = 12 if self.args.truncate else max(
            [len(x.id) for x in container_objects])
        max_image_name = 20 if self.args.truncate else max(
            [len(x.image_name) for x in container_objects])
        max_command = 20 if self.args.truncate else max(
            [len(x.command) for x in container_objects])
        col_out = "{0:2} {1:%s} {2:%s} {3:%s} {4:16} {5:9} {6:10} {7:10}" % (
            max_container_id, max_image_name, max_command)
        if self.args.heading:
            util.write_out(
                col_out.format(" ", "CONTAINER ID", "IMAGE", "COMMAND",
                               "CREATED", "STATE", "BACKEND", "RUNTIME"))
        for con_obj in container_objects:
            indicator = ""
            if con_obj.vulnerable:
                if util.is_python2:
                    indicator = indicator + self.skull + " "
                else:
                    indicator = indicator + str(self.skull, "utf-8") + " "
            util.write_out(
                col_out.format(indicator, con_obj.id[0:max_container_id],
                               con_obj.image_name[0:max_image_name],
                               con_obj.command[0:max_command],
                               con_obj.created[0:16], con_obj.state[0:9],
                               con_obj.backend.backend[0:10],
                               con_obj.runtime[0:10]))

    def ps(self):
        """Return the container list as plain dicts (dbus/json API)."""
        container_objects = self._ps()
        return self._to_json(container_objects)

    def _ps(self):
        """Validate filters, then return filtered container objects; all of
        them with --all, otherwise only running ones."""
        def _check_filters():
            if not self.args.filters:
                return True
            for f in self.args.filters:
                _filter, _ = f.split('=', 1)
                keywords = list(self.FILTER_KEYWORDS.keys())
                if _filter not in keywords:
                    raise ValueError("The filter {} is not valid. "
                                     "Please choose from {}".format(
                                         _filter, keywords))
        _check_filters()
        containers = self.filter_container_objects(self.beu.get_containers())
        self._mark_vulnerable(containers)
        if self.args.all:
            return containers
        return [x for x in containers if x.running]

    @staticmethod
    def _to_json(con_objects):
        """Convert container objects to JSON-serializable dicts."""
        containers = []
        for con_obj in con_objects:
            _con = {'id': con_obj.id,
                    'image_id': con_obj.image_id,
                    'image_name': con_obj.image_name,
                    'command': con_obj.command,
                    'created': con_obj.created,
                    'state': con_obj.state,
                    'backend': con_obj.backend.backend,
                    'runtime': con_obj.runtime,
                    'vulnerable': con_obj.vulnerable,
                    'running': con_obj.running}
            containers.append(_con)
        return containers

    def delete(self):
        """Delete the named containers (or all with --all), confirming
        unless --assumeyes.  Returns 0."""
        if self.args.debug:
            util.write_out(str(self.args))
        self.beu.dump_backends()
        if (len(self.args.containers) > 0 and self.args.all) or (len(self.args.containers) < 1 and not self.args.all):
            raise ValueError(
                "You must select --all or provide a list of images to delete.")
        if self.args.all:
            if self.args.storage:
                be = self.beu.get_backend_from_string(self.args.storage)
                container_objects = be.get_containers()
            else:
                container_objects = self.beu.get_containers()
        else:
            container_objects = []
            for con in self.args.containers:
                _, con_obj = self.beu.get_backend_and_container_obj(
                    con, str_preferred_backend=self.args.storage or storage,
                    required=True if self.args.storage else False)
                container_objects.append(con_obj)
        if len(container_objects) == 0:
            raise ValueError("No containers to delete")
        four_col = "   {0:12} {1:20} {2:25} {3:10}"
        if not self.args.assumeyes:
            util.write_out("Do you wish to delete the following images?\n")
        else:
            util.write_out("The following containers will be deleted.\n")
        util.write_out(four_col.format("ID", "NAME", 'IMAGE_NAME', "STORAGE"))
        for con in container_objects:
            util.write_out(
                four_col.format(con.id[0:12], con.name[0:20],
                                con.image_name[0:25], con.backend.backend))
        if not self.args.assumeyes:
            confirm = util.input("\nConfirm (y/N) ")
            confirm = confirm.strip().lower()
            if confirm not in ['y', 'yes']:
                util.write_err("User aborted delete operation for {}".format(
                    self.args.containers or "all containers"))
                sys.exit(2)
        for del_con in container_objects:
            try:
                del_con.backend.delete_container(del_con.id, force=self.args.force)
            except APIError as e:
                # BUGFIX: previously reported con.id (stale loop variable
                # from the display loop) instead of the failing container.
                util.write_err("Failed to delete container {}: {}".format(
                    del_con.id, e))
        return 0

    def _mark_vulnerable(self, containers):
        """Flag containers whose id appears in the vulnerable-id set."""
        assert isinstance(containers, list)
        vulnerable_uuids = self.get_vulnerable_ids()
        for con in containers:
            if con.id in vulnerable_uuids:
                con.vulnerable = True

    def update(self):
        """Update an installed system container."""
        if self.syscontainers.get_checkout(self.args.container):
            return self.syscontainers.update_container(self.args.container, self.args.setvalues, self.args.rebase)
        raise ValueError("System container '%s' is not installed" % self.args.container)

    def rollback(self):
        """Roll a system container back to its previous deployment."""
        util.write_out("Attempting to roll back system container: %s" % self.args.container)
        self.syscontainers.rollback(self.args.container)
class Containers(Atomic):
    """List, filter, delete, and update containers across backends."""

    # Maps user-facing filter keys to (dotted) container attribute paths.
    FILTER_KEYWORDS = {"container": "id", "image": "image_name",
                       "command": "command", "created": "created",
                       "state": "state", "runtime": "runtime",
                       "backend": "backend.backend"}

    def __init__(self):
        super(Containers, self).__init__()
        self.beu = BackendUtils()

    def fstrim(self):
        """Run fstrim on every docker container's root filesystem."""
        with AtomicDocker() as client:
            for container in client.containers():
                containerId = container["Id"]
                ret = self._inspect_container(name=containerId)
                pid = ret["State"]["Pid"]
                mp = "/proc/%d/root" % (pid)
                util.write_out("Trimming container id {0}".format(containerId[0:12]))
                util.check_call(["/usr/sbin/fstrim", "-v", mp], stdout=DEVNULL)
        return

    def filter_container_objects(self, con_objs):
        """Apply each --filter key=value pair to the container objects."""
        def _walk(_filter_objs, _filter, _value):
            _filtered = []
            for con_obj in _filter_objs:
                it = con_obj
                # Follow the dotted attribute path (e.g. backend.backend).
                for i in _filter.split("."):
                    it = getattr(it, i, None)
                if _value.lower() in it.lower():
                    _filtered.append(con_obj)
            return _filtered

        if not self.args.filter:
            return con_objs
        for f in self.args.filter:
            cfilter, value = f.split('=', 1)
            cfilter = self.FILTER_KEYWORDS[cfilter]
            con_objs = _walk(con_objs, cfilter, value)
        return con_objs

    def ps_tty(self):
        """Render the container list for a terminal (or quiet/json)."""
        if self.args.debug:
            util.write_out(str(self.args))
        self.beu.dump_backends()
        container_objects = self._ps()
        # If we were not asked for json output, return out with no output
        # when there are no applicable container objects
        if not self.args.json:
            if len(container_objects) == 0:
                return 0
            if not any([x.running for x in container_objects]) and not self.args.all:
                return 0
        # Set to 12 when truncate
        if self.args.truncate:
            max_container_id = 12
        # Otherwise set to the max, falling back to 0
        else:
            max_container_id = max([len(x.id) for x in container_objects] or [0])
        # Quiet supersedes json output
        if self.args.quiet:
            for con_obj in container_objects:
                util.write_out(con_obj.id[0:max_container_id])
            return 0
        if self.args.json:
            util.output_json(self._to_json(container_objects))
            return 0
        max_image_name = 20 if self.args.truncate else max(
            [len(x.image_name) for x in container_objects])
        if self.args.truncate:
            max_command = 10
        else:
            _max_command = max([len(x.command) for x in container_objects])
            max_command = _max_command if _max_command > 9 else 10
        max_container_name = 10 if self.args.truncate else max(
            [len(x.name) for x in container_objects])
        col_out = "{0:2} {1:%s} {2:%s} {3:%s} {4:%s} {5:16} {6:10} {7:10} {8:10}" % (
            max_container_id, max_image_name, max_container_name, max_command)
        if self.args.heading:
            util.write_out(
                col_out.format(" ", "CONTAINER ID", "IMAGE", "NAME", "COMMAND",
                               "CREATED", "STATE", "BACKEND", "RUNTIME"))
        for con_obj in container_objects:
            indicator = ""
            if con_obj.vulnerable:
                if util.is_python2:
                    indicator = indicator + self.skull + " "
                else:
                    indicator = indicator + str(self.skull, "utf-8") + " "
            util.write_out(
                col_out.format(indicator, con_obj.id[0:max_container_id],
                               con_obj.image_name[0:max_image_name],
                               con_obj.name[0:max_container_name],
                               str(con_obj.command)[0:max_command],
                               con_obj.created[0:16], con_obj.state[0:10],
                               con_obj.backend.backend[0:10],
                               con_obj.runtime[0:10]))

    def ps(self):
        """Return the container list as plain dicts (dbus/json API)."""
        container_objects = self._ps()
        return self._to_json(container_objects)

    def _ps(self):
        """Validate filters, then return filtered container objects; all of
        them with --all, otherwise only running ones."""
        def _check_filters():
            if not self.args.filter:
                return True
            for f in self.args.filter:
                _filter, _ = f.split('=', 1)
                keywords = list(self.FILTER_KEYWORDS.keys())
                if _filter not in keywords:
                    raise ValueError("The filter {} is not valid. "
                                     "Please choose from {}".format(
                                         _filter, keywords))
        _check_filters()
        containers = self.filter_container_objects(self.beu.get_containers())
        self._mark_vulnerable(containers)
        if self.args.all:
            return containers
        return [x for x in containers if x.running]

    @staticmethod
    def _to_json(con_objects):
        """Convert container objects to JSON-serializable dicts."""
        containers = []
        for con_obj in con_objects:
            _con = {'id': con_obj.id,
                    'image_id': con_obj.image,
                    'image_name': con_obj.image_name,
                    'name': con_obj.name,
                    'command': con_obj.command,
                    'created': con_obj.created,
                    'state': con_obj.state,
                    'backend': con_obj.backend.backend,
                    'runtime': con_obj.runtime,
                    'vulnerable': con_obj.vulnerable,
                    'running': con_obj.running}
            containers.append(_con)
        return containers

    def delete(self):
        """Delete the named containers (or all with --all), confirming
        unless --assumeyes.  Returns 0."""
        if self.args.debug:
            util.write_out(str(self.args))
        self.beu.dump_backends()
        if (len(self.args.containers) > 0 and self.args.all) or (len(self.args.containers) < 1 and not self.args.all):
            raise ValueError(
                "You must select --all or provide a list of images to delete.")
        if self.args.all:
            if self.args.storage:
                be = self.beu.get_backend_from_string(self.args.storage)
                container_objects = be.get_containers()
            else:
                container_objects = self.beu.get_containers()
        else:
            container_objects = []
            for con in self.args.containers:
                _, con_obj = self.beu.get_backend_and_container_obj(
                    con, str_preferred_backend=self.args.storage or storage,
                    required=True if self.args.storage else False)
                container_objects.append(con_obj)
        if len(container_objects) == 0:
            raise ValueError("No containers to delete")
        four_col = "   {0:12} {1:20} {2:25} {3:10}"
        if not self.args.assumeyes:
            util.write_out("Do you wish to delete the following images?\n")
        else:
            util.write_out("The following containers will be deleted.\n")
        util.write_out(four_col.format("ID", "NAME", 'IMAGE_NAME', "STORAGE"))
        for con in container_objects:
            util.write_out(
                four_col.format(con.id[0:12], con.name[0:20],
                                con.image_name[0:25], con.backend.backend))
        if not self.args.assumeyes:
            confirm = util.input("\nConfirm (y/N) ")
            confirm = confirm.strip().lower()
            if confirm not in ['y', 'yes']:
                util.write_err("User aborted delete operation for {}".format(
                    self.args.containers or "all containers"))
                sys.exit(2)
        for del_con in container_objects:
            try:
                del_con.backend.delete_container(del_con.id, force=self.args.force)
            except APIError as e:
                # BUGFIX: previously reported con.id (stale loop variable
                # from the display loop) instead of the failing container.
                util.write_err("Failed to delete container {}: {}".format(
                    del_con.id, e))
        return 0

    def _mark_vulnerable(self, containers):
        """Flag containers whose id appears in the vulnerable-id set."""
        assert isinstance(containers, list)
        vulnerable_uuids = self.get_vulnerable_ids()
        for con in containers:
            if con.id in vulnerable_uuids:
                con.vulnerable = True

    def update_all_containers(self):
        """Update every system container and print a colorized summary."""
        preupdate_containers = self.syscontainers.get_containers()
        could_not_update = {}
        for i in preupdate_containers:
            name = i['Names'][0]
            util.write_out("Checking container {}...".format(name))
            try:
                self.syscontainers.update_container(name, controlled=True)
            except:  # pylint: disable=bare-except
                could_not_update[name] = True
        postupdate_containers = self.syscontainers.get_containers()
        images_by_name = {i['RepoTags'][0]: i
                          for i in self.syscontainers.get_system_images()}
        preupdate_containers_by_name = {i['Names'][0]: i
                                        for i in preupdate_containers}
        postupdate_containers_by_name = {i['Names'][0]: i
                                         for i in postupdate_containers}

        def get_image_id(c):
            # Key spelling differs between sources.
            return c['ImageId'] if 'ImageId' in c else c['ImageID']

        def colored(line, color):
            if sys.stdout.isatty():
                return "\x1b[1;%dm%s\x1b[0m" % (color, line)
            else:
                return line

        def get_status(container_name, pre_id, post_id, image_id):
            COLOR_RED = 31
            COLOR_GREEN = 32
            COLOR_YELLOW = 33
            if container_name in could_not_update:
                return "Failed", COLOR_RED
            if image_id is None:
                return "Unknown", COLOR_YELLOW
            if post_id == image_id and image_id != pre_id:
                return "Updated now", COLOR_GREEN
            if post_id == image_id and image_id == pre_id:
                return "Updated", COLOR_GREEN
            return "Not updated", COLOR_RED

        cols = "{0:15} {1:32} {2:32} {3:32} {4:15}"
        util.write_out("\nSUMMARY\n")
        util.write_out(
            cols.format("Container", "Image ID before update",
                        "Image ID after update", "Latest image ID", "Status"))
        for cnt in preupdate_containers_by_name.keys():
            pre_version = get_image_id(preupdate_containers_by_name[cnt])
            post_version = get_image_id(postupdate_containers_by_name[cnt])
            img_name = img_id = None
            try:
                img_name = preupdate_containers_by_name[cnt]['Image']
                img_id = get_image_id(images_by_name[img_name])
            except KeyError:
                pass
            status, color = get_status(cnt, pre_version, post_version, img_id)
            colored_status = colored(status[:15], color)
            util.write_out(
                cols.format(cnt[:15], pre_version[:32], post_version[:32],
                            (img_id or "<NOT FOUND>")[:32], colored_status))

    def update(self):
        """Update one system container, or all of them with --all."""
        if self.args.all:
            if self.args.container is not None:
                raise ValueError(
                    "Incompatible options specified. --all doesn't support a container name")
            if self.args.setvalues or self.args.rebase:
                raise ValueError(
                    "Incompatible options specified. --all doesn't support --set or --rebase")
            return self.update_all_containers()
        if self.args.container is None:
            raise ValueError(
                "Too few arguments. Please specify the container")
        if self.syscontainers.get_checkout(self.args.container):
            return self.syscontainers.update_container(self.args.container, self.args.setvalues, self.args.rebase)
        raise ValueError("System container '%s' is not installed" % self.args.container)

    def rollback(self):
        """Roll a system container back to its previous deployment."""
        util.write_out("Attempting to roll back system container: %s" % self.args.container)
        self.syscontainers.rollback(self.args.container)
def delete_image(self): """ Mark given image(s) for deletion from registry :return: 0 if all images marked for deletion, otherwise 2 on any failure """ if self.args.debug: util.write_out(str(self.args)) if len(self.args.delete_targets) > 0 and self.args.all: raise ValueError( "You must select --all or provide a list of images to delete.") beu = BackendUtils() delete_objects = [] # We need to decide on new returns for dbus because we now check image # validity prior to executing the delete. If there is going to be a # failure, it will be here. # # The failure here is basically that it couldnt verify/find the image. if self.args.all: delete_objects = beu.get_images(get_all=True) else: for image in self.args.delete_targets: _, img_obj = beu.get_backend_and_image_obj( image, str_preferred_backend=self.args.storage or storage, required=True if self.args.storage else False) delete_objects.append(img_obj) if self.args.remote: return self._delete_remote(self.args.delete_targets) _image_names = [] for del_obj in delete_objects: if del_obj.repotags: _image_names.append(len(del_obj.repotags[0])) else: _image_names.append(len(del_obj.id)) max_img_name = max(_image_names) + 2 if not self.args.assumeyes: util.write_out("Do you wish to delete the following images?\n") two_col = " {0:" + str(max_img_name) + "} {1}" util.write_out(two_col.format("IMAGE", "STORAGE")) for del_obj in delete_objects: image = None if not del_obj.repotags else del_obj.repotags[0] if image is None or "<none>" in image: image = del_obj.id[0:12] util.write_out(two_col.format(image, del_obj.backend.backend)) confirm = util.input("\nConfirm (y/N) ") confirm = confirm.strip().lower() if not confirm in ['y', 'yes']: util.write_err("User aborted delete operation for {}".format( self.args.delete_targets)) sys.exit(2) # Perform the delete for del_obj in delete_objects: del_obj.backend.delete_image(del_obj.input_name, force=self.args.force) # We need to return something here for dbus return
class Info(Atomic):
    # Reports image metadata: per-layer version table and LABEL information,
    # for local images or (with the remote flag) registry images.

    def __init__(self):
        super(Info, self).__init__()
        # Resolves images/containers to their storage backend.
        self.beu = BackendUtils()

    def version(self):
        # TTY entry point: print the version table via util.write_out.
        self._version(util.write_out)

    def _version(self, write_func):
        """Write a table of IMAGE NAME / VERSION / IMAGE ID, one row per
        layer, with extra repotags listed as indented "Tag:" rows."""
        layer_objects = self.get_layer_objects()
        # VERSION column: widest long_version, floor of 9.
        max_version_len = max([len(x.long_version) for x in layer_objects])
        max_version_len = max_version_len if max_version_len > 9 else 9
        # IMAGE NAME column: longest repotag plus padding, floor of 12.
        max_img_len = len(max([y for x in layer_objects for y in x.repotags], key=len)) + 9
        max_img_len = max_img_len if max_img_len > 12 else 12
        col_out = "{0:" + str(max_img_len) + "} {1:" + str(max_version_len) + "} {2:10}"
        write_func(col_out.format("IMAGE NAME", "VERSION", "IMAGE ID"))
        for layer in layer_objects:
            for int_img_name in range(len(layer.repotags)):
                # Only the first tag of a layer carries version and id.
                version = layer.long_version if int_img_name < 1 else ""
                iid = layer.id[:12] if int_img_name < 1 else ""
                space = "" if int_img_name < 1 else " Tag: "
                write_func(col_out.format(space + layer.repotags[int_img_name], version, iid))
        write_func("")

    def get_layer_objects(self):
        """Resolve self.image on its backend and return its layer objects."""
        _, img_obj = self.beu.get_backend_and_image_obj(self.image,
                                                        str_preferred_backend=self.args.storage)
        return img_obj.layers

    def dbus_version(self):
        # Same data as _version() but structured for the dbus API.
        layer_objects = self.get_layer_objects()
        versions = []
        for layer in layer_objects:
            versions.append({"Image": layer.repotags,
                             "Version": layer.long_version,
                             "iid": layer.id})
        return versions

    def info_tty(self):
        # TTY entry point for info(): optionally dump args, then print.
        if self.args.debug:
            util.write_out(str(self.args))
        util.write_out(self.info())

    def info(self):
        """
        Retrieve and print all LABEL information for a given image.
        """
        # NOTE(review): args.force is tested but the message refers to
        # --remote — presumably force carries the parsed --remote flag;
        # confirm against the argument parser.
        if self.args.storage == 'ostree' and self.args.force:
            # Ostree and remote combos are illegal
            raise ValueError("The --remote option cannot be used with the 'ostree' storage option.")
        if self.args.force:
            # The user wants information on a remote image
            be = self.beu.get_backend_from_string(self.args.storage)
            img_obj = be.make_remote_image(self.image)
        else:
            # The image is local
            be, img_obj = self.beu.get_backend_and_image_obj(self.image,
                                                             str_preferred_backend=self.args.storage)
        with closing(StringIO()) as buf:
            try:
                info_name = img_obj.fq_name
            except RegistryInspectError:
                # Fall back to the user-typed name when the fully qualified
                # name cannot be resolved.
                info_name = img_obj.input_name
            buf.write("Image Name: {}\n".format(info_name))
            buf.writelines(sorted(["{}: {}\n".format(k, v) for k,v in list(img_obj.labels.items())]))
            if img_obj.template_variables_set:
                buf.write("\n\nTemplate variables with default value, but overridable with --set:\n")
                buf.writelines(["{}: {}\n".format(k, v) for k,v in list(sorted(img_obj.template_variables_set.items()))])
            if img_obj.template_variables_unset:
                buf.write("\n\nTemplate variables that has no default value, and must be set with --set:\n")
                buf.writelines(["{}: {}\n".format(k, v) for k,v in list(sorted(img_obj.template_variables_unset.items()))])
            return buf.getvalue()
def update(self): beu = BackendUtils() be, img_obj = beu.get_backend_and_image_obj(self.image, str_preferred_backend=self.args.storage) be.update(img_obj.input_name, force=self.args.force)
def install(self): if self.args.debug: util.write_out(str(self.args)) storage_set = False if self.args.storage is None else True storage = _storage if not storage_set else self.args.storage args_system = getattr(self.args, 'system', None) args_user = getattr(self.args, 'user', None) if (args_system or args_user) and storage != 'ostree' and storage_set: raise ValueError("The --system and --user options are only available for the 'ostree' storage.") be_utils = BackendUtils() try: # Check to see if the container already exists _, _ = be_utils.get_backend_and_container_obj(self.name) raise ValueError("A container '%s' is already present" % self.name) except ValueError: pass if self.user: if not util.is_user_mode(): raise ValueError("--user does not work for privileged user") return self.syscontainers.install_user_container(self.image, self.name) if self.ostree_uri(self.image): return self.syscontainers.install(self.image, self.name) # Check if image exists str_backend = 'ostree' if self.args.system else self.args.storage or storage be = be_utils.get_backend_from_string(str_backend) img_obj = be.has_image(self.args.image) if img_obj and img_obj.is_system_type: be = be_utils.get_backend_from_string('ostree') if img_obj is None: # Unable to find the image locally, look remotely remote_image_obj = be.make_remote_image(self.args.image) # We found an atomic.type of system, therefore install it onto the ostree # backend if remote_image_obj.is_system_type and not storage_set: be_utils.message_backend_change('docker', 'ostree') be = be_utils.get_backend_from_string('ostree') be.pull_image(self.args.image, remote_image_obj, debug=self.args.debug) img_obj = be.has_image(self.image) if be.backend is not 'docker': if OSTREE_PRESENT and self.args.setvalues and not self.user and not self.system: raise ValueError("--set is valid only when used with --system or --user") # We need to fix this long term and get ostree # using the backend approach vs the atomic args 
be.syscontainers.set_args(self.args) return be.install(self.image, self.name) installation = None if storage == 'docker' and not args_system: if self.args.system_package == 'build': raise ValueError("'--system-package=build' is not supported for docker backend") installation = be.rpm_install(img_obj, self.name) install_args = img_obj.get_label('INSTALL') if installation or install_args: try: name = img_obj.fq_name except RegistryInspectError: name = img_obj.input_name install_data_content = { 'id': img_obj.id, "container_name": self.name, 'install_date': strftime("%Y-%m-%d %H:%M:%S", gmtime()) } if installation: # let's fail the installation if rpm for this image is already installed if util.InstallData.image_installed(img_obj): raise ValueError("Image {} is already installed.".format(self.image)) install_data_content["rpm_installed_files"] = installation.installed_files rpm_nvra = re.sub(r"\.rpm$", "", installation.original_rpm_name) install_data_content["system_package_nvra"] = rpm_nvra install_data = {name: install_data_content} if not install_args: return 0 install_args = install_args.split() cmd = self.sub_env_strings(self.gen_cmd(install_args + self.quote(self.args.args))) self.display(cmd) if not self.args.display: result = util.check_call(cmd) if result == 0: if installation or install_args: # Only write the install data if the installation worked. util.InstallData.write_install_data(install_data) return result
def install(self): if self.args.debug: util.write_out(str(self.args)) storage_set = False if self.args.storage is None else True storage = _storage if not storage_set else self.args.storage args_system = getattr(self.args, 'system', None) args_user = getattr(self.args, 'user', None) if (args_system or args_user) and storage != 'ostree' and storage_set: raise ValueError( "The --system and --user options are only available for the 'ostree' storage." ) be_utils = BackendUtils() try: # Check to see if the container already exists _, _ = be_utils.get_backend_and_container_obj(self.name) raise ValueError("A container '%s' is already present" % self.name) except ValueError: pass if self.user: if not util.is_user_mode(): raise ValueError("--user does not work for privileged user") return self.syscontainers.install_user_container( self.image, self.name) if self.ostree_uri(self.image): return self.syscontainers.install(self.image, self.name) # Check if image exists str_backend = 'ostree' if self.args.system else self.args.storage or storage be = be_utils.get_backend_from_string(str_backend) img_obj = be.has_image(self.args.image) if img_obj and img_obj.is_system_type: be = be_utils.get_backend_from_string('ostree') if img_obj is None: # Unable to find the image locally, look remotely remote_image_obj = be.make_remote_image(self.args.image) # We found an atomic.type of system, therefore install it onto the ostree # backend if remote_image_obj.is_system_type and not storage_set: be_utils.message_backend_change('docker', 'ostree') be = be_utils.get_backend_from_string('ostree') be.pull_image(self.args.image, remote_image_obj, debug=self.args.debug) img_obj = be.has_image(self.image) if be.backend is not 'docker': if OSTREE_PRESENT and self.args.setvalues and not self.user and not self.system: raise ValueError( "--set is valid only when used with --system or --user") # We need to fix this long term and get ostree # using the backend approach vs the atomic args 
be.syscontainers.set_args(self.args) return be.install(self.image, self.name) installation = None if storage == 'docker' and not args_system: if self.args.system_package == 'build': raise ValueError( "'--system-package=build' is not supported for docker backend" ) installation = be.rpm_install(img_obj, self.name) install_args = img_obj.get_label('INSTALL') if installation or install_args: try: name = img_obj.fq_name except RegistryInspectError: name = img_obj.input_name install_data_content = { 'id': img_obj.id, "container_name": self.name, 'install_date': strftime("%Y-%m-%d %H:%M:%S", gmtime()) } if installation: # let's fail the installation if rpm for this image is already installed if util.InstallData.image_installed(img_obj): raise ValueError("Image {} is already installed.".format( self.image)) install_data_content[ "rpm_installed_files"] = installation.installed_files rpm_nvra = re.sub(r"\.rpm$", "", installation.original_rpm_name) install_data_content["system_package_nvra"] = rpm_nvra install_data = {name: install_data_content} util.InstallData.write_install_data(install_data) if not install_args: return 0 install_args = install_args.split() cmd = self.sub_env_strings( self.gen_cmd(install_args + self.quote(self.args.args))) self.display(cmd) if not self.args.display: return util.check_call(cmd)
    def __init__(self):
        """Initialize the Info command and its backend resolver."""
        super(Info, self).__init__()
        # Resolves images/containers to their storage backend.
        self.beu = BackendUtils()
class Info(Atomic):
    """Report image metadata: per-layer version table and LABEL information,
    for local images or (with the remote flag) registry images."""

    def __init__(self):
        super(Info, self).__init__()
        # Resolves images/containers to their storage backend.
        self.beu = BackendUtils()

    def version(self):
        """TTY entry point: print the version table to stdout."""
        self._version(util.write_out)

    def _version(self, write_func):
        """Emit one IMAGE NAME / VERSION / IMAGE ID row per layer; extra
        repotags of a layer appear as indented "Tag:" rows."""
        layers = self.get_layer_objects()
        # VERSION column: widest long_version, floor of 9.
        version_width = max(len(layer.long_version) for layer in layers)
        if version_width <= 9:
            version_width = 9
        # IMAGE NAME column: longest repotag plus padding, floor of 12.
        name_width = len(max((tag for layer in layers for tag in layer.repotags),
                             key=len)) + 9
        if name_width <= 12:
            name_width = 12
        row = "{0:%d} {1:%d} {2:10}" % (name_width, version_width)
        write_func(row.format("IMAGE NAME", "VERSION", "IMAGE ID"))
        for layer in layers:
            for idx, tag in enumerate(layer.repotags):
                first = idx < 1
                write_func(row.format(
                    tag if first else " Tag: " + tag,
                    layer.long_version if first else "",
                    layer.id[:12] if first else ""))
        write_func("")

    def get_layer_objects(self):
        """Resolve self.image on its backend and return its layer objects."""
        _, image_obj = self.beu.get_backend_and_image(
            self.image, str_preferred_backend=self.args.storage)
        return image_obj.layers

    def dbus_version(self):
        """Same data as _version() but structured for the dbus API."""
        return [{"Image": layer.repotags,
                 "Version": layer.long_version,
                 "iid": layer.id} for layer in self.get_layer_objects()]

    def info_tty(self):
        """TTY entry point for info(): optionally dump args, then print."""
        if self.args.debug:
            util.write_out(str(self.args))
        util.write_out(self.info())

    def info(self):
        """
        Retrieve and print all LABEL information for a given image.
        """
        remote = self.args.force
        if self.args.storage == 'ostree' and remote:
            # Ostree and remote combos are illegal.
            raise ValueError(
                "The --remote option cannot be used with the 'ostree' storage option."
            )
        if remote:
            # The user wants information on a remote image.
            backend = self.beu.get_backend_from_string(self.args.storage)
            img_obj = backend.make_remote_image(self.image)
        else:
            # The image is local.
            backend, img_obj = self.beu.get_backend_and_image(
                self.image, str_preferred_backend=self.args.storage)
        try:
            info_name = img_obj.fq_name
        except RegistryInspectError:
            # Fall back to the user-typed name when the fully qualified
            # name cannot be resolved.
            info_name = img_obj.input_name
        parts = ["Image Name: {}\n".format(info_name)]
        parts.extend(sorted("{}: {}\n".format(k, v)
                            for k, v in img_obj.labels.items()))
        if img_obj.template_variables_set:
            parts.append(
                "\n\nTemplate variables with default value, but overridable with --set:\n"
            )
            parts.extend("{}: {}\n".format(k, v) for k, v in
                         sorted(img_obj.template_variables_set.items()))
        if img_obj.template_variables_unset:
            parts.append(
                "\n\nTemplate variables that has no default value, and must be set with --set:\n"
            )
            parts.extend("{}: {}\n".format(k, v) for k, v in
                         sorted(img_obj.template_variables_unset.items()))
        return "".join(parts)
class Containers(Atomic):
    """List, filter, fstrim, delete, update, and roll back containers
    across the configured storage backends."""

    # Maps --filter keys to (possibly dotted) container attribute paths.
    FILTER_KEYWORDS = {"container": "id", "image": "image_name",
                       "command": "command", "created": "created",
                       "state": "state", "runtime": "runtime",
                       "backend": "backend.backend"}

    def __init__(self):
        super(Containers, self).__init__()
        # Resolves containers/images to their storage backend.
        self.beu = BackendUtils()

    def fstrim(self):
        """Run fstrim on every docker container's root filesystem."""
        with AtomicDocker() as client:
            for container in client.containers():
                container_id = container["Id"]
                ret = self._inspect_container(name=container_id)
                pid = ret["State"]["Pid"]
                # Reach the container's rootfs through /proc/<pid>/root.
                mp = "/proc/%d/root" % (pid)
                util.write_out("Trimming container id {0}".format(container_id[0:12]))
                util.check_call(["/usr/sbin/fstrim", "-v", mp], stdout=DEVNULL)
        return

    def filter_container_objects(self, con_objs):
        """Apply every --filter key=value as a case-insensitive substring
        match on the mapped attribute; return the surviving objects."""
        def _walk(_filter_objs, _filter, _value):
            _filtered = []
            for con_obj in _filter_objs:
                # Follow a dotted attribute path (e.g. "backend.backend").
                it = con_obj
                for i in _filter.split("."):
                    it = getattr(it, i, None)
                # NOTE(review): if the attribute resolves to None this raises
                # AttributeError; confirm all mapped attributes are strings.
                if _value.lower() in it.lower():
                    _filtered.append(con_obj)
            return _filtered

        if not self.args.filter:
            return con_objs
        for f in self.args.filter:
            cfilter, value = f.split('=', 1)
            cfilter = self.FILTER_KEYWORDS[cfilter]
            con_objs = _walk(con_objs, cfilter, value)
        return con_objs

    def ps_tty(self):
        """Render the container listing for a terminal (honors --quiet,
        --json, --truncate, --heading, --all)."""
        if self.args.debug:
            util.write_out(str(self.args))
            self.beu.dump_backends()
        container_objects = self._ps()
        # If we were not asked for json output, return with no output
        # when there are no applicable container objects.
        if not self.args.json:
            if len(container_objects) == 0:
                return 0
            if not any(x.running for x in container_objects) and not self.args.all:
                return 0
        # CONTAINER ID column: 12 when truncating, else the longest id
        # (falling back to 0 for an empty listing).
        if self.args.truncate:
            max_container_id = 12
        else:
            max_container_id = max([len(x.id) for x in container_objects] or [0])
        # Quiet supersedes json output.
        if self.args.quiet:
            for con_obj in container_objects:
                util.write_out(con_obj.id[0:max_container_id])
            return 0
        if self.args.json:
            util.output_json(self._to_json(container_objects))
            return 0
        # Remaining column widths, each with a truncated fixed width or a
        # content-driven width with a floor.
        max_image_name = 20 if self.args.truncate else max(
            [len(x.image_name) for x in container_objects])
        if self.args.truncate:
            max_command = 10
        else:
            _max_command = max([len(x.command) for x in container_objects])
            max_command = _max_command if _max_command > 9 else 10
        max_container_name = 10 if self.args.truncate else max(
            [len(x.name) for x in container_objects])
        col_out = "{0:2} {1:%s} {2:%s} {3:%s} {4:%s} {5:16} {6:10} {7:10} {8:10}" % (
            max_container_id, max_image_name, max_container_name, max_command)
        if self.args.heading:
            util.write_out(col_out.format(" ", "CONTAINER ID", "IMAGE", "NAME",
                                          "COMMAND", "CREATED", "STATE",
                                          "BACKEND", "RUNTIME"))
        for con_obj in container_objects:
            # A skull glyph marks containers flagged by _mark_vulnerable().
            indicator = ""
            if con_obj.vulnerable:
                if util.is_python2:
                    indicator = indicator + self.skull + " "
                else:
                    indicator = indicator + str(self.skull, "utf-8") + " "
            util.write_out(col_out.format(indicator,
                                          con_obj.id[0:max_container_id],
                                          con_obj.image_name[0:max_image_name],
                                          con_obj.name[0:max_container_name],
                                          str(con_obj.command)[0:max_command],
                                          con_obj.created[0:16],
                                          con_obj.state[0:10],
                                          con_obj.backend.backend[0:10],
                                          con_obj.runtime[0:10]))

    def ps(self):
        """JSON-serializable container listing (API/dbus entry point)."""
        container_objects = self._ps()
        return self._to_json(container_objects)

    def _ps(self):
        """Collect container objects, applying --filter and, unless --all,
        restricting to running containers."""
        def _check_filters():
            # Validate every --filter key against FILTER_KEYWORDS up front.
            if not self.args.filter:
                return True
            for f in self.args.filter:
                _filter, _ = f.split('=', 1)
                keywords = list(self.FILTER_KEYWORDS.keys())
                if _filter not in keywords:
                    raise ValueError("The filter {} is not valid. "
                                     "Please choose from {}".format(_filter, keywords))
        _check_filters()
        containers = self.filter_container_objects(self.beu.get_containers())
        self._mark_vulnerable(containers)
        if self.args.all:
            return containers
        return [x for x in containers if x.running]

    @staticmethod
    def _to_json(con_objects):
        """Flatten container objects into plain dicts for JSON output."""
        containers = []
        for con_obj in con_objects:
            _con = {'id': con_obj.id,
                    'image_id': con_obj.image,
                    'image_name': con_obj.image_name,
                    'name': con_obj.name,
                    'command': con_obj.command,
                    'created': con_obj.created,
                    'state': con_obj.state,
                    'backend': con_obj.backend.backend,
                    'runtime': con_obj.runtime,
                    'vulnerable': con_obj.vulnerable,
                    'running': con_obj.running
                    }
            containers.append(_con)
        return containers

    def delete(self):
        """Delete the selected containers (or --all), prompting for
        confirmation unless --assumeyes; returns 0, exits 2 on abort."""
        if self.args.debug:
            util.write_out(str(self.args))
            self.beu.dump_backends()
        # Exactly one of --all / an explicit container list must be given.
        # BUG FIX: the message previously said "images".
        if (len(self.args.containers) > 0 and self.args.all) or (len(self.args.containers) < 1 and not self.args.all):
            raise ValueError("You must select --all or provide a list of containers to delete.")
        if self.args.all:
            if self.args.storage:
                be = self.beu.get_backend_from_string(self.args.storage)
                container_objects = be.get_containers()
            else:
                container_objects = self.beu.get_containers()
        else:
            container_objects = []
            for con in self.args.containers:
                # NOTE(review): 'storage' is presumably a module-level
                # default backend name — confirm it is defined at file scope.
                _, con_obj = self.beu.get_backend_and_container_obj(
                    con, str_preferred_backend=self.args.storage or storage,
                    required=True if self.args.storage else False)
                container_objects.append(con_obj)
        if len(container_objects) == 0:
            raise ValueError("No containers to delete")
        four_col = " {0:12} {1:20} {2:25} {3:10}"
        # BUG FIX: the interactive prompt previously said "images".
        if not self.args.assumeyes:
            util.write_out("Do you wish to delete the following containers?\n")
        else:
            util.write_out("The following containers will be deleted.\n")
        util.write_out(four_col.format("ID", "NAME", "IMAGE_NAME", "STORAGE"))
        for con in container_objects:
            util.write_out(four_col.format(con.id[0:12], con.name[0:20],
                                           con.image_name[0:25], con.backend.backend))
        if not self.args.assumeyes:
            confirm = util.input("\nConfirm (y/N) ")
            confirm = confirm.strip().lower()
            if confirm not in ['y', 'yes']:
                util.write_err("User aborted delete operation for {}".format(
                    self.args.containers or "all containers"))
                sys.exit(2)
        for del_con in container_objects:
            try:
                del_con.backend.delete_container(del_con.id, force=self.args.force)
            except APIError as e:
                # BUG FIX: previously reported 'con.id' — the stale loop
                # variable from the printing loop above.
                util.write_err("Failed to delete container {}: {}".format(del_con.id, e))
        return 0

    def _mark_vulnerable(self, containers):
        """Set .vulnerable on containers whose id is in the scan results."""
        assert isinstance(containers, list)
        vulnerable_uuids = self.get_vulnerable_ids()
        for con in containers:
            if con.id in vulnerable_uuids:
                con.vulnerable = True

    def update(self):
        """Update one installed system container."""
        if self.syscontainers.get_checkout(self.args.container):
            return self.syscontainers.update_container(self.args.container,
                                                       self.args.setvalues,
                                                       self.args.rebase)
        raise ValueError("System container '%s' is not installed" % self.args.container)

    def rollback(self):
        """Roll a system container back to its previous deployment."""
        util.write_out("Attempting to roll back system container: %s" % self.args.container)
        self.syscontainers.rollback(self.args.container)
    def __init__(self):
        """Initialize the Containers command and its backend resolver."""
        super(Containers, self).__init__()
        # Resolves containers/images to their storage backend.
        self.beu = BackendUtils()
class Verify(Atomic):
    """Compare local image layer versions against the registry and report
    which layers differ (and therefore have updates available)."""

    def __init__(self):
        super(Verify, self).__init__()
        self.debug = False
        # Resolves images to their storage backend.
        self.backend_utils = BackendUtils()

    def _layers_match(self, local, remote):
        """Return True when every local layer equals its remote counterpart.

        Indexes remote by local's positions, so a shorter remote list raises
        IndexError (same as the original loop-based comparison).
        """
        return all(local[i] == remote[i] for i in range(len(local)))

    def verify(self):
        """Entry point: verify one image, or every fully qualified image in
        --storage when --all is given."""
        if self.args.image and self.args.all:
            raise ValueError("Incompatible options specified. --all doesn't support an image name")
        if not self.args.all and not self.args.image:
            raise ValueError("Please specify the image name")
        if self.args.all and not self.args.storage:
            raise ValueError("Please specify --storage")
        if self.args.all:
            be = BackendUtils().get_backend_from_string(self.args.storage)
            images = be.get_images()
            for i in images:
                if i.repotags is None:
                    continue
                img_name = i.repotags[0]
                d = util.Decompose(img_name)
                # Only fully qualified names can be checked against a registry.
                if d.registry == "":
                    util.write_err("Image {} not fully qualified: skipping".format(img_name))
                    continue
                self._verify_one_image(img_name)
        else:
            return self._verify_one_image(self.args.image)

    def _verify_one_image(self, image):
        """Verify a single image, printing a diff table when any layer
        differs (or always with --verbose), then validate layer content."""
        if self.args.debug:
            util.write_out(str(self.args))
        be, local_layers, remote_layers = self._verify(image)
        if not self._layers_match(local_layers, remote_layers) or self.args.verbose:
            col = "{0:30} {1:20} {2:20} {3:1}"
            util.write_out("\n{} contains the following images:\n".format(image))
            util.write_out(col.format("NAME", "LOCAL VERSION", "REMOTE VERSION", "DIFFERS"))
            for layer_int in range(len(local_layers)):
                differs = 'NO' if remote_layers[layer_int] == local_layers[layer_int] else 'YES'
                util.write_out(col.format(local_layers[layer_int].name[:30],
                                          local_layers[layer_int].long_version[:20],
                                          remote_layers[layer_int].long_version[:20],
                                          differs))
            util.write_out("\n")
        if not self.args.no_validate:
            be.validate_layer(image)

    def verify_dbus(self):
        """Layer comparison structured for the dbus API."""
        _, local_layers, remote_layers = self._verify(self.args.image)
        layers = []
        for layer_int in range(len(local_layers)):
            layer = {}
            layer['name'] = local_layers[layer_int].name
            layer['local_version'] = local_layers[layer_int].long_version
            layer['remote_version'] = remote_layers[layer_int].long_version
            layer['differs'] = remote_layers[layer_int] != local_layers[layer_int]
            layers.append(layer)
        return layers

    def _verify(self, image):
        """Resolve the image locally and fetch its :latest remote
        counterpart; return (backend, local_layers, remote_layers)."""
        # NOTE(review): 'storage' is presumably a module-level default
        # backend name — confirm it is defined at file scope.
        be, img_obj = self.backend_utils.get_backend_and_image_obj(
            image, str_preferred_backend=self.args.storage or storage,
            required=True if self.args.storage else False)
        remote_img_name = "{}:latest".format(util.Decompose(img_obj.fq_name).no_tag)
        remote_img_obj = be.make_remote_image(remote_img_name)
        return be, img_obj.layers, remote_img_obj.layers

    def get_tagged_images(self, names, layers):
        """
        Returns a dict with image names and its tag name.
        :param names: image names to look up in *layers*
        :param layers: layer dicts (with 'Name', 'RepoTags', 'Id', 'index')
        :return: list of sorted dicts (by index)
        """
        base_images = []
        for name in names:
            # BUG FIX: was `x['RepoTags'] is not ''` — identity comparison
            # with a string literal; use inequality.
            _match = next((x for x in layers if x['Name'] == name and x['RepoTags'] != ''), None)
            registry, repo, image, tag, digest = util.Decompose(self.get_fq_image_name(_match['RepoTags'][0])).all
            tag = "latest"
            ri = RegistryInspect(registry=registry, repo=repo, image=image,
                                 tag=tag, digest=digest, debug=self.debug)
            remote_inspect = ri.inspect()
            # BUG FIX: guard against a missing/None "Labels" mapping, which
            # previously raised AttributeError on .get().
            labels = remote_inspect.get("Labels") or {}
            release = labels.get("Release", None)
            version = labels.get("Version", None)
            if release and version:
                remote_version = "{}-{}-{}".format(name, version, release)
            else:
                # Check if the blob exists on the registry by the ID
                remote_id = no_shaw(ri.remote_id)
                _match['Version'] = _match['Id']
                remote_version = remote_id if remote_id is not None else ""
            _match['Remote Version'] = remote_version
            base_images.append(_match)
        return sorted(base_images, key=itemgetter('index'))

    @staticmethod
    def _mismatch(layer):
        """Classify one layer: 'Yes' (differs), '!' (no version info on
        either side), or 'No' (up to date)."""
        if layer['Version'] != layer['Remote Version'] and layer['Remote Version'] != layer['Id']:
            return "Yes"
        if layer['Version'] == '' and layer['Remote Version'] == '':
            return "!"
        return "No"

    @staticmethod
    def print_verify(base_images, image, verbose=False):
        """
        Implements a verbose printout of layers.  Can be called with
        atomic verify -v or if we detect some layer does not have
        versioning information.
        :param base_images: layer dicts as built by get_tagged_images
        :param image: image name used in the heading
        :return: None
        """
        def check_for_updates(base_images):
            # Any mismatched or unversioned layer triggers the table.
            for i in base_images:
                if Verify._mismatch(i) in ['Yes', '!']:
                    return True
            return False

        has_updates = check_for_updates(base_images)
        if has_updates or verbose:
            col = "{0:30} {1:20} {2:20} {3:1}"
            util.write_out("\n{} contains the following images:\n".format(image))
            util.write_out(col.format("NAME", "LOCAL VERSION", "REMOTE VERSION", "DIFFERS"))
            for _image in base_images:
                util.write_out(col.format(_image['Name'][:30],
                                          _image['Version'][:20],
                                          _image['Remote Version'][:20],
                                          Verify._mismatch(_image)))
            util.write_out("\n")
    def __init__(self):
        """Initialize the Verify command and its backend resolver."""
        super(Verify, self).__init__()
        # Verbose registry-inspection debugging is off by default.
        self.debug = False
        # Resolves images to their storage backend.
        self.backend_utils = BackendUtils()
class Verify(Atomic):
    """Verify locally installed image layers against their remote peers.

    Compares the version information recorded for each local layer with
    what the registry currently advertises and reports any drift.
    """

    def __init__(self):
        super(Verify, self).__init__()
        # Quiet by default; callers flip this on for troubleshooting output.
        self.debug = False
        # Used to resolve images across the configured storage backends.
        self.backend_utils = BackendUtils()

    def _layers_match(self, local, remote):
        """Return True when every local layer equals its remote counterpart.

        Indexed by the local list's length, so a shorter ``remote`` raises
        IndexError — same contract as the original explicit loop.
        """
        return all(local[i] == remote[i] for i in range(len(local)))

    def verify(self):
        """Print a layer-by-layer comparison table when any layer differs,
        or unconditionally when --verbose was given."""
        if self.args.debug:
            util.write_out(str(self.args))
        local_layers, remote_layers = self._verify()
        if not self._layers_match(local_layers, remote_layers) or self.args.verbose:
            col = "{0:30} {1:20} {2:20} {3:1}"
            util.write_out("\n{} contains the following images:\n".format(self.image))
            util.write_out(col.format("NAME", "LOCAL VERSION", "REMOTE VERSION", "DIFFERS"))
            for layer_int in range(len(local_layers)):
                differs = 'NO' if remote_layers[layer_int] == local_layers[layer_int] else 'YES'
                util.write_out(col.format(local_layers[layer_int].name[:30],
                                          local_layers[layer_int].long_version[:20],
                                          remote_layers[layer_int].long_version[:20],
                                          differs))
            util.write_out("\n")

    def verify_dbus(self):
        """Return the layer comparison as plain dicts for the dbus API.

        :return: list of {'name', 'local_version', 'remote_version', 'differs'}
        """
        local_layers, remote_layers = self._verify()
        layers = []
        for layer_int in range(len(local_layers)):
            layers.append({
                'name': local_layers[layer_int].name,
                'local_version': local_layers[layer_int].long_version,
                'remote_version': remote_layers[layer_int].long_version,
                'differs': not (remote_layers[layer_int] == local_layers[layer_int]),
            })
        return layers

    def _verify(self):
        """Resolve self.image locally and fetch its remote ':latest' peer.

        :return: tuple (local_layers, remote_layers)
        """
        be, img_obj = self.backend_utils.get_backend_and_image_obj(self.image, self.args.storage)
        remote_img_name = "{}:latest".format(util.Decompose(img_obj.fq_name).no_tag)
        remote_img_obj = be.make_remote_image(remote_img_name)
        return img_obj.layers, remote_img_obj.layers

    def get_tagged_images(self, names, layers):
        """Return the matching layer entries annotated with remote versions.

        :param names: iterable of image names to look up
        :param layers: layer dicts (each with 'Name', 'RepoTags', 'Id', 'index')
        :return: list of layer dicts sorted by their 'index' key
        """
        base_images = []
        for name in names:
            # Fix: the original tested "x['RepoTags'] is not ''" — an identity
            # comparison with a literal that is effectively always True;
            # equality is what was meant.
            _match = next((x for x in layers if x['Name'] == name and x['RepoTags'] != ''), None)
            # NOTE(review): _match is None when nothing matches; the subscript
            # below would raise TypeError — confirm callers guarantee a match.
            registry, repo, image, tag, _ = util.Decompose(self.get_fq_image_name(_match['RepoTags'][0])).all
            tag = "latest"
            ri = RegistryInspect(registry=registry, repo=repo, image=image, tag=tag, debug=self.debug)
            ri.ping()
            remote_inspect = ri.inspect()
            # Fix: 'Labels' may be present but null in the remote config; the
            # original .get(...).get(...) chain raised AttributeError then.
            labels = remote_inspect.get("Labels", None) or {}
            release = labels.get("Release", None)
            version = labels.get("Version", None)
            if release and version:
                remote_version = "{}-{}-{}".format(name, version, release)
            else:
                # Check if the blob exists on the registry by the ID
                # NOTE(review): assumes manifest_json always carries a
                # "config" entry — confirm for non-v2 manifests.
                remote_id = no_shaw(ri.rc.manifest_json.get("config", None).get("digest", None))
                _match['Version'] = _match['Id']
                remote_version = remote_id if remote_id is not None else ""
            _match['Remote Version'] = remote_version
            base_images.append(_match)
        return sorted(base_images, key=itemgetter('index'))

    @staticmethod
    def _mismatch(layer):
        """Classify one layer comparison: 'Yes' (differs), 'No' (matches),
        or '!' (no version information available on either side)."""
        if layer['Version'] != layer['Remote Version'] and layer['Remote Version'] != layer['Id']:
            return "Yes"
        if layer['Version'] == '' and layer['Remote Version'] == '':
            return "!"
        return "No"

    @staticmethod
    def print_verify(base_images, image, verbose=False):
        """Print a table of layers for *image*.

        Emitted when any layer differs or lacks version information, or
        always when *verbose* is set.

        :param base_images: layer dicts as produced by get_tagged_images
        :param image: image name used in the table header
        :param verbose: force the printout even without differences
        :return: None
        """
        def check_for_updates(base_images):
            # True when at least one layer differs or has no version info.
            for i in base_images:
                if Verify._mismatch(i) in ['Yes', '!']:
                    return True
            return False
        has_updates = check_for_updates(base_images)
        if has_updates or verbose:
            col = "{0:30} {1:20} {2:20} {3:1}"
            util.write_out("\n{} contains the following images:\n".format(image))
            util.write_out(col.format("NAME", "LOCAL VERSION", "REMOTE VERSION", "DIFFERS"))
            for _image in base_images:
                util.write_out(col.format(_image['Name'][:30], _image['Version'][:20],
                                          _image['Remote Version'][:20], Verify._mismatch(_image)))
            util.write_out("\n")

    def verify_system_image(self):
        """Verify a system (ostree) image against its remote manifest."""
        manifest = self.syscontainers.get_manifest(self.image)
        # Fix: the original called json.loads(manifest) before checking that a
        # manifest exists, crashing when get_manifest returned nothing.
        if manifest:
            name = json.loads(manifest).get('Name', self.image)
            layers = SystemContainers.get_layers_from_manifest(manifest)
        else:
            name = self.image
            layers = [self.image]
        if not getattr(self.args, "no_validate", False):
            self.validate_system_image_manifests(layers)
        if not manifest:
            return
        remote = True
        try:
            remote_manifest = self.syscontainers.get_manifest(self.image, remote=True)
            remote_layers = SystemContainers.get_layers_from_manifest(remote_manifest)
        except subprocess.CalledProcessError:
            remote_layers = []
            remote = False
        # Python 2 compatibility: izip_longest was renamed zip_longest in 3.
        if hasattr(itertools, 'izip_longest'):
            zip_longest = getattr(itertools, 'izip_longest')
        else:
            zip_longest = getattr(itertools, 'zip_longest')
        images = []
        # NOTE(review): the loop variable shadows the `remote` availability
        # flag set above, so the 'remote' key holds the remote layer digest
        # (or None) — preserved as-is; confirm which value consumers expect.
        for local, remote in zip_longest(layers, remote_layers):
            images.append({'Name': name,
                           'Version': no_shaw(local),
                           'Id': no_shaw(local),
                           'Remote Version': no_shaw(remote),
                           'remote': remote,
                           'no_version': True,
                           'Repo Tags': self.image,
                           })
        self.print_verify(images, self.image, verbose=self.args.verbose)

    def validate_system_image_manifests(self, layers):
        """Validate a system image's layers against the validation manifests
        created from those image layers on atomic pull.

        :param layers: list of the names of the layers to validate
        :return: None
        """
        for layer in layers:
            mismatches = self.syscontainers.validate_layer(layer)
            if len(mismatches) > 0:
                # Fix: original message read "in layer %s layer" (duplicated word).
                util.write_out("modifications in layer %s:\n" % layer)
                for m in mismatches:
                    util.write_out("file '%s' changed checksum from '%s' to '%s'"
                                   % (m["name"], m["old-checksum"], m["new-checksum"]))

    def validate_image_manifest(self):
        """Mount the docker image and validate its whole rootfs against the
        gomtree manifest recorded for it (not layer by layer).

        :return: None
        """
        iid = self._is_image(self.image)
        manifestname = os.path.join(util.ATOMIC_VAR_LIB, "gomtree-manifests/%s.mtree" % iid)
        if not os.path.exists(manifestname):
            return
        tmpdir = tempfile.mkdtemp()
        m = Mount()
        m.args = []
        m.image = self.image
        m.mountpoint = tmpdir
        m.mount()
        # Fix: unmount and remove the temp dir even if validation raises, so
        # a failure no longer leaks the mount point or the directory.
        try:
            r = util.validate_manifest(manifestname, img_rootfs=tmpdir,
                                       keywords="type,uid,gid,mode,size,sha256digest")
        finally:
            m.unmount()
            shutil.rmtree(tmpdir)
        if r.return_code != 0:
            util.write_err(r.stdout)

    @staticmethod
    def get_gomtree_manifest(layer, root=os.path.join(util.ATOMIC_VAR_LIB, "gomtree-manifests")):
        """Return the path of the recorded gomtree manifest for *layer*,
        or None when no manifest exists.

        :param layer: layer id
        :param root: directory holding the recorded .mtree manifests
        """
        manifestpath = os.path.join(root, "%s.mtree" % layer)
        if os.path.isfile(manifestpath):
            return manifestpath
        return None
class Verify(Atomic):
    """Verify locally installed image layers against their remote peers.

    Compares the version information recorded for each local layer with
    what the registry currently advertises and reports any drift.
    """

    def __init__(self):
        super(Verify, self).__init__()
        # Quiet by default; callers flip this on for troubleshooting output.
        self.debug = False
        # Used to resolve images across the configured storage backends.
        self.backend_utils = BackendUtils()

    def _layers_match(self, local, remote):
        """Return True when every local layer equals its remote counterpart.

        Indexed by the local list's length, so a shorter ``remote`` raises
        IndexError — same contract as the original explicit loop.
        """
        return all(local[i] == remote[i] for i in range(len(local)))

    def verify(self):
        """Print a layer-by-layer comparison table when any layer differs,
        or unconditionally when --verbose was given."""
        if self.args.debug:
            util.write_out(str(self.args))
        local_layers, remote_layers = self._verify()
        if not self._layers_match(local_layers, remote_layers) or self.args.verbose:
            col = "{0:30} {1:20} {2:20} {3:1}"
            util.write_out("\n{} contains the following images:\n".format(self.image))
            util.write_out(col.format("NAME", "LOCAL VERSION", "REMOTE VERSION", "DIFFERS"))
            for layer_int in range(len(local_layers)):
                differs = 'NO' if remote_layers[layer_int] == local_layers[layer_int] else 'YES'
                util.write_out(col.format(local_layers[layer_int].name[:30],
                                          local_layers[layer_int].long_version[:20],
                                          remote_layers[layer_int].long_version[:20],
                                          differs))
            util.write_out("\n")

    def verify_dbus(self):
        """Return the layer comparison as plain dicts for the dbus API.

        :return: list of {'name', 'local_version', 'remote_version', 'differs'}
        """
        local_layers, remote_layers = self._verify()
        layers = []
        for layer_int in range(len(local_layers)):
            layers.append({
                'name': local_layers[layer_int].name,
                'local_version': local_layers[layer_int].long_version,
                'remote_version': remote_layers[layer_int].long_version,
                'differs': not (remote_layers[layer_int] == local_layers[layer_int]),
            })
        return layers

    def _verify(self):
        """Resolve self.image locally and fetch its remote ':latest' peer.

        Prefers the backend named by --storage (and requires it when the
        user gave one explicitly); otherwise falls back to the default.

        :return: tuple (local_layers, remote_layers)
        """
        be, img_obj = self.backend_utils.get_backend_and_image_obj(
            self.image,
            str_preferred_backend=self.args.storage or storage,
            required=bool(self.args.storage))
        remote_img_name = "{}:latest".format(util.Decompose(img_obj.fq_name).no_tag)
        remote_img_obj = be.make_remote_image(remote_img_name)
        return img_obj.layers, remote_img_obj.layers

    def get_tagged_images(self, names, layers):
        """Return the matching layer entries annotated with remote versions.

        :param names: iterable of image names to look up
        :param layers: layer dicts (each with 'Name', 'RepoTags', 'Id', 'index')
        :return: list of layer dicts sorted by their 'index' key
        """
        base_images = []
        for name in names:
            # Fix: the original tested "x['RepoTags'] is not ''" — an identity
            # comparison with a literal that is effectively always True;
            # equality is what was meant.
            _match = next((x for x in layers if x['Name'] == name and x['RepoTags'] != ''), None)
            # NOTE(review): _match is None when nothing matches; the subscript
            # below would raise TypeError — confirm callers guarantee a match.
            registry, repo, image, tag, digest = util.Decompose(self.get_fq_image_name(_match['RepoTags'][0])).all
            tag = "latest"
            ri = RegistryInspect(registry=registry, repo=repo, image=image, tag=tag,
                                 digest=digest, debug=self.debug)
            remote_inspect = ri.inspect()
            # Fix: 'Labels' may be present but null in the remote config; the
            # original .get(...).get(...) chain raised AttributeError then.
            labels = remote_inspect.get("Labels", None) or {}
            release = labels.get("Release", None)
            version = labels.get("Version", None)
            if release and version:
                remote_version = "{}-{}-{}".format(name, version, release)
            else:
                # Check if the blob exists on the registry by the ID
                remote_id = no_shaw(ri.remote_id)
                _match['Version'] = _match['Id']
                remote_version = remote_id if remote_id is not None else ""
            _match['Remote Version'] = remote_version
            base_images.append(_match)
        return sorted(base_images, key=itemgetter('index'))

    @staticmethod
    def _mismatch(layer):
        """Classify one layer comparison: 'Yes' (differs), 'No' (matches),
        or '!' (no version information available on either side)."""
        if layer['Version'] != layer['Remote Version'] and layer['Remote Version'] != layer['Id']:
            return "Yes"
        if layer['Version'] == '' and layer['Remote Version'] == '':
            return "!"
        return "No"

    @staticmethod
    def print_verify(base_images, image, verbose=False):
        """Print a table of layers for *image*.

        Emitted when any layer differs or lacks version information, or
        always when *verbose* is set.

        :param base_images: layer dicts as produced by get_tagged_images
        :param image: image name used in the table header
        :param verbose: force the printout even without differences
        :return: None
        """
        def check_for_updates(base_images):
            # True when at least one layer differs or has no version info.
            for i in base_images:
                if Verify._mismatch(i) in ['Yes', '!']:
                    return True
            return False
        has_updates = check_for_updates(base_images)
        if has_updates or verbose:
            col = "{0:30} {1:20} {2:20} {3:1}"
            util.write_out("\n{} contains the following images:\n".format(image))
            util.write_out(col.format("NAME", "LOCAL VERSION", "REMOTE VERSION", "DIFFERS"))
            for _image in base_images:
                util.write_out(col.format(_image['Name'][:30], _image['Version'][:20],
                                          _image['Remote Version'][:20], Verify._mismatch(_image)))
            util.write_out("\n")

    def verify_system_image(self):
        """Verify a system (ostree) image against its remote manifest."""
        manifest = self.syscontainers.get_manifest(self.image)
        # Fix: the original called json.loads(manifest) before checking that a
        # manifest exists, crashing when get_manifest returned nothing.
        if manifest:
            name = json.loads(manifest).get('Name', self.image)
            layers = SystemContainers.get_layers_from_manifest(manifest)
        else:
            name = self.image
            layers = [self.image]
        if not getattr(self.args, "no_validate", False):
            self.validate_system_image_manifests(layers)
        if not manifest:
            return
        remote = True
        try:
            remote_manifest = self.syscontainers.get_manifest(self.image, remote=True)
            remote_layers = SystemContainers.get_layers_from_manifest(remote_manifest)
        except subprocess.CalledProcessError:
            remote_layers = []
            remote = False
        # Python 2 compatibility: izip_longest was renamed zip_longest in 3.
        if hasattr(itertools, 'izip_longest'):
            zip_longest = getattr(itertools, 'izip_longest')
        else:
            zip_longest = getattr(itertools, 'zip_longest')
        images = []
        # NOTE(review): the loop variable shadows the `remote` availability
        # flag set above, so the 'remote' key holds the remote layer digest
        # (or None) — preserved as-is; confirm which value consumers expect.
        for local, remote in zip_longest(layers, remote_layers):
            images.append({'Name': name,
                           'Version': no_shaw(local),
                           'Id': no_shaw(local),
                           'Remote Version': no_shaw(remote),
                           'remote': remote,
                           'no_version': True,
                           'Repo Tags': self.image,
                           })
        self.print_verify(images, self.image, verbose=self.args.verbose)

    def validate_system_image_manifests(self, layers):
        """Validate a system image's layers against the validation manifests
        created from those image layers on atomic pull.

        :param layers: list of the names of the layers to validate
        :return: None
        """
        for layer in layers:
            mismatches = self.syscontainers.validate_layer(layer)
            if len(mismatches) > 0:
                # Fix: original message read "in layer %s layer" (duplicated word).
                util.write_out("modifications in layer %s:\n" % layer)
                for m in mismatches:
                    util.write_out("file '%s' changed checksum from '%s' to '%s'"
                                   % (m["name"], m["old-checksum"], m["new-checksum"]))

    def validate_image_manifest(self):
        """Mount the docker image and validate its whole rootfs against the
        gomtree manifest recorded for it (not layer by layer).

        :return: None
        """
        iid = self._is_image(self.image)
        manifestname = os.path.join(util.ATOMIC_VAR_LIB, "gomtree-manifests/%s.mtree" % iid)
        if not os.path.exists(manifestname):
            return
        tmpdir = tempfile.mkdtemp()
        m = Mount()
        m.args = []
        m.image = self.image
        m.mountpoint = tmpdir
        m.mount()
        # Fix: unmount and remove the temp dir even if validation raises, so
        # a failure no longer leaks the mount point or the directory.
        try:
            r = util.validate_manifest(manifestname, img_rootfs=tmpdir,
                                       keywords="type,uid,gid,mode,size,sha256digest")
        finally:
            m.unmount()
            shutil.rmtree(tmpdir)
        if r.return_code != 0:
            util.write_err(r.stdout)

    @staticmethod
    def get_gomtree_manifest(layer, root=os.path.join(util.ATOMIC_VAR_LIB, "gomtree-manifests")):
        """Return the path of the recorded gomtree manifest for *layer*,
        or None when no manifest exists.

        :param layer: layer id
        :param root: directory holding the recorded .mtree manifests
        """
        manifestpath = os.path.join(root, "%s.mtree" % layer)
        if os.path.isfile(manifestpath):
            return manifestpath
        return None