def testSaveToCache(self):
    '''Given a image object, check the image layers are cached'''
    # function-scope import keeps the test self-contained
    from unittest.mock import patch
    self.image.load_image()
    layer = self.image.layers[0]
    # patch.object restores cache.add_layer when the block exits; the
    # original `cache.add_layer = Mock()` assignment was never undone,
    # so the mock leaked into every later test in the same process
    with patch.object(cache, 'add_layer') as mock_add_layer:
        common.save_to_cache(self.image)
    # save_to_cache should cache exactly the one loaded layer
    mock_add_layer.assert_called_once_with(layer)
def analyze_full_image(full_image, options):
    """If we are able to load a full image after a build, we can run
    an analysis on it"""
    # prepare the runtime environment, then run the analysis proper
    crun.setup(full_image)
    cimage.analyze(full_image, options)
    # tear down whatever the analysis mounted/created
    rootfs.clean_up()
    # only after a successful analysis can imported layers be marked
    lock.set_imported_layers(full_image)
    # persist layer results so a rerun can reuse them
    common.save_to_cache(full_image)
    return [full_image]
def analyze_docker_image(image_obj, redo=False, dockerfile=False):
    '''Given a DockerImage object, for each layer, retrieve the packages,
    first looking up in cache and if not there then looking up in the
    command library. For looking up in command library first mount the
    filesystem and then look up the command library for commands to run
    in chroot'''
    # packages found so far, accumulated across all layers
    master_list = []
    prepare_for_analysis(image_obj, dockerfile)
    # the first layer also yields the shell used for the remaining layers
    shell = analyze_first_layer(image_obj, master_list, redo)
    analyze_subsequent_layers(image_obj, shell, master_list, redo)
    # persist the per-layer results so a later run can skip the work
    common.save_to_cache(image_obj)
def execute(self, image_obj, redo=False):
    '''Execution should be:
        scancode -ilpcu --quiet --json - /path/to/directory
    '''
    for layer in image_obj.layers:
        # hydrate the layer with any previously cached results
        common.load_from_cache(layer)
        if layer.files_analyzed and not redo:
            # cached file analysis is still valid; nothing to do
            continue
        new_file_list = collect_layer_data(layer)
        if new_file_list:
            add_file_data(layer, new_file_list)
            layer.files_analyzed = True
    # write the (possibly updated) layers back to the cache
    common.save_to_cache(image_obj)
def analyze_base_image(base_image, options):
    """If we are unable to load the full image, we will try to analyze
    the base image and try to extrapolate"""
    # environment setup, analysis, then teardown — same flow as the
    # full-image path but applied to the base image only
    crun.setup(base_image)
    cimage.analyze(base_image, options)
    rootfs.clean_up()
    # cache the base image's layer results for reuse
    common.save_to_cache(base_image)
    # extrapolate: derive the packages the dockerfile would have
    # installed on top of this base
    stub_image = get_dockerfile_packages()
    return [base_image, stub_image]
def analyze_docker_image(image_obj, redo=False, dfile_lock=False, dfobj=None):
    '''Given a DockerImage object, for each layer, retrieve the packages,
    first looking up in cache and if not there then looking up in the
    command library. For looking up in command library first mount the
    filesystem and then look up the command library for commands to run
    in chroot. If there's a dockerfile object available, extract any
    package information from the layers.'''
    # running list of packages gathered while walking the layers
    master_list = []
    prepare_for_analysis(image_obj, dfobj)
    # first layer determines the shell used for the rest of the layers
    shell = analyze_first_layer(image_obj, master_list, redo)
    # remaining layers may also feed package info back into the
    # dockerfile object when one was supplied
    analyze_subsequent_layers(
        image_obj, shell, master_list, redo, dfobj, dfile_lock)
    # cache everything so a rerun can short-circuit
    common.save_to_cache(image_obj)
def teardown(image_obj):
    """Teardown and cleanup after analysis"""
    # first persist the analyzed layers into the cache...
    common.save_to_cache(image_obj)
    # ...then remove working directories and mount points
    rootfs.clean_up()