def _copy_file_to_image(self, renderer, resolver, volume, filename,
                        stat_entry=None):
    """Copy a single filesystem file into the output AFF4 volume.

    Files smaller than self.MAX_SIZE_FOR_SEGMENT are stored as plain zip
    segments; larger files are written as chunked AFF4Image streams.  If
    the file can not be opened/read through the OS and we are on Windows,
    fall back to raw NTFS access via self._copy_raw_file_to_image().

    Args:
      renderer: Renderer used for status output.
      resolver: The AFF4 resolver managing objects in this volume.
      volume: The destination AFF4 volume.
      filename: Path of the file to acquire.
      stat_entry: Optional pre-computed os.stat() result for filename;
        stat'ed fresh when None.
    """
    if stat_entry is None:
        try:
            stat_entry = os.stat(filename)
        except (OSError, IOError):
            # File vanished or is inaccessible - nothing we can copy.
            return

    # The member's URN inside the volume mirrors the source path.
    image_urn = volume.urn.Append(utils.SmartStr(filename))
    out_fd = None
    try:
        with open(filename, "rb") as in_fd:
            renderer.format("Adding file {0}\n", filename)
            # Record the original path so the image can be mapped back.
            resolver.Set(image_urn, lexicon.AFF4_STREAM_ORIGINAL_FILENAME,
                         rdfvalue.XSDString(os.path.abspath(filename)))

            progress = aff4.ProgressContext(length=stat_entry.st_size)

            if stat_entry.st_size < self.MAX_SIZE_FOR_SEGMENT:
                # Small file: store it as a single zip segment.
                with volume.CreateMember(image_urn) as out_fd:
                    # Only enable compression if we are using it.
                    if (self.compression !=
                            lexicon.AFF4_IMAGE_COMPRESSION_STORED):
                        out_fd.compression_method = zip.ZIP_DEFLATE
                    out_fd.WriteStream(in_fd, progress=progress)
            else:
                # Large file: write a chunked AFF4Image stream and record
                # the compression scheme on the stream's metadata.
                resolver.Set(image_urn, lexicon.AFF4_IMAGE_COMPRESSION,
                             rdfvalue.URN(self.compression))
                with aff4_image.AFF4Image.NewAFF4Image(
                        resolver, image_urn, volume.urn) as out_fd:
                    out_fd.WriteStream(in_fd, progress=progress)
    except IOError:
        try:
            # Currently we can only access NTFS filesystems.
            if self.profile.metadata("os") == "windows":
                self.session.logging.debug(
                    "Unable to read %s. Attempting raw access.", filename)

                # We can not just read this file, parse it from the NTFS.
                self._copy_raw_file_to_image(
                    renderer, resolver, volume, filename)
        except IOError:
            # Raw access failed too - log and move on to the next file.
            self.session.logging.warn(
                "Unable to read %s. Skipping.", filename)
    finally:
        # NOTE(review): out_fd is bound by a "with" block above, so on the
        # normal path it has already been finalized when we get here;
        # presumably resolver.Close() is idempotent in pyaff4 - confirm.
        if out_fd:
            resolver.Close(out_fd)
def _WriteToTarget(self, resolver, source_as, image_stream):
    """Stream the physical address space into image_stream.

    Builds a temporary AFF4 map that mirrors every readable run of
    source_as, then writes that map into the output stream so only the
    mapped ranges are acquired.

    Returns the total number of bytes covered by the source runs.
    """
    # Temporary map used to control physical memory acquisition.
    address_map = aff4_map.AFF4Map(resolver)

    # Wrap the address space so the resolver can serve reads from it.
    wrapper = AddressSpaceWrapper(resolver=resolver, address_space=source_as)
    bytes_total = 0
    with resolver.CachePut(wrapper) as source_aff4:
        # Mirror each run of the source address space into the map and
        # tally its length for progress reporting.
        for run in source_as.get_address_ranges():
            bytes_total += run.length
            address_map.AddRange(
                run.start, run.start, run.length, source_aff4.urn)

        image_stream.WriteStream(
            address_map,
            progress=aff4.ProgressContext(length=bytes_total))
    return bytes_total