def unzip(src_path, dest_dir):
    zipfile.ZipFile(src_path).extractall(dest_dir)
def UnpackApk(file_name, dst, ignore_paths=()):
    zippy = zipfile.ZipFile(file_name)
    files_to_extract = [f for f in zippy.namelist() if f not in ignore_paths]
    zippy.extractall(dst, files_to_extract)
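# A minimal, hedged usage sketch for the two helpers above; the archive names and
# target directories are hypothetical and only illustrate the call signatures.
# Note that ZipFile.extractall() takes the destination path first and an optional
# "members" iterable second, which is what UnpackApk relies on to skip entries.
import zipfile

unzip("sample.zip", "extracted")
UnpackApk("sample.apk", "apk_out", ignore_paths=("META-INF/CERT.RSA",))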
import urllib

# Start measuring time [1]
start = time.time()

# Download the database [2]
url = 'https://www.dropbox.com/s/ldhzb3o9qai14m1/catdog.zip?dl=1'
u = urllib.urlopen(url)
data = u.read()
u.close()
with open(os.getcwd() + '/' + 'catdog.zip', "wb") as f:
    f.write(data)

# [3] Unzip the database
zip_ref = zipfile.ZipFile(os.getcwd() + '/' + 'catdog.zip', 'r')
zip_ref.extractall(os.getcwd())
zip_ref.close()

# Choose a random even number between 6 and 20
N = np.random.randint(6, 20)
if N % 2 != 0:
    N = N - 1

# Verify if the Resized images folder exists
if not os.path.exists(os.getcwd() + '/' + 'catdog' + '/' + 'catdogResize'):
    os.mkdir(os.getcwd() + '/' + 'catdog' + '/' + 'catdogResize')

plt.figure(1)
for i in range(0, N):
    # Choose a random image from the folder and resize it
def on_finished(self, evt):
    """Finish button has been pressed. Give the specified values."""
    # gridsizer depending on the type of the chosen model
    if self.type in ('IPort', 'OPort'):
        page = self.pages[6] if self.type == 'IPort' else self.pages[7]
        gridSizer = page.sizer.GetItem(2).GetSizer().GetItem(
            0).GetSizer().GetItem(0).GetSizer()
        textCtrl = gridSizer.GetItem(1).GetWindow()
        self.label = textCtrl.GetValue()
        self.id = gridSizer.GetItem(3).GetWindow().GetValue()
        self.python_path = os.path.join(DOMAIN_PATH, 'Basic', self.type + '.py')
    else:
        if self.type == 'Atomic':
            gridSizer = self.pages[1].sizer.GetItem(2).GetSizer().GetItem(
                0).GetSizer().GetItem(0).GetSizer()
            filebrowse_python = gridSizer.GetItem(9).GetWindow()
            filebrowse_plugin = gridSizer.GetItem(11).GetWindow()
            filebrowse_model = self.pages[3].sizer.GetItem(2).GetWindow()

            ### test if extension exists
            model_path = filebrowse_model.GetValue()
            if not model_path.endswith('.amd'):
                model_path += '.amd'

            # give the label
            textCtrl = gridSizer.GetItem(1).GetWindow()

            ### give the python filename, inputs and outputs of the corresponding model
            in_SpinCtrl = gridSizer.GetItem(5).GetWindow()
            out_SpinCtrl = gridSizer.GetItem(7).GetWindow()

            ### give the specific behavior which can be Default, Generator or Collector (Scope and Disk)
            specific_behavior = gridSizer.GetItem(3).GetWindow()
            self.specific_behavior = specific_behavior.GetValue()

        elif self.type == 'Coupled':
            gridSizer = self.pages[2].sizer.GetItem(2).GetSizer().GetItem(
                0).GetSizer().GetItem(0).GetSizer()
            filebrowse_python = gridSizer.GetItem(7).GetWindow()
            filebrowse_plugin = gridSizer.GetItem(9).GetWindow()
            filebrowse_model = self.pages[4].sizer.GetItem(2).GetWindow()

            ### test if extension exists
            model_path = filebrowse_model.GetValue()
            if not model_path.endswith('.cmd'):
                model_path += '.cmd'

            # give the label
            textCtrl = gridSizer.GetItem(1).GetWindow()

            ### give the python filename, inputs and outputs of the corresponding model
            in_SpinCtrl = gridSizer.GetItem(3).GetWindow()
            out_SpinCtrl = gridSizer.GetItem(5).GetWindow()

        self.model_path = os.path.abspath(model_path)
        self.python_path = filebrowse_python.GetValue()
        self.plugin_path = filebrowse_plugin.GetValue()
        self.label = textCtrl.GetValue()
        self.id = 0
        self.inputs = in_SpinCtrl.GetValue()
        self.outputs = out_SpinCtrl.GetValue()

        ### model path exists?
        if os.path.exists(self.model_path):
            msg = _("%s already exists.\nDo you want to rewrite it ?") % (
                self.model_path)
            dlg = wx.MessageDialog(
                self, msg, _('Wizard Manager'),
                wx.YES_NO | wx.NO_DEFAULT | wx.ICON_QUESTION)
            if dlg.ShowModal() in (wx.ID_NO, wx.ID_CANCEL):
                self.overwrite_flag = False

        if self.overwrite_flag:
            ### create the model on the disk
            try:
                zout = zipfile.ZipFile(self.model_path, "w")
            except Exception as info:
                sys.stdout.write(
                    _("ERROR: Unable to create Zip file in Wizard GUI (%s)" % info))
                return False
            else:
                if self.python_path == '':
                    if self.type == 'Atomic':
                        string = atomicCode(self.label)
                    else:
                        string = coupledCode(self.label)
                    python_name = os.path.basename(
                        self.model_path).split('.')[0]
                    zout.writestr("%s.py" % python_name, string.encode('utf-8'))
                    self.python_path = os.path.join(
                        self.model_path, "%s.py" % python_name)
                else:
                    py_file = os.path.basename(self.python_path)
                    zout.write(self.python_path, py_file)
                    self.python_path = os.path.join(
                        self.model_path, py_file)
                    ### force model file (.amd or .cmd) to have the same name as the chosen python file
                    #ext = os.path.basename(self.model_path).split('.')[1]
                    #self.model_path = os.path.join(os.path.dirname(self.model_path), "%s.%s"%(py_file.split('.')[0],ext))

                zout.writestr('DEVSimPyModel.dat', _("Call SaveFile method first!"))

                if self.plugin_path != '':
                    zout.write(
                        self.plugin_path,
                        os.path.join('plugins',
                                     os.path.basename(self.plugin_path)))
            finally:
                zout.close()
def unzip(self):  # system.img
    if self.rominfo.flag == 1:
        print('Invalid format!!!')
        sys.exit(1)
    if quiet == 0:
        if input('Unpack the flashable zip file? y/n>>>') == 'n':
            print('Cancelled.')
            sys.exit(0)
    if self.rominfo.flag == 5:
        z = zipfile.ZipFile(self.file)
        z.extractall(path='flashable')
        z.close()
        print('Functional flashable zip unpacked. Output directory: flashable')
        print('Done.')
        sys.exit(0)
    if self.rominfo.abflag == True and zipfile.is_zipfile(self.file) == True:
        z = zipfile.ZipFile(self.file)
        z.extract('payload.bin')
        z.close()
        self.file = 'payload.bin'
        self.abunpack()
        print('Done.')
        sys.exit(0)
    if self.rominfo.onlyfolder == True:
        z = zipfile.ZipFile(self.file)
        for name in z.namelist():
            if name.find('system') == 0:
                z.extract(name)
        z.close()
        print('Done.')
        sys.exit(0)
    if self.rominfo.olnyimg == True:
        z = zipfile.ZipFile(self.file)
        z.extract('system.img')
        z.close()
        if self.unpacktodir == 1:
            self.file = 'system.img'
            self.imgunpack()
        print('Done.')
        sys.exit(0)
    if self.rominfo.brotil == True:
        z = zipfile.ZipFile(self.file)
        z.extract('system.transfer.list')
        z.extract('system.new.dat.br')
        z.close()
        self.brotli()
        self.newdatunpack()
        if self.unpacktodir == 1:
            self.file = 'system.img'
            self.imgunpack()
        print('Done.')
        sys.exit(0)
    if self.rominfo.newdat == True:
        z = zipfile.ZipFile(self.file)
        z.extract('system.transfer.list')
        z.extract('system.new.dat')
        z.close()
        self.newdatunpack()
        if self.unpacktodir == 1:
            self.file = 'system.img'
            self.imgunpack()
        print('Done.')
        sys.exit(0)
    if self.unpacktodir == 0:
        print('Done! Output directory: /')
        return
    else:
        pass
def walk_zip(self, settings, plugin, pattern, regex):
    """Walk the archived files within the plugin."""
    with zipfile.ZipFile(plugin[0], 'r') as z:
        zipped = [(join(basename(plugin[0]), normpath(fn)), plugin[1])
                  for fn in sorted(z.namelist())]
        self.find_files(zipped, "", pattern, settings, regex)
def process_func(idx):
    # Load original image.
    orig_idx = fields['orig_idx'][idx]
    orig_file = fields['orig_file'][idx]
    orig_path = os.path.join(celeba_dir, 'img_celeba', orig_file)
    img = PIL.Image.open(orig_path)

    # Choose oriented crop rectangle.
    lm = landmarks[orig_idx]
    eye_avg = (lm[0] + lm[1]) * 0.5 + 0.5
    mouth_avg = (lm[3] + lm[4]) * 0.5 + 0.5
    eye_to_eye = lm[1] - lm[0]
    eye_to_mouth = mouth_avg - eye_avg
    x = eye_to_eye - rot90(eye_to_mouth)
    x /= np.hypot(*x)
    x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8)
    y = rot90(x)
    c = eye_avg + eye_to_mouth * 0.1
    quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])
    zoom = 1024 / (np.hypot(*x) * 2)

    # Shrink.
    shrink = int(np.floor(0.5 / zoom))
    if shrink > 1:
        size = (int(np.round(float(img.size[0]) / shrink)),
                int(np.round(float(img.size[1]) / shrink)))
        img = img.resize(size, PIL.Image.ANTIALIAS)
        quad /= shrink
        zoom *= shrink

    # Crop.
    border = max(int(np.round(1024 * 0.1 / zoom)), 3)
    crop = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))),
            int(np.ceil(max(quad[:, 0]))), int(np.ceil(max(quad[:, 1]))))
    crop = (max(crop[0] - border, 0), max(crop[1] - border, 0),
            min(crop[2] + border, img.size[0]),
            min(crop[3] + border, img.size[1]))
    if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]:
        img = img.crop(crop)
        quad -= crop[0:2]

    # Simulate super-resolution.
    superres = int(np.exp2(np.ceil(np.log2(zoom))))
    if superres > 1:
        img = img.resize((img.size[0] * superres, img.size[1] * superres),
                         PIL.Image.ANTIALIAS)
        quad *= superres
        zoom /= superres

    # Pad.
    pad = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))),
           int(np.ceil(max(quad[:, 0]))), int(np.ceil(max(quad[:, 1]))))
    pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0),
           max(pad[2] - img.size[0] + border, 0),
           max(pad[3] - img.size[1] + border, 0))
    if max(pad) > border - 4:
        pad = np.maximum(pad, int(np.round(1024 * 0.3 / zoom)))
        img = np.pad(np.float32(img),
                     ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')
        h, w, _ = img.shape
        y, x, _ = np.mgrid[:h, :w, :1]
        mask = 1.0 - np.minimum(
            np.minimum(np.float32(x) / pad[0], np.float32(y) / pad[1]),
            np.minimum(np.float32(w - 1 - x) / pad[2],
                       np.float32(h - 1 - y) / pad[3]))
        blur = 1024 * 0.02 / zoom
        img += (scipy.ndimage.gaussian_filter(img, [blur, blur, 0]) -
                img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)
        img += (np.median(img, axis=(0, 1)) - img) * np.clip(mask, 0.0, 1.0)
        img = PIL.Image.fromarray(np.uint8(np.clip(np.round(img), 0, 255)), 'RGB')
        quad += pad[0:2]

    # Transform.
    img = img.transform((4096, 4096), PIL.Image.QUAD,
                        (quad + 0.5).flatten(), PIL.Image.BILINEAR)
    img = img.resize((1024, 1024), PIL.Image.ANTIALIAS)
    img = np.asarray(img).transpose(2, 0, 1)

    # Verify MD5.
    md5 = hashlib.md5()
    md5.update(img.tobytes())
    assert md5.hexdigest() == fields['proc_md5'][idx]

    # Load delta image and original JPG.
    with zipfile.ZipFile(
            os.path.join(delta_dir, 'deltas%05d.zip' % (idx - idx % 1000)),
            'r') as zip:
        delta_bytes = zip.read('delta%05d.dat' % idx)
    with open(orig_path, 'rb') as file:
        orig_bytes = file.read()

    # Decrypt delta image, using original JPG data as decryption key.
    algorithm = cryptography.hazmat.primitives.hashes.SHA256()
    backend = cryptography.hazmat.backends.default_backend()
    salt = bytes(orig_file, 'ascii')
    kdf = cryptography.hazmat.primitives.kdf.pbkdf2.PBKDF2HMAC(
        algorithm=algorithm, length=32, salt=salt, iterations=100000,
        backend=backend)
    key = base64.urlsafe_b64encode(kdf.derive(orig_bytes))
    delta = np.frombuffer(
        bz2.decompress(cryptography.fernet.Fernet(key).decrypt(delta_bytes)),
        dtype=np.uint8).reshape(3, 1024, 1024)

    # Apply delta image.
    img = img + delta

    # Verify MD5.
    md5 = hashlib.md5()
    md5.update(img.tobytes())
    assert md5.hexdigest() == fields['final_md5'][idx]
    return img
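# Hedged sketch (not part of the original tool): the inverse of the decryption step
# above, showing how a delta array could be packed under the same scheme. The names
# pack_delta and delta_array are hypothetical; the key derivation simply mirrors the
# reader: PBKDF2-HMAC-SHA256 over the original JPG bytes, salted with the file name.
import base64
import bz2

import cryptography.fernet
import cryptography.hazmat.backends
import cryptography.hazmat.primitives.hashes
import cryptography.hazmat.primitives.kdf.pbkdf2
import numpy as np


def pack_delta(delta_array, orig_bytes, orig_file):
    kdf = cryptography.hazmat.primitives.kdf.pbkdf2.PBKDF2HMAC(
        algorithm=cryptography.hazmat.primitives.hashes.SHA256(),
        length=32,
        salt=bytes(orig_file, 'ascii'),
        iterations=100000,
        backend=cryptography.hazmat.backends.default_backend())
    key = base64.urlsafe_b64encode(kdf.derive(orig_bytes))
    # Compress the raw uint8 delta, then encrypt it with the derived Fernet key.
    payload = bz2.compress(np.asarray(delta_array, dtype=np.uint8).tobytes())
    return cryptography.fernet.Fernet(key).encrypt(payload)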
def unzip(data_name, from_path, to_path):
    print('extracting {} ...'.format(data_name))
    with zipfile.ZipFile(from_path) as zf:
        zf.extractall(to_path)
async def _(event):
    if event.fwd_from:
        return
    input_str = event.pattern_match.group(1)
    if not os.path.isdir(Config.TMP_DOWNLOAD_DIRECTORY):
        os.makedirs(Config.TMP_DOWNLOAD_DIRECTORY)
    if event.reply_to_msg_id:
        reply_message = await event.get_reply_message()
        # https://gist.github.com/udf/e4e3dbb2e831c8b580d8fddd312714f7
        if not reply_message.sticker:
            return
        sticker = reply_message.sticker
        sticker_attrib = find_instance(sticker.attributes, DocumentAttributeSticker)
        if not sticker_attrib.stickerset:
            await event.reply("This sticker is not part of a pack")
            return
        is_a_s = is_it_animated_sticker(reply_message)
        file_ext_ns_ion = "webp"
        file_caption = "https://t.me/RoseSupport/33801"
        if is_a_s:
            file_ext_ns_ion = "tgs"
            file_caption = ("Forward the ZIP file to @AnimatedStickersRoBot to get "
                            "lottIE JSON containing the vector information.")
        sticker_set = await borg(GetStickerSetRequest(sticker_attrib.stickerset))
        pack_file = os.path.join(Config.TMP_DOWNLOAD_DIRECTORY,
                                 sticker_set.set.short_name, "pack.txt")
        if os.path.isfile(pack_file):
            os.remove(pack_file)
        # Sticker emojis are retrieved as a mapping of
        # <emoji>: <list of document ids that have this emoji>
        # So we need to build a mapping of <document id>: <list of emoji>
        # Thanks, Durov
        emojis = defaultdict(str)
        for pack in sticker_set.packs:
            for document_id in pack.documents:
                emojis[document_id] += pack.emoticon

        async def download(sticker, emojis, path, file):
            await borg.download_media(sticker, file=os.path.join(path, file))
            with open(pack_file, "a") as f:
                f.write(
                    f"{{'image_file': '{file}','emojis':{emojis[sticker.id]}}},"
                )

        pending_tasks = [
            asyncio.ensure_future(
                download(document, emojis,
                         Config.TMP_DOWNLOAD_DIRECTORY + sticker_set.set.short_name,
                         f"{i:03d}.{file_ext_ns_ion}"))
            for i, document in enumerate(sticker_set.documents)
        ]
        await event.edit(
            f"Downloading {sticker_set.set.count} sticker(s) to .{Config.TMP_DOWNLOAD_DIRECTORY}{sticker_set.set.short_name}..."
        )
        num_tasks = len(pending_tasks)
        while 1:
            done, pending_tasks = await asyncio.wait(
                pending_tasks, timeout=2.5,
                return_when=asyncio.FIRST_COMPLETED)
            try:
                await event.edit(
                    f"Downloaded {num_tasks - len(pending_tasks)}/{sticker_set.set.count}"
                )
            except MessageNotModifiedError:
                pass
            if not pending_tasks:
                break
        await event.edit("Downloading to my local completed")
        # https://gist.github.com/udf/e4e3dbb2e831c8b580d8fddd312714f7
        directory_name = Config.TMP_DOWNLOAD_DIRECTORY + sticker_set.set.short_name
        zipf = zipfile.ZipFile(directory_name + ".zip", "w", zipfile.ZIP_DEFLATED)
        zipdir(directory_name, zipf)
        zipf.close()
        await borg.send_file(event.chat_id,
                             directory_name + ".zip",
                             caption=file_caption,
                             force_document=True,
                             allow_cache=False,
                             reply_to=event.message.id,
                             progress_callback=progress)
        try:
            os.remove(directory_name + ".zip")
            os.remove(directory_name)
        except:
            pass
        await event.edit("task Completed")
        await asyncio.sleep(3)
        await event.delete()
    else:
        await event.edit("TODO: Not Implemented")
def openZipStream(self, sourceZipStream):
    if not self.isOpen:
        self.basefile = self.url
        self.baseurl = self.url  # url gets changed by selection
        self.fs = zipfile.ZipFile(sourceZipStream, mode="r")
        self.isOpen = True
def write_data_export_zip(self, request, response):
    with zipfile.ZipFile(response, mode="w") as zip_file:
        with zip_file.open(
            f"{self.slug}.json", mode="w"
        ) as json_binary_file, io.TextIOWrapper(json_binary_file) as json_file:
            json.dump(
                {
                    "email": self.user.email,
                    "date_joined": self.user.date_joined.isoformat(),
                    "last_login": self.user.last_login.isoformat(),
                    "url": request.build_absolute_uri(self.get_absolute_url()),
                    "is_public": self.is_public,
                    "name": self.name,
                    "city_or_town": self.city_or_town,
                    "country": self.country,
                    "cause_areas": list(map(CauseArea.label, self.cause_areas)),
                    "cause_areas_other": self.cause_areas_other,
                    "available_to_volunteer": self.available_to_volunteer,
                    "open_to_job_offers": self.open_to_job_offers,
                    "expertise_areas": list(
                        map(ExpertiseArea.label, self.expertise_areas)
                    ),
                    "expertise_areas_other": self.expertise_areas_other,
                    "career_interest_areas": list(
                        map(ExpertiseArea.label, self.career_interest_areas)
                    ),
                    "available_as_speaker": self.available_as_speaker,
                    "topics_i_speak_about": self.topics_i_speak_about,
                    "organisational_affiliations": list(
                        map(
                            OrganisationalAffiliation.label,
                            self.organisational_affiliations,
                        )
                    ),
                    "summary": self.summary,
                    "giving_pledges": list(
                        map(GivingPledge.label, self.giving_pledges)
                    ),
                    "member_of_local_groups": [
                        request.build_absolute_uri(local_group.get_absolute_uri())
                        for local_group in self.local_groups.all()
                    ],
                    "organiser_of_local_groups": [
                        request.build_absolute_uri(local_group.get_absolute_uri())
                        for local_group in self.user.localgroup_set.all()
                    ],
                    "aliases": [
                        request.build_absolute_uri(
                            urls.reverse("profile", kwargs={"slug": slug.slug})
                        )
                        for slug in self.slugs.filter(redirect=True)
                    ],
                    "legacy_hub_url": (
                        self.legacy_record
                        and request.build_absolute_uri(
                            urls.reverse(
                                "profile_legacy",
                                kwargs={"legacy_record": self.legacy_record},
                            )
                        )
                    ),
                },
                json_file,
                indent=2,
            )
        if self.image:
            with self.image.open() as image_src_file, zip_file.open(
                self.slug + pathlib.PurePath(self.image.name).suffix, mode="w"
            ) as image_dst_file:
                shutil.copyfileobj(image_src_file, image_dst_file)
def open(self, reloadCache=False):
    if not self.isOpen:
        if (self.isZip or self.isTarGz or self.isEis or self.isXfd or
                self.isRss or self.isInstalledTaxonomyPackage) and self.cntlr:
            self.basefile = self.cntlr.webCache.getfilename(
                self.url, reload=reloadCache)
        else:
            self.basefile = self.url
        self.baseurl = self.url  # url gets changed by selection
        if not self.basefile:
            return  # an error should have been logged
        if self.isZip:
            try:
                self.fs = zipfile.ZipFile(openFileStream(
                    self.cntlr, self.basefile, 'rb'), mode="r")
                self.isOpen = True
            except EnvironmentError as err:
                self.logError(err)
                pass
        elif self.isTarGz:
            try:
                self.fs = tarfile.open(self.basefile, "r:gz")
                self.isOpen = True
            except EnvironmentError as err:
                self.logError(err)
                pass
        elif self.isEis:
            # check first line of file
            buf = b''
            try:
                file = open(self.basefile, 'rb')
                more = True
                while more:
                    l = file.read(8)
                    if len(l) < 8:
                        break
                    if len(buf) == 0 and l.startswith(b"<?xml "):  # not compressed
                        buf = l + file.read()  # not compressed
                        break
                    compressedBytes = file.read(struct.unpack(">L", l[0:4])[0])
                    if len(compressedBytes) <= 0:
                        break
                    buf += zlib.decompress(compressedBytes)
                file.close()
            except EnvironmentError as err:
                self.logError(err)
                pass
            #uncomment to save for debugging
            #with open("c:/temp/test.xml", "wb") as f:
            #    f.write(buf)
            if buf.startswith(b"<?xml "):
                try:
                    # must strip encoding
                    str = buf.decode(XmlUtil.encoding(buf))
                    endEncoding = str.index("?>", 0, 128)
                    if endEncoding > 0:
                        str = str[endEncoding + 2:]
                    file = io.StringIO(initial_value=str)
                    parser = etree.XMLParser(recover=True, huge_tree=True)
                    self.eisDocument = etree.parse(file, parser=parser)
                    file.close()
                    self.isOpen = True
                except EnvironmentError as err:
                    self.logError(err)
                    return  # provide error message later
                except etree.LxmlError as err:
                    self.logError(err)
                    return  # provide error message later
        elif self.isXfd:
            # check first line of file
            file = open(self.basefile, 'rb')
            firstline = file.readline()
            if firstline.startswith(
                    b"application/x-xfdl;content-encoding=\"asc-gzip\""):
                # file has been gzipped
                base64input = file.read(-1)
                file.close()
                file = None
                fb = base64.b64decode(base64input)
                ungzippedBytes = b""
                totalLenUncompr = 0
                i = 0
                while i < len(fb):
                    lenCompr = fb[i + 0] * 256 + fb[i + 1]
                    lenUncomp = fb[i + 2] * 256 + fb[i + 3]
                    lenRead = 0
                    totalLenUncompr += lenUncomp
                    gzchunk = (bytes((31, 139, 8, 0)) + fb[i:i + lenCompr])
                    try:
                        with gzip.GzipFile(fileobj=io.BytesIO(gzchunk)) as gf:
                            while True:
                                readSize = min(16384, lenUncomp - lenRead)
                                readBytes = gf.read(size=readSize)
                                lenRead += len(readBytes)
                                ungzippedBytes += readBytes
                                if len(readBytes) == 0 or (lenUncomp - lenRead) <= 0:
                                    break
                    except IOError as err:
                        pass  # provide error message later
                    i += lenCompr + 4
                #for learning the content of xfd file, uncomment this:
                #with open("c:\\temp\\test.xml", "wb") as fh:
                #    fh.write(ungzippedBytes)
                file = io.StringIO(initial_value=ungzippedBytes.decode("utf-8"))
            else:
                # position to start of file
                file.seek(0, io.SEEK_SET)
            try:
                self.xfdDocument = etree.parse(file)
                file.close()
                self.isOpen = True
            except EnvironmentError as err:
                self.logError(err)
                return  # provide error message later
            except etree.LxmlError as err:
                self.logError(err)
                return  # provide error message later
        elif self.isRss:
            try:
                self.rssDocument = etree.parse(self.basefile)
                self.isOpen = True
            except EnvironmentError as err:
                self.logError(err)
                return  # provide error message later
            except etree.LxmlError as err:
                self.logError(err)
                return  # provide error message later
        elif self.isInstalledTaxonomyPackage:
            self.isOpen = True
            # load mappings
            try:
                metadataFiles = self.taxonomyPackageMetadataFiles
                if len(metadataFiles) != 1:
                    raise IOError(
                        _("Taxonomy package must contain one and only one "
                          "metadata file: {0}.").format(', '.join(metadataFiles)))
                # HF: this won't work, see DialogOpenArchive for correct code
                # not sure if it is used
                taxonomyPackage = PackageManager.parsePackage(self.cntlr, self.url)
                fileSourceDir = os.path.dirname(self.baseurl) + os.sep
                self.mappedPaths = \
                    dict((prefix,
                          remapping if isHttpUrl(remapping)
                          else (fileSourceDir + remapping.replace("/", os.sep)))
                         for prefix, remapping in taxonomyPackage["remappings"].items())
            except EnvironmentError as err:
                self.logError(err)
                return  # provide error message later
def prepare(self):
    """Prepare env for analysis."""
    # Get SeDebugPrivilege for the Python process. It will be needed in
    # order to perform the injections.
    grant_privilege("SeDebugPrivilege")
    grant_privilege("SeLoadDriverPrivilege")

    # Initialize logging.
    init_logging()

    # Parse the analysis configuration file generated by the agent.
    self.config = Config(cfg="analysis.conf")

    # Pass the configuration through to the Process class.
    Process.set_config(self.config)

    # Set virtual machine clock.
    set_clock(datetime.datetime.strptime(self.config.clock, "%Y%m%dT%H:%M:%S"))

    # Set the default DLL to be used for this analysis.
    self.default_dll = self.config.options.get("dll")

    # If a pipe name has not been set, then generate a random one.
    self.config.pipe = self.get_pipe_path(
        self.config.options.get("pipe", random_string(16, 32)))

    # Generate a random name for the logging pipe server.
    self.config.logpipe = self.get_pipe_path(random_string(16, 32))

    # Initialize and start the Command Handler pipe server. This is going
    # to be used for communicating with the monitored processes.
    self.command_pipe = PipeServer(PipeDispatcher, self.config.pipe,
                                   message=True,
                                   dispatcher=CommandPipeHandler(self))
    self.command_pipe.daemon = True
    self.command_pipe.start()

    # Initialize and start the Log Pipe Server - the log pipe server will
    # open up a pipe that monitored processes will use to send logs to
    # before they head off to the host machine.
    destination = self.config.ip, self.config.port
    self.log_pipe_server = PipeServer(PipeForwarder, self.config.logpipe,
                                      destination=destination)
    self.log_pipe_server.daemon = True
    self.log_pipe_server.start()

    # We update the target according to its category. If it's a file, then
    # we store the target path.
    if self.config.category == "file":
        self.target = os.path.join(os.environ["TEMP"], self.config.file_name)
    elif self.config.category == "archive":
        zip_path = os.path.join(os.environ["TEMP"], self.config.file_name)
        zipfile.ZipFile(zip_path).extractall(os.environ["TEMP"])
        self.target = os.path.join(os.environ["TEMP"],
                                   self.config.options["filename"])
    # If it's a URL, well.. we store the URL.
    else:
        self.target = self.config.target
def test_gzip_fasta():
    strOutputDirectory = "/home/travis/build/JasperBoom/test-output/gzip-fasta"
    strTestDirectory = "/home/travis/build/JasperBoom/caltha/tests"
    os.makedirs(strOutputDirectory)
    sp.call([
        "/home/travis/build/JasperBoom/caltha/src/caltha",
        "-i", strTestDirectory + "/data/umi5_primer_gzip.fasta.gz",
        "-t", strOutputDirectory + "/tab.zip",
        "-z", strOutputDirectory + "/zip.zip",
        "-b", strOutputDirectory + "/blast.zip",
        "-f", "fasta",
        "-l", "umi5",
        "-a", "primer",
        "-u", "20",
        "-y", "0.97",
        "-c", "1",
        "-w", "GGRKCHGGDACWGGDTGAAC",
        "-r", "GATCAWACAAATAAAGGTAWTCGATC",
        "-d", strOutputDirectory,
        "-@", "1",
    ])
    with zipfile.ZipFile(strOutputDirectory + "/blast.zip", "r") as objZip:
        objZip.extractall(strOutputDirectory)
    with zipfile.ZipFile(strOutputDirectory + "/tab.zip", "r") as objZip:
        objZip.extractall(strOutputDirectory)
    with zipfile.ZipFile(strOutputDirectory + "/zip.zip", "r") as objZip:
        objZip.extractall(strOutputDirectory)
    lstOutputFiles = [
        "umi5_primer_gzip_BLAST.fasta",
        "umi5_primer_gzip_TABULAR.tbl",
    ]
    dicLineCounts = {}
    for strFileName in lstOutputFiles:
        with open(strOutputDirectory + "/" + strFileName) as oisBlastFile:
            intLineCount = 0
            for strLine in oisBlastFile:
                intLineCount += 1
            dicLineCounts[strFileName] = intLineCount
    lstOutputZips = ["umi5_primer_gzip_PREVALIDATION.zip"]
    for strZipFile in lstOutputZips:
        if (os.path.exists(strOutputDirectory + "/" + strZipFile)
                and os.path.getsize(strOutputDirectory + "/" + strZipFile) > 0):
            blnArchiveCheck = 1
        else:
            blnArchiveCheck = 0
        assert blnArchiveCheck == 1
    assert dicLineCounts["umi5_primer_gzip_BLAST.fasta"] == 1560
    assert dicLineCounts["umi5_primer_gzip_TABULAR.tbl"] == 781
def load_ora(self, filename, feedback_cb=None):
    """Loads from an OpenRaster file"""
    print 'load_ora:'
    t0 = time.time()
    z = zipfile.ZipFile(filename)
    print 'mimetype:', z.read('mimetype').strip()
    xml = z.read('stack.xml')
    image = ET.fromstring(xml)
    stack = image.find('stack')
    w = int(image.attrib['w'])
    h = int(image.attrib['h'])

    def round_up_to_n(value, n):
        assert value >= 0, "function undefined for negative numbers"
        residual = value % n
        if residual:
            value = value - residual + n
        return int(value)

    def get_pixbuf(filename):
        t1 = time.time()
        try:
            fp = z.open(filename, mode='r')
        except KeyError:
            # support for bad zip files (saved by old versions of the GIMP ORA plugin)
            fp = z.open(filename.encode('utf-8'), mode='r')
            print 'WARNING: bad OpenRaster ZIP file. There is an utf-8 encoded filename that does not have the utf-8 flag set:', repr(filename)
        res = self._pixbuf_from_stream(fp, feedback_cb)
        fp.close()
        print '  %.3fs loading %s' % (time.time() - t1, filename)
        return res

    def get_layers_list(root, x=0, y=0):
        res = []
        for item in root:
            if item.tag == 'layer':
                if 'x' in item.attrib:
                    item.attrib['x'] = int(item.attrib['x']) + x
                if 'y' in item.attrib:
                    item.attrib['y'] = int(item.attrib['y']) + y
                res.append(item)
            elif item.tag == 'stack':
                stack_x = int(item.attrib.get('x', 0))
                stack_y = int(item.attrib.get('y', 0))
                res += get_layers_list(item, stack_x, stack_y)
            else:
                print 'Warning: ignoring unsupported tag:', item.tag
        return res

    self.clear()  # this leaves one empty layer
    no_background = True
    # FIXME: don't require tile alignment for frame
    self.set_frame(width=round_up_to_n(w, N), height=round_up_to_n(h, N))

    for layer in get_layers_list(stack):
        a = layer.attrib

        if 'background_tile' in a:
            assert no_background
            try:
                print a['background_tile']
                self.set_background(get_pixbuf(a['background_tile']))
                no_background = False
                continue
            except backgroundsurface.BackgroundError, e:
                print 'ORA background tile not usable:', e

        src = a.get('src', '')
        if not src.lower().endswith('.png'):
            print 'Warning: ignoring non-png layer'
            continue
        pixbuf = get_pixbuf(src)
        name = a.get('name', '')
        x = int(a.get('x', '0'))
        y = int(a.get('y', '0'))
        opac = float(a.get('opacity', '1.0'))
        compositeop = str(a.get('composite-op', DEFAULT_COMPOSITE_OP))
        if compositeop not in VALID_COMPOSITE_OPS:
            compositeop = DEFAULT_COMPOSITE_OP
        visible = not 'hidden' in a.get('visibility', 'visible')
        self.add_layer(insert_idx=0, name=name)
        last_pixbuf = pixbuf
        t1 = time.time()
        self.load_layer_from_pixbuf(pixbuf, x, y)
        layer = self.layers[0]
        self.set_layer_opacity(helpers.clamp(opac, 0.0, 1.0), layer)
        self.set_layer_compositeop(compositeop, layer)
        self.set_layer_visibility(visible, layer)
        print '  %.3fs converting pixbuf to layer format' % (time.time() - t1)

        # strokemap
        fname = a.get('mypaint_strokemap_v2', None)
        if fname:
            if x % N or y % N:
                print 'Warning: dropping non-aligned strokemap'
            else:
                sio = StringIO(z.read(fname))
                layer.load_strokemap_from_file(sio, x, y)
                sio.close()
    'Contratos': '2',
    'Desenhos': '3',
    'Indicações': '4',
    'Marcas': '5',
    'Patentes': '6',
    'Programas': '7',
    'Circuitos': '8'
}

OUTPUT_FOLDER = "./input/"

# downloading JSON with all RPIs since 01/Jan/1900
search_url = BASE_URL + BUSCA + "revista.dataInicial=" + DATA_INICIAL + \
    "&revista.dataFinal=" + DATA_FINAL + "&revista.tipoRevista.id=" + \
    TIPO_REVISTA['Patentes']
patents_rpis = json.loads(urllib.request.urlopen(search_url).read())

# downloading Pxxxx.zip file
download_url = BASE_URL + TXT + patents_rpis[0]['nomeArquivoEscritorio']
zip_file = OUTPUT_FOLDER + patents_rpis[0]['nomeArquivoEscritorio']
urllib.request.urlretrieve(download_url, zip_file)

# extracting Pxxxx.zip file into ./input folder
zfobj = zipfile.ZipFile(zip_file)
for name in zfobj.namelist():
    uncompressed = zfobj.read(name)
    outputFilename = OUTPUT_FOLDER + name
    output = open(outputFilename, 'wb')
    output.write(uncompressed)
    output.close()
os.remove(zip_file)
def unzip(file_to_unzip):
    zipfold = os.path.split(file_to_unzip)[0]
    zip_ref = zipfile.ZipFile(file_to_unzip)
    zip_ref.extractall(zipfold)
    zip_ref.close()
    os.remove(file_to_unzip)
def setup_class(cls):
    import zipfile
    zipf = os.path.join(my_path, "emd_files", "fei_emd_files.zip")
    with zipfile.ZipFile(zipf, 'r') as zipped:
        zipped.extractall(cls.fei_files_path)
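# A hedged companion sketch (an assumption, not from the original test suite): a
# teardown_class that removes whatever setup_class above extracted, so repeated test
# runs start from a clean directory. cls.fei_files_path is reused as-is.
def teardown_class(cls):
    import shutil
    shutil.rmtree(cls.fei_files_path, ignore_errors=True)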
def read(self, file_name):
    result = []
    # The base object of 3mf is a zipped archive.
    archive = zipfile.ZipFile(file_name, "r")
    self._base_name = os.path.basename(file_name)
    try:
        self._root = ET.parse(archive.open("3D/3dmodel.model"))
        self._unit = self._root.getroot().get("unit")

        build_items = self._root.findall("./3mf:build/3mf:item", self._namespaces)

        for build_item in build_items:
            id = build_item.get("objectid")
            object = self._root.find("./3mf:resources/3mf:object[@id='{0}']".format(id), self._namespaces)
            if "type" in object.attrib:
                if object.attrib["type"] == "support" or object.attrib["type"] == "other":
                    # Ignore support objects, as cura does not support these.
                    # We can't guarantee that they won't be made solid.
                    # We also ignore "other", as I have no idea what to do with them.
                    Logger.log("w", "3MF file contained an object of type %s which is not supported by Cura", object.attrib["type"])
                    continue
                elif object.attrib["type"] == "solidsupport" or object.attrib["type"] == "model":
                    pass  # Load these as normal
                else:
                    # We should technically fail at this point because it's an invalid 3MF, but try to continue anyway.
                    Logger.log("e", "3MF file contained an object of type %s which is not supported by the 3mf spec", object.attrib["type"])
                    continue

            build_item_node = self._createNodeFromObject(object, self._base_name + "_" + str(id))

            # compensate for original center position, if object(s) is/are not around its zero position
            transform_matrix = Matrix()
            mesh_data = build_item_node.getMeshData()
            if mesh_data is not None:
                extents = mesh_data.getExtents()
                center_vector = Vector(extents.center.x, extents.center.y, extents.center.z)
                transform_matrix.setByTranslation(center_vector)

            # offset with transform from 3mf
            transform = build_item.get("transform")
            if transform is not None:
                transform_matrix.multiply(self._createMatrixFromTransformationString(transform))

            build_item_node.setTransformation(transform_matrix)

            global_container_stack = Application.getInstance().getGlobalContainerStack()

            # Create a transformation Matrix to convert from 3mf worldspace into ours.
            # First step: flip the y and z axis.
            transformation_matrix = Matrix()
            transformation_matrix._data[1, 1] = 0
            transformation_matrix._data[1, 2] = 1
            transformation_matrix._data[2, 1] = -1
            transformation_matrix._data[2, 2] = 0

            # Second step: 3MF defines the left corner of the machine as center, whereas cura uses the center of the
            # build volume.
            if global_container_stack:
                translation_vector = Vector(x=-global_container_stack.getProperty("machine_width", "value") / 2,
                                            y=-global_container_stack.getProperty("machine_depth", "value") / 2,
                                            z=0)
                translation_matrix = Matrix()
                translation_matrix.setByTranslation(translation_vector)
                transformation_matrix.multiply(translation_matrix)

            # Third step: 3MF also defines a unit, whereas Cura always assumes mm.
            scale_matrix = Matrix()
            scale_matrix.setByScaleVector(self._getScaleFromUnit(self._unit))
            transformation_matrix.multiply(scale_matrix)

            # Pre multiply the transformation with the loaded transformation, so the data is handled correctly.
            build_item_node.setTransformation(build_item_node.getLocalTransformation().preMultiply(transformation_matrix))

            result.append(build_item_node)

    except Exception as e:
        Logger.log("e", "An exception occurred in 3mf reader: %s", e)

    return result
#!/usr/bin/env python
# coding: utf-8

# In[1]:

path_to_zip_file = "datasets.zip"
directory_to_extract_to = ""
import zipfile
zip_ref = zipfile.ZipFile(path_to_zip_file, 'r')
zip_ref.extractall(directory_to_extract_to)
zip_ref.close()

# In[2]:

import pandas as pd
Location = "all_140_in_33.P1.csv"
df = pd.read_csv(Location)
df
                    #ext = os.path.basename(self.model_path).split('.')[1]
                    #self.model_path = os.path.join(os.path.dirname(self.model_path), "%s.%s"%(py_file.split('.')[0],ext))

                zout.writestr('DEVSimPyModel.dat', _("Call SaveFile method first!"))

                if self.plugin_path != '':
                    zout.write(
                        self.plugin_path,
                        os.path.join('plugins',
                                     os.path.basename(self.plugin_path)))
            finally:
                zout.close()
        else:
            ### search python file in archive
            zin = zipfile.ZipFile(self.model_path, 'r')
            info_list = zin.infolist()

            ### if the python file name is the same as the name of the .amd or .cmd model
            name = "%s.py" % os.path.splitext(os.path.basename(self.model_path))[0]
            if name in info_list:
                self.python_path = os.path.join(self.model_path, name)
            ### otherwise look for the .py in the model, excluding plugins.py
            else:
                for item in info_list:
                    name, ext = os.path.splitext(item.filename)
                    if ext == ".py" and name != 'plugins':
                        self.python_path = os.path.join(
                            self.model_path, item.filename)
                        ### TODO: get class from python file and test with inspect module if it is a submodule of DomainBehavior
                        break
    hradius = hlength * 0.6

    if gap:
        diff = cpv.scale(normal, gap)
        xyz1 = cpv.sub(xyz1, diff)
        xyz2 = cpv.add(xyz2, diff)

    xyz3 = cpv.add(cpv.scale(normal, hlength), xyz2)

    obj = [cgo.CYLINDER] + xyz1 + xyz3 + [radius] + color1 + color2 + \
          [cgo.CONE] + xyz3 + xyz2 + [hradius, 0.0] + color2 + color2 + \
          [1.0, 0.0]
    return obj


dirpath = tempfile.mkdtemp()
zip_dir = 'out.zip'
with zipfile.ZipFile(zip_dir) as hs_zip:
    hs_zip.extractall(dirpath)

cmd.load(join(dirpath, "protein.pdb"), "protein")
cmd.show("cartoon", "protein")

if dirpath:
    f = join(dirpath, "label_threshold_10.mol2")
else:
    f = "label_threshold_10.mol2"

cmd.load(f, 'label_threshold_10')
cmd.hide('everything', 'label_threshold_10')
cmd.label("label_threshold_10", "name")
cmd.set("label_font_id", 7)
cmd.set("label_size", -0.4)
def sampleextract(sample):
    print b.OKBLUE + " Extracting to :", os.getcwd() + "/extract/\n" + b.ENDC
    with zipfile.ZipFile(sample, 'r') as zip_ref:
        zip_ref.extractall("./extract")
if board in ("sky9x", "taranis"): l += u"SOUNDS/%s/SYSTEM;" % directory l += f + u";" + s + u"\n" csvFile.write(l.encode("utf-8")) for s, f in sounds: if s and f: l = u"" if board in ("sky9x", "taranis"): l += u"SOUNDS/%s;" % directory l += f + u";" + s + u"\n" csvFile.write(l.encode("utf-8")) csvFile.close() if "zip" in sys.argv: zip_name = "%s-%s.zip" % (voice, board) zip = zipfile.ZipFile(zip_name, "w", zipfile.ZIP_DEFLATED) for s, f in systemSounds: if s and f: generate(s, f) if board in ("sky9x", "taranis"): zip.write(f, "SOUNDS/%s/SYSTEM/" % directory + f) else: zip.write(f, f) os.remove(f) for s, f in sounds: if s and f: generate(s, f) if board in ("sky9x", "taranis"): zip.write(f, "SOUNDS/%s/" % directory + f) else: zip.write(f, f)
def __init__(self, file=''):
    print('Processing ROM information... this can take anywhere from 1 s to 2 min')
    '''Read ROM information.
    The input file can be a fastboot (flash) package or a recovery flashable zip.'''
    if os.path.exists(file) == False or os.path.getsize(file) == 0:
        print('E: Please select a valid file!!!')
        self.flag = 1  # invalid file path
        return
    self.file = file
    if file.find('payload.bin') > -1:
        self.abflag = True
        self.flag = 3
        print('Found an A/B (System As Root) update file (Android 10 dynamic partitions)')
        return
    if file.find('.ozip') > -1 and zipfile.is_zipfile(file) == True:
        with open(file, 'rb') as fr:
            magic = fr.read(12)
            if magic == b"OPPOENCRYPT!" or magic[:2] == b"PK":
                self.ozip = True
                print('Found OPPO OZIP! It must be decrypted before the ROM information can be read')
                fr.close()
                return
        print('This ROM may not be an OPPO OZIP?!')
    if file.find('.tar.md5') > -1 and tarfile.is_tarfile(file):
        self.samsumgodinfile = True
        a = str(get_saminfo(file))
        if a:
            a = a.replace("b'", '')
            a = a.replace(".tar\\n'", '')
            li = a.split(' ')
            a = li[2].split('_')
            print('ROM type: ' + a[0] + '\nVersion: ' + a[1] + '\nRelease tag: ' + a[5] + '\nFirmware type: official')
            print('Found a Samsung Odin flash file!')
            print('W: Only ROMs of type AP support unpacking the system image')
            return
        print('Maybe: Samsung Odin flash file found?!')
    if file.find('.tgz') > -1 and tarfile.is_tarfile(file):
        # MIUI
        tar = tarfile.open(file, "r:gz")
        l = tar.getnames()
        for a in l:
            if a.find('system.img') > -1:
                self.flag = 3
                self.miuitar = True
                print('Maybe: MIUI fastboot package found')
                return
            elif a.find('super.img') > -1:
                self.flag = 3
                self.miuitar = True
                self.super = True
                print('Maybe: MIUI fastboot package found')
                return
    if zipfile.is_zipfile(file) == False:
        print('E: Unsupported format!!!!')
        self.flag = 2
        return
    self.file = file
    z = zipfile.ZipFile(file)
    self.l = z.namelist()
    self.flag = 4
    #z.close()
    if 'system.img' in self.l:
        self.olnyimg = True
    if 'system/framework/framework.jar' in self.l:
        self.onlyfolder = True
    if 'system.new.dat.br' in self.l and 'system.transfer.list' in self.l:
        self.brotil = True
    if 'system.new.dat' in self.l and 'system.transfer.list' in self.l:
        self.newdat = True
    if 'system.transfer.list' in self.l:
        z.extract('system.transfer.list')
        f = open('system.transfer.list', 'r')
        v = int(f.readline())
        f.close()
        if v == 1:
            print('Android Lollipop 5.0 detected!\n')
            self.androidVersion = 'Lollipop 5.0 API 21'
        elif v == 2:
            print('Android Lollipop 5.1 detected!\n')
            self.androidVersion = 'Lollipop 5.1 API 22'
        elif v == 3:
            print('Android Marshmallow 6.x detected!\n')
            self.androidVersion = 'Marshmallow 6.x API 23'
        elif v == 4:
            print('Android Nougat 7.x / Oreo 8.x or later detected!\n')
            self.androidVersion = 'Nougat 7.x or higher API 24+'
        os.remove('system.transfer.list')
    if 'payload.bin' in self.l:
        self.abflag = True
        self.flag = 4
        print('Found an A/B (System As Root) update file (Android 10 dynamic partitions)')
    if 'META-INF/com/android/metadata' in self.l:
        z.extract('META-INF/com/android/metadata')
        f = open('META-INF/com/android/metadata', encoding='UTF-8')
        l = []
        for i in f:
            l.append(i.strip())
        f.close()
        os.remove('META-INF/com/android/metadata')
        for i in l:
            x = i.split('=')
            if x[0] == 'post-build':
                text = x[1]
                self.info = text.split('/')
                if len(self.info) == 6:
                    print('ROM vendor: ' + self.info[0] + '\nDevice codename: ' + self.info[1] + '\nVersion: ' + self.info[2] + '\nAndroid build version: ' + self.info[3] + '\nFirmware version: ' + self.info[4] + '\nRelease tag: ' + self.info[5])
                    z.close()
                    return
                else:
                    print('Your device fingerprint may have been modified; ROM information cannot be read!!!')
    else:
        print('metadata file does not exist?!')
    z.close()
    for names in self.l:  # read the Android version from build.prop
        if names.find('build.prop') > -1:
            try:
                z.extract(names)
            except:
                pass
            if os.path.exists(names):
                f = open(names, encoding='UTF-8')
                l = []
                for i in f:
                    l.append(i.strip())
                f.close()
                os.remove(names)
                for i in l:
                    x = i.split('=')
                    if x[0] == 'ro.build.fingerprint':  # Android fingerprint
                        text = x[1]
                        self.info = text.split('/')
                        if len(self.info) == 6:
                            print('ROM vendor: ' + self.info[0] + '\nDevice codename: ' + self.info[1] + '\nVersion: ' + self.info[2] + '\nAndroid build version: ' + self.info[3] + '\nFirmware version: ' + self.info[4] + '\nRelease tag: ' + self.info[5])
                            z.close()
                            return
                        else:
                            print('Your device fingerprint may have been modified; ROM information cannot be read!!!')
    if 'META-INF/com/google/android/updater-script' in self.l:
        z.extract('META-INF/com/google/android/updater-script')
        f = open('META-INF/com/google/android/updater-script', encoding='UTF-8')
        l = []
        for i in f:
            l.append(i.strip())
        f.close()
        os.remove('META-INF/com/google/android/updater-script')
        for i in l:
            if 'ui_print("Target:' in i:
                i = i.replace('ui_print("Target:', '')
                i = i.replace('");', '')
                i = i.replace(' ', '')
                self.info = i.split('/')
                if len(self.info) == 6:
                    print('ROM vendor: ' + self.info[0] + '\nDevice codename: ' + self.info[1] + '\nVersion: ' + self.info[2] + '\nAndroid build version: ' + self.info[3] + '\nFirmware version: ' + self.info[4] + '\nRelease tag: ' + self.info[5])
                    z.close()
                    return
            if (i.find('update-binary') > -1 and i.find('ummy') > -1) or i.find('#MAGISK') > -1:
                self.flag = 5
                print('This zip is a functional flashable package! (Magisk/opengapps/ak2/ak3/etc.)')
                z.close()
                return
        print('W: Cannot read ROM information from updater-script!!')
    if zipfile.is_zipfile(file) == False:
        if file.find('.kdz') > -1:
            print('Maybe: LG .kdz file found!\nTesting whether it is a .kdz file...')
            if lg_kd_kdz(file).islgkdzfile():
                self.lgkdz = True
                self.flag = 3
                self.type = 3
                print('Found an LG .kdz file!')
                return
            else:
                print('This file may not be an LG .kdz file?')
                self.flag = 2
                return
        if file.find('.dz') > -1:
            print('Maybe: LG .dz file found!\nTesting whether it is a .dz file...')
            if lg_kd_kdz(file).islgdzfile():
                self.lgkd = True
                self.flag = 3
                self.type = 3
                print('Found an LG .dz file!')
            else:
                print('This file may not be an LG .dz file?')
                self.flag = 2
            return
        print('Invalid, unreadable format?')
        self.flag = 2
        return
    z.close()
def plcupload(self):
    u"""Uploads the selected project to the RevPi.
    @return True if processing succeeded"""
    tup = self.lst_typeup.index(self.var_typeup.get())
    dirselect = ""
    dirtmp = None
    filelist = []
    fileselect = None
    foldername = ""
    rscfile = None

    if tup == 0:  # File
        fileselect = tkfd.askopenfilenames(
            parent=self.master,
            title="Upload Python program...",
            initialdir=self.opt.get("plcupload_dir", homedir),
            filetypes=(("Python", "*.py"), (_("All files"), "*.*"))
        )
        if type(fileselect) == tuple and len(fileselect) > 0:
            for file in fileselect:
                filelist.append(file)

    elif tup == 1:  # Folder
        dirselect = tkfd.askdirectory(
            parent=self.master,
            title=_("Folder to upload"),
            mustexist=True,
            initialdir=self.opt.get("plcupload_dir", homedir)
        )
        # Remember the folder name so it can be created on the RevPi
        foldername = os.path.basename(dirselect)
        if type(dirselect) == str and dirselect != "":
            filelist = self.create_filelist(dirselect)

    elif tup == 2:  # Zip
        fileselect = tkfd.askopenfilename(
            parent=self.master,
            title=_("Upload Zip archive..."),
            initialdir=self.opt.get("plcupload_file", ""),
            initialfile=self.revpi + ".zip",
            filetypes=(
                (_("Zip archive"), "*.zip"), (_("All files"), "*.*")
            )
        )
        if type(fileselect) == str and fileselect != "":
            # Check the zip file
            if zipfile.is_zipfile(fileselect):
                dirtmp = mkdtemp()
                fhz = zipfile.ZipFile(fileselect)
                fhz.extractall(dirtmp)
                fhz.close()
                filelist = self.create_filelist(dirtmp)
                dirselect, rscfile = self.check_replacedir(dirtmp)
            else:
                tkmsg.showerror(
                    _("Error"),
                    _("The specified file is not a ZIP archive."),
                    parent=self.master
                )
                return False

    elif tup == 3:  # TarGz
        fileselect = tkfd.askopenfilename(
            parent=self.master,
            title=_("Upload TarGz archive..."),
            initialdir=self.opt.get("plcupload_file", ""),
            initialfile=self.revpi + ".tar.gz",
            filetypes=(
                (_("TGZ archive"), "*.tar.gz"), (_("All files"), "*.*")
            )
        )
        if type(fileselect) == str and fileselect != "":
            # Check the tar file
            if tarfile.is_tarfile(fileselect):
                dirtmp = mkdtemp()
                fht = tarfile.open(fileselect)
                fht.extractall(dirtmp)
                fht.close()
                filelist = self.create_filelist(dirtmp)
                dirselect, rscfile = self.check_replacedir(dirtmp)
            else:
                tkmsg.showerror(
                    _("Error"),
                    _("The specified file is not a TAR archive."),
                    parent=self.master
                )
                return False

    # If no files were selected
    if len(filelist) == 0:
        return True

    # Clean up before the transfer if selected
    if self.var_cleanup.get() and not self.xmlcli.plcuploadclean():
        tkmsg.showerror(
            _("Error"),
            _("There was an error deleting the files on the "
              "Revolution Pi."),
            parent=self.master
        )
        return False

    # Read the currently configured program (for the uploaded flag)
    opt_program = self.xmlcli.get_config()
    opt_program = opt_program.get("plcprogram", "none.py")
    self.uploaded = True
    ec = 0

    for fname in filelist:
        if fname == rscfile:
            continue

        # FIXME: error handling while reading files
        with open(fname, "rb") as fh:

            # Work out the file name
            if dirselect == "":
                sendname = os.path.basename(fname)
            else:
                # Put the folder name into the file path for the RevPi
                sendname = os.path.join(
                    foldername, fname.replace(dirselect, "")[1:]
                )

            # Check whether this file name is already set as the start program
            if sendname == opt_program:
                self.uploaded = False

            # Transfer the file
            try:
                ustatus = self.xmlcli.plcupload(
                    Binary(gzip.compress(fh.read())), sendname)
            except Exception:
                ec = -2
                break

            if not ustatus:
                ec = -1
                break

    if ec == 0:
        tkmsg.showinfo(
            _("Success"),
            _("The PLC program was transferred successfully."),
            parent=self.master
        )
        if self.var_picup.get():
            if rscfile is not None:
                self.setpictoryrsc(rscfile)
            else:
                tkmsg.showerror(
_("Error"), _("There is no piCtory configuration in this " "archive."), parent=self.master ) # Einstellungen speichern if tup == 0: self.opt["plcupload_dir"] = os.path.dirname(fileselect[0]) elif tup == 1: self.opt["plcupload_dir"] = dirselect else: self.opt["plcupload_file"] = os.path.dirname(fileselect) self.opt["typeup"] = self.var_typeup.get() self.opt["picup"] = self.var_picup.get() _savedefaults(self.revpi, self.opt) elif ec == -1: tkmsg.showerror( _("Error"), _("The Revolution Pi could not process some parts of the " "transmission."), parent=self.master ) elif ec == -2: tkmsg.showerror( _("Error"), _("Errors occurred during transmission"), parent=self.master ) # Temp-Dir aufräumen if dirtmp is not None: rmtree(dirtmp) return True
def get_zip_names(zipname):
    z = zipfile.ZipFile(zipname)
    names = z.namelist()
    z.close()
    return names
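# Equivalent form using a context manager (a small sketch, not from the original
# source): "with" guarantees the archive handle is closed even if namelist() raises.
def get_zip_names_cm(zipname):
    import zipfile
    with zipfile.ZipFile(zipname) as z:
        return z.namelist()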
def save_ora(self, filename, options=None, **kwargs):
    print 'save_ora:'
    t0 = time.time()
    tempdir = tempfile.mkdtemp('mypaint')
    # use .tmp extension, so we don't overwrite a valid file if there is an exception
    z = zipfile.ZipFile(filename + '.tmpsave', 'w', compression=zipfile.ZIP_STORED)

    # work around a permission bug in the zipfile library: http://bugs.python.org/issue3394
    def write_file_str(filename, data):
        zi = zipfile.ZipInfo(filename)
        zi.external_attr = 0100644 << 16
        z.writestr(zi, data)

    write_file_str('mimetype', 'image/openraster')  # must be the first file
    image = ET.Element('image')
    stack = ET.SubElement(image, 'stack')
    x0, y0, w0, h0 = self.get_effective_bbox()
    a = image.attrib
    a['w'] = str(w0)
    a['h'] = str(h0)

    def store_pixbuf(pixbuf, name):
        tmp = join(tempdir, 'tmp.png')
        t1 = time.time()
        pixbuf.save(tmp, 'png')
        print '  %.3fs pixbuf saving %s' % (time.time() - t1, name)
        z.write(tmp, name)
        os.remove(tmp)

    def store_surface(surface, name, rect=[]):
        tmp = join(tempdir, 'tmp.png')
        t1 = time.time()
        surface.save_as_png(tmp, *rect, **kwargs)
        print '  %.3fs surface saving %s' % (time.time() - t1, name)
        z.write(tmp, name)
        os.remove(tmp)

    def add_layer(x, y, opac, surface, name, layer_name, visible=True,
                  compositeop=DEFAULT_COMPOSITE_OP, rect=[]):
        layer = ET.Element('layer')
        stack.append(layer)
        store_surface(surface, name, rect)
        a = layer.attrib
        if layer_name:
            a['name'] = layer_name
        a['src'] = name
        a['x'] = str(x)
        a['y'] = str(y)
        a['opacity'] = str(opac)
        if compositeop not in VALID_COMPOSITE_OPS:
            compositeop = DEFAULT_COMPOSITE_OP
        a['composite-op'] = compositeop
        if visible:
            a['visibility'] = 'visible'
        else:
            a['visibility'] = 'hidden'
        return layer

    for idx, l in enumerate(reversed(self.layers)):
        if l.is_empty():
            continue
        opac = l.opacity
        x, y, w, h = l.get_bbox()
        el = add_layer(x-x0, y-y0, opac, l._surface,
                       'data/layer%03d.png' % idx, l.name, l.visible,
                       l.compositeop, rect=(x, y, w, h))
        # strokemap
        sio = StringIO()
        l.save_strokemap_to_file(sio, -x, -y)
        data = sio.getvalue()
        sio.close()
        name = 'data/layer%03d_strokemap.dat' % idx
        el.attrib['mypaint_strokemap_v2'] = name
        write_file_str(name, data)

    # save background as layer (solid color or tiled)
    bg = self.background
    # save as fully rendered layer
    x, y, w, h = self.get_bbox()
    l = add_layer(x-x0, y-y0, 1.0, bg, 'data/background.png', 'background',
                  DEFAULT_COMPOSITE_OP, rect=(x, y, w, h))
    x, y, w, h = bg.get_pattern_bbox()
    # save as single pattern (with corrected origin)
    store_surface(bg, 'data/background_tile.png', rect=(x+x0, y+y0, w, h))
    l.attrib['background_tile'] = 'data/background_tile.png'

    # preview (256x256)
    t2 = time.time()
    print '  starting to render full image for thumbnail...'
    thumbnail_pixbuf = self.render_thumbnail()
    store_pixbuf(thumbnail_pixbuf, 'Thumbnails/thumbnail.png')
    print '  total %.3fs spent on thumbnail' % (time.time() - t2)

    helpers.indent_etree(image)
    xml = ET.tostring(image, encoding='UTF-8')
    write_file_str('stack.xml', xml)
    z.close()
    os.rmdir(tempdir)
    if os.path.exists(filename):
        os.remove(filename)  # windows needs that
    os.rename(filename + '.tmpsave', filename)
    print '%.3fs save_ora total' % (time.time() - t0)
    return thumbnail_pixbuf
def do_export(self, filename, used_for_merging_dbs=False):
    self.orig_dir = os.getcwd()
    if not os.path.isabs(filename):
        filename = os.path.join(self.config()["export_dir"], filename)
    os.chdir(os.path.dirname(filename))
    if used_for_merging_dbs is True:
        metadata = {}
    else:
        metadata = self.main_widget().show_export_metadata_dialog()
    if metadata is None:  # Cancelled.
        os.chdir(self.orig_dir)
        return -1
    metadata_file = open("METADATA", "w", encoding="utf-8")
    for key, value in metadata.items():
        print(key + ":" + value.strip().replace("\n", "<br>"),
              file=metadata_file)
    metadata_file.close()
    db = self.database()
    w = self.main_widget()
    # Generate log entries.
    if used_for_merging_dbs:
        w.set_progress_text(_("Extracting cards..."))
    else:
        w.set_progress_text(_("Exporting cards..."))
    active_objects = db.active_objects_to_export()
    number_of_entries = len(active_objects["tags"]) + \
        len(active_objects["fact_view_ids"]) + \
        len(active_objects["card_type_ids"]) + \
        len(active_objects["media_filenames"]) + \
        len(active_objects["_card_ids"]) + \
        len(active_objects["_fact_ids"])
    xml_file = open("cards.xml", "w", encoding="utf-8")
    xml_format = XMLFormat()
    xml_file.write(xml_format.log_entries_header(number_of_entries))
    w.set_progress_range(number_of_entries)
    w.set_progress_update_interval(number_of_entries / 20)
    for tag in active_objects["tags"]:
        log_entry = LogEntry()
        log_entry["type"] = EventTypes.ADDED_TAG
        log_entry["o_id"] = tag.id
        log_entry["name"] = tag.name
        xml_file.write(xml_format.repr_log_entry(log_entry))
        w.increase_progress(1)
    for fact_view_id in active_objects["fact_view_ids"]:
        fact_view = db.fact_view(fact_view_id, is_id_internal=False)
        log_entry = LogEntry()
        log_entry["type"] = EventTypes.ADDED_FACT_VIEW
        log_entry["o_id"] = fact_view.id
        log_entry["name"] = fact_view.name
        log_entry["q_fact_keys"] = repr(fact_view.q_fact_keys)
        log_entry["a_fact_keys"] = repr(fact_view.a_fact_keys)
        log_entry["q_fact_key_decorators"] = \
            repr(fact_view.q_fact_key_decorators)
        log_entry["a_fact_key_decorators"] = \
            repr(fact_view.a_fact_key_decorators)
        log_entry["a_on_top_of_q"] = repr(fact_view.a_on_top_of_q)
        log_entry["type_answer"] = repr(fact_view.type_answer)
        if fact_view.extra_data:
            log_entry["extra"] = repr(fact_view.extra_data)
        xml_file.write(xml_format.repr_log_entry(log_entry))
        w.increase_progress(1)
    for card_type_id in active_objects["card_type_ids"]:
        card_type = db.card_type(card_type_id, is_id_internal=False)
        log_entry = LogEntry()
        log_entry["type"] = EventTypes.ADDED_CARD_TYPE
        log_entry["o_id"] = card_type.id
        log_entry["name"] = card_type.name
        log_entry["fact_keys_and_names"] = \
            repr(card_type.fact_keys_and_names)
        log_entry["fact_views"] = repr([fact_view.id for fact_view
                                        in card_type.fact_views])
        log_entry["unique_fact_keys"] = \
            repr(card_type.unique_fact_keys)
        log_entry["required_fact_keys"] = \
            repr(card_type.required_fact_keys)
        log_entry["keyboard_shortcuts"] = \
            repr(card_type.keyboard_shortcuts)
        if card_type.extra_data:
            log_entry["extra"] = repr(card_type.extra_data)
        xml_file.write(xml_format.repr_log_entry(log_entry))
        w.increase_progress(1)
    for media_filename in active_objects["media_filenames"]:
        log_entry = LogEntry()
        log_entry["type"] = EventTypes.ADDED_MEDIA_FILE
        log_entry["fname"] = media_filename
        xml_file.write(str(xml_format.repr_log_entry(log_entry)))
        w.increase_progress(1)
    for _fact_id in active_objects["_fact_ids"]:
        fact = db.fact(_fact_id, is_id_internal=True)
        log_entry = LogEntry()
        log_entry["type"] = EventTypes.ADDED_FACT
        log_entry["o_id"] = fact.id
        for fact_key, value in fact.data.items():
            log_entry[fact_key] = value
        xml_file.write(xml_format.repr_log_entry(log_entry))
        w.increase_progress(1)
    for _card_id in active_objects["_card_ids"]:
        card = db.card(_card_id, is_id_internal=True)
        log_entry = LogEntry()
        log_entry["type"] = EventTypes.ADDED_CARD
        log_entry["o_id"] = card.id
        log_entry["card_t"] = card.card_type.id
        log_entry["fact"] = card.fact.id
        log_entry["fact_v"] = card.fact_view.id
        log_entry["tags"] = ",".join([tag.id for tag in card.tags])
        if used_for_merging_dbs:
            log_entry["c_time"] = card.creation_time
            log_entry["m_time"] = card.modification_time
            log_entry["gr"] = card.grade
            log_entry["e"] = card.easiness
            log_entry["ac_rp"] = card.acq_reps
            log_entry["rt_rp"] = card.ret_reps
            log_entry["lps"] = card.lapses
            log_entry["ac_rp_l"] = card.acq_reps_since_lapse
            log_entry["rt_rp_l"] = card.ret_reps_since_lapse
            log_entry["l_rp"] = card.last_rep
            log_entry["n_rp"] = card.next_rep
        else:
            log_entry["gr"] = -1
            log_entry["e"] = 2.5
            log_entry["ac_rp"] = 0
            log_entry["rt_rp"] = 0
            log_entry["lps"] = 0
            log_entry["ac_rp_l"] = 0
            log_entry["rt_rp_l"] = 0
            log_entry["l_rp"] = -1
            log_entry["n_rp"] = -1
        if card.extra_data:
            log_entry["extra"] = repr(card.extra_data)
        xml_file.write(xml_format.repr_log_entry(log_entry))
        w.increase_progress(1)
    xml_file.write(xml_format.log_entries_footer())
    xml_file.close()
    # Make archive (Zipfile requires a .zip extension).
    zip_file = zipfile.ZipFile(filename + ".zip", "w",
                               compression=zipfile.ZIP_DEFLATED)
    zip_file.write("cards.xml")
    zip_file.write("METADATA")
    w.close_progress()
    if used_for_merging_dbs:
        w.set_progress_text(_("Extracting media files..."))
    else:
        w.set_progress_text(_("Bundling media files..."))
    number_of_media_files = len(active_objects["media_filenames"])
    w.set_progress_range(number_of_media_files)
    w.set_progress_update_interval(number_of_media_files / 100)
    for media_filename in active_objects["media_filenames"]:
        full_path = os.path.normpath(
            os.path.join(self.database().media_dir(), media_filename))
        if not os.path.exists(full_path):
            self.main_widget().show_error(
                _("Missing filename: " + full_path))
            continue
        zip_file.write(full_path, media_filename,
                       compress_type=zipfile.ZIP_STORED)
        w.increase_progress(1)
    zip_file.close()
    if os.path.exists(filename):
        os.remove(filename)
    os.rename(filename + ".zip", filename)
    os.remove("cards.xml")
    os.remove("METADATA")
    os.chdir(self.orig_dir)
    w.close_progress()
def unzip(filepath):
    print("Extracting: " + filepath)
    dirpath = os.path.dirname(filepath)
    with zipfile.ZipFile(filepath) as zf:
        zf.extractall(dirpath)
    os.remove(filepath)