def make_shortcut(emulator, flags, rom, shortcut_name):
    """Generate a Windows desktop shortcut (.lnk) for an emulator + ROM pair.

    Writes a one-off PowerShell script (``make_shortcut.ps1``) that creates the
    shortcut via the WScript.Shell COM object, then executes it.

    Parameters
    ----------
    emulator : str
        Path to the emulator executable.
    flags : str
        Command-line flags passed to the emulator before the ROM path.
    rom : str
        Path to the ROM file. Single quotes are replaced with backticks so the
        path survives PowerShell's single-quoted argument string.
    shortcut_name : str
        Base name used for the .lnk file and for the matching .ico lookup.

    Notes
    -----
    Relies on module-level globals ``win_path_to_desktops``, ``path_to_ICOS``,
    ``win_path_to_ICOS`` and ``win_exe`` — assumed to be configured elsewhere
    in this module (TODO: confirm).
    """
    emulator = PureWindowsPath(Path(emulator))
    # Backticks escape single quotes inside PowerShell single-quoted strings.
    rom = PureWindowsPath(Path(rom.replace("'", "`")))

    desktop = win_path_to_desktops + '\\' + shortcut_name + '.lnk'

    # Fall back to a placeholder icon when no per-game icon exists.
    ico_check = path_to_ICOS + '/' + shortcut_name + '.ico'
    if not os.path.exists(ico_check):
        ico_name = win_path_to_ICOS + '\\es_not_found.ico'
    else:
        ico_name = win_path_to_ICOS + '\\' + shortcut_name + '.ico'
    ico_name = PureWindowsPath(Path(ico_name))

    fcontents = '$WScriptShell = New-Object -ComObject WScript.Shell;$Shortcut = $WScriptShell.CreateShortcut("{}");$Shortcut.TargetPath = \'"{}"\';$ShortCut.Arguments=\'{} "{}"\';$shortcut.IconLocation=\"{}\";$Shortcut.Save()'.format(
        desktop, emulator, flags, rom, ico_name)
    print(fcontents)

    # NOTE(review): the original called .format(shortcut_name)/.format(win_exe)
    # on strings with no placeholders — both were no-ops and were removed.
    # The script name is fixed so the write and the invocation always match.
    with open("make_shortcut.ps1", "w") as f:
        f.write(fcontents)

    ostr = "powershell.exe -ExecutionPolicy Bypass -File make_shortcut.ps1"
    print(ostr)
    os.system(ostr)
def checkerflickerplusanalyzer(exp_name, stimulusnr, clusterstoanalyze=None,
                               frametimingsfraction=None, cutoff=4):
    """
    Analyzes checkerflicker-like data, typically interspersed
    stimuli in between chunks of checkerflicker.
    e.g. checkerflickerplusmovie, frozennoise

    Parameters:
    ----------
        exp_name:
            Experiment name.
        stimulusnr:
            Number of the stimulus to be analyzed.
        clusterstoanalyze:
            Number of clusters should be analyzed. Default is None.

            First N cells will be analyzed if this parameter is given.
            In case of long recordings it might make sense to first
            look at a subset of cells before starting to analyze
            the whole dataset.

        frametimingsfraction:
            Fraction of the recording to analyze. Should be a number
            between 0 and 1. e.g. 0.3 will analyze the first 30% of
            the whole recording.
        cutoff:
            Worst rating that is wanted for the analysis. Default
            is 4. The source of this value is manual rating of each
            cluster.
    """
    exp_dir = iof.exp_dir_fixer(exp_name)
    stimname = iof.getstimname(exp_dir, stimulusnr)
    exp_name = os.path.split(exp_dir)[-1]

    clusters, metadata = asc.read_spikesheet(exp_dir, cutoff=cutoff)

    # Check that the inputs are as expected.
    if clusterstoanalyze:
        if clusterstoanalyze > len(clusters[:, 0]):
            warnings.warn('clusterstoanalyze is larger '
                          'than number of clusters in dataset. '
                          'All cells will be included.')
            clusterstoanalyze = None
    if frametimingsfraction:
        if not 0 < frametimingsfraction < 1:
            raise ValueError('Invalid input for frametimingsfraction: {}. '
                             'It should be a number between 0 and 1'
                             ''.format(frametimingsfraction))

    scr_width = metadata['screen_width']
    scr_height = metadata['screen_height']
    refresh_rate = metadata['refresh_rate']

    parameters = asc.read_parameters(exp_dir, stimulusnr)

    stx_h = parameters['stixelheight']
    stx_w = parameters['stixelwidth']

    # Check whether any parameters are given for margins, calculate
    # screen dimensions.
    marginkeys = ['tmargin', 'bmargin', 'rmargin', 'lmargin']
    margins = []
    for key in marginkeys:
        margins.append(parameters.get(key, 0))

    # Subtract bottom and top from vertical dimension; left and right
    # from horizontal dimension
    scr_width = scr_width - sum(margins[2:])
    scr_height = scr_height - sum(margins[:2])

    nblinks = parameters['Nblinks']
    bw = parameters.get('blackwhite', False)

    # Gaussian stimuli are not supported yet, we need to ensure we
    # have a black and white stimulus
    if bw is not True:
        raise ValueError('Gaussian stimuli are not supported yet!')

    seed = parameters.get('seed', -1000)

    sx, sy = scr_height / stx_h, scr_width / stx_w

    # Make sure that the number of stimulus pixels are integers
    # Rounding down is also possible but might require
    # other considerations.
    if sx % 1 == 0 and sy % 1 == 0:
        sx, sy = int(sx), int(sy)
    else:
        raise ValueError('sx and sy must be integers')

    filter_length, frametimings = asc.ft_nblinks(exp_dir, stimulusnr)

    if parameters['stimulus_type'] in ['FrozenNoise',
                                       'checkerflickerplusmovie']:
        runfr = parameters['RunningFrames']
        frofr = parameters['FrozenFrames']
        # To generate the frozen noise, a second seed is used.
        # The default value of this is -10000 as per StimulateOpenGL
        secondseed = parameters.get('secondseed', -10000)

        if parameters['stimulus_type'] == 'checkerflickerplusmovie':
            mblinks = parameters['Nblinksmovie']
            # Retrieve the number of frames (files) from parameters['path']
            ipath = PureWindowsPath(parameters['path']).as_posix()
            repldict = iof.config('stimuli_path_replace')
            for needle, repl in repldict.items():
                ipath = ipath.replace(needle, repl)
            ipath = os.path.normpath(ipath)  # Windows compatibility
            moviefr = len([
                name for name in os.listdir(ipath)
                if os.path.isfile(os.path.join(ipath, name))
                and name.lower().endswith('.raw')
            ])
            noiselen = (runfr + frofr) * nblinks
            movielen = moviefr * mblinks
            triallen = noiselen + movielen

            ft_on, ft_off = asc.readframetimes(exp_dir, stimulusnr,
                                               returnoffsets=True)
            # Interleave onset and offset pulse times into one array.
            frametimings = np.empty(ft_on.shape[0] * 2, dtype=float)
            frametimings[::2] = ft_on
            frametimings[1::2] = ft_off

            import math
            ntrials = math.floor(frametimings.size / triallen)
            trials = np.zeros((ntrials, runfr + frofr + moviefr))
            for t in range(ntrials):
                frange = frametimings[t * triallen:(t + 1) * triallen]
                trials[t, :runfr + frofr] = frange[:noiselen][::nblinks]
                trials[t, runfr + frofr:] = frange[noiselen:][::mblinks]
            frametimings = trials.ravel()
            # BUGFIX: np.int was removed in NumPy 1.24; plain int() is
            # equivalent here.
            filter_length = int(np.round(.666 * refresh_rate / nblinks))

            # Add frozen movie to frozen noise (for masking)
            frofr += moviefr

    savefname = str(stimulusnr) + '_data'

    if clusterstoanalyze:
        clusters = clusters[:clusterstoanalyze, :]
        print('Analyzing first %s cells' % clusterstoanalyze)
        savefname += '_' + str(clusterstoanalyze) + 'cells'
    if frametimingsfraction:
        frametimingsindex = int(len(frametimings) * frametimingsfraction)
        frametimings = frametimings[:frametimingsindex]
        print('Analyzing first {}% of'
              ' the recording'.format(frametimingsfraction * 100))
        savefname += '_' + str(frametimingsfraction).replace('.', '') +\
            'fraction'

    frame_duration = np.average(np.ediff1d(frametimings))
    total_frames = frametimings.shape[0]

    all_spiketimes = []
    # Store spike triggered averages in a list containing correct shaped
    # arrays
    stas = []
    for i in range(len(clusters[:, 0])):
        spiketimes = asc.read_raster(exp_dir, stimulusnr, clusters[i, 0],
                                     clusters[i, 1])
        spikes = asc.binspikes(spiketimes, frametimings)
        all_spiketimes.append(spikes)
        stas.append(np.zeros((sx, sy, filter_length)))

    # Separate out the repeated parts
    all_spiketimes = np.array(all_spiketimes)
    mask = runfreezemask(total_frames, runfr, frofr, refresh_rate)
    repeated_spiketimes = all_spiketimes[:, ~mask]
    run_spiketimes = all_spiketimes[:, mask]

    # We need to cut down the total_frames by the same amount
    # as spiketimes
    total_run_frames = run_spiketimes.shape[1]

    # To be able to use the same code as checkerflicker analyzer,
    # convert to list again.
    run_spiketimes = list(run_spiketimes)

    # Empirically determined to be best for 32GB RAM
    desired_chunk_size = 21600000

    # Length of the chunks (specified in number of frames)
    chunklength = int(desired_chunk_size / (sx * sy))

    chunksize = chunklength * sx * sy
    nrofchunks = int(np.ceil(total_run_frames / chunklength))

    print(f'\nAnalyzing {stimname}.\nTotal chunks: {nrofchunks}')

    time = startime = datetime.datetime.now()
    timedeltas = []

    quals = np.zeros(len(stas))

    for i in range(nrofchunks):
        randnrs, seed = randpy.ranb(seed, chunksize)
        # Reshape and change 0's to -1's
        stimulus = np.reshape(randnrs, (sx, sy, chunklength),
                              order='F') * 2 - 1
        del randnrs

        # Range of indices we are interested in for the current chunk
        if (i + 1) * chunklength < total_run_frames:
            chunkind = slice(i * chunklength, (i + 1) * chunklength)
            chunkend = chunklength
        else:
            chunkind = slice(i * chunklength, None)
            chunkend = total_run_frames - i * chunklength

        for k in range(filter_length, chunkend - filter_length + 1):
            stim_small = stimulus[:, :,
                                  k - filter_length + 1:k + 1][:, :, ::-1]
            for j in range(clusters.shape[0]):
                spikes = run_spiketimes[j][chunkind]
                if spikes[k] != 0:
                    stas[j] += spikes[k] * stim_small

        qual = np.array([])
        for c in range(clusters.shape[0]):
            qual = np.append(qual, asc.staquality(stas[c]))
        quals = np.vstack((quals, qual))

        # Draw progress bar
        width = 50  # Number of characters
        # BUGFIX: guard against division by zero when there is only one chunk.
        prog = i / max(nrofchunks - 1, 1)
        bar_complete = int(prog * width)
        bar_noncomplete = width - bar_complete
        timedeltas.append(msc.timediff(time))
        # Calculate running avg
        avgelapsed = np.mean(timedeltas)
        elapsed = np.sum(timedeltas)
        etc = startime + elapsed + avgelapsed * (nrofchunks - i)
        sys.stdout.flush()
        sys.stdout.write('\r{}{} |{:4.1f}% ETC: {}'.format(
            '█' * bar_complete, '-' * bar_noncomplete, prog * 100,
            etc.strftime("%a %X")))
        time = datetime.datetime.now()
    sys.stdout.write('\n')

    # Remove the first row which is full of random nrs.
    quals = quals[1:, :]

    max_inds = []
    spikenrs = np.array([spikearr.sum() for spikearr in run_spiketimes])

    for i in range(clusters.shape[0]):
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', '.*true_divide*.')
            stas[i] = stas[i] / spikenrs[i]
        # Find the pixel with largest absolute value
        max_i = np.squeeze(
            np.where(np.abs(stas[i]) == np.max(np.abs(stas[i]))))
        # If there are multiple pixels with largest value,
        # take the first one.
        if max_i.shape != (3, ):
            try:
                max_i = max_i[:, 0]
            # If max_i cannot be found just set it to zeros.
            except IndexError:
                max_i = np.array([0, 0, 0])
        max_inds.append(max_i)

    print(f'Completed. Total elapsed time: {msc.timediff(startime)}\n' +
          f'Finished on {datetime.datetime.now().strftime("%A %X")}')

    savepath = os.path.join(exp_dir, 'data_analysis', stimname)
    if not os.path.isdir(savepath):
        os.makedirs(savepath, exist_ok=True)
    savepath = os.path.join(savepath, savefname)

    # Variables saved by name via locals(); every entry below must remain a
    # bound local at this point.
    keystosave = [
        'clusters', 'frametimings', 'mask', 'repeated_spiketimes',
        'run_spiketimes', 'frame_duration', 'max_inds', 'nblinks', 'stas',
        'stx_h', 'stx_w', 'total_run_frames', 'sx', 'sy', 'filter_length',
        'stimname', 'exp_name', 'spikenrs', 'clusterstoanalyze',
        'frametimingsfraction', 'cutoff', 'quals', 'nrofchunks', 'chunklength'
    ]
    datadict = {}
    for key in keystosave:
        datadict[key] = locals()[key]
    np.savez(savepath, **datadict)

    t = (np.arange(nrofchunks) * chunklength * frame_duration) / refresh_rate
    qmax = np.max(quals, axis=0)
    qualsn = quals / qmax[np.newaxis, :]

    ax = plt.subplot(111)
    ax.plot(t, qualsn, alpha=0.3)
    plt.ylabel('Z-score of center pixel (normalized)')
    plt.xlabel('Minutes of stimulus analyzed')
    plt.ylim([0, 1])
    plf.spineless(ax, 'tr')
    plt.title(f'Recording duration optimization\n{exp_name}\n {savefname}')
    plt.savefig(savepath + '.svg', format='svg')
    plt.close()
class Start(QtGui.QMainWindow):
    """Main window: selects a .tex input and pdflatex.exe, checks ODE
    solutions found in the input file and compiles a results PDF."""

    def __init__(self, parent=None):
        QtGui.QWidget.__init__(self, parent)
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        # Wire up the buttons (old-style PyQt4 signal/slot syntax).
        QtCore.QObject.connect(self.ui.run_button,
                               QtCore.SIGNAL("clicked()"), self.Run)
        QtCore.QObject.connect(self.ui.about_button,
                               QtCore.SIGNAL("clicked()"), self.About)
        QtCore.QObject.connect(self.ui.select_input_button,
                               QtCore.SIGNAL("clicked()"),
                               self.SelectInputFile)
        QtCore.QObject.connect(self.ui.select_pdflatex_button,
                               QtCore.SIGNAL("clicked()"),
                               self.SelectPdflatexFile)
        self.input_file_path = ''
        self.pdflatex_file_path = ''
        self.fo_tex = 0  # output .tex file object once opened
        self.tex_file_path = ''
        self.tex_filename = ''
        self.working_dir = ''

    def AboutClose(self):
        """Hide the About popup."""
        self.about_widget.hide()

    def About(self):
        """Show the About popup built from the designer's Ui_About."""
        self.about_widget = QtGui.QWidget()   # create widget
        self.about = Ui_About()               # get designer's code
        self.about.setupUi(self.about_widget)  # apply designer's code
        QtCore.QObject.connect(self.about.OK_button,
                               QtCore.SIGNAL("clicked()"), self.AboutClose)
        self.about_widget.show()

    def isNot_TeX_File(self, name):
        """Return True when `name` does not end in '.tex'."""
        ending = name[-4:]
        if ending != '.tex':
            return True
        return False

    def SelectInputFile(self):
        """Ask for the input .tex file and derive output name/working dir."""
        fd = QtGui.QFileDialog(self)
        self.input_file_path = fd.getOpenFileName()
        if self.input_file_path == '':
            return
        if self.isNot_TeX_File(self.input_file_path):
            self.ui.input_file_field.setText(
                RedText('> Error: Unsupported file format'))
            self.input_file_path = ''
            return
        self.ui.input_file_field.setText(self.input_file_path)
        print('> got input file path:', self.input_file_path)
        # Potential Error: assumes a Windows-style path.
        self.tex_filename = PureWindowsPath(self.input_file_path).name
        self.tex_filename = self.tex_filename[:-4] + '_results.tex'
        self.working_dir = str(
            PureWindowsPath(self.input_file_path).parents[0])
        print('> working dir set to :', self.working_dir)
        print('> tex file name set to :', self.tex_filename)

    def isNotPdflatexFile(self, name):
        """Return True when `name` does not end in 'pdflatex.exe'."""
        ending = name[len(name) - 12:]
        if ending != 'pdflatex.exe':
            return True
        return False

    def SelectPdflatexFile(self):
        """Ask for the pdflatex.exe binary."""
        fd = QtGui.QFileDialog(self)
        self.pdflatex_file_path = fd.getOpenFileName()
        print('> selected file:', self.pdflatex_file_path)
        if self.pdflatex_file_path == '':
            return
        if self.isNotPdflatexFile(self.pdflatex_file_path):
            self.ui.pdflatex_field.setText(
                RedText('> Error: pdflatex.exe expected'))
            self.pdflatex_file_path = ''
            return
        self.ui.pdflatex_field.setText(self.pdflatex_file_path)

    def Print(self, text):
        """Write plain text to the output .tex file."""
        print(text, file=self.fo_tex)

    def PPrint(self, expr):
        """Write a sympy expression as display math to the output .tex."""
        print('\n$$\n', sympy.latex(expr), '\n$$\n', file=self.fo_tex)

    def InitiateTexFile(self):
        """Create the results .tex file and write its preamble.

        Returns True on failure, False on success."""
        try:
            cur_dir = Path(self.working_dir)
            self.tex_file_path = str(cur_dir / self.tex_filename)
            self.fo_tex = open(self.tex_file_path, 'w')  # TODO close ??
        except OSError:
            # BUGFIX: was `Redtext` (NameError on this error path).
            self.ui.input_file_field.setText(
                RedText('.tex file cannot be created'))
            return True
        initial_string = '''
\\documentclass{article}
\\usepackage[utf8]{inputenc}
\\usepackage{amsthm}
\\usepackage{amsmath}
\\begin{document}
'''
        # \\usepackage[russian]{babel}
        self.Print(initial_string)
        print('> TeX file has been created')
        return False

    def RunPdflatexAndOpenPdf(self):
        """Compile the results .tex, clean aux files and open the PDF."""
        print('> Starting pdflatex.exe')
        args = [self.pdflatex_file_path,
                '-interaction=batchmode',
                '-halt-on-error',
                '-no-shell-escape',
                self.tex_file_path]
        tex_process = subprocess.call(args, cwd=self.working_dir)
        print('> pdflatex process return code : ', tex_process)
        (Path(self.working_dir) / (self.tex_filename[:-4] + '.log')).unlink()
        (Path(self.working_dir) / (self.tex_filename[:-4] + '.aux')).unlink()
        print('> .aux and .log files deleted')
        print('> Openning pdf file...')
        pdf_filename = str(
            Path(self.working_dir) /
            (self.tex_filename.replace('.tex', '.pdf')))
        import os
        os.system("start " + pdf_filename)

    def FilesArePresent(self):
        """Check both paths were chosen; flag missing ones in the UI."""
        if self.isNotPdflatexFile(self.pdflatex_file_path):
            self.ui.pdflatex_field.setText(RedText('select file'))
            return False
        if self.isNot_TeX_File(self.input_file_path):
            self.ui.input_file_field.setText(RedText('select file'))
            return False
        return True

    def CheckODEs(self, fo):
        ''' read text file fo, solve ODEs, print results, close file '''
        funvar = FindFuncAndVar(fo)
        if (funvar[0] == '' or funvar[1] == ''):
            self.ui.input_file_field.setText(
                RedText('ERROR: Function or Variable is not specified'))
            print('> ERROR: Function or Variable is not specified')
            return True
        print('> function and variable defined')
        # reset fo
        fo.close()
        fo = open(self.input_file_path, 'r')
        print('> Parsing input file ...')
        for i, data in enumerate(ParseTex(fo)):
            print('>> iteration :', i)
            print('>> data :', data)
            eqs = LatexToSympy(data + funvar)
            print('>> Checking solution ...')
            res = Check(eqs[0], eqs[1])
            self.Print('ODE number: ' + str(i + 1))
            self.PPrint(eqs[0])
            if res[0] == False:
                self.Print('Given solution:')
                self.PPrint(eqs[1])
                self.Print(' is INCORRECT')  # RED letters
                self.Print('')
                self.Print('The correct one is:')
                self.PPrint(res[1])
            else:
                self.Print('Solution:')
                self.PPrint(eqs[1])
                self.Print('CORRECT')  # GREEN letters
                self.Print('')
        fo.close()
        return False

    def VerifyInputFile(self):
        """Dry-run pdflatex on the input; True means compilation failed."""
        print('> Verifying input file...')
        args = [self.pdflatex_file_path,
                '-interaction=batchmode',
                '-halt-on-error',
                '-no-shell-escape',
                '-draftmode',
                # '-quiet',
                self.input_file_path]
        tex_process = subprocess.call(args, cwd=self.working_dir)
        print('> pdflatex process return code : ', tex_process)
        Path(self.input_file_path[:-4] + '.aux').unlink()
        if tex_process != 0:
            return True   # error occured
        else:
            Path(self.input_file_path[:-4] + '.log').unlink()
            return False  # no error

    def Run(self):
        """Full pipeline: verify input, check ODEs, build the results PDF."""
        print('> RUNNING ...')
        if not self.FilesArePresent():
            print('> ERROR : select files')
            return
        error_occured = self.VerifyInputFile()
        if error_occured:
            print('> ERROR: cannot compile the input file')
            self.ui.input_file_field.setText(
                RedText('ERROR: See .log file for details'))
            # TODO ErrorMessage / look at log file
            return
        # open input file
        try:
            fo = open(self.input_file_path, 'r')
        except (FileNotFoundError, NameError) as e:
            self.ui.input_file_field.setText(RedText('cannot open file'))
            print('> ERROR: cannot open file')
            return
        # initiate tex file
        error_occured = self.InitiateTexFile()
        if error_occured:
            self.ui.input_file_field.setText(
                RedText('cannot create TeX file'))
            self.ui.pdflatex_field.setText(
                RedText('cannot create TeX file'))
            print('> ERROR: cannot create TeX file')
            # BUGFIX: originally fell through and kept running with an
            # unusable self.fo_tex; abort instead.
            return
        error_occured = self.CheckODEs(fo)  # fo will be closed there
        if error_occured:
            print('> ODE checker ERROR')
            # TODO ErrorMessage / look at log file
            return
        self.Print('\\end{document}')
        self.fo_tex.close()
        print('> Parsing finished')
        self.RunPdflatexAndOpenPdf()
class File(object):
    """Abstract class for all file operations.

    The `filepath` represents any filepath accessible on the disk.
    The `relapath` is a relative path representative for the archive file.
    The `filename` is the actual filename of the file.
    The `extrpath` determines the extraction path and may be used for read().
    """

    def __init__(self, filepath=None, contents=None, relapath=None,
                 filename=None, mode=None, password=None, description=None,
                 selected=False, stream=None, platforms=None):
        # BUGFIX: `platforms=[]` was a mutable default argument shared
        # between instances; None is used as the sentinel instead.
        if isinstance(filepath, Path):
            self.filepath = str(filepath)
        else:
            self.filepath = filepath

        # Remove all \\ slashes by always parsing the relapath as a Windows
        # path and changing it to posix.
        if relapath:
            self.relapath = PureWindowsPath(relapath).as_posix()
        else:
            self.relapath = relapath

        self.mode = mode
        self.error = None
        self.description = description
        self.password = password
        self.children = []
        self.duplicate = False
        self.unpacker = None
        self.parent = None
        self.archive = False
        self.identified = False
        self.safelisted = False
        self.safelist_reason = ""

        # Extract the filename from any of the available path components.
        self.filename = ntpath.basename(
            filename or self.relapath or self.filepath or ""
        ).rstrip("\x00") or None

        self._contents = contents
        self._platforms = platforms if platforms is not None else []
        self._selected = selected
        self._selectable = selected
        self._identified_ran = False
        self._human_type = ""
        self._extension = ""
        self._dependency_version = ""
        self._dependency = ""
        self._md5 = None
        self._sha1 = None
        self._sha256 = None
        self._mime = None
        self._magic = None
        self._mime_human = None
        self._magic_human = None
        self._stream = stream
        self._ole = None
        self._ole_tried = False

    @classmethod
    def from_path(cls, filepath, relapath=None, filename=None, password=None):
        """Alternate constructor opening `filepath` as the backing stream."""
        # BUGFIX: was `def from_path(self, ...)` returning a hard-coded
        # File(...); use the conventional `cls` so subclasses work too.
        return cls(filepath=filepath,
                   stream=open(filepath, "rb"),
                   relapath=relapath,
                   filename=filename,
                   password=password)

    def temp_path(self, suffix=""):
        """Copy the contents into a fresh temp file and return its path."""
        # TODO Depending on use-case we may not need a full copy. Perhaps
        # abstract away the "if self.f.filepath ... else ..." logic?
        fd, filepath = tempfile.mkstemp(suffix=suffix)
        shutil.copyfileobj(self.stream, os.fdopen(fd, "wb"))
        return filepath

    @property
    def contents(self):
        # Lazily read from disk when only a filepath was given.
        if self._contents is None and self.filepath:
            self._contents = open(self.filepath, "rb").read()
        return self._contents

    @property
    def stream(self):
        # Always returns a stream positioned at offset 0.
        if not self._stream:
            return io.BytesIO(self.contents)
        self._stream.seek(0)
        return self._stream

    def _identify(self):
        # Run file-type identification at most once.
        if self._identified_ran:
            return
        self._identified_ran = True
        data = identify(self)
        if data:
            self._selected = data[0]
            self._selectable = data[0]
            self._human_type = data[1]
            self._extension = data[2]
            self._platforms = []
            for platform in data[3]:
                self._platforms.append({
                    "platform": platform,
                    "os_version": ""
                })
            self._dependency = data[4]
            self._dependency_version = ""
            self.identified = True

    def _hashes(self):
        # Single streaming pass computes md5/sha1/sha256 together.
        sha256, s, buf = hashlib.sha256(), self.stream, True
        sha1 = hashlib.sha1()
        md5 = hashlib.md5()
        while buf:
            buf = s.read(0x10000)
            sha256.update(buf)
            md5.update(buf)
            sha1.update(buf)
        self._sha256 = sha256.hexdigest()
        self._sha1 = sha1.hexdigest()
        self._md5 = md5.hexdigest()

    @property
    def md5(self):
        if not self._md5:
            self._hashes()
        return self._md5

    @property
    def sha1(self):
        if not self._sha1:
            self._hashes()
        return self._sha1

    @property
    def sha256(self):
        if not self._sha256:
            self._hashes()
        return self._sha256

    @property
    def magic(self):
        if not self._magic and self.filesize:
            self._magic = magic.from_buffer(self.contents)
        return self._magic or ""

    @property
    def mime(self):
        if not self._mime and self.filesize:
            self._mime = magic.from_buffer(self.contents, mime=True)
        return self._mime or ""

    @property
    def magic_human(self):
        # Condensed form: keep first clause plus up to two detail clauses.
        if not self._magic_human:
            # Local renamed from `magic` to avoid shadowing the module.
            magic_str = self.magic or ""
            if "," in magic_str:
                spl = magic_str.split(",")
                magic_str = "%s (%s)" % (spl[0], ",".join(spl[1:3]).strip())
            self._magic_human = magic_str
        return self._magic_human or ""

    @property
    def mime_human(self):
        # "application/x-dosexec" -> "dosexec", dashes become spaces.
        if not self._mime_human:
            mime = self.mime or ""
            if "/" in mime:
                mime = mime.split("/", 1)[1]
                if mime.startswith("x-"):
                    mime = mime[2:]
                mime = mime.replace("-", " ")
            self._mime_human = mime
        return self._mime_human or ""

    @property
    def parentdirs(self):
        if not self.relapath:
            return []
        dirname = os.path.dirname(self.relapath.replace("\\", "/"))
        return dirname.split("/") if dirname else []

    @property
    def filesize(self):
        s = self.stream
        s.seek(0, os.SEEK_END)
        return s.tell()

    @property
    def dependency(self):
        if not self._identified_ran:
            self._identify()
        return self._dependency

    @property
    def dependency_version(self):
        if not self._identified_ran:
            self._identify()
        return self._dependency_version

    @property
    def extension(self):
        if not self._identified_ran:
            self._identify()
        return self._extension

    @property
    def human_type(self):
        if not self._identified_ran:
            self._identify()
        return self._human_type

    @property
    def platforms(self):
        if not self._identified_ran:
            self._identify()
        return self._platforms

    @property
    def selected(self):
        if not self._identified_ran:
            self._identify()
        if self.error:
            return False
        return self._selected

    @property
    def selectable(self):
        if not self._identified_ran:
            self._identify()
        if self.error:
            return False
        return self._selectable

    @property
    def extrpath(self):
        # Chain of relapaths from the outermost archive to this file.
        ret, child = [], self
        while child.parent:
            if not child.relapath:
                return ret
            ret.insert(0, child.relapath)
            child = child.parent
        return ret

    @property
    def relaname(self):
        if not self.relapath:
            return
        # TODO Strip absolute paths for Windows.
        # TODO Normalize relative paths.
        return self.relapath.lstrip("\\/").rstrip("\x00")

    @property
    def ole(self):
        # Try to parse as an OLE container once; None when not OLE.
        if not self._ole_tried:
            try:
                self._ole = olefile.OleFileIO(self.stream)
            except IOError:
                pass
            self._ole_tried = True
        return self._ole

    def set_error(self, state, error):
        self.mode = state
        self.error = error

    def clear_error(self):
        self.mode = Errors.NO_ERROR
        self.error = None

    def safelist(self, reason):
        self.safelisted = True
        self.safelist_reason = reason

    def deselect(self):
        self._selected = False

    def unselectable(self):
        self._selected = False
        self._selectable = False

    def raise_no_ole(self, message):
        if self.ole is None:
            raise UnpackException(message)

    def to_dict(self, selected_files=None):
        """Serialize this file and its children to a plain dict."""
        children = []
        for child in self.children:
            children.append(child.to_dict(selected_files))
            if selected_files and child.selected:
                selected_files.append(child)
        return {
            "filename": self.filename,
            "relapath": self.relapath,
            "relaname": self.relaname,
            "filepath": self.filepath,
            "extrpath": self.extrpath,
            "parentdirs": self.parentdirs,
            "duplicate": self.duplicate,
            "size": self.filesize,
            "children": children,
            "type": "container" if self.children else "file",
            "finger": {
                "magic": self.magic,
                "mime": self.mime,
                "mime_human": self.mime_human,
                "magic_human": self.magic_human,
            },
            "password": self.password,
            "human_type": self.human_type,
            "extension": self.extension,
            "identified": self.identified,
            "platforms": self.platforms,
            "selected": self.selected,
            "selectable": self.selectable,
            "dependency": self._dependency,
            "dependency_version": self._dependency_version,
            "safelisted": self.safelisted,
            "safelist_reason": self.safelist_reason,
            "error": self.error,
        }

    def astree(self, finger=True, sanitize=False, selected_files=None,
               child_cb=None):
        """Serialize to a tree where parent directories become nodes."""
        ret = {
            "duplicate": self.duplicate,
            "password": self.password,
            "human_type": self.human_type,
            "extension": self.extension,
            "dependency": self._dependency,
            "dependency_version": self._dependency_version,
            "filename": self.filename,
            "relapath": self.relapath,
            "relaname": self.relaname,
            "extrpath": self.extrpath,
            "size": self.filesize,
            "identified": self.identified,
            "platforms": self.platforms,
            "selected": self.selected,
            "selectable": self.selectable,
            "safelisted": self.safelisted,
            "safelist_reason": self.safelist_reason,
            "sha256": self.sha256,
            "md5": self.md5,
            "sha1": self.sha1,
            "type": "container" if self.children else "file",
            "children": [],
            "error": self.error,
        }

        if not sanitize:
            ret["filepath"] = self.filepath

        if finger:
            ret["finger"] = {
                "mime": self.mime,
                "mime_human": self.mime_human,
                "magic": self.magic,
                "magic_human": self.magic_human,
            }

        if child_cb:
            child_cb(self, ret)

        def findentry(entry, name):
            # Find or create the directory node `name` in `entry`.
            for idx in range(len(entry)):
                if entry[idx]["filename"] == name:
                    return entry[idx]
            entry.append({
                "type": "directory",
                "filename": name,
                "children": []
            })
            return entry[-1]

        for child in self.children:
            entry = ret["children"]
            for part in child.parentdirs:
                entry = findentry(entry, part)["children"]
            if selected_files and child.selected:
                selected_files.append(child)
            entry.append(
                child.astree(finger=finger, sanitize=sanitize,
                             selected_files=selected_files,
                             child_cb=child_cb))

        return ret

    def extract(self, dirpath, filename=None, preserve=False):
        """Extract one or all files into a directory, note that directory
        hierarchy is by default not preserved with this function."""
        for child in self.children:
            if filename and child.relapath != filename:
                continue

            if not preserve:
                filepath = os.path.join(dirpath, child.filename)
            else:
                filepath = os.path.abspath(
                    os.path.join(dirpath, child.relaname))
                # Avoid path traversal.
                if not filepath.startswith(dirpath):
                    continue
                # BUGFIX: os.mkdir failed for nested relanames; makedirs
                # creates intermediate directories and tolerates existing
                # ones.
                os.makedirs(os.path.dirname(filepath), exist_ok=True)

            shutil.copyfileobj(child.stream, open(filepath, "wb"),
                               1024 * 1024)
            child.extract(dirpath, preserve=preserve)

    def read(self, relapath, stream=False):
        """Extract a single file from a possibly nested archive. See also the
        `extrpath` field of an embedded document."""
        if isinstance(relapath, (str, bytes)):
            relapath = relapath,

        relapath, nextpath = relapath[0], relapath[1:]
        for child in self.children:
            if child.relapath == relapath:
                if nextpath:
                    return child.read(nextpath)
                return child.stream if stream else child.contents

    def get_child(self, relaname, regex=False):
        """Return the first child whose relaname matches, or None."""
        if not regex:
            relaname = "%s$" % re.escape(relaname)
        for child in self.children:
            if child.relaname and re.match(relaname, child.relaname):
                return child