def _generate_thumbnails(self, files=None):
    """Create thumb and presentation images for this node's processing file.

    Any previously generated thumb/presentation files are removed first;
    the freshly created files are registered on ``self.files``.
    """
    if files is None:
        files = self.files.all()

    image_file = self._find_processing_file(files)
    base_path = os.path.splitext(image_file.abspath)[0]
    # XXX: we really should use the correct file ending and find another way of naming
    thumb_path = base_path + ".thumb"
    presentation_path = base_path + ".presentation"

    # XXX: removing files before the new ones are created is bad, that should
    # happen later (use File.unlink_after_deletion).
    # XXX: But we need better thumbnail naming first.
    stale_files = [f for f in files if f.filetype in (u"thumb", u"presentation")]
    for stale in stale_files:
        self.files.remove(stale)
        stale.unlink()

    make_thumbnail_image(image_file.abspath, thumb_path)
    make_presentation_image(image_file.abspath, presentation_path)

    self.files.append(File(thumb_path, u"thumb", u"image/jpeg"))
    self.files.append(File(presentation_path, u"presentation", u"image/jpeg"))
def launchFile(self):
    """Let the user pick a .docx/.txt file, open it in the matching editor,
    wait for the editor to close, then commit a new revision with a
    user-supplied message and refresh the file list.

    Returns an error string if the editor subprocess fails, else None.

    Bug fix: a cancelled file dialog returns an empty filename; previously
    the code still fell through to ``File(filename, Database())`` and
    committed a bogus empty-path revision. Now we bail out early.
    """
    ## Prompts a file dialog with only 2 available file types
    ## Once file is selected, dialog returns the file's path (filename)
    filename = tkFileDialog.askopenfilename(
        filetypes=[('word files', '.docx'), ('text files', '.txt')])
    if not filename:
        # dialog was cancelled: nothing to open, nothing to commit
        return
    ## Splits file's path into a tuple of directory string and extension string
    filepath, file_ext = os.path.splitext(filename)
    if file_ext == ".txt":
        ## Executable file for notepad program
        filepath = "C:/Windows/notepad.exe"
    elif file_ext == ".docx":
        ## Executable file for Microsoft Word program
        filepath = "C:/Program Files (x86)/Microsoft Office/root/Office16/WINWORD.EXE"
    try:
        ## Create sub process and block until the editor is closed
        proc = subprocess.Popen([filepath, filename])
        proc.wait()
    except (OSError, subprocess.CalledProcessError):
        return "Failed, file is damaged or program has crashed!"
    ## Prompts dialog for user to input revision message
    message = tkSimpleDialog.askstring("Commit message", "What are the changes?")
    rcsFile = File(filename, Database())
    rcsFile.add(message)
    ## Update file list with the new changes
    self.updatefileList()
def addFile(self):
    """Let the user pick a .txt/.docx file and put it under revision control
    with a user-supplied first commit message, then refresh the file list.

    Bug fix: a cancelled file dialog returns an empty filename; previously
    the code still created ``File("")`` and committed it. Bail out early.
    """
    ## Prompts a file dialog with only 2 available file types
    filename = tkFileDialog.askopenfilename(
        filetypes=[('text files', '.txt'), ('word files', '.docx')])
    if not filename:
        # dialog was cancelled: nothing to add
        return
    ## Prompts dialog for user to input revision message
    message = tkSimpleDialog.askstring("First commit", "What are the details?")
    rcsFile = File(filename, Database())
    rcsFile.add(message)
    ## Update file list with the new changes
    self.updatefileList()
def test_change_file(container_node):
    """Mutating a File's path must only affect the current node, not the
    File row frozen into an earlier node version."""
    from core import File
    node = container_node
    d = File(path=u"test", filetype=u"test", mimetype=u"test")
    node.files.append(d)
    db.session.commit()
    # mutate the file in place; this creates a new node version implicitly
    d.path = u"changed"
    db.session.commit()
    # Changing the current file affects only the current node's File, not the node version's File.
    assert node.versions[0].files.one().path == u"test"
    assert node.files.one().path == u"changed"
    # the last version is the current one (no successor)
    assert node.versions[-1].next is None
def test_replace_file(container_node):
    """Replacing the file list creates a new version; each version keeps
    its own File snapshot."""
    from core import File
    node = container_node
    db.session.commit()
    d = File(path=u"test", filetype=u"test", mimetype=u"test")
    node.files.append(d)
    node[u"testattr"] = u"test"
    db.session.commit()
    # replace the whole file collection (not a mutation of the old File)
    d = File(path=u"replaced", filetype=u"test", mimetype=u"test")
    node.files = [d]
    node[u"testattr"] = u"replaced"
    db.session.commit()
    assert node.versions[1].files.one().path == u"test"
    assert node.versions[2].files.one().path == u"replaced"
def convert_image(self, audiofile):
    """Extract the embedded APIC:thumbnail picture from `audiofile`'s tags
    and store it as a JPEG presentation image (<basename>.thumb2, longest
    edge scaled to 320px), registering it on ``self.files``.

    Fixes:
    - floor division (``//``) so the computed size stays integral under
      Python 3 / ``from __future__ import division``
    - the generated file is saved as JPEG, so register it with mimetype
      ``image/jpeg`` instead of the embedded picture's original mime.
    """
    path, ext = splitfilename(audiofile.filename)
    if audiofile.tags:
        for k in audiofile.tags:
            if k == "APIC:thumbnail":
                # dump the raw embedded picture data, then reopen for scaling
                with open("{}.thumb2".format(path), "wb") as fout:
                    fout.write(audiofile.tags[k].data)
                pic = Image.open(path + ".thumb2")
                width, height = pic.size
                # scale the longer edge to 320px, keeping aspect ratio
                if width > height:
                    newwidth = 320
                    newheight = height * newwidth // width
                else:
                    newheight = 320
                    newwidth = width * newheight // height
                pic = pic.resize((newwidth, newheight), Image.ANTIALIAS)
                pic.save(path + ".thumb2", "jpeg")
                self.files.append(
                    File(path + ".thumb2", "presentation", "image/jpeg"))
                break
def _generate_other_format(self, mimetype_to_generate, files=None):
    """Convert this node's original image into `mimetype_to_generate` and
    register the result as an `image` file, replacing any previous `image`
    file of that mimetype.

    :param mimetype_to_generate: e.g. u"image/png"; the subtype becomes the
      new file extension
    :param files: file list to inspect; defaults to ``self.files.all()``
      (fix: previously omitting `files` crashed, unlike the sibling
      ``_generate_thumbnails`` / ``_generate_image_formats`` methods)
    """
    if files is None:
        files = self.files.all()
    original_file = filter_scalar(lambda f: f.filetype == u"original", files)
    extension = mimetype_to_generate.split("/")[1]
    newimg_name = os.path.splitext(original_file.abspath)[0] + "." + extension
    # conversion must not overwrite the original in place
    assert original_file.abspath != newimg_name
    if original_file.mimetype == u"image/svg+xml":
        # rasterize SVG on a white opaque background
        convert_options = ["-alpha", "off", "-colorspace", "RGB", "-background", "white"]
    else:
        convert_options = []
    # drop a stale generated image of the same mimetype before re-creating it
    old_file = filter_scalar(
        lambda f: f.filetype == u"image" and f.mimetype == mimetype_to_generate, files)
    if old_file is not None:
        self.files.remove(old_file)
        old_file.unlink()
    convert_image(original_file.abspath, newimg_name, convert_options)
    self.files.append(File(newimg_name, u"image", mimetype_to_generate))
async def transcribe(socket_id: str, data: Dict[str, str]):
    """Start a background transcription job for an uploaded file.

    Rejects files over the size limit and reports all outcomes (progress,
    rejection, unexpected errors) to the client via socket status events.
    """
    try:
        filename = data.get('filename')
        accent = data.get('accent')
        if File.verify_size(filename):
            ctx = {'message': 'Transcribing', 'status': Status.INFO.value}
            await socket.emit(event=SocketMessage.STATUS.value,
                              data=ctx,
                              sid=socket_id)
            # run the actual transcription off the event loop
            Thread(target=transribe_job,
                   args=(filename, accent, socket_id)).start()
        else:
            ctx = {
                'message': 'File uploaded exceeds 5 mins limit',
                'status': Status.ERROR.value
            }
            await socket.emit(event=SocketMessage.STATUS.value,
                              data=ctx,
                              sid=socket_id)
    except Exception as error:
        # top-level boundary: surface the failure to the client
        ctx = {'message': str(error), 'status': Status.ERROR.value}
        await socket.emit(event=SocketMessage.STATUS.value,
                          data=ctx,
                          sid=socket_id)
def _generate_image_formats(self, files=None, mimetypes_to_consider=None):
    """Creates other full size formats for this image node.

    TIFF: create new PNG to be used as `image`
    SVG: create PNG and add it as `png_image`

    :param files: file list to inspect; defaults to ``self.files.all()``
    :param mimetypes_to_consider: limit the formats that should be
      (re)-generated to this sequence of mimetypes
    """
    if files is None:
        files = self.files.all()
    original_file = filter_scalar(lambda f: f.filetype == u"original", files)
    # old generated `image` files are removed up front, then regenerated below
    old_image_files = filter(lambda f: f.filetype == u"image", files)
    for old_img_file in old_image_files:
        # we don't want to remove the original file...
        if old_img_file.path != original_file.path:
            self.files.remove(old_img_file)
            old_img_file.unlink()
    # formats to create are determined by the original's mimetype
    mimetypes_to_generate = set(Image.IMAGE_FORMATS_FOR_MIMETYPE[original_file.mimetype])
    if mimetypes_to_consider is not None:
        mimetypes_to_generate = mimetypes_to_generate.intersection(mimetypes_to_consider)
    for new_mimetype in mimetypes_to_generate:
        if new_mimetype == original_file.mimetype:
            # image is alias for the original image in this case
            fileobj = File(original_file.path, u"image", original_file.mimetype)
            self.files.append(fileobj)
        else:
            self._generate_other_format(new_mimetype, files)
def makeAudioThumb(self, audiofile):
    """Transcode `audiofile` to MP3 via lame unless it already is an MP3.

    Returns the path of the generated MP3, or None if no conversion
    was performed.
    """
    source = audiofile.abspath
    base, ext = splitfilename(source)
    if source.endswith(".mp3"):
        return None
    mp3_path = base + ".mp3"
    # lame -V 4: VBR quality 4; -q: encoding quality flag as used project-wide
    utils.process.call(("lame", "-V", "4", "-q", source, mp3_path))
    self.files.append(File(base + ".mp3", "mp3", "audio/mpeg"))
    return mp3_path
def _finish_change(node, change_file, user, uploadfile, req):
    """Apply a file change to `node` according to `change_file`:

    - "yes"/"no": replace the node's main file with the upload (after a
      suffix-based type check), clearing derived sys files first
    - "attdir": create/reuse an attachment directory and import the upload
      into it
    - "attfile": import the upload as a plain attachment, or into an
      existing attachment directory

    Fix: the sys-file cleanup used to remove entries from ``node.files``
    while iterating it, which skips the element after each removal; we now
    iterate over a snapshot. The local `file` was also renamed to avoid
    shadowing the builtin.
    """
    if change_file in ["yes", "no"]:
        # check that the correct filetype is uploaded
        # note: only the suffix of the filename is checked not the file content
        uploadfile_type = getMimeType(uploadfile.filename)[1]
        if uploadfile_type != node.type and uploadfile_type != node.get_upload_filetype():
            req.setStatus(httpstatus.HTTP_NOT_ACCEPTABLE)
            return

        # sys files are always cleared to delete remaining thumbnails, presentation images etc.
        # iterate over a copy: removing while iterating the live collection skips entries
        for f in list(node.files):
            if f.filetype in node.get_sys_filetypes():
                node.files.remove(f)

        new_file = importFile(uploadfile.filename, uploadfile.tempname)  # add new file
        new_file.filetype = node.get_upload_filetype()
        node.files.append(new_file)
        # this should re-create all dependent files
        node.event_files_changed()
        logg.info(u"%s changed file of node %s to %s (%s)",
                  user.login_name, node.id, uploadfile.filename, uploadfile.tempname)
        return

    # locate an existing attachment directory, if any
    attpath = ""
    for f in node.files:
        if f.mimetype == "inode/directory":
            attpath = f.base_name
            break

    if change_file == "attdir":  # add attachmentdir
        if attpath == "":  # add attachment directory
            attpath = req.params.get("inputname")
            if not os.path.exists(getImportDir() + "/" + attpath):
                os.mkdir(getImportDir() + "/" + attpath)
            node.files.append(
                File(getImportDir() + "/" + attpath, "attachment", "inode/directory"))
        importFileIntoDir(getImportDir() + "/" + attpath, uploadfile.tempname)  # add new file

    if change_file == "attfile":  # add file as attachment
        if attpath == "":
            # no attachment directory existing
            new_file = importFile(uploadfile.filename, uploadfile.tempname)  # add new file
            new_file.mimetype = "inode/file"
            new_file.filetype = "attachment"
            node.files.append(new_file)
        else:
            # import attachment file into existing attachment directory
            importFileIntoDir(getImportDir() + "/" + attpath, uploadfile.tempname)  # add new file
def test_add_file(container_node):
    """Appending a File plus an attribute change creates a new version that
    contains the file, while the previous version stays file-less."""
    node = container_node
    db.session.commit()
    d = File(path=u"test", filetype=u"test", mimetype=u"test")
    node.files.append(d)
    node[u"changed"] = u"new_file"
    db.session.commit()
    assert node.versions[0].files.count() == 0
    assert node.versions[1].files.count() == 1
def importFileToRealname(realname, tempname, prefix="", typeprefix=""):
    """Copy `tempname` into the import area under a unique name derived from
    `realname`'s basename and return a File for it.

    Mimetype/filetype are guessed from the lower-cased `realname`; the
    filetype is prefixed with `typeprefix`.
    """
    basename = os.path.basename(realname)
    destname = _find_unique_destname(basename, prefix)
    shutil.copyfile(tempname, destname)
    mimetype, filetype = getMimeType(realname.lower())
    return File(destname, typeprefix + filetype, mimetype)
def test_remove_file(container_node):
    """Clearing the file list creates a new version without files, while the
    previous version keeps its File snapshot."""
    node = container_node
    d = File(path=u"test", filetype=u"test", mimetype=u"test")
    node.files.append(d)
    db.session.commit()
    node.files = []
    node[u"changed"] = u"removed_file"
    db.session.commit()
    assert node.versions[0].files.count() == 1
    assert node.versions[1].files.count() == 0
def importFileRandom(tempname):
    """Copy `tempname` into the import dir under a random basename that keeps
    the original file extension; mimetype/filetype are guessed from
    `tempname` itself.
    """
    extension = os.path.splitext(os.path.basename(tempname))[1]
    # random decimal digits (drop the leading "0.") as the new basename
    random_basename = unicode(random.random())[2:] + extension
    destname = os.path.join(getImportDir(), random_basename)
    shutil.copyfile(tempname, destname)
    mimetype, filetype = getMimeType(tempname.lower())
    return File(destname, filetype, mimetype)
def importFile(realname, tempname, prefix=""):
    """Copy the uploaded temp file into the import area and return a File.

    :param realname: original (client-side) filename; its lower-cased suffix
      determines the guessed mimetype/filetype
    :param tempname: path of the temporary upload on disk; its basename is
      used to derive the unique destination name
    :param prefix: optional prefix for the unique destination name
    :raises IOError: if `tempname` does not exist
    """
    if not os.path.exists(tempname):
        # fix: the message previously lacked the space before "does not exist"
        raise IOError("temporary file " + tempname + " does not exist")
    filename = os.path.basename(tempname)
    destname = _find_unique_destname(filename, prefix)
    shutil.copyfile(tempname, destname)
    r = realname.lower()
    mimetype, filetype = getMimeType(r)
    return File(destname, filetype, mimetype)
def do_file(self, filename, stream):
    """Build (or update) a compact netCDF file for `stream` and return a
    saved File record pointing at it.

    NOTE(review): `begin_time` is computed but never used here — presumably
    leftover instrumentation; confirm before removing.
    """
    # create compact file and initialize basic settings
    begin_time = self.getdatetimenow()
    root, is_new = nc.open(filename)
    if is_new:
        # bootstrap dimensions and lat/lon grids from the stream's first file
        sample, n = nc.open(stream.files.all()[0].file.completepath())
        shape = sample.variables['data'].shape
        nc.getdim(root, 'northing', shape[1])
        nc.getdim(root, 'easting', shape[2])
        nc.getdim(root, 'timing')
        v_lat = nc.getvar(root, 'lat', 'f4', ('northing', 'easting',), 4)
        v_lon = nc.getvar(root, 'lon', 'f4', ('northing', 'easting',), 4)
        v_lon[:] = nc.getvar(sample, 'lon')[:]
        v_lat[:] = nc.getvar(sample, 'lat')[:]
        nc.close(sample)
        nc.sync(root)
    # copy the stream's data variable into the compact file
    self.do_var(root, 'data', stream)
    # save the content inside the compact file
    if not root is None:
        nc.close(root)
    f = File(localname=filename)
    f.save()
    return f
def transribe_job(filename: str, accent: str, socket_id: str):
    """Run the transcription synchronously and push the result (or the
    error) to the client socket. Uploaded artifacts are always cleaned up,
    whatever the outcome.

    NOTE: the name keeps the historical 'transribe' spelling because callers
    reference it.
    """
    try:
        transcriber = Transcriber(File.path(filename), accent)
        result = transcriber.run()
        payload = {'message': result, 'status': Status.SUCCESS.value}
        asyncio.run(
            socket.emit(event=SocketMessage.TRANSCRIBE.value,
                        data=payload,
                        sid=socket_id))
    except Exception as error:
        payload = {'message': str(error), 'status': Status.ERROR.value}
        asyncio.run(
            socket.emit(event=SocketMessage.STATUS.value,
                        data=payload,
                        sid=socket_id))
    finally:
        # remove both the original upload and the intermediate wav
        File.delete(filename)
        root = filename.split('.')[0]
        File.delete(f'{root}.wav')
def importFileIntoDir(destdir, tempname):
    """Copy `tempname` (keeping its basename) into `destdir` below the
    import directory, creating that directory if needed, and return a File
    for the copy.

    Mimetype/filetype are guessed from the lower-cased `tempname`.
    Fix: dropped the pointless single-argument ``os.path.join(dest_filepath)``
    wrapper — it was a no-op.
    """
    filename = os.path.basename(tempname)
    dest_dirpath = os.path.join(getImportDir(), destdir)
    dest_filepath = os.path.join(dest_dirpath, filename)
    if not os.path.exists(dest_dirpath):
        os.mkdir(dest_dirpath)
    shutil.copyfile(tempname, dest_filepath)
    r = tempname.lower()
    mimetype, filetype = getMimeType(r)
    return File(dest_filepath, filetype, mimetype)
def _writeback_iptc(self):
    """
    Handles metadata content if changed. Creates a 'new' original [old == upload].
    """
    upload_file = None
    original_path = None
    original_file = None
    # scan for the original file (IPTC target) and an existing upload backup
    for f in self.files:
        if f.getType() == 'original':
            original_file = f
            if os.path.exists(f.abspath):
                original_path = f.abspath
                # NOTE(review): originals whose basename starts with '-' are
                # skipped entirely — presumably a marker convention; confirm.
                if os.path.basename(original_path).startswith('-'):
                    return
        if f.type == 'upload':
            if os.path.exists(f.abspath):
                upload_file = f
    if not original_file:
        logg.info('No original upload for writing IPTC.')
        return
    if not upload_file:
        # no backup yet: copy the original aside as '<name>_upload<ext>'
        # before IPTC tags are written into the original
        upload_path = '{}_upload{}'.format(
            os.path.splitext(original_path)[0],
            os.path.splitext(original_path)[-1])
        import shutil
        shutil.copy(original_path, upload_path)
        self.files.append(
            File(upload_path, "upload", original_file.mimetype))
        db.session.commit()
    # collect iptc_* attribute values for the enabled meta fields
    tag_dict = {}
    for field in self.getMetaFields():
        if field.get('type') == "meta" and field.getValueList(
        )[0] != '' and 'on' in field.getValueList():
            tag_name = field.getValueList()[0].split('iptc_')[-1]
            field_value = self.get('iptc_{}'.format(field.getName()))
            # NOTE(review): this repeats the outer condition — redundant as far
            # as visible here; kept byte-identical.
            if field.getValueList(
            )[0] != '' and 'on' in field.getValueList():
                tag_dict[tag_name] = field_value
    lib.iptc.IPTC.write_iptc_tags(original_path, tag_dict)
def event_metafield_changed(self, node, field):
    """Re-create the watermarked copy of an image node's original file when
    the watermark-relevant metafield changes."""
    if "image" in node.type:
        # NOTE(review): `items` is never used below — looks like dead code.
        items = node.attrs.items()
        # check if there is an original file and modify it in case
        for f in node.getFiles():
            if f.type == "original":
                path, ext = splitfilename(f.abspath)
                # watermarked copy is saved as JPEG despite the 'png' in the name
                pngname = path + "_wm.jpg"
                # drop the previous watermarked file, if any
                for file in node.getFiles():
                    if file.getType() == "original_wm":
                        node.files.remove(file)
                        break
                # overlay the metafield's value at 60% opacity
                self.watermark(f.abspath, pngname, node.get(field.getName()), 0.6)
                node.files.append(File(pngname, "original_wm", "image/jpeg"))
                db.session.commit()
                logg.info("watermark created for original file")
def _generate_zoom_archive(self, files=None):
    """Build the zoom-tile ZIP archive for this image node and register it
    as a 'zoom' File, replacing any previously generated archive."""
    if files is None:
        files = self.files.all()

    source_file = self._find_processing_file(files)
    archive_name = get_zoom_zip_filename(self.id)
    archive_path = os.path.join(config.get("paths.zoomdir"), archive_name)

    # drop stale zoom archives before writing the new one
    for stale in [f for f in files if f.filetype == u"zoom"]:
        self.files.remove(stale)
        stale.unlink()

    _create_zoom_archive(Image.ZOOM_TILESIZE, source_file.abspath, archive_path)
    self.files.append(
        File(path=archive_path, filetype=u"zoom", mimetype=u"application/zip"))
def make_thumbnail_image(self, audiofile):
    """Create a 128x128 JPEG thumbnail (<basename>.thumb) from the embedded
    APIC:thumbnail picture of `audiofile` and register it on ``self.files``.

    The scaled picture is centered on a white 128x128 canvas with a grey
    1px border.

    Fixes:
    - reopen the file that was just written (path + ".thumb"); previously
      the code opened path + ".thumb2", silently depending on
      convert_image() having run first and rescaling an already downscaled
      image
    - removed the redundant first resize/save pass
    - floor division (``//``) keeps sizes/offsets integral on Python 3.
    """
    path, ext = splitfilename(audiofile.filename)
    if audiofile.tags:
        for k in audiofile.tags:
            if k == "APIC:thumbnail":
                # dump the raw embedded picture, then reopen it for scaling
                with open("{}.thumb".format(path), "wb") as fout:
                    fout.write(audiofile.tags[k].data)
                pic = Image.open(path + ".thumb")
                width, height = pic.size
                # fit the longer edge into 128px, keeping aspect ratio
                if width > height:
                    newwidth = 128
                    newheight = height * newwidth // width
                else:
                    newheight = 128
                    newwidth = width * newheight // height
                pic = pic.resize((newwidth, newheight), Image.ANTIALIAS)
                # paste centered onto a white square canvas
                im = Image.new(pic.mode, (128, 128), (255, 255, 255))
                x = (128 - newwidth) // 2
                y = (128 - newheight) // 2
                im.paste(pic, (x, y, x + newwidth, y + newheight))
                # grey border around the canvas
                draw = ImageDraw.ImageDraw(im)
                draw.line([(0, 0), (127, 0), (127, 127), (0, 127), (0, 0)],
                          (128, 128, 128))
                im = im.convert("RGB")
                im.save(path + ".thumb", "jpeg")
                self.files.append(
                    File(path + ".thumb", "thumb", audiofile.tags[k].mime))
                break
def _image_fixture_proto(mime_subtype, session):
    """Build an Image node fixture whose 'original' File points at a test
    image of the given mime subtype, generating the image on first use
    (except SVG, which must exist on disk already)."""
    img_path = TEST_IMAGE_PATHS[mime_subtype]
    img_fullpath = resolve_datadir_path(img_path)
    if not os.path.isdir(os.path.dirname(img_fullpath)):
        os.mkdir(os.path.dirname(img_fullpath))
    if not os.path.exists(img_fullpath):
        if 'svg' not in mime_subtype:
            # generate test images, http://pillow.readthedocs.io/en/latest/handbook/image-file-formats.html
            # TODO: make all tests pass with the generated images instead of relying on external test images
            from PIL import Image, ImageDraw
            img = Image.new('RGBA', (2001, 2001))  # ZOOM_SIZE +1
            draw = ImageDraw.Draw(img)
            draw.ellipse((25, 25, 75, 75), fill=(255, 0, 0))
            draw.text((10, 10), "mediatum test image: " + mime_subtype, fill=(0, 255, 0))
            save_options = {}
            if mime_subtype in ['png', 'tiff']:
                # formats that carry DPI metadata get a known resolution
                save_options.update(dpi=(400, 400))
            img.save(img_fullpath, **save_options)
            # TODO: write Exif to tiff and jpeg
            if mime_subtype in ['jpeg', 'tiff']:
                import exiftool
                with exiftool.ExifTool() as et:
                    pass
                    #et.execute("XResolution=300", img_fullpath)
        else:
            # SVG cannot be generated here; require the external test file
            pytest.skip(u"test image not found at " + img_fullpath)
    image = ImageFactory()
    MetadatatypeFactory(name=u"test")
    mimetype = u"image/" + mime_subtype
    image.files.append(
        File(path=img_path, filetype=u"original", mimetype=mimetype))
    # stashed so tests can assert against the fixture's mimetype
    image._test_mimetype = mimetype
    return image
def show_workflow_node(self, node, req, data=None):
    """Render (and react to) the addpic2pdf workflow step UI.

    Handles three "gotrue" actions from the form:
    - reset: delete previously generated preview PDFs for this step/node
    - accept: promote the generated preview PDF to the node's 'document',
      archiving the previous document (and its derived files) as 'o_*'
    - apply: stamp the selected logo onto the chosen pages of the node's
      PDF and store the result as a 'p_document' preview

    "gofalse" forwards without changes; otherwise the step page is rendered
    with logo/PDF metadata (or a fatal-error page when PyPDF or the PDF
    document is missing).

    NOTE(review): `startpageno` (used in the final context dict) is not
    defined anywhere visible in this method — presumably set on a path not
    shown here or a module global; confirm.
    """
    check_context()
    user = users.getUserFromRequest(req)
    current_workflow = getNodeWorkflow(node)
    current_workflow_step = getNodeWorkflowStep(node)
    FATAL_ERROR = False
    FATAL_ERROR_STR = ""
    if "gotrue" in req.params:
        if not PYPDF_MODULE_PRESENT:
            # cannot process PDFs at all; re-render the page without the action
            del req.params['gotrue']
            return self.show_workflow_node(node, req)
        radio_apply_reset_accept = req.params.get(
            'radio_apply_reset_accept', '')
        if radio_apply_reset_accept == 'reset':
            # delete preview PDFs generated by this step for this node
            for f in node.files:
                f_name = f.base_name
                if f_name.startswith(
                        'addpic2pdf_%s_node_%s_'
                        % (unicode(current_workflow_step.id), unicode(
                            node.id))) and f.filetype.startswith('p_document'):
                    logg.info(
                        "workflow step addpic2pdf(%s): going to remove file '%s' from node '%s' (%s) for request from user '%s' (%s)",
                        current_workflow_step.id, f_name, node.name, node.id,
                        user.login_name, req.ip)
                    node.files.remove(f)
                    db.session.commit()
                    try:
                        os.remove(f.abspath)
                    except:
                        logg.exception(
                            "exception in workflow setep addpic2pdf, removing file failed, ignoring"
                        )
            del req.params['gotrue']
            return self.show_workflow_node(node, req)
        elif radio_apply_reset_accept == 'accept':
            # promote the preview PDF to 'document', archiving the old one
            p_document_files = [
                f for f in node.files
                if f.filetype == 'p_document' and f.base_name.startswith(
                    'addpic2pdf_%s_node_%s_' %
                    (unicode(current_workflow_step.id), unicode(node.id)))
            ]
            if len(p_document_files) > 0:
                p_document_file = p_document_files[0]
                document_file = [
                    f for f in node.files if f.filetype == 'document'
                ][0]
                # archive the current document as 'o_document'
                o_document_file = File(document_file.path, 'o_document',
                                       document_file.mimetype)
                node.files.remove(document_file)
                node.files.append(o_document_file)
                o_document_name = o_document_file.base_name
                # archive derived files (thumbs etc.) that belong to the old document
                for f in node.files:
                    if f.filetype in [
                            'thumb', 'fileinfo', 'fulltext'
                    ] or f.filetype.startswith('present'):
                        if os.path.splitext(
                                f.base_name)[0] == os.path.splitext(
                                    o_document_name)[0]:
                            new_f = File(f.path, 'o_' + f.filetype, f.mimetype)
                            node.files.remove(f)
                            node.files.append(new_f)
                new_document_file = File(p_document_file.path,
                                         'document', p_document_file.mimetype)
                node.files.remove(p_document_file)
                node.files.append(new_document_file)
                db.session.commit()
                node.event_files_changed()
            del req.params['gotrue']
            return self.forwardAndShow(node, True, req)
        elif radio_apply_reset_accept == 'apply':
            # stamp the selected logo onto the chosen pages
            drag_logo_fullname = req.params.get("input_drag_logo_fullname",
                                                None)
            if not drag_logo_fullname:
                req.params["addpic2pdf_error"] = "%s: %s" % (
                    format_date().replace('T', ' - '),
                    t(lang(req), "admin_wfstep_addpic2pdf_no_logo_selected"))
                del req.params['gotrue']
                return self.show_workflow_node(node, req)
            drag_logo_filepath = [
                f.abspath for f in current_workflow_step.files
                if f.base_name == drag_logo_fullname
            ][0]
            pos_cm = req.params.get("input_poffset_cm", "0, 0")
            x_cm, y_cm = [float(x.strip()) for x in pos_cm.split(",")]
            pdf_in_filepath = getPdfFilepathForProcessing(
                current_workflow_step, node)
            current_pageno = int(
                req.params.get("input_current_page", "0").strip())
            radio_select_targetpages = req.params.get(
                "radio_select_targetpages", "").strip()
            input_select_targetpages = req.params.get(
                "input_select_targetpages", "").strip()
            printer_range = []
            page_count = get_pdf_pagecount(pdf_in_filepath)
            _parser_error = False
            # build the zero-based list of target pages from the form inputs
            try:
                if radio_select_targetpages == "current_page":
                    printer_range = [current_pageno]
                elif radio_select_targetpages == "all":
                    printer_range = range(0, page_count)
                elif radio_select_targetpages == "pair":
                    printer_range = [
                        x for x in range(0, page_count) if x % 2
                    ]
                    if input_select_targetpages:
                        printer_range = [
                            x for x in printer_range
                            if x in parse_printer_range(input_select_targetpages,
                                                        maximum=page_count + 1)
                        ]
                elif radio_select_targetpages == "impair":
                    printer_range = [
                        x for x in range(0, page_count) if not x % 2
                    ]
                    if input_select_targetpages:
                        printer_range = [
                            x for x in printer_range
                            if x in parse_printer_range(input_select_targetpages,
                                                        maximum=page_count + 1)
                        ]
                elif radio_select_targetpages == "range_only" and input_select_targetpages:
                    printer_range = parse_printer_range(
                        input_select_targetpages, maximum=page_count + 1)
            except ValueError as e:
                _parser_error = True
            if _parser_error:
                req.params["addpic2pdf_error"] = "%s: %s" % (
                    format_date().replace('T', ' - '),
                    t(lang(req), "admin_wfstep_addpic2pdf_printer_range_error"))
                del req.params['gotrue']
                return self.show_workflow_node(node, req)
            printer_range = map(int, list(printer_range))
            if not printer_range:
                req.params["addpic2pdf_error"] = "%s: %s" % (
                    format_date().replace('T', ' - '), t(
                        lang(req),
                        "admin_wfstep_addpic2pdf_printer_range_selected_empty"
                    ))
                del req.params['gotrue']
                return self.show_workflow_node(node, req)
            # convert cm offsets to PDF points
            x = x_cm * cm  # cm = 28.346456692913385
            y = y_cm * cm
            # scale the logo so it prints at its native physical size (72dpi base)
            pic_dpi = get_pic_info(drag_logo_filepath).get('dpi', None)
            scale = 1.0
            if pic_dpi:
                dpi_x, dpi_y = pic_dpi
                if dpi_x != dpi_y:
                    req.params["addpic2pdf_error"] = "%s: %s" % (
                        format_date().replace('T', ' - '),
                        t(lang(req), "admin_wfstep_addpic2pdf_logo_dpix_dpiy"))
                dpi = int(dpi_x)
                if dpi == 72:
                    scale = 1.0
                else:
                    scale = 1.0 * 72.0 / dpi
            else:
                dpi = 300
                scale = 1.0 * 72.0 / dpi
            #dpi = 72
            #scale = 1.0
            tmppath = config.get("paths.datadir") + "tmp/"
            date_str = format_date().replace('T', '-').replace(' ', '').replace(
                ':', '-')
            filetempname = tmppath + \
                "temp_addpic_pdf_wfs_%s_node_%s_%s_%s_.pdf" % (
                    unicode(current_workflow_step.id), unicode(node.id), date_str, unicode(random.random()))
            url = req.params.get('input_drag_logo_url', '')
            fn_out = filetempname
            build_logo_overlay_pdf(pdf_in_filepath,
                                   drag_logo_filepath,
                                   fn_out,
                                   x,
                                   y,
                                   scale=scale,
                                   mask='auto',
                                   pages=printer_range,
                                   follow_rotate=True,
                                   url=(" " * ADD_NBSP) + url)
            # replace any existing preview PDF for this step/node
            for f in node.files:
                f_name = f.base_name
                if f_name.startswith('addpic2pdf_%s_node_%s_' % (
                        unicode(current_workflow_step.id),
                        unicode(node.id),
                )) and f.filetype.startswith('p_document'):
                    logg.info(
                        "workflow step addpic2pdf(%s): going to remove file '%s' from node '%s' (%s) for request from user '%s' (%s)",
                        current_workflow_step.id, f_name, node.name, node.id,
                        user.login_name, req.ip)
                    node.files.remove(f)
                    try:
                        os.remove(f.abspath)
                    except:
                        pass
                    break
            date_str = format_date().replace('T', '-').replace(' ', '').replace(
                ':', '-')
            nodeFile = importFileToRealname(
                "_has_been_processed_%s.pdf" % (date_str),
                filetempname,
                prefix='addpic2pdf_%s_node_%s_' % (
                    unicode(current_workflow_step.id),
                    unicode(node.id),
                ),
                typeprefix="p_")
            node.files.append(nodeFile)
            db.session.commit()
            try:
                os.remove(filetempname)
            except:
                pass
            del req.params['gotrue']
            return self.show_workflow_node(node, req)
    if "gofalse" in req.params:
        return self.forwardAndShow(node, False, req)
    # part of show_workflow_node not handled by "gotrue" and "gofalse"
    try:
        pdf_filepath = [
            f.abspath for f in node.files
            if f.filetype.startswith('document')
        ][0]
        error_no_pdf = False
    except:
        error_no_pdf = t(
            lang(req), "admin_wfstep_addpic2pdf_no_pdf_document_for_this_node")
    if not PYPDF_MODULE_PRESENT or error_no_pdf:
        # fatal-error rendering: no PyPDF or no PDF document on the node
        error = ""
        if not PYPDF_MODULE_PRESENT:
            error += t(lang(req), "admin_wfstep_addpic2pdf_no_pypdf")
        if error_no_pdf:
            error += error_no_pdf
        pdf_dimensions = {
            'd_pageno2size': {
                0: [595.275, 841.889]
            },
            'd_pageno2rotate': {
                0: 0
            }
        }  # A4
        keep_params = copyDictValues(req.params, {}, KEEP_PARAMS)
        context = {
            "key": req.params.get("key", req.session.get("key", "")),
            "error": error,
            "node": node,
            "files": node.files,
            "wfs": current_workflow_step,
            "wfs_files": [],
            "logo_info": {},
            "logo_info_list": [],
            "getImageSize": lambda x: (0, 0),
            "pdf_page_count": 0,
            "pdf_dimensions": pdf_dimensions,
            "json_pdf_dimensions": json.dumps(pdf_dimensions),
            "keep_params": json.dumps(keep_params),
            "startpageno": 0,
            "FATAL_ERROR": 'true',
            "user": users.getUserFromRequest(req),
            "prefix": self.get("prefix"),
            "buttons": self.tableRowButtons(node)
        }
        return req.getTAL("workflow/addpic2pdf.html",
                          context,
                          macro="workflow_addpic2pdf")
    try:
        pdf_dimensions = get_pdf_dimensions(pdf_filepath)
        pdf_pagecount = get_pdf_pagecount(pdf_filepath)
    except Exception as e:
        logg.exception("exception in workflow step addpic2pdf(%s)",
                       current_workflow_step.id)
        pdf_dimensions = {
            'd_pages': 0,
            'd_pageno2size': (0, 0),
            'd_pageno2rotate': 0
        }
        pdf_pagecount = 0
        FATAL_ERROR = True
        FATAL_ERROR_STR += " - %s" % (unicode(e))
    #wfs_files = [f for f in current_workflow_step.getFiles() if os.path.isfile(f.retrieveFile())]
    wfs_files0, wfs_files = getFilelist(current_workflow_step, 'logoupload')
    # "pattern|url" lines from the step config map logo filenames to link targets
    url_mapping = [
        line.strip()
        for line in current_workflow_step.get("url_mapping").splitlines()
        if line.strip() and line.find("|") > 0
    ]
    url_mapping = dict(
        map(lambda x: (x[0].strip(), x[1].strip()),
            [line.split("|", 1) for line in url_mapping]))
    logo_info = {}
    logo_info_list = []
    for f in [
            f for f in wfs_files
            if f.base_name.startswith('m_upload_logoupload')
    ]:
        f_path = f.abspath
        try:
            _size = list(get_pic_size(f_path))
            _dpi = get_pic_dpi(f_path)
        except Exception as e:
            logg.exception("exception in workflow step addpic2pdf(%s)",
                           current_workflow_step.id)
            FATAL_ERROR = True
            FATAL_ERROR_STR += (" - ERROR loading logo '%s'" %
                                f_path) + unicode(e)
            continue
        logo_filename = f.base_name
        logo_url = ""
        for key in url_mapping:
            if logo_filename.find(key) >= 0:
                logo_url = url_mapping[key]
                break
        logo_info[logo_filename.encode('utf-8')] = {
            'size': _size,
            'dpi': _dpi,
            'url': logo_url.encode('utf-8')
        }
        if _dpi == 'no-info':
            _dpi = 72.0
        logo_info_list.append({
            'size': _size,
            'dpi': _dpi,
            'url': logo_url.encode('utf-8')
        })
    if len(logo_info) == 0:
        logg.error(
            "workflow step addpic2pdf(%s): Error: no logo images found",
            current_workflow_step.id)
        FATAL_ERROR = True
        FATAL_ERROR_STR += " - Error: no logo images found"
    keep_params = copyDictValues(req.params, {}, KEEP_PARAMS)
    context = {
        "key": req.params.get("key", req.session.get("key", "")),
        "error": req.params.get('addpic2pdf_error', ''),
        "node": node,
        "files": node.files,
        "wfs": current_workflow_step,
        "wfs_files": wfs_files,
        "logo_info": logo_info,
        "logo_info_list": logo_info_list,
        "getImageSize": get_pic_size,
        "pdf_page_count": pdf_pagecount,
        "pdf_dimensions": pdf_dimensions,
        "json_pdf_dimensions": json.dumps(pdf_dimensions),
        "keep_params": json.dumps(keep_params),
        "startpageno": startpageno,
        "FATAL_ERROR": {
            False: 'false',
            True: 'true'
        }[bool(FATAL_ERROR)],
        "user": users.getUserFromRequest(req),
        "prefix": self.get("prefix"),
        "buttons": self.tableRowButtons(node)
    }
    if FATAL_ERROR:
        context["error"] += " - %s" % (FATAL_ERROR_STR)
    return req.getTAL("workflow/addpic2pdf.html",
                      context,
                      macro="workflow_addpic2pdf")
def xmlToCommon(path, destPath=None):
    """Converts an Afterbirth xml to the common format"""
    xml = ET.parse(path)
    root = xml.getroot()  # can be stage, rooms, etc
    rooms = root.findall("room")
    ret = []
    for roomNode in rooms:
        # copy of the raw attributes; known keys are popped off as they are
        # consumed, leaving only the unknown extras in xmlProps
        roomXmlProps = dict(roomNode.attrib)
        rtype = int(roomNode.get("type") or "1")
        del roomXmlProps["type"]
        rvariant = int(roomNode.get("variant") or "0")
        del roomXmlProps["variant"]
        rsubtype = int(roomNode.get("subtype") or "0")
        del roomXmlProps["subtype"]
        difficulty = int(roomNode.get("difficulty") or "0")
        del roomXmlProps["difficulty"]
        roomName = roomNode.get("name") or ""
        del roomXmlProps["name"]
        rweight = float(roomNode.get("weight") or "1")
        del roomXmlProps["weight"]
        shape = int(roomNode.get("shape") or "-1")
        del roomXmlProps["shape"]
        if shape == -1:
            shape = None
        # +2: the stored dims exclude the surrounding wall ring
        width = int(roomNode.get("width") or "13") + 2
        height = int(roomNode.get("height") or "7") + 2
        dims = (width, height)
        # infer the shape id from the dimensions when none was given
        for k, s in Room.Shapes.items():
            if s["Dims"] == dims:
                shape = k
                break
        shape = shape or 1
        del roomXmlProps["width"]
        del roomXmlProps["height"]
        lastTestTime = roomXmlProps.get("lastTestTime", None)
        if lastTestTime:
            try:
                lastTestTime = datetime.datetime.fromisoformat(lastTestTime)
                del roomXmlProps["lastTestTime"]
            except:
                print("Invalid test time string found", lastTestTime)
                traceback.print_exception(*sys.exc_info())
                lastTestTime = None
        # door coords are shifted by +1 into wall-inclusive grid space
        doors = list(
            map(
                lambda door: [
                    int(door.get("x")) + 1,
                    int(door.get("y")) + 1,
                    door.get("exists", "0")[0] in "1tTyY",
                ],
                roomNode.findall("door"),
            ))
        room = Room(roomName, None, difficulty, rweight, rtype, rvariant,
                    rsubtype, shape, doors)
        room.xmlProps = roomXmlProps
        room.lastTestTime = lastTestTime
        ret.append(room)
        realWidth = room.info.dims[0]
        gridLen = room.info.gridLen()
        for spawn in roomNode.findall("spawn"):
            ex, ey, stackedEnts = (
                int(spawn.get("x")) + 1,
                int(spawn.get("y")) + 1,
                spawn.findall("entity"),
            )
            grindex = Room.Info.gridIndex(ex, ey, realWidth)
            if grindex >= gridLen:
                # out-of-bounds spawn position: skip the whole stack
                print(
                    f"Discarding the current entity stack due to invalid position! {room.getPrefix()}: {ex-1},{ey-1}"
                )
                continue
            ents = room.gridSpawns[grindex]
            for ent in stackedEnts:
                entityXmlProps = dict(ent.attrib)
                etype, evariant, esubtype, eweight = (
                    int(ent.get("type")),
                    int(ent.get("variant")),
                    int(ent.get("subtype")),
                    float(ent.get("weight")),
                )
                del entityXmlProps["type"]
                del entityXmlProps["variant"]
                del entityXmlProps["subtype"]
                del entityXmlProps["weight"]
                ents.append(
                    Entity(ex, ey, etype, evariant, esubtype, eweight,
                           entityXmlProps))
        # self-assignment triggers the property setter (updates spawn count)
        room.gridSpawns = room.gridSpawns
    fileXmlProps = dict(root.attrib)
    return File(ret, fileXmlProps)
def stbRBToCommon(path):
    """Converts an Rebirth STB to the common format"""
    stb = open(path, "rb").read()
    # little-endian packed layouts of the Rebirth STB format
    headerPacker = struct.Struct("<I")
    roomBegPacker = struct.Struct("<IIBH")
    roomEndPacker = struct.Struct("<fBBBH")
    doorPacker = struct.Struct("<hh?")
    stackPacker = struct.Struct("<hhB")
    entPacker = struct.Struct("<HHHf")

    # Room count
    # No header for rebirth
    rooms = headerPacker.unpack_from(stb, 0)[0]
    off = headerPacker.size

    ret = []
    for r in range(rooms):
        # Room Type, Room Variant, Difficulty, Length of Room Name String
        # No subtype for rebirth
        roomData = roomBegPacker.unpack_from(stb, off)
        rtype, rvariant, difficulty, nameLen = roomData
        off += roomBegPacker.size
        # print ("Room Data: {roomData}")

        # Room Name
        roomName = struct.unpack_from(f"<{nameLen}s", stb, off)[0].decode()
        off += nameLen
        # print (f"Room Name: {roomName}")

        # Weight, width, height, number of doors, number of entities
        # No shape for rebirth
        entityTable = roomEndPacker.unpack_from(stb, off)
        rweight, width, height, numDoors, numEnts = entityTable
        off += roomEndPacker.size
        # print (f"Entity Table: {entityTable}")

        # We have to figure out the shape manually for rebirth
        width += 2
        height += 2
        shape = 1
        for s in [1, 4, 6, 8]:  # only valid room shapes as of rebirth, defaults to 1x1
            w, h = Room.Info(shape=s).dims
            if w == width and h == height:
                shape = s
                break

        doors = []
        for d in range(numDoors):
            # X, Y, exists
            doorX, doorY, exists = doorPacker.unpack_from(stb, off)
            off += doorPacker.size
            # +1 shifts into wall-inclusive grid coordinates
            doors.append([doorX + 1, doorY + 1, exists])

        room = Room(roomName, None, difficulty, rweight, rtype, rvariant, 0,
                    shape, doors)
        ret.append(room)

        realWidth = room.info.dims[0]
        gridLen = room.info.gridLen()
        for e in range(numEnts):
            # x, y, number of entities at this position
            ex, ey, stackedEnts = stackPacker.unpack_from(stb, off)
            ex += 1
            ey += 1
            off += stackPacker.size

            grindex = Room.Info.gridIndex(ex, ey, realWidth)
            if grindex >= gridLen:
                # out-of-bounds position: skip the whole stack, but keep the
                # read offset consistent by jumping over its entity records
                print(
                    f"Discarding the current entity stack due to invalid position! {room.getPrefix()}: {ex-1},{ey-1}"
                )
                off += entPacker.size * stackedEnts
                continue

            ents = room.gridSpawns[grindex]
            for s in range(stackedEnts):
                # type, variant, subtype, weight
                etype, evariant, esubtype, eweight = entPacker.unpack_from(
                    stb, off)
                off += entPacker.size
                ents.append(Entity(ex, ey, etype, evariant, esubtype, eweight))

        room.gridSpawns = room.gridSpawns  # used to update spawn count

    return File(ret)
def stbAntiToCommon(path):
    """Convert an Antibirth .stb room file to the common ``File`` format.

    Antibirth STBs start with an "STB2" magic header and, unlike Rebirth,
    store the room subtype and shape explicitly.

    :param path: filesystem path of the .stb file to read
    :return: a ``File`` wrapping the list of parsed ``Room`` objects
    :raises ValueError: when the STB2 magic header is missing
    """
    # fixed: use a context manager so the file handle is closed.
    with open(path, "rb") as fp:
        stb = fp.read()

    headerPacker = struct.Struct("<4sI")
    roomBegPacker = struct.Struct("<IIIBH")
    roomEndPacker = struct.Struct(
        "<fBBBBH9s")  # 9 padding bytes for some other room data
    doorPacker = struct.Struct("<hh?")
    stackPacker = struct.Struct("<hhB")
    entPacker = struct.Struct("<HHHf")

    # Header, Room count
    header, rooms = headerPacker.unpack_from(stb, 0)
    off = headerPacker.size
    if header.decode() != "STB2":
        raise ValueError("Antibirth STBs must have the STB2 header")

    ret = []
    for r in range(rooms):
        # Room Type, Room Variant, Subtype, Difficulty, name length
        rtype, rvariant, rsubtype, difficulty, nameLen = \
            roomBegPacker.unpack_from(stb, off)
        off += roomBegPacker.size

        # Room Name
        roomName = struct.unpack_from(f"<{nameLen}s", stb, off)[0].decode()
        off += nameLen

        # Weight, width, height, shape, number of doors, number of entities
        rweight, width, height, shape, numDoors, numEnts, extraData = \
            roomEndPacker.unpack_from(stb, off)
        off += roomEndPacker.size

        # Stored dimensions exclude the 1-tile border around the grid.
        width += 2
        height += 2

        if shape == 0:
            print(f"Bad room shape! {rvariant}, {roomName}, {width}, {height}")
            shape = 1

        doors = []
        for d in range(numDoors):
            # X, Y, exists
            doorX, doorY, exists = doorPacker.unpack_from(stb, off)
            off += doorPacker.size
            doors.append([doorX + 1, doorY + 1, exists])

        room = Room(roomName, None, difficulty, rweight, rtype, rvariant,
                    rsubtype, shape, doors)
        ret.append(room)

        if extraData != b"\x00\x00\x00\x00\x00\x00\x00\x00\x00":
            print(f"Room {room.getPrefix()} uses the extra bytes:", extraData)

        realWidth = room.info.dims[0]
        gridLen = room.info.gridLen()
        for e in range(numEnts):
            # x, y, number of entities stacked at this position
            ex, ey, stackedEnts = stackPacker.unpack_from(stb, off)
            ex += 1
            ey += 1
            off += stackPacker.size

            grindex = Room.Info.gridIndex(ex, ey, realWidth)
            if grindex >= gridLen:
                print(
                    f"Discarding the current entity stack due to invalid position! {room.getPrefix()}: {ex-1},{ey-1}"
                )
                # Skip the whole stack so parsing stays byte-aligned.
                off += entPacker.size * stackedEnts
                continue

            ents = room.gridSpawns[grindex]
            for s in range(stackedEnts):
                # type, variant, subtype, weight
                etype, evariant, esubtype, eweight = entPacker.unpack_from(
                    stb, off)
                off += entPacker.size
                ents.append(Entity(ex, ey, etype, evariant, esubtype,
                                   eweight))

        room.gridSpawns = room.gridSpawns  # used to update spawn count

    return File(ret)
def ntreadfile(params, cm, proc, update_vads, long_size, is_write=False):
    """Syscall callback for NtReadFile / NtWriteFile: record the operation.

    Resolves the file handle passed to the syscall against the process'
    handle table (via volatility), registers the referenced file in
    ``interproc_data`` on first sight, determines the read/write offset,
    and records a ``FileRead``/``FileWrite`` operation (optionally also
    writing a line to the interproc text log).

    :param params: dict carrying "cpu_index" and "cpu" of the trigger CPU
    :param cm: callback manager (unused here, kept for callback signature)
    :param proc: the monitored process that issued the syscall
    :param update_vads: when True, refresh the process VAD list at the end
    :param long_size: guest long size, for reading syscall parameters
    :param is_write: False -> NtReadFile, True -> NtWriteFile
    """
    import volatility.win32.tasks as tasks
    from core import FileRead
    from core import FileWrite
    from core import File
    from utils import get_addr_space
    import api

    # fixed: integer division -- "/" yields a float on Python 3 and this
    # value is compared against 4 / 8 below.
    TARGET_LONG_SIZE = api.get_os_bits() // 8

    global interproc_data
    global interproc_config

    cpu_index = params["cpu_index"]
    cpu = params["cpu"]

    # NtReadFile / NtWriteFile take 9 parameters:
    # IN HANDLE FileHandle,
    # IN HANDLE Event OPTIONAL,
    # IN PIO_APC_ROUTINE ApcRoutine OPTIONAL,
    # IN PVOID ApcContext OPTIONAL,
    # OUT PIO_STATUS_BLOCK IoStatusBlock,
    # OUT PVOID Buffer,
    # IN ULONG Length,
    # IN PLARGE_INTEGER ByteOffset OPTIONAL,
    # IN PULONG Key OPTIONAL
    pgd = api.get_running_process(cpu_index)

    # Read the parameters from the guest.
    ret_addr, file_handle, arg2, arg3, arg4, arg5, buff, length, offset_p, arg9 = read_parameters(
        cpu, 9, long_size)

    # Load volatility address space
    addr_space = get_addr_space(pgd)

    # Get list of processes, and filter out by the process that triggered
    # the call (current process id)
    eprocs = [
        t for t in tasks.pslist(addr_space)
        if t.UniqueProcessId == proc.get_pid()
    ]

    # Walk the handle table looking for the _FILE_OBJECT that the
    # syscall's FileHandle refers to.
    file_obj = None
    for task in eprocs:
        if task.UniqueProcessId == proc.get_pid(
        ) and task.ObjectTable.HandleTableList:
            for handle in task.ObjectTable.handles():
                if handle.is_valid(
                ) and handle.HandleValue == file_handle and handle.get_object_type(
                ) == "File":
                    file_obj = handle.dereference_as("_FILE_OBJECT")
                    break
            break

    if file_obj is not None:
        file_instance = interproc_data.get_file_by_file_name(
            str(file_obj.FileName))

        # If we have still not recorded the file, add it to files to record
        if file_instance is None:
            file_instance = File(str(file_obj.FileName))
            interproc_data.add_file(file_instance)

        # FO_SYNCHRONOUS_IO (0x0000002): the file object maintains the
        # current byte offset itself.
        is_offset_maintained = ((file_obj.Flags & 0x0000002) != 0)

        offset = None
        if offset_p == 0 and is_offset_maintained:
            # If no offset was specified, and the offset is maintained, the
            # real offset is taken from the file object.
            offset = int(file_obj.CurrentByteOffset.QuadPart)
        elif offset_p != 0:
            # If an offset is provided, the current offset in the
            # file_object will be updated, regardless of the flag.
            try:
                offset = struct.unpack("Q", api.r_va(pgd, offset_p, 8))[0]
            except Exception:  # fixed: was a bare except
                offset = 0
                pp_debug(
                    "Could not dereference offset in NtReadFile call in interproc_callbacks.py\n"
                )
        else:
            # If no offset was specified and the file object does not have
            # the flag set, we may be in front of some kind of corruption
            # error or deliberate manipulation.
            pp_debug(
                "[!] The file object flag FO_SYNCHRONOUS_IO is not set, and no offset was provided\n"
            )
            return

        # At this moment we do not record the data itself, only metadata.
        local_proc = proc
        if not is_write:
            op = FileRead(file_instance, local_proc, buff, offset, length,
                          None)
            if interproc_config.interproc_text_log and interproc_config.interproc_text_log_handle is not None:
                f = interproc_config.interproc_text_log_handle
                if TARGET_LONG_SIZE == 4:
                    f.write(
                        "[PID: %08x] NtReadFile: Offset: %08x Size: %08x / %s\n"
                        % (proc.get_pid(), offset, length,
                           str(file_obj.FileName)))
                elif TARGET_LONG_SIZE == 8:
                    f.write(
                        "[PID: %08x] NtReadFile: Offset: %16x Size: %16x / %s\n"
                        % (proc.get_pid(), offset, length,
                           str(file_obj.FileName)))
        else:
            op = FileWrite(file_instance, local_proc, buff, offset, length,
                           None)
            if interproc_config.interproc_text_log and interproc_config.interproc_text_log_handle is not None:
                f = interproc_config.interproc_text_log_handle
                if TARGET_LONG_SIZE == 4:
                    f.write(
                        "[PID: %08x] NtWriteFile: Offset: %08x Size: %08x / %s\n"
                        % (proc.get_pid(), offset, length,
                           str(file_obj.FileName)))
                elif TARGET_LONG_SIZE == 8:
                    f.write(
                        "[PID: %08x] NtWriteFile: Offset: %16x Size: %16x / %s\n"
                        % (proc.get_pid(), offset, length,
                           str(file_obj.FileName)))

        file_instance.add_operation(op)
        local_proc.add_file_operation(op)

    if update_vads:
        proc.update_vads()
def runAction(self, node, op=""):
    """Fill the workflow step's PDF form with node metadata.

    The form is taken from this step's 'upload_pdfform' upload field and
    its fields are populated from node attributes plus a few pseudo
    fields (node.id, node.type, node.schema, date(), time(), 'a+b'
    fallback chains and '[att:...]' templates). Depending on the step
    configuration the filled form is either appended to the node's
    document PDF or stored as a separate 'pdf_form' file in the import
    directory. Forwards the node to the "true" operation on success and
    to "false" when the PDF could not be created.
    """
    # Find the node's document file; fnode keeps the last file when no
    # document is present (guarded again before use below).
    fnode = None
    for fnode in node.files:
        if fnode.filetype == "document":
            break

    def reformatAuthors(s):
        # "a;b;c" -> "a, b and c"
        authors = s.strip().split(";")
        if len(authors) > 1:
            authors = ", ".join(authors[:-1]) + " and " + authors[-1]
        else:
            authors = authors[0]
        return authors

    # get pdf form appended to this workflow step through upload field
    # 'upload_pdfform'
    current_workflow = getNodeWorkflow(node)
    current_workflow_step = getNodeWorkflowStep(node)
    formfilelist, formfilelist2 = getFilelist(current_workflow_step,
                                              'upload_pdfform')

    pdf_fields_editable = current_workflow_step.get("pdf_fields_editable")
    pdf_form_separate = current_workflow_step.get("pdf_form_separate")
    pdf_form_overwrite = current_workflow_step.get("pdf_form_overwrite")

    # Normalize the step flags to booleans.
    pdf_fields_editable = pdf_fields_editable.lower() in ["1", "true"]
    pdf_form_separate = pdf_form_separate.lower() in ["1", "true"]
    # fixed: this flag was used as a raw string, so values like "0" or
    # "false" counted as true; normalize it like the two flags above.
    pdf_form_overwrite = pdf_form_overwrite.lower() in ["1", "true"]

    fields = []
    f_retrieve_path = None
    schema = getMetaType(node.schema)

    if formfilelist:
        # take newest (mtime)
        f_mtime, f_name, f_mimetype, f_size, f_type, f_retrieve_path, f = \
            formfilelist[-1]

        for field_dict in parse_pdftk_fields_dump(
                get_pdftk_fields_dump(f_retrieve_path)):
            fieldname = field_dict.get('FieldName', None)
            if fieldname:
                value = ''
                if fieldname in dict(node.attrs.items()):
                    # Plain node attribute: use the schema-formatted value.
                    schemafield = schema.children.filter_by(
                        name=fieldname).first()
                    value = schemafield.getFormattedValue(node)[1]
                    if fieldname.find('author') >= 0:
                        value = reformatAuthors(value)
                elif fieldname.lower() == 'node.schema':
                    value = getMetaType(node.schema).getLongName()
                elif fieldname.lower() == 'node.id':
                    value = unicode(node.id)
                elif fieldname.lower() == 'node.type':
                    value = node.type
                elif fieldname.lower() == 'date()':
                    value = format_date(now(), format='%d.%m.%Y')
                elif fieldname.lower() == 'time()':
                    value = format_date(now(), format='%H:%M:%S')
                elif fieldname.find("+") > 0:
                    # Fallback chain: first non-empty attribute wins.
                    for _fn in fieldname.split('+'):
                        value = node.get(_fn)
                        if value:
                            break
                elif '[att:' in fieldname:
                    # Template field: substitute every [att:name] placeholder.
                    value = fieldname
                    while '[att:' in value:
                        m = re.search('(?<=\[att:)([^&\]]+)', value)
                        if m:
                            if m.group(0) == 'id':
                                v = unicode(node.id)
                            elif m.group(0) == 'type':
                                v = node.type
                            elif m.group(0) == 'schema':
                                v = getMetaType(node.schema).getLongName()
                            else:
                                schemafield = schema.children.filter_by(
                                    name=m.group(0)).first()
                                v = schemafield.getFormattedValue(node)[0]
                            value = value.replace('[att:%s]' % (m.group(0)),
                                                  v)
                else:
                    logg.warning(
                        "workflowstep %s (%s): could not find attribute for pdf form field '%s' - node: '%s' (%s)",
                        current_workflow_step.name, current_workflow_step.id,
                        fieldname, node.name, node.id)
                fields.append((fieldname, remove_tags(desc(value))))

    if not pdf_form_separate and fnode and f_retrieve_path and os.path.isfile(
            f_retrieve_path):
        # Merge the filled form into the node's document PDF.
        pages = fillPDFForm(f_retrieve_path,
                            fields,
                            input_is_fullpath=True,
                            editable=pdf_fields_editable)
        if pages == "":  # error in pdf creation -> forward to false operation
            logg.error(
                "workflowstep %s (%s): could not create pdf file - node: '%s' (%s)"
                % (current_workflow_step.name, current_workflow_step.id,
                   node.name, node.id))
            self.forward(node, False)
            return
        origname = fnode.abspath
        outfile = addPagesToPDF(pages, origname)

        # fixed: iterate over a copy -- removing items from the list being
        # iterated skips every other element.
        for f in list(node.files):
            node.files.remove(f)
        fnode.path = outfile.replace(config.get("paths.datadir"), "")
        node.files.append(fnode)
        node.files.append(File(origname, 'upload',
                               'application/pdf'))  # store original filename
        node.event_files_changed()
        db.session.commit()
        logg.info(
            "workflow '%s' (%s), workflowstep '%s' (%s): added pdf form to pdf (node '%s' (%s)) fields: %s",
            current_workflow.name, current_workflow.id,
            current_workflow_step.name, current_workflow_step.id, node.name,
            node.id, fields)

    elif pdf_form_separate and f_retrieve_path and os.path.isfile(
            f_retrieve_path):
        # Keep the filled form as a separate 'pdf_form' file.
        pages = fillPDFForm(f_retrieve_path,
                            fields,
                            input_is_fullpath=True,
                            editable=pdf_fields_editable)
        if pages == "":  # error in pdf creation -> forward to false operation
            logg.error(
                "workflowstep %s (%s): could not create pdf file - node: '%s' (%s)"
                % (current_workflow_step.name, current_workflow_step.id,
                   node.name, node.id))
            self.forward(node, False)
            return
        importdir = getImportDir()
        try:
            new_form_path = join_paths(importdir,
                                       "%s_%s" % (node.id, f_name))
            counter = 0
            if not pdf_form_overwrite:  # build correct, unique filename
                while os.path.isfile(new_form_path):
                    counter += 1
                    new_form_path = join_paths(
                        importdir, "%s_%s_%s" % (node.id, counter, f_name))
            # copy new file and remove tmp
            shutil.copy(pages, new_form_path)
            if os.path.exists(pages):
                os.remove(pages)
        except Exception:
            logg.exception(
                "workflowstep %s (%s): could not copy pdf form to import directory - node: '%s' (%s), import directory: '%s'",
                current_workflow_step.name, current_workflow_step.id,
                node.name, node.id, importdir)

        found = 0
        for fn in node.files:
            if fn.abspath == new_form_path:
                found = 1
                break
        if found == 0 or (found == 1 and not pdf_form_overwrite):
            node.files.append(
                File(new_form_path, 'pdf_form', 'application/pdf'))
            db.session.commit()
        logg.info(
            "workflow '%s' (%s), workflowstep '%s' (%s): added separate pdf form to node (node '%s' (%s)) fields: %s, path: '%s'",
            current_workflow.name, current_workflow.id,
            current_workflow_step.name, current_workflow_step.id, node.name,
            node.id, fields, new_form_path)
    else:
        logg.warning(
            "workflowstep %s (%s): could not process pdf form - node: '%s' (%s)",
            current_workflow_step.name, current_workflow_step.id, node.name,
            node.id)

    self.forward(node, True)
def xml_start_element(self, name, attrs):
    """Expat start-element handler for the mediatum XML node import.

    Handles the elements:
      - nodelist:  logs the export version once at the start
      - node:      creates (or re-uses) a Node and pushes it on the stack
      - attribute: sets an attribute value (or remembers the name when the
                   value arrives as character data)
      - child:     records a child node id to be linked later
      - file:      attaches a File to the current node
    """
    # Current parent node: top of the node stack, if any.
    try:
        node = self.nodes[-1]
    except IndexError:  # fixed: was a bare except
        node = None

    if name == "nodelist":
        if "exportversion" in attrs:
            logg.info("starting xml import: %s", attrs)

    elif name == "node":
        self.node_already_seen = False
        datatype = attrs.get("datatype")
        if datatype is None:
            # compatibility for old xml files created with mediatum:
            # fall back to the "type" attribute, then to "directory".
            t = attrs.get("type")
            datatype = t if t is not None else "directory"
        if "id" not in attrs:
            # Synthesize an id so the node can still be referenced.
            attrs["id"] = ustr(random.random())
        old_id = attrs["id"]

        if old_id in self.id2node:
            # Node was already imported earlier in this document.
            node = self.id2node[old_id]
            self.node_already_seen = True
            return
        elif datatype in ["mapping"]:
            content_class = Node.get_class_for_typestring(datatype)
            node = content_class(name=(attrs["name"] + "_imported_" +
                                       old_id))
        else:
            content_class = Node.get_class_for_typestring(datatype)
            node = content_class(name=attrs["name"])

        # todo: handle access
        #if "read" in attrs:
        #    node.setAccess("read", attrs["read"].encode("utf-8"))
        #if "write" in attrs:
        #    node.setAccess("write", attrs["write"].encode("utf-8"))
        #if "data" in attrs:
        #    node.setAccess("data", attrs["data"].encode("utf-8"))

        if self.verbose:
            logg.info("created node '%s', '%s', '%s', old_id from attr='%s'",
                      node.name, node.type, node.id, attrs["id"])

        self.id2node[attrs["id"]] = node
        node.tmpchilds = []
        self.nodes.append(node)
        if self.root is None:
            self.root = node
        return

    elif name == "attribute" and not self.node_already_seen:
        attr_name = attrs["name"]
        if "value" in attrs:
            if attr_name in ["valuelist"]:
                # Normalize newline-separated value lists to ';' separated.
                node.set(
                    attr_name, attrs["value"].replace("\n\n", "\n").replace(
                        "\n", ";").replace(";;", ";"))
            else:
                node.set(attr_name, attrs["value"])
        else:
            # Value arrives as character data; remember the attribute name.
            self.attributename = attr_name

    elif name == "child" and not self.node_already_seen:
        nid = attrs["id"]
        node.tmpchilds += [nid]

    elif name == "file" and not self.node_already_seen:
        # fixed: use .get() instead of bare try/except around key lookups.
        datatype = attrs.get("type")
        mimetype = attrs.get("mime-type")
        filename = attrs["filename"]
        node.files.append(
            File(path=filename, filetype=datatype, mimetype=mimetype))