def favicon():
    """generate the favicon... ugly"""
    proj()
    print(". generating favicons...")
    sizes = [16, 32, 64, 128]

    def tmp_file(size):
        # Per-size scratch file under /tmp.
        return "/tmp/favicon-%s.png" % size

    for size in sizes:
        print("... %sx%s" % (size, size))
        sh.convert("design/logo.svg",
                   "-resize", "%sx%s" % (size, size),
                   tmp_file(size))

    print(".. generating bundle")
    bundle_args = [tmp_file(size) for size in sizes]
    bundle_args += ["-colors", 256, "static/img/favicon.ico"]
    sh.convert(*bundle_args)

    print(".. cleaning up")
    sh.rm(sh.glob("/tmp/favicon-*.png"))
def download(target, temp_dir):
    """Download every chunk in CHUNKS, convert each to raw GRAY data and
    append the raw bytes to *target*.

    Parameters:
        target: writable binary file object receiving the concatenated data.
        temp_dir: working directory for intermediate zip/tiff/raw files.

    Raises:
        ValueError: if an archive has unexpected contents or a converted
            chunk has the wrong size.
    """
    zip_path = path.join(temp_dir, "temp.zip")
    tgt_path = path.join(temp_dir, "chunk")
    for chunk in CHUNKS:
        tif_name = TIF_FORMAT.format(chunk)
        tif_path = path.join(temp_dir, tif_name)
        wget(URL_FORMAT.format(chunk), q=True, O=zip_path)
        with zipfile.ZipFile(zip_path, 'r') as pack:
            contents = pack.namelist()
            if contents != [tif_name]:
                # BUG FIX: "{:r}" is an invalid format spec and itself
                # raised "Unknown format code 'r'"; "{!r}" is the repr
                # conversion that was intended.
                raise ValueError("Bad archive contents: {!r}".format(contents))
        unzip(zip_path, d=temp_dir)
        os.unlink(zip_path)
        # Strip the TIFF container, leaving raw 16-bit grayscale samples.
        convert(tif_path, '-quiet', 'GRAY:{}'.format(tgt_path))
        os.unlink(tif_path)
        if os.stat(tgt_path).st_size != EXPECT_SIZE:
            raise ValueError("Bad converted size: {}".format(chunk))
        with open(tgt_path, "rb") as f:
            shutil.copyfileobj(f, target)
        os.unlink(tgt_path)
def main(args):
    """ Convert an image to png if needed, then optimize it. """
    path_input_file, level = args["FILE"], args["--level"]
    base, dot_ext = path.splitext(path_input_file)
    path_tmp_png = base + ".png"
    input_file_ext = dot_ext[1:]
    if input_file_ext != "png":
        # Only lossless formats are worth converting; refuse lossy input.
        if input_file_ext not in ("bmp", "tiff", "raw"):
            print("Image is in a lossy format, aborting!")
            exit(1)
        print("Converting file to png...")
        if path.exists(path_tmp_png):
            remove(path_tmp_png)
        convert(path_input_file, path_tmp_png)
    print("Optimizing png...")
    # Will overwrite path_tmp_png with its output.
    optipng("-o" + level, path_tmp_png)
    print("Done!")
    print("Output file at {out_path}".format(
        out_path=path.abspath(path_tmp_png)))
    exit(0)
def process_image(content, language, noOCR=False, despeckle=False):
    """OCR a single image and return the recognized text.

    Parameters:
        content: raw image bytes, fed to ImageMagick on stdin.
        language: tesseract language code (e.g. "eng").
        noOCR: when True, skip OCR entirely and return None.
        despeckle: accepted for interface symmetry with process_pdf;
            unused in this function.

    Returns:
        The stripped unicode text, or None when OCR is disabled or no
        text was recognized.
    """
    if noOCR:
        logging.error("OCR disabled, no text available")
        return None
    from sh import tesseract, convert
    tmpFolder = tempfile.mkdtemp(prefix='imap-dms-ocr-tmp')
    out_png = tmpFolder + "/out.png"
    logging.debug("Converting image in tmpfolder %s", tmpFolder)
    convert(convert_options, "-", out_png, _in=content, _in_bufsize=10000)
    logging.debug("Running tesseract with language %s on file %s",
                  language, out_png)
    tesseract(out_png, tmpFolder + "/out", "-l", language)
    # Context manager guarantees the handle is closed even on decode errors.
    with open(tmpFolder + "/out.txt", "r") as f:
        content = unicode(f.read(), "utf-8")
    # Clean up the scratch directory (matches process_pdf, which was
    # already doing this; previously leaked here).
    shutil.rmtree(tmpFolder)
    logging.debug("Found %d chars for this page", len(content))
    content = content.strip()
    # An empty OCR result is treated the same as "no text available".
    if not content:
        return None
    return content
def run(self):
    """Loop forever: annotate the latest webcam frame, SSTV-encode it."""
    global g, pid
    time.sleep(15)
    try:
        while True:
            sstv_debug_log('Encoder', 'Encoder reporting for duty')
            convert(
                '/tmp/latest.jpg',
                '-resize', '320x240!',
                '-pointsize', '35',
                # Callsign, drawn twice (black shadow + white foreground).
                '-fill', 'black', '-annotate', '+0+37', 'W8UPD',
                '-fill', 'white', '-annotate', '+0+40', 'W8UPD',
                # GPS coordinates, same shadow/foreground trick.
                '-fill', 'black', '-annotate', '+30+230',
                '%f, %f' % (g.fix.latitude, g.fix.longitude),
                '-fill', 'white', '-annotate', '+33+233',
                '%f, %f' % (g.fix.latitude, g.fix.longitude),
                '/tmp/latest.ppm')
            robot36_encode('/tmp/latest.ppm', '/tmp/inprogress.wav')
            mv('/tmp/inprogress.wav', '/tmp/latest.wav')
    except Exception as e:
        sstv_debug_log('Encoder', str(e), True)
        os._exit(1)
def main(batch_dir):
    """Process one capture batch directory.

    Reads the EEPROM calibration data, writes one CSV of Fahrenheit
    temperatures per IR capture into <batch>/temperatures/, and rotates
    every camera image by 270 degrees into <batch>/rotated/.
    """
    print('Starting ...')
    if batch_dir[-1] != '/':
        batch_dir += '/'
    eeprom_file = batch_dir + 'eeprom.csv'
    eeprom_data = read_eeprom(eeprom_file)
    print('Read EEPROM data... ')
    constants = precalculate_constants(eeprom_data)
    print('Calculated constants ...')
    ir_cap_dir = batch_dir + 'ircapture/'
    temps_dir = batch_dir + 'temperatures/'
    rot_dir = batch_dir + 'rotated/'
    cam_dir = batch_dir + 'cameraimages/'
    try:
        sh.mkdir(temps_dir)
    except sh.ErrorReturnCode_1:
        pass  # directory already exists
    num_files = len(listdir(ir_cap_dir))
    for n in range(num_files):
        # Zero-pad to three digits (replaces the manual if-chains).
        fnum = str(n).zfill(3)
        print('Processing', fnum, '... ', end='')
        ir_file = ir_cap_dir + 'pic' + fnum + '.txt'
        temp_file = temps_dir + 'img' + fnum + '.csv'
        ir_vals = read_ir(ir_file)
        ptat_val = read_ptat(ir_file)
        cpix_val = read_cpix(ir_file)
        TA = calculate_ta(constants, ptat_val)
        try:
            temperatures, max_t, min_t = calculate_to(
                eeprom_data, ir_vals, constants, TA, cpix_val)
            with open(temp_file, 'w') as T_FILE:
                T_FILE.write(','.join(
                    [str(fahrenheit(t)) for t in temperatures]))
                T_FILE.write('\n')
        except TypeError:
            # calculate_to returned None for a bad frame; skip it.
            print('FAIL ', fnum, '! Continuing ... ', sep='', end='')
    try:
        sh.mkdir(rot_dir)
    except sh.ErrorReturnCode_1:
        pass  # directory already exists
    num_files = len(listdir(cam_dir))
    for n in range(num_files):
        fnum = str(n).zfill(3)
        cam_name = cam_dir + 'pic' + fnum + '.png'
        rot_name = rot_dir + 'img' + fnum + '.png'
        sh.convert(cam_name, '-rotate', '270', rot_name)
    print('\nDone.')
def tiff_to_pdf(tiff):
    """Convert *tiff* to an A4, JPEG-compressed 'content.pdf'.

    The source tiff is deleted after a successful conversion.
    Returns the output filename 'content.pdf'.
    """
    convert(
        '-compress', 'JPEG',
        '-page', 'A4',
        tiff,
        'content.pdf'
    )
    # BUG FIX: remove the tiff that was actually converted, not a
    # hard-coded 'content.tiff' (which broke for any other input name).
    os.remove(tiff)
    return 'content.pdf'
def resolve_file():
    """Inspect the request object to determine the file being requested.

    If the request is for a thumbnail and it has not been generated, do so
    before returning.

    Returns the relative path to the requested file in the base
    attachments directory.
    """
    # "T" marks a thumbnail request; anything else serves the original.
    thumb_p = (request.query['type'] == "T")
    storename = request.query.filename
    relpath = get_rel_path(request.query.coll, thumb_p)
    if not thumb_p:
        # Originals are served directly; nothing to generate.
        return path.join(relpath, storename)
    basepath = path.join(settings.BASE_DIR, relpath)
    scale = int(request.query.scale)
    mimetype, encoding = guess_type(storename)
    assert mimetype in settings.CAN_THUMBNAIL
    root, ext = path.splitext(storename)
    if mimetype in ('application/pdf', 'image/tiff'):
        # use PNG for PDF thumbnails
        ext = '.png'
    # The scale is baked into the filename so multiple sizes can coexist.
    scaled_name = "%s_%d%s" % (root, scale, ext)
    scaled_pathname = path.join(basepath, scaled_name)
    if path.exists(scaled_pathname):
        log("Serving previously scaled thumbnail")
        return path.join(relpath, scaled_name)
    if not path.exists(basepath):
        mkdir(basepath)
    orig_dir = path.join(settings.BASE_DIR,
                         get_rel_path(request.query.coll, thumb_p=False))
    orig_path = path.join(orig_dir, storename)
    if not path.exists(orig_path):
        abort(404, "Missing original: %s" % orig_path)
    input_spec = orig_path
    # ">" makes ImageMagick shrink-only (never enlarge small originals).
    convert_args = ('-resize', "%dx%d>" % (scale, scale))
    if mimetype == 'application/pdf':
        input_spec += '[0]'  # only thumbnail first page of PDF
        convert_args += ('-background', 'white',
                         '-flatten')  # add white background to PDFs
    log("Scaling thumbnail to %d" % scale)
    convert(input_spec, *(convert_args + (scaled_pathname, )))
    return path.join(relpath, scaled_name)
def compress(name):
    """Re-encode MEDIA_ROOT/<name> as a quality-10 JPEG under compressed/.

    The original file and the intermediate jpg are removed afterwards.
    """
    mp = settings.MEDIA_ROOT
    src = '%s/%s' % (mp, name)
    i = '%s/%s.jpg' % (mp, name)
    sh.convert(src, i)
    dst = '%s/compressed/%s.jpg' % (mp, name)
    # SECURITY FIX: build the command as an argument list instead of a
    # shell-interpreted string so a crafted file name cannot inject shell
    # commands; the former "> dst" redirection becomes an explicit stdout
    # file handle.
    with open(dst, 'wb') as out:
        subprocess.check_call(
            ['/opt/mozjpeg/bin/cjpeg', '-quality', '10', i],
            stdout=out)
    sh.rm(src)
    sh.rm(i)
def convert_pdf_to_png(source):
    """Render every page of *source* to PNG, then stack them into one file."""
    print("Convert: {} to PNG (one per page)".format(source))
    sh.gs("-sDEVICE=pngalpha",
          "-o", "gs_tmp.file-%03d.png",
          "-dTextAlphaBits=4",
          "-r600",
          source)
    print("Merge: All converted PNG to a single one")
    # Use +append for horizontal and -append for vertical
    sh.convert("-append", "*.png", TMP_PNG_NAME)
def merge_tiffs(tiffs):
    """Combine page tiffs into content.tiff and emit a jpg preview.

    Falls back to every _out*.tiff in the current directory when no list
    is given. All pages except the first are deleted after merging.
    """
    if not tiffs:
        tiffs = [name for name in os.listdir(os.curdir)
                 if name.startswith('_out') and name.endswith('.tiff')]
    if not tiffs:
        sys.exit('No tiff files found')
    tiffcp(tiffs, 'content.tiff')
    # First page doubles as the preview image.
    convert(tiffs[0], 'preview.jpg')
    for extra in tiffs[1:]:
        os.remove(extra)
    return 'content.tiff'
def process_pdf(content, language, noOCR=False, noPDFText=False, despeckle=False):
    """Extract text from a PDF: embedded text via pdftotext plus per-page OCR.

    Parameters:
        content: raw PDF bytes, fed to the external tools on stdin.
        language: tesseract language code (e.g. "eng").
        noOCR: skip the OCR pass.
        noPDFText: skip the pdftotext pass.
        despeckle: use the despeckling convert options for OCR rendering.

    Returns the combined text, or None if nothing was recognized.
    """
    if noPDFText:
        logging.debug("pdftotext disabled")
        pdfText = ""
    else:
        from sh import pdftotext
        logging.debug("Extracting pdf contents using pdftotext")
        # '-' '-' reads the PDF from stdin and writes text to stdout.
        pdfText = unicode(pdftotext('-', '-', _in=content, _in_bufsize=10000))
        logging.debug("Extracted %d chars from the text", len(pdfText))
    if noOCR:
        logging.debug("OCR disabled, returning only pdf text")
    else:
        from sh import identify, tesseract, convert
        logging.debug("Starting OCR Operation")
        logging.debug("Extracing page numbers")
        # identify prints one page index per page ("%p "); parse into ints.
        pageNos = map(int, str(identify("-format", "%p ", "pdf:-",
                                        _in=content, _in_bufsize=10000)).
                      strip().split(' '))
        logging.debug("Found pages: %s", pageNos)
        allPages = u""
        for pageNo in pageNos:
            logging.debug("Processing page %d", pageNo)
            tmpFolder = tempfile.mkdtemp(prefix='imap-dms-ocr-tmp')
            co = convert_options if not despeckle else convert_options_despeckle
            logging.debug("Converting page to image in tmpfolder %s with options %s",
                          tmpFolder, co)
            # "pdf:-[N]" renders only page N of the stdin PDF.
            convert(co, "pdf:-[%d]" % (pageNo), tmpFolder + "/out.png",
                    _in=content, _in_bufsize=10000)
            logging.debug("Running tesseract with language %s on file %s",
                          language, tmpFolder + "/out.png")
            tesseract(tmpFolder + "/out.png", tmpFolder + "/out", "-l", language)
            f = open(tmpFolder + "/out.txt", "r")
            pageContent = unicode(f.read(), "utf-8")
            f.close()
            logging.debug("Found %d chars for this page", len(pageContent))
            allPages += pageContent + u"\n"
            shutil.rmtree(tmpFolder)
        # Keep embedded text and OCR output, separated by blank lines.
        pdfText = pdfText.strip() + "\n\n\n" + allPages.strip()
    if(len(pdfText.strip()) == 0):
        logging.error("No text could be recognized")
        return None
    else:
        return pdfText
def resolve_file():
    """Inspect the request object to determine the file being requested.

    If the request is for a thumbnail and it has not been generated, do so
    before returning.

    Returns the relative path to the requested file in the base
    attachments directory.
    """
    # "T" marks a thumbnail request; anything else serves the original.
    thumb_p = (request.query['type'] == "T")
    storename = request.query.filename
    relpath = get_rel_path(request.query.coll, thumb_p)
    if not thumb_p:
        # Originals are served directly; nothing to generate.
        return path.join(relpath, storename)
    basepath = path.join(settings.BASE_DIR, relpath)
    scale = int(request.query.scale)
    mimetype, encoding = guess_type(storename)
    assert mimetype in settings.CAN_THUMBNAIL
    root, ext = path.splitext(storename)
    if mimetype in ('application/pdf', 'image/tiff'):
        # use PNG for PDF thumbnails
        ext = '.png'
    # The scale is baked into the filename so multiple sizes can coexist.
    scaled_name = "%s_%d%s" % (root, scale, ext)
    scaled_pathname = path.join(basepath, scaled_name)
    if path.exists(scaled_pathname):
        log("Serving previously scaled thumbnail")
        return path.join(relpath, scaled_name)
    if not path.exists(basepath):
        mkdir(basepath)
    orig_dir = path.join(settings.BASE_DIR,
                         get_rel_path(request.query.coll, thumb_p=False))
    orig_path = path.join(orig_dir, storename)
    if not path.exists(orig_path):
        abort(404, "Missing original: %s" % orig_path)
    input_spec = orig_path
    # ">" makes ImageMagick shrink-only (never enlarge small originals).
    convert_args = ('-resize', "%dx%d>" % (scale, scale))
    if mimetype == 'application/pdf':
        input_spec += '[0]'  # only thumbnail first page of PDF
        convert_args += ('-background', 'white', '-flatten')  # add white background to PDFs
    log("Scaling thumbnail to %d" % scale)
    convert(input_spec, *(convert_args + (scaled_pathname,)))
    return path.join(relpath, scaled_name)
def to_gif(self, filename, start, end):
    """Extremely slow method to go from vpython -> GIF. Linux only... ?
    """
    self.motion.register_dfs_cb(self._handle_dfs)
    self.motion.register_dfs_end(self._handle_end_frame_gif)
    self.motion.traverse(self.skeleton.root, start, end)
    # Assemble the ImageMagick argument vector: options, frames, output.
    args = ['-delay', '1x80', '-loop', '0']
    args.extend(frame.name for frame in self.gif_frames)
    args.append(filename)
    sh.convert(args)
    self.gif_frames = []
class ImagesBatch(object):
    """One density directory of SVG sources; the directory name is the
    dp size the assets were authored at.

    NOTE(review): Python 2 only -- _do_flop uses tuple parameter
    unpacking, which was removed in Python 3 (PEP 3113).
    """

    def __init__(self, path):
        # Directory name encodes the dp size (e.g. ".../24" -> 24dp).
        self.dp = int(os.path.basename(path))
        self.path = path
        self.svgs = [
            os.path.abspath(p) for p in glob(os.path.join(path, "*.svg"))
        ]

    def _do_export(self, density, input_path, drawable):
        """Export one SVG at the pixel size for *density* into *drawable*,
        crushing the PNG. Returns (output_path, noflip)."""
        nonspecific = ".nonspecific." in input_path
        # Direction-neutral assets must not be mirrored for RTL.
        noflip = nonspecific or input_path.endswith(".noflip.svg")
        # Strip marker suffixes (".nonspecific", ".noflip") from the name.
        file_name = os.path.basename(
            os.path.splitext(input_path)[0].split(".")[0] + ".png")
        folder_path = os.path.join(OUTPUT_PATH_PREFIX, drawable)
        sh.mkdir("-p", folder_path)
        output_file_path = os.path.join(folder_path, file_name)
        output_precrush_path = output_file_path + "_"
        px = int(DENSITIES[density] * self.dp)
        sh.rsvg_convert(input_path, "-a", h=px, o=output_precrush_path)
        sh.pngcrush("-q", "-reduce", output_precrush_path, output_file_path)
        sh.rm(output_precrush_path)
        return (output_file_path, noflip)

    def _do_flop(self, density, (input_path, noflip)):
        """Mirror an exported PNG into the RTL drawable folder, unless the
        asset is marked noflip."""
        if noflip:
            return
        folder_name = os.path.join(OUTPUT_PATH_PREFIX,
                                   "drawable-ldrtl-" + density)
        output_file_path = os.path.join(folder_name,
                                        os.path.basename(input_path))
        sh.mkdir("-p", folder_name)
        sh.convert(input_path, "-flop", output_file_path)
def favicon():
    """Render the SVG logo at several sizes and bundle dist/favicon.ico."""
    proj()
    print(". generating favicons...")
    sizes = [16, 32, 64, 128]

    def tmp_file(size):
        # Per-size scratch file under /tmp.
        return "/tmp/favicon-%s.png" % size

    for size in sizes:
        print("... %sx%s" % (size, size))
        sh.convert("svg/logo.svg",
                   "-resize", "%sx%s" % (size, size),
                   tmp_file(size))

    print(".. generating bundle")
    bundle_args = [tmp_file(size) for size in sizes]
    bundle_args += ["-colors", 256, "dist/favicon.ico"]
    sh.convert(*bundle_args)

    print(".. cleaning up")
    sh.rm(sh.glob("/tmp/favicon-*.png"))
def process_dir(batch_dir):
    """Load one capture batch and return (camera_images, ir_temperature_grids).

    camera_images are OpenCV arrays of the 270-degree-rotated RGB frames;
    ir_temperature_grids are lists of Fahrenheit values (as strings), one
    list per successfully processed IR capture.

    Raises FileNotFoundError when the IR and RGB image counts differ.
    """
    if batch_dir[-1] != '/':
        batch_dir += '/'
    # Get directories
    eeprom_file = batch_dir + 'eeprom.csv'
    ir_cap_dir = batch_dir + 'ircapture/'
    cam_dir = batch_dir + 'cameraimages/'
    # Process the EEPROM data
    eeprom_data = read_eeprom(eeprom_file)
    constants = precalculate_constants(eeprom_data)
    # Count the images, making sure the IR and RGB match
    num_files = len(listdir(ir_cap_dir))
    if num_files != len(listdir(cam_dir)):
        raise FileNotFoundError
    ir_imgs = []
    imgs = []
    # Process the images
    for n in range(num_files):
        # Get the filenames (indices are zero-padded to three digits)
        fnum = str(n)
        if n < 100:
            fnum = '0' + fnum
        if n < 10:
            fnum = '0' + fnum
        ir_file = ir_cap_dir + 'pic' + fnum + '.txt'
        cam_name = cam_dir + 'pic' + fnum + '.png'  # '.png'
        # Preliminary IR calculations
        ir_vals = read_ir(ir_file)
        ptat_val = read_ptat(ir_file)
        cpix_val = read_cpix(ir_file)
        TA = calculate_ta(constants, ptat_val)
        # Get the temperatures
        try:
            temperatures, max_t, min_t = calculate_to(eeprom_data, ir_vals,
                                                      constants, TA, cpix_val)
            ir_imgs.append([str(fahrenheit(t)) for t in temperatures])
        except TypeError:
            # calculate_to returned None for a bad frame; skip it.
            print('FAIL ', fnum, '! Continuing ... ', sep='', end='')
        # Rotate the RGB images
        rot_name = '/tmp/rot.jpg'  # '.png'
        sh.convert(cam_name, '-rotate', '270', rot_name)
        imgs.append(cv2.imread(rot_name))
        # Clean-up
        sh.rm(rot_name)
    return imgs, ir_imgs
class ImagesBatch(object):
    """One density directory of SVG sources (directory name = dp size),
    optionally restricted to a set of filename filters.

    NOTE(review): Python 2 only -- _do_flop uses tuple parameter
    unpacking, which was removed in Python 3 (PEP 3113).
    """

    def __init__(self, path, filters):
        self.dp = int(os.path.basename(path))
        self.path = path
        self.svgs = []
        all_svgs = self.find_svg_files(path)
        filtered_svgs = self.filter_filenames(all_svgs, filters)
        self.svgs = self.abspath(filtered_svgs)

    @staticmethod
    def find_svg_files(path):
        """Return all *.svg paths directly inside *path*."""
        return [p for p in glob(os.path.join(path, "*.svg"))]

    @staticmethod
    def filter_filenames(all_svg_files, filters=None):
        """Keep only the files named in *filters*; no filters = keep all.

        NOTE(review): `source_path` is a module-level name not visible in
        this block -- confirm it is the batch source directory.
        """
        relative_svg_files = []
        if filters:
            for filter in filters:
                if os.path.join(source_path, filter) in all_svg_files:
                    relative_svg_files.append(os.path.join(
                        source_path, filter))
        else:
            relative_svg_files = all_svg_files
        return relative_svg_files

    @staticmethod
    def abspath(filenames):
        """Absolutize every path in *filenames*."""
        output = []
        for filename in filenames:
            output.append(os.path.abspath(filename))
        return output

    def _do_export(self, density, input_path, drawable):
        """Export one SVG at the pixel size for *density* into *drawable*,
        crushing the PNG. Returns (output_path, noflip)."""
        nonspecific = ".nonspecific." in input_path
        # Direction-neutral assets must not be mirrored for RTL.
        noflip = nonspecific or input_path.endswith(".noflip.svg")
        # Strip marker suffixes (".nonspecific", ".noflip") from the name.
        file_name = os.path.basename(
            os.path.splitext(input_path)[0].split(".")[0] + ".png")
        folder_path = os.path.join(OUTPUT_PATH_PREFIX, drawable)
        sh.mkdir("-p", folder_path)
        output_file_path = os.path.join(folder_path, file_name)
        output_precrush_path = output_file_path + "_"
        px = int(DENSITIES[density] * self.dp)
        sh.rsvg_convert(input_path, "-a", h=px, o=output_precrush_path)
        sh.pngcrush("-q", "-reduce", output_precrush_path, output_file_path)
        sh.rm(output_precrush_path)
        return output_file_path, noflip

    def _do_flop(self, density, (input_path, noflip)):
        """Mirror an exported PNG into the RTL drawable folder, unless the
        asset is marked noflip."""
        if noflip:
            return
        folder_name = os.path.join(OUTPUT_PATH_PREFIX,
                                   "drawable-ldrtl-" + density)
        output_file_path = os.path.join(folder_name,
                                        os.path.basename(input_path))
        sh.mkdir("-p", folder_name)
        sh.convert(input_path, "-flop", output_file_path)
def create_tiff_and_txt(pnms, language):
    """Unpaper each pnm, convert it to tiff, then OCR it with tesseract.

    Returns (tiffs, texts): the tiff filenames and the matching .txt
    files written by tesseract.
    """
    language = language or DEFAULT_LANGUAGE
    tiffs, texts = [], []
    for pnm in sorted(pnms):
        unpapered = '_' + pnm
        unpaper(pnm, unpapered)
        # Delete each intermediate only once its successor exists.
        if os.path.exists(unpapered):
            os.remove(pnm)
        tiff = unpapered.replace('.pnm', '.tiff')
        convert(unpapered, tiff)
        if os.path.exists(tiff):
            os.remove(unpapered)
        base = tiff.replace('.tiff', '')
        tiffs.append(tiff)
        texts.append(base + '.txt')
        tesseract(tiff, base, '-l', language)
    return tiffs, texts
def run(self):
    """Endless encode loop: stamp callsign and GPS fix onto the latest
    frame, SSTV-encode it, and publish the wav atomically via mv."""
    global g, pid
    time.sleep(15)
    try:
        while True:
            sstv_debug_log('Encoder', 'Encoder reporting for duty')
            convert(
                '/tmp/latest.jpg',
                '-resize', '320x240!',
                '-pointsize', '35',
                # Callsign: black shadow then white foreground.
                '-fill', 'black', '-annotate', '+0+37', 'W8UPD',
                '-fill', 'white', '-annotate', '+0+40', 'W8UPD',
                # Coordinates: same two-pass technique.
                '-fill', 'black', '-annotate', '+30+230',
                '%f, %f' % (g.fix.latitude, g.fix.longitude),
                '-fill', 'white', '-annotate', '+33+233',
                '%f, %f' % (g.fix.latitude, g.fix.longitude),
                '/tmp/latest.ppm')
            robot36_encode('/tmp/latest.ppm', '/tmp/inprogress.wav')
            mv('/tmp/inprogress.wav', '/tmp/latest.wav')
    except Exception as e:
        sstv_debug_log('Encoder', str(e), True)
        os._exit(1)
def grab_pdf_thumbnail(filepath): """ Returns jpeg image thumbnail of the input pdf. """ print 'converting pdf: {}'.format(filepath) out = sh.convert( filepath + '[0]', # force to only get 1st page '-thumbnail', '400x400', # output size '-alpha', 'remove', # fix black border that appears 'jpg:-', # force to output jpeg to stdout ) return out.stdout
def pdf_to_png(pdf_filepath: str, output_filepath: str) -> None:
    """Rasterize a PDF to PNG at 300 dpi on a white background.

    Failures are printed and swallowed so a batch run can continue.
    """
    try:
        convert(
            "-density", "300",
            pdf_filepath,
            "-depth", "8",
            "-strip",
            "-background", "white",
            "-alpha", "remove",
            output_filepath,
        )
    except Exception as e:
        print("ERROR: %s" % pdf_filepath)
        print(e)
def _grab_pdf_thumbnail(filepath: str) -> bytes:
    """ Returns jpeg image thumbnail of the input pdf. """
    logger.info("Converting pdf: %s", filepath)
    result = sh.convert(
        filepath + "[0]",          # force to only get 1st page
        "-thumbnail", "400x400",   # output size
        "-flatten",
        "jpg:-",                   # output jpeg to stdout
    )
    return result.stdout
def swap_image(filepath):
    """Fit *filepath* into a 640x480 black-letterboxed frame and rotate
    it into place: the current camimg.png is preserved as camimg_old.png
    and the link is re-pointed at the fresh frame."""
    assert exists(filepath), "File does not exist."
    new_filepath = join(tempdir, "camimg_new.png")
    old_filepath = join(tempdir, "camimg_old.png")
    link_filepath = join(tempdir, "camimg.png")
    convert(
        filepath,
        "-thumbnail", "x480",
        "-gravity", "center",
        "-crop", "640x480+0+0!",
        "-background", "black",
        "-flatten",
        new_filepath,
    )
    cp(link_filepath, old_filepath, f=True)
    ln(new_filepath, link_filepath, s=True, f=True)
def overlay_text(text, position):
    '''Overlay text on latest.ppm.

    We do this twice so that it's readable in light and dark images.
    The position argument must be either 'top' or 'bottom'.
    '''
    if position == 'top':
        y1, y2 = 15, 16
    elif position == 'bottom':
        y1, y2 = 235, 236
    else:
        raise Exception("argument 'position' must be 'top' or 'bottom'")
    convert(
        "latest.ppm",
        "-pointsize", "18",
        "-fill", "black",
        "-annotate", "+0+" + str(y1),  # Shadow
        text,
        "-fill", "white",
        "-annotate", "+1+" + str(y2),  # Foreground
        text,
        "latest.ppm",
        _cwd=cwd)
def latex(): with codecs.open("temp.md", "w+", "utf-8") as f: f.write(request.form["content"]) args = "temp.md --latex-engine=xelatex -t beamer -V theme:Carsurfing -o temp.pdf --smart" args = "temp.md --latex-engine=xelatex -t beamer -o temp.pdf --smart" pandoc_status = pandoc(*args.split(" ")) rm_status = rm(sh.glob("static/images/*"), "-r") args = "-quality 100 -density 200x200 temp.pdf static/images/output%d.jpg" convert_status = convert(*args.split(" ")) image_dir = "static/images" files = os.listdir(image_dir) print pandoc_status, rm_status, convert_status return json.dumps(files)
# NOTE(review): this fragment appears to run inside a loop over index n,
# where i and d look like lists of left/right photo filenames and `trash`
# collects temporary files -- confirm against the surrounding code, which
# is not visible here.
I = "tmp/i/trans_%s" % i[n]
D = "tmp/d/trans_%s" % d[n]
# transverse op flops the photos
jpegtran("-transverse", "i/%s" % i[n], _out=I)
jpegtran("-transverse", "d/%s" % d[n], _out=D)
trash += [I, D]
# Side-by-side pair built from the flopped images.
montage_flopped = "stereo/flopped_%s_%s.jpg" \
    % (i[n].replace('.JPG', ''), d[n].replace('.JPG', ''))
montage('-tile', '2x1', '-geometry', '+0+0', I, D, montage_flopped)
# Flop the montage back so the final pair has normal orientation.
montage_unflopped = "stereo/%s_%s.jpg" \
    % (i[n].replace('.JPG', ''), d[n].replace('.JPG', ''))
convert(montage_flopped, '-flop', montage_unflopped)
trash.append(montage_flopped)
for f in trash:
    unlink(f)
def download(target, temp_dir, host=DEFAULT_HOST, path=DEM_PATH,
             zip_pattern=ZIP_PATTERN, tiff_pattern=TIFF_PATTERN,
             chunks=None, chunk_prefix=None, chunk_directory=None,
             expect_res=Dataset.default_res):
    """Fetch DEM chunks over HTTP, optionally saving each TIFF and/or
    concatenating raw GRAY data into *target*.

    Parameters:
        target: writable binary file for concatenated raw data, or None
            to skip concatenation.
        temp_dir: working directory for intermediate files.
        host, path, zip_pattern, tiff_pattern: where and how to fetch.
        chunks: chunk identifiers to fetch (defaults to CHUNKS).
        chunk_prefix / chunk_directory: when a directory is given, each
            TIFF is also saved there (renamed via the prefix if set).
        expect_res: expected (width, height) of each chunk.

    Raises:
        RuntimeError: on HTTP errors or bad zip contents.
        ValueError: when a converted chunk has the wrong size.
    """
    tgt_path = os.path.join(temp_dir, "chunk")
    # Expected raw data size is 2-bytes (16-bits) per pixel
    expect_size = expect_res[0] * expect_res[1] * 2
    chunks = chunks or CHUNKS
    LOG.info('Fetching the following chunks: {0}'.format(','.join(chunks)))
    for chunk_idx, chunk in enumerate(chunks):
        LOG.info('Fetching chunk {0}/{1}'.format(chunk_idx+1, len(chunks)))
        tif_name = expand_pattern(tiff_pattern, CHUNK=chunk)
        tif_path = os.path.join(temp_dir, tif_name)
        url = urlunsplit((
            'http', host,
            '/'.join((path, expand_pattern(zip_pattern, CHUNK=chunk))),
            '', ''
        ))
        LOG.info('GET-ing {0}'.format(url))
        resp = requests.get(url)
        if resp.status_code != 200:
            LOG.error('Error fetching DEM: {0}'.format(resp.status_code))
            raise RuntimeError('Error fetching DEM. (HTTP {0} error.)'.format(resp.status_code))
        # Open downloaded content as a file object
        content_fobj = io.BytesIO(resp.content)
        # Open content as zip
        with zipfile.ZipFile(content_fobj, 'r') as pack:
            # Extract TIFF
            try:
                tiff_info = pack.getinfo(tif_name)
            except KeyError:
                # TODO: Decide if this should be a fatal error
                LOG.error('DEM zip does not contain expected file {0}'.format(tif_name))
                LOG.error('DEM zip contains: {0}'.format(pack.namelist()))
                raise RuntimeError('Error fetching DEM. (Bad zip file.)')
            tiff_fobj = pack.open(tiff_info)
            with open(tif_path, 'wb') as out_fobj:
                shutil.copyfileobj(tiff_fobj, out_fobj)
        # Saving individual chunks
        if chunk_directory is not None:
            if chunk_prefix is not None:
                # Use chunk prefix to calculate filename
                chunk_filename = os.path.join(chunk_directory,
                                              chunk_prefix + '{0:02d}'.format(chunk_idx) + '.tiff')
            else:
                # Use original filename
                chunk_filename = os.path.join(chunk_directory, tif_name)
            with open(chunk_filename, "wb") as dst, open(tif_path, "rb") as src:
                shutil.copyfileobj(src, dst)
        # Concatenation of chunks
        if target is not None:
            # Strip the TIFF container, leaving raw grayscale samples.
            convert(tif_path, '-quiet', 'GRAY:{}'.format(tgt_path))
            target_size = os.stat(tgt_path).st_size
            if target_size != expect_size:
                raise ValueError("Bad converted size {1} in chunk {0} (expected {2})".format(
                    chunk, target_size, expect_size))
            with open(tgt_path, "rb") as f:
                shutil.copyfileobj(f, target)
            os.unlink(tgt_path)
        os.unlink(tif_path)
def convert_picture_to_ppm():
    """Convert latest.jpg to a PPM (latest.ppm)."""
    source, destination = "latest.jpg", "latest.ppm"
    convert(source, destination, _cwd=cwd)
# NOTE(review): Python 2 fragment (print statements, .decode on str).
# `tex_files`, `compiler_settings`, `Template`, `print_verbose`, and the
# sh commands come from the surrounding module, which is not visible here.
for tex_filename, img_size in tex_files.items():
    filename = tex_filename[:-4]  # strip the ".tex" extension
    print 'Building figure "%s" (%s).' % (filename, img_size)
    temp_dir = mkdtemp()
    print_verbose('Made temporary directory "%s".' % temp_dir)
    temp_filename = temp_dir + '/' + filename + '.cheetah'
    img_in_filename = temp_dir + '/' + filename + '.pdf'
    img_out_filename = '../source/static/' + filename + '.png'
    with open(tex_filename) as f_in:
        f_contents = f_in.read().decode('utf-8')
    # fix comments
    f_contents = f_contents.replace('\\', '\\\\').replace('\\\\#', '\\#')
    # compile template
    compiled = str(Template(compiler_settings + f_contents)).replace('\\\\', '\\')
    with open(temp_filename, 'w') as f_temp:
        f_temp.write(compiled)
    print_verbose("STARTING lualatex")
    # _iter streams tool output line by line for verbose logging.
    for line in lualatex('--output-directory', temp_dir,
                         '--shell-escape', '--jobname', filename,
                         temp_filename, _err_to_out=True, _iter=True):
        print_verbose(line, newline=False)
    print_verbose("STOPPED lualatex")
    print_verbose("STARTING convert")
    for line in convert('-trim', '-density', '300x300', '-resize', img_size,
                        img_in_filename, img_out_filename,
                        _err_to_out=True, _iter=True):
        print_verbose(line, newline=False)
    print_verbose("STOPPED convert")
    rmtree(temp_dir)
    print_verbose('Removed temporary directory "%s"' % temp_dir)
# lualatex --output-directory tex-temp --shell-escape --jobname %B %B.cheetah.tex';
def add_border_to_image(input_filename, width=10):
    """Draw a white border of *width* pixels around the image, in place.

    Parameters:
        input_filename: image path; overwritten with the bordered result.
        width: border thickness in pixels on each side.
    """
    # BUG FIX: the previous shlex.split() on an interpolated string broke
    # for filenames containing spaces or quote characters; pass the
    # argument vector directly instead.
    convert([
        input_filename,
        '-bordercolor', 'White',
        '-border', '{0}x{0}'.format(width),
        input_filename,
    ])
def execute(working_dir: str, frequency: str, duration: timedelta, sh=sh):
    """Record, demodulate and decode a Meteor-M LRPT pass into a PNG.

    Pipeline: rtl_fm -> sox (wav) -> sox (normalize) -> meteor_demod
    (QPSK) -> medet (decode + RGB composite) -> convert (PNG).

    Parameters:
        working_dir: directory that receives all intermediate products.
        frequency: center frequency in Hz (e.g. 137 MHz band).
        duration: how long to record before stopping rtl_fm.
        sh: sh module, injectable for testing.

    Returns [("SIGNAL", wav_path), ("PRODUCT", png_path)].
    """
    signal_path = os.path.join(working_dir, "signal.wav")
    product_path = os.path.join(working_dir, "product.png")
    normalized_signal_path = os.path.join(working_dir, "normalized_signal.wav")
    qpsk_path = os.path.join(working_dir, "qpsk")
    dump_prefix_path = os.path.join(working_dir, "dump")
    dump_path = dump_prefix_path + ".dec"
    product_raw_prefix_path = os.path.join(working_dir, "product_raw")
    product_raw_path = product_raw_prefix_path + ".bmp"
    # Record signal
    fm_proc = sh.rtl_fm(
        # Modulation raw
        "-M", "raw",
        # Set frequency (in Hz, e.g. 137MHz)
        "-f", frequency,
        # Enable bias-T
        "-T",
        # Specify sampling rate (e.g. 48000 Hz)
        "-s", 48000,
        # Almost maximal possible value. Probably is wrong for other SDR than rtl-sdr
        "-g", 48,
        # ppm frequency correction; value taken from field experience
        "-p", 1,
        _timeout=duration.total_seconds(),
        _timeout_signal=signal.SIGTERM,
        _piped=True
    )
    # rtl_fm is stopped by the timeout signal, so the TimeoutException
    # is the expected way for the recording to end.
    with suppress(sh.TimeoutException):
        sh.sox(fm_proc,
               # Type of input
               "-t", "raw",
               "-r", "288k",
               # Channels - 2 - stereo
               "-c", 2,
               # Sample size
               "-b", 16,
               # Signed integer encoding
               "-e", "s",
               # Verbosity level (0 - silence, 1 - failure messages,
               # 2 - warnings, 3 - processing phases, 4 - debug)
               "-V3",
               # Read from stdin (from pipe)
               "-",
               # Type of output
               "-t", "wav",
               signal_path,
               # Resampling rate
               "rate", "96k")
    # Normalize signal
    sh.sox(
        signal_path,
        normalized_signal_path,
        # Normalize to 0dBfs
        "gain", "-n"
    )
    # Demodulating; meteor_demod prompts interactively, hence the piped yes.
    sh.meteor_demod(
        sh.yes(_piped=True),
        "-o", qpsk_path,
        "-B", normalized_signal_path
    )
    # Keep original file timestamp
    sh.touch("-r", signal_path, qpsk_path)
    # Decode QPSK
    sh.medet(
        qpsk_path,
        dump_prefix_path,
        # Make decoded dump (fastest)
        "-cd"
    )
    # Generate images
    sh.medet(dump_path, product_raw_prefix_path,
             # APID for red
             "-r", 66,
             # APID for green
             "-g", 65,
             # APID for blue
             "-b", 64,
             # Use dump
             "-d")
    # Convert to PNG
    sh.convert(product_raw_path, product_path)
    return [
        ("SIGNAL", signal_path),
        ("PRODUCT", product_path)
    ]
# Load the png-export script template once; '%N' is its name placeholder.
with open(pngscr, 'r') as f:
    pngscr_contents = f.read()

# Create script to run the pdf script
contents = ''
with open(scr, 'r') as f:
    contents = f.read()
contents = contents.replace('%SCRIPT_PATH%', tmppng)
with open(tmpscr, 'w') as f:
    f.write(contents)

# Figure out the name of the schematic to run this on.
for sch in glob('*.sch'):
    sch_name, sch_ext = os.path.splitext(sch)
    png_name = '{}_pcb.png'.format(sch_name)
    rm('-f', png_name)
    # BUG FIX: substitute into a per-iteration copy. Rebinding
    # pngscr_contents consumed the '%N' placeholder, so every schematic
    # after the first silently reused the first schematic's name.
    sch_script = pngscr_contents.replace('%N', sch_name)
    with open(tmppng, 'w') as f:
        f.write(sch_script)
    # Generate the png
    eagle('-S', tmpscr, sch)
    # Trim whitespace
    sh.convert(png_name, '-trim', png_name)

rm('-f', tmpscr)
rm('-f', tmppng)
pngscr_contents = f.read() # Create script to run the pdf script contents = '' with open(scr, 'r') as f: contents = f.read() contents = contents.replace('%SCRIPT_PATH%', tmppng) with open(tmpscr, 'w') as f: f.write(contents) # Figure out the name of the schematic to run this on. for sch in glob('*.sch'): sch_name, sch_ext = os.path.splitext(sch) png_name = '{}_pcb.png'.format(sch_name) rm('-f', png_name) pngscr_contents = pngscr_contents.replace('%N', sch_name) with open(tmppng, 'w') as f: f.write(pngscr_contents) # Generate the png eagle('-S', tmpscr, sch) # Trip whitespace sh.convert(png_name, '-trim', png_name) rm('-f', tmpscr) rm('-f', tmppng)
def convert_gif_to_png(filename, out_filename):
    """Convert a gif to png, forcing the png encoder via the "png:" prefix."""
    log.debug('convert gif to png')
    sh.convert(filename, 'png:{}'.format(out_filename))
#!/usr/bin/env python
"""Generate pixelated 640x480 status screens (white text on black)."""
import os
from sh import convert

images = dict(
    wait="Please Wait...",
    nocart="No Cartridge!",
)

for filename, text in images.iteritems():
    convert(
        "-size", '640x480', 'xc:black',
        '-fill', 'white', '-stroke', 'black',
        '-font', 'Nimbus-Mono-Bold', '-pointsize', '70',
        '-gravity', 'center',
        '-draw', 'text 0,0 "{}"'.format(text),
        # Downscale then upscale to get a chunky pixelated look.
        '-scale', '20%',
        '-scale', '640x480!',
        'images/' + filename + ".png")
def download(target, temp_dir, host=DEFAULT_HOST, path=DEM_PATH,
             zip_pattern=ZIP_PATTERN, tiff_pattern=TIFF_PATTERN,
             chunks=None, chunk_prefix=None, chunk_directory=None,
             expect_res=Dataset.default_res):
    """Fetch DEM chunks over HTTP, optionally saving each TIFF and/or
    concatenating raw GRAY data into *target*.

    Parameters:
        target: writable binary file for concatenated raw data, or None
            to skip concatenation.
        temp_dir: working directory for intermediate files.
        host, path, zip_pattern, tiff_pattern: where and how to fetch.
        chunks: chunk identifiers to fetch (defaults to CHUNKS).
        chunk_prefix / chunk_directory: when a directory is given, each
            TIFF is also saved there (renamed via the prefix if set).
        expect_res: expected (width, height) of each chunk.

    Raises:
        RuntimeError: on HTTP errors or bad zip contents.
        ValueError: when a converted chunk has the wrong size.
    """
    tgt_path = os.path.join(temp_dir, "chunk")
    # Expected raw data size is 2-bytes (16-bits) per pixel
    expect_size = expect_res[0] * expect_res[1] * 2
    chunks = chunks or CHUNKS
    LOG.info('Fetching the following chunks: {0}'.format(','.join(chunks)))
    for chunk_idx, chunk in enumerate(chunks):
        LOG.info('Fetching chunk {0}/{1}'.format(chunk_idx + 1, len(chunks)))
        tif_name = expand_pattern(tiff_pattern, CHUNK=chunk)
        tif_path = os.path.join(temp_dir, tif_name)
        url = urlunsplit(('http', host, '/'.join(
            (path, expand_pattern(zip_pattern, CHUNK=chunk))), '', ''))
        LOG.info('GET-ing {0}'.format(url))
        resp = requests.get(url)
        if resp.status_code != 200:
            LOG.error('Error fetching DEM: {0}'.format(resp.status_code))
            raise RuntimeError('Error fetching DEM. (HTTP {0} error.)'.format(
                resp.status_code))
        # Open downloaded content as a file object
        content_fobj = io.BytesIO(resp.content)
        # Open content as zip
        with zipfile.ZipFile(content_fobj, 'r') as pack:
            # Extract TIFF
            try:
                tiff_info = pack.getinfo(tif_name)
            except KeyError:
                # TODO: Decide if this should be a fatal error
                LOG.error('DEM zip does not contain expected file {0}'.format(
                    tif_name))
                LOG.error('DEM zip contains: {0}'.format(pack.namelist()))
                raise RuntimeError('Error fetching DEM. (Bad zip file.)')
            tiff_fobj = pack.open(tiff_info)
            with open(tif_path, 'wb') as out_fobj:
                shutil.copyfileobj(tiff_fobj, out_fobj)
        # Saving individual chunks
        if chunk_directory is not None:
            if chunk_prefix is not None:
                # Use chunk prefix to calculate filename
                chunk_filename = os.path.join(
                    chunk_directory,
                    chunk_prefix + '{0:02d}'.format(chunk_idx) + '.tiff')
            else:
                # Use original filename
                chunk_filename = os.path.join(chunk_directory, tif_name)
            with open(chunk_filename, "wb") as dst, open(tif_path, "rb") as src:
                shutil.copyfileobj(src, dst)
        # Concatenation of chunks
        if target is not None:
            # Strip the TIFF container, leaving raw grayscale samples.
            convert(tif_path, '-quiet', 'GRAY:{}'.format(tgt_path))
            target_size = os.stat(tgt_path).st_size
            if target_size != expect_size:
                raise ValueError(
                    "Bad converted size {1} in chunk {0} (expected {2})".
                    format(chunk, target_size, expect_size))
            with open(tgt_path, "rb") as f:
                shutil.copyfileobj(f, target)
            os.unlink(tgt_path)
        os.unlink(tif_path)
def main(resolution: int, color: str, duplex: bool, deskew: bool, trim: bool,
         batch: bool):
    """Scan documents from a Brother ADS-2700 feeder and build a PDF.

    Locates the scanner on USB, scans one or more batches via ``scanadf``,
    cleans each page with ImageMagick ``convert`` in background processes,
    assembles the cleaned PNGs with ``img2pdf``, hands the result to
    ``pdfarranger`` for manual reordering, and optionally removes the
    temporary working directory.

    NOTE(review): relies on module-level names not visible here —
    ``pool`` (a semaphore limiting concurrent converts, presumably),
    ``processes`` / ``processes_lock`` (shared list of running converts),
    and ``done`` (the sh ``_done`` callback) — confirm against the rest of
    the file.
    """
    # 04f9 is the USB vendor id matched here; the SANE device string below
    # hard-codes a Brother ADS-2700.
    result = str(lsusb('-d', '04f9:'))
    device: str
    bus: str
    # Parse the "Bus XXX Device YYY:" prefix of the lsusb output line.
    (_, bus, _, device) = result.partition(':')[0].split(' ')
    sane_model = 'BrotherADS2700'
    sane_device = f'{sane_model}:libusb:{bus}:{device}'
    working_dir = tempfile.mkdtemp()
    # CLI color names -> SANE --mode option values.
    mode_map = {
        'monochrome': 'Black & White',
        'grayscale': 'Gray',
        'truecolor': '24 bit Color'
    }
    option_mode = mode_map[color]
    # scanadf announces each saved page on stderr with this prefix.
    scan_line = 'Scanned document '
    final_images = []
    batch_number = 0
    batch_complete = False
    while not batch_complete:
        # %d is expanded by scanadf with the page number.
        scanner_output = os.path.join(working_dir,
                                      f'scanned-{batch_number}-%d.pnm')
        option_source = 'Automatic Document Feeder(left aligned,Duplex)' if duplex \
            else 'Automatic Document Feeder(left aligned)'
        # _iter='err': iterate scanadf's stderr lines as they arrive.
        scan_iter: Iterable[str] = scanadf('--device-name', sane_device,
                                           '--mode', option_mode,
                                           '--resolution', resolution,
                                           '--output-file', scanner_output,
                                           '--source', option_source,
                                           _iter='err')
        with yaspin(text='Scanning...') as spin:
            for line in scan_iter:
                line = line.rstrip()
                if line.startswith(scan_line):
                    # Everything after the prefix is the saved page's path.
                    file_name = line[len(scan_line):]
                    out_file_name = file_name.replace('scanned',
                                                      'cleaned').replace(
                                                          '.pnm', '.png')
                    convert_args = [file_name, '-fuzz', '20%']
                    if trim:
                        convert_args.append('-trim')
                    if deskew:
                        convert_args.extend(('-deskew', '30%'))
                    convert_args.extend(('+repage', out_file_name))
                    # Throttle: released by the `done` callback, presumably.
                    pool.acquire()
                    process = convert(*convert_args, _bg=True, _done=done)
                    processes_lock.acquire()
                    processes.append(process)
                    processing_count = len(processes)
                    processes_lock.release()
                    final_images.append(out_file_name)
                    spin.text = f'Scanning... {len(final_images)} scanned.'
                    if processing_count > 0:
                        spin.text += f' Processing {processing_count}...'
            spin.text = 'Scanning complete.'
            spin.green.ok('✔')
        # Snapshot the shared list so we can wait without holding the lock.
        processes_lock.acquire()
        processes_remaining = list(processes)
        processes_lock.release()
        with yaspin(text='Processing...') as spin:
            if len(processes_remaining) > 0:
                i = 0
                for process in processes_remaining:
                    spin.text = f'Processing {len(processes_remaining) - i}...'
                    process.wait()
                    i += 1
            spin.text = 'Processing complete.'
            spin.green.ok('✔')
        processes.clear()
        if not batch:
            batch_complete = True
        else:
            batch_number += 1
            # Ask whether to scan another batch and in which feeder mode;
            # loop until a valid choice is made.
            while True:
                value = click.prompt('Next batch (Single/Duplex/Finished):',
                                     type=click.Choice(('s', 'd', 'F'),
                                                       case_sensitive=False),
                                     default='F').lower()
                if value == 'f':
                    batch_complete = True
                elif value == 'd':
                    duplex = True
                elif value == 's':
                    duplex = False
                else:
                    continue
                break
    input_pdf = os.path.join(working_dir, 'cleaned.pdf')
    with yaspin(text='Creating PDF...') as spin:
        img2pdf(*final_images, '-o', input_pdf)
        spin.text = 'PDF complete.'
        spin.green.ok('✔')
    # Blocks until the pdfarranger GUI is closed.
    with yaspin(text='Waiting for PDF arrangement...',
                spinner=Spinners.clock) as spin:
        pdfarranger(input_pdf)
        spin.text = 'PDF arranger closed.'
        spin.green.ok('✔')
    if click.confirm(f'Remove temporary files ({working_dir})?', default=True,
                     show_default=True):
        with yaspin(text='Cleaning up...') as spin:
            shutil.rmtree(working_dir)
            spin.text = 'Clean up complete.'
            spin.green.ok('✔')
#!/usr/bin/env python
"""Render placeholder message images (640x480 PNGs) with ImageMagick."""
import os

from sh import convert

# Output filename (without extension) -> message text to render.
images = dict(wait="Please Wait...",
              nocart="No Cartridge!",
              )

# .iteritems() was Python 2 only; .items() works on both 2 and 3.
for filename, text in images.items():
    # Draw white text with a black outline on a black canvas, then scale
    # down and back up to get a chunky, pixelated look.
    convert("-size", '640x480', 'xc:black',
            '-fill', 'white', '-stroke', 'black',
            '-font', 'Nimbus-Mono-Bold', '-pointsize', '70',
            '-gravity', 'center',
            '-draw', 'text 0,0 "{}"'.format(text),
            '-scale', '20%', '-scale', '640x480!',
            'images/' + filename + ".png")
def add_border_to_image(input_filename, width=10):
    """Add a *width*-pixel white border to the image, overwriting it in place.

    Args:
        input_filename: path of the image to modify.
        width: border thickness in pixels (applied on all four sides).

    The arguments are passed to ``convert`` individually rather than built
    into a shell-style string and ``shlex.split``-ed, so filenames that
    contain spaces or quote characters are no longer broken into several
    arguments.
    """
    convert(input_filename,
            '-bordercolor', 'White',
            '-border', '{0}x{0}'.format(width),
            input_filename)
""" Trim whitespace in PNG files. Requires the convert utility (ImageMagick). """ import os import sh for fpath in os.listdir("."): if fpath.endswith(".png"): print fpath sh.convert("-trim", fpath, fpath)
output_file_path = os.path.join(folder_path, new_filename) output_precrush_path = output_file_path + '_' px = int(DENSITIES[density] * dp) sh.rsvg_convert(filepath, '-a', h=px, o=output_precrush_path) sh.pngcrush('-q', '-reduce', output_precrush_path, output_file_path) sh.rm(output_precrush_path) return output_file_path, noflip def flop(density, (filepath, noflip)): if noflip: return folder_name = os.path.join(OUTPUT_PATH_PREFIX, 'drawable-ldrtl-' + density) output_file_path = os.path.join(folder_name, os.path.basename(filepath)) sh.mkdir('-p', folder_name) sh.convert(filepath, '-flop', output_file_path) def convert(icon_path, svg_filters): print('\n* icon_path: {}'.format(icon_path)) dp = int(os.path.basename(icon_path)) trace_value_of('dp') svg_glob = glob.glob(os.path.join(icon_path, '*.svg')) svg_files = ([x for x in svg_glob if os.path.basename(x) in svg_filters] if svg_filters else svg_glob) if not svg_files: return print('converted:')
logging.basicConfig(level=logging.INFO) border_colors = [ { 'red': 172, 'green': 170, 'blue': 164 } ] ''' first we will refresh the screenshot ''' sudo('/home/pi/fb2png', '-p', '/dev/shm/fb.png') ''' then we will check the color of pixel 0,0 ''' output = convert('/dev/shm/fb.png', '-format', '%[pixel: u.p{0,0}]', 'info:') regexp = "(?:(^.*?\((\d+),(\d+),(\d+)\)$)|(\S+))" match = re.search(regexp, str(output)) if match is not None: if re.search('^srgb.*$', match.group(0)) is None: logging.info("we got a color instead of an srgb value.") else: point = {} point['red'] = int(match.group(2)) point['green'] = int(match.group(3)) point['blue'] = int(match.group(4)) ''' if our color in 0,0 matches our known widget border color, then we think we're in windowed mode. ''' if point in border_colors: logging.info("screen appears to be windowed, executing xdotool") new_env = os.environ.copy()