def test_osinfo_db_import_url():
    """Test osinfo-db-import URL.

    Imports the database archive from a remote URL into a temporary
    user directory, then downloads the same archive manually and
    imports it into a second directory; the two results must match.
    """
    url = "https://releases.pagure.org/libosinfo/osinfo-db-20190304.tar.xz"
    tempdir = util.tempdir()
    os.environ["OSINFO_USER_DIR"] = tempdir
    cmd = [util.Tools.db_import, util.ToolsArgs.USER, url]
    returncode = util.get_returncode(cmd)
    assert returncode == 0
    test_file = "downloaded.tar.xz"
    tempdir2 = util.tempdir()
    req = requests.get(url)
    # Use a context manager so the handle is closed deterministically
    # (the original `open(...).write(...)` leaked the file object).
    with open(test_file, "wb") as out:
        out.write(req.content)
    assert os.path.isfile(test_file)
    cmd = [util.Tools.db_import, util.ToolsArgs.DIR, tempdir2, test_file]
    returncode = util.get_returncode(cmd)
    assert returncode == 0
    # Both import paths must produce byte-identical trees.
    dcmp = filecmp.dircmp(tempdir, tempdir2)
    assert dcmp.right_only == []
    assert dcmp.left_only == []
    assert dcmp.diff_files == []
    shutil.rmtree(tempdir)
    shutil.rmtree(tempdir2)
    os.unlink(test_file)
def test_osinfo_db_import_latest():
    """Test osinfo-db-import --latest.

    Runs the tool with --latest, then resolves the latest archive URL
    from latest.json by hand, imports that archive into a second
    directory and checks both trees are identical.
    """
    tempdir = util.tempdir()
    os.environ["OSINFO_USER_DIR"] = tempdir
    cmd = [util.Tools.db_import, util.ToolsArgs.LATEST]
    returncode = util.get_returncode(cmd)
    assert returncode == 0
    # Resolve the archive URL that --latest should have used.
    latest_url = "https://db.libosinfo.org/latest.json"
    req = requests.get(latest_url)
    data = json.loads(req.content)
    url = data["release"]["archive"]
    test_file = "downloaded.tar.xz"
    tempdir2 = util.tempdir()
    req = requests.get(url)
    # Context manager instead of the original leaked open().write().
    with open(test_file, "wb") as out:
        out.write(req.content)
    assert os.path.isfile(test_file)
    cmd = [util.Tools.db_import, util.ToolsArgs.DIR, tempdir2, test_file]
    returncode = util.get_returncode(cmd)
    assert returncode == 0
    dcmp = filecmp.dircmp(tempdir, tempdir2)
    assert dcmp.right_only == []
    assert dcmp.left_only == []
    assert dcmp.diff_files == []
    shutil.rmtree(tempdir)
    shutil.rmtree(tempdir2)
    os.unlink(test_file)
def run_custom_build(name, link, sha, build):
    """Run the project-specific build module for *name*.

    Unpacks *link* (VCS url or file url) into a temp dir, imports
    ``custom_builds.<name>`` and lets its ``Build`` entry point produce
    a wheel, which is then stored in WHEELHOUSE tagged with *sha* and
    *build*.
    """
    from pip.index import Link
    from pip.download import unpack_file_url, unpack_vcs_link, is_vcs_url
    assert has_custom_build(name)
    link = Link(link, trusted=True)
    unpack = unpack_vcs_link if is_vcs_url(link) else unpack_file_url
    with tempdir() as tmpd, tempdir() as wheeld:  # pylint: disable=C0321
        unpack(link, tmpd)
        # Import custom_builds.<name>; getattr fetches the submodule
        # (consistent tuple style with the sibling definition).
        m = getattr(__import__('custom_builds.%s' % (name,)), name)
        m.Build(tmpd, wheeld)
        grab_wheel(wheeld, WHEELHOUSE, sha, build)
def run_custom_build(name, link, sha, build):
    """Execute the custom build hook registered for *name*.

    The source referenced by *link* is unpacked into a scratch
    directory, handed to ``custom_builds.<name>.Build``, and the
    resulting wheel is archived into WHEELHOUSE under *sha*/*build*.
    """
    from pip.index import Link
    from pip.download import unpack_file_url, unpack_vcs_link, is_vcs_url
    assert has_custom_build(name)
    link = Link(link, trusted=True)
    if is_vcs_url(link):
        unpack = unpack_vcs_link
    else:
        unpack = unpack_file_url
    with tempdir() as src_dir, tempdir() as wheel_dir:  # pylint: disable=C0321
        unpack(link, src_dir)
        package = __import__('custom_builds.%s' % (name,))
        builder = getattr(package, name)
        builder.Build(src_dir, wheel_dir)
        grab_wheel(wheel_dir, WHEELHOUSE, sha, build)
def test_osinfo_db_import_root_user_versioned_file(
        osinfo_db_export_user_license_version):
    """Test osinfo-db-import --root / --user VERSIONED_FILE.

    Imports a versioned export into a temp user dir and checks that
    only VERSION and LICENSE differ from the reference data, with the
    expected contents.
    """
    archive, expected_version, returncode = \
        osinfo_db_export_user_license_version
    workdir = util.tempdir()
    os.environ["OSINFO_USER_DIR"] = workdir
    cmd = [
        util.Tools.db_import,
        util.ToolsArgs.ROOT, "/",
        util.ToolsArgs.USER, archive,
    ]
    returncode = util.get_returncode(cmd)
    assert returncode == 0
    dcmp = filecmp.dircmp(util.Data.positive, workdir)
    extra = dcmp.right_only
    assert len(extra) == 2
    assert "VERSION" in extra
    with open(os.path.join(workdir, "VERSION")) as out:
        assert out.read() == expected_version
    assert "LICENSE" in extra
    assert filecmp.cmp(
        os.path.join(workdir, "LICENSE"), util.Data.license) is True
    assert dcmp.left_only == []
    assert dcmp.diff_files == []
    shutil.rmtree(workdir)
    os.unlink(archive)
def run_command(self):
    """ABC import (at least for now) needs a specific solution here.

    Runs the converter in a temp directory with a sanitized
    environment and returns the (stdout, stderr) pair; if stdout is
    empty, the converter's output file (last element of cmd) is read
    instead, best-effort.
    """
    cmd = self.getCmd('document.ly')
    directory = util.tempdir()
    subenviron = None
    if os.name == "nt":
        # Python 2.7 subprocess on Windows chokes on unicode in env
        subenviron = util.bytes_environ()
    else:
        subenviron = dict(os.environ)
    if sys.platform.startswith('darwin'):
        # Strip Python-specific vars so the tool doesn't pick up our
        # interpreter's environment on macOS.
        try:
            del subenviron['PYTHONHOME']
        except KeyError:
            pass
        try:
            del subenviron['PYTHONPATH']
        except KeyError:
            pass
    proc = subprocess.Popen(cmd, cwd=directory,
        env = subenviron,
        stdin = subprocess.PIPE,
        stdout = subprocess.PIPE,
        stderr = subprocess.PIPE)
    stdouterr = proc.communicate()
    if not stdouterr[0]:
        # Nothing on stdout: the tool wrote to a file instead; read it,
        # but tolerate a missing file (deliberate best-effort).
        try:
            with open(os.path.join(directory, cmd[-1])) as abc:
                stdouterr = (abc.read(), stdouterr[1])
        except IOError:
            pass
    return stdouterr
def __init__(
    self, text, title=None, base_dir=None
):
    """Create a job engraving *text* written to a temporary file.

    text: LilyPond source to engrave.
    title: optional job title.
    base_dir: optional extra include path (a 'virtual' document dir).
    """
    # TODO: ???
    # I have the impression this "info" stuff
    # is not used at all. And *if* it is used,
    # shouldn't it be implemented in LilyPondJob???
    # Initialize default LilyPond version
    info = lilypondinfo.preferred()
    # Optionally infer a suitable LilyPond version from the content
    if QSettings().value("lilypond_settings/autoversion", True, bool):
        version = ly.docinfo.DocInfo(
            ly.document.Document(text, 'lilypond')).version()
        if version:
            info = lilypondinfo.suitable(version)
    # Create temporary (document.Document object and file)
    self.directory = util.tempdir()
    filename = os.path.join(self.directory, 'document.ly')
    # Binary mode: write UTF-8 encoded bytes directly.
    with open(filename, 'wb') as f:
        f.write(text.encode('utf-8'))
    url = QUrl(filename)
    url.setScheme('file')
    super(VolatileTextJob, self).__init__(url, title=title)
    if title:
        self.set_title(title)
    if base_dir:
        self.add_include_path(base_dir)
def wheel(arg, source_sha, build, build_options):
    """Build a wheel for *arg* and store it in WHEELHOUSE.

    Each entry of *build_options* is forwarded to pip as a
    --global-option; *source_sha* and *build* tag the stored wheel.
    """
    with tempdir() as wheel_dir:
        cmd = ['wheel', '--no-index', '--no-deps', '--wheel-dir', wheel_dir]
        for option in build_options:
            cmd.extend(['--global-option', option])
        cmd.append(arg)
        pip(*cmd)
        grab_wheel(wheel_dir, WHEELHOUSE, source_sha, build)
def configure_job(self):
    """Create and configure the job to be run.

    Has to be completed by the subclasses."""
    # Output file: the input's basename, relocated into a fresh temp
    # directory, with the extension replaced by .ly.
    output = os.path.splitext(
        os.path.join(util.tempdir(), os.path.basename(self._input))
    )[0] + '.ly'
    self._job = j = self._job_class(
        command=self._info.toolcommand(self._imp_prgm),
        input=self._input,
        output='--output={}'.format(output),
        directory=os.path.dirname(self._input),
        encoding='utf-8')
    # Remember where the result will land so callers can pick it up.
    j._output_file = output
def filename(self):
    """Return the name of a file containing the image.

    Returns the cached file name when one exists; otherwise saves the
    image to a temporary PNG, caches that path and returns it. Returns
    None when there is no image at all.
    """
    if self.currentFile:
        return self.currentFile
    if not self.image:
        return
    # Save the image as a PNG file in a temp directory.
    target = os.path.join(
        util.tempdir(), (self.basename or 'image') + '.png')
    self.image.save(target)
    self.currentFile = target
    return target
def run_command(self):
    """ABC import (at least for now) needs a specific solution here.

    Runs the converter in a temp directory and returns the
    (stdout, stderr) pair; when stdout is empty the converter's output
    file (last element of cmd) is read instead.
    """
    cmd = self.getCmd()
    directory = util.tempdir()
    proc = subprocess.Popen(cmd, cwd=directory,
        stdin = subprocess.PIPE,
        stdout = subprocess.PIPE,
        stderr = subprocess.PIPE)
    stdouterr = proc.communicate()
    if not stdouterr[0]:
        # Best-effort, consistent with the env-handling variant of this
        # method: tolerate a missing output file instead of raising.
        try:
            with open(os.path.join(directory, cmd[-1])) as abc:
                stdouterr = (abc.read(), stdouterr[1])
        except IOError:
            pass
    return stdouterr
def test_osinfo_db_validate_root():
    """Test osinfo-db-validate --root with the --system location.

    Copies the reference data under a temp root and expects validation
    to succeed.
    """
    os.environ["OSINFO_SYSTEM_DIR"] = "positive"
    workdir = util.tempdir()
    shutil.copytree(util.Data.positive, os.path.join(workdir, "positive"))
    cmd = [
        util.Tools.db_validate,
        util.ToolsArgs.ROOT, workdir,
        util.ToolsArgs.SYSTEM,
    ]
    returncode = util.get_returncode(cmd)
    shutil.rmtree(workdir)
    assert returncode == 0
def __init__(self, text, title=None):
    """Set up a preview job engraving *text* with LilyPond.

    text: LilyPond source; title: optional job title.
    """
    super(MusicPreviewJob, self).__init__()
    self.directory = util.tempdir()
    self.document = os.path.join(self.directory, 'document.ly')
    # Open in binary mode: we write UTF-8 encoded bytes. Text mode
    # ('w') would raise on Python 3 and could mangle newlines.
    with open(self.document, 'wb') as f:
        f.write(text.encode('utf-8'))
    info = lilypondinfo.preferred()
    # Optionally pick a LilyPond matching the \version in the text.
    if QSettings().value("lilypond_settings/autoversion", True, bool):
        version = ly.parse.version(ly.lex.state('lilypond').tokens(text))
        if version:
            info = lilypondinfo.suitable(version)
    self.command = [info.abscommand(), '-dno-point-and-click', '--pdf', self.document]
    if title:
        self.setTitle(title)
def __init__(self, text, title=None):
    """Set up a preview job engraving *text* with LilyPond.

    text: LilyPond source; title: optional job title.
    """
    super(MusicPreviewJob, self).__init__()
    self.directory = util.tempdir()
    self.document = os.path.join(self.directory, "document.ly")
    # Binary mode: we write UTF-8 encoded bytes. Text mode ("w") would
    # raise on Python 3 and could mangle newlines.
    with open(self.document, "wb") as f:
        f.write(text.encode("utf-8"))
    info = lilypondinfo.preferred()
    # QSettings may return the bool or the string "true" depending on
    # backend, hence the membership test.
    if QSettings().value("lilypond_settings/autoversion", True) in (True, "true"):
        version = ly.parse.version(ly.lex.state("lilypond").tokens(text))
        if version:
            info = lilypondinfo.suitable(version)
    self.command = [info.command, "-dno-point-and-click", "--pdf", self.document]
    if title:
        self.setTitle(title)
def __init__(self, text, title=""):
    """Write *text* to a throw-away .ly file and start the job on it."""
    # Start from the default LilyPond installation.
    info = lilypondinfo.preferred()
    # If enabled, let the document's \version pick a better match.
    if QSettings().value("lilypond_settings/autoversion", True, bool):
        version = ly.docinfo.DocInfo(
            ly.document.Document(text, 'lilypond')).version()
        if version:
            info = lilypondinfo.suitable(version)
    # Temporary file wrapped in a file:// URL for the job.
    tmp_dir = util.tempdir()
    ly_path = os.path.join(tmp_dir, 'document.ly')
    with open(ly_path, 'wb') as f:
        f.write(text.encode('utf-8'))
    url = QUrl(ly_path)
    url.setScheme('file')
    doc = document.Document(url)
    super(VolatileTextJob, self).__init__(doc, title=title)
def __init__(self, text, title=""):
    """Create a job over a temporary document holding *text*."""
    # Default LilyPond installation, possibly overridden below.
    info = lilypondinfo.preferred()
    if QSettings().value("lilypond_settings/autoversion", True, bool):
        doc_info = ly.docinfo.DocInfo(
            ly.document.Document(text, 'lilypond'))
        version = doc_info.version()
        if version:
            info = lilypondinfo.suitable(version)
    # Persist the text into a temp .ly file and build a file URL.
    workdir = util.tempdir()
    path = os.path.join(workdir, 'document.ly')
    with open(path, 'wb') as handle:
        handle.write(text.encode('utf-8'))
    url = QUrl(path)
    url.setScheme('file')
    super(VolatileTextJob, self).__init__(
        document.Document(url), title=title)
def test_osinfo_db_import_local_file(osinfo_db_export_local):
    """Test osinfo-db-import --local FILENAME.

    Imports a local export and verifies the result matches the
    reference data except for the added VERSION file.
    """
    archive, _ = osinfo_db_export_local
    workdir = util.tempdir()
    os.environ["OSINFO_LOCAL_DIR"] = workdir
    cmd = [util.Tools.db_import, util.ToolsArgs.LOCAL, archive]
    assert util.get_returncode(cmd) == 0
    dcmp = filecmp.dircmp(util.Data.positive, workdir)
    extra = dcmp.right_only
    assert len(extra) == 1
    assert "VERSION" in extra
    assert dcmp.left_only == []
    assert dcmp.diff_files == []
    shutil.rmtree(workdir)
    os.unlink(archive)
def upload_to_s3(self, bucket_name, prefix=None, key_name=None):
    """Upload composed archive (.zip) to S3.

    :param str prefix: without leading and trailing slashes
    :param str key_name: defaults to a UTC timestamped .zip name
    :returns: the key path the archive was uploaded under
    """
    s3 = S3()
    if not prefix:
        prefix = "lambda-src"
    if not key_name:
        key_name = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ.zip")
    uploaded_path = "%s/%s" % (prefix, key_name)
    with tempdir() as scratch:
        archive_name = self._compile(scratch.path, key_name)
        s3.upload(archive_name, bucket_name, uploaded_path)
    return uploaded_path
def __init__(self, text, title=None):
    """Prepare a preview job that engraves *text* to PDF."""
    super(MusicPreviewJob, self).__init__()
    # Decode subprocess output as UTF-8, replacing bad bytes.
    self.decode_errors = 'replace'
    self.decoder_stdout = self.decoder_stderr = codecs.getdecoder('utf-8')
    self.directory = util.tempdir()
    self.document = os.path.join(self.directory, 'document.ly')
    with open(self.document, 'wb') as ly_file:
        ly_file.write(text.encode('utf-8'))
    # Pick a LilyPond: the preferred one, or one matching \version.
    info = lilypondinfo.preferred()
    if QSettings().value("lilypond_settings/autoversion", True, bool):
        version = ly.docinfo.DocInfo(
            ly.document.Document(text, 'lilypond')).version()
        if version:
            info = lilypondinfo.suitable(version)
    lilypond = info.abscommand() or info.command
    self.command = [
        lilypond, '-dno-point-and-click', '--pdf', self.document]
    if title:
        self.set_title(title)
def test_osinfo_db_export_import_system(): """ Test osinfo-db-export --system and osinfo-db-import --system """ # We build the expected filename before running osinfo-db-export; # the filename includes the day, so if the day changes while # osinfo-db-export runs then we cannot find the output archive # anymore. # As workaround, build the filename for today and tomorrow, # checking that one of them must exist. today = datetime.date.today() tomorrow = today + datetime.timedelta(days=1) default_filename_today = "osinfo-db-%s.tar.xz" % today.strftime("%Y%m%d") default_filename_tomorrow = ("osinfo-db-%s.tar.xz" % tomorrow.strftime("%Y%m%d")) os.environ["OSINFO_SYSTEM_DIR"] = util.Data.positive cmd = [util.Tools.db_export, util.ToolsArgs.SYSTEM] returncode = util.get_returncode(cmd) assert returncode == 0 assert os.path.isfile(default_filename_today) or \ os.path.isfile(default_filename_tomorrow) if os.path.isfile(default_filename_today): default_filename = default_filename_today else: default_filename = default_filename_tomorrow tempdir = util.tempdir() os.environ["OSINFO_SYSTEM_DIR"] = tempdir cmd = [util.Tools.db_import, util.ToolsArgs.SYSTEM, default_filename] returncode = util.get_returncode(cmd) assert returncode == 0 dcmp = filecmp.dircmp(util.Data.positive, tempdir) assert len(dcmp.right_only) == 1 assert "VERSION" in dcmp.right_only assert dcmp.left_only == [] assert dcmp.diff_files == [] shutil.rmtree(tempdir) os.unlink(default_filename)
def create(self):
    """Creates the local temporary directory (lazily; keeps an
    existing one)."""
    self._directory = self._directory or util.tempdir()
class FontsPreviewWidget(QWidget):
    """Show a preview score using the font selection."""

    # Permanently cache compilations of the provided samples
    persistent_cache_dir = get_persistent_cache_dir()
    # Cache compilations of custom samples for Frescobaldi's lifetime only
    temp_dir = util.tempdir()

    def __init__(self, parent):
        super(FontsPreviewWidget, self).__init__(parent)
        # Create the cache directory for default samples
        os.makedirs(self.persistent_cache_dir, 0o700, exist_ok=True)
        layout = QVBoxLayout(margin=0)
        self.setLayout(layout)
        # Label with text "Example:"
        self.lb_sample = QLabel()
        # ComboBox for provided default samples and other options
        self.cb_samples = QComboBox()
        # Select custom file
        self.custom_sample_url = widgets.urlrequester.UrlRequester(
            fileMode=QFileDialog.ExistingFile,
            mustExist=True)
        # put the default sample in the combobox
        self.populate_default_samples()
        # add other actions
        self.cb_samples.insertSeparator(self.cb_samples.count())
        self.cb_samples.addItem(_("Custom"), "<CUSTOM>")
        self.cb_samples.addItem(_("Current Document"), "<CURRENT>")
        # Add sample source widgets to layout
        bl = QHBoxLayout(margin=0)
        bl.addWidget(self.lb_sample)
        bl.addWidget(self.cb_samples)
        bl.addWidget(self.custom_sample_url)
        layout.addLayout(bl)
        # The score preview widget
        self.musicFontPreview = mfp = musicpreview.MusicPreviewWidget(
            parent, showProgress=False, showWaiting=True, showLog=False)
        layout.addWidget(mfp)
        app.translateUI(self)
        self.loadSettings()
        # Trigger showing of new samples
        self.cb_samples.currentIndexChanged.connect(self.show_sample)
        self.custom_sample_url.editingFinished.connect(self.show_sample)
        parent.finished.connect(self.saveSettings)

    def translateUI(self):
        # NOTE(review): unlike the other UI strings this one is not
        # wrapped in _(); looks like a missed translation — confirm
        # before changing, as wrapping it alters runtime behavior.
        self.lb_sample.setText("Example:")
        csu = self.custom_sample_url
        csu.setToolTip(
            _("Use custom sample for music font.\n"
              + "NOTE: This should not include a version statement "
              + "or a \\paper {...} block."))
        csu.setDialogTitle(_("Select sample score"))
        # NOTE(review): this literal was garbled in the transcription;
        # reconstructed as a single name filter string.
        csu.fileDialog(True).setNameFilters(['LilyPond files (*.ly)'])
        i = self.cb_samples.findData("<CURRENT>")
        self.cb_samples.setItemData(
            i,
            _("Use current document as music font sample.\n"
              + "NOTE: This is not robust if the document contains "
              + "a \\paper {...} block."),
            Qt.ToolTipRole)
        i = self.cb_samples.findData("<CUSTOM>")
        self.cb_samples.setItemData(i, csu.toolTip(), Qt.ToolTipRole)

    def loadSettings(self):
        s = QSettings()
        s.beginGroup('document-fonts-dialog')
        sample = s.value('default-music-sample', '', str)
        index = max(0, self.cb_samples.findData(sample))
        self.cb_samples.setCurrentIndex(index)
        custom_sample = s.value('custom-music-sample-url', '', str)
        self.custom_sample_url.setPath(custom_sample)
        # Start the file dialog in the custom sample's directory, or
        # next to the current document when no custom sample is set.
        if custom_sample:
            sample_dir = os.path.dirname(custom_sample)
        else:
            sample_dir = os.path.dirname(
                self.window().parent().currentDocument().url().toLocalFile())
        self.custom_sample_url.fileDialog().setDirectory(sample_dir)

    def saveSettings(self):
        s = QSettings()
        s.beginGroup('document-fonts-dialog')
        s.setValue('default-music-sample', self.cb_samples.currentData())
        s.setValue('custom-music-sample-url', self.custom_sample_url.path())

    def populate_default_samples(self):
        """Populate the default samples ComboBox.

        This is just factored out to unclutter __init__.
        """
        cb = self.cb_samples

        def add_entry(entry):
            # Item data is the sample file name; tooltip goes on the item.
            cb.addItem(entry['label'], entry['file'])
            cb.setItemData(cb.count() - 1, entry['tooltip'], Qt.ToolTipRole)

        add_entry({
            'label': _('Bach (Piano)'),
            'file': 'bach.ly',
            'tooltip': _("Baroque music lends itself to traditional fonts")
        })
        add_entry({
            'label': _('Scriabine (Piano)'),
            'file': 'scriabine.ly',
            'tooltip': _("Late romantic, complex piano music")
        })
        add_entry({
            'label': _('Berg (String Quartet)'),
            'file': 'berg-string-quartet.ly',
            'tooltip': _("Complex score, requires a 'clean' font")
        })
        add_entry({
            'label': _('Real Book (Lead Sheet)'),
            'file': 'realbook.ly',
            'tooltip': _("Jazz-like lead sheet.\n"
                + "NOTE: beautiful results rely on appropriate text fonts.\n"
                + "Good choices are \"lilyjazz-text\" for roman and\n"
                + "\"lilyjazz-chords\" for sans text fonts.")
        })
        add_entry({
            'label': _('Schenker Diagram'),
            'file': 'schenker.ly',
            'tooltip': _("Schenker diagram with absolutely\n"
                + "non-standard notation.")
        })
        add_entry({
            'label': _('Glyphs'),
            'file': 'glyphs.ly',
            'tooltip': _("Non-comprehensive specimen sheet")
        })

    def show_sample(self):
        """Display a sample document for the selected notation font."""
        # (leftover debug print() calls removed)
        global_size = ''
        base_dir = None
        sample_content = ''
        cache_persistently = False
        target = self.cb_samples.currentData()
        self.custom_sample_url.setEnabled(target == "<CUSTOM>")

        def handle_staff_size():
            """
            If the sample file *starts with* a staff-size definition
            it will be injected *after* our paper block.
            """
            nonlocal sample_content, global_size
            # Raw string: '\(' is an invalid escape in a plain literal.
            match = re.match(
                r'#\(set-global-staff-size \d+\)', sample_content)
            if match:
                global_size = match.group(0)
                sample_content = sample_content[len(global_size):]

        def load_content():
            """
            Load the content to be engraved as sample,
            either from the active editor or from a file.
            """
            nonlocal sample_content, base_dir
            nonlocal cache_persistently, target
            custom_file = self.custom_sample_url.path()
            # Fall back to the first default sample when "Custom" is
            # selected but no file was chosen.
            if target == "<CUSTOM>" and not custom_file:
                target = self.cb_samples.itemData(0)
            # Provided sample files will be cached persistently
            cache_persistently = target not in ("<CUSTOM>", "<CURRENT>")
            if target == "<CURRENT>":
                # Engrave active document
                import engrave
                current_doc = engrave.engraver(app.activeWindow()).document()
                sample_content = current_doc.toPlainText()
                if not current_doc.url().isEmpty():
                    base_dir = os.path.dirname(
                        current_doc.url().toLocalFile())
            else:
                if target == "<CUSTOM>":
                    sample_file = custom_file
                else:
                    # Engrave from a file
                    import fonts
                    template_dir = os.path.join(
                        fonts.__path__[0], 'templates')
                    sample_file = os.path.join(
                        template_dir, 'musicfont-' + target)
                base_dir = os.path.dirname(sample_file)
                with open(sample_file, 'r') as f:
                    sample_content = f.read()

        def sample_document():
            """
            Steps of composing the used sample document.
            """
            load_content()
            handle_staff_size()
            result = [
                '\\version "{}"\n'.format(self.window(
                ).available_fonts.music_fonts().lilypond_info.versionString()),
                '{}\n'.format(global_size) if global_size else '',
                # TODO: "Protect" this regarding openLilyLib.
                # It would be easy to simply pass 'lily' as an argument
                # to always use the generic approach. However, that would
                # prevent the use of font extensions and stylesheets.
                self.window().font_full_cmd(),
                sample_content
            ]
            return '\n'.join(result)

        sample = sample_document()
        cache_dir = (self.persistent_cache_dir
                     if cache_persistently
                     else self.temp_dir)
        self.musicFontPreview.preview(
            sample,
            title='Music font preview',
            base_dir=base_dir,
            temp_dir=cache_dir,
            cached=True)
def main():
    """Ingest one PyPI package release: upload its source distribution
    and matching wheels to Google Storage and print a deps.pyl entry.

    Python 2 script (print statements, iteritems, raw_input).
    Returns 0-ish (None) on success, 1 on bad release data, 2 when the
    user aborts at the confirmation prompt.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('package', help='Name of the PyPI package to ingest')
    parser.add_argument('version', help='Version to ingest')
    o = parser.parse_args()
    pkg = o.package
    version = o.version
    # '-' is used in wheel file names as a separator. Some scripts are getting
    # confused if '-' is used in package names. Use '_' instead. PyPI understand
    # this: "a-b-c" and "a_b_c" point to the exact same package.
    pkg = pkg.replace('-', '_')
    info = query_pypi(pkg, version)
    releases = sorted(info['releases'][version])
    # We first find source *.zip (or *.tar.gz) and use its SHA1 as build
    # identifier (assuming all wheels were build with that exact source).
    zips = [
        r for r in releases
        if r['packagetype'] == 'sdist' and r['filename'].endswith('.zip')
    ]
    tars = [
        r for r in releases
        if r['packagetype'] == 'sdist' and r['filename'].endswith('.tar.gz')
    ]
    sources = zips or tars
    if not sources:
        print 'Could not find a source distribution (*.zip or *.tar.gz)'
        return 1
    if len(sources) != 1:
        print 'More than 1 source distribution, don\'t know what to pick:'
        print ' \n'.join(sources)
        return 1
    source = sources[0]
    print 'Referring to the following source release:'
    print json.dumps(source, sort_keys=True, indent=2)
    print
    # Now find wheels for all platforms we care about.
    wheels = []
    only_on = set()
    for r in releases:
        if r['packagetype'] != 'bdist_wheel':
            continue
        fn = r['filename']
        if not fn.endswith('.whl'):
            print 'Not a wheel: %s' % fn
            continue
        # See https://www.python.org/dev/peps/pep-0491/#file-name-convention
        chunks = fn[:-len('.whl')].split('-')
        if chunks[0] != pkg or chunks[1] != version:
            print 'Unexpected filename: %s' % fn
            continue
        # See also https://www.python.org/dev/peps/pep-0425/
        py_tag, abi_tag, platform_tag = chunks[-3:]
        # Keep the wheel only when its tags match a known platform;
        # the for/else skips wheels with no match.
        for (known_py, known_abi, known_plat), deps_tag in PLATFORMS.iteritems():
            if (py_tag == known_py and abi_tag == known_abi
                    and known_plat in platform_tag):
                only_on.add(deps_tag)
                break
        else:
            continue
        wheels.append(r)
    print 'Going to ingest the following source distribution:'
    print ' * %s (%d downloads)' % (source['filename'], source['downloads'])
    print
    if wheels:
        print 'Going to ingest the following wheels:'
        for r in wheels:
            print ' * %s (%d downloads)' % (r['filename'], r['downloads'])
        print
    # 'any' platform tag means the package works everywhere.
    if 'any' in only_on:
        only_on = []
        print 'This will make the package available on all platforms.'
    else:
        only_on = sorted(only_on)
        print 'This will make the package available on:'
        for p in only_on:
            print ' * %s' % p
    print
    if raw_input('Continue? [Y] ') not in ('', 'y', 'Y'):
        return 2
    with util.tempdir() as tmp:
        # Upload the source code. We checked above it is .zip or .tar.gz.
        src = download_release(source, tmp)
        build_id = get_file_sha1(src)
        src_gs_file = build_id + ('.zip' if src.endswith('.zip') else '.tar.gz')
        upload_to_gs(src, '%s/%s' % (SOURCE_BUCKET, src_gs_file))
        # Upload all binary wheels.
        for release in wheels:
            chunks = release['filename'].split('-')
            new_name = '%s-%s-0_%s-%s' % (
                chunks[0],  # package name
                chunks[1],  # package version
                build_id,  # our fake build identifier
                '-'.join(
                    chunks[-3:]),  # all original tags and '.whl' extension
            )
            wheel = download_release(release, tmp)
            upload_to_gs(wheel, '%s/%s' % (WHEELS_BUCKET, new_name))
    print 'Done!'
    print
    entry = {
        'version': version,
        'build': '0',
        'gs': src_gs_file,
    }
    if only_on:
        entry['only_on'] = only_on
    print 'deps.pyl entry:'
    print json.dumps({pkg: entry}, sort_keys=True, indent=2)
    if not wheels:
        print
        print 'This is a source-only release. Once you update deps.pyl you\'ll need'
        print 'to run the following command to build and upload the cross-platform'
        print 'wheel made of the source code:'
        print
        print './build_deps.py --upload %s' % pkg
class CachedPreviewJob(PublishJob):
    """Represents a cached example LilyPond Job where the document is
    only passed in as a string.

    Internally a document is created in a cached file, and options set
    to not use point-and-click. The filename is generated as the md5
    hash of the passed text, and the compilation is only started if a
    corresponding file has not been compiled yet. If a target_dir is
    given it is used (for example to allow persistent caching),
    otherwise an autogenerated temporary directory is used (and
    deleted upon program termination). base_dir can be used to add a
    'virtual' document Directory in order to use relative includes
    from the 'current document'.
    """

    # Fallback cache directory, shared by all instances.
    _target_dir = util.tempdir()

    def __init__(
        self, text, target_dir=None, title=None, base_dir=None
    ):
        import hashlib
        # The md5 of the text is the cache key and base of all file names.
        md = hashlib.md5()
        md.update(text.encode('utf-8'))
        self.hash_name = md.hexdigest()
        self.base_name = self.hash_name + '.ly'
        self.target_dir = target_dir or self._target_dir
        filename = os.path.join(self.target_dir, self.base_name)
        # A cached <hash>.pdf means we can skip compilation entirely.
        if os.path.exists(os.path.join(
            self.target_dir, self.hash_name + '.pdf')
        ):
            self._needs_compilation = False
        else:
            with open(filename, 'wb') as f:
                f.write(text.encode('utf-8'))
            self._needs_compilation = True
        url = QUrl(filename)
        url.setScheme('file')
        super(CachedPreviewJob, self).__init__(url, title=title)
        # Clean up byproducts once the job finishes.
        self.done.connect(self.remove_intermediate)
        if title:
            self.set_title(title)
        if base_dir:
            self.add_include_path(base_dir)

    def cleanup(self):
        """Do *not* remove the generated files."""
        pass

    def needs_compilation(self):
        # True when __init__ found no cached PDF for this text.
        return self._needs_compilation

    def remove_intermediate(self):
        """Remove all files from the compilation
        except the (main) .pdf and the .ly files."""
        dir = self.target_dir
        files = os.listdir(dir)
        hash_name = self.hash_name
        keep = [
            hash_name + '.ly',
            hash_name + '.pdf'
        ]
        for f in files:
            if f.startswith(hash_name) and f not in keep:
                os.remove(os.path.join(dir, f))

    def resultfiles(self):
        """
        Returns a single result file, wrapped in a list.
        This for example prevents system-wise files from
        lilypond-book-preamble to clutter the preview.
        """
        #TODO: Support non-PDF compilation modes
        output_name, _ = os.path.splitext(self.base_name)
        resultfile = os.path.join(self.directory(), output_name + '.pdf')
        if os.path.exists(resultfile):
            return [resultfile]
        else:
            return []

    def start(self):
        """Override the Job start, using cached PDF if possible."""
        if self.needs_compilation():
            super(CachedPreviewJob, self).start()
        else:
            # NOTE(review): self.done is connected via .connect() above,
            # so it looks like a signal; invoking it directly with
            # "cached" relies on the project's Job API — confirm.
            self.done("cached")