def _store_output(self):
    """Read new lines from self._tempfile and collect prepared output.

    Resumes from self._current_position and keeps polling (while
    self.silent is true) until a line ending with self.FINISHED is seen;
    lines between the STARTED and FINISHED markers are run through
    self._prepare_output() and appended to self.output.
    """
    log.debug("Storing output ...")
    handle = open(self._tempfile, 'r')
    log.debug("Current position: " + str(self._current_position))
    handle.seek(self._current_position)

    finished = False
    started = False
    # NOTE(review): this polls readlines() in a tight loop with no sleep;
    # if the FINISHED marker never appears it spins forever — confirm the
    # writer always emits it.
    while self.silent and not finished:
        batch = handle.readlines()
        # Remember where we stopped so a later call can resume.
        self._current_position = handle.tell()
        for raw in batch:
            raw = raw.strip()
            log.debug("Started: %s | Line: %s" % (str(started), raw))
            log.debug("Condition: " + str(raw.strip().endswith(self.STARTED)))
            if not started:
                # Skip everything until the start marker shows up.
                started = raw.endswith(self.STARTED)
                continue
            if raw.endswith(self.FINISHED):
                finished = True
                break
            prepared = self._prepare_output(raw)
            if prepared:
                self.output.append(prepared)
    handle.close()
def unpickle(tempfile):
    """Unpickle an object from the open file object *tempfile*.

    Opposite of pickle(): rewinds the file, loads the pickled object,
    rewinds again so the file can be re-read, prints the elapsed time
    and returns the original pickled object.

    NOTE: the parameter name shadows the stdlib ``tempfile`` module;
    kept for backward compatibility with existing callers.
    """
    startTime = time.time()
    tempfile.seek(0)
    # 'obj' instead of the builtin-shadowing name 'object'.
    obj = cPickle.load(tempfile)
    tempfile.seek(0)
    # print() function form is valid on both Python 2 and 3 for a
    # single argument (the rest of this file mixes both dialects).
    print("TIME: unpickle() " + str(time.time() - startTime) + " s")
    return obj
def _load_db_to_memory(in_db_path): # Read database to tempfile conn = sqlite3.connect('file:%s?mode=ro' % in_db_path, uri=True) tempfile = io.StringIO() for line in conn.iterdump(): tempfile.write('%s\n' % line) conn.close() tempfile.seek(0) # Create a database in memory and import from tempfile conn = sqlite3.connect(":memory:") conn.cursor().executescript(tempfile.read()) return conn
def __from_wave__(cls, filename, wave_filename, compression=None):
    """Encode wave_filename to Musepack at filename via the mpcenc binary.

    Unknown compression levels fall back to cls.DEFAULT_COMPRESSION.
    Returns a MusepackAudio for the encoded file, or raises
    EncodingError if mpcenc exits non-zero.
    """
    if (str(compression) not in cls.COMPRESSION_MODES):
        compression = cls.DEFAULT_COMPRESSION

    #mppenc requires files to end with .mpc for some reason
    if (not filename.endswith(".mpc")):
        import tempfile
        actual_filename = filename
        # NOTE(review): this local rebinds the name 'tempfile', shadowing
        # the module imported just above — confusing but harmless here.
        tempfile = tempfile.NamedTemporaryFile(suffix=".mpc")
        filename = tempfile.name
    else:
        actual_filename = tempfile = None

    ###Musepack SV7###
    #sub = subprocess.Popen([BIN['mppenc'],
    #                        "--silent",
    #                        "--overwrite",
    #                        "--%s" % (compression),
    #                        wave_filename,
    #                        filename],
    #                       preexec_fn=ignore_sigint)

    ###Musepack SV8###
    sub = subprocess.Popen([BIN['mpcenc'],
                            "--silent",
                            "--overwrite",
                            "--%s" % (compression),
                            wave_filename,
                            filename])

    if (sub.wait() == 0):
        if (tempfile is not None):
            # Copy the encoded data from the temporary .mpc file back to
            # the originally requested path.
            filename = actual_filename
            # NOTE(review): file() is Python 2 only; this block predates
            # Python 3.
            f = file(filename, 'wb')
            tempfile.seek(0, 0)
            transfer_data(tempfile.read, f.write)
            f.close()
            tempfile.close()
        return MusepackAudio(filename)
    else:
        if (tempfile is not None):
            tempfile.close()
        raise EncodingError(u"error encoding file with mpcenc")
def main():
    """Entry point: read the JSON config, build a tar archive of the
    configured sources, and upload it to the configured S3 bucket."""
    parser = argparse.ArgumentParser(description='Amazon S3 Backup.')
    parser.add_argument('config_file', help='JSON configuration file')
    args = parser.parse_args()

    try:
        config = read_config(args.config_file)
    except ConfigError:
        logger.exception("Error reading configuration file")
        sys.exit(1)
    logger.debug(config)

    logger.debug("Creating temporary tar file")
    archive = create_tempfile()
    try:
        create_tarfile(archive, config.get("sources"),
                       config.get("exclusions"))
    except TarFileCreationError as e:
        logger.error(f"Error creating tar file: {e}")
        sys.exit(1)
    archive.seek(0)  # Needed for S3 upload to work

    logger.debug("Creating S3 bucket object")
    s3_config = config.get("s3")
    s3_bucket = S3Bucket(s3_config.get("bucket_name"),
                         s3_config.get("bucket_region"),
                         s3_config.get("storage_class"))
    s3_metadata = {
        "sources": ",".join(config.get("sources")),
        "exclusions": ",".join(config.get("exclusions")),
    }
    s3_key = generate_s3_key(config.get("tarfile_name_prefix"))

    logger.info("Uploading file to S3")
    s3_bucket.upload_fileobj(archive, s3_key, s3_metadata)

    logger.debug("Closing temporary tar file")
    archive.close()
    logger.info("Done!")
def testTrollitaireConverter(self):
    """Test that the Trollitaire converter works properly."""
    # Run both fixtures through the converter and compare with the
    # expected output files; the second fixture contains an UNDO.
    cases = ((old_troll_file, converted_troll),
             (old_troll_file2, converted_troll2))
    for source_path, expected_path in cases:
        tempfile = cStringIO.StringIO()
        with open(source_path) as infile:
            trollconvert.convert_draft_file(infile, tempfile)
        # BUGFIX: the original leaked the open(...).read() file objects;
        # use context managers so fixture handles are closed.
        with open(expected_path) as expected_file:
            expected = expected_file.read()
        tempfile.seek(0)
        self.assertEqual(expected, tempfile.read())
sub.stdin.close() sub.wait() cls.__unlink__(filename) raise err try: pcmreader.close() except DecodingError, err: raise EncodingError(err.error_message) sub.stdin.close() if sub.wait() == 0: if tempfile is not None: filename = actual_filename f = file(filename, "wb") tempfile.seek(0, 0) transfer_data(tempfile.read, f.write) f.close() tempfile.close() return M4AAudio(filename) else: if tempfile is not None: tempfile.close() raise EncodingError(u"unable to write file with faac") @classmethod def can_add_replay_gain(cls): """Returns False.""" return False
def from_pcm(cls, filename, pcmreader, compression=None,
             total_pcm_frames=None):
    """encodes a new file from PCM data

    takes a filename string, PCMReader object,
    optional compression level string and
    optional total_pcm_frames integer

    encodes a new audio file from pcmreader's data
    at the given filename with the specified compression level
    and returns a new M4AAudio object

    raises EncodingError if faac fails or the written frame
    count doesn't match total_pcm_frames"""

    import subprocess
    import os
    from audiotools import PCMConverter
    from audiotools import transfer_data
    from audiotools import transfer_framelist_data
    from audiotools import ignore_sigint
    from audiotools import EncodingError
    # NOTE(review): DecodingError appears unused in this block — confirm
    # against the sibling from_pcm variants before removing.
    from audiotools import DecodingError
    from audiotools import ChannelMask
    from audiotools import __default_quality__

    # Fall back to this format's default quality level.
    if ((compression is None) or
        (compression not in cls.COMPRESSION_MODES)):
        compression = __default_quality__(cls.NAME)

    # Downmix anything wider than stereo before piping it to faac.
    if pcmreader.channels > 2:
        pcmreader = PCMConverter(pcmreader,
                                 sample_rate=pcmreader.sample_rate,
                                 channels=2,
                                 channel_mask=ChannelMask.from_channels(2),
                                 bits_per_sample=pcmreader.bits_per_sample)

    # faac requires files to end with .m4a for some reason
    if not filename.endswith(".m4a"):
        import tempfile
        actual_filename = filename
        # NOTE(review): this rebinding shadows the tempfile module.
        tempfile = tempfile.NamedTemporaryFile(suffix=".m4a")
        filename = tempfile.name
    else:
        actual_filename = tempfile = None

    sub = subprocess.Popen(
        [BIN['faac'],
         "-q", compression,
         "-P",
         "-R", str(pcmreader.sample_rate),
         "-B", str(pcmreader.bits_per_sample),
         "-C", str(pcmreader.channels),
         "-X",
         "-o", filename,
         "-"],
        stdin=subprocess.PIPE,
        # Silence encoder chatter; os.devnull covers Pythons that
        # predate subprocess.DEVNULL.
        stderr=subprocess.DEVNULL if hasattr(subprocess, "DEVNULL")
        else open(os.devnull, "wb"),
        stdout=subprocess.DEVNULL if hasattr(subprocess, "DEVNULL")
        else open(os.devnull, "wb"),
        preexec_fn=ignore_sigint)
    # Note: faac handles SIGINT on its own,
    # so trying to ignore it doesn't work like on most other encoders.

    try:
        if total_pcm_frames is not None:
            from audiotools import CounterPCMReader
            # Wrap the reader so the frame count can be verified below.
            pcmreader = CounterPCMReader(pcmreader)

        transfer_framelist_data(pcmreader, sub.stdin.write)

        if ((total_pcm_frames is not None) and
            (total_pcm_frames != pcmreader.frames_written)):
            from audiotools.text import ERR_TOTAL_PCM_FRAMES_MISMATCH
            raise EncodingError(ERR_TOTAL_PCM_FRAMES_MISMATCH)
    except (ValueError, IOError) as err:
        # Shut the encoder down and remove the partial output file.
        sub.stdin.close()
        sub.wait()
        cls.__unlink__(filename)
        raise EncodingError(str(err))
    except Exception:
        sub.stdin.close()
        sub.wait()
        cls.__unlink__(filename)
        raise

    sub.stdin.close()
    if sub.wait() == 0:
        if tempfile is not None:
            # Copy the encoded data from the temporary .m4a file back
            # to the originally requested filename.
            filename = actual_filename
            f = open(filename, 'wb')
            tempfile.seek(0, 0)
            transfer_data(tempfile.read, f.write)
            f.close()
            tempfile.close()
        return M4AAudio(filename)
    else:
        if tempfile is not None:
            tempfile.close()
        raise EncodingError(u"unable to write file with faac")
def from_pcm(cls, filename, pcmreader, compression=None,
             total_pcm_frames=None):
    """encodes a new file from PCM data

    takes a filename string, PCMReader object,
    optional compression level string and
    optional total_pcm_frames integer

    encodes a new audio file from pcmreader's data
    at the given filename with the specified compression level
    and returns a new M4AAudio object

    raises UnsupportedBitsPerSample for widths other than 8/16/24,
    and EncodingError if faac fails or the written frame count
    doesn't match total_pcm_frames"""

    import subprocess
    import os
    from audiotools import PCMConverter
    from audiotools import transfer_data
    from audiotools import transfer_framelist_data
    from audiotools import ignore_sigint
    from audiotools import EncodingError
    from audiotools import ChannelMask
    from audiotools import __default_quality__
    # (cleanup: dropped an unused "from audiotools import DecodingError")

    # Fall back to this format's default quality level.
    if ((compression is None) or
        (compression not in cls.COMPRESSION_MODES)):
        compression = __default_quality__(cls.NAME)

    # Reject sample widths the encoder can't handle.
    if pcmreader.bits_per_sample not in {8, 16, 24}:
        from audiotools import UnsupportedBitsPerSample
        pcmreader.close()
        raise UnsupportedBitsPerSample(filename, pcmreader.bits_per_sample)

    # Downmix anything wider than stereo before piping it to faac.
    if pcmreader.channels > 2:
        pcmreader = PCMConverter(pcmreader,
                                 sample_rate=pcmreader.sample_rate,
                                 channels=2,
                                 channel_mask=ChannelMask.from_channels(2),
                                 bits_per_sample=pcmreader.bits_per_sample)

    # faac requires files to end with .m4a for some reason
    if not filename.endswith(".m4a"):
        import tempfile
        actual_filename = filename
        tempfile = tempfile.NamedTemporaryFile(suffix=".m4a")
        filename = tempfile.name
    else:
        actual_filename = tempfile = None

    sub = subprocess.Popen(
        [BIN['faac'],
         "-q", compression,
         "-P",
         "-R", str(pcmreader.sample_rate),
         "-B", str(pcmreader.bits_per_sample),
         "-C", str(pcmreader.channels),
         "-X",
         "-o", filename,
         "-"],
        stdin=subprocess.PIPE,
        # Silence encoder chatter; os.devnull covers Pythons that
        # predate subprocess.DEVNULL.
        stderr=subprocess.DEVNULL if hasattr(subprocess, "DEVNULL")
        else open(os.devnull, "wb"),
        stdout=subprocess.DEVNULL if hasattr(subprocess, "DEVNULL")
        else open(os.devnull, "wb"),
        preexec_fn=ignore_sigint)
    # Note: faac handles SIGINT on its own,
    # so trying to ignore it doesn't work like on most other encoders.

    try:
        if total_pcm_frames is not None:
            from audiotools import CounterPCMReader
            # Wrap the reader so the frame count can be verified below.
            pcmreader = CounterPCMReader(pcmreader)

        transfer_framelist_data(pcmreader, sub.stdin.write)

        if ((total_pcm_frames is not None) and
            (total_pcm_frames != pcmreader.frames_written)):
            from audiotools.text import ERR_TOTAL_PCM_FRAMES_MISMATCH
            raise EncodingError(ERR_TOTAL_PCM_FRAMES_MISMATCH)
    except (ValueError, IOError) as err:
        # Shut the encoder down and remove the partial output file.
        sub.stdin.close()
        sub.wait()
        cls.__unlink__(filename)
        raise EncodingError(str(err))
    except Exception:
        sub.stdin.close()
        sub.wait()
        cls.__unlink__(filename)
        raise

    sub.stdin.close()
    if sub.wait() == 0:
        if tempfile is not None:
            # Copy the encoded data from the temporary .m4a file back
            # to the originally requested filename.
            filename = actual_filename
            f = open(filename, 'wb')
            tempfile.seek(0, 0)
            transfer_data(tempfile.read, f.write)
            f.close()
            tempfile.close()
        return M4AAudio(filename)
    else:
        if tempfile is not None:
            tempfile.close()
        raise EncodingError(u"unable to write file with faac")
resource_link_dict = core.resource_link_dict core.remove_links(resource_link_dict.keys()) core.ignored_resources = [] remote_resources = core.remote_resources # Write out resources in Dropbox folder in temp file json_obj = dict.fromkeys(remote_resources, " ") tempfile = tempfile.NamedTemporaryFile(delete=False) json.dump(json_obj, tempfile, indent=4, separators=(",", "\t:\t")) tempfile.flush() # Open editor to let user set what should be installed editor = os.environ.get("EDITOR", "vim") return_code = subprocess.call([editor, tempfile.name]) tempfile.seek(0) desired_links = json.load(tempfile) tempfile.close() # Process user's decisions resources_to_ignore = [ resource for resource in desired_links.keys() if resource not in remote_resources ] core.ignored_resources.append(resources_to_ignore) core.forge_links(desired_links) # Print help elif command_arg == "help": printHelp() else: usage()
actions['forward'] = [] print 'Removing forwards...' continue elif x[0] == '.forward': actions['forward'] += [x[1]] print 'Adding forward:', x[1] continue elif x[0] == '.edit': default = ['base'] if len(x) == 1 else x[[1]:] print 'Entering editor...' with tempfile.NamedTemporaryFile(suffix=".tmp") as tempfile: tempfile.write(comment + '\n' + '-' * 10 + '\n' + '\n'.join(map(lambda x: responses[x], default))) tempfile.flush() call([EDITOR, tempfile.name]) tempfile.flush() tempfile.seek(0) contents = tempfile.readlines() print contents reply = ''.join(contents[contents.index('-'*10+'\r\n') + 1 :]) elif x[0] in responses: reply = responses[x[0]] break else: try: choice = int(x[0]) - 1 except: continue if choice >= 0 and choice < len(actions['reply']): reply = responses[actions['reply'][choice]] break
def _download_and_unpack_file(url):
    """Downloads the database files created with setup-exfor-db.py as a
    tarball and unpacks them to the correct folder.

    Removes any previously installed database/tag files, streams the
    archive into a temporary file with a progress bar, extracts it into
    DATAPATH and creates a fresh (empty) version tag file.
    Raises Exception if the downloaded size doesn't match Content-Length.
    """
    from tqdm import tqdm
    from glob import glob
    import requests
    import math
    import tarfile
    import tempfile
    import shutil

    # cleanup: entries may be directories, plain files, or absent
    for f in [
            fullIndexFileName, fullErrorFileName, fullCoupledFileName,
            fullMonitoredFileName, fullReactionCountFileName, fullDBPath,
            dbTagFile
    ]:
        try:
            shutil.rmtree(f)
        except NotADirectoryError:
            os.remove(f)
        except FileNotFoundError:
            pass

    # Remove old version-tag files:
    tag_files = [
        f for tag in ['X4-*', 'EXFOR-*']
        for f in glob(os.path.join(DATAPATH, tag))
    ]
    for tagfile in tag_files:
        try:
            os.remove(tagfile)
        except FileNotFoundError:
            pass

    # Streaming, so we can iterate over the response.
    r = requests.get(url, stream=True)
    tarname = os.path.basename(url)
    # Total size in bytes (0 when the server sends no Content-Length).
    total_size = int(r.headers.get('content-length', 0))
    block_size = 1024 * 1024
    wrote = 0
    # Renamed from 'tempfile' to avoid shadowing the module.
    archive = tempfile.TemporaryFile()
    print('Downloading data file', tarname)
    # BUGFIX: use true division here; math.ceil(total_size // block_size)
    # floored first and under-reported the progress total by one block.
    for data in tqdm(r.iter_content(block_size),
                     total=math.ceil(total_size / block_size),
                     unit='MB',
                     unit_scale=True):
        wrote = wrote + len(data)
        archive.write(data)
    if total_size != 0 and wrote != total_size:
        raise Exception("ERROR, something went wrong")
    archive.flush()
    archive.seek(0)

    print('Decompressing archive', tarname)
    with tarfile.open(fileobj=archive, mode='r') as _tar:
        members = _tar.getmembers()
        # NOTE(review): TarFile.extract performs no path sanitisation, so a
        # malicious archive could write outside DATAPATH; on Python >= 3.12
        # pass filter='data'.
        for member in tqdm(members, total=len(members)):
            # BUGFIX: the original accumulated len(data) from the stale
            # download-loop variable here, which was meaningless.
            _tar.extract(member, DATAPATH)
    archive.close()

    # Create an empty tag file marking the installed database version.
    with open(dbTagFile, 'wb') as f:
        print('Installed database version', dbTagFile)
        pass
def pack(self):
    """ Packs the fasta and sqlite databases to remove redundancy
    and merge sqlite columns

    Returns: void
    """
    # First we handle the fastq file
    unique_hash = set()
    redundant_file = fasta.file(self.database_fasta_file)
    temp = gzip.open(self.database_temp_file,'wb')
    # Keep only the first record seen for each header.
    for record in redundant_file.read():
        if not record['header'] in unique_hash :
            unique_hash.add(record['header'])
            temp.write('>'+record['header']+os.linesep+record['sequence']+os.linesep)
    # NOTE(review): 'temp' is never flushed/closed before this rename, so
    # the gzip stream may be truncated — confirm and close the handle first.
    os.rename(self.database_temp_file , self.database_fasta_file)

    # Now the sqlite (http://stackoverflow.com/a/10856450)
    from StringIO import StringIO
    # Dump the live database into an in-memory SQL script.
    tempfile = StringIO()
    for line in self.sq3_connection.iterdump():
        tempfile.write('%s\n' % line)
    tempfile.seek(0)
    # NOTE(review): this reuses database_temp_file, which was just renamed
    # over the fasta file above — verify this is the intended path.
    sq3_temp_connection = sql.connect(self.database_temp_file)
    sq3_temp_cursor = sq3_temp_connection.cursor()
    sq3_temp_cursor.execute("CREATE TABLE IF NOT EXISTS genes ( sequence_hash TEXT, genus TEXT, species TEXT, NCBItaxID TEXT, kegg_ontology TEXT , kegg_reaction TEXT , go_term TEXT, kegg_map TEXT , sequence TEXT)")
    sq3_temp_connection.commit()
    # Replay the dump into an in-memory copy used for querying.
    sq3_memory_connection = sql.connect(":memory:")
    sq3_memory_cursor = sq3_memory_connection.cursor()
    sq3_memory_cursor.executescript(tempfile.read())
    sq3_memory_connection.commit()
    sq3_memory_connection.row_factory = sql.Row
    # Merge every row sharing a sequence hash into one record and insert
    # it into the new on-disk database.
    for h in unique_hash:
        # NOTE(review): h is interpolated straight into the SQL; fine if
        # hashes are trusted hex digests, otherwise parameterize.
        sq3_memory_cursor.execute('SELECT * FROM genes WHERE `sequence_hash` = \'%s\'' % h)
        rows = sq3_memory_cursor.fetchall()
        rows_selected = len(rows)  # NOTE(review): unused
        columns = tuple ([c[0] for c in sq3_memory_cursor.description])
        merge_dict = dict.fromkeys(columns)
        for r in rows:
            # Stringify non-empty values; empty/0/None all collapse to None.
            r=[str(x) if x else None for x in r ]
            incoming = dict(zip(columns,r))
            merge_dict = merge_insert_dicts(merge_dict,incoming)
        # Drop keys whose merged value ended up empty.
        merge_dict = {i:j for i,j in merge_dict.items() if j != []}
        insert = 'INSERT INTO genes({}) VALUES ({})'.format(', '.join(merge_dict.keys()),', '.join('?' * len(merge_dict)))
        try:
            sq3_temp_cursor.execute(insert,merge_dict.values())
        except sql.Error as e:
            print merge_dict
            logger.warn(e)
            raise
    sq3_temp_connection.commit()
    sq3_temp_connection.close()