def filename_to_stream(filename, out_stream, bitrate, buffered=False):
    """Decode `filename` and write an Ogg Vorbis stream to `out_stream`.

    :param filename: Path of the audio file to play; its format is resolved
        via decoders.get_decoder.
    :param out_stream: Writable file-like object (typically a socket file)
        that receives the encoded Vorbis data.
    :param bitrate: Target bitrate in kbit/s passed to oggenc.
    :param buffered: If True, encode everything first and write it in one
        shot; otherwise stream with bandwidth shaping.
    :raises StreamGenerationError: If no decoder exists for the file's
        format or the Vorbis encoder binary is missing.
    """
    # print() with a single argument is valid in both Python 2 and 3.
    print("Handling request for %s" % (filename,))
    try:
        decode_command = decoders.get_decoder(filename)
    except KeyError:
        raise StreamGenerationError(
            "Couldn't play specified format: %r" % (filename,))
    encoder_path = "/usr/bin/oggenc"
    if not os.path.exists(encoder_path):
        raise StreamGenerationError(
            ("No Vorbis encoder found at %s. " % (encoder_path,)) +
            "Please install 'vorbis-tools'.")
    encode_command = [encoder_path, "-r", "-Q", "-b", str(bitrate), "-"]
    # Pipe the decode command into the encode command.
    p1 = subprocess.Popen(decode_command, stdout=subprocess.PIPE)
    if buffered:
        # Read the entire output and then write it to the output stream.
        p2 = subprocess.Popen(encode_command, stdin=p1.stdout,
                              stdout=subprocess.PIPE)
        out_stream.write(p2.stdout.read())
    else:
        # Stream the encoder output while limiting the total bandwidth used.
        # We do this by writing the encoder output to a pipe and reading from
        # the pipe at a limited rate.
        (read_fd, write_fd) = os.pipe()
        p2 = subprocess.Popen(encode_command, stdin=p1.stdout,
                              stdout=os.fdopen(write_fd, 'wb'))
        # Don't let reads block when we've read to the end of the encoded
        # song data.
        fcntl.fcntl(read_fd, fcntl.F_SETFL, os.O_NONBLOCK)
        try:
            copy_output_with_shaping(read_fd, out_stream, bitrate,
                                     lambda: p2.poll() is not None)
        except socket.error:
            # The client likely dropped the connection; stop both children.
            p1.terminate()
            p2.terminate()
        finally:
            # LEAK FIX: the original never closed the read end of the pipe,
            # leaking one fd per streamed request. Close it on every path.
            try:
                os.close(read_fd)
            except OSError:
                pass
def test_get_decoder(self):
    """Test decoders.get_decoder"""
    decoder_command = decoders.get_decoder("/path/to/SOMETHING.MP3")
    # The first element of the command is the decoder binary's path.
    self.assertTrue(decoder_command[0].startswith("/usr/bin"))
def filename_to_stream(filename, out_stream, bitrate, buffered=False):
    """Decode `filename` and write an Ogg Vorbis stream to `out_stream`.

    :param filename: Path of the audio file to play; its format is resolved
        via decoders.get_decoder.
    :param out_stream: Writable file-like object (typically a socket file)
        that receives the encoded Vorbis data.
    :param bitrate: Target bitrate in kbit/s passed to oggenc.
    :param buffered: If True, encode everything first and write it in one
        shot; otherwise stream with bandwidth shaping.
    :raises StreamGenerationError: If no decoder exists for the file format.
    """
    print("Handling request for %s" % (filename,))
    try:
        decode_command = decoders.get_decoder(filename)
    except KeyError:
        raise StreamGenerationError(
            "Couldn't play specified format: %r" % (filename,))
    encode_command = ["/usr/bin/oggenc", "-r", "-Q", "-b", str(bitrate), "-"]
    # Pipe the decode command into the encode command.
    p1 = subprocess.Popen(decode_command, stdout=subprocess.PIPE)
    if buffered:
        # Read the entire output and then write it to the output stream.
        p2 = subprocess.Popen(encode_command, stdin=p1.stdout,
                              stdout=subprocess.PIPE)
        out_stream.write(p2.stdout.read())
    else:
        # Stream the encoder output while limiting the total bandwidth used.
        # We do this by writing the encoder output to a pipe and reading from
        # the pipe at a limited rate.
        (read_fd, write_fd) = os.pipe()
        p2 = subprocess.Popen(encode_command, stdin=p1.stdout,
                              stdout=os.fdopen(write_fd, 'wb'))
        bytes_written = 0
        start_time = time.time()
        # Compute the output rate, converting kilobits/sec to bytes/sec.
        max_bytes_per_sec = RATE_MULTIPLIER * bitrate * 1024 / 8
        # Don't let reads block when we've read to the end of the encoded
        # song data.
        fcntl.fcntl(read_fd, fcntl.F_SETFL, os.O_NONBLOCK)
        encoder_finished = False
        try:
            while True:
                # BUG FIX: `1/STREAM_WRITE_FREQUENCY` is integer division
                # under Python 2, which yields 0 for any frequency > 1 and
                # turns this into a busy loop. Force float division.
                time.sleep(1.0 / STREAM_WRITE_FREQUENCY)
                # If the average transfer rate exceeds the threshold, sleep
                # for a while longer.
                if bytes_written >= ((time.time() - start_time) *
                                     max_bytes_per_sec):
                    continue
                # Detect when the encoder process has finished. We assume
                # that data written by the encoder is immediately available
                # via os.read. Therefore, if the encoder has finished, and we
                # subsequently cannot read data from the input stream, we can
                # conclude that we have read all the data.
                if p2.poll() is not None:
                    encoder_finished = True
                try:
                    data = os.read(read_fd, STREAM_CHUNK_SIZE)
                except OSError:
                    # OSError will be thrown if we read before the pipe has
                    # had any data written to it (EAGAIN on non-blocking fd).
                    data = ""
                if encoder_finished and len(data) == 0:
                    break
                try:
                    out_stream.write(data)
                except socket.error:
                    # The client likely terminated the connection. Abort.
                    p1.terminate()
                    p2.terminate()
                    return
                bytes_written = bytes_written + len(data)
        finally:
            # LEAK FIX: always release the read end of the pipe; the original
            # leaked one fd per streamed request.
            try:
                os.close(read_fd)
            except OSError:
                pass
def filename_to_stream(filename, out_stream, bitrate, buffered=False):
    """Decode `filename` and write an Ogg Vorbis stream to `out_stream`.

    :param filename: Path of the audio file to play; its format is resolved
        via decoders.get_decoder.
    :param out_stream: Writable file-like object (typically a socket file)
        that receives the encoded Vorbis data.
    :param bitrate: Target bitrate in kbit/s passed to oggenc.
    :param buffered: If True, encode everything first and write it in one
        shot; otherwise stream with bandwidth shaping.
    :raises StreamGenerationError: If no decoder exists for the file's
        format or the Vorbis encoder binary is missing.
    """
    print("Handling request for %s" % (filename,))
    try:
        decode_command = decoders.get_decoder(filename)
    except KeyError:
        raise StreamGenerationError(
            "Couldn't play specified format: %r" % (filename,))
    encoder_path = "/usr/bin/oggenc"
    if not os.path.exists(encoder_path):
        raise StreamGenerationError(
            ("No Vorbis encoder found at %s. " % (encoder_path,)) +
            "Please install 'vorbis-tools'.")
    encode_command = [encoder_path, "-r", "-Q", "-b", str(bitrate), "-"]
    # Pipe the decode command into the encode command.
    p1 = subprocess.Popen(decode_command, stdout=subprocess.PIPE)
    if buffered:
        # Read the entire output and then write it to the output stream.
        p2 = subprocess.Popen(encode_command, stdin=p1.stdout,
                              stdout=subprocess.PIPE)
        out_stream.write(p2.stdout.read())
    else:
        # Stream the encoder output while limiting the total bandwidth used.
        # We do this by writing the encoder output to a pipe and reading from
        # the pipe at a limited rate.
        (read_fd, write_fd) = os.pipe()
        p2 = subprocess.Popen(encode_command, stdin=p1.stdout,
                              stdout=os.fdopen(write_fd, 'wb'))
        # Don't let reads block when we've read to the end of the encoded
        # song data.
        fcntl.fcntl(read_fd, fcntl.F_SETFL, os.O_NONBLOCK)
        try:
            copy_output_with_shaping(read_fd, out_stream, bitrate,
                                     lambda: p2.poll() is not None)
        except socket.error:
            p1.terminate()
            p2.terminate()
        finally:
            # ROBUSTNESS FIX: close the FIFO in a `finally` so it is released
            # even if the copy raises something other than socket.error; the
            # original only closed it on the normal/socket-error paths.
            try:
                os.close(read_fd)
            except OSError:
                pass
def init_specific_model(model_type, img_size, latent_dim, hidden_dim=None):
    """Return an instance of a VAE with encoder and decoder from `model_type`.

    Parameters
    ----------
    model_type : str
        Case-insensitive model name; normalized with lower().capitalize(),
        so e.g. "burgess" and "BURGESS" both map to "Burgess".
    img_size : tuple of ints for model_type=Burgess, int for model_type=Lin
        Size or Dimension of images
    latent_dim : int
        Dimensionality of the latent space.
    hidden_dim : int, optional
        Hidden-layer size; only used for model_type=Lin.

    Raises
    ------
    ValueError
        If `model_type` is not a recognized model name.
    """
    model_type = model_type.lower().capitalize()
    # FIX: validate the name *before* calling get_encoder/get_decoder.
    # Previously those calls ran first, so an unknown name would fail inside
    # them and the ValueError branch below was effectively unreachable.
    # Also fixed the "Unkown" typo in the error message.
    if model_type not in ("Burgess", "Lin"):
        err = "Unknown model_type={}. Possible values: {}"
        raise ValueError(err.format(model_type, MODELS))

    get_enc = get_encoder(model_type)
    get_dec = get_decoder(model_type)
    if model_type == "Burgess":
        encoder = get_enc(img_size, latent_dim)
        decoder = get_dec(img_size, latent_dim)
    else:  # "Lin": takes the extra hidden_dim argument
        encoder = get_enc(img_size, latent_dim, hidden_dim)
        decoder = get_dec(img_size, latent_dim, hidden_dim)

    model = VAE(encoder, decoder)
    model.model_type = model_type  # store to help reloading
    return model
def test_get_decoder(self):
    """Test decoders.get_decoder"""
    first_arg = decoders.get_decoder("/path/to/SOMETHING.MP3")[0]
    self.assertTrue(first_arg.startswith("/usr/bin"))
def __init__(self, opt, lexicon, pretrained_wembs=None, pretrained_user=None,
             lang_model=None):
    """Build the translation model: encoder, attention, decoder, user model.

    :param opt: Options/namespace object; the attributes read here are
        num_layers, dropout_rate, word_dropout_rate, label_smoothing,
        max_len, usr_dim, emb_dim, hidden_dim, att_dim, model, encoder,
        attention, decoder, unk_replacement and user_token
    :param lexicon: Lexicon object with source/target word-to-id dicts
        (w2ids, w2idt, both containing 'SOS'/'EOS') and a user-to-id dict
        (usr2id)
    :param pretrained_wembs: Optional pretrained word embeddings, forwarded
        to both the encoder and the decoder
    :param pretrained_user: Optional pretrained user embeddings
        # NOTE(review): accepted but never referenced in this constructor --
        # confirm whether it should be forwarded somewhere
    :param lang_model: Target-side language model, stored as self.lm
        # presumably used for label smoothing elsewhere -- verify
    """
    # Store config
    self.nl = opt.num_layers
    self.dr, self.wdr = opt.dropout_rate, opt.word_dropout_rate
    # self.ls flags whether label smoothing is active; ls_eps is the
    # interpolation coefficient.
    self.ls, self.ls_eps = (opt.label_smoothing > 0), opt.label_smoothing
    self.max_len = opt.max_len
    # Sentence-boundary token ids for source and target vocabularies.
    self.src_sos, self.src_eos = lexicon.w2ids['SOS'], lexicon.w2ids['EOS']
    self.trg_sos, self.trg_eos = lexicon.w2idt['SOS'], lexicon.w2idt['EOS']
    # Dimensions
    self.vs, self.vt = len(lexicon.w2ids), len(lexicon.w2idt)  # vocab sizes
    self.du = opt.usr_dim               # user embedding dimension
    self.nu = len(lexicon.usr2id)       # number of users
    self.di, self.dh, self.da = opt.emb_dim, opt.hidden_dim, opt.att_dim
    # Model
    self.pc = dy.ParameterCollection()
    self.model_file = opt.model
    # Encoder
    self.enc = encoders.get_encoder(opt.encoder, self.nl, self.di, self.dh,
                                    self.du, self.vs, self.pc, dr=self.dr,
                                    pre_embs=pretrained_wembs)
    # Attention module
    self.att = attention.get_attention(opt.attention, self.enc.dim, self.dh,
                                       self.da, self.pc)
    # Decoder
    self.dec = decoders.get_decoder(opt.decoder, self.nl, self.di,
                                    self.enc.dim, self.dh, self.vt, self.du,
                                    self.pc, pre_embs=pretrained_wembs,
                                    dr=self.dr, wdr=self.wdr)
    # User recognizer parameters
    self.usr = user.ZeroVocabUserRecognizer(self.vt, self.nu, self.enc.dim,
                                            self.pc)
    # Target language model (for label smoothing)
    self.lm = lang_model
    self.lex = lexicon
    self.unk_replace = opt.unk_replacement
    self.user_token = opt.user_token
    # Mode flags; test/update semantics are set by other methods -- these
    # are just the defaults.
    self.test = True
    self.update = True