def fetch_headers_from_s3(self):
    def collector(data, h_file):
        h_file.write(data)
        local_size = float(h_file.tell())
        final_size = float(final_size_after_download)
        self._headers_progress_percent = math.ceil(local_size / final_size * 100)

    local_header_size = self.local_header_file_size()
    resume_header = {"Range": f"bytes={local_header_size}-"}
    response = yield treq.get(HEADERS_URL, headers=resume_header)
    got_406 = response.code == 406  # our file is bigger
    final_size_after_download = response.length + local_header_size
    if got_406:
        log.warning("s3 is more out of date than we are")
    # should have something to download and a final length divisible by the header size
    elif final_size_after_download and not final_size_after_download % HEADER_SIZE:
        s3_height = (final_size_after_download / HEADER_SIZE) - 1
        local_height = self.local_header_file_height()
        if s3_height > local_height:
            if local_header_size:
                log.info("Resuming download of %i bytes from s3", response.length)
                with open(self.headers_file, "a+b") as headers_file:
                    yield treq.collect(response, lambda d: collector(d, headers_file))
            else:
                with open(self.headers_file, "wb") as headers_file:
                    yield treq.collect(response, lambda d: collector(d, headers_file))
            log.info("fetched headers from s3 (s3 height: %i), now verifying integrity after download.", s3_height)
            self._check_header_file_integrity()
        else:
            log.warning("s3 is more out of date than we are")
    else:
        log.error("invalid size for headers from s3")
class Command(base.Command):
    header = "nexus download"
    requiredArgs = [
        'host',
        'cred',
        'repo',
        'artifacts',
    ]

    def copy(self, artifact, d, done, info):
        g, a, v, e, f, dest = artre.findall(artifact)[0]
        if dest == "":
            dest = "%s-%s.%s" % (a, v, e)
        fileName = os.path.join(self.builder.basedir, dest)
        dirName = os.path.abspath(os.path.dirname(fileName))
        #result = mkpath(dirName,verbose=True)
        try:
            os.makedirs(dirName)
        except Exception:  # directory probably exists already
            pass
        file = open(fileName, "wb")
        reading = treq.collect(d, file.write)
        reading.addBoth(lambda _: file.close())
        reading.addCallback(lambda _: done.callback(info))
        reading.addErrback(done.errback)
def handle_response(self, response, handle_body):
    if response.code != 200:
        raise AppException("Response code %d" % response.code)
    try:
        headers = response.headers.getRawHeaders("Content-Type")
    except KeyError:
        raise AppException("No Content-Type")
    if not headers:
        raise AppException("Empty Content-Type")
    else:
        header = headers[0]
    log.msg("Header line %s" % header)
    mime, _, encoding = header.partition(";")
    if encoding:
        _, _, encoding = encoding.strip().partition("=")
        try:
            codecs.lookup(encoding)
        except LookupError:
            encoding = None
    if mime not in self.accepted_mimes:
        raise AppException("Mime %s not supported" % mime)
    if handle_body:
        if encoding:
            log.msg("Using encoding %s to handle response" % encoding)
        self.parser = self.parser_class()
        self.connection = treq.collect(response, self.feed)
        return self.connection
def test_collect_0_length(self):
    self.response.length = 0
    d = collect(
        self.response,
        lambda d: self.fail("Unexpectedly called with: {0}".format(d)))
    self.assertEqual(self.successResultOf(d), None)
def stream_response(response):
    request.setResponseCode(response.code)
    for key, values in response.headers.getAllRawHeaders():
        for value in values:
            request.setHeader(key, value)
    d = treq.collect(response, request.write)
    d.addCallback(lambda _: request.finish())
    return d
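# A hedged sketch of how a pass-through handler like stream_response above
# might be wired into a twisted.web resource; the upstream URL and the
# ProxyResource class are illustrative assumptions, not part of the original.
import treq
from twisted.web.resource import Resource
from twisted.web.server import NOT_DONE_YET

class ProxyResource(Resource):
    isLeaf = True

    def render_GET(self, request):
        d = treq.get(b"http://example.com/upstream")

        def stream_response(response):
            request.setResponseCode(response.code)
            for key, values in response.headers.getAllRawHeaders():
                for value in values:
                    request.setHeader(key, value)
            # Relay each body chunk to the client as soon as it arrives.
            done = treq.collect(response, request.write)
            done.addCallback(lambda _: request.finish())
            return done

        d.addCallback(stream_response)
        return NOT_DONE_YET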
def main(reactor):
    data = b'{"create_request": {"key": "Zm9v"} }'
    response = yield treq.post('http://localhost:2379/v3alpha/watch', data=data)
    if response.code != 200:
        raise Exception('watch call returned with {}'.format(response.code))

    if True:
        result = yield response.json()
        pprint(result)
    else:
        def process(data):
            print(data)
        treq.collect(response, process)
        yield sleep(300)
def download(self, cap, local_path):
    log.debug("Downloading %s...", local_path)
    resp = yield treq.get('{}uri/{}'.format(self.nodeurl, cap))
    if resp.code == 200:
        with open(local_path, 'wb') as f:
            yield treq.collect(resp, f.write)
        log.debug("Successfully downloaded %s", local_path)
    else:
        content = yield treq.content(resp)
        raise TahoeWebError(content.decode('utf-8'))
def download(self, cap, local_path):
    log.debug("Downloading %s...", local_path)
    yield self.await_ready()
    resp = yield treq.get("{}uri/{}".format(self.nodeurl, cap))
    if resp.code == 200:
        with atomic_write(local_path, mode="wb", overwrite=True) as f:
            yield treq.collect(resp, f.write)
        log.debug("Successfully downloaded %s", local_path)
    else:
        content = yield treq.content(resp)
        raise TahoeWebError(content.decode("utf-8"))
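# The two download() generators above rely on twisted's inlineCallbacks and
# surrounding class state; a minimal standalone version of the same
# collect-to-disk pattern (the URL and path are placeholder assumptions):
import treq
from twisted.internet import defer
from twisted.internet.task import react

@defer.inlineCallbacks
def download(reactor, url="http://example.com/file.bin", local_path="file.bin"):
    response = yield treq.get(url)
    if response.code != 200:
        body = yield treq.content(response)
        raise RuntimeError(body.decode("utf-8", "replace"))
    with open(local_path, "wb") as f:
        # Each chunk is written straight to disk; the body is never
        # held in memory as a whole.
        yield treq.collect(response, f.write)

if __name__ == "__main__":
    react(download)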
def get_external_ip():  # used if upnp is disabled or non-functioning
    try:
        buf = []
        response = yield treq.get("https://api.lbry.io/ip")
        yield treq.collect(response, buf.append)
        parsed = json.loads(b"".join(buf).decode())
        if parsed['success']:
            return parsed['data']['ip']
        return
    except Exception:
        return
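# For comparison: treq's built-in content() helper performs the same
# accumulation as the manual buf/append pattern above. A sketch against
# the same endpoint:
import json
import treq
from twisted.internet import defer

@defer.inlineCallbacks
def get_external_ip():
    response = yield treq.get("https://api.lbry.io/ip")
    body = yield treq.content(response)  # collects the whole body
    parsed = json.loads(body.decode())
    defer.returnValue(parsed['data']['ip'] if parsed['success'] else None)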
def test_collect_failure(self):
    data = []
    d = collect(self.response, data.append)
    self.protocol.dataReceived(b'foo')
    self.protocol.connectionLost(Failure(ResponseFailed("test failure")))
    self.failureResultOf(d, ResponseFailed)
    self.assertEqual(data, [b'foo'])
def test_collect_failure(self):
    data = []
    d = collect(self.response, data.append)
    self.protocol.dataReceived('foo')
    self.protocol.connectionLost(Failure(ResponseFailed("test failure")))
    self.failureResultOf(d, ResponseFailed)
    self.assertEqual(data, ['foo'])
def update_downloaded(response):
    if response.code == OK:
        update_filename = join(config["agent_updates_dir"], "pyfarm-agent.zip")
        logger.debug("Writing update to %s", update_filename)

        if not isdir(config["agent_updates_dir"]):
            makedirs(config["agent_updates_dir"])

        # collect() returns a Deferred and delivers the body asynchronously,
        # so the file must stay open until collection finishes; a with-block
        # here would close it before the first chunk arrived.
        update_file = open(update_filename, "wb+")
        finished = collect(response, update_file.write)
        finished.addBoth(lambda _: update_file.close())

        logger.info("Update file for version %s has been downloaded "
                    "and stored under %s", data["version"], update_filename)

        # TODO Only shut down if we were started by the supervisor
        config["restart_requested"] = True
        if len(config["current_assignments"]) == 0:
            stopping = agent.stop()
            stopping.addCallback(lambda _: reactor.stop())
    else:
        logger.error("Unexpected return code %s on downloading update "
                     "from master", response.code)

    config["downloading_update"] = False
def test_collect(self):
    data = []
    d = collect(self.response, data.append)
    self.protocol.dataReceived('{')
    self.protocol.dataReceived('"msg": "hell')
    self.protocol.dataReceived('o"}')
    self.protocol.connectionLost(Failure(ResponseDone()))
    self.assertEqual(self.successResultOf(d), None)
    self.assertEqual(data, ['{', '"msg": "hell', 'o"}'])
def test_collect(self):
    data = []
    d = collect(self.response, data.append)
    self.protocol.dataReceived(b'{')
    self.protocol.dataReceived(b'"msg": "hell')
    self.protocol.dataReceived(b'o"}')
    self.protocol.connectionLost(Failure(ResponseDone()))
    self.assertEqual(self.successResultOf(d), None)
    self.assertEqual(data, [b'{', b'"msg": "hell', b'o"}'])
def test_collect_failure_potential_data_loss(self):
    """
    PotentialDataLoss failures are treated as success.
    """
    data = []
    d = collect(self.response, data.append)
    self.protocol.dataReceived('foo')
    self.protocol.connectionLost(Failure(PotentialDataLoss()))
    self.assertEqual(self.successResultOf(d), None)
    self.assertEqual(data, ['foo'])
def test_collect_failure_potential_data_loss(self):
    """
    PotentialDataLoss failures are treated as success.
    """
    data = []
    d = collect(self.response, data.append)
    self.protocol.dataReceived(b'foo')
    self.protocol.connectionLost(Failure(PotentialDataLoss()))
    self.assertEqual(self.successResultOf(d), None)
    self.assertEqual(data, [b'foo'])
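# A sketch of the kind of fixture the collect() tests above assume -- not
# necessarily treq's exact setUp. collect() calls response.deliverBody(),
# so the mock captures the protocol for the test to drive by hand.
from unittest import mock
from twisted.trial.unittest import TestCase
from treq.content import collect

class CollectTests(TestCase):
    def setUp(self):
        self.response = mock.Mock()
        self.response.length = 10  # non-zero, so collect() delivers the body

        def deliverBody(protocol):
            self.protocol = protocol

        self.response.deliverBody.side_effect = deliverBody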
def _downstream(self, blobs_id_list, namespace=''):
    uri = urljoin(self.remote_stream, self.user)
    params = {'namespace': namespace} if namespace else {}
    params['direction'] = 'download'
    data = BytesIO(json.dumps(blobs_id_list))
    response = yield self._client.post(uri, params=params, data=data)
    deferreds = []

    def done_cb(blob_id, blobfd, size):
        d = self.local.put(blob_id, blobfd, size=size, namespace=namespace)
        deferreds.append(d)

    buf = StreamDecrypterBuffer(self.secret, blobs_id_list, done_cb)
    yield treq.collect(response, buf.write)
    yield defer.gatherResults(deferreds, consumeErrors=True)
    buf.close()
def success(self, response):
    """
    successful treq get
    """
    # TODO: possible this is UNKNOWN_LENGTH
    if response.length != UNKNOWN_LENGTH:
        self.totallength = response.length
    else:
        self.totallength = 0

    if self.limit_size > 0 and self.totallength > self.limit_size:
        log.msg(
            f"Not saving URL ({self.url}) (size: {self.totallength}) exceeds file size limit ({self.limit_size})"
        )
        self.exit()
        return

    self.started = time.time()

    if not self.quiet:
        self.errorWrite("200 OK\n")

    if response.headers.hasHeader(b"content-type"):
        self.contenttype = response.headers.getRawHeaders(b"content-type")[
            0
        ].decode()
    else:
        self.contenttype = "text/whatever"

    if not self.quiet:
        if response.length != UNKNOWN_LENGTH:
            self.errorWrite(
                f"Length: {self.totallength} ({sizeof_fmt(self.totallength)}) [{self.contenttype}]\n"
            )
        else:
            self.errorWrite(f"Length: unspecified [{self.contenttype}]\n")
        if self.outfile is None:
            self.errorWrite("Saving to: `STDOUT'\n\n")
        else:
            self.errorWrite(f"Saving to: `{self.outfile}'\n\n")

    deferred = treq.collect(response, self.collect)
    deferred.addCallback(self.collectioncomplete)
    return deferred
def downloadFile(remote_path, fobj):
    """
    Download a file over HTTP from ``remote_path`` and save it to the
    provided file object ``fobj``.
    """
    logger.msg(
        "downloading file",
        remote_path=remote_path,
        function='downloadFile'
    )

    def file_writer(data):
        fobj.write(data)

    remote_path = remote_path.encode('utf-8')
    r = yield treq.get(remote_path, timeout=5)
    try:
        yield treq.collect(r, file_writer)
    except Exception as e:
        print e
        raise
def _download_and_decrypt(self, blob_id, namespace=''):
    logger.info("Starting download of blob: %s" % blob_id)
    # TODO this needs to be connected in a tube
    uri = urljoin(self.remote, self.user + '/' + blob_id)
    params = {'namespace': namespace} if namespace else None
    response = yield self._client.get(uri, params=params)
    check_http_status(response.code, blob_id=blob_id)

    if not response.headers.hasHeader('Tag'):
        msg = "Server didn't send a tag header for: %s" % blob_id
        logger.error(msg)
        raise SoledadError(msg)
    tag = response.headers.getRawHeaders('Tag')[0]
    tag = base64.urlsafe_b64decode(tag)
    buf = DecrypterBuffer(blob_id, self.secret, tag)

    # incrementally collect the body of the response
    yield treq.collect(response, buf.write)
    fd, size = buf.close()
    logger.info("Finished download: (%s, %d)" % (blob_id, size))
    defer.returnValue((fd, size))
def _download_and_decrypt(self, blob_id):
    logger.info("Starting download of blob: %s" % blob_id)
    # TODO this needs to be connected in a tube
    uri = urljoin(self.remote, self.user + '/' + blob_id)
    data = yield self._client.get(uri)

    if data.code == 404:
        logger.warn("Blob not found in server: %s" % blob_id)
        defer.returnValue(None)
    elif not data.headers.hasHeader('Tag'):
        logger.error("Server didn't send a tag header for: %s" % blob_id)
        defer.returnValue(None)

    tag = data.headers.getRawHeaders('Tag')[0]
    tag = base64.urlsafe_b64decode(tag)
    buf = DecrypterBuffer(blob_id, self.secret, tag)

    # incrementally collect the body of the response
    yield treq.collect(data, buf.write)
    fd, size = buf.close()
    logger.info("Finished download: (%s, %d)" % (blob_id, size))
    defer.returnValue((fd, size))
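# collect() asks nothing of its sink beyond a write() method, which is the
# contract the DecrypterBuffer above satisfies. A self-contained buffer of
# the same shape, hashing instead of decrypting (HashingBuffer is a
# hypothetical stand-in, not part of the original code):
import hashlib

class HashingBuffer(object):
    def __init__(self):
        self._digest = hashlib.sha256()
        self._size = 0

    def write(self, data):
        # Invoked once per chunk via treq.collect(response, buf.write).
        self._digest.update(data)
        self._size += len(data)

    def close(self):
        return self._digest.hexdigest(), self._size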
def _cb(response, fn):
    tp = downloadTempPath.child(fn)
    fd = tp.open('wb')

    def _extracted(ignored):
        extractedPath = tp.sibling(tp.basename().replace('.bz2', ''))
        extractedPath.moveTo(
            self.mapsPath.child(tp.basename().replace('.bz2', '')))
        try:
            tp.remove()
        except OSError:  # File already gone
            pass
        print 'Finished downloading {}'.format(fn)

    def _finished(ignored):
        fd.close()
        d = getProcessOutputAndValue(
            'aunpack', (tp.path, '-X', downloadTempPath.path))
        d.addErrback(log.err)
        d.addCallback(_extracted)
        return d

    def _eb(failure):
        print 'Error downloading {}:'.format(fn)
        print failure.getTraceback()
        fd.close()
        try:
            tp.remove()
        except OSError:  # File already gone
            pass

    d = treq.collect(response, fd.write)
    d.addCallback(_finished)
    d.addErrback(_eb)
    return d
def success(self, response):
    """
    successful treq get
    """
    self.totallength = response.length  # TODO: possible this is UNKNOWN_LENGTH

    if self.limit_size > 0 and self.totallength > self.limit_size:
        log.msg(
            f"Not saving URL ({self.url}) (size: {self.totallength}) exceeds file size limit ({self.limit_size})"
        )
        self.exit()
        return

    if self.outfile and not self.silent:
        self.write(
            "  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current\n"
        )
        self.write(
            "                                 Dload  Upload   Total   Spent    Left  Speed\n"
        )

    deferred = treq.collect(response, self.collect)
    deferred.addCallback(self.collectioncomplete)
    return deferred
def get(self):
    try:
        if os.environ.get('DEMO_VER'):
            self.render_json(code=1, msg=u'This is a demo version; this feature is not available')
            return
        uid = self.get_argument('uid')
        upgrade_url = self.settings.config.system.upgrade_url
        api_url = '{0}/api/v1/taurusxee/upgrade/fetch/{1}'.format(
            upgrade_url, uid)
        api_token = yield tools.get_sys_token()
        params = dict(token=api_token)
        oemid = self.get_param_value('upgrade_oemid')
        if oemid:
            params['oemid'] = oemid
        param_str = urlencode(params)
        resp = yield treq.get(api_url + '?' + param_str, allow_redirects=True)
        if resp.code == 500:
            rbody = yield treq.content(resp)
            self.render_json(code=1, msg=u'Failed to fetch the upgrade package. Server error:<br> %s' % utils.safeunicode(rbody))
            return
        savepath = '/tmp/{0}.zip'.format(uid)
        _zipfile = open(savepath, 'wb')
        yield treq.collect(resp, _zipfile.write)
        _zipfile.close()
        backup_path = self.settings.config.database.backup_path
        backup_file = 'taurusxrdb_ubackup_%s.json.gz' % utils.gen_backep_id()
        self.db_backup.dumpdb(os.path.join(backup_path, backup_file))
        tools.upgrade_release(savepath)
        self.render_json(code=0, msg=u'Upgrade finished; please restart all services')
    except Exception as err:
        logger.error(err)
        self.render_json(code=1, msg=utils.safeunicode(err))
def download_data(self, src, dst):
    f = open(dst, 'wb')
    resp = yield treq.get(src, agent=no_verify_agent())
    yield treq.collect(resp, f.write)
    f.close()
def filter_unzip(self, artifact, url, done, info):
    g, a, v, e, f, dest = artre.findall(artifact)[0]
    dest = os.path.join(self.builder.basedir, dest)
    tmp = tempfile.SpooledTemporaryFile(8 * 2048)  # 16 KiB

    def handleUnzipping():
        try:
            mkpath(os.path.abspath(dest))
            zip = zipfile.ZipFile(tmp)
            zip.extractall(dest)
            done.callback(info)
        except Exception as e:
            done.errback(e)

    reading = treq.collect(url, tmp.write)
    reading.addCallback(lambda _: reactor.callInThread(handleUnzipping))
    reading.addErrback(done.errback)

@inlineCallbacks
def handleArtifact(self, artifact, done):
    try:
        info = {}
        g, a, v, e, f, dest = artre.findall(artifact)[0]
        args = "?r=%s&g=%s&a=%s&v=%s&e=%s" % (self.args["repo"], g, a, v, e)
        url = self.host + self.res + args
        res = yield treq.get(url, auth=self.args["cred"])
        if res.code != 200:
            done.errback(Exception(res.code))
        else:
def rip(self, guid, show, quality, video, subs):
    exception = self.master.modules["commands"].exception
    if not self.logged_in:
        raise exception(u"Not logged in to CrunchyRoll")
    if quality not in qualities:
        raise exception(u"Invalid quality, must be one of: {}".format(u", ".join(qualities.keys())))
    filename = u"[CR] {} - {:02d} [{}p]".format(show.series, show.episode, quality).replace(u":", u"：").replace(u"/", u" \u2044 ")
    format = qualities[quality]
    url = xml_url.format(show.media_id, format[0], format[1])
    headers = Headers({
        'Content-Type': ['application/x-www-form-urlencoded'],
        'Referer': ['https://www.crunchyroll.com'],
        'User-Agent': ['Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:17.0) Gecko/17.0 Firefox/17.0']
    })
    data = FileBodyProducer(StringIO(urllib.urlencode({'current_page': show.link})))
    response = yield self.agent.request("POST", url, headers, data)
    xml = yield self.master.modules["utils"].returnBody(response)
    soup = BeautifulSoup(xml, from_encoding="utf8")
    player_url = soup.find('default:chromelessplayerurl').string
    stream_info = soup.find('stream_info')
    subtitles = soup.find('subtitles')
    if not stream_info:
        raise exception(u"Could not parse XML")
    stream = {}
    stream['url'] = stream_info.host.string
    stream['token'] = stream_info.token.string
    stream['file'] = stream_info.file.string
    stream['swf_url'] = swf_url + player_revision + "/" + player_url

    if subs:
        if not subtitles:
            raise exception(u"Could not find subtitles")
        decoded = Decoder(xml)
        formatted = decoded.fancy
        with open(os.path.join(guid, filename.encode("utf8") + ".ass"), 'wb') as subfile:
            subfile.write(codecs.BOM_UTF8)
            subfile.write(formatted.encode('utf-8'))
        yield self.master.modules["ftp"].put(guid, filename + ".ass")

    if video:
        parsed_url = urlparse.urlparse(stream['url'])
        if parsed_url.netloc.endswith("fplive.net"):
            ### START NEW CDN RIP & CONVERT ###
            inner_path, _, args = parsed_url.path.partition("?")
            if not args and parsed_url.query:
                args = parsed_url.query
            elif parsed_url.query:
                args += "&" + parsed_url.query
            ddl_url = "http://v.lvlt.crcdn.net{}/{}?{}".format(inner_path, stream['file'][4:], args)
            response = yield self.agent.request("GET", ddl_url)
            if response.code != 200:
                self.log(u"DDL URL: {}".format(ddl_url))
                self.log(u"RESPONSE CODE: {:d}".format(response.code))
                raise exception(u"Failed to download FLV")
            try:
                with open(os.path.join(guid, filename.encode("utf8") + '.mp4'), "wb") as f:
                    yield treq.collect(response, f.write)
            except Exception as e:
                self.err(u"Failed to download FLV")
                raise exception(u"Failed to download FLV")
            mkvmergeargs = ["-o", os.path.join(guid, filename.encode("utf8") + ".mkv"),
                            os.path.join(guid, filename.encode("utf8") + ".mp4")]
            out, err, code = yield getProcessOutputAndValue(
                self.master.modules["utils"].getPath("mkvmerge"),
                args=mkvmergeargs, env=os.environ)
            if code == 2:
                raise exception(u"Failed to mux MKV")
            ### END NEW CDN RIP & CONVERT ###
        else:
            ### START OLD CDN RIP & CONVERT ###
            rtmpargs = ["-e", "-r", stream['url'], "-y", stream['file'],
                        "-W", stream['swf_url'], "-T", stream['token'],
                        "-o", os.path.join(guid, filename.encode("utf8") + '.flv')]
            retries = 15
            out, err, code = yield getProcessOutputAndValue(
                self.master.modules["utils"].getPath("rtmpdump"),
                args=rtmpargs, env=os.environ)
            while code == 2 and retries:
                retries -= 1
                out, err, code = yield getProcessOutputAndValue(
                    self.master.modules["utils"].getPath("rtmpdump"),
                    args=rtmpargs, env=os.environ)
            if code != 0:
                self.log(u"RTMPDUMP CMDLINE:\nrtmpdump " + u" ".join(rtmpargs))
                self.log(u"RTMPDUMP STDOUT:\n" + out)
                self.log(u"RTMPDUMP STDERR:\n" + err)
                raise exception(u"Failed to download FLV")
            try:
                self.master.modules["flv"].FLVFile(
                    os.path.join(guid, filename.encode("utf8") + ".flv")
                ).ExtractStreams(True, True, True, True)
            except:
                self.err(u"FLVFile failed to extract streams")
                raise exception(u"Failed to extract streams from FLV")
            mkvmergeargs = ["-o", os.path.join(guid, filename.encode("utf8") + ".mkv"),
                            "--forced-track", "0:yes", "--compression", "0:none",
                            "--timecodes", "0:" + os.path.join(guid, filename.encode("utf8") + ".txt"),
                            "-d", "0", "-A", "-S", os.path.join(guid, filename.encode("utf8") + ".264"),
                            "--forced-track", "0:yes", "-a", "0", "-D", "-S",
                            os.path.join(guid, filename.encode("utf8") + ".aac")]
            out, err, code = yield getProcessOutputAndValue(
                self.master.modules["utils"].getPath("mkvmerge"),
                args=mkvmergeargs, env=os.environ)
            if code == 2:
                raise exception(u"Failed to mux MKV")
            ### END OLD CDN RIP & CONVERT ###
        yield self.master.modules["ftp"].put(guid, filename + ".mkv")
        yield self.master.modules["ftp"].upload()
def rip(self, guid, show, quality, video, subs):
    exception = self.master.modules["commands"].exception
    if not self.logged_in:
        raise exception(u"Not logged in to CrunchyRoll")
    if quality not in qualities:
        raise exception(u"Invalid quality, must be one of: {}".format(u", ".join(qualities.keys())))
    filename = u"[CR] {} - {:02d} [{}p]".format(show.series, show.episode, quality).replace(u":", u"：").replace(u"/", u" \u2044 ")
    format = qualities[quality]
    url = xml_url.format(show.media_id, format[0], format[1])
    headers = Headers({
        'Content-Type': ['application/x-www-form-urlencoded'],
        'Referer': ['https://www.crunchyroll.com'],
        'User-Agent': ['Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:17.0) Gecko/17.0 Firefox/17.0']
    })
    data = FileBodyProducer(StringIO(urllib.urlencode({'current_page': show.link})))
    response = yield self.agent.request("POST", url, headers, data)
    xml = yield self.master.modules["utils"].returnBody(response)
    soup = BeautifulSoup(xml, from_encoding="utf8")
    player_url = soup.find('default:chromelessplayerurl').string
    stream_info = soup.find('stream_info')
    subtitles = soup.find('subtitles')
    if not stream_info:
        raise exception(u"Could not parse XML")
    stream = {}
    stream['url'] = stream_info.host.string
    stream['token'] = stream_info.token.string
    stream['file'] = stream_info.file.string
    stream['swf_url'] = swf_url + player_revision + "/" + player_url

    if subs:
        if not subtitles:
            raise exception(u"Could not find subtitles")
        decoded = Decoder(xml)
        formatted = decoded.fancy
        with open(os.path.join(guid, filename.encode("utf8") + ".ass"), 'wb') as subfile:
            subfile.write(codecs.BOM_UTF8)
            subfile.write(formatted.encode('utf-8'))
        yield self.master.modules["ftp"].put(guid, filename.encode("utf8") + ".ass")

    if video:
        parsed_url = urlparse.urlparse(stream['url'])
        if parsed_url.netloc.endswith("fplive.net"):
            ### START NEW CDN RIP & CONVERT ###
            inner_path, _, args = parsed_url.path.partition("?")
            if not args and parsed_url.query:
                args = parsed_url.query
            elif parsed_url.query:
                args += "&" + parsed_url.query
            ddl_url = "http://v.lvlt.crcdn.net{}/{}?{}".format(inner_path, stream['file'][4:], args)
            response = yield self.agent.request("GET", ddl_url)
            if response.code != 200:
                self.log(u"DDL URL: {}".format(ddl_url))
                self.log(u"RESPONSE CODE: {:d}".format(response.code))
                raise exception(u"Failed to download FLV")
            try:
                with open(os.path.join(guid, filename.encode("utf8") + '.mp4'), "wb") as f:
                    yield treq.collect(response, f.write)
            except Exception as e:
                self.err(u"Failed to download FLV")
                raise exception(u"Failed to download FLV")
            mkvmergeargs = ["-o", os.path.join(guid, filename.encode("utf8") + ".mkv"),
                            os.path.join(guid, filename.encode("utf8") + ".mp4")]
            out, err, code = yield getProcessOutputAndValue(
                self.master.modules["utils"].getPath("mkvmerge"),
                args=mkvmergeargs, env=os.environ)
            if code == 2:
                raise exception(u"Failed to mux MKV")
            ### END NEW CDN RIP & CONVERT ###
        else:
            raise exception(u"RTMPDUMP has been disabled due to it potentially crashing the server. You can still rip subs though.")
            ### START OLD CDN RIP & CONVERT ###
            rtmpargs = ["-e", "-r", stream['url'], "-y", stream['file'],
                        "-W", stream['swf_url'], "-T", stream['token'],
                        "-o", os.path.join(guid, filename.encode("utf8") + '.flv')]
            retries = 15
            out, err, code = yield getProcessOutputAndValue(
                self.master.modules["utils"].getPath("rtmpdump"),
                args=rtmpargs, env=os.environ)
            while code == 2 and retries:
                retries -= 1
                out, err, code = yield getProcessOutputAndValue(
                    self.master.modules["utils"].getPath("rtmpdump"),
                    args=rtmpargs, env=os.environ)
            if code != 0:
                self.log(u"RTMPDUMP CMDLINE:\nrtmpdump " + u" ".join(rtmpargs))
                self.log(u"RTMPDUMP STDOUT:\n" + out)
                self.log(u"RTMPDUMP STDERR:\n" + err)
                raise exception(u"Failed to download FLV")
            try:
                self.master.modules["flv"].FLVFile(
                    os.path.join(guid, filename.encode("utf8") + ".flv")
                ).ExtractStreams(True, True, True, True)
            except:
                self.err(u"FLVFile failed to extract streams")
                raise exception(u"Failed to extract streams from FLV")
            mkvmergeargs = ["-o", os.path.join(guid, filename.encode("utf8") + ".mkv"),
                            "--forced-track", "0:yes", "--compression", "0:none",
                            "--timecodes", "0:" + os.path.join(guid, filename.encode("utf8") + ".txt"),
                            "-d", "0", "-A", "-S", os.path.join(guid, filename.encode("utf8") + ".264"),
                            "--forced-track", "0:yes", "-a", "0", "-D", "-S",
                            os.path.join(guid, filename.encode("utf8") + ".aac")]
            out, err, code = yield getProcessOutputAndValue(
                self.master.modules["utils"].getPath("mkvmerge"),
                args=mkvmergeargs, env=os.environ)
            if code == 2:
                raise exception(u"Failed to mux MKV")
            ### END OLD CDN RIP & CONVERT ###
        yield self.master.modules["ftp"].put(guid, filename.encode("utf8") + ".mkv")
        yield self.master.modules["ftp"].upload()
def _collect(response):
    d = treq.collect(response, write)
    d.addCallback(lambda _: content_type(response.headers))
    return d
def content(response):
    content = []
    cd = treq.collect(response, content.append)
    cd.addCallback(lambda _: ''.join(content))
    cd.addCallback(done, response)
    return cd
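# The content() helper above shows how body-reading conveniences are built
# on collect(); a bytes-safe standalone equivalent (read_body is an
# illustrative name, not a treq API):
from io import BytesIO
import treq

def read_body(response):
    buf = BytesIO()
    d = treq.collect(response, buf.write)
    d.addCallback(lambda _: buf.getvalue())
    return d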