@gen.coroutine
def get(self):
    # schedule producing the stream
    stream = self.request.uri.replace("/schedule/", "")
    print("request received to process stream: " + stream, flush=True)

    producer = Producer()
    msg = {}
    msg.update({
        "name": stream.split("/")[1],
        "parameters": {
            "renditions": [],
            "codec_type": "AVC"
        },
        "output": {
            "target": "file",
            "type": stream.split("/")[0]
        },
        "live_vod": "vod",
        "loop": 0
    })
    producer.send(KAFKA_TOPIC, json.dumps(msg))
    producer.close()

    # wait until the file is available, then return it via X-Accel-Redirect
    start_time = time.time()
    while time.time() - start_time < 60:
        if isfile(DASHLS_ROOT + "/" + stream):
            self.set_header('X-Accel-Redirect', '/' + stream)
            self.set_status(200, "OK")
            return
        yield gen.sleep(0.5)

    # waited too long; report only that the request has been scheduled
    self.set_status(503, "Request scheduled")
class AdStatsHandler(web.RequestHandler):
    def __init__(self, app, request, **kwargs):
        super(AdStatsHandler, self).__init__(app, request, **kwargs)
        self._cache = {}

    def check_origin(self, origin):
        return True

    @gen.coroutine
    def get(self):
        self.set_status(200, "OK")
        self.set_header("Content-Type", "application/json")
        self.write(json.dumps(adstats))
        self._producer = Producer()
        self._producer.send(kafka_topic, json.dumps(adstats))
        self._producer.close()

    @gen.coroutine
    def post(self):
        try:
            data = json.loads(self.request.body.decode('utf-8'))
            for item in adstats:
                if item['uri'] == data['uri']:
                    if data['clicked'] == 1:
                        item['clicked'] += 1
                    if data['watched'] == 1:
                        item['watched'] += 1
            self.set_status(200, "OK")
        except Exception as e:
            self.set_status(503, "Ad-content:Exception during post")
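# Hypothetical client call for AdStatsHandler.post above. Only the body fields
# ("uri", "clicked", "watched") come from the handler; the host, port and
# "/adstats" route are assumptions, since the URL mapping is not shown here.
import requests

payload = {
    "uri": "ad-clip-001.mp4",   # must match an existing item's "uri" in adstats
    "clicked": 1,               # 1 increments that ad's click counter
    "watched": 0,               # 1 would increment its watch counter
}
resp = requests.post("http://localhost:8080/adstats", json=payload)
print(resp.status_code)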
class Schedule(object):
    def __init__(self):
        super(Schedule, self).__init__()
        self._producer = Producer()

    def analyze(self, seg_info, pipeline):
        request = {
            "source": {
                "uri": seg_info["analytics"]
            },
            "pipeline": pipeline,
            "tags": {
                "seg_time": seg_info["seg_time"]
            },
            "parameters": {
                "every-nth-frame": 3
            }
        }
        if "initSeg" in seg_info:
            request["source"]["uri-init"] = seg_info["initSeg"]
        self._producer.send(analytics_topic, json.dumps(request))

    def transcode(self, user, seg_info, search_interval=10):
        request = {
            "meta-db": {
                "stream": seg_info["stream"],
                "time_range": [
                    seg_info["seg_time"] - search_interval,
                    seg_info["seg_time"],
                ],
                "time_field": "time",
            },
            "ad_config": {
                "codec": seg_info["codec"],
                "resolution": seg_info["resolution"],
                "bandwidth": seg_info["bandwidth"],
                "streaming_type": seg_info["streaming_type"],
                "duration": seg_info["ad_duration"],
                "segment": seg_info["ad_segment"],
            },
            "destination": {
                "adpath": seg_info["transcode"],
            },
            "user_info": {
                "name": user,
                "keywords": []
                #"keywords": ["sports","animal"]
            }
        }
        self._producer.send(transcode_topic, json.dumps(request))

    def flush(self):
        self._producer.flush()
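# Illustrative driver for the Schedule class above, not part of the sample:
# the values are invented, but the keys are exactly the ones analyze() and
# transcode() read from seg_info; the pipeline name is also an assumption.
seg_info = {
    "analytics": "/var/www/segments/stream1_000120.ts",
    "initSeg": "/var/www/segments/stream1_init.mp4",   # optional
    "seg_time": 120.0,
    "stream": "stream1",
    "codec": "h264",
    "resolution": "1280x720",
    "bandwidth": 2000000,
    "streaming_type": "hls",
    "ad_duration": 30,
    "ad_segment": 6,
    "transcode": "/var/www/adinsert/stream1",
}

scheduler = Schedule()
scheduler.analyze(seg_info, pipeline="object_detection")
scheduler.transcode("user1", seg_info, search_interval=10)
scheduler.flush()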
def send_video_analytics_fps(fps):
    if fps < 0:
        return

    global p
    if not p:
        p = Producer()
    if p:
        p.send(video_analytics_fps_topic, json.dumps({
            "fps": fps,
            "machine": machine_prefix + socket.gethostname()[0:3],
            "time": datetime.datetime.utcnow().isoformat(),
        }))
@gen.coroutine
def get(self):
    # schedule producing the stream
    stream = self.request.uri.replace("/schedule/", "")
    print("request received to process stream: " + stream, flush=True)

    producer = Producer()
    producer.send(KAFKA_TOPIC, stream)
    producer.close()

    # wait until the file is available, then return it via X-Accel-Redirect
    for t in range(50):
        if isfile(DASHLS_ROOT + "/" + stream):
            self.set_header('X-Accel-Redirect', '/' + stream)
            self.set_status(200, "OK")
            return
        yield gen.sleep(0.1)

    # waited too long; report only that the request has been scheduled
    self.set_status(503, "Request scheduled")
@gen.coroutine
def get(self):
    # schedule producing the stream
    stream = self.request.uri.replace("/schedule/", "")
    print("request received to process stream: " + stream, flush=True)

    producer = Producer()
    producer.send(kafka_topic, stream)
    producer.close()

    # wait until the file is available, then return it via X-Accel-Redirect
    start_time = time.time()
    while time.time() - start_time < 60:
        if isfile(dashls_root + "/" + stream):
            self.set_header('X-Accel-Redirect', '/' + stream)
            self.set_status(200, "OK")
            return
        yield gen.sleep(0.5)

    # waited too long; report only that the request has been scheduled
    self.set_status(503, "Request scheduled")
@gen.coroutine
def post(self, *args, **kwargs):
    fileName = self.get_body_argument('fileName', None)
    file = self.request.files.get('file', None)
    uploadStatus = self.get_body_argument('uploadStatus', None)
    timeStamp = self.get_body_argument('timeStamp', None)
    count = self.get_body_argument('count', None)
    streamType = self.get_body_argument('type', "dash")

    fileName = timeStamp + "-" + fileName
    proPath = os.path.join(TEMP_ROOT, fileName)
    if not os.path.isdir(proPath):
        os.makedirs(proPath)

    try:
        # store the uploaded chunk under its chunk index
        with open(os.path.join(proPath, count), 'wb') as f:
            f.write(file[0]['body'])
        self.set_status(200)

        if uploadStatus == 'end':
            # last chunk received: archive the upload, then schedule producing the stream
            in_out.delay(proPath, ARCHIVE_ROOT, fileName, count)
            stream = streamType + "/" + fileName + "/index." + ("m3u8" if streamType == "hls" else "mpd")
            print("request received to process offline stream: " + stream, flush=True)

            start_time = time.time()
            while time.time() - start_time < 10:
                if isfile(ARCHIVE_ROOT + "/" + fileName):
                    print("file " + fileName + " exists, sending job", flush=True)
                    producer = Producer()
                    producer.send(KAFKA_TOPIC, stream)
                    producer.close()
                    return
                yield gen.sleep(0.5)
            print("timeout :(", flush=True)
    except:
        self.set_status(401)
        print(traceback.format_exc(), flush=True)
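# Hypothetical upload request matching the fields the post handler above reads.
# The host, port and "/upload" route are assumptions; only the form-field names
# ("fileName", "file", "uploadStatus", "timeStamp", "count", "type") come from the code.
import requests

with open("clip.mp4", "rb") as f:
    resp = requests.post(
        "http://localhost:8080/upload",
        data={
            "fileName": "clip.mp4",
            "uploadStatus": "end",   # "end" marks the final chunk and triggers scheduling
            "timeStamp": "1700000000",
            "count": "0",            # chunk index, used as the stored part's file name
            "type": "dash",          # or "hls"
        },
        files={"file": ("clip.mp4", f)},
    )
print(resp.status_code)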
class Schedule(object):
    def __init__(self):
        super(Schedule, self).__init__()
        self._producer = Producer()

    def analyze(self, seg_info, pipeline):
        # request template: request.copy() is shallow, so the nested dicts are
        # shared, but each message is serialized before the next iteration
        # mutates them again
        request = {
            "source": {
                "uri": ""
            },
            "pipeline": pipeline,
            "tags": {
                "seg_time": 0.0
            },
            "parameters": {
                "every-nth-frame": int(os.environ.get("EVERY_NTH_FRAME"))
            }
        }
        for item in seg_info["analytics"]:
            temp = request.copy()
            temp["source"]["uri"] = item["stream"]
            temp["tags"]["seg_time"] = item["seg_time"]
            print("Schedule analysis: " + temp["source"]["uri"], flush=True)
            if "initSeg" in seg_info:
                temp["source"]["uri-init"] = seg_info["initSeg"]
            self._producer.send(analytics_topic, json.dumps(temp))

    def transcode(self, user, seg_info, search_interval=10):
        request = {
            "meta-db": {
                "stream": seg_info["stream"],
                "time_range": [
                    0.0,
                    10.0,
                ],
                "time_field": "time",
            },
            "ad_config": {
                "codec": seg_info["codec"],
                "resolution": seg_info["resolution"],
                "bandwidth": seg_info["bandwidth"],
                "streaming_type": seg_info["streaming_type"],
                "duration": seg_info["ad_duration"],
                "segment": seg_info["ad_segment"],
            },
            "destination": {
                "adpath": "",
            },
            "user_info": {
                "name": user,
                "keywords": []
                #"keywords": ["sports","animal"]
            },
            "bench_mode": 0
        }
        for item in seg_info["transcode"]:
            temp = request.copy()
            temp["meta-db"]["time_range"] = [item["seg_time"] - search_interval, item["seg_time"]]
            temp["destination"]["adpath"] = item["stream"]
            temp["bench_mode"] = item["bench_mode"]
            print("Schedule transcode: " + temp["destination"]["adpath"], flush=True)
            self._producer.send(transcode_topic, json.dumps(temp))

    def flush(self):
        self._producer.flush()
print("Submit jobs:", flush=True) # ingest jobs to start transcoding producer = Producer() idx = 0 for idx1, msg in enumerate(jobs): # schedule producing the stream name_pattern = msg["name"] for stream1 in streams: if re.search(name_pattern, stream1): msg.update({"idx": str(idx), "name": stream1}) print(msg, flush=True) idx = idx + 1 while True: try: producer.send(KAFKA_TOPIC, json.dumps(msg)) break except Exception as e: print("Exception: {}".format(e)) time.sleep(5) # show transcoding statistics def stats_fileinfo(root): nfiles = 0 size = 0 for path, dirs, files in walk(root): for stream1 in files: if stream1.endswith((".mp4", ".avi", ".ts")): nfiles = nfiles + 1
import datetime
import psutil
import time
import json
import sys
import socket

kafka_topic = "workloads"

if __name__ == "__main__":
    prefix = ""
    if len(sys.argv) > 1:
        prefix = sys.argv[1]
    instance = socket.gethostname()[0:3]
    machine = prefix + instance

    while True:
        try:
            p = Producer()
            # report this machine's CPU utilization once per second
            while True:
                p.send(
                    kafka_topic,
                    json.dumps({
                        "time": datetime.datetime.utcnow().isoformat(),
                        "machine": machine,
                        "workload": psutil.cpu_percent(),
                    }))
                time.sleep(1)
            p.close()
        except Exception as e:
            print(str(e))
            time.sleep(2)
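# Every snippet above depends on a Producer class exposing send(topic, value),
# flush() and close(); its implementation is not shown here. Below is a minimal
# sketch of what such a wrapper could look like, assuming kafka-python and a
# broker address of "kafka-service:9092" (both assumptions, not the sample's code).
from kafka import KafkaProducer

class Producer(object):
    def __init__(self, brokers="kafka-service:9092"):
        self._producer = KafkaProducer(
            bootstrap_servers=brokers,
            # the callers pass JSON strings, so encode them to bytes here
            value_serializer=lambda v: v.encode("utf-8"),
        )

    def send(self, topic, value):
        self._producer.send(topic, value)

    def flush(self):
        self._producer.flush()

    def close(self):
        self._producer.close()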