Example #1
class AdStatsHandler(web.RequestHandler):
    def __init__(self, app, request, **kwargs):
        super(AdStatsHandler, self).__init__(app, request, **kwargs)
        self._cache = {}

    def check_origin(self, origin):
        return True

    @gen.coroutine
    def get(self):
        self.set_status(200, "OK")
        self.set_header("Content-Type", "application/json")
        self.write(json.dumps(adstats))

        # republish the served stats to Kafka for downstream consumers
        self._producer = Producer()
        self._producer.send(kafka_topic, json.dumps(adstats))
        self._producer.close()

    @gen.coroutine
    def post(self):
        try:
            data = json.loads(self.request.body.decode('utf-8'))

            # update the matching ad entry; .get() avoids a KeyError when the
            # payload omits "clicked" or "watched"
            for item in adstats:
                if item['uri'] == data['uri']:
                    if data.get('clicked') == 1:
                        item['clicked'] += 1
                    if data.get('watched') == 1:
                        item['watched'] += 1

            self.set_status(200, "OK")

        except Exception as e:
            print("Ad-content: exception during post: " + str(e), flush=True)
            self.set_status(503, "Ad-content: Exception during post")
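All of the examples on this page construct a small Producer wrapper instead of
using kafka-python directly; the wrapper itself is not shown. A minimal sketch
of the interface the examples rely on (send/flush/close), assuming kafka-python
and a KAFKA_HOSTS bootstrap list (both are assumptions, not part of the snippets):

from kafka import KafkaProducer

KAFKA_HOSTS = ["kafka-service:9092"]  # assumed address, not from the examples

class Producer(object):
    def __init__(self):
        super(Producer, self).__init__()
        # acks=0 gives fire-and-forget sends, matching how the examples call send()
        self._client = KafkaProducer(bootstrap_servers=KAFKA_HOSTS, acks=0)

    def send(self, topic, message):
        # the examples always pass a JSON string, so encode it here
        self._client.send(topic, message.encode('utf-8'))

    def flush(self, timeout=None):
        self._client.flush(timeout=timeout)

    def close(self):
        self._client.close()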
Example #2
def send_video_analytics_fps(fps):
    if fps < 0:
        return
    # lazily create one module-level producer and reuse it for all samples
    global p
    if not p:
        p = Producer()
    p.send(video_analytics_fps_topic, json.dumps({
        "fps": fps,
        "machine": machine_prefix + socket.gethostname()[0:3],
        "time": datetime.datetime.utcnow().isoformat(),
    }))
Example #3
    def __init__(self, thread_id, name, arg):
        self.logger = Logger.Logger(self.__class__.__name__).get()

        # the producer lives in a module-level global shared by all thread instances
        global producer
        producer = Producer.Producer()
        threading.Thread.__init__(self)
        self.thread_id = thread_id
        self.name = name
        self.arg = arg
Example #4
    @gen.coroutine
    def get(self):
        stream = self.request.uri.replace("/schedule/", "")

        # schedule producing the stream
        print("request received to process stream: " + stream, flush=True)
        producer = Producer()
        msg = {}
        msg.update({
            "name": stream.split("/")[1],
            "parameters": {
                "renditions": [],
                "codec_type": "AVC"
            },
            "output": {
                "target": "file",
                "type": stream.split("/")[0]
            },
            "live_vod": "vod",
            "loop": 0
        })
        producer.send(KAFKA_TOPIC, json.dumps(msg))
        producer.close()

        # wait until file is available, return it
        start_time = time.time()
        while time.time() - start_time < 60:
            if isfile(DASHLS_ROOT + "/" + stream):
                self.set_header('X-Accel-Redirect', '/' + stream)
                self.set_status(200, "OK")
                return
            yield gen.sleep(0.5)

        # timed out: the job was scheduled, but the output is not ready yet
        self.set_status(503, "Request scheduled")
Example #5
class Schedule(object):
    def __init__(self):
        super(Schedule, self).__init__()
        self._producer = Producer()

    def analyze(self, seg_info, pipeline):
        request = {
            "source": {
                "uri": seg_info["analytics"]
            },
            "pipeline": pipeline,
            "tags": {
                "seg_time": seg_info["seg_time"]
            },
            "parameters": {
                "every-nth-frame": 3
            }
        }
        if "initSeg" in seg_info:
            request["source"]["uri-init"] = seg_info["initSeg"]
        self._producer.send(analytics_topic, json.dumps(request))

    def transcode(self, user, seg_info, search_interval=10):
        request = {
            "meta-db": {
                "stream": seg_info["stream"],
                "time_range": [
                    seg_info["seg_time"] - search_interval,
                    seg_info["seg_time"],
                ],
                "time_field": "time",
            },
            "ad_config": {
                "codec": seg_info["codec"],
                "resolution": seg_info["resolution"],
                "bandwidth": seg_info["bandwidth"],
                "streaming_type": seg_info["streaming_type"],
                "duration": seg_info["ad_duration"],
                "segment": seg_info["ad_segment"],
            },
            "destination": {
                "adpath": seg_info["transcode"],
            },
            "user_info": {
                "name": user,
                "keywords": []  #"keywords": ["sports","animal"]
            }
        }
        self._producer.send(transcode_topic, json.dumps(request))

    def flush(self):
        self._producer.flush()
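A hypothetical driver for the Schedule class above; every value below is made
up for illustration (the real seg_info comes from the streaming pipeline):

sched = Schedule()
sched.analyze({"analytics": "dash/stream1/seg_10.ts", "seg_time": 10.0},
              pipeline="object_detection")
sched.flush()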
Example #6
    @gen.coroutine
    def get(self):
        stream = self.request.uri.replace("/schedule/", "")

        # schedule producing the stream
        print("request received to process stream: " + stream, flush=True)
        producer = Producer()
        producer.send(KAFKA_TOPIC, stream)
        producer.close()

        # wait until file is available, return it
        for _ in range(50):  # poll for up to ~5 seconds (50 x 0.1s)
            if isfile(DASHLS_ROOT + "/" + stream):
                self.set_header('X-Accel-Redirect', '/' + stream)
                self.set_status(200, "OK")
                return
            yield gen.sleep(0.1)

        # timed out: the job was scheduled, but the output is not ready yet
        self.set_status(503, "Request scheduled")
Example #7
    @gen.coroutine
    def get(self):
        stream = self.request.uri.replace("/schedule/", "")

        # schedule producing the stream
        print("request received to process stream: " + stream, flush=True)
        producer = Producer()
        producer.send(kafka_topic, stream)
        producer.close()

        # wait until file is available, return it
        start_time = time.time()
        while time.time() - start_time < 60:
            if isfile(dashls_root + "/" + stream):
                self.set_header('X-Accel-Redirect', '/' + stream)
                self.set_status(200, "OK")
                return
            yield gen.sleep(0.5)

        # timed out: the job was scheduled, but the output is not ready yet
        self.set_status(503, "Request scheduled")
Example #8
    @gen.coroutine
    def post(self, *args, **kwargs):
        fileName = self.get_body_argument('fileName', None)
        file = self.request.files.get('file', None)
        uploadStatus = self.get_body_argument('uploadStatus', None)
        timeStamp = self.get_body_argument('timeStamp', None)
        count = self.get_body_argument('count', None)
        streamType = self.get_body_argument('type', "dash")
        fileName = timeStamp + "-" + fileName
        proPath = os.path.join(TEMP_ROOT, fileName)
        if not os.path.isdir(proPath):
            os.makedirs(proPath)
        try:
            with open(os.path.join(proPath, count), 'wb') as f:
                f.write(file[0]['body'])
                self.set_status(200)
            if uploadStatus == 'end':
                in_out.delay(proPath, ARCHIVE_ROOT, fileName, count)

                # schedule producing the stream
                stream = streamType + "/" + fileName + "/index." + ("m3u8" if streamType == "hls" else "mpd")
                print("request received to process offline stream: " + stream, flush=True)

                start_time = time.time()
                while time.time() - start_time < 10:
                    if isfile(ARCHIVE_ROOT + "/" + fileName):
                        print("file " + fileName + " exists, sending job", flush=True)
                        producer = Producer()
                        producer.send(KAFKA_TOPIC, stream)
                        producer.close()
                        return
                    yield gen.sleep(0.5)

                print("timeout :(", flush=True)
        except Exception:
            self.set_status(401)
            print(traceback.format_exc(), flush=True)
Example #9
class Schedule(object):
    def __init__(self):
        super(Schedule, self).__init__()
        self._producer = Producer()

    def analyze(self, seg_info, pipeline):
        request = {
            "source": {
                "uri": ""
            },
            "pipeline": pipeline,
            "tags": {
                "seg_time": 0.0
            },
            "parameters": {
                # raises if EVERY_NTH_FRAME is unset in the environment
                "every-nth-frame": int(os.environ.get("EVERY_NTH_FRAME"))
            }
        }
        for item in seg_info["analytics"]:
            # request holds nested dicts, so a shallow .copy() would share
            # "source" and "tags" between messages; deep-copy instead
            # (requires "import copy" at module level)
            temp = copy.deepcopy(request)
            temp["source"]["uri"] = item["stream"]
            temp["tags"]["seg_time"] = item["seg_time"]
            print("Schedule analysis: " + temp["source"]["uri"], flush=True)
            if "initSeg" in seg_info:
                temp["source"]["uri-init"] = seg_info["initSeg"]
            self._producer.send(analytics_topic, json.dumps(temp))

    def transcode(self, user, seg_info, search_interval=10):
        request = {
            "meta-db": {
                "stream": seg_info["stream"],
                "time_range": [
                    0.0,
                    10.0,
                ],
                "time_field": "time",
            },
            "ad_config": {
                "codec": seg_info["codec"],
                "resolution": seg_info["resolution"],
                "bandwidth": seg_info["bandwidth"],
                "streaming_type": seg_info["streaming_type"],
                "duration": seg_info["ad_duration"],
                "segment": seg_info["ad_segment"],
            },
            "destination": {
                "adpath": "",
            },
            "user_info": {
                "name": user,
                "keywords": []  # e.g. ["sports", "animal"]
            },
            "bench_mode": 0
        }
        for item in seg_info["transcode"]:
            temp = copy.deepcopy(request)  # deep copy: nested dicts must not be shared (requires "import copy")
            temp["meta-db"]["time_range"] = [item["seg_time"] - search_interval, item["seg_time"]]
            temp["destination"]["adpath"] = item["stream"]
            temp["bench_mode"] = item["bench_mode"]
            print("Schedule transcode: " + temp["destination"]["adpath"], flush=True)
            self._producer.send(transcode_topic, json.dumps(temp))

    def flush(self):
        self._producer.flush()
Example #10
ARCHIVE_ROOT = "/var/www/archive"
TARGET_ROOT = "/var/www/video"
log_file = TARGET_ROOT + "/log.txt"

config_file = "/home/transcoding.json"

streams = [s for s in listdir(ARCHIVE_ROOT) if s.endswith((".mp4", ".avi"))]

with open(config_file, "rt") as fd:
    jobs = json.load(fd)

print("Submit jobs:", flush=True)
# ingest jobs to start transcoding
producer = Producer()
idx = 0
for msg in jobs:
    # schedule producing every archived stream whose name matches the job's pattern
    name_pattern = msg["name"]
    for stream1 in streams:
        if re.search(name_pattern, stream1):
            msg.update({"idx": str(idx), "name": stream1})
            print(msg, flush=True)
            idx += 1

            while True:
                try:
                    producer.send(KAFKA_TOPIC, json.dumps(msg))
                    break
                except Exception as e:
                    # snippet truncated here; a minimal completion: log and retry
                    print(str(e), flush=True)
                    time.sleep(1)
Example #11
KAFKA_WORKLOAD_TOPIC = "transcoding"

ARCHIVE_ROOT = "/var/www/archive"
VIDEO_ROOT = "/var/www/video/"
DASH_ROOT = "/var/www/video/dash"
HLS_ROOT = "/var/www/video/hls"
MP4_ROOT = "/var/www/video/mp4"

HW_ACC_TYPE = os.getenv("HW_ACC_TYPE", "sw")
HW_DEVICE = os.getenv("HW_DEVICE", None)

fps_regex = re.compile(
            r"\s*frame=\s*(?P<frame_count>\d+)\s*fps=\s*(?P<fps>\d+\.?\d*).*"
            r"time=(?P<duration>\d+:\d+:\d+\.\d+).*speed=\s*(?P<speed>\d+\.\d+)x")

producer = Producer()

def get_fps(next_line, start_time):
    matched = fps_regex.match(next_line)
    if matched:
        fps = float(matched.group('fps'))
        speed = float(matched.group("speed"))
        frame_count = int(matched.group("frame_count"))
        time_value = datetime.strptime(
            matched.group("duration"), "%H:%M:%S.%f")
        duration = timedelta(
            hours=time_value.hour,
            minutes=time_value.minute,
            seconds=time_value.second,
            microseconds=time_value.microsecond)
        if fps < 0:
            # snippet truncated here; treat an invalid sample as "no reading"
            return None
Example #12
from zkstate import ZKState
import merged_segment as merge
import ast  # needed for ast.literal_eval below
import datetime
import json
import socket
import time
import os
import re

video_analytics_topic = "seg_analytics_sched"
video_analytics_fps_topic = "video_analytics_fps"
machine_prefix = os.environ.get("VA_PRE")
if machine_prefix is None:
    machine_prefix = "VA-"
va = RunVA()
p = Producer()

def process_stream(streamstring):
    streamjson = ast.literal_eval(streamstring)
    pipeline1 = streamjson["pipeline"]+"/1"
    stream = streamjson['source']['uri']
    print("VA feeder: stream: "+stream, flush=True)
    init_stream = None
    zk_path = None
    if 'uri-init' in streamjson['source']:
        init_stream = streamjson['source']['uri-init']
        print("VA feeder: init_stream: "+init_stream, flush=True)
        zk_path = stream+"/"+pipeline1

    m1 = re.search(r"(.*)/.*_([0-9]+\.ts)$", stream)
    if m1:
        # snippet truncated here; group(1) is the stream directory and
        # group(2) the numbered .ts segment
        pass
Example #13
import datetime
import psutil
import socket  # needed for socket.gethostname() below
import time
import json
import sys

kafka_topic = "workloads"

if __name__ == "__main__":
    prefix = ""
    if len(sys.argv) > 1:
        prefix = sys.argv[1]
    instance = socket.gethostname()[0:3]
    machine = prefix + instance

    # the outer loop recreates the producer after a failure; the inner loop
    # publishes one CPU-load sample per second
    while True:
        try:
            p = Producer()
            while True:
                p.send(
                    kafka_topic,
                    json.dumps({
                        "time": datetime.datetime.utcnow().isoformat(),
                        "machine": machine,
                        "workload": psutil.cpu_percent(),
                    }))
                time.sleep(1)
            p.close()  # unreachable: the inner loop only exits via an exception
        except Exception as e:
            print(str(e))
        time.sleep(2)
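Run the monitor with an optional machine prefix, e.g. "python3 monitor.py node-"
(the script name is assumed here); each sample is then tagged as "node-" plus
the first three characters of the hostname.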