import glob
import os
from datetime import datetime
from json import dumps

import cv2
import numpy as np
import tensorflow as tf
from kafka import KafkaProducer

import cam_info  # project module: camera IDs, URLs, coordinates, facings
import fun       # project module: YOLO helpers (get_output_layers, object_identification)

# current script path
scripty_path = os.path.dirname(os.path.realpath(__file__))

# config yolo model
config_path = scripty_path + '/yolo_model/yolov3.cfg'
weight_path = scripty_path + '/yolo_model/yolov3.weights'
class_path = scripty_path + '/yolo_model/yolov3.txt'
# build net
net = cv2.dnn.readNet(weight_path, config_path)
# get the names of the network's output layers
output_layers = fun.get_output_layers(net)

# load class names, one per line
with open(class_path, 'r') as f:
    classes = [line.strip() for line in f]
# pre-define color scheme
COLORS = np.random.uniform(0, 255, size=(len(classes), 3))
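
# Kafka bootstrap servers used by the producer in image_processing().
# NOTE: placeholder value (an assumption); replace with the real broker
# address(es) or import them from the project's configuration.
servers = ['localhost:9092']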

# load in .mp4 and find all frames
# video_name = "/The Good, the Bad and the Ugly - The Final Duel (1966 HD).mp4"
video_name = "/traffic.mp4"
frame_path = scripty_path + "/frames"
vidcap = cv2.VideoCapture(scripty_path + video_name)
total_frames = vidcap.get(cv2.CAP_PROP_FRAME_COUNT)
all_frames = range(int(total_frames))


def image_processing(cam_index):
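    """Run YOLO object detection on one camera's video stream and publish
    object counts for frames containing vehicles to that camera's Kafka topic."""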

    # camera metadata for this index
    cam_ID = cam_info.cam_IDs[cam_index]
    VIDEO_URL = cam_info.VIDEO_URLs[cam_index]
    cam_name = cam_info.addresses[cam_index]
    lat = cam_info.lats[cam_index]
    lon = cam_info.lons[cam_index]
    facing = cam_info.facings[cam_index]

    # connect to the Kafka cluster; values are serialized as UTF-8 encoded JSON
    producer = KafkaProducer(
        bootstrap_servers=servers,
        value_serializer=lambda x: dumps(x).encode('utf-8'))
    # confirm the connection to the bootstrap servers
    print(producer.bootstrap_connected())

    # current script path
    scripty_path = os.path.dirname(os.path.realpath(__file__))

    # configure the YOLO model (loaded again here so the function is self-contained)
    config_path = scripty_path + '/yolo_model/yolov3.cfg'
    weight_path = scripty_path + '/yolo_model/yolov3.weights'
    class_path = scripty_path + '/yolo_model/yolov3.txt'
    # build net
    net = cv2.dnn.readNet(weight_path, config_path)
    # get the names of the network's output layers
    output_layers = fun.get_output_layers(net)

    # load class names, one per line
    with open(class_path, 'r') as f:
        classes = [line.strip() for line in f]
    # pre-define color scheme
    COLORS = np.random.uniform(0, 255, size=(len(classes), 3))

    # open the camera stream (VideoCapture handles network URLs as well as local files)
    cam = cv2.VideoCapture(VIDEO_URL)

    while True:

        ret, im = cam.read()
        # skip frames that could not be read or decoded
        if not ret or im is None:
            continue

        # detect and count objects in the frame
        image_o, class_ids = fun.object_identification(
            im, classes, net, COLORS)
        # per-class detection counts
        # (COCO class IDs: 0=person, 1=bicycle, 2=car, 3=motorcycle, 5=bus, 7=truck)
        cars = class_ids.count(2)
        trucks = class_ids.count(7)
        motorcycles = class_ids.count(3)
        buses = class_ids.count(5)
        vehicles = cars + trucks + motorcycles + buses

        data = {
            'cars': cars,
            'trucks': trucks,
            'person': class_ids.count(0),
            'bicycle': class_ids.count(1),
            'motorcycle': motorcycles,
            'bus': buses,
            'vehicles': vehicles,
            'cam_ID': cam_ID,
            'cam_name': cam_name,
            'lat': lat,
            'lon': lon,
            'facing': facing,
            'time': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
            'region': 500
        }

        # publish to this camera's topic only if at least one vehicle was detected
        if vehicles > 0:
            producer.send(cam_ID, value=data)
            print(cam_index)
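

# Usage sketch (an assumption, not part of the original pipeline): image_processing
# takes a camera index, so it can be fanned out with one worker per camera, e.g.:
#
#     from multiprocessing import Pool
#     if __name__ == '__main__':
#         with Pool(len(cam_info.cam_IDs)) as pool:
#             pool.map(image_processing, range(len(cam_info.cam_IDs)))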