Example #1
from yoloOpencv import opencvYOLO
import cv2
import imutils
import time, math

yolo = opencvYOLO(modeltype="yolov3", objnames="../darknet/data/coco.names",
    weights="../darknet/weights/yolov3.weights", cfg="../darknet/cfg/yolov3.cfg")

calculateLine1 = [(36, 610), (1126, 900)]  # [(from_x, from_y), (to_x, to_y)]
calculateLine2 = [(820, 350), (1574, 530)]  # [(from_x, from_y), (to_x, to_y)]
calculateLine3 = [(830, 380), (0, 580)]  # [(from_x, from_y), (to_x, to_y)]
calculateLine4 = [(1532, 590), (1224, 880)]  # [(from_x, from_y), (to_x, to_y)]

calculateRange_x = 30   # X tolerance around the line (for deciding up/down crossings)
calculateRange_y = 30   # Y tolerance around the line (for deciding up/down crossings)

video_file = "taiachun.mp4"
output_video = "/media/sf_ShareFolder/traffic2.avi"

def draw_CalculateLine(frame):
    # thick green lines drawn offset a little in Y from each counting line
    cv2.line(frame, (calculateLine1[0][0],calculateLine1[0][1]+20), (calculateLine1[1][0],calculateLine1[1][1]+25), (0, 255, 0), 40)
    cv2.line(frame, (calculateLine2[0][0],calculateLine2[0][1]-20), (calculateLine2[1][0],calculateLine2[1][1]-25), (0, 255, 0), 40)
    cv2.line(frame, (calculateLine3[0][0],calculateLine3[0][1]-20), (calculateLine3[1][0],calculateLine3[1][1]-25), (0, 255, 0), 40)
    cv2.line(frame, (calculateLine4[0][0],calculateLine4[0][1]+25), (calculateLine4[1][0],calculateLine4[1][1]+35), (0, 255, 0), 45)
    # the four counting lines themselves, each in a slightly different red value (255/254/253/252)
    cv2.line(frame, (calculateLine1[0][0],calculateLine1[0][1]), (calculateLine1[1][0],calculateLine1[1][1]), (0, 0, 255), 40)
    cv2.line(frame, (calculateLine2[0][0],calculateLine2[0][1]), (calculateLine2[1][0],calculateLine2[1][1]), (0, 0, 254), 40)
    cv2.line(frame, (calculateLine3[0][0],calculateLine3[0][1]), (calculateLine3[1][0],calculateLine3[1][1]), (0, 0, 253), 40)
    cv2.line(frame, (calculateLine4[0][0],calculateLine4[0][1]), (calculateLine4[1][0],calculateLine4[1][1]), (0, 0, 252), 40)


    #cv2.imshow("TEST", frame)
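The snippet stops at the line-drawing helper. Below is a minimal sketch of the counting loop that could follow, assuming the yolo.getObject()/yolo.objCounts interface shown in Example #2; the output writer is omitted.

# Sketch only, not part of the original example.
if __name__ == "__main__":
    cap = cv2.VideoCapture(video_file)

    while True:
        hasFrame, frame = cap.read()
        if not hasFrame:
            break

        # detect objects, draw their boxes, then overlay the counting lines
        yolo.getObject(frame, labelWant="", drawBox=True, bold=1,
                       textsize=0.6, bcolor=(0, 0, 255), tcolor=(255, 255, 255))
        draw_CalculateLine(frame)

        cv2.imshow("frame", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()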
Example #2
from yoloOpencv import opencvYOLO
import cv2
import imutils
import time

yolo = opencvYOLO(modeltype="yolov3", \
    objnames="/media/sf_VMshare/cfg.hand_gesture/obj.names", \
    weights="/media/sf_VMshare/cfg.hand_gesture/weights/yolov3-tiny_60000.weights",\
    cfg="/media/sf_VMshare/cfg.hand_gesture/yolov3-tiny.cfg")

inputType = "video"  # webcam, image, video
media = "/media/sf_VMshare/hand1.MOV"
write_video = False
video_out = "/media/sf_VMshare/out_hand1_yolov3.avi"
output_rotate = True
rotate = 180

start_time = time.time()

if __name__ == "__main__":

    if(inputType == "webcam"):
        INPUT = cv2.VideoCapture(0)
    elif(inputType == "image"):
        INPUT = cv2.imread(media)
    elif(inputType == "video"):
        INPUT = cv2.VideoCapture(media)

    if(inputType == "image"):
        yolo.getObject(INPUT, labelWant="", drawBox=True, bold=1, textsize=0.6, bcolor=(0,0,255), tcolor=(255,255,255))
        print ("Object counts:", yolo.objCounts)
Example #3
cam_num = len(cam_data)
streams = []
for cam in cam_data:
    streams.append(
        ESC32CAM(url=cam[0], \
            fix_size=cam[1], \
            rotate=cam[2], \
            wait_restart=cam[3] )
    )

blank = np.zeros((cam_resolution[1], cam_resolution[0], 3), dtype='uint8')
out = None

if (enable_ai is True):
    yolo = opencvYOLO(modeltype="tiny", \
        objnames="models/yolov3-tiny/obj.names", \
        weights="models/yolov3-tiny/crowd_human.weights",\
        cfg="models/yolov3-tiny/yolov3-tiny.cfg")

for stream in streams:
    stream.run()

if __name__ == "__main__":
    check_env()
    port = 5555
    jpeg_quality = 95
    sender = imagezmq.ImageSender("tcp://*:{}".format(port), REQ_REP=False)
    rpi_name = socket.gethostname()

    while True:
        '''
        for id, stream in enumerate(streams):
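The loop body is cut off above (and partly commented out). The following is a standalone sketch of how frames could be tiled and published with imagezmq's send_image(); the getFrame() accessor on ESC32CAM is hypothetical, since that class's interface is not shown.

# Sketch only, not part of the original example.
def publish_frames(streams, sender, rpi_name, blank):
    tiles = []
    for id, stream in enumerate(streams):
        frame = stream.getFrame()   # hypothetical accessor; the real ESC32CAM API is not shown
        if frame is None:
            frame = blank           # fall back to a blank tile if no frame is available yet
        tiles.append(frame)

    # simple side-by-side tiling; assumes every tile shares the blank frame's size
    canvas = np.hstack(tiles)
    # publish the combined frame (a JPEG variant could use sender.send_jpg with jpeg_quality)
    sender.send_image(rpi_name, canvas)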
Example #4
gps_file_play = "1550799908.6317935.gps"

video_type = 1  # 0--> webcam  1--> video file

write_video_out = True  #output video or not
video_out = "output"
defect_out = "defect"
#webcam_size  = (1920, 1080)
webcam_size = (960, 540)
rotatePIC = 0
frameRate = 10.0
comPort = "COM5"  #PC的TTL2USB port
baudRate = 4800

yolo = opencvYOLO(modeltype="yolov3-tiny", \
    objnames="cfg.road_edge.tiny\\obj.names", \
    weights="cfg.road_edge.tiny\\yolov3-tiny_500000.weights",\
    cfg="cfg.road_edge.tiny\\yolov3-tiny.cfg")

if (video_type == 1):
    gps_frames = []
    gps_file = open(gps_file_play, 'r', encoding='UTF-8')

    for line in gps_file.readlines():
        gps_frame_id, gps_data = line.split("|")
        if (int(gps_frame_id) >= 0):
            gps_frames.append((int(gps_frame_id), gps_data))

    print("GPS:", gps_frames)


def getGPS(frameid=0):
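    # Sketch of a possible body (not from the original): return the GPS record
    # logged closest to, but not after, the requested frame id.  Assumes
    # gps_frames is ordered by frame id, as it is when read from the file.
    last_data = ""
    for fid, data in gps_frames:
        if fid > frameid:
            break
        last_data = data

    return last_data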
Example #5
from yoloOpencv import opencvYOLO
import cv2
import imutils
import time, math

yolo = opencvYOLO(modeltype="yolov3", objnames="model/coco.names",
    weights="model/yolov4/yolov4.weights", cfg="model/yolov4/yolov4.cfg")

calculateLine1 = [(290, 420), (580, 420)]  # Down: [(from_x, from_y), (to_x, to_y)]
calculateLine2 = [(680, 400), (950, 400)]  # Up: [(from_x, from_y), (to_x, to_y)]
calculateRange_y = 50   # Y tolerance around the line (for deciding up/down crossings)

video_file = "videos/tt1.mp4"
output_video = "output/tt1.avi"

def draw_CalculateLine(frame):
    cv2.line(frame, (calculateLine1[0][0],calculateLine1[0][1]), (calculateLine1[1][0],calculateLine1[1][1]), (0, 0, 255), 2)
    cv2.line(frame, (calculateLine2[0][0],calculateLine2[0][1]), (calculateLine2[1][0],calculateLine2[1][1]), (255, 0, 0), 2)
    return frame

def bbox2Centroid(bbox):
    x = bbox[0]
    y = bbox[1]
    w = bbox[2]
    h = bbox[3]

    return (int(x+(w/2)), int(y+(h/2)))

def in_range_S2N(bbox):
    # only count cars travelling south to north that cross the line while staying within Y +/- calculateRange_y
    x = bbox[0]
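    # Sketch of a possible rest of the body (not from the original): the box
    # centroid must lie between the line's X endpoints and within
    # calculateRange_y pixels of its Y position.  Whether south-to-north
    # traffic maps to calculateLine1 or calculateLine2 is not shown above.
    cx, cy = bbox2Centroid(bbox)
    line_y = calculateLine1[0][1]
    if calculateLine1[0][0] <= cx <= calculateLine1[1][0] and abs(cy - line_y) <= calculateRange_y:
        return True

    return False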
Example #6
#! /usr/bin/env python3
# -*- coding: utf-8 -*-

from yoloOpencv import opencvYOLO
import cv2
import imutils
import time
from libPOS import desktop

yolo = opencvYOLO(modeltype="yolov3-tiny", \
    objnames="cfg.breads_v3.tiny/obj.names", \
    weights="cfg.breads_v3.tiny/weights/yolov3-tiny_500000.weights",\
    cfg="cfg.breads_v3.tiny/yolov3-tiny.cfg")

labels = {  "a1":["杯子蛋糕", 15], "a2":["丹麥起司",55], "a3":["十勝紅豆",42], "a4":["花生夾心", 26], \
            "a5":["夾心鬆餅", 42], "a6":["風味布雪", 38] }
idle_checkout = (6, 10)
media = "bread_test.mp4"
video_out = "output.avi"
dt = desktop("images/bg.jpg", "images/bgClick.jpg")

cv2.namedWindow("SunplusIT", cv2.WND_PROP_FULLSCREEN)        # Create a named window
cv2.setWindowProperty("SunplusIT", cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN)

start_time = time.time()
dt.emptyBG = None
last_movetime = time.time()  # last time any object was detected (objects > 0)
YOLO = False  # run YOLO detection in this loop?
txtStatus = ""

def group(items):
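    # Sketch of a possible body (not from the original): collapse the detected
    # label names into {label: count} so quantities can be shown per item.
    counts = {}
    for item in items:
        counts[item] = counts.get(item, 0) + 1

    return counts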
Example #7
import argparse
import cv2
from xml.dom import minidom
from yoloOpencv import opencvYOLO
import os

MODEL_PATH = "../output/siamese_model/"
media = "/WORK1/MyProjects/for_Sale/Face_Mask_wear/demo/videos/1.mp4"
predict_size = (64, 64)
write_output = True
output_video_path = "output.avi"
video_rate = 24.0

yolo_face_detect = opencvYOLO(modeltype="yolov3", \
	objnames="models/face_detect/yolov3/obj.names", \
	weights="models/face_detect/yolov3/weights/yolov3_268000.weights",\
	cfg="models/face_detect/yolov3/yolov3.cfg",\
	score=0.25, nms=0.6)

#---------------------------------------------------------------------------------------

camera = cv2.VideoCapture(media)
width = int(camera.get(cv2.CAP_PROP_FRAME_WIDTH))   # CAP_PROP values come back as float; cast to int
height = int(camera.get(cv2.CAP_PROP_FRAME_HEIGHT))
print("This video's resolution is: %d x %d" % (width, height))
if(write_output is True):
	#fourcc = cv2.VideoWriter_fourcc(*'MJPG')
	fourcc = cv2.VideoWriter_fourcc(*'XVID')
	out = cv2.VideoWriter(output_video_path, fourcc, video_rate, (int(width),int(height)))

def getFaces(img):
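    # Sketch of a possible body (not from the original): run the YOLO face
    # detector on the frame.  getObject() and objCounts are the only parts of
    # the opencvYOLO interface shown elsewhere on this page; anything more
    # (e.g. box coordinates) is not assumed here.
    yolo_face_detect.getObject(img, labelWant="", drawBox=True, bold=1,
                               textsize=0.6, bcolor=(0,0,255), tcolor=(255,255,255))
    return img, yolo_face_detect.objCounts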
Example #8
from PIL import Image
import numpy as np

import Adafruit_ILI9341 as TFT
import Adafruit_GPIO as GPIO
import Adafruit_GPIO.SPI as SPI

#-------------------------------------------------

#yolo = opencvYOLO(modeltype="yolov3-tiny", \
#    objnames="coco.names", \
#    weights="yolov3-tiny.weights",\
#    cfg="yolov3-tiny.cfg")

yolo = opencvYOLO(modeltype="yolov3-tiny", \
    objnames="road.names", \
    weights="yolov3-road-tiny_500000.weights",\
    cfg="yolov3-road-tiny.cfg")

inputType = "picam"  # webcam, image, video, picam
media = ""
#video_out = "/media/pi/SSD1T/recording/road.avi"
video_out = "record/"
video_length = 600
framerate = 5.0
picam_size = (640, 480)  #(1280,960)
webcam_size = (960, 640)
#--------------------------------------------------

# Raspberry Pi configuration.
DC = 18
RST = 23
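A minimal sketch of bringing up the ILI9341 panel with these pins, following the Adafruit_ILI9341 Python library's usual init pattern; the SPI port/device numbers and the 240x320 resize are assumptions.

# Sketch only, not part of the original example.
SPI_PORT = 0
SPI_DEVICE = 0

disp = TFT.ILI9341(DC, rst=RST,
                   spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE, max_speed_hz=64000000))
disp.begin()
disp.clear((0, 0, 0))

def show_on_tft(pil_img):
    # the ILI9341 is a 240x320 panel; resize the PIL frame before displaying it
    disp.display(pil_img.resize((240, 320)))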
Example #9
cfg = ConfigParser()
cfg.read("pos.ini", encoding="utf-8")

cam_id = cfg.getint("camera", "cam_id")
record_video = cfg.getboolean("camera", "record_video")
video_out = cfg.get("camera", "video_out")
frame_rate = cfg.getint("camera", "frame_rate")
dt = desktop(cfg.get("desktop", "bg"))
flipFrame = cfg.get("camera", "flipFrame")  #(H, V)
lang = cfg.get("operation", "lang")

wait_for_next = cfg.getint("operation", "wait_for_next")
cart_list_size = ast.literal_eval(cfg.get("desktop", "cart_list_size"))

yolo = opencvYOLO(modeltype=cfg.get("yoloModel", "modeltype"), \
    objnames=cfg.get("yoloModel", "objnames"),\
    weights=cfg.get("yoloModel", "weights"),\
    cfg=cfg.get("yoloModel", "cfg"))
'''
labels_tw = {"v1":["蓮藕", 36, "twkg"], "v2":["小蕃茄", 25, "twkg"], "v3":["綠彩椒",30, "twkg"], "v4":["紅彩椒",30, "twkg"], \
    "v5":["黃彩椒",30, "twkg"], "v6":["茄子", 18, "twkg"], "v7":["老薑",21, "twkg"], "v8":["綠辣椒",8, "twkg"],\
    "v9":["紅辣椒",8, "twkg"], "v10":["黃椒",15, "twkg"], "v11":["青椒",15, "twkg"], "v12":["蒜頭",6, "twkg"],\
    "v13":["雞蛋",11, "twkg"], "v14":["紅蘿蔔",32, "twkg"], "v15":["馬玲薯",10, "twkg"], "v16":["洋菇",6, "twkg"], "v17":["大白菜",52, "twkg"], \
    "v18":["玉米",21, "twkg"], "v19":["小南瓜",60, "twkg"], "v20":["牛蕃茄",26, "twkg"], "v21":["紫地瓜",26, "twkg"]}

labels_tw = {"v1":["橘子", 42, "twkg"], "v2":["雞蛋", 10, "one"], "v3":["綠辣椒", 18, "twkg"], "v4":["玉米荀", 0.25, "gram"],\
    "v5":["小蕃茄", 12, "twkg"], "v6":["棗子", 30, "one"], "v7":["哈密瓜", 65, "kg"], "v8":["蘋果", 25, "one"], \
    "v20": ["紅蘿蔔", 8, "twkg"], "v22":["牛奶芭樂", 20, "one"], "v23":["帶殼玉米荀", 0.15, "gram"] }

labels_en = { "001":["Croissant", 36], "002":["Hot dog", 75], "003":["Big bag",40], "004":["Pineapple",32], \
    "005":["Sesame bun",25], "006":["Hamburger", 66],"007":["Danish Bread",38], "008":["Twist roll",55],\
    "009":["Donuts",22], "010":["Meal bag",28], "011":["Powdered milk",32], "012":["Long Fort",42],\
Example #10
    (r, g, b) = rgb
    return '#{:02x}{:02x}{:02x}'.format(r, g, b)


#------------------------------------------------------------------------------------
checkenv()

winUI = UI(preview_size=win_preview_size)
window = winUI.create_window(win_title="道路缺陷-初期分類", resizable=True)

model_cfg = model_cfg.replace('\\', '/')
model_names = model_names.replace('\\', '/')
model_weights = model_weights.replace('\\', '/')

yolo = opencvYOLO(imgsize=model_size, \
    objnames=model_names, \
    weights=model_weights,\
    cfg=model_cfg, score=0.25, nms=0.5)

path_preview_img, filename_preview = '', ''  #for selected file
objects = get_mode_classes()
classColors = []
for i in range(0, len(class_list)):
    classColors.append(np.random.choice(range(256), size=3).tolist())

option_classes = []
for i, cname in enumerate(class_list):
    option_classes.append('{}_{}_{}'.format(i, cname,
                                            cname + '/' + class_list[cname]))

#dragging = False
#start_point = end_point = prior_rect = None
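A small hedged sketch of how classColors might be applied when drawing detections; the (class_id, box) detection format is hypothetical, since the snippet does not show how opencvYOLO exposes its results, and cv2 is assumed to be imported at the (truncated) top of the script.

# Sketch only, not part of the original example.
def draw_detections(img, detections):
    # detections: hypothetical list of (class_id, (x, y, w, h)) tuples
    for class_id, (x, y, w, h) in detections:
        color = tuple(classColors[class_id])
        cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)
    return img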
Example #11
from yoloOpencv import opencvYOLO
import numpy as np
import cv2
import imutils
import time, sys
from PIL import ImageFont, ImageDraw, Image
from libGreen import webCam
from libGreen import OBJTracking
from operator import itemgetter, attrgetter

yolo = opencvYOLO(modeltype="yolov3", imgsize=(416,416), \
    objnames="obj.names", \
    weights="yolov3-tiny_90000.weights",\
    cfg="yolov3-tiny.cfg")

hot_area_x = (610, 1160)
count_line_x = 730

cam_rotate = 0
flip_vertical = False
flip_horizontal = False

frame_display_size = (800, 600)
video_file = "tv2.MOV"
write_video = False
output_rotate = False
rotate = 0

draw_face_box = True
cam_id = 0
webcam_size = (1920, 1080)
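A brief sketch of applying these camera settings per frame with plain cv2/imutils calls; the libGreen webCam helper's own interface isn't shown here, so this is only an assumption about how the options could be used.

# Sketch only, not part of the original example.
def open_camera():
    cap = cv2.VideoCapture(cam_id)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, webcam_size[0])
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, webcam_size[1])
    return cap

def adjust_frame(frame):
    # rotation / flips as configured above, then shrink for display
    if cam_rotate != 0:
        frame = imutils.rotate(frame, cam_rotate)
    if flip_vertical:
        frame = cv2.flip(frame, 0)
    if flip_horizontal:
        frame = cv2.flip(frame, 1)
    return imutils.resize(frame, width=frame_display_size[0])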
Example #12
from yoloOpencv import opencvYOLO
import cv2
import imutils
import time

yolo = opencvYOLO(modeltype="yolov3", \
    objnames="../models/cfg.road.yolo_tiny.all/obj.names", \
    weights="../models/cfg.road.yolo_tiny.all/weights/yolov3-tiny_500000.weights",\
    cfg="../models/cfg.road.yolo_tiny.all/yolov3-tiny.cfg")

inputType = "video"  # webcam, image, video
media = "../test_videos/Edge03_20190321095303.avi"
write_video = False
video_out = "/media/sf_VMshare/out_hand1_yolov3.avi"
output_rotate = False
rotate = 0

#fps count
start = time.time()


def fps_count(num_frames):
    end = time.time()
    seconds = end - start
    fps = num_frames / seconds
    #print("Estimated frames per second : {0}".format(fps))
    return fps


if __name__ == "__main__":