import cv2
import numpy as np
import xml.etree.ElementTree as ET
from scipy import misc  # misc.imresize was removed in SciPy 1.3; an older SciPy is assumed here
# helper functions (getVehiclesFromFile, getHungary, updateTracks, drawVehicles, ...) are defined elsewhere in this project


def visualizeTracks(total_frames, videoFile, reSize=None):
    fno = 0
    completed_tracks = []
    root = ET.Element("Traffic")
    static_plane = np.zeros((2880, 5120, 3), dtype=float)
    vehicleDetectionFile = videoFile.split('.')[0] + "_predict.txt"
    #print vehicleDetectionFile
    cap = cv2.VideoCapture(videoFile)
    visual = np.zeros((1), np.uint8)
    color = np.random.randint(0, 255, (100, 3))
    tracks = []
    for fno in range(total_frames):
        print "Frameno: ", fno, len(completed_tracks)
        #pdb.set_trace()
        #print "frame number - " + str(fno)
        #print "Completed Tracks ", len(completed_tracks)
        #print "total tracks length " + str(len(tracks) + len(completed_tracks))
        ret, frames = cap.read()
        cv2.putText(frames, str(fno), (20, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.75,
                    (255, 0, 0), 1, cv2.LINE_AA)
        if reSize is not None:
            frames = misc.imresize(frames, reSize)
        if visual.shape == (1, ):
            visual = np.zeros(frames.shape, np.uint8)
        vehicles, vehicleTypes = getVehiclesFromFile(vehicleDetectionFile,
                                                     fno)  #Frame number
        print " number of vehicles ", vehicles.shape
        if vehicles.shape == (3, 1) or vehicles.shape[0] == 0:
            continue
        actual_vehicles = []
        print(vehicles.shape)
        for i, vehicle in enumerate(vehicles):
            # drop implausibly large detections (width or height above 100 px)
            if vehicle[2] > 100 or vehicle[3] > 100:
                continue
            actual_vehicles.append(vehicle)
        # filter the surviving detections once, after collecting them
        actual_vehicles = filterVehicles(actual_vehicles)
        actual_vehicles = np.array(actual_vehicles)
        trackNo = [0] * (actual_vehicles.shape[0])
        hungarianMatrix, hungarianAssignment = getHungary(
            actual_vehicles, tracks, frames)
        tracks, trackNo, completed_tracks = updateTracks(
            hungarianAssignment, tracks, actual_vehicles, vehicleTypes,
            hungarianMatrix, trackNo, completed_tracks)
        # TODO: getTransformedTracks / getTransformedVehicles
        frames = drawVehicles(frames, actual_vehicles, trackNo)
        frames, tracks, visual = printTracks(frames, tracks, visual)
        root = store_tracks_xml(tracks, root, fno)
        #print fno, vehicles.shape[0], len(completed_tracks) , len(tracks)
        cv2.imshow("Frames", frames)
        #drawTracks(visual, tracks)
        if cv2.waitKey(33) == 27:
            break
    cv2.destroyAllWindows()
    tree = ET.ElementTree(root)
    tree.write("traffic.xml")
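
# A minimal sketch of what the getHungary assignment step above might do, assuming
# scipy is available: build a centroid-distance cost matrix between live tracks and
# the current detections and solve it with the Hungarian algorithm. The cost metric,
# the Track.track point layout, and the [x, y, w, h] detection layout are assumptions
# taken from how these objects are used elsewhere in this file.
from scipy.optimize import linear_sum_assignment


def getHungarySketch(detections, tracks):
    if len(tracks) == 0 or len(detections) == 0:
        return np.zeros((0, 0)), []
    cost = np.zeros((len(tracks), len(detections)), dtype=float)
    for ti, track in enumerate(tracks):
        # last known centre of the track (assumed layout: track.track[-1] == (x, y))
        tx, ty = track.track[-1][0], track.track[-1][1]
        for di, det in enumerate(detections):
            cost[ti, di] = np.hypot(det[0] - tx, det[1] - ty)
    row_ind, col_ind = linear_sum_assignment(cost)
    # (track index, detection index) pairs, analogous to hungarianAssignment above
    return cost, list(zip(row_ind, col_ind))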
Example #2
def visualizeTracks(total_frames, videoFile, reSize):
    #cap = cv2.VideoCapture("../data/Trimmed_trafficVideo.mp4");
    #folder = "../output/Trimmed_trafficVideo_flow_output/tv"+str(fno)+"-"+str(fno+1)+".flo"
    fno = 0
    folder = "../output/" + videoFile.split('/')[-1].split(
        '.')[0] + "_flow_output/"
    vehicleDetectionFile = videoFile.split('.')[0] + "_predict.txt"
    #print vehicleDetectionFile
    cap = cv2.VideoCapture(videoFile)
    visual = np.zeros(reSize, np.uint8)
    color = np.random.randint(0, 255, (100, 3))
    tracks = []
    for fno in range(total_frames):
        ret, frames = cap.read()
        frames = misc.imresize(frames, reSize)
        vehicles, _ = getVehiclesFromFile(vehicleDetectionFile,
                                          fno)  #Frame number
        print("Vehicles detected:-")
        print(vehicles.shape)
        #if fno == 0:
        #	vehicles = vehicles[4:5]
        #else:
        #	vehicles = np.array([])
        frames = drawVehicles(frames, vehicles)
        flow, w, h = getDenseflowPreComputed(
            folder + str(fno) + "-" + str(fno + 1) +
            ".flo")  #Get the flow from the frame
        #print "Flow information "
        #print flow.shape, w, h
        newTracks = []
        # if a vehicle's centre does not lie in any existing track, create a new one
        count = 0
        for vehicle in vehicles:
            prevTrack = vehicleAlreadyTracked(vehicle, tracks)
            if prevTrack is not None:
                count += 1
                #print prevTrack.track, " Previous track ", vehicle
                prevTrack.w = vehicle[2]
                prevTrack.h = vehicle[3]
                #v.updateObj(vehicle)
            else:
                newTracks.append(
                    Track(vehicle[1], vehicle[0], vehicle[2], vehicle[3]))
        print "Count of already things " + str(count)
        tracks = updateTracksOfExistingObjects(flow, tracks, reSize)
        #print "updating the tracks is done"
        print "New tracks = ", len(newTracks)
        tracks = tracks + newTracks
        frames, tracks, visual = printTracks(frames, tracks, visual)
        cv2.imshow("Frames", frames)
        drawTracks(visual, tracks)
        #pdb.set_trace()
        if cv2.waitKey(33) == 27:
            break
    #cv2.waitKey(0)
    cv2.destroyAllWindows()
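
# A minimal sketch of the vehicleAlreadyTracked check used above: a detection is
# treated as already tracked when its centre falls inside the last bounding box of
# an existing track. The Track attributes (track, w, h) follow their usage above;
# the exact matching rule is an assumption.
def vehicleAlreadyTrackedSketch(vehicle, tracks):
    cx, cy = vehicle[0], vehicle[1]  # detection centre (assumed layout)
    for track in tracks:
        last_x, last_y = track.track[-1][0], track.track[-1][1]
        if abs(cx - last_x) <= track.w / 2.0 and abs(cy - last_y) <= track.h / 2.0:
            return track
    return None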
Example #3
def visualizeTracks(total_frames, videoFile, reSize):
    fno = 0
    completed_tracks = []
    static_plane = np.zeros((2880, 5120, 3), dtype=float)
    vehicleDetectionFile = videoFile.split('.')[0] + "_predict.txt"
    #print vehicleDetectionFile
    cap = cv2.VideoCapture(videoFile)
    visual = np.zeros(reSize, np.uint8)
    color = np.random.randint(0, 255, (100, 3))
    tracks = []
    for fno in range(total_frames):
        #pdb.set_trace()
        #print "frame number - " + str(fno)
        #print "Completed Tracks ", len(completed_tracks)
        #print "total tracks length " + str(len(tracks) + len(completed_tracks))
        ret, frames = cap.read()
        frames = misc.imresize(frames, reSize)
        vehicles, _ = getVehiclesFromFile(vehicleDetectionFile,
                                          fno)  #Frame number
        #print " number of vehicles ", vehicles.shape
        if vehicles.shape == (3, 1) or vehicles.shape[0] == 0:  # no usable detections for this frame
            break
        trackNo = [0] * (vehicles.shape[0])
        hungarianMatrix, hungarianAssignment = getHungary(vehicles, tracks)
        tracks, trackNo = updateTracks(hungarianAssignment, tracks, vehicles,
                                       hungarianMatrix, trackNo,
                                       len(completed_tracks))
        tracks, ctracks = postProcessTracks(tracks)
        completed_tracks += ctracks
        frames = drawVehicles(frames, vehicles, trackNo)
        frames, tracks, visual = printTracks(frames, tracks, visual)
        cv2.imshow("Frames", frames)
        drawTracks(visual, tracks)
        if cv2.waitKey(33) == 27:
            break
    cv2.destroyAllWindows()
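
# A minimal sketch of the postProcessTracks split used above: tracks that have not
# been matched for several frames are retired into completed_tracks. The `missed`
# counter and the threshold are assumptions; the real bookkeeping may differ.
def postProcessTracksSketch(tracks, max_missed=5):
    active, completed = [], []
    for track in tracks:
        if getattr(track, "missed", 0) > max_missed:
            completed.append(track)
        else:
            active.append(track)
    return active, completed


Example #4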
def visualizeTracks(total_frames, videoFile, reSize):
    fno = 0
    folder = "../output/" + videoFile.split('/')[-1].split(
        '.')[0] + "_flow_output/"
    vehicleDetectionFile = videoFile.split('.')[0] + "_predict.txt"
    #print vehicleDetectionFile
    cap = cv2.VideoCapture(videoFile)
    visual = np.zeros(reSize, np.uint8)
    color = np.random.randint(0, 255, (100, 3))
    tracks = []
    for fno in range(total_frames):
        ret, frames = cap.read()
        frames = misc.imresize(frames, reSize)
        vehicles, _ = getVehiclesFromFile(vehicleDetectionFile,
                                          fno)  #Frame number
        oldvsnew = [0] * (vehicles.shape[0])
        print("Vehicles detected:-")
        print(vehicles.shape)
        #if fno == 0:
        #	vehicles = vehicles[4:5]
        #else:
        #	vehicles = np.array([])
        flow, w, h = getDenseflowPreComputed(
            folder + "tv" + str(fno) + "-" + str(fno + 1) +
            ".flo")  #Get the flow from the frame
        newTracks = []
        #pdb.set_trace()
        # if a vehicle's centre does not lie in any existing track, create a new one
        count = 0
        for i, vehicle in enumerate(vehicles):
            if vehicle[2] > 100 or vehicle[3] > 100:  # take 100 px as the permissible size limit
                continue
            #some = np.zeros(reSize);
            #cv2.circle(some,(vehicle[1],vehicle[0]),2,(0,0,255),-1)
            prevTrack = vehicleAlreadyTracked(vehicle, tracks)
            if prevTrack is not None:
                #cv2.rectangle(some,((int(round(prevTrack.init_x-prevTrack.width/2))),int(round(prevTrack.init_y+prevTrack.height/2))),(int(round(prevTrack.init_x+prevTrack.width/2)),int(round(prevTrack.init_y-prevTrack.height/2))),(0,0,255),2)
                #some[prevTrack.track[-1][0]][prevTrack.track[-1][1]] = [255,255,0]
                count += 1
                oldvsnew[i] = 1
                #print prevTrack.track, " Previous track ", vehicle
                prevTrack.w = vehicle[2]
                prevTrack.h = vehicle[3]
            else:
                newTracks.append(
                    Track(
                        vehicle[0], vehicle[1], vehicle[2],
                        vehicle[3]))  # x axis is up/down (h), y axis is right/left
            #cv2.imshow("Some", some)
            if cv2.waitKey(33) == 27:
                break
        #pdb.set_trace()
        print "Count of already things " + str(count)
        frames = drawVehicles(frames, vehicles, oldvsnew)
        tracks = updateTracksOfExistingObjects(flow, tracks, reSize)
        #print "updating the tracks is done"
        print "New tracks = ", len(newTracks)
        tracks = tracks + newTracks
        frames, tracks, visual = printTracks(frames, tracks, visual)
        cv2.imshow("Frames", frames)
        drawTracks(visual, tracks)
        if cv2.waitKey(33) == 27:
            break
    #cv2.waitKey(0)
    cv2.destroyAllWindows()
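
# A minimal sketch of the precomputed dense-flow step used above. Reading the
# Middlebury .flo format (a float32 magic number, int32 width/height, then
# interleaved u/v float32 values) is standard; the per-track update below is an
# assumption about what updateTracksOfExistingObjects does with the flow field.
def readFloSketch(path):
    with open(path, "rb") as f:
        magic = np.fromfile(f, np.float32, count=1)[0]
        assert abs(magic - 202021.25) < 1e-3, "not a .flo file"
        w = int(np.fromfile(f, np.int32, count=1)[0])
        h = int(np.fromfile(f, np.int32, count=1)[0])
        flow = np.fromfile(f, np.float32, count=2 * w * h).reshape(h, w, 2)
    return flow, w, h


def updateTrackWithFlowSketch(track, flow):
    # advance the track head by the flow vector at its last position (assumed Track layout)
    y, x = int(track.track[-1][0]), int(track.track[-1][1])
    h, w = flow.shape[:2]
    if 0 <= y < h and 0 <= x < w:
        dx, dy = flow[y, x]  # u = x displacement, v = y displacement
        track.track.append((y + dy, x + dx))
    return track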
Example #5
    return frame


fno = 0
frame0 = 1
tracks = []
vehicleDetectionFile = "../data/Trimmed_trafficVideo_predict.txt"
total_frames = 1000
visual = np.zeros((384, 512, 3), np.uint8)
videoFile = "/Users/gramaguru/ComputerVision/computer-vision/trafficSimulation/data/Trimmed_trafficVideo.mp4"
cap = cv2.VideoCapture('../data/Trimmed_trafficVideo.mp4')
color = np.random.randint(0, 255, (100, 3))
for fno in range(total_frames):
    ret, frames = cap.read()
    frames = misc.imresize(frames, (384, 512, 3))
    vehicles, _ = getVehiclesFromFile(vehicleDetectionFile, fno)  #Frame number
    print("Vehicles detected:-")
    print(vehicles.shape)
    frames = drawVehicles(frames, vehicles)
    flow, w, h = getDenseflowPreComputed(
        "../output/Trimmed_trafficVideo_flow_output/tv" + str(fno) + "-" +
        str(fno + 1) + ".flo")  #Get the flow from the frame
    print "Flow information "
    print flow.shape, w, h
    newTracks = []
    # if a vehicle's centre does not lie in any existing track, create a new one
    for vehicle in vehicles:
        prevTrack = vehicleAlreadyTracked(vehicle)
        if prevTrack is not None:
            #print prevTrack.track, " Previous track ", vehicle
            prevTrack.w = vehicle[2]
	vis[:h2, w1:w1+w2] = img2
	cv2.imshow("combined", vis)


# capture frames from a video
cap = cv2.VideoCapture("trip10/trip10_trimmed.mp4");
cap = cv2.VideoCapture("Trimmed_trafficVideo.mp4");
frameNumber = 0
while cap.isOpened():
	# loop runs while capturing has been initialized;
	# reads frames from the video
	ret, frames = cap.read()
	if not ret:
		break

	#vehicles = getVehiclesHarCascades(frames)
	#vehicles, labels = getVehiclesYolo(frames)
	vehicles, labels = getVehiclesFromFile(frameNumber)
	print(vehicles.shape, "shape of the vehicles")
	print(labels)
	print(type(labels))
	for i, (x, y, w, h) in enumerate(vehicles):
		#print("Vehicle dim", x, y, w, h)
		#cv2.rectangle(frames, (int(round(x)), int(round(y))), (int(round(x + w)), int(round(y + h))), (0, 0, 255), 2)
		cv2.rectangle(frames, (int(round(x - w / 2)), int(round(y + h / 2))),
		              (int(round(x + w / 2)), int(round(y - h / 2))), (0, 0, 255), 2)
		cv2.putText(frames, labels[i][0], (int(round(x)), int(round(y))),
		            cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 0, 0), 1, cv2.LINE_AA)

	cv2.imshow('frame', frames)
	frameNumber += 1
#	static_plane = cv2.resize(static_plane,(int(round(0.25*static_plane.shape[0])),int(round(0.25*static_plane.shape[1]))))
#	cv2.imshow('static_plane', static_plane)
    # highlight those points on the plane
    frames = drawPloy(frames, other_road)
    # Apply a homographic transformation from the road points to the 4 selected
    # output points, using static_plane as the destination image (this uses the OpenCV library)
    H, status = cv2.findHomography(road_points, output_points)
    #static_plane = warpPerspectiveForward(frames, static_plane, H, 1)
    #static_plane = cv2.warpPerspective(frames, H,(4*frames.shape[1], 4*frames.shape[0]))
    #static_plane = warpPerspective(frames, static_plane, H, 1)
    static_plane = transformSetOfPoints(road_points, static_plane, H,
                                        (0, 255, 0))

    #vehicles = getVehiclesHarCascades(frames);
    #vehicles = getVehiclesYolo(frames);

    vehicleDetectionFile = videoFile.split('.')[0] + "_predict.txt"
    print(vehicleDetectionFile)
    vehicles, _ = getVehiclesFromFile(vehicleDetectionFile, frameNumber)
    print(vehicles.shape, "shape of the vehicles")
    for (x, y, w, h) in vehicles:
        print("Vehicle dim", x, y, w, h)
        newPoint = np.array([x, y])
        vehicle_points = np.vstack([vehicle_points, newPoint])
        cv2.rectangle(frames, (int(round(x)), int(round(y))),
                      (int(round(x + w)), int(round(y + h))), (0, 0, 255), 2)

    # adding vehicles
    static_plane = transformSetOfPoints(vehicle_points, static_plane, H,
                                        (255, 0, 0))
    transformed_pts = transformPoly(other_road, H)
    static_plane = drawPloy(static_plane, transformed_pts)
    showSideBySide(frames, static_plane, 0.25, 0.5)
    #cv2.imshow('frame', frames)
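
# A minimal sketch of the transformSetOfPoints call used above, assuming it maps
# image points through the homography H (from cv2.findHomography) and marks them on
# the static plane. The function name, point layout, and colour handling are assumptions.
def transformSetOfPointsSketch(points, static_plane, H, color):
    pts = np.asarray(points, dtype=np.float32).reshape(-1, 1, 2)
    warped = cv2.perspectiveTransform(pts, H).reshape(-1, 2)
    for x, y in warped:
        px, py = int(round(x)), int(round(y))
        if 0 <= px < static_plane.shape[1] and 0 <= py < static_plane.shape[0]:
            cv2.circle(static_plane, (px, py), 3, color, -1)
    return static_plane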