Ejemplo n.º 1
0
import time
x_co = 0
y_co = 0


def on_mouse(event, x, y, flag, param):
    """Mouse callback: publish the cursor position via the module globals
    x_co / y_co whenever the mouse moves over the window."""
    global x_co, y_co
    if event == cv.CV_EVENT_MOUSEMOVE:
        x_co, y_co = x, y


# Live HSV inspector: show the webcam feed and print/draw the H, S, V values
# of the pixel under the mouse cursor (position tracked by on_mouse above).
cv.NamedWindow("camera", 1)
capture = cv.CaptureFromCAM(0)
font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 0.5, 1, 0, 2, 8)
while True:
    src = cv.QueryFrame(capture)
    cv.Smooth(src, src, cv.CV_BLUR, 3)
    hsv = cv.CreateImage(cv.GetSize(src), 8, 3)
    thr = cv.CreateImage(cv.GetSize(src), 8, 1)  # allocated but unused here
    cv.CvtColor(src, hsv, cv.CV_BGR2HSV)
    # NOTE(review): re-registering the callback every frame is redundant;
    # registering it once before the loop would suffice.
    cv.SetMouseCallback("camera", on_mouse, 0)
    # HSV triple at the last known mouse position (row = y, col = x).
    s = cv.Get2D(hsv, y_co, x_co)
    print "H:", s[0], "      S:", s[1], "       V:", s[2]
    cv.PutText(src,
               str(s[0]) + "," + str(s[1]) + "," + str(s[2]), (x_co, y_co),
               font, (55, 25, 255))
    cv.ShowImage("camera", src)
    if cv.WaitKey(10) == 27:  # ESC quits
        break
Ejemplo n.º 2
0
def main():
    """Render one "shot colors" strip image per shot of a movie.

    The working directory is taken from sys.argv[1]; project.xml supplies
    the movie path and start frame, and shots.txt (tab-separated) supplies
    per-shot durations.  Every EVERY_NTH_FRAME-th frame of each shot is
    downscaled, converted to HLS, k-means-clustered into dominant colors,
    and drawn as one row of the shot's output image, which is saved into
    OUTPUT_DIR_NAME.

    Depends on module-level names defined elsewhere in the file:
    OUTPUT_DIR_NAME, EVERY_NTH_FRAME, PIXELS_PER_COLOR, NUM_CLUSTERS,
    DEBUG, and the sort helper hls_sort2.
    """
    os.chdir(sys.argv[1])
    try:
        os.mkdir(OUTPUT_DIR_NAME)
    except:
        # best effort -- directory probably exists already
        pass

    tree = et.parse("project.xml")
    movie = tree.getroot()
    file_path = movie.attrib["path"]
    cap = cv.CreateFileCapture(file_path)
    cv.QueryFrame(cap)

    # skip frames in the beginning, if neccessary
    start_frame = int(movie.attrib["start_frame"])
    for i in range(start_frame):
        cv.QueryFrame(cap)

    if DEBUG:
        cv.NamedWindow("win", cv.CV_WINDOW_AUTOSIZE)
        cv.MoveWindow("win", 200, 200)

    t = time.time()

    # Third tab-separated column of shots.txt is the shot duration (frames).
    f = open("shots.txt", "r")
    scene_durations = [
        int(values[2]) for values in [line.split("\t") for line in f if line]
    ]
    f.close()

    for scene_nr, duration in enumerate(scene_durations):
        print "shot #%d" % scene_nr, "/", len(scene_durations) - 1

        # One output row per sampled frame of this shot.
        h = int(math.ceil(float(duration) / EVERY_NTH_FRAME))
        output_img = cv.CreateImage((PIXELS_PER_COLOR * NUM_CLUSTERS, h),
                                    cv.IPL_DEPTH_8U, 3)
        frame_counter = 0

        for i in range(duration):
            img_orig = cv.QueryFrame(cap)
            if not img_orig:  # eof
                break

            if i % EVERY_NTH_FRAME != 0:
                continue

            # Work on a quarter-size copy to speed up the clustering.
            new_width = int(img_orig.width / 4.0)
            new_height = int(img_orig.height / 4.0)

            img_small = cv.CreateImage((new_width, new_height),
                                       cv.IPL_DEPTH_8U, 3)
            cv.Resize(img_orig, img_small, cv.CV_INTER_AREA)

            if DEBUG:
                cv.ShowImage("win", img_small)

            # Cluster in HLS space rather than BGR.
            img = cv.CreateImage((new_width, new_height), cv.IPL_DEPTH_8U, 3)
            cv.CvtColor(img_small, img, cv.CV_BGR2HLS)

            # convert to numpy array
            a = numpy.asarray(cv.GetMat(img))
            a = a.reshape(a.shape[0] * a.shape[1],
                          a.shape[2])  # make it 1-dimensional

            # set initial centroids: the four quarter-point pixels plus the
            # image center (inserted in the middle).
            # NOTE(review): the comprehension variable f below shadows the
            # already-closed file handle f above -- harmless but confusing.
            init_cluster = []
            for y in [int(new_height / 4.0), int(new_height * 3 / 4.0)]:
                for x in [int(new_width * f) for f in [0.25, 0.75]]:
                    init_cluster.append(a[y * new_width + x])
            init_cluster.insert(
                2, a[int(new_height / 2.0) * new_width + int(new_width / 2.0)])

            centroids, labels = scipy.cluster.vq.kmeans2(
                a, numpy.array(init_cluster))

            vecs, dist = scipy.cluster.vq.vq(a, centroids)  # assign codes
            counts, bins = scipy.histogram(vecs,
                                           len(centroids))  # count occurrences
            # Keep only the non-empty clusters.
            # NOTE(review): this i shadows the frame-loop variable i; it is
            # harmless because i is rebound at the top of each frame
            # iteration, but a distinct name would be clearer.
            centroid_count = []
            for i, count in enumerate(counts):
                #print centroids[i], count
                if count > 0:
                    centroid_count.append((centroids[i].tolist(), count))

            #centroids = centroids.tolist()
            #centroids.sort(hls_sort)

            centroid_count.sort(hls_sort2)

            # Draw each cluster as a horizontal run of pixels proportional
            # to its share of the frame, on row frame_counter of the strip.
            px_count = new_width * new_height
            x = 0
            for item in centroid_count:
                count = item[1] * (PIXELS_PER_COLOR * NUM_CLUSTERS)
                count = int(math.ceil(count / float(px_count)))
                centroid = item[0]
                for l in range(count):
                    if x + l >= PIXELS_PER_COLOR * NUM_CLUSTERS:
                        break
                    cv.Set2D(output_img, frame_counter, x + l,
                             (centroid[0], centroid[1], centroid[2]))
                x += count

            if DEBUG:
                if cv.WaitKey(1) == 27:
                    cv.DestroyWindow("win")
                    return

            frame_counter += 1

        # Convert the HLS strip back to BGR and save one PNG per shot.
        output_img_rgb = cv.CreateImage(cv.GetSize(output_img),
                                        cv.IPL_DEPTH_8U, 3)
        cv.CvtColor(output_img, output_img_rgb, cv.CV_HLS2BGR)
        cv.SaveImage(OUTPUT_DIR_NAME + "\\shot_colors_%04d.png" % (scene_nr),
                     output_img_rgb)

    if DEBUG:
        cv.DestroyWindow("win")
    print "%.2f min" % ((time.time() - t) / 60)
    #raw_input("- done -")
    return
Ejemplo n.º 3
0
    def run(self):
        """Main capture loop: detect motion blobs against a running-average
        background, associate each blob with the nearest previously tracked
        object, draw a tracking overlay, and periodically send occupancy
        reports.  Runs until ESC is pressed.

        Reads/writes instance state: self.capture, self.tracked_objects,
        self.observed_occupancy, self.last_request; uses the TrackedObject
        class and the send_occupancy/send_image methods defined elsewhere.
        """
        # Capture first frame to get size
        frame = cv.QueryFrame(self.capture)
        frame_size = cv.GetSize(frame)
        # Work at half resolution for speed.
        new_size = ( frame_size[0] / 2, frame_size[1] / 2)
        color_image = cv.CreateImage(new_size, 8, 3)
        grey_image = cv.CreateImage(new_size, cv.IPL_DEPTH_8U, 1)
        moving_average = cv.CreateImage(new_size, cv.IPL_DEPTH_32F, 3)
        font = cv.InitFont(cv.CV_FONT_HERSHEY_COMPLEX_SMALL, 1, 1, 0, 1, 1)
        first = True
        k = 0  # iteration counter (debug/bookkeeping only)
        while True:
            k+=1

            captured_image = cv.QueryFrame(self.capture)
            color_image = cv.CreateImage(new_size, captured_image.depth, captured_image.nChannels)
            cv.Resize(captured_image, color_image)
            # Smooth to get rid of false positives
            cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 3, 0)

            if first:
                # First frame: allocate scratch buffers and seed the
                # running-average background model.
                difference = cv.CloneImage(color_image)
                temp = cv.CloneImage(color_image)
                cv.ConvertScale(color_image, moving_average, 1.0, 0.0)
                first = False
            else:
                cv.RunningAvg(color_image, moving_average, 0.020, None)

            # Convert the scale of the moving average.
            cv.ConvertScale(moving_average, temp, 1.0, 0.0)

            # Minus the current frame from the moving average.
            cv.AbsDiff(color_image, temp, difference)

            # Convert the image to grayscale.
            cv.CvtColor(difference, grey_image, cv.CV_RGB2GRAY)

            # Convert the image to black and white.
            cv.Threshold(grey_image, grey_image, 70, 255, cv.CV_THRESH_BINARY)

            # Dilate and erode to get people blobs
            cv.Dilate(grey_image, grey_image, None, 18)
            cv.Erode(grey_image, grey_image, None, 10)

            storage = cv.CreateMemStorage(0)
            contour = cv.FindContours(grey_image, storage, cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_TC89_KCOS)
            points = []
            #cv.DrawContours(color_image, contour, cv.CV_RGB(255,0,0), cv.CV_RGB(255,0,255), 2, 1, 8, (0, 0))
            i = 0  # blob counter, used only for the on-screen label
            while contour:
                self.observed_occupancy = True

                bound_rect = cv.BoundingRect(list(contour))

                # Blob center.
                center_x = bound_rect[0] + (bound_rect[2]/2)
                center_y = bound_rect[1] + (bound_rect[3]/2)
                #if center_y < 200:
                #    continue
                i+=1
                # Find the tracked object whose last position is nearest to
                # this blob.  NOTE(review): the 10000-pixel starting
                # distance is effectively "always match something once any
                # object exists" -- confirm that is intended.
                closest_distance = 10000
                closest_object = None
                for to in self.tracked_objects:
                    current_distance = math.hypot(to.latest_position[0] - center_x, to.latest_position[1] - center_y)
                    closest_distance = min(closest_distance, current_distance)
                    #print "DISTANCES: ", str(closest_distance), str(current_distance)
                    if current_distance == closest_distance:
                        closest_object = to

                if closest_object is None:
                    #print "OBJECT IS NEW"
                    self.tracked_objects.append(TrackedObject((center_x, center_y), [(center_x, center_y)], "new"))
                else:
                    #print "CLOSEST OBJECT: ", closest_object.latest_position
                    closest_object.movement_vector.append((center_x, center_y))
                    closest_object.latest_position = (center_x, center_y)
                #print "AMOUNT OF OBJECTS: ", str(len(self.tracked_objects))

                if closest_object is not None:
                    cv.Line(color_image, closest_object.latest_position, (center_x, center_y), cv.CV_RGB(0,255,0))

                    #closest_x = min(closest_x, to.latest_position[0])
                    #closest_y = min(closest_y, to.latest_position[0])

                contour = contour.h_next()

                # Draw the bounding box, blob index, and center marker.
                pt1 = (bound_rect[0], bound_rect[1])
                pt2 = (bound_rect[0] + bound_rect[2], bound_rect[1] + bound_rect[3])
                points.append(pt1)
                points.append(pt2)
                cv.Rectangle(color_image, pt1, pt2, cv.CV_RGB(0,0,255), 1)
                cv.PutText(color_image, str(i), pt1, font, cv.CV_RGB(255,0,255))
                cv.Circle(color_image, (center_x, center_y), 2, cv.CV_RGB(255,0,255), 2, 8, 0)

            #print "LEN ", len(self.tracked_objects)
            #if len(self.tracked_objects) > 0 and self.tracked_objects[0] is not None:
            #    #print "ENTRE"
            #    obj_vector = self.tracked_objects[0].movement_vector
            #    print "MVV LEN ", len(obj_vector)
            #    for index in range(0, len(obj_vector)-2):
            #        try:
            #            print "Index ", index, "len(obj_vector) ", len(obj_vector)
            #            cv.Line(color_image, obj_vector[index], obj_vector[index+1], cv.CV_RGB(0,255,0))
            #
            #        except: print "oops"

            #print "Iteration ", k, " Vector: ", vectors["1"]
            cv.ShowImage("Target", color_image)

            # Throttled reporting: send occupancy + snapshot at most once
            # per request_threshold seconds.
            time_passed = time.time() - self.last_request
            request_threshold = 60
            if time_passed > request_threshold:
                self.send_occupancy()
                self.send_image(color_image)


            #Listen for ESC key
            c = cv.WaitKey(10)
            #c = cv.WaitKey(7) % 0x100
            if c == 27:
                break
Ejemplo n.º 4
0
        hedef_konumu.width = logo_bau.width
        hedef_konumu.height = logo_bau.height
        hedefler.append(hedef_konumu)

    return hedefler


sayi = 5
hedefler = hedefleri_olustur(sayi)
gecikme_zamani = 100
skor = 0
yazi_tipi = python_opencv_modulu.InitFont(
    python_opencv_modulu.CV_FONT_HERSHEY_SIMPLEX, 1, 1)

while True:
    goruntu_yakala = python_opencv_modulu.QueryFrame(kamera)
    python_opencv_modulu.Flip(goruntu_yakala, goruntu_yakala, flipMode=1)
    python_opencv_modulu.Smooth(goruntu_yakala, current,
                                python_opencv_modulu.CV_BLUR, 15, 15)
    python_opencv_modulu.AbsDiff(current, previous, difference)
    cerceve = python_opencv_modulu.CreateImage(cerceve_boyutu, 8, 1)
    python_opencv_modulu.CvtColor(difference, cerceve,
                                  python_opencv_modulu.CV_BGR2GRAY)
    python_opencv_modulu.Threshold(cerceve, cerceve, 10, 0xff,
                                   python_opencv_modulu.CV_THRESH_BINARY)
    python_opencv_modulu.Dilate(cerceve,
                                cerceve,
                                element=elips_sekil_kalibi,
                                iterations=3)

    if gecikme_zamani <= 0:
Ejemplo n.º 5
0
            rightmost = i
            if temp == 0:
                leftmost = i
                temp = 1
    for i in range(im.height):
        row = cv.GetRow(im, i)
        if cv.Sum(row)[0] != 0.0:
            bottommost = i
            if temp == 1:
                topmost = i
                temp = 2
    return (leftmost, rightmost, topmost, bottommost)


# Color-picking stage: show live video and let the user double-click the
# desired color.  my_mouse_callback and evente are defined elsewhere in the
# file; the callback presumably stores the mouse-event code in evente --
# confirm against the full source.
capture = cv.CaptureFromCAM(1)
frame = cv.QueryFrame(capture)
test = cv.CreateImage(cv.GetSize(frame), 8, 3)

# 	Now the selection of the desired color from video.( new)
cv.NamedWindow("pick")
cv.SetMouseCallback("pick", my_mouse_callback)
while (1):
    frame = cv.QueryFrame(capture)
    cv.ShowImage("pick", frame)
    cv.WaitKey(33)
    if evente == 7:  # When double-clicked(i.e. event=7), this window closes and opens next window
        break
cv.DestroyWindow("pick")

#	Drawing Part (from earlier program)
cv.NamedWindow("threshold")
Ejemplo n.º 6
0
    def closest_point(self, pt):
        pos0, dpos = zip(*self.objects)
        est_pts = np.asarray(pos0)+np.asarray(dpos)
        min_dist = np.sum( est_pts

    def run(self):
        # Capture first frame to get size
        frame = cv.QueryFrame(self.capture)
        frame_size = cv.GetSize(frame)
        color_image = cv.CreateImage(cv.GetSize(frame), 8, 3)
        grey_image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
        moving_average = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_32F, 3)

        first = True

        while True:
            closest_to_left = cv.GetSize(frame)[0]
            closest_to_right = cv.GetSize(frame)[1]

            color_image = cv.QueryFrame(self.capture)

            # Smooth to get rid of false positives
            cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 3, 0)

            if first:
                difference = cv.CloneImage(color_image)
                temp = cv.CloneImage(color_image)
                cv.ConvertScale(color_image, moving_average, 1.0, 0.0)
                first = False
            else:
                cv.RunningAvg(color_image, moving_average, 0.020, None)

            # Convert the scale of the moving average.
            cv.ConvertScale(moving_average, temp, 1.0, 0.0)

            # Minus the current frame from the moving average.
            cv.AbsDiff(color_image, temp, difference)

            # Convert the image to grayscale.
            cv.CvtColor(difference, grey_image, cv.CV_RGB2GRAY)

            # Convert the image to black and white.
            cv.Threshold(grey_image, grey_image, 70, 255, cv.CV_THRESH_BINARY)

            # Dilate and erode to get people blobs
            cv.Dilate(grey_image, grey_image, None, 18)
            cv.Erode(grey_image, grey_image, None, 10)

            storage = cv.CreateMemStorage(0)
            contour = cv.FindContours(grey_image, storage, cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_SIMPLE)
            
            objects = []

            while contour:
                bound_rect = cv.BoundingRect(list(contour))
                contour = contour.h_next()

                pt1 = (bound_rect[0], bound_rect[1])
                pt2 = (bound_rect[0] + bound_rect[2], bound_rect[1] + bound_rect[3])
                cv.Rectangle(color_image, pt1, pt2, cv.CV_RGB(255,0,0), 1)

                center_point = ((pt1[0] + pt2[0]) / 2, (pt1[1] + pt2[1]) / 2), points)
                if len(objects):
                    closest
Ejemplo n.º 7
0
def main():
    """Detect objects from a USB webcam and label their movement direction.

    Grabs frames, blurs and binarizes them, extracts contours, fits a
    bounding box to every contour with at least 50 points, then clusters
    the box centers into objects and draws each object's direction label.
    Press 'f' to quit.

    Depends on module-level helpers defined elsewhere in the file:
    `cambio` (inverts/discretizes the saved grayscale image), `de`
    (clusters centers into labeled objects), and the global `arra`.
    """
    captura = cv.CreateCameraCapture(
        1)  # capture handle for the USB webcam
    global arra  # shared object-tracking array (module level)
    font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 0, 3,
                       3)  # font for the direction labels
    proses = 0
    sumaa = 0
    while True:
        img = cv.QueryFrame(captura)
        #cv.Resize(img,img,cv.CV_INTER_CUBIC)
        #tiempoi = time.time()
        #draw = ImageDraw.Draw(img)
        anch, alt = cv.GetSize(img)  # frame dimensions
        k = cv.WaitKey(10)
        # poll the keyboard so the UI stays responsive
        #cv.SaveImage("test.jpg",img)
        cv.Smooth(img, img, cv.CV_GAUSSIAN, 9,
                  9)  # Gaussian blur to reduce noise
        #cv.SaveImage("sruido.jpg",img)
        grey = cv.CreateImage(cv.GetSize(img), 8,
                              1)  # blank single-channel image
        bn = cv.CreateImage(cv.GetSize(img), 8, 1)
        # another blank single-channel image (edge output)
        cv.CvtColor(
            img, grey, cv.CV_BGR2GRAY
        )  # grayscale copy of the frame
        #cv.SaveImage("gris.jpg",grey)
        cv.ConvertImage(img, bn, 0)
        # seed bn from the frame
        threshold = 40  # binarization threshold
        colour = 255  # value assigned above the threshold
        cv.Threshold(grey, grey, threshold, colour,
                     cv.CV_THRESH_BINARY)  # binarize
        cv.Canny(
            grey, bn, 1, 1, 3
        )  # edge image used for contour extraction
        #cv.SaveImage("cont.jpg",bn)
        cv.SaveImage("grey.jpg", grey)  # save the binarized frame
        cambio("grey.jpg")  # invert and discretize the saved image
        imgg = cv.LoadImage(
            'ngrey.jpg',
            cv.CV_LOAD_IMAGE_GRAYSCALE)  # reload the processed image
        storage = cv.CreateMemStorage(
            0)  # contour storage, avoids exhausting memory
        contours = cv.FindContours(
            imgg, storage, cv.CV_RETR_TREE, cv.CV_CHAIN_APPROX_SIMPLE,
            (0, 0))  # contour point sequences
        puntos = [
        ]  # centers of the detected objects, verified later
        while contours:  # walk the contour sequences
            # Track the extreme corners (nx,ny)-(mx,my) of this contour.
            nx, ny = contours[0]
            mx, my = contours[0]
            ancho, altura = cv.GetSize(img)  # image size (unused below)
            for i in range(len(contours)):  # scan every contour point
                xx, yy = contours[i]
                if xx > mx:
                    mx = xx
                if xx < nx:
                    nx = xx
                if yy > my:
                    my = yy
                if yy < ny:
                    ny = yy
            a, b, c, d = random.randint(0, 255), random.randint(
                0,
                255), random.randint(0,
                                     255), random.randint(0,
                                                          255)  # random color (unused)
            if len(contours
                   ) >= 50:  # treated as a real shape only with >= 50 points
                cv.Rectangle(img, (nx, ny), (mx, my), cv.RGB(0, 255, 0), 1, 8,
                             0)  # draw the bounding rectangle
                #are = abs(mx-nx)*abs(my-ny)
                puntos.append((abs(mx + nx) / 2,
                               abs(my + ny) / 2))  # record the center
                #are = abs(mx-nx)*abs(my-ny)
            contours = contours.h_next(
            )  # advance to the next contour chain
        nuevo = de(
            puntos, anch, alt
        )  # cluster the centers into objects with direction labels
        for i in range(len(nuevo)):  # draw each object's direction
            x, y, z = nuevo[i]
            cv.PutText(img, "" + z + "", (x, y), font, 255)
        tiempof = time.time()  # timestamp for performance checks
        cv.ShowImage('img', img)
        #cv.SaveImage("final.jpg",img)
        #tiempoa = tiempof - tiempoi
        #proses += 1
        #sumaa  =  sumaa + tiempoa
        #print float(sumaa)/float(proses)
        #f.write(""+str(proses)+" "+str(tiempoa)+"\n")
        # (performance-measurement scaffolding, disabled)
        # BUG FIX: cv.WaitKey returns an int key code; the original compared
        # it to the string 'f', which is never equal, so the loop could not
        # be exited from the keyboard.
        if k == ord('f'):  # quit when 'f' is pressed
            break
Ejemplo n.º 8
0
    def run(self):
        """Motion-tracking loop: diff each webcam frame against a running
        average, find contours of the changed regions, box them, and draw a
        bullseye at the midpoint of all detected movement.  Runs until ESC
        or ENTER is pressed.

        NOTE(review): the color-conversion section in the middle references
        undefined names (cv.hsv, cv.cvtColor, CV_BGR2HSV, inRange, Scalar,
        cv.bw) and would raise at runtime, and grey_image is handed to
        FindContours without ever being written in the visible code.  This
        looks like an unfinished port toward the cv2-style API -- it needs
        fixing before this method can run.
        """
        # Capture first frame to get size
        frame = cv.QueryFrame(self.capture)
        frame_size = cv.GetSize(frame)
        grey_image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
        moving_average = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_32F, 3)
        difference = None

        while True:
            # Capture frame from webcam
            color_image = cv.QueryFrame(self.capture)

            # Smooth to get rid of false positives
            cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 3, 0)

            if not difference:
                # Initialize
                difference = cv.CloneImage(color_image)
                temp = cv.CloneImage(color_image)
                cv.ConvertScale(color_image, moving_average, 1.0, 0.0)
            else:
                cv.RunningAvg(color_image, moving_average, 0.020, None)

            # Convert the scale of the moving average.
            cv.ConvertScale(moving_average, temp, 1.0, 0.0)

            # Minus the current frame from the moving average.
            cv.AbsDiff(color_image, temp, difference)

            # Convert the image to grayscale.
            # NOTE(review): broken section -- see docstring above.
            cv.hsv
            cv.cvtColor(color_image, cv.hsv, CV_BGR2HSV)

            inRange(hsv, Scalar(0, 58, 89), Scalar(25, 173, 229), cv.bw)

            # Dilate and erode to get object blobs
            # cv.Dilate(grey_image, grey_image, None, 18)
            # cv.Erode(grey_image, grey_image, None, 10)

            # Calculate movements
            storage = cv.CreateMemStorage(0)
            contour = cv.FindContours(grey_image, storage, cv.CV_RETR_CCOMP,
                                      cv.CV_CHAIN_APPROX_SIMPLE)
            points = []

            while contour:
                # Draw rectangles
                bound_rect = cv.BoundingRect(list(contour))
                contour = contour.h_next()

                pt1 = (bound_rect[0], bound_rect[1])
                pt2 = (bound_rect[0] + bound_rect[2],
                       bound_rect[1] + bound_rect[3])
                points.append(pt1)
                points.append(pt2)
                cv.Rectangle(color_image, pt1, pt2, cv.CV_RGB(255, 0, 0), 1)

            num_points = len(points)
            if num_points:
                # Draw bullseye in midpoint of all movements
                x = y = 0
                for point in points:
                    x += point[0]
                    y += point[1]
                x /= num_points
                y /= num_points
                center_point = (x, y)
                cv.Circle(color_image, center_point, 40,
                          cv.CV_RGB(255, 255, 255), 1)
                cv.Circle(color_image, center_point, 30,
                          cv.CV_RGB(255, 100, 0), 1)
                cv.Circle(color_image, center_point, 20,
                          cv.CV_RGB(255, 255, 255), 1)
                cv.Circle(color_image, center_point, 10,
                          cv.CV_RGB(255, 100, 0), 5)

            # Display frame to user
            cv.ShowImage("Target", color_image)

            # Listen for ESC or ENTER key
            c = cv.WaitKey(7) % 0x100
            if c == 27 or c == 10:
                break
Ejemplo n.º 9
0
    #Get component colors
    imgyellow = cv.CreateImage(cv.GetSize(imhsv), 8, 1)
    imgblue = cv.CreateImage(cv.GetSize(imhsv), 8, 1)

    imgthreshold = cv.CreateImage(cv.GetSize(imhsv), 8, 1)

    cv.InRangeS(imghsv, cv.Scalar(20, 100, 100), cv.Scalar(30, 255, 255),
                imgyellow)  # Select a range of yellow color
    cv.InRangeS(imghsv, cv.Scalar(100, 100, 100), cv.Scalar(120, 255, 255),
                imgblue)  # Select a range of blue color
    cv.Add(imgyellow, imgblue, imgthreshold)
    return imgthreshold


# Setup for the blob-tracking loop below: open the default camera, allocate
# scratch images, create the display windows, and prepare lists that will
# hold blob coordinates per color.
capture = cv.CaptureFromCAM(0)
frame = cv.QueryFrame(capture)
frame_size = cv.GetSize(frame)
test = cv.CreateImage(cv.GetSize(frame), 8, 3)
img2 = cv.CreateImage(cv.GetSize(frame), 8, 3)
cv.NamedWindow("Real", 0)
cv.NamedWindow("Threshold", 0)
cv.NamedWindow("final", 0)

#	Create two lists to store co-ordinates of blobs
blue = []
yellow = []
red = []
green = []
orange = []
while (1):
Ejemplo n.º 10
0
    def run(self):
        """Play the video until it ends, the configured duration elapses,
        or the user interrupts playback.

        Frames are read with OpenCV, converted to RGB (after resizing when
        running fullscreen), blitted onto the experiment surface via
        PyGame, and paced by self.frame_dur.  self.duration may be an int
        (milliseconds to play), "keypress", or "mouseclick".

        Returns True on success.  Raises exceptions.runtime_error when the
        escape key is pressed.
        """

        # Log the onset time of the item
        self.set_item_onset()

        t = pygame.time.get_ticks()
        start_t = t

        # Loop until a key is pressed or the duration condition fires
        go = True
        while go:

            # Get the next frame
            self.src = cv.QueryFrame(self.video)

            # QueryFrame returns None at the end of the video
            # (idiom fix: `is None` instead of `== None`)
            if self.src is None:
                break

            # Resize if requested and convert the resulting image to
            # RGB format, which is compatible with PyGame
            if self._fullscreen:
                cv.Resize(self.src, self.src_tmp)
                cv.CvtColor(self.src_tmp, self.src_rgb, cv.CV_BGR2RGB)
            else:
                cv.CvtColor(self.src, self.src_rgb, cv.CV_BGR2RGB)

            # Convert the image to PyGame format
            pg_img = pygame.image.frombuffer(self.src_rgb.tostring(),
                                             cv.GetSize(self.src_rgb), "RGB")

            # Show the video frame!
            self.experiment.surface.blit(pg_img, (self._x, self._y))
            pygame.display.flip()

            # Pause before jumping to the next frame
            pygame.time.wait(self.frame_dur - pygame.time.get_ticks() + t)
            t = pygame.time.get_ticks()

            # An int duration means "play for this many milliseconds"
            # (idiom fix: isinstance instead of type() == int)
            if isinstance(self.duration, int):
                if t - start_t >= self.duration:
                    go = False

            # Catch escape presses
            for event in pygame.event.get():

                if event.type == KEYDOWN:
                    if event.key == pygame.K_ESCAPE:
                        raise exceptions.runtime_error(
                            "The escape key was pressed.")
                    if self.duration == "keypress":
                        go = False

                if event.type == MOUSEBUTTONDOWN and self.duration == "mouseclick":
                    go = False

        # Release the camera
        # Note: This function appears to be missing. Perhaps it's ok
        # and Python will release it automatically?
        # cv.ReleaseCapture(self.video)

        # Report success
        return True
 def get_cv_image(self):
     """Return the next raw frame from the wrapped capture device."""
     return cv.QueryFrame(self.__capture)
Ejemplo n.º 12
0
 def handle_frame(self):
     """Grab one frame, rotate it, and push it into the frame buffer."""
     frame = cv.QueryFrame(self.capture)
     # Full-image slice converts the cv image into a numpy array.
     frame_array = numpy.asarray(frame[:, :])
     frame_array = self.rotate(frame_array)
     self.buffer_frame(frame_array)
Ejemplo n.º 13
0
# Shot-scoring script: for each video frame, find the brightest spot in the
# red channel and look up that pixel in the target image ("NEED_ME3.jpg")
# to score the shot.
mas = cv.LoadImage("NEED_ME3.jpg")
rora = cv.LoadImage("Q2.png")
q = 0  # shot index
# NOTE(review): [int] * 11 builds a list containing the int *type* eleven
# times, not zeros; entries are overwritten before use so it happens to
# work, but [None] * 11 was probably intended.
points = [int] * 11
capo = cv.CaptureFromFile("bobo.mpeg")
red = cv.CreateImage((1200, 900), 8, 1)  # red-channel scratch image
jingo = 0
x = 0
y = 0
R = [None] * 11
G = [None] * 11
B = [None] * 11
alpha = [None] * 11
sump = 0
while (1):
    telly = cv.QueryFrame(capo)
    #bingo=cv.CloneImage(telly)
    #red=cv.CreateImage(cv.GetSize(bingo),8,1)
    #bingo=cv.CloneImage(telly)
    cv.Zero(red)
    if (q == 9):
        break
    # Isolate the red channel and locate the brightest point (max location).
    cv.Split(telly, None, None, red, None)
    (_, _, _, jingo) = cv.MinMaxLoc(red, None)
    (x, y) = jingo
    # Color of the target image at the brightest spot.
    points[q] = cv.Get2D(mas, y, x)
    (B[q], G[q], R[q], alpha[q]) = points[q]
    print "Points scored in shot"
    print q + 1
    print "is:"
    print R[q]
    # NOTE(review): q is never incremented in the visible code, so the
    # q == 9 exit is unreachable -- the increment was probably lost when
    # this snippet was truncated.  Confirm against the original source.
Ejemplo n.º 14
0
# Camera-calibration setup: collect chessboard corner points from the video
# stream until num_boards boards have been captured.  num_boards,
# board_total, num_framestep, board_size, and cam are defined elsewhere in
# the file.
cv.NamedWindow("Snapshot")
cv.NamedWindow("Raw Video")

# Matrices that will hold the calibration inputs and outputs.
image_points = cv.CreateMat(num_boards * board_total, 2, cv.CV_32FC1)
object_points = cv.CreateMat(num_boards * board_total, 3, cv.CV_32FC1)
point_counts = cv.CreateMat(num_boards, 1, cv.CV_32SC1)
intrinsic = cv.CreateMat(3, 3, cv.CV_32FC1)
distortion = cv.CreateMat(4, 1, cv.CV_32FC1)

corners = None
corner_count = 0
successes = 0
step = 0
frame = 0

image = cv.QueryFrame(cam)
gray_image = cv.CreateImage(cv.GetSize(image), 8, 1)

# NOTE(review): in the visible code, image is never re-queried and
# successes is never incremented inside this loop, so as written it would
# process the same frame forever -- the rest of the loop body was probably
# lost in truncation.  Confirm against the original source.
while (successes < num_boards):
    frame += 1
    if (frame % num_framestep == 0):
        # FindChessboardCorners returns (found_flag, corner_list).
        corners = cv.FindChessboardCorners(
            image, board_size,
            cv.CV_CALIB_CB_ADAPTIVE_THRESH | cv.CV_CALIB_CB_FILTER_QUADS)
        corners = corners[1]
        cv.CvtColor(image, gray_image, cv.CV_BGR2GRAY)
        # Refine the corner positions to sub-pixel accuracy.
        cv.FindCornerSubPix(
            gray_image, corners, (11, 11), (0, 0),
            (cv.CV_TERMCRIT_EPS + cv.CV_TERMCRIT_ITER, 30, 0.1))
        if (len(corners) > 1):
            cv.DrawChessboardCorners(image, board_size, corners, 1)
# Color-blob tracking loop: grab frames, threshold for the target color,
# and clean up the mask before contour extraction.  MY_CAMERA, SMOOTHNESS,
# and thresholded_image are defined elsewhere in the file.
capture = cv.CaptureFromCAM(MY_CAMERA)
if not capture:
    print "I am blinded, check Camera Config"
    exit(1)


cv.NamedWindow('camera', cv.CV_WINDOW_AUTOSIZE)
cv.NamedWindow('threshed', cv.CV_WINDOW_AUTOSIZE)
cv.NamedWindow('cropped', cv.CV_WINDOW_AUTOSIZE)

# initialize position array (ring buffers for position smoothing)
positions_x, positions_y = [0]*SMOOTHNESS, [0]*SMOOTHNESS


while 1:
    image = cv.QueryFrame(capture)
#    image = cv.LoadImage("2012_automata.jpg")
    if not image:
        break

#    Blurring image
    image_smoothed = cv.CloneImage(image)
    cv.Smooth(image, image_smoothed, cv.CV_GAUSSIAN, 1)


    image_threshed = thresholded_image(image_smoothed)

    # Dilate then erode to close holes and drop speckle noise in the mask.
    cv.Dilate(image_threshed, image_threshed, None, 3)
    cv.Erode(image_threshed, image_threshed, None, 3)

    blobContour = None
Ejemplo n.º 16
0
def main():
    """Compute a per-frame motion measure for a movie and write it out as
    text files plus grayscale strip images.

    The working directory comes from sys.argv[1].  project.xml supplies the
    movie path; shots.txt lists one shot per line with its duration in the
    third tab-separated column.  Motion per frame is the mean fraction of
    changed pixels across the three color channels (abs-diff against the
    previous frame, threshold 10).  Writes motion.txt (one value per
    frame), motion_shot-avg.txt (average per shot), and motion_NNN.png
    strips of MAX_FRAMES rows each.

    Depends on module-level names defined elsewhere in the file:
    OUTPUT_DIR_NAME, WIDTH, MAX_FRAMES, DEBUG, and skip_frames.
    """
    os.chdir(sys.argv[1])
    try:
        os.mkdir(OUTPUT_DIR_NAME)
    except OSError:
        # directory already exists -- fine
        pass

    tree = et.parse("project.xml")

    movie = tree.getroot()
    file_path = movie.attrib["path"]

    if DEBUG:
        cv.NamedWindow("win", cv.CV_WINDOW_AUTOSIZE)
        cv.MoveWindow("win", 200, 200)

    cap = cv.CreateFileCapture(file_path)
    skip_frames(cap, movie)

    pixel_count = None
    prev_img = None

    global_frame_counter = 0
    file_counter = 0

    w = None
    h = None

    # Strip image: one row per processed frame, wrapped every MAX_FRAMES.
    output_img = cv.CreateImage((WIDTH, MAX_FRAMES), cv.IPL_DEPTH_8U, 3)

    f = open("shots.txt", "r")
    lines = [line for line in f if line]  # (start_frame, end_frame, duration)
    f.close()

    f_frm = open("motion.txt", "w")
    f_avg = open("motion_shot-avg.txt", "w")
    motion = []

    t = time.time()

    for nr, line in enumerate(lines):
        print(nr + 1), "/", len(lines)

        duration = int(line.split("\t")[2])

        for frame_counter in range(duration):
            img = cv.QueryFrame(cap)
            if not img:
                # unexpected end of video -- shots.txt promised more frames
                print "error?"
                print nr, frame_counter
                #break
                return

            if DEBUG:
                cv.ShowImage("win", img)

            global_frame_counter += 1

            if nr == 0 and frame_counter == 0:  # first shot, first frame
                w = img.width
                h = img.height
                pixel_count = float(img.width * img.height)
                # black reference frame for the very first diff
                prev_img = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_8U, 3)
                cv.Zero(prev_img)

            # Fraction of changed pixels, averaged over the three channels.
            diff = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_8U, 3)
            cv.AbsDiff(img, prev_img, diff)
            cv.Threshold(diff, diff, 10, 255, cv.CV_THRESH_BINARY)
            d_color = 0
            for i in range(1, 4):
                # COI = channel of interest; CountNonZero works per channel
                cv.SetImageCOI(diff, i)
                d_color += cv.CountNonZero(diff) / pixel_count
            d_color = d_color / 3  # 0..1
            #print "%.1f" % (d_color*100), "%"

            motion.append(d_color)
            cv.Copy(img, prev_img)

            # WRITE TEXT FILE
            f_frm.write("%f\n" % (d_color))
            if frame_counter == duration - 1:  # last frame of current shot
                motion_value = sum(motion) / len(motion)
                print "average motion:", motion_value
                f_avg.write("%f\t%d\n" % (motion_value, duration))
                motion = []

            # WRITE IMAGE
            if frame_counter == 0:  # ignore each first frame -- the diff after a hard cut is meaningless
                global_frame_counter -= 1
                continue
            else:
                # one gray row whose brightness encodes the motion measure
                for i in range(WIDTH):
                    value = d_color * 255
                    cv.Set2D(output_img,
                             (global_frame_counter - 1) % MAX_FRAMES, i,
                             cv.RGB(value, value, value))

            # Strip image full -- flush it to disk and start a new one.
            if global_frame_counter % MAX_FRAMES == 0:
                cv.SaveImage(
                    os.path.join(OUTPUT_DIR_NAME,
                                 "motion_%03d.png" % (file_counter)),
                    output_img)
                file_counter += 1

            if DEBUG:
                if cv.WaitKey(1) == 27:
                    break

    # Save the final, partially-filled strip (cropped via ROI).
    if global_frame_counter % MAX_FRAMES != 0:
        #cv.SetImageROI(output_img, (0, 0, WIDTH-1, (global_frame_counter % MAX_FRAMES)-1))
        cv.SetImageROI(output_img, (0, 0, WIDTH - 1,
                                    (global_frame_counter - 1) % MAX_FRAMES))
        cv.SaveImage(
            os.path.join(OUTPUT_DIR_NAME, "motion_%03d.png" % (file_counter)),
            output_img)

    f_frm.close()
    f_avg.close()

    if DEBUG:
        cv.DestroyWindow("win")

    print "%.2f min" % ((time.time() - t) / 60)
    #raw_input("- done -")
    return
Ejemplo n.º 17
0
def main():
    """Detect shot boundaries (hard cuts and fades to black) in a movie.

    Changes into the directory given as ``sys.argv[1]``, reads the video
    path from ``project.xml``, walks the video frame by frame and saves a
    quarter-size snapshot into OUTPUT_DIR_NAME for every detected cut.
    Passing ``bw`` as a second CLI argument switches to black-and-white
    mode, which uses a lower decision threshold.
    """
    BLACK_AND_WHITE = False
    THRESHOLD = 0.48       # combined-score cutoff for declaring a hard cut
    BW_THRESHOLD = 0.4     # lower cutoff used in black-and-white mode

    os.chdir(sys.argv[1])
    try:
        os.mkdir(OUTPUT_DIR_NAME)
    except:
        # output directory most likely exists already; reuse it
        pass

    if len(sys.argv) > 2:
        if sys.argv[2] == "bw":
            BLACK_AND_WHITE = True
            THRESHOLD = BW_THRESHOLD
            print "##########"
            print " B/W MODE"
            print "##########"

    tree = et.parse("project.xml")
    movie = tree.getroot()
    file_path = movie.attrib["path"]
    cap = cv.CreateFileCapture(file_path)

    if DEBUG:
        cv.NamedWindow("win", cv.CV_WINDOW_AUTOSIZE)
        cv.MoveWindow("win", 200, 200)

    # Histogram of the current / previous frame (method #2 state).
    hist = None
    prev_hist = None
    # Previous frame in HSV, for the per-pixel diff (method #1 state).
    prev_img = None

    pixel_count = None
    frame_counter = 0

    # Fade-to-black detection state (method #3).
    last_frame_black = False
    black_frame_start = -1

    t = time.time()

    while 1:
        img_orig = cv.QueryFrame(cap)

        if not img_orig:  # eof
            # Save the very last frame so the final shot also gets an image.
            # NOTE: the Windows-style "\\" path separator makes this script
            # Windows-only as written.
            cv.SaveImage(OUTPUT_DIR_NAME + "\\%06d.png" % (frame_counter - 1),
                         prev_img)
            """movie.set("frames", str(frame_counter))
			tree.write("project.xml")"""
            break

        # Work on a quarter-size copy to keep the per-frame cost low.
        img = cv.CreateImage(
            (int(img_orig.width / 4), int(img_orig.height / 4)),
            cv.IPL_DEPTH_8U, 3)
        cv.Resize(img_orig, img, cv.CV_INTER_AREA)

        if frame_counter == 0:  # first frame
            cv.SaveImage(OUTPUT_DIR_NAME + "\\%06d.png" % (0), img)
            pixel_count = img.width * img.height
            prev_img = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_8U, 3)
            cv.Zero(prev_img)

        if DEBUG and frame_counter % 2 == 1:
            cv.ShowImage("win", img)

        img_hsv = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_8U, 3)
        cv.CvtColor(img, img_hsv, cv.CV_BGR2HSV)

        # #####################
        # METHOD #1: find the number of pixels that have (significantly) changed since the last frame
        diff = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_8U, 3)
        cv.AbsDiff(img_hsv, prev_img, diff)
        cv.Threshold(diff, diff, 10, 255, cv.CV_THRESH_BINARY)
        d_color = 0
        # Count changed pixels per channel (COI 1..3 = H, S, V).
        for i in range(1, 4):
            cv.SetImageCOI(diff, i)
            d_color += float(cv.CountNonZero(diff)) / float(pixel_count)

        if not BLACK_AND_WHITE:
            d_color = float(d_color / 3.0)  # 0..1

        # #####################
        # METHOD #2: calculate the amount of change in the histograms
        h_plane = cv.CreateMat(img.height, img.width, cv.CV_8UC1)
        s_plane = cv.CreateMat(img.height, img.width, cv.CV_8UC1)
        v_plane = cv.CreateMat(img.height, img.width, cv.CV_8UC1)
        cv.Split(img_hsv, h_plane, s_plane, v_plane, None)
        planes = [h_plane, s_plane, v_plane]

        hist_size = [50, 50, 50]
        # NOTE(review): for 8-bit images cv.CV_BGR2HSV produces hue in
        # 0..179, so a 0..360 range leaves half of the H bins unused —
        # confirm this was intended.
        hist_range = [[0, 360], [0, 255], [0, 255]]
        if not hist:
            hist = cv.CreateHist(hist_size, cv.CV_HIST_ARRAY, hist_range, 1)
        cv.CalcHist([cv.GetImage(i) for i in planes], hist)
        cv.NormalizeHist(hist, 1.0)

        if not prev_hist:
            prev_hist = cv.CreateHist(hist_size, cv.CV_HIST_ARRAY, hist_range,
                                      1)
            # why is there no cv.CopyHist()?! (recompute instead)
            cv.CalcHist([cv.GetImage(i) for i in planes], prev_hist)
            cv.NormalizeHist(prev_hist, 1.0)
            # NOTE(review): this 'continue' skips the prev_img copy and the
            # frame_counter increment for the very first frame — confirm
            # this off-by-one is intended.
            continue

        # Intersection of two normalized histograms is 1.0 when identical,
        # so (1 - d_hist) below measures dissimilarity.
        d_hist = cv.CompareHist(prev_hist, hist, cv.CV_COMP_INTERSECT)

        # combine both methods to make a decision
        if ((0.4 * d_color + 0.6 * (1 - d_hist))) >= THRESHOLD:
            if DEBUG:
                if frame_counter % 2 == 0:
                    cv.ShowImage("win", img)
                winsound.PlaySound(soundfile,
                                   winsound.SND_FILENAME | winsound.SND_ASYNC)
            print "%.3f" % ((0.4 * d_color + 0.6 * (1 - d_hist))), "%.3f" % (
                d_color), "%.3f" % (1 - d_hist), frame_counter
            if DEBUG and DEBUG_INTERACTIVE:
                # Ask the operator to confirm the cut (6 == IDYES).
                if win32api.MessageBox(0, "cut?", "",
                                       win32con.MB_YESNO) == 6:  #yes
                    cv.SaveImage(
                        OUTPUT_DIR_NAME + "\\%06d.png" % (frame_counter), img)
            else:
                cv.SaveImage(OUTPUT_DIR_NAME + "\\%06d.png" % (frame_counter),
                             img)

        # Refresh prev_hist from the current planes for the next iteration.
        cv.CalcHist([cv.GetImage(i) for i in planes], prev_hist)
        cv.NormalizeHist(prev_hist, 1.0)

        # #####################
        # METHOD #3: detect series of (almost) black frames as an indicator for "fade to black"
        # V channel is 0..255, so <= 0.6 only fires on near-total black.
        average = cv.Avg(v_plane)[0]
        if average <= 0.6:
            if not last_frame_black:  # possible the start
                print "start", frame_counter
                black_frame_start = frame_counter
            last_frame_black = True
        else:
            if last_frame_black:  # end of a series of black frames
                # Place the cut marker in the middle of the black run and
                # save a solid green placeholder image there.
                cut_at = black_frame_start + int(
                    (frame_counter - black_frame_start) / 2)
                print "end", frame_counter, "cut at", cut_at
                img_black = cv.CreateImage(
                    (img_orig.width / 4, img_orig.height / 4), cv.IPL_DEPTH_8U,
                    3)
                cv.Set(img_black, cv.RGB(0, 255, 0))
                cv.SaveImage(OUTPUT_DIR_NAME + "\\%06d.png" % (cut_at),
                             img_black)
            last_frame_black = False

        cv.Copy(img_hsv, prev_img)
        frame_counter += 1

        if DEBUG:
            if cv.WaitKey(1) == 27:
                break

    if DEBUG:
        cv.DestroyWindow("win")

    print "%.2f min" % ((time.time() - t) / 60)
    #raw_input("- done -")
    return
Ejemplo n.º 18
0
    def run(self):
        """Track moving blobs from the capture device in an endless loop.

        Maintains a running average of the scene, thresholds the difference
        of each new frame against it, extracts contours, draws a bounding
        box per blob and a circle at the averaged center point, and shows
        the result in the "Target" window until ESC is pressed.
        """
        # Capture first frame to get size
        frame = cv.QueryFrame(self.capture)
        # NOTE(review): frame_size is computed but never used below.
        frame_size = cv.GetSize(frame)
        color_image = cv.CreateImage(cv.GetSize(frame), 8, 3)
        grey_image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
        # 32F accumulator required by cv.RunningAvg.
        moving_average = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_32F, 3)

        first = True

        while True:
            # NOTE(review): these two values are recomputed every iteration
            # but never read — presumably leftovers from an earlier version.
            closest_to_left = cv.GetSize(frame)[0]
            closest_to_right = cv.GetSize(frame)[1]

            color_image = cv.QueryFrame(self.capture)

            # Smooth to get rid of false positives
            cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 3, 0)

            if first:
                difference = cv.CloneImage(color_image)
                temp = cv.CloneImage(color_image)
                cv.ConvertScale(color_image, moving_average, 1.0, 0.0)
                first = False
            else:
                # Blend the new frame into the background model (alpha=0.02).
                cv.RunningAvg(color_image, moving_average, 0.020, None)

            # Convert the scale of the moving average.
            cv.ConvertScale(moving_average, temp, 1.0, 0.0)

            # Minus the current frame from the moving average.
            cv.AbsDiff(color_image, temp, difference)

            # Convert the image to grayscale.
            # NOTE(review): QueryFrame delivers BGR, so CV_BGR2GRAY would be
            # the matching code; for a grey diff the weighting barely matters.
            cv.CvtColor(difference, grey_image, cv.CV_RGB2GRAY)

            # Convert the image to black and white.
            cv.Threshold(grey_image, grey_image, 70, 255, cv.CV_THRESH_BINARY)

            # Dilate and erode to get people blobs
            cv.Dilate(grey_image, grey_image, None, 18)
            cv.Erode(grey_image, grey_image, None, 10)

            storage = cv.CreateMemStorage(0)
            contour = cv.FindContours(grey_image, storage, cv.CV_RETR_CCOMP,
                                      cv.CV_CHAIN_APPROX_SIMPLE)
            points = []

            # Walk the contour list; collect corner points and draw boxes.
            while contour:
                bound_rect = cv.BoundingRect(list(contour))
                contour = contour.h_next()

                pt1 = (bound_rect[0], bound_rect[1])
                pt2 = (bound_rect[0] + bound_rect[2],
                       bound_rect[1] + bound_rect[3])
                points.append(pt1)
                points.append(pt2)
                cv.Rectangle(color_image, pt1, pt2, cv.CV_RGB(255, 0, 0), 1)

            if len(points):
                # Fold all corners into one point by repeated midpoint —
                # a cheap (order-dependent) stand-in for a true centroid.
                center_point = reduce(
                    lambda a, b: ((a[0] + b[0]) / 2, (a[1] + b[1]) / 2),
                    points)
                cv.Circle(color_image, center_point, 40,
                          cv.CV_RGB(255, 255, 255), 1)
                # cv.Circle(color_image, center_point, 30, cv.CV_RGB(255, 100, 0), 1)
                # cv.Circle(color_image, center_point, 20, cv.CV_RGB(255, 255, 255), 1)
                # cv.Circle(color_image, center_point, 10, cv.CV_RGB(255, 100, 0), 1)
                print center_point

            cv.ShowImage("Target", color_image)

            # Listen for ESC key
            c = cv.WaitKey(7) % 0x100
            if c == 27:
                break
Ejemplo n.º 19
0
 def run(self):
     """Grab an initial frame from the capture device (used for its size)."""
     # Capture first frame to get size
     frame = cv.QueryFrame(self.capture)
Ejemplo n.º 20
0
def main():
    """Benchmark key-frame extraction and classification over a video list.

    Loads a classifier bundle (zip) given on the command line, then for each
    video listed in the ``videolist`` file: decodes key frames (one every
    KEY_FRAME_PERIOD seconds) into a ``tmp`` directory, fans classification
    out over worker processes, and appends timing statistics to
    ``statistics<N>.txt``.
    """
    import argparse
    import logging
    import os
    # NOTE(review): yaml is imported but not used in this function body.
    import yaml

    parser = argparse.ArgumentParser()
    parser.add_argument('classifier')
    parser.add_argument(
        '--postprocess',
        action="store_true",
        help='Run postprocessing, close blobs and remove noise')
    parser.add_argument('videolist',
                        help='A file listed all the videos to be indexed')
    parser.add_argument('cores',
                        type=int,
                        help='Number of processes of paralellism')
    args = parser.parse_args()

    logging.basicConfig(level=logging.WARNING,
                        format="%(asctime)s - %(message)s")

    # Unpack the trained models into module-level globals so the worker
    # target (calculate_class, defined elsewhere) can see them.
    classifier = zipfile.ZipFile(args.classifier)
    global forest0, svmmodels, training_bosts, hist0
    forest0, hist0, forest1, hist1, training_bosts, svmmodels, prior = \
        load_from_classifier(classifier)
    classifier.close()

    KEY_FRAME_PERIOD = 2  # in seconds

    #processes = args.cores
    #pool = Pool(processes = processes)

    # NOTE(review): args.cores is parsed but ignored — the worker count is
    # hard-coded to this list. Confirm whether that is intentional.
    for processes in [4]:
        video_list = open(args.videolist, 'r')
        log_file = open('statistics%d.txt' % processes, 'w')

        # Running totals for the per-run averages written at the end.
        fps = 0
        sps = 0
        fps_count = 0

        for video_file in video_list:
            video_file = video_file.strip()
            name = os.path.splitext(video_file)[0]
            file_path = os.path.join(VIDEO_RESOURCE, video_file)
            log_file.write(file_path + "\n")
            print file_path

            capture = cv.CaptureFromFile(file_path)
            frame_rate = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FPS)
            total_frames = cv.GetCaptureProperty(capture,
                                                 cv.CV_CAP_PROP_FRAME_COUNT)
            log_file.write("frame rate: %.3f, total frames: %d\n" %
                           (frame_rate, total_frames))
            print "frame rate: %.3f, total frames: %d\n" % (frame_rate,
                                                            total_frames)

            # Decode phase: dump one key frame every KEY_FRAME_PERIOD secs.
            start_time0 = time.time()
            key_frame_counter = 0
            frame = cv.QueryFrame(capture)
            # NOTE(review): os.makedirs raises if "tmp" is left over from a
            # previous aborted run — confirm cleanup is guaranteed elsewhere.
            os.makedirs("tmp")
            while frame:
                cv.SaveImage("tmp/" + name + "%d.png" % key_frame_counter,
                             frame)
                # Skip ahead by one key-frame period worth of frames.
                for i in xrange(int(KEY_FRAME_PERIOD * frame_rate)):
                    frame = cv.QueryFrame(capture)
                #calculate_class(image)
                #p = Process(target = calculate_class, args=(image, ))
                #p.start()
                key_frame_counter += 1
            del (capture)

            # Classification phase: one worker process per group.
            start_time = time.time()

            ps = []
            for group in xrange(processes):
                p = Process(target=calculate_class,
                            args=(
                                group,
                                name,
                                processes,
                            ))
                p.start()
                ps.append(p)
            for p in ps:
                p.join()

            #pool.map(calculate_class, range(processes))

            elapse_time = time.time() - start_time

            # NOTE(review): unix-only cleanup; shutil.rmtree would be portable.
            os.system("rm -rf tmp")

            log_file.write("decoding time: %.2f, total time: %.2f, key frames: %d, frame per sec: %.3f\n" \
                % (start_time - start_time0, elapse_time, key_frame_counter, key_frame_counter / elapse_time))
            fps += key_frame_counter / elapse_time
            sps += key_frame_counter / elapse_time * KEY_FRAME_PERIOD
            fps_count += 1

            # Cool-down between videos so runs don't interfere.
            time.sleep(10)

        video_list.close()
        log_file.write("average fps: %.3f\n" % (fps / fps_count))
        log_file.write("average sps: %.3f\n" % (sps / fps_count))
        #print "total tags: %d" % (sum(tag_number))
        log_file.close()
Ejemplo n.º 21
0
# Audio setup: initialize the pygame mixer and preload all drum/sound clips.
pygame.mixer.init(frequency=22050, size=-16, channels=2, buffer=4096)

snare = pygame.mixer.Sound("E_SNARE_02.wav")
tom1 = pygame.mixer.Sound("tomtomdrum7.wav")
tom2 = pygame.mixer.Sound("tomtomdrum6.wav")
hihat = pygame.mixer.Sound("hihat22.wav")
synchNoise = pygame.mixer.Sound("cowbell9.wav")
based = pygame.mixer.Sound("01-Based-Is-How-You-Feel-Inside.wav")
over = pygame.mixer.Sound("vegetaover9000.wav")
bonzo = pygame.mixer.Sound("Led-Zeppelin-Coda-Bonzo's-Montreux.wav")
yyz = pygame.mixer.Sound("Rush-yyz.wav")

# opencv stuff
CAMERA_INDEX = 0
capture = cv.CaptureFromCAM(CAMERA_INDEX)
sampleFrame = cv.QueryFrame(capture)
# NOTE(review): the size probed from the camera is immediately discarded
# and replaced with a hard-coded 1280x720 — confirm this is intentional.
imageSize = cv.GetSize(sampleFrame)
imageSize = 1280, 720

#pygame stuff
screen = pygame.display.set_mode(imageSize, 0)
#camlist = pygame.camera.list_cameras()
#print camlist
#cam = pygame.camera.Camera(camlist[0], imageSize)
#cam.start()
#cam.set_controls(hflip = True, vflip = False)
#snapshot = pygame.surface.Surface(imageSize, 0, screen)

class Stick(object):
    def __init__(self, color):
Ejemplo n.º 22
0
    def getImageCapture(videoFlow):
        """Capture the current image of a camera/IP-camera video flow.

        Opens the stream at *videoFlow*, grabs one frame and writes it to
        ``img/current.jpg``. Returns None.
        """
        capture = cv.CaptureFromFile(videoFlow)
        frame = cv.QueryFrame(capture)
        # cv.SaveImage returns None, so binding its result to a variable
        # (as the original did) was misleading dead code — the call is
        # made purely for its file-writing side effect.
        cv.SaveImage("img/current.jpg", frame)
Ejemplo n.º 23
0
# Play a video file frame by frame at (approximately) its native frame rate.
import cv

vidFile = cv.CaptureFromFile('crash-480.mp4')

nFrames = int(cv.GetCaptureProperty(vidFile, cv.CV_CAP_PROP_FRAME_COUNT))
fps = cv.GetCaptureProperty(vidFile, cv.CV_CAP_PROP_FPS)
# Per-frame delay in ms; fps is a float, so 1/fps is a true division even
# on Python 2 (the trailing "/ 1" is a no-op).
waitPerFrameInMillisec = int(1 / fps * 1000 / 1)

print 'Num. Frames = ', nFrames
print 'Frame Rate = ', fps, ' frames per sec'

for f in xrange(nFrames):
    frameImg = cv.QueryFrame(vidFile)
    cv.ShowImage("My Video Window", frameImg)
    # WaitKey both paces playback and pumps the GUI event loop.
    cv.WaitKey(waitPerFrameInMillisec)

# When playing is done, delete the window
#  NOTE: this step is not strictly necessary,
#         when the script terminates it will close all windows it owns anyways
cv.DestroyWindow("My Video Window")

# import numpy as np
# import cv2

# cap = cv2.VideoCapture('crash-480.mp4')

# while(cap.isOpened()):
#     ret, frame = cap.read()

#     gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
Ejemplo n.º 24
0
 def getFrame(self):
     """Return the next frame grabbed from this object's capture stream."""
     return cv.QueryFrame(self.capture)
# GUI and camera setup for the color-tracking loop below.
# NOTE(review): the window created is "hsv_frame" but MoveWindow targets
# "webcam" — confirm which window name is actually intended.
cv.NamedWindow("hsv_frame", 1)
cv.MoveWindow('webcam', 450, 0)
cam = cv.CaptureFromCAM(1)
# Current and previous tracked-object coordinates.
pos_x = 0
pos_y = 0
last_x = 0
last_y = 0
# my creation to find good ball position
count_of_measurements = 0
not_found_two_circles = 0
dead_end = 0
direction = 1

while True:
    
    frame = cv.QueryFrame(cam)
                
    #blur the source image to reduce color noise 
    #cv.Smooth(frame, frame, cv.CV_BLUR, 3); 
    
    #convert the image to hsv(Hue, Saturation, Value) so its  
    #easier to determine the color to track(hue) 
    timesample = time.clock()
    time.sleep(1)
    print timesample
    hsv_frame = cv.CreateImage(cv.GetSize(frame), 8, 3) 
    cv.CvtColor(frame, hsv_frame, cv.CV_BGR2HSV) 
    
    #limit all pixels that don't match our criteria, in this case we are  
    #looking for purple but if you want you can adjust the first value in  
    #both turples which is the hue range(120,140).  OpenCV uses 0-180 as  
Ejemplo n.º 26
0
def main():
    """Track red and yellow finger-tips via webcam and drive a fake mouse.

    Thresholds each camera frame for red and yellow markers (helpers
    GetRedThresholded / GetYellowThresholded are defined elsewhere),
    estimates each marker's position from image moments, moves the
    pointer through the native ``libhelper`` library, and emits a click
    whenever the two finger-tips come close enough together. Returns 0
    on normal exit, -1 when no camera is available.
    """
    # Initialize capturing live feed from the camera
    capture = 0
    capture = cv.CaptureFromCAM(0)

    # Couldn't get a device? Throw an error and quit
    if (not capture):
        print "Could not initialize capturing...\n"
        return -1

    # The two windows we'll be using
    cv.NamedWindow("video")
    cv.NamedWindow("thresh")

    # This image holds the "scribble" data...
    # the tracked positions of the ball
    imgScribble = 0

    # a flag which indicates a valid mouse click
    clicked = 0

    # to hold the previous co-ordinate values.
    prevXred = 0
    prevYred = 0
    prevXyellow = 0
    prevYyellow = 0

    #initialising fake motioning
    import ctypes
    c = ctypes.CDLL("libhelper.so.1")
    libc = ctypes.CDLL("libc.so.6")
    c.helper_init()

    # -1 marks "not seen yet" for both marker positions.
    Xred = -1
    Yred = -1
    Xyellow = -1
    Yyellow = -1
    # An infinite loop
    while (True):
        #----------------------------------------------------------------------------------------

        # Will hold a frame captured from the camera
        frame = 0
        frame = cv.QueryFrame(capture)

        # If we couldn't grab a frame... quit
        if (not frame):
            break

        # If this is the first frame, we need to initialize it
        # NOTE(review): comparing an IplImage to 0 relies on the object not
        # comparing equal to 0 after creation — works here, but fragile.
        if (imgScribble == 0):
            imgScribble = cv.CreateImage(cv.GetSize(frame), 8, 3)

        # representative co-ordinates of RED & YELLOW finger-tips, initialized.
        dx = 0
        dy = 0

        #----------------------------------------------------------------------------------------

        # Holds the RED thresholded image (red = white, rest = black)
        imgRedThresh = GetRedThresholded(frame)
        moments = 0
        # Calculate the moments to estimate the position of RED finger-tip
        moments = cv.Moments(imgRedThresh)

        # The actual moment values
        moment01 = cv.GetSpatialMoment(moments, 0, 1)
        moment10 = cv.GetSpatialMoment(moments, 1, 0)
        area = cv.GetSpatialMoment(moments, 0, 0)

        # Skip the frame entirely when no red pixels were found.
        if (area == 0):
            continue

        prevXred = Xred
        prevYred = Yred
        # NOTE(review): always true here — area == 0 was handled above,
        # so this inner check is dead code.
        if area:
            # Centroid = first-order moments divided by the area.
            Xred = moment10 / area
            Yred = moment01 / area

        # Print it out for debugging purposes
        #print "position "+ str(Xred)+' '+ str(Yred)+'\n'

        #----------------------------------------------------------------------------------------

        # Holds the YELLOW thresholded image (yellow = white, rest = black)
        imgYellowThresh = GetYellowThresholded(frame)
        moments = 0
        # Calculate the moments to estimate the position of YELLOW finger-tip
        moments = cv.Moments(imgYellowThresh)

        # The actual moment values
        moment01 = cv.GetSpatialMoment(moments, 0, 1)
        moment10 = cv.GetSpatialMoment(moments, 1, 0)
        area = cv.GetSpatialMoment(moments, 0, 0)

        if (area == 0): continue

        prevXyellow = Xyellow
        prevYyellow = Yyellow
        Xyellow = moment10 / area
        Yyellow = moment01 / area

        #dx = (Xyellow - prevXyellow)
        #dy = (Yyellow - prevYyellow)

        #x=1390-Xyellow*2.125
        #y=Yyellow*1.575

        # Map the midpoint of both markers into screen coordinates.
        # NOTE(review): the 1390/1.125/0.7875 factors look calibrated for a
        # specific screen/camera geometry — confirm before reuse.
        Xt = 1390 - (abs(Xred + Xyellow) * 1.125)
        Yt = (abs(Yred + Yyellow) * 0.7875)

        libc.usleep(150)
        c.helper_mov_absxy(int(Xt), int(Yt))
        #c.helper_mov_relxy(int(dx),int(dy))
        # Print it out for debugging purposes
        #print "position "+ str(Xyellow)+' '+ str(Yyellow)

        #----------------------------------------------------------------------------------------

        # find distance between RED & YELLOW finger-tips
        # for faster calculation, individually Xdiff, Ydiff
        # considered, instead of sqrt((x2-x1)^2 + (y2-y2)^2)

        Xdiff = abs(Xred - Xyellow)
        Ydiff = abs(Yred - Yyellow)

        # determine the 'clicked' state, using approximation to circle method.
        d = 50
        #if((21*Xdiff + 50*Ydiff <= 50*d) and (50*Xdiff + 21*Ydiff <= 50*d)):
        # Squared-distance test avoids the sqrt: inside a d-pixel circle.
        if (Xdiff * Xdiff + Ydiff * Ydiff < d * d):
            #if(Xdiff < 25 and Ydiff < 25):
            if (not clicked):
                clicked = 1
                print "clicked"

        else:
            if (clicked):
                clicked = 0

        if clicked:
            c.helper_press()
            c.helper_release()

        # convey prevXred,prevYred,Xred,Yred,clicked for generation of appropriate mouse event.

        #libc.sleep(2)
        #c.helper_mov_relxy()

        #c.helper_mov_relxy()
        #libc.sleep(2)
        #if clicked
        #c.helper_press()
        #c.helper_release()
        #----------------------------------------------------------------------------------------


#		# We want to draw a line only if its a valid position
#		if(lastX>0 and lastY>0 and posX>0 and posY>0):
#
#			# Draw a yellow line from the previous point to the current point
#			cv.Line(imgScribble, cv.Point(posX, posY), cv.Point(lastX, lastY), cv.Scalar(0,255,255), 5)
#
#
#		# Add the scribbling image and the frame... and we get a combination of the two
#		cv.Add(frame, imgScribble, frame)
#cv.ShowImage("thresh", imgYellowThresh)
#cv.ShowImage("video", frame)

#----------------------------------------------------------------------------------------

# Wait for a keypress
        cin = cv.WaitKey(10)
        if not (cin == -1):
            # If pressed, break out of the loop
            break

        # Release the thresholded image... we need no memory leaks.. please

        #----------------------------------------------------------------------------------------

    # We're done using the camera. Other applications can now use it
    return 0
Ejemplo n.º 27
0
    f.write("v " + str(v) + chr(10))
    f.write("e " + str(e) + chr(10))
    f.write("s " + str(s) + chr(10))
    f.close()


# Initial servo angles (degrees), presumably vertical / horizontal plus two
# auxiliary channels written out by move() — TODO confirm channel meanings.
v = 90
h = 90
e = 90
s = 90
# Step sizes per keypress for the control loop below.
STEP = 6
ENSTEP = 30
SSTEP = 1
cv.NamedWindow("CamView")
while True:
    im = cv.QueryFrame(camera)
    cv.ShowImage("CamView", im)
    #    im = cv.CreateMat(im)
    c = cv.WaitKey(33)
    if c == 27:
        break
    if c == ord('a'):
        h += STEP
        move()
    if c == ord('d'):
        h -= STEP
        move()
    if c == ord('w'):
        v -= STEP
        move()
    if c == ord('s'):
Ejemplo n.º 28
0
                                      options.min_obj_size)

    # Now open the video for input
    inStream = cv.CaptureFromFile(inputFile)

    fps = cv.GetCaptureProperty(inStream, cv.CV_CAP_PROP_FPS)

    # Next open the file for output
    outStream = cv.CreateVideoWriter(
        outputFile, cv.CV_FOURCC('P', 'I', 'M', '1'), fps,
        (cv.GetCaptureProperty(inStream, cv.CV_CAP_PROP_FRAME_WIDTH),
         cv.GetCaptureProperty(inStream, cv.CV_CAP_PROP_FRAME_HEIGHT)),
        1)  # is_color

    curTime = 0
    cvFrame = cv.QueryFrame(inStream)
    while (cvFrame is not None):

        frame = np.asarray(cv.GetMat(cvFrame)).astype(np.float64) / 255.0
        motionExtractor.AddImage(frame, curTime)

        if (motionExtractor.RetreiveObjects() is not None
                and (curTime - motionExtractor.time() < 5.0 / fps)):
            # Create a new frame showing the objects in red
            outFrame = frame
            outFrame[:,:,2] = outFrame[:,:,2]*0.6 + 0.4 * \
                              motionExtractor.RetreiveObjects().ToBinaryImageMask().astype(np.float64)

            outFrame *= 255.0
            cv.WriteFrame(outStream,
                          cv.GetImage(cv.fromarray(frame.astype(np.uint8()))))
Ejemplo n.º 29
0
# Snapshot utility: grab one frame from camera #1 and write it to the
# path given as the first command-line argument. With no argument the
# script is a no-op.
import cv
import sys

if len(sys.argv) > 1:
    cam = cv.CaptureFromCAM(1)
    snapshot = cv.QueryFrame(cam)
    cv.SaveImage(sys.argv[1], snapshot)
Ejemplo n.º 30
0
    def charger_modules(self):
        """Reload the module list from the ``modules.js`` JSON file.

        Clears the list widget, re-reads the JSON description of the
        modules (useful after editing ``modules.js``) into
        ``self.modules``, and adds each module's ``"nom"`` (name) entry
        back to the widget.
        """
        self.liste_modules.clear()
        # Use a context manager so the file handle is closed
        # deterministically (the original open(...).read() leaked the
        # handle until garbage collection).
        with open("modules.js") as module_file:
            self.modules = json.load(module_file)
        for module in self.modules:
            self.liste_modules.addItem(module["nom"])


if __name__ == "__main__":
    # Ce code ne s'exécute que si on exécute directement le module (pas en cas d'import)
    webcam = cv.CaptureFromCAM(0)
    print "Appuyez sur S pour prendre la photo"
    while True:
        image = cv.GetSubRect(cv.QueryFrame(webcam), (281, 191, 80, 100))
        cv.ShowImage("webcam", image)
        key = cv.WaitKey(1)
        # La touche pour prendre la photo est la touche s
        # Sous le linux utilisé pour tester, la valeur correspondante
        # était 1048691.
        # Sous Windows, la valeur était ord("s"), soit 115.
        # Sous certaines machine, cette valeur peut être "s".
        if key in [1048691, ord("s"), "s"]:
            cv.SaveImage("visage.bmp", image)
            break
    # thira = THIbault RAlph
    # sys.argv = les arguments de la commande exécutée,
    # par exemple ["interface.py", "-o3", "visage.bmp"]
    # Inutile pour nous mais demandé dans le constructeur de QApplication
    thira = QApplication(sys.argv)