Example #1
    def __init__(self):
        self.threshold_value = THRESH  # value associated with slider bar
        self.capture = cv.CaptureFromCAM(0)
        cv.SetCaptureProperty(self.capture, cv.CV_CAP_PROP_FRAME_WIDTH, 640)
        cv.SetCaptureProperty(self.capture, cv.CV_CAP_PROP_FRAME_HEIGHT, 480)

        self.hc = cv.Load(HAARCASCADE)
        self.ms = cv.CreateMemStorage()

        self.orig = cv.QueryFrame(self.capture)
        if not self.orig:
            print "can't get frame, check camera"
            sys.exit(2)

        self.width = self.orig.width
        self.height = self.orig.height
        self.size = (self.width, self.height)
        self.smallwidth = int(self.width / SCALING)
        self.smallheight = int(self.height / SCALING)
        self.smallsize = (self.smallwidth, self.smallheight)

        # alloc mem for images
        self.small = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 3)
        self.visualize = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 3)
        self.bw = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 1)
        self.hsv = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 3)
        self.hue = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 1)
        self.sat = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 1)
        self.val = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 1)
        self.bp = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 1)
        self.scaled = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 1)
        self.th = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 1)
        self.morphed = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 1)
        self.temp = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 1)
        self.temp3 = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 3)
        self.result = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 3)
        self.hist_image = cv.CreateImage((320, 200), cv.IPL_DEPTH_8U, 1)
        self.scaled_c = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 3)
        self.hue_c = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 3)
        self.sat_c = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 3)
        self.th_c = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 3)
        self.morphed_c = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 3)

        # Greyscale image, thresholded to create the motion mask:
        self.grey_image = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 1)

        # The RunningAvg() function requires a 32-bit or 64-bit image...
        self.running_average_image = cv.CreateImage(self.smallsize,
                                                    cv.IPL_DEPTH_32F, 3)
        # ...but the AbsDiff() function requires matching image depths:
        self.running_average_in_display_color_depth = cv.CloneImage(self.small)

        # RAM used by FindContours():
        self.mem_storage = cv.CreateMemStorage(0)
        print "here"

        # The difference between the running average and the current frame:
        self.difference = cv.CloneImage(self.small)

        self.target_count = 1
        self.last_target_count = 1
        self.last_target_change_t = 0.0
        self.k_or_guess = 1
        self.codebook = []
        self.frame_count = 0
        self.last_frame_entity_list = []

        # make matrix for erode/dilate
        MORPH_SIZE = 3
        center = (MORPH_SIZE / 2) + 1
        self.morpher_small = cv.CreateStructuringElementEx(
            MORPH_SIZE, MORPH_SIZE, center, center, cv.CV_SHAPE_ELLIPSE)
        # self.morpher_small = cv.CreateStructuringElementEx(cols=MORPH_SIZE, rows=MORPH_SIZE, anchor_x=center, anchor_y=center, shape=cv.CV_SHAPE_ELLIPSE)
        MORPH_SIZE = 11
        center = (MORPH_SIZE / 2) + 1
        self.morpher = cv.CreateStructuringElementEx(MORPH_SIZE, MORPH_SIZE,
                                                     center, center,
                                                     cv.CV_SHAPE_ELLIPSE)

        # alloc mem for histogram
        self.hist = cv.CreateHist([HUEBINS, SATBINS], cv.CV_HIST_ARRAY,
                                  [[0, 180], [0, 255]], 1)

        # initialize
        #cv.CvtColor(self.small, self.bw, cv.CV_BGR2GRAY)
        #cv.CvtColor(self.small, self.hsv, cv.CV_BGR2HSV)
        #cv.CalcArrHist([self.hue, self.sat], self.hist)

        # video writer
        if STORE:
            self.writer = cv.CreateVideoWriter(OUTPUT,
                                               cv.CV_FOURCC(
                                                   'M', 'J', 'P', 'G'),
                                               15,
                                               cv.GetSize(self.combined),
                                               is_color=1)

        # make window
        cv.NamedWindow('Skin Detection')
        cv.CreateTrackbar('Threshold', 'Skin Detection', self.threshold_value,
                          255, self.change_threshold)
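
The trackbar above registers self.change_threshold as its callback, but the callback itself is outside this excerpt. As a minimal sketch (hypothetical, not the original implementation), CreateTrackbar passes the new slider position as the only argument:

    def change_threshold(self, position):
        # hypothetical sketch: called by the trackbar with the new slider value
        self.threshold_value = position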
Example #2

print("> Recording ..")

framesno = 0
lastsize = 0
writer = None
recording = True
printerror = True

capture = cv.CaptureFromCAM(0)
width = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH)
height = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT)
filename = 'CamCapture.avi'

writer = cv.CreateVideoWriter(filename, cv.CV_FOURCC('M', 'P', '4', '2'), 8,
                              (int(width), int(height)), True)
start = datetime.datetime.now()
now = datetime.datetime.now()

while recording:
    try:
        image = cv.QueryFrame(capture)

        if framesno % 100 == 0:
            print(
                "Resources datagram for " + str(framesno) + " frames in " +
                str(round((datetime.datetime.now() -
                           now).total_seconds(), 2)) + "/" +
                str(round((datetime.datetime.now() -
                           start).total_seconds(), 2)) + " s: " +
                json.dumps(memoryinfo()))
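
The excerpt stops inside the resource-logging branch. Purely as an illustration (not the original's continuation), a typical iteration of this loop would write the grabbed frame and advance the frame counter:

        # hypothetical continuation sketch, not part of the original script
        cv.WriteFrame(writer, image)
        framesno += 1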
Example #3
                    sys.stdout.flush()
                    break
            else:
                connected = True

            # Stop the recording
            if has_data and datadec.find('stop') != -1 and writer is not None:
                print("WEBCAM: Stopping video")
                del (writer)
                writer = None
                # cmd = ['a2mp4.sh', '%s' % fn, '%s.mp4' % fn[:-4]]
                # proc = subprocess.Popen(cmd, bufsize=-1)
            if has_data and datadec.find('avi') != -1 and datadec.find(
                    'stop') == -1:
                if writer is None:
                    print("WEBCAM: Starting video")
                    fn = datadec[datadec.find("begin") + 5:datadec.find("end")]
                    writer = cv.CreateVideoWriter(
                        fn, cv.CV_FOURCC('X', 'V', 'I', 'D'), fps,
                        (width, height), True)
                connected = True

        # except:
        #     pass

        # print

    cv.DestroyWindow("Webcam")
    s.close()
    sys.stdout.write("done\n")
    sys.exit(0)
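
Only the shutdown path is shown above; the per-frame write that the start/stop commands control happens earlier in the loop. A detached sketch of that guard, assuming the same writer plus a capture and frame from the parts of the script not shown here:

# hypothetical sketch of the write guard implied by the start/stop logic above
frame = cv.QueryFrame(capture)
if writer is not None:
    cv.WriteFrame(writer, frame)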
Example #4
    def __init__(self, filename, fourcc=('D', 'I', 'V', 'X'),
                 fps=30.0, frame_size=(256, 256), is_color=True):
        self.writer = cv.CreateVideoWriter(filename, cv.CV_FOURCC(*fourcc),
                                           fps, frame_size, int(is_color))
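
Only __init__ is shown; a hedged usage sketch, assuming the method above belongs to a class named VideoSink (the class name and the synthetic frame are illustrative):

# hypothetical usage of the wrapper defined above
sink = VideoSink('clip.avi', fourcc=('M', 'J', 'P', 'G'), fps=15.0,
                 frame_size=(256, 256), is_color=True)
img = cv.CreateImage((256, 256), cv.IPL_DEPTH_8U, 3)  # must match frame_size
for _ in range(30):
    cv.WriteFrame(sink.writer, img)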
Example #5
        WIDTH = int(cv.GetCaptureProperty(c, cv.CV_CAP_PROP_FRAME_WIDTH))
        # Frame height used for the video
        HEIGHT = int(cv.GetCaptureProperty(c, cv.CV_CAP_PROP_FRAME_HEIGHT))
        print WIDTH, HEIGHT
except:
    print "No se especifico video de entrada, usando camara web."
    print "Usa %s -h   o %s --help para conocer los parametros." % (
        sys.argv[0], sys.argv[0])

########################################################################################
# Read the output file, if one was specified.
########################################################################################
try:
    if ".avi" in sys.argv[2]:
        VIDEO_WRITER = cv.CreateVideoWriter(sys.argv[2],
                                            cv.CV_FOURCC('P', 'I', 'M', '1'),
                                            30, (WIDTH, HEIGHT), True)
        print "Grabando video en: %s" % sys.argv[2]
except:
    print "No se especifico video de salida."
    print "Usa %s -h   o %s --help para conocer los parametros." % (
        sys.argv[0], sys.argv[0])
########################################################################################
# Webcam image-capture properties.
########################################################################################
cv.SetCaptureProperty(c, cv.CV_CAP_PROP_FRAME_WIDTH,
                      WIDTH)  # set video width
cv.SetCaptureProperty(c, cv.CV_CAP_PROP_FRAME_HEIGHT,
                      HEIGHT)  # set video height
#cv.SetCaptureProperty( c, cv.CV_CAP_PROP_FPS, FPS )
#cv.SetCaptureProperty( c, cv.CV_CAP_PROP_BRIGHTNESS, BRIGHTNESS )
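
Only the setup is shown above. A minimal sketch of the grab/write loop that would typically follow, assuming the capture c from above and writing only when VIDEO_WRITER was created (the window name is illustrative; this is not the original script's loop):

# hypothetical capture/write loop, not the original code
while True:
    frame = cv.QueryFrame(c)
    if frame is None:
        break
    if 'VIDEO_WRITER' in globals():  # only defined when an output file was given
        cv.WriteFrame(VIDEO_WRITER, frame)
    cv.ShowImage('salida', frame)
    if cv.WaitKey(10) == 27:  # Esc
        break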
Example #6
    segmasked = []

    i = 0

    param_str = '%s-%s_seg%s_%s-%s' % (target_frame_start, target_frame_stop,
                                       opts.seglen, thresh_str,
                                       opts.video_suffix)
    if opts.video_suffix:
        vidout = opts.vid[:-4] + '_%s.avi' % (param_str)
        print >> sys.stderr, 'write output video to %s' % vidout
        try:
            os.unlink(vidout)
        except:
            pass
        pylab.gray()
        vidwriter = cv.CreateVideoWriter(vidout, cv.FOURCC('x', 'v', 'i', 'd'),
                                         fps, pixdim, 1)

    analysis_root = os.path.join(opts.vid[:-4], 'analysis', param_str)
    print >> sys.stderr, 'write analysis results to %s' % analysis_root
    try:
        os.makedirs(analysis_root)
    except:
        pass

    #prepare output tarfile names
    tarfiles = dict([(fname,os.path.join(analysis_root,fname+'.tar')) \
                     for fname in ['miceols','objs','objs_fols','objs_sizes','prevact_ols','newact_ols','grounds','mousemasks','digdiffs','segavgs']])

    open(os.path.join(analysis_root, 'SHAPE'), 'w').write(SHAPE.__repr__())
    times = []
Example #7
def run_real_time_recognition(para_path, Labels):

    status_dictionary = {}
    # status, pos, radius, color, text, pos, font_color
    # states:
    #   0 -> waiting to be hovered
    #   1 -> hovered waiting to be selected(clicked)
    #   2 -> selected waiting to be unselected(clicked)
    start_time = 0
    status_dictionary['b1'] = [False, (530, 70), 60, (255, 255, 0), 'Record', (490, 70), (0,0,0), [], False]
    status_dictionary['b2'] = [False, (380, 70), 60, (0, 255, 0), 'Select', (350, 70), (0,0,0), [], False]
    status_dictionary['b3'] = [False, (240, 70), 60, (0, 255, 255), 'Billard', (210, 70),(0,0,0), [], False]
    status_dictionary['b4'] = [False, (100, 270), 90, (255, 255, 255), 'Drag Me', (70, 270),(0,0,0), [], False]

    global depth,ir, rgb
    count = 0

    # frame_size = (480,640)
    # Setting web cam config
    capture=cv.CaptureFromCAM(0)
    fourcc = cv.CV_FOURCC('X','V','I','D')
    cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FPS, 25)
    cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH, 640)
    cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT, 480)

    # Neuronet Configuration
    resize_row = 20
    resize_width = 20
    weights = loadmat(para_path)
    T1 = weights['Theta1']
    T2 = weights['Theta2']
    history_prediction = [] # smoothing and other purpose
    history_gesture_pos = [] # smoothing and other purpose

    # Recording
    record_st = False
    rgb_writer = cv.CreateVideoWriter("recording.avi", cv.CV_FOURCC('X','V','I','D'), 5, (640, 480), True)

    # Capture frames IR, RGB, Depth
    while True:

        # Web cam feed (300, 400, 3)
        rgb_ipl = cv.QueryFrame(capture)

        # Depth IR feed
        (depth,_), (ir,_) = get_depth(), get_video(format=2)

        ir = (ir>150).astype(float)
        ir = ir*255
        ir_ipl = resize_ir_callibrate_with_rgb(ir)

        new_rgb_ipl = cv.CreateImage(cv.GetSize(rgb_ipl), 8, 3)


        #Billard Mode
        yo = rgb_ipl
        f = iplimage_to_numpy_color(yo)
        green_mono = f[:,:,1]
        #image = cv.fromarray(np.array(green_mono[:,:]))
        #cv.ShowImage('G', image)

        rgb_np, threshold_np, contour_list = billard_extract_and_draw_countour(f, 20, green_mono, 120, 0)
        image = cv.fromarray(np.array(rgb_np))

        #print contour_list
        maxx = (0,0,0,0)
        for pos in contour_list:
            if pos[1] > maxx[1]:
                maxx = pos
        #print maxx

        for item in contour_list:
            if maxx != item:
                cv.Line(image, (maxx[0]+maxx[2]/2, maxx[1]+maxx[3]/2), (item[0]+item[2]/2,item[1]+item[3]/2), (0,255,0), thickness=1, lineType=8, shift=0)
        #cv.ShowImage('G Threshold', image)
        new_rgb_ipl = cvMat_to_iplimage_color(image)
        #cv.ShowImage('G Threshold', new_rgb_ipl)

        # Hand Segmentation
        rgb_np, ir_np, contour_list, history_gesture_pos = real_time_extract_and_draw_countour(ir_ipl, rgb_ipl, 20000, history_gesture_pos)

        # Gesture Recognition
        if contour_list:
            ir_ipl, rgb_ipl, history_prediction = real_time_gesture_recognition_and_labeling(ir_np, rgb_np, contour_list, T1, T2, Labels, history_prediction, False)

            # Update button status
            status_dictionary, start_time = update_button_status(contour_list, history_prediction, Labels, status_dictionary, history_gesture_pos, False, start_time)

        draw_menu_button(ir_ipl, rgb_ipl, status_dictionary, start_time)


        # resize for full screen display
        """
        rgb_np = iplimage_to_numpy_color(rgb_ipl)
        rgb_np = imresize(rgb_np, (800, 1066))
        image = cv.fromarray(np.array(rgb_np))
        cv.ShowImage('rgb', image)
        """
        if status_dictionary['b3'][0]:
            opacity = 0.4
            cv.AddWeighted(new_rgb_ipl, opacity, rgb_ipl, 1 - opacity, 0, rgb_ipl)

        if status_dictionary['b1'][0]:
            cv.WriteFrame(rgb_writer, rgb_ipl)
        else:
            record_st = False

        cv.ShowImage('rgb', rgb_ipl)
        cv.ShowImage('ir', ir_ipl)

        c=cv.WaitKey(5)
        if c==27: #Break if user enters 'Esc'.
            break
Example #8
                                          '*.h5'))  # find all h5 files

    for infile in filelist:
        print infile
        File = infile.split('\\')[-1][:-3]

        f = h5py.File(infile, "r")

        if len(f['imgs'].keys()) == 2:

            for j in xrange(2):
                if j == 0:
                    size = (512, 424)

                    video = cv.CreateVideoWriter(
                        savepath + File + '_bdidx.avi',
                        cv.CV_FOURCC('X', 'V', 'I', 'D'), fps, size, True)
                    cimg = f['imgs']['bdimgs']
                else:
                    size = (512, 424)
                    video = cv.CreateVideoWriter(
                        savepath + File + '_d.avi',
                        cv.CV_FOURCC('X', 'V', 'I', 'D'), fps, size, True)
                    cimg = f['imgs']['dimgs']

                for i in cimg.keys():
                    bitmap = cv.CreateImageHeader(size, cv.IPL_DEPTH_8U, 3)
                    if j == 1:
                        cv.SetData(
                            bitmap,
                            np.uint8(cimg[i][:] / 256.).tostring(),
Example #9
import cv

# connect to the camera. 0 refers to which camera (look for /dev/videoN)
capture = cv.CaptureFromCAM(0)
if not capture:
    print "could not capture from camera!"
    exit(1)

# grab a frame and display it
print "displaying camera image [press any key to continue]"
image = cv.QueryFrame(capture)
cv.NamedWindow('camera', cv.CV_WINDOW_AUTOSIZE)
cv.ShowImage('camera', image)
cv.WaitKey(10000)

# save the image to the filesystem
cv.SaveImage('image.png', image)

# create a video writer
writer = cv.CreateVideoWriter("output.avi", 0, 15, cv.GetSize(image), 1)

# capture a series of images and write to the file
count = 0
while count < 250:
    image = cv.QueryFrame(capture)
    cv.WriteFrame(writer, image)
    cv.ShowImage('camera', image)
    cv.WaitKey(2)
    count += 1
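
The legacy cv bindings finalize the file when the writer object goes away; the other examples in this collection (see Examples #12 and #17 below) do this by deleting the writer. A short cleanup sketch in the same spirit:

# finalize the AVI and free the camera by dropping the references
del writer
del capture
cv.DestroyWindow('camera')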
Example #10
#	3D Volumetric Display Project
#	required: Python 2.7, 32-bit, OpenCV for python
#

#!/usr/bin/python
import cv
import sys

#dirc = "C:/Users/Bereket/Documents/BlenderModels/run0/"
dirc = "C:/Users/Bereket/Documents/3D Display/test_runs/"
#cv.LoadImage('picture.png', cv.CV_LOAD_IMAGE_COLOR)
name = "test4"

im0 = cv.LoadImage(dirc + name + ".png")
if not im0:
    print "Could not load im0"

fps = 60.0
frame_size = cv.GetSize(im0)
#codec = CV_FOURCC('D', 'I', 'V', 'X')
codec = 0
writer = cv.CreateVideoWriter(dirc + name + ".avi", codec, fps, frame_size,
                              True)
if not writer:
    print "Error in creating video writer"
    sys.exit(1)
else:
    for i in range(600):
        cv.WriteFrame(writer, im0)

#cv.ReleaseVideoWriter(writer)
Example #11
def WriteVideoCapture(videoPathFileName, folderName):
    # OpenCV can read only AVIs - not 3GP, nor FLVs with MPEG compression
    # From http://answers.opencv.org/question/6/how-to-readwrite-video-with-opencv-in-python/

    #assert os.path.isfile(videoPathFileName);

    #assert False; # UNFINISHED

    # From http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html#videowriter-videowriter
    #capture = cv2.VideoWriter(videoPathFileName); # Gives error: <<TypeError: Required argument 'fourcc' (pos 2) not found>>

    # Inspired from http://stackoverflow.com/questions/14440400/creating-a-video-using-opencv-2-4-0-in-python
    #writer = cv.CreateVideoWriter("out.avi", CV_FOURCC("M", "J", "P", "G"), fps, frame_size, True)
    if False:
        writer = cv.CreateVideoWriter("out.avi",
                                      cv.CV_FOURCC("M", "J", "P", "G"), fps,
                                      frameSize, True)
    else:
        videoWriter = None

    folderContent = os.listdir(folderName)
    sortedFolderContent = sorted(folderContent)

    for fileName in sortedFolderContent:
        pathFileName = folderName + "/" + fileName
        if os.path.isfile(pathFileName) and \
                            fileName.lower().endswith("_good.png"):
            common.DebugPrint("ComputeHarlocs(): Loading %s" % pathFileName)
            img = cv2.imread(pathFileName)
            assert img is not None

            if videoWriter is None:
                common.DebugPrint("img.shape = %s" % str(img.shape))
                # From http://docs.opencv.org/trunk/doc/py_tutorials/py_gui/py_video_display/py_video_display.html#saving-a-video
                # See also http://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_gui/py_video_display/py_video_display.html
                # WRITES 0 BYTES IN THE VIDEO: vidFourcc = cv2.VideoWriter_fourcc('M','J','P','G');

                # See also http://www.fourcc.org/codecs.php
                vidFourcc = cv2.VideoWriter_fourcc(*'XVID')

                videoWriter = cv2.VideoWriter(filename=videoPathFileName, \
                                            fourcc=vidFourcc, fps=10, \
                                            frameSize=(img.shape[1], img.shape[0]))
                if not videoWriter.isOpened():
                    common.DebugPrint("Error in creating video writer")
                    sys.exit(1)

            #cv.WriteFrame(writer, img);
            videoWriter.write(img)

    videoWriter.release()
    common.DebugPrint("Finished writing the video")
    return

    height, width, layers = img1.shape

    video = cv2.VideoWriter("video.avi", -1, 1, (width, height))

    video.write(img1)

    video.release()

    resolution = (width, height)

    # From http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html#videowriter-write
    capture.write(im)

    return

    if config.OCV_OLD_PY_BINDINGS:
        capture.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, 0)
    else:
        capture.set(cv2.CAP_PROP_POS_FRAMES, 0)

    if config.OCV_OLD_PY_BINDINGS:
        frameCount = int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
    else:
        frameCount = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
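
The OCV_OLD_PY_BINDINGS switch above is the usual way to bridge the constant renames between the old and new cv2 bindings. A small helper in the same style (hypothetical, not part of the original module):

def make_fourcc(code='XVID'):
    # hypothetical helper: old bindings expose cv2.cv.CV_FOURCC,
    # newer ones expose cv2.VideoWriter_fourcc
    if config.OCV_OLD_PY_BINDINGS:
        return cv2.cv.CV_FOURCC(*code)
    return cv2.VideoWriter_fourcc(*code)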
Example #12
# CODEC = cv.CV_FOURCC('M','P','4','2') # MPEG 4.2
# CODEC = cv.CV_FOURCC('M','J','P','G') # Motion Jpeg
# CODEC = cv.CV_FOURCC('U','2','6','3') # H263
# CODEC = cv.CV_FOURCC('I','2','6','3') # H263I
# CODEC = cv.CV_FOURCC('F','L','V','1') # FLV
CODEC = cv.CV_FOURCC('P', 'I', 'M', '1')  # MPEG-1
CODEC = cv.CV_FOURCC('D', 'I', 'V', 'X')  # MPEG-4 = MPEG-1

# Initialize the camera for video capture
capture = cv.CaptureFromCAM(CAMERA_INDEX)

# Initialize the video writer to write the file
writer = cv.CreateVideoWriter(
    '/Users/sean/Desktop/out.avi',  # Filename
    CODEC,  # Codec for compression
    25,  # Frames per second
    (640, 480),  # Width / Height tuple
    True  # Color flag
)

# Capture 25 frames and write each one to the file
for i in range(0, 25):
    print 'frame #:', i
    frame = cv.QueryFrame(capture)
    cv.ShowImage("w1", frame)
    cv.WriteFrame(writer, frame)

# Release the capture
del (capture)
del (writer)
print 'released capture'
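
The writer above hard-codes 640x480; if the camera delivers frames of a different size, the resulting AVI is typically unusable. A hedged alternative sketch that derives the size from the capture before creating the writer:

# sketch: match the writer's frame size to what the camera actually delivers
w = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH))
h = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT))
writer = cv.CreateVideoWriter('/Users/sean/Desktop/out.avi', CODEC, 25, (w, h), True)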
Example #13
if not arquivos:
    print 'Não há arquivos de vídeo no diretório informado!'
    exit()

print 'Arquivos de vídeo encontrados: %d' % len(arquivos)
print arquivos
print

# create the writer for the output video
writer_saida = cv.CreateVideoWriter('%s/%s' % (dirVideos, ARQ_SAIDA),
                                    int(fourcc), fps, (int(nw), int(nh)), 1)

# use the first video as the size for all of them
captures = []
for i in range(len(arquivos)):
    capture = cv.CaptureFromFile(dirVideos + arquivos[i])
    captures.append(capture)
    
totalFrames = int(cv.GetCaptureProperty(captures[0], cv.CV_CAP_PROP_FRAME_COUNT)) 

print 'Total de frames do video 0: %d' % totalFrames
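
Only the setup is shown; a minimal sketch of the copy loop this script presumably builds up to (hypothetical, and assuming the input frames already match the (nw, nh) size given to the writer):

# hypothetical sketch: copy every frame of each input into the single output video
for capture in captures:
    while True:
        frame = cv.QueryFrame(capture)
        if frame is None:
            break
        cv.WriteFrame(writer_saida, frame)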

Example #14
import cv

sz = (1280, 720)
fps = 30
# codec = cv.CV_FOURCC('P', 'I', 'M', '1')
# codec = cv.CV_FOURCC('f', 'f', 'd', 's')
codec = cv.CV_FOURCC('I', '4', '2', '0')
filename = "testing.avi"

vw = cv.CreateVideoWriter(filename, codec, fps, sz)
if not vw:
    print 'could not create writer!'

img = cv.CreateImage(sz, 8, 3)
cv.Circle(img, (500,500), 50, cv.RGB(0,255,255), -1)

if not cv.WriteFrame(vw, img):
    print 'write failed!'
else:
    print 'write succeeded!'
Example #15
sequencias.append((0, totalFrames - 1))

#sequencias = [[400,500]]
#sequencias = [[0,1000]]

iseq = 0
for frameInicial, frameFinal in sequencias:
    iseq += 1

    if frameInicial % 2:
        print 'Frame inicial deve ser par!'
        exit()

    # create the writers for the output videos
    writer_original = cv.CreateVideoWriter(
        '%s/0-ORIGINAL-seq-%d(%d a %d).avi' %
        (dirSaida, iseq, frameInicial, frameFinal), int(fourcc), fpsSaida,
        (int(width), int(height)), 1)

    writer_diff_fundo = cv.CreateVideoWriter(
        '%s/1-DIF.FUNDO-seq-%d(%d a %d).avi' %
        (dirSaida, iseq, frameInicial, frameFinal), int(fourcc), fpsSaida,
        (int(width), int(height)), 1)

    writer_diff_quadro = cv.CreateVideoWriter(
        '%s/2-DIF.QUADRO-seq-%d(%d a %d).avi' %
        (dirSaida, iseq, frameInicial, frameFinal), int(fourcc), fpsSaida,
        (int(width), int(height)), 1)

    writer_canny = cv.CreateVideoWriter(
        '%s/4-CANNY-seq-%d(%d a %d).avi' %
        (dirSaida, iseq, frameInicial, frameFinal), int(fourcc), fpsSaida,
Example #16
#            print('Error !!')
#
#    Compdata.close()

File = 'E:/20161216/Kinect data _ h5 and pkl file/data12151611_c.h5'
import h5py, cv, glob, pdb
import numpy as np

fps = 30

f = h5py.File(File, 'r')
for j in xrange(2, 3):
    if j == 0:
        size = (1920, 1080)
        video = cv.CreateVideoWriter('test.avi',
                                     cv.CV_FOURCC('X', 'V', 'I', 'D'), fps,
                                     size, True)
        cimg = f['imgs']['cimgs']
    elif j == 1:
        size = (512, 424)
        video = cv.CreateVideoWriter('test_bdidx.avi',
                                     cv.CV_FOURCC('X', 'V', 'I', 'D'), fps,
                                     size, True)
        cimg = f['imgs']['bdimgs']
    else:
        size = (512, 424)
        video = cv.CreateVideoWriter('test_d.avi',
                                     cv.CV_FOURCC('X', 'V', 'I', 'D'), fps,
                                     size, True)
        cimg = f['imgs']['dimgs']
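
The excerpt stops after selecting the dataset; Example #8 above shows the matching write loop. A condensed sketch of that same pattern, wrapping each stored array in an IplImage header before writing it (the /256 rescaling assumes 16-bit depth frames, as in that example):

# sketch of the write loop, following the pattern from Example #8
for i in cimg.keys():
    bitmap = cv.CreateImageHeader(size, cv.IPL_DEPTH_8U, 3)
    cv.SetData(bitmap, np.uint8(cimg[i][:] / 256.).tostring())
    cv.WriteFrame(video, bitmap)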
Example #17
def capture():
    """main function"""
    # parse cmd line options
    parser = OptionParser()

    parser.add_option("-0",
                      "--file0",
                      dest="file0",
                      help="path of target file0")
    parser.add_option("-1",
                      "--file1",
                      dest="file1",
                      help="path of target file1")

    (options, args) = parser.parse_args()

    file0 = "out0.avi"
    file1 = "out1.avi"

    if options.file0:
        file0 = options.file0
    if options.file1:
        file1 = options.file1

    print "[INFO ] output file0: " + str(file0)
    print "[INFO ] output file1: " + str(file1)

    # init cams
    cam0 = cv.CaptureFromCAM(0)
    cam1 = cv.CaptureFromCAM(1)

    # check if cams are init correctly
    if not cam0:
        print "[ERROR] Could not init cam0"
        return
    if not cam1:
        print "[ERROR] Could not init cam1"
        return

    # skip first frames since they are normally garbage...
    print "[INFO ] Skipping first 10 frames..."
    for i in xrange(10):
        frame0 = cv.QueryFrame(cam0)
        frame1 = cv.QueryFrame(cam1)

    # init some vars
    # TODO: calc fps and image size correctly and output on console
    writer0 = cv.CreateVideoWriter(file0, cv.CV_FOURCC('M', 'J', 'P', 'G'), 30,
                                   (320, 240))
    writer1 = cv.CreateVideoWriter(file1, cv.CV_FOURCC('M', 'J', 'P', 'G'), 30,
                                   (320, 240))

    # create some windows to output frames
    cv.NamedWindow("cam0", cv.CV_WINDOW_AUTOSIZE)
    cv.NamedWindow("cam1", cv.CV_WINDOW_AUTOSIZE)

    print "[INFO ] Starting recording..."
    print "[INFO ] To quit press q or esc..."
    while True:
        # save the frames we want...
        frame0 = cv.QueryFrame(cam0)
        frame1 = cv.QueryFrame(cam1)

        # check if frames are valid
        if not frame0:
            print "[ERROR] could not query frame from cam0"
            continue
        if not frame1:
            print "[ERROR] could not query frame from cam1"
            continue

        # write frames to video files
        cv.WriteFrame(writer0, frame0)
        cv.WriteFrame(writer1, frame1)

        # output frames...
        cv.ShowImage("cam0", frame0)
        cv.ShowImage("cam1", frame1)
        key = cv.WaitKey(100)
        if key == 113 or key == 27:  # esc or q key
            break

    # destroy stuff...
    print "[INFO ] destroying opencv objects..."
    cv.DestroyWindow("cam0")
    cv.DestroyWindow("cam1")
    del (writer0)
    del (writer1)
    del (cam0)
    del (cam1)

    print "[INFO ] everything done... bye..."
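
The TODO above asks for the real fps and frame size instead of the hard-coded 30 and (320, 240). A hedged sketch of querying them from one of the captures before creating the writers (a detached fragment, reusing cam0 and file0 from the function above):

# sketch: derive writer parameters from the camera instead of hard-coding them
w0 = int(cv.GetCaptureProperty(cam0, cv.CV_CAP_PROP_FRAME_WIDTH))
h0 = int(cv.GetCaptureProperty(cam0, cv.CV_CAP_PROP_FRAME_HEIGHT))
fps0 = int(cv.GetCaptureProperty(cam0, cv.CV_CAP_PROP_FPS)) or 30  # many webcams report 0
writer0 = cv.CreateVideoWriter(file0, cv.CV_FOURCC('M', 'J', 'P', 'G'),
                               fps0, (w0, h0))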
Example #18
    def __init__(self):
        osc.init()
        self.source = Source(CAMERAID)
        #self.source.print_info()
        self.threshold_value = THRESH
        self.hc = cv.Load(HAARCASCADE)
        self.ms = cv.CreateMemStorage()

        self.orig = self.source.grab_frame()

        self.width = self.orig.width
        self.height = self.orig.height
        self.size = (self.width, self.height)
        self.smallheight = WORKING_HEIGHT
        self.smallwidth = int(self.width * self.smallheight / self.height *
                              1.0)
        self.smallsize = (self.smallwidth, self.smallheight)

        # alloc mem for images
        self.small = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 3)
        self.visualize = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 3)
        self.bw = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 1)
        self.hsv = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 3)
        self.hue = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 1)
        self.sat = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 1)
        self.bp = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 1)
        self.th = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 1)
        self.morphed = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 1)
        self.temp = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 1)
        self.temp3 = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 3)
        self.result = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 3)
        self.hist_image = cv.CreateImage((320, 200), cv.IPL_DEPTH_8U, 1)

        # make matrix for erode/dilate
        MORPH_SIZE = 3
        center = (MORPH_SIZE / 2) + 1
        self.morpher_small = cv.CreateStructuringElementEx(
            cols=MORPH_SIZE,
            rows=MORPH_SIZE,
            anchor_x=center,
            anchor_y=center,
            shape=cv.CV_SHAPE_ELLIPSE)
        MORPH_SIZE = 11
        center = (MORPH_SIZE / 2) + 1
        self.morpher = cv.CreateStructuringElementEx(cols=MORPH_SIZE,
                                                     rows=MORPH_SIZE,
                                                     anchor_x=center,
                                                     anchor_y=center,
                                                     shape=cv.CV_SHAPE_ELLIPSE)

        # alloc mem for face histogram
        self.face_hist = cv.CreateHist([HUEBINS, SATBINS], cv.CV_HIST_ARRAY,
                                       [[0, 180], [0, 255]], 1)

        # alloc mem for background histogram
        self.bg_hist = cv.CreateHist([HUEBINS, SATBINS], cv.CV_HIST_ARRAY,
                                     [[0, 180], [0, 255]], 1)

        # video writer
        if STORE:
            self.writer = cv.CreateVideoWriter(OUTPUT,
                                               cv.CV_FOURCC(
                                                   'M', 'J', 'P', 'G'),
                                               15,
                                               cv.GetSize(self.combined),
                                               is_color=1)

        # make window
        cv.NamedWindow('Skin Detection')
        cv.CreateTrackbar('Threshold', 'Skin Detection', self.threshold_value,
                          255, self.change_threshold)
Example #19
import cv

# setup webcam
capture = cv.CaptureFromCAM(0)

# set resolution
cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH, 320)
cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT, 240)

# setup file output
filenm = "output.avi"
codec = 0
fps = 15
width = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH))
height = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT))
writer = cv.CreateVideoWriter(filenm, codec, fps, (width, height), 1)


# display frame on screen and write it to disk
def DisplayWriteFrame(image):
    cv.ShowImage('Image_Window', image)

    i = 0
    while i < 15:
        cv.WriteFrame(writer, image)
        i += 1

    cv.WaitKey(1000)


j = 0
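
The excerpt ends just after defining DisplayWriteFrame; a hedged sketch of how it might be driven (illustrative only, not the original's loop):

# hypothetical driver loop: grab a frame, then show it and pad the AVI with it
while j < 10:
    img = cv.QueryFrame(capture)
    DisplayWriteFrame(img)
    j += 1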
Example #20
    def __init__(self):
        self.source = Source(CAMERAID)
        self.threshold_value = THRESH
        self.erode_value = ERODE
        self.dialate_value = DIALATE
        self.count = -1
        self.ms = cv.CreateMemStorage()

        self.orig = self.source.grab_frame()

        self.width = self.orig.width
        self.height = self.orig.height
        self.size = (self.width, self.height)
        self.smallheight = WORKING_HEIGHT
        self.smallwidth = int(self.width * self.smallheight / self.height *
                              1.0)
        self.smallsize = (self.smallwidth, self.smallheight)

        # alloc mem for images
        self.small = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 3)
        self.visualize = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 3)
        self.bw = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 1)
        self.grey = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 1)
        self.smooth = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 1)
        self.first = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 1)
        self.prev = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 1)
        self.fuzz = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 1)
        self.diff1 = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 1)
        self.diff2 = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 1)
        self.temp = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 3)
        self.result = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 3)

        # make matrix for erode/dilate
        MORPH_SIZE = 3
        center = (MORPH_SIZE / 2) + 1
        self.morpher_small = cv.CreateStructuringElementEx(
            MORPH_SIZE, MORPH_SIZE, center, center, cv.CV_SHAPE_ELLIPSE)
        MORPH_SIZE = 11
        center = (MORPH_SIZE / 2) + 1
        self.morpher = cv.CreateStructuringElementEx(MORPH_SIZE, MORPH_SIZE,
                                                     center, center,
                                                     cv.CV_SHAPE_ELLIPSE)

        # alloc mem for feature histogram
        self.feature_hist = cv.CreateHist([HUEBINS, SATBINS], cv.CV_HIST_ARRAY,
                                          [[0, 180], [0, 255]], 1)

        # alloc mem for background histogram
        self.bg_hist = cv.CreateHist([HUEBINS, SATBINS], cv.CV_HIST_ARRAY,
                                     [[0, 180], [0, 255]], 1)

        # video writer
        if STORE:
            self.writer = cv.CreateVideoWriter(OUTPUT,
                                               cv.CV_FOURCC(
                                                   'M', 'J', 'P', 'G'),
                                               15,
                                               cv.GetSize(self.combined),
                                               is_color=1)
        # make window
        cv.NamedWindow("Skin Detection")
        cv.CreateTrackbar('Threshold', 'Skin Detection', self.threshold_value,
                          100, self.change_threshold)

        cv.CreateTrackbar('Erode', 'Skin Detection', self.erode_value, 40,
                          self.change_threshold)

        cv.CreateTrackbar('Dialate', 'Skin Detection', self.dialate_value, 40,
                          self.change_threshold)

        cv.SetMouseCallback("Skin Detection", self.on_mouse)
        self.drag_start = None  # Set to (x,y) when mouse starts drag
        self.track_window = None  # Set to rect when the mouse drag finishes
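
SetMouseCallback above expects a handler taking (event, x, y, flags, param); the original handler is not part of this excerpt. A minimal sketch of one that fills drag_start and track_window the way the two attributes suggest (hypothetical):

    def on_mouse(self, event, x, y, flags, param):
        # hypothetical sketch of the mouse handler registered above
        if event == cv.CV_EVENT_LBUTTONDOWN:
            self.drag_start = (x, y)
        elif event == cv.CV_EVENT_LBUTTONUP and self.drag_start:
            x0, y0 = self.drag_start
            self.track_window = (min(x0, x), min(y0, y), abs(x - x0), abs(y - y0))
            self.drag_start = None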
Example #21
def main():
    """
    Main program - controls grabbing images from video stream and loops around each frame.
    """
    #camera = cv.CaptureFromFile("rtsp://192.168.1.18/live_mpeg4.sdp")
    camera = cv.CaptureFromFile("testcards/sample1.mp4")
    #camera = cv.CaptureFromCAM(0)
    if (camera!=None):
        frameSize = (640,480)
        videoFormat = cv.FOURCC('p','i','m','1')
        vw = cv.CreateVideoWriter("seizure_test.mpg",videoFormat, outputfps,frameSize,1)

        cv.NamedWindow(window1,cv.CV_WINDOW_AUTOSIZE)
        origImg = cv.QueryFrame(camera)
        lastTime = datetime.datetime.now()
        while (origImg):
            # Preprocess, then add the new image to the list, along with the 
            # time it was recorded.
            imgList.append(
                (lastTime,
                 preProcessImage(origImg)
                 ))
            # Drop the oldest image off the list if we have enough in the list.
            if (len(imgList)>IMG_STACK_LEN):
                imgList.pop(0)  # Remove first item
                
 
            xorig = 0
            yorig = 0
            if (len(imgList) == IMG_STACK_LEN):
                # imgList[] is now a list of tuples (time,image) containing the
                # reduced size images -
                spectra = getSpectra(imgList)
                binWidth = 1.0*inputfps/IMG_STACK_LEN
                #(a,fftMax,b,(freqNo,pixelNo))= cv.MinMaxLoc(spectra)
                for freqNo in range(0,int(len(imgList)/2)):
                    for pixelNo in range(0,70):
                        if (abs(spectra[pixelNo,freqNo])>FREQ_THRESH):
                            print "PixelNo %d exceeds threshold (val=%f) in freq bin %d (%f Hz)" % (pixelNo,abs(spectra[pixelNo,freqNo]),freqNo,freqNo*binWidth)
                            (xmax,ymax) = pixelNo2xy(pixelNo,imgList[0][1])
                            (xorig,yorig) = getEquivLoc(xmax,ymax,ANALYSIS_LAYER)
                            if (freqNo<10):
                                colour = cv.Scalar(255,1,1)
                                thickness = 1
                            elif (freqNo>10 and freqNo<20):
                                colour = cv.Scalar(1,255,1)
                                thickness = 5
                            elif (freqNo>20 and freqNo<30):
                                colour = cv.Scalar(1,1,255)
                                thickness = 10
                            elif (freqNo>30):
                                colour = cv.Scalar(255,255,255)
                                thickness = 20
                            cv.Circle(origImg, (xorig,yorig), 30, colour, thickness=thickness, lineType=-1, shift=0) 
            cv.WriteFrame(vw,origImg)
            cv.ShowImage(window1,origImg)
            cv.ShowImage(window2,imgList[0][1])
            cv.WaitKey(1) # This is very important or ShowImage doesn't work!!
                

            timeDiff = (datetime.datetime.now() - lastTime).total_seconds() 
            if (timeDiff<1./inputfps):
                print "timediff=%f, 1/fps=%f" % (timeDiff,1./inputfps)
                cv.WaitKey(1+int(1000.*(1./inputfps - timeDiff)))

            # Note - there is something odd about this time calculation
            # it does not seem to be consistent with the timestamps on the
            # images.
            timeDiff = (datetime.datetime.now() - lastTime).total_seconds() 
            fps = 1./timeDiff
            print "timeDiff=%f, fps=%f fps" % (timeDiff,fps)

            # Now get a new frame ready to start the loop again
            origImg = cv.QueryFrame(camera)
            lastTime = datetime.datetime.now()
        print "no more images..."
    else:
        print "Error - failed to connect to camera"
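
getSpectra() is not included in this excerpt; conceptually it turns the stack of IMG_STACK_LEN reduced images into a per-pixel frequency spectrum indexed as spectra[pixelNo, freqNo]. A rough numpy sketch of that idea (an assumption about the helper, not its actual implementation, and assuming each stored image can be viewed as a numpy array):

import numpy as np

def getSpectra_sketch(imgList):
    # stack the flattened frames: shape (n_frames, n_pixels)
    stack = np.array([np.asarray(img).ravel() for (t, img) in imgList])
    # FFT along time, then transpose so rows are pixels and columns are frequency bins
    return np.fft.fft(stack, axis=0).T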
Example #22
var = os.getcwd() + '\\' + var + '.avi'
fps = 24.0
width = 960
height = 720
fourcc = cv.CV_FOURCC('P', 'I', 'M', '1')
#MPEG-1 only supports framerates of 23.976, 24, 25, 29.97, 30, 50, 59.94, and 60. Anything else is out of spec.
#CV_FOURCC('P','I','M','1')    = MPEG-1 codec
#CV_FOURCC('M','J','P','G')    = motion-jpeg codec (does not work well)
#CV_FOURCC('M', 'P', '4', '2') = MPEG-4.2 codec
#CV_FOURCC('D', 'I', 'V', '3') = MPEG-4.3 codec
#CV_FOURCC('D', 'I', 'V', 'X') = MPEG-4 codec
#CV_FOURCC('U', '2', '6', '3') = H263 codec
#CV_FOURCC('I', '2', '6', '3') = H263I codec
#CV_FOURCC('F', 'L', 'V', '1') = FLV1 codec
#writer = cv.CreateVideoWriter('Z:\DCC-003-TD\pdwater\project\webcam\python\\video\\baby.avi', fourcc, fps, (width, height), 1)
writer = cv.CreateVideoWriter(var, fourcc, fps, (width, height), 1)

while (1):
    time_start = time.time()
    color_image = cv.QueryFrame(capture)
    imdraw = cv.CreateImage(cv.GetSize(frame), 8, 3)
    cv.SetZero(imdraw)
    cv.Flip(color_image, color_image, 1)
    cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 3, 0)

    #find movement

    if first:
        difference = cv.CloneImage(color_image)
        temp = cv.CloneImage(color_image)
        cv.ConvertScale(color_image, moving_average, 1.0, 0.0)
Example #23
    def Save(self, filename):
        writer = cv.CreateVideoWriter(filename, cv.CV_FOURCC('M', 'J', 'P', 'G'),
                                      self.getFPS(), self.getSize(), 1)
        for i in self.frames:
            cv_im = cv.CreateImageHeader(i.size, cv.IPL_DEPTH_8U, 3)
            cv.SetData(cv_im, i.tostring())
            cv.WriteFrame(writer, cv_im)
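
Save() assumes self.frames holds PIL-style RGB images (objects exposing .size and .tostring()). A hedged usage sketch, with the surrounding class name and frame source both illustrative:

from PIL import Image  # assumed; any object with .size and .tostring() works

clip = Clip()  # hypothetical class that the Save() method above belongs to
clip.frames = [Image.open('frame_%03d.png' % i) for i in range(30)]
clip.Save('out.avi')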