import matplotlib
matplotlib.use('Agg')
import pylab
import cv
import io
from PIL import Image

vid = '20100818-1_PO32-2_PO34-3_PO17/merge6mbit_720_PO32_0-29443.mp4'
pixdim = (522, 246)
_ip.system("rm 20100818-1_PO32-2_PO34-3_PO17/test_opencv_write.avi")
vidwriter = cv.CreateVideoWriter('/n/hoekstrafs1/burrowing/antfarms/data/_2011fall/20100818-1_PO32-2_PO34-3_PO17/test_opencv_write.avi',
                                 cv.FOURCC('x', 'v', 'i', 'd'), 29.97, pixdim, 1)
from video_analysis import vidtools
stream = cv.CaptureFromFile(vid)
m = vidtools.array_from_stream(stream)
# Profile the two expensive steps of the matplotlib round trip:
_ip.magic("timeit fig = pylab.figure(1)")
_ip.magic("timeit ax = pylab.matshow(m, fignum=1)")

def mat2cv(m):
    """Render the 2-D array m via matplotlib and convert it to an OpenCV image."""
    fig = pylab.figure(1)
    ax = pylab.matshow(m, fignum=1)
    buf = io.BytesIO()
    fig.savefig(buf, format='png')
    buf.seek(0)
    pi = Image.open(buf)
    cv_im = cv.CreateImageHeader(pi.size, cv.IPL_DEPTH_8U, 1)
    cv.SetData(cv_im, pi.tostring())
    return cv_im
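# A minimal sketch (not from the session above) of the same matplotlib ->
# OpenCV conversion without the PNG round trip: render onto the Agg canvas
# and grab the RGB buffer directly. Assumes the modern cv2/NumPy API and a
# 2-D array m; tostring_rgb is the older Agg accessor and is deprecated in
# recent matplotlib releases.
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import cv2

def mat2bgr(m):
    fig = plt.figure(1)
    plt.matshow(m, fignum=1)
    fig.canvas.draw()                                # render the figure
    w, h = fig.canvas.get_width_height()
    rgb = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8).reshape(h, w, 3)
    return cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)      # OpenCV expects BGR order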
# Motion-detection game setup. Identifiers are Turkish: kamera = camera,
# cerceve_boyutu = frame size, yazdir_goruntuyu = "write the video" flag,
# fourcc_degeri / fps_degeri = fourcc / fps values.
python_opencv_modulu.NamedWindow("Python - Bahçeşehir University Game",
                                 python_opencv_modulu.CV_WINDOW_AUTOSIZE)
# 9x9 elliptical structuring element for morphological filtering
elips_sekil_kalibi = python_opencv_modulu.CreateStructuringElementEx(
    9, 9, 4, 4, python_opencv_modulu.CV_SHAPE_ELLIPSE)
kamera = python_opencv_modulu.CaptureFromCAM(-1)  # -1 = first available camera
cerceve_boyutu = (int(python_opencv_modulu.GetCaptureProperty(
                      kamera, python_opencv_modulu.CV_CAP_PROP_FRAME_WIDTH)),
                  int(python_opencv_modulu.GetCaptureProperty(
                      kamera, python_opencv_modulu.CV_CAP_PROP_FRAME_HEIGHT)))
yazdir_goruntuyu = False
fourcc_degeri = python_opencv_modulu.FOURCC('M', 'J', 'P', 'G')
fps_degeri = 30
if yazdir_goruntuyu:
    video_yazdir = python_opencv_modulu.CreateVideoWriter(
        "film.avi", fourcc_degeri, fps_degeri, cerceve_boyutu)
# Three zeroed 8-bit, 3-channel working frames: previous, difference, current
previous = python_opencv_modulu.CreateImage(cerceve_boyutu, 8L, 3)
python_opencv_modulu.SetZero(previous)
difference = python_opencv_modulu.CreateImage(cerceve_boyutu, 8L, 3)
python_opencv_modulu.SetZero(difference)
current = python_opencv_modulu.CreateImage(cerceve_boyutu, 8L, 3)
python_opencv_modulu.SetZero(current)
logo_bau_original = python_opencv_modulu.LoadImage("img/logo-bau.png")
logo_bau = python_opencv_modulu.CreateImage((64, 64), logo_bau_original.depth,
                                            logo_bau_original.channels)
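# A minimal sketch of the same setup against the modern cv2 API, with the
# Turkish identifiers translated; assumes the default camera at index 0.
import cv2

camera = cv2.VideoCapture(0)
frame_size = (int(camera.get(cv2.CAP_PROP_FRAME_WIDTH)),
              int(camera.get(cv2.CAP_PROP_FRAME_HEIGHT)))
write_video = False
fourcc = cv2.VideoWriter_fourcc(*'MJPG')   # replaces the legacy cv.FOURCC
fps = 30
if write_video:
    video_writer = cv2.VideoWriter('film.avi', fourcc, fps, frame_size)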
        break
    elif current_key == ord('s'):
        # Rescale the depth frame into 8 bits, then snapshot all three views.
        scaled = cv.CreateMat(height, width, cv.CV_8UC1)
        cv.ConvertScale(current_depth_frame, scaled, 0.05)
        cv.SaveImage(outpath + '/image_%d.jpg' % save_count, current_image_frame)
        cv.SaveImage(outpath + '/depth_%d.jpg' % save_count, scaled)
        cv.SaveImage(outpath + '/out_%d.jpg' % save_count, out)
        save_count += 1
    elif current_key == ord('v'):
        # Toggle video recording on/off.
        if writer:
            writer = None
            print 'video recording stopped'
        else:
            print 'start recording video to: %s/video_%d.avi' % (outpath, save_count)
            writer = cv.CreateVideoWriter(outpath + '/video_%d.avi' % save_count,
                                          cv.FOURCC('P', 'I', 'M', '1'), 30,
                                          (width, height + hist_height))
            save_count += 1
    elif current_key == ord('h'):
        print help
    elif current_key == ord('c'):
        draw_contours = not draw_contours
    elif current_key == ord('b'):
        draw_box = not draw_box
    elif current_key == ord('k'):
        draw_keypoints = not draw_keypoints
    elif current_key == ord('e'):
        erode = not erode
    elif current_key == ord('l'):
        draw_cluster = not draw_cluster
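# A minimal cv2 sketch of the same toggle-to-record pattern, assuming frames
# come from the default camera; release() finalizes the file, which the
# legacy writer above leaves to garbage collection.
import cv2

cap = cv2.VideoCapture(0)
writer = None
save_count = 0
while True:
    ok, frame = cap.read()
    if not ok:
        break
    if writer is not None:
        writer.write(frame)
    key = cv2.waitKey(1) & 0xFF
    if key == ord('v'):
        if writer is not None:
            writer.release()   # stop recording and finalize the container
            writer = None
        else:
            h, w = frame.shape[:2]
            writer = cv2.VideoWriter('video_%d.avi' % save_count,
                                     cv2.VideoWriter_fourcc(*'MJPG'), 30, (w, h))
            save_count += 1
    elif key == ord('q'):
        break
cap.release()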
def main():
    """
    Main program - controls grabbing images from the video stream and loops
    around each frame.
    """
    camera = cv.CaptureFromFile("rtsp://192.168.1.18/live_mpeg4.sdp")
    #camera = cv.CaptureFromFile("testcards/testcard.mpg")
    #camera = cv.CaptureFromCAM(0)
    if camera is not None:
        frameSize = (640, 480)
        #fps = 30
        videoFormat = cv.FOURCC('p', 'i', 'm', '1')
        vw = cv.CreateVideoWriter("seizure_test.mpg", videoFormat,
                                  inputfps, frameSize, 1)
        cv.NamedWindow(window1, cv.CV_WINDOW_AUTOSIZE)
        origImg = cv.QueryFrame(camera)
        lastTime = datetime.datetime.now()
        while origImg:
            # Preprocess, then add the new image to the list, along with the
            # time it was recorded.
            imgList.append((lastTime, preProcessImage(origImg)))
            # Drop the oldest image off the list once we have enough.
            if len(imgList) > IMG_STACK_LEN:
                imgList.pop(0)  # Remove first item
            xorig = 0
            yorig = 0
            if len(imgList) == IMG_STACK_LEN:
                # imgList[] is now a list of (time, image) tuples containing
                # the reduced-size images.
                spectra = getSpectra(imgList)
                binWidth = 1.0 * inputfps / IMG_STACK_LEN
                #(a,fftMax,b,(freqNo,pixelNo))= cv.MinMaxLoc(spectra)
                for freqNo in range(0, int(len(imgList) / 2)):
                    for pixelNo in range(0, 70):
                        if abs(spectra[pixelNo, freqNo]) > FREQ_THRESH:
                            print "PixelNo %d exceeds threshold (val=%f) in freq bin %d (%f Hz)" % \
                                (pixelNo, abs(spectra[pixelNo, freqNo]), freqNo, freqNo * binWidth)
                            (xmax, ymax) = pixelNo2xy(pixelNo, imgList[0][1])
                            (xorig, yorig) = getEquivLoc(xmax, ymax, ANALYSIS_LAYER)
                            # Colour-code the marker by frequency band. (The
                            # original strict <10/>10 tests skipped the exact
                            # band edges 10, 20 and 30; closed here.)
                            if freqNo < 10:
                                colour = cv.Scalar(255, 1, 1)
                                thickness = 1
                            elif freqNo < 20:
                                colour = cv.Scalar(1, 255, 1)
                                thickness = 5
                            elif freqNo < 30:
                                colour = cv.Scalar(1, 1, 255)
                                thickness = 10
                            else:
                                colour = cv.Scalar(255, 255, 255)
                                thickness = 20
                            cv.Circle(origImg, (xorig, yorig), 30, colour,
                                      thickness=thickness, lineType=-1, shift=0)
            cv.WriteFrame(vw, origImg)
            cv.ShowImage(window1, origImg)
            cv.ShowImage(window2, imgList[0][1])
            cv.WaitKey(1)  # This is very important or ShowImage doesn't work!!
            timeDiff = (datetime.datetime.now() - lastTime).total_seconds()
            if timeDiff < 1. / inputfps:
                print "timediff=%f, 1/fps=%f" % (timeDiff, 1. / inputfps)
                cv.WaitKey(1 + int(1000. * (1. / inputfps - timeDiff)))
            # Note - there is something odd about this time calculation:
            # it does not seem to be consistent with the timestamps on the
            # images.
            timeDiff = (datetime.datetime.now() - lastTime).total_seconds()
            fps = 1. / timeDiff
            print "timeDiff=%f, fps=%f fps" % (timeDiff, fps)
            # Now get a new frame ready to start the loop again.
            origImg = cv.QueryFrame(camera)
            lastTime = datetime.datetime.now()
        print "no more images..."
    else:
        print "Error - failed to connect to camera"
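# A minimal sketch isolating the frame-pacing idea used in the loop above:
# sleep inside cv2.waitKey for whatever remains of the frame period. The
# helper name pace() is hypothetical, not part of the original program.
import datetime
import cv2

def pace(last_time, input_fps):
    """Block until one frame period has passed since last_time; return now."""
    elapsed = (datetime.datetime.now() - last_time).total_seconds()
    remaining = 1.0 / input_fps - elapsed
    if remaining > 0:
        cv2.waitKey(1 + int(1000.0 * remaining))  # waitKey takes milliseconds
    return datetime.datetime.now()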
segmasked = []
i = 0
param_str = '%s-%s_seg%s_%s-%s' % (target_frame_start, target_frame_stop,
                                   opts.seglen, thresh_str, opts.video_suffix)
if opts.video_suffix:
    vidout = opts.vid[:-4] + '_%s.avi' % param_str
    print >> sys.stderr, 'write output video to %s' % vidout
    try:
        os.unlink(vidout)
    except OSError:
        pass
    pylab.gray()
    vidwriter = cv.CreateVideoWriter(vidout, cv.FOURCC('x', 'v', 'i', 'd'),
                                     fps, pixdim, 1)
analysis_root = os.path.join(opts.vid[:-4], 'analysis', param_str)
print >> sys.stderr, 'write analysis results to %s' % analysis_root
try:
    os.makedirs(analysis_root)
except OSError:
    pass

# Prepare output tarfile names, one archive per analysis product.
tarfiles = dict([(fname, os.path.join(analysis_root, fname + '.tar'))
                 for fname in ['miceols', 'objs', 'objs_fols', 'objs_sizes',
                               'prevact_ols', 'newact_ols', 'grounds',
                               'mousemasks', 'digdiffs', 'segavgs']])
open(os.path.join(analysis_root, 'SHAPE'), 'w').write(repr(SHAPE))
times = []
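# A hedged sketch of how a per-frame array could be appended to one of the
# .tar archives named above; add_array_to_tar is a hypothetical helper, not
# part of the original pipeline.
import io
import tarfile
import numpy as np

def add_array_to_tar(tarpath, name, arr):
    buf = io.BytesIO()
    np.save(buf, arr)                       # serialize the array as .npy bytes
    info = tarfile.TarInfo(name=name + '.npy')
    info.size = len(buf.getvalue())
    buf.seek(0)
    with tarfile.open(tarpath, 'a') as tf:  # 'a' appends, creating if missing
        tf.addfile(info, buf)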
# -*- coding: utf-8 -*-
import cv2
import cv
import numpy as np
from os.path import join as pjoin
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets('mnist', one_hot=True)
train_data = mnist.train.images
print "👉 processed input data!"

# Define the codec and create a VideoWriter object.
out = cv2.VideoWriter('./mnist.mjpg', cv.FOURCC(*'MJPG'), 25, (28, 28))

i = 0
for frame in train_data:
    print "frame... " + str(i)
    i = i + 1
    frame = frame * 255.0
    x = frame.reshape([28, 28]).astype('uint8')
    # Replicate each grayscale value into three interleaved channels.
    x = np.repeat(x, 3, axis=1)
    x = x.reshape(28, 28, 3)
    out.write(x)

# Release everything when the job is finished.
out.release()
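# A minimal cv2-only sketch of the same write path, with the isOpened() check
# that catches a codec/frame-size mismatch before the loop silently drops
# every frame; the random array stands in for one MNIST image.
import numpy as np
import cv2

out = cv2.VideoWriter('mnist.avi', cv2.VideoWriter_fourcc(*'MJPG'), 25, (28, 28))
assert out.isOpened(), 'VideoWriter failed to open (codec or size problem)'
gray = (np.random.rand(28, 28) * 255).astype('uint8')
bgr = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)  # replicate gray into 3 channels
out.write(bgr)
out.release()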
def write_file(fn, p, q, outfn, original_slides, sync_result,
               M=20, fourcc="XVID", SKIP=None):
    W, H = 1440, 960
    W, H = 1920, 1080  # overrides the draft 1440x960 output size above
    cap = cv2.VideoCapture(fn)
    SW, SH = int(cap.get(cv.CV_CAP_PROP_FRAME_WIDTH)), \
             int(cap.get(cv.CV_CAP_PROP_FRAME_HEIGHT))
    # Rescale the slide rectangle p from source to output coordinates.
    p2 = (p[0] * H / SH, p[1] * W / SW, p[2] * H / SH, p[3] * W / SW)
    pw, ph = p2[3] - p2[1], p2[2] - p2[0]
    print p2, q
    fourcc = cv.FOURCC(*fourcc)
    num_of_frames = cap.get(cv.CV_CAP_PROP_FRAME_COUNT)
    frame_rate = cap.get(cv.CV_CAP_PROP_FPS)
    print "frame_rate", frame_rate
    sys.stdout.flush()
    out = cv2.VideoWriter(outfn, fourcc, frame_rate, (W, H))
    frame_index = -1
    last_slide = -1
    last_start = -1
    frame_list = []
    result_index = 0
    progress = widgets.IntProgressWidget(min=0, max=num_of_frames - 1, value=0)
    progress_text = widgets.TextWidget()
    progress.set_css('background', 'black')
    display(progress)
    display(progress_text)
    while cap.isOpened():
        frame_index += 1
        ret, frame = cap.read()
        if not ret:
            break
        # Advance to the sync interval that covers this frame.
        while result_index < len(sync_result) and \
                sync_result[result_index][1] < frame_index:
            result_index += 1
        the_slide = (-1, -1, -1) if result_index >= len(sync_result) \
            else sync_result[result_index]
        if SKIP and the_slide[2] in SKIP:
            the_slide = (-1, -1, -1)
        original_frame = cv2.resize(frame, (W, H), interpolation=cv2.INTER_CUBIC)
        if the_slide[2] >= 0 and the_slide[1] - the_slide[0] > 3 * M:
            # Paste the matched slide into the rectangle, cross-fading over
            # the first and last M frames of the interval.
            slide = original_slides[the_slide[2]]
            inner_frame = cv2.resize(slide[q[0]:q[2], q[1]:q[3]], (pw, ph),
                                     interpolation=cv2.INTER_CUBIC)
            d = min(frame_index - the_slide[0], the_slide[1] - frame_index)
            out_frame = original_frame.copy()
            out_frame[p2[0]:p2[2], p2[1]:p2[3]] = inner_frame
            if d < M:
                out_frame = cv2.addWeighted(out_frame, d * 1.0 / M,
                                            original_frame, 1 - d * 1.0 / M, 0)
        else:
            out_frame = original_frame
        out.write(out_frame)
        if frame_index % 100 == 0:
            progress.value = frame_index
            progress_text.value = "%d/%d (%.1f)" % (frame_index, num_of_frames,
                                                    100.0 * frame_index / num_of_frames)
        if frame_index % 10000 == 0:
            disp_frame = np.concatenate((out_frame[:, :W / 2],
                                         original_frame[:, W / 2:]), axis=1)
            display_img_array(cv2.cvtColor(disp_frame, cv2.COLOR_BGR2RGB),
                              width=200)
    cap.release()
    out.release()
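# A minimal sketch isolating the boundary cross-fade used above: within M
# frames of a slide interval's start or end, blend the composited frame back
# toward the raw frame with a linear alpha. crossfade() is a hypothetical
# helper; inputs are assumed to be same-size BGR uint8 arrays.
import cv2

def crossfade(out_frame, original_frame, frame_index, start, end, M=20):
    d = min(frame_index - start, end - frame_index)  # distance to nearest cut
    if d < M:
        alpha = d * 1.0 / M
        return cv2.addWeighted(out_frame, alpha, original_frame, 1.0 - alpha, 0)
    return out_frame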