コード例 #1
0
ファイル: video2rgb.py プロジェクト: asottile/ancient-pythons
def applypackfactor(image, w, h, pf, bpp):
	"""Expand a pack-factor-reduced image back up to w x h pixels.

	pf is either an (xpf, ypf) tuple, 0 (treated as no packing),
	or one factor used for both axes.  A negative vertical factor
	means the stored image is upside down and must be flipped.
	bpp is the number of bytes per pixel.
	"""
	import imageop
	if type(pf) == tuple:
		hfac, vfac = pf
	else:
		# 0 means "unpacked", i.e. a factor of 1 on both axes.
		hfac = vfac = pf or 1
	packed_w = w / hfac
	packed_h = h / abs(vfac)
	if vfac < 0:
		# Flip vertically: crop() with swapped top/bottom corner
		# coordinates mirrors the image top-to-bottom.
		image = imageop.crop(image, bpp, packed_w, packed_h,
				     0, packed_h - 1, packed_w - 1, 0)
	return imageop.scale(image, bpp, packed_w, packed_h, w, h)
コード例 #2
0
ファイル: video2rgb.py プロジェクト: carol8421/gosh
def applypackfactor(image, w, h, pf, bpp):
    """Scale a pack-factor-reduced image back to its full w x h size.

    pf may be a (xpf, ypf) tuple, 0 (no packing), or a single factor
    applied to both axes.  A negative vertical factor flips the image
    top-to-bottom before scaling.  bpp is bytes per pixel.
    """
    import imageop
    if type(pf) == type(()):
        xfactor, yfactor = pf
    elif pf == 0:
        xfactor = yfactor = 1
    else:
        xfactor = yfactor = pf
    small_w, small_h = w / xfactor, h / abs(yfactor)
    if yfactor < 0:
        # crop() with swapped top/bottom corners mirrors vertically.
        image = imageop.crop(image, bpp, small_w, small_h,
                             0, small_h - 1, small_w - 1, 0)
    return imageop.scale(image, bpp, small_w, small_h, w, h)
コード例 #3
0
ファイル: Vb.py プロジェクト: carol8421/gosh
	def single_capture(self, stepfunc, timecode):
		"""Capture one frame from the video hardware and write it out.

		stepfunc -- optional callable invoked while polling for data;
			    presumably advances the source video to its next
			    frame (TODO confirm against callers)
		timecode -- msec position of the frame, or None to derive it
			    from the frame count assuming 25 frames/sec

		Returns the result of write_frame, or 0 when the captured
		RGB data has an unexpected size.
		"""
		self.open_if_closed()
		self.init_cont()
		# Poll until the hardware delivers data; GetCaptureData
		# raises sv.error while nothing is ready yet.
		while 1:
			try:
				cd, id = self.video.GetCaptureData()
				break
			except sv.error:
				pass
			sgi.nap(1)
			if stepfunc:		# This might step the video
				d=stepfunc()	# to the next frame
		if not self.use_24:
			data = cd.InterleaveFields(1)
		else:
			x, y = self.vout.getsize()
			if self.use_compress:
				# YUV output, optionally reduced to a quarter
				# or a sixteenth of the original area.
				if self.rgb24_size == 1:
					data = cd.YUVtoYUV422DC(0)
				elif self.rgb24_size == 2:
					data = cd.YUVtoYUV422DC_quarter(1)
					x = x/2
					y = y/2
				elif self.rgb24_size == 3:
					data = cd.YUVtoYUV422DC_sixteenth(1)
					x = x/4
					y = y/4
			else:
				data = cd.YUVtoRGB(1)
				# Sanity check: expect 4 bytes per pixel.
				if self.maxx*self.maxy*4 <> len(data):
					print 'maxx,maxy,exp,got=', self.maxx,
					print self.maxy,self.maxx*self.maxy*4,
					print len(data)
					fl.showmessage('Wrong sized data')
					return 0
				if self.rgb24_size <> 1:
					data = imageop.scale(data, 4, \
						  self.maxx, self.maxy, x, y)
			if self.use_jpeg:
				import jpeg
				data = jpeg.compress(data, x, y, 4)
			if self.use_compress:
				data = self.compressor.Compress(1, data)
		cd.UnlockCaptureData()
		self.end_cont()
		if timecode == None:
			# Default timecode: 25 fps -> 40 msec per frame.
			timecode = (self.nframes+1) * (1000/25)
		return self.write_frame(timecode, data)
コード例 #4
0
ファイル: Vb.py プロジェクト: asottile/ancient-pythons
	def single_capture(self, stepfunc, timecode):
		"""Capture one frame from the video hardware and write it out.

		stepfunc -- optional callable invoked while polling for data;
			    presumably advances the source video to its next
			    frame (TODO confirm against callers)
		timecode -- msec position of the frame, or None to derive it
			    from the frame count assuming 25 frames/sec

		Returns the result of write_frame, or 0 when the captured
		RGB data has an unexpected size.
		"""
		self.open_if_closed()
		self.init_cont()
		# Poll until the hardware delivers data; GetCaptureData
		# raises sv.error while nothing is ready yet.
		while 1:
			try:
				cd, id = self.video.GetCaptureData()
				break
			except sv.error:
				pass
			sgi.nap(1)
			if stepfunc:		# This might step the video
				d=stepfunc()	# to the next frame
		if not self.use_24:
			data = cd.InterleaveFields(1)
		else:
			x, y = self.vout.getsize()
			if self.use_compress:
				# YUV output, optionally reduced to a quarter
				# or a sixteenth of the original area.
				if self.rgb24_size == 1:
					data = cd.YUVtoYUV422DC(0)
				elif self.rgb24_size == 2:
					data = cd.YUVtoYUV422DC_quarter(1)
					x = x/2
					y = y/2
				elif self.rgb24_size == 3:
					data = cd.YUVtoYUV422DC_sixteenth(1)
					x = x/4
					y = y/4
			else:
				data = cd.YUVtoRGB(1)
				# Sanity check: expect 4 bytes per pixel.
				if self.maxx*self.maxy*4 <> len(data):
					print 'maxx,maxy,exp,got=', self.maxx,
					print self.maxy,self.maxx*self.maxy*4,
					print len(data)
					fl.showmessage('Wrong sized data')
					return 0
				if self.rgb24_size <> 1:
					data = imageop.scale(data, 4, \
						  self.maxx, self.maxy, x, y)
			if self.use_jpeg:
				import jpeg
				data = jpeg.compress(data, x, y, 4)
			if self.use_compress:
				data = self.compressor.Compress(1, data)
		cd.UnlockCaptureData()
		self.end_cont()
		if timecode == None:
			# Default timecode: 25 fps -> 40 msec per frame.
			timecode = (self.nframes+1) * (1000/25)
		return self.write_frame(timecode, data)
コード例 #5
0
#! /usr/bin/env python
コード例 #6
0
def main(use_rgbimg=1):

    # Create binary test files
    uu.decode(get_qualified_path('testrgb'+os.extsep+'uue'), 'test'+os.extsep+'rgb')

    if use_rgbimg:
        image, width, height = getrgbimage('test'+os.extsep+'rgb')
    else:
        image, width, height = getimage('test'+os.extsep+'rgb')

    # Return the selected part of image, which should by width by height
    # in size and consist of pixels of psize bytes.
    if verbose:
        print 'crop'
    newimage = imageop.crop (image, 4, width, height, 0, 0, 1, 1)

    # Return image scaled to size newwidth by newheight. No interpolation
    # is done, scaling is done by simple-minded pixel duplication or removal.
    # Therefore, computer-generated images or dithered images will
    # not look nice after scaling.
    if verbose:
        print 'scale'
    scaleimage = imageop.scale(image, 4, width, height, 1, 1)

    # Run a vertical low-pass filter over an image. It does so by computing
    # each destination pixel as the average of two vertically-aligned source
    # pixels. The main use of this routine is to forestall excessive flicker
    # if the image two vertically-aligned source pixels,  hence the name.
    if verbose:
        print 'tovideo'
    videoimage = imageop.tovideo (image, 4, width, height)

    # Convert an rgb image to an 8 bit rgb
    if verbose:
        print 'rgb2rgb8'
    greyimage = imageop.rgb2rgb8(image, width, height)

    # Convert an 8 bit rgb image to a 24 bit rgb image
    if verbose:
        print 'rgb82rgb'
    image = imageop.rgb82rgb(greyimage, width, height)

    # Convert an rgb image to an 8 bit greyscale image
    if verbose:
        print 'rgb2grey'
    greyimage = imageop.rgb2grey(image, width, height)

    # Convert an 8 bit greyscale image to a 24 bit rgb image
    if verbose:
        print 'grey2rgb'
    image = imageop.grey2rgb(greyimage, width, height)

    # Convert a 8-bit deep greyscale image to a 1-bit deep image by
    # thresholding all the pixels. The resulting image is tightly packed
    # and is probably only useful as an argument to mono2grey.
    if verbose:
        print 'grey2mono'
    monoimage = imageop.grey2mono (greyimage, width, height, 0)

    # monoimage, width, height = getimage('monotest.rgb')
    # Convert a 1-bit monochrome image to an 8 bit greyscale or color image.
    # All pixels that are zero-valued on input get value p0 on output and
    # all one-value input pixels get value p1 on output. To convert a
    # monochrome  black-and-white image to greyscale pass the values 0 and
    # 255 respectively.
    if verbose:
        print 'mono2grey'
    greyimage = imageop.mono2grey (monoimage, width, height, 0, 255)

    # Convert an 8-bit greyscale image to a 1-bit monochrome image using a
    # (simple-minded) dithering algorithm.
    if verbose:
        print 'dither2mono'
    monoimage = imageop.dither2mono (greyimage, width, height)

    # Convert an 8-bit greyscale image to a 4-bit greyscale image without
    # dithering.
    if verbose:
        print 'grey2grey4'
    grey4image = imageop.grey2grey4 (greyimage, width, height)

    # Convert an 8-bit greyscale image to a 2-bit greyscale image without
    # dithering.
    if verbose:
        print 'grey2grey2'
    grey2image = imageop.grey2grey2 (greyimage, width, height)

    # Convert an 8-bit greyscale image to a 2-bit greyscale image with
    # dithering. As for dither2mono, the dithering algorithm is currently
    # very simple.
    if verbose:
        print 'dither2grey2'
    grey2image = imageop.dither2grey2 (greyimage, width, height)

    # Convert a 4-bit greyscale image to an 8-bit greyscale image.
    if verbose:
        print 'grey42grey'
    greyimage = imageop.grey42grey (grey4image, width, height)

    # Convert a 2-bit greyscale image to an 8-bit greyscale image.
    if verbose:
        print 'grey22grey'
    image = imageop.grey22grey (grey2image, width, height)

    # Cleanup
    unlink('test'+os.extsep+'rgb')
コード例 #7
0
ファイル: video2rgb.py プロジェクト: mcyril/ravel-ftn
#! /usr/bin/env python
コード例 #8
0
			break
		if decompress:
			data = vin.decompress(data)
		nin = nin + 1
		if regen:
			tout = nin * regen
		else:
			tout = tin
		tout = int(tout / speed)
		if tout - told < mindelta:
			continue
		told = tout
		if newtype:
			data = convert(data, inwidth, inheight)
		if scale:
			data = imageop.scale(data, vout.bpp/8, \
				  inwidth, inheight, newwidth, newheight)
		if flip:
			x0, y0 = 0, 0
			x1, y1 = newwidth-1, newheight-1
			if vin.upside_down <> vout.upside_down:
				y1, y0 = y0, y1
			if vin.mirror_image <> vout.mirror_image:
				x1, x0 = x0, x1
			data = imageop.crop(data, vout.bpp/8, \
				  newwidth, newheight, x0, y0, x1, y1)
		print 'Writing frame', nout
		vout.writeframe(tout, data, cdata)
		nout = nout + 1

	vout.close()
	vin.close()
コード例 #9
0
def main(use_rgbimg=1):

    # Create binary test files
    uu.decode(get_qualified_path('testrgb' + os.extsep + 'uue'),
              'test' + os.extsep + 'rgb')

    if use_rgbimg:
        image, width, height = getrgbimage('test' + os.extsep + 'rgb')
    else:
        image, width, height = getimage('test' + os.extsep + 'rgb')

    # Return the selected part of image, which should by width by height
    # in size and consist of pixels of psize bytes.
    if verbose:
        print 'crop'
    newimage = imageop.crop(image, 4, width, height, 0, 0, 1, 1)

    # Return image scaled to size newwidth by newheight. No interpolation
    # is done, scaling is done by simple-minded pixel duplication or removal.
    # Therefore, computer-generated images or dithered images will
    # not look nice after scaling.
    if verbose:
        print 'scale'
    scaleimage = imageop.scale(image, 4, width, height, 1, 1)

    # Run a vertical low-pass filter over an image. It does so by computing
    # each destination pixel as the average of two vertically-aligned source
    # pixels. The main use of this routine is to forestall excessive flicker
    # if the image two vertically-aligned source pixels,  hence the name.
    if verbose:
        print 'tovideo'
    videoimage = imageop.tovideo(image, 4, width, height)

    # Convert an rgb image to an 8 bit rgb
    if verbose:
        print 'rgb2rgb8'
    greyimage = imageop.rgb2rgb8(image, width, height)

    # Convert an 8 bit rgb image to a 24 bit rgb image
    if verbose:
        print 'rgb82rgb'
    image = imageop.rgb82rgb(greyimage, width, height)

    # Convert an rgb image to an 8 bit greyscale image
    if verbose:
        print 'rgb2grey'
    greyimage = imageop.rgb2grey(image, width, height)

    # Convert an 8 bit greyscale image to a 24 bit rgb image
    if verbose:
        print 'grey2rgb'
    image = imageop.grey2rgb(greyimage, width, height)

    # Convert a 8-bit deep greyscale image to a 1-bit deep image by
    # thresholding all the pixels. The resulting image is tightly packed
    # and is probably only useful as an argument to mono2grey.
    if verbose:
        print 'grey2mono'
    monoimage = imageop.grey2mono(greyimage, width, height, 0)

    # monoimage, width, height = getimage('monotest.rgb')
    # Convert a 1-bit monochrome image to an 8 bit greyscale or color image.
    # All pixels that are zero-valued on input get value p0 on output and
    # all one-value input pixels get value p1 on output. To convert a
    # monochrome  black-and-white image to greyscale pass the values 0 and
    # 255 respectively.
    if verbose:
        print 'mono2grey'
    greyimage = imageop.mono2grey(monoimage, width, height, 0, 255)

    # Convert an 8-bit greyscale image to a 1-bit monochrome image using a
    # (simple-minded) dithering algorithm.
    if verbose:
        print 'dither2mono'
    monoimage = imageop.dither2mono(greyimage, width, height)

    # Convert an 8-bit greyscale image to a 4-bit greyscale image without
    # dithering.
    if verbose:
        print 'grey2grey4'
    grey4image = imageop.grey2grey4(greyimage, width, height)

    # Convert an 8-bit greyscale image to a 2-bit greyscale image without
    # dithering.
    if verbose:
        print 'grey2grey2'
    grey2image = imageop.grey2grey2(greyimage, width, height)

    # Convert an 8-bit greyscale image to a 2-bit greyscale image with
    # dithering. As for dither2mono, the dithering algorithm is currently
    # very simple.
    if verbose:
        print 'dither2grey2'
    grey2image = imageop.dither2grey2(greyimage, width, height)

    # Convert a 4-bit greyscale image to an 8-bit greyscale image.
    if verbose:
        print 'grey42grey'
    greyimage = imageop.grey42grey(grey4image, width, height)

    # Convert a 2-bit greyscale image to an 8-bit greyscale image.
    if verbose:
        print 'grey22grey'
    image = imageop.grey22grey(grey2image, width, height)

    # Cleanup
    unlink('test' + os.extsep + 'rgb')
コード例 #10
0
ファイル: test_imageop.py プロジェクト: mcyril/ravel-ftn
#! /usr/bin/env python
コード例 #11
0
ファイル: test_imageop.py プロジェクト: mcyril/ravel-ftn
#! /usr/bin/env python
コード例 #12
0
ファイル: video2rgb.py プロジェクト: mcyril/ravel-ftn
#! /usr/bin/env python
コード例 #13
0
#! /usr/bin/env python
# Universal (non-interactive) CMIF video file copier.

# Possibilities:
#
# - Manipulate the time base:
#   = resample at a fixed rate
#   = divide the time codes by a speed factor (to make it go faster/slower)
#   = drop frames that are less than n msec apart (to accommodate slow players)
# - Convert to a different format
# - Magnify (scale) the image

# Usage function (keep this up-to-date if you change the program!)
def usage():
	print 'Usage: Vcopy [options] [infile [outfile]]'
	print
	print 'Options:'
	print
	print '-t type    : new image type (default unchanged)'
	print
	print '-M magnify : image magnification factor (default unchanged)'
	print '-w width   : output image width (default height*4/3 if -h used)'
	print '-h height  : output image height (default width*3/4 if -w used)'
	print
	print '-p pf      : new x and y packfactor (default unchanged)'
	print '-x xpf     : new x packfactor (default unchanged)'
	print '-y ypf     : new y packfactor (default unchanged)'
	print
	print '-m delta   : drop frames closer than delta msec (default 0)'
	print '-r delta   : regenerate input time base delta msec apart'
コード例 #14
0
ファイル: Vb.py プロジェクト: mcyril/ravel-ftn
#! /usr/bin/env python