def test_iba_deep_holdout():
    """Exercise ImageBufAlgo.deep_holdout on a small hand-built deep image."""
    print("\nTesting ImageBufAlgo.deep_holdout...")
    spec = oiio.ImageSpec(6, 1, 6, oiio.FLOAT)
    spec.deep = True
    spec.channelformats = (oiio.TypeDesc.TypeHalf, oiio.TypeDesc.TypeHalf,
                           oiio.TypeDesc.TypeHalf, oiio.TypeDesc.TypeHalf,
                           oiio.TypeDesc.TypeFloat, oiio.TypeDesc.TypeFloat)
    spec.channelnames = ("R", "G", "B", "A", "Z", "Zback")
    src = oiio.ImageBuf(spec)
    # Source image layout:
    #   pixel 0: empty
    #   pixel 1: one sample close
    add_ib_sample(src, 1, 0, (0.5, 0.0, 0.0, 0.75, 10.0, 10.5))
    #   pixel 2: one sample far
    add_ib_sample(src, 2, 0, (0.5, 0.0, 0.0, 0.75, 20.0, 20.5))
    #   pixel 3: one sample close, one far
    add_ib_sample(src, 3, 0, (0.5, 0.0, 0.0, 0.75, 10.0, 10.5))
    add_ib_sample(src, 3, 0, (0.5, 0.0, 0.0, 0.75, 20.0, 20.5))
    #   pixel 4: three samples, one spans the threshold
    add_ib_sample(src, 4, 0, (0.5, 0.0, 0.0, 0.75, 10.0, 10.5))
    add_ib_sample(src, 4, 0, (0.5, 0.0, 0.0, 0.75, 15.0, 16.0))
    add_ib_sample(src, 4, 0, (0.5, 0.0, 0.0, 0.75, 20.0, 20.5))
    print_deep_imagebuf(src, "Input image")

    # Holdout image: two samples per pixel, depth increasing left to right.
    hold = oiio.ImageBuf(spec)
    for col in range(6):
        add_ib_sample(hold, col, 0, (0, 0, 0, 0.5, 12.0, 12.5))
        add_ib_sample(hold, col, 0,
                      (0, 0, 0, 1.0, 15.0 + 0.1 * col, 15.0 + 0.1 * col + 0.1))
    print_deep_imagebuf(hold, "Holdout image")

    result = oiio.ImageBuf()
    oiio.ImageBufAlgo.deep_holdout(result, src, hold)
    print_deep_imagebuf(result, "Result after holdout")
def processFrame(number, inDir, inFile, outDir, outFileType, fileType):
    """Read a frame, downscale it to quarter resolution, add a border and
    burned-in slate text, and write the result.

    number      -- frame number, rendered into the slate text
    inDir/inFile -- location of the source image
    outDir      -- destination directory
    outFileType -- extension of the written file (e.g. 'png')
    fileType    -- extension of the source file, stripped from inFile
    """
    buf = oiio.ImageBuf(inDir + '/' + inFile)
    spec = buf.spec()
    print('wxh {} {}'.format(spec.width, spec.height))

    # Quarter-resolution size.  Use integer division: ROI, ImageSpec and
    # paste/render_text coordinates must be ints (spec.width / 4 is a float
    # on Python 3 and would raise).
    qw = spec.width // 4
    qh = spec.height // 4
    cropped = oiio.ImageBuf()
    oiio.ImageBufAlgo.resize(cropped, buf,
                             roi=oiio.ROI(0, qw, 0, qh, 0, 1, 0, 3))
    #cropped.write( inFileName[0:inFileName.find('.') ]+'.png')

    borderSize = 50
    final = oiio.ImageBuf(
        oiio.ImageSpec(qw + borderSize, qh + borderSize, 3, oiio.FLOAT))
    oiio.ImageBufAlgo.fill(final, [0.1, 0.1, 0.1])
    oiio.ImageBufAlgo.paste(final, borderSize // 2, borderSize // 2, 0, 0,
                            cropped)
    oiio.ImageBufAlgo.render_text(final, borderSize // 2, 20,
                                  "Show : Inferno : Shot 01", 20,
                                  "OpenSans-Bold")
    frame = 'Frame {0:05d} File {1}'.format(number, inFile)
    oiio.ImageBufAlgo.render_text(final, borderSize // 2,
                                  qh + borderSize - 5, frame, 20,
                                  "OpenSans-Bold")

    # Build the output path once so the write and the log line cannot drift.
    outPath = (outDir + '/' + inFile[0:inFile.find('.' + fileType)]
               + '.' + outFileType)
    final.write(outPath)
    print('Writing {}'.format(outPath))
def get_rawpixels_from_file(filename, scale_image=1): """ Using OpenImageIO get raw pixels from an image file for previewing purposes (uint8). """ import math # TODO: Migrate it outside callbacks.py try: import OpenImageIO as oiio except: print "Cant' find OpenImageIO." return None, None, None source = oiio.ImageBuf(str(filename)) if not source: return None, None, None # OIIO to get raw uint pixels rgb w = int(math.ceil(source.oriented_width * scale_image)) h = int(math.ceil(source.oriented_height * scale_image)) dest = oiio.ImageBuf(oiio.ImageSpec(w, h, 3, oiio.UINT8)) # DeLinearize optionally if source.spec().format in (oiio.TypeDesc(oiio.HALF), oiio.TypeDesc(oiio.FLOAT)): oiio.ImageBufAlgo.colorconvert(source, source, "linear", "sRGB") dest.copy(source, oiio.UINT8) roi = oiio.ROI(0, w, 0, h, 0, 1, 0, 3) pixels = dest.get_pixels(oiio.UINT8, roi) return pixels, w, h
def _perform(self):
    """
    Perform the task.
    """
    import OpenImageIO as oiio

    for crawler in self.crawlers():
        width = self.option('width')
        height = self.option('height')

        # width/height options may be string templates; resolve per crawler
        if isinstance(width, basestring):
            width = int(Template(width).valueFromCrawler(crawler))
        if isinstance(height, basestring):
            height = int(Template(height).valueFromCrawler(crawler))

        targetFilePath = Crawler.Fs.Image.OiioCrawler.supportedString(
            self.target(crawler))

        # create the target directory automatically when it does not exist
        try:
            os.makedirs(os.path.dirname(targetFilePath))
        except OSError:
            pass

        # read the source image that is going to be resized
        sourceBuf = oiio.ImageBuf(
            Crawler.Fs.Image.OiioCrawler.supportedString(
                crawler.var('filePath')))
        sourceSpec = sourceBuf.spec()

        # spec describing the resized output (same channels/format)
        targetSpec = oiio.ImageSpec(
            width, height, sourceSpec.nchannels, sourceSpec.format)
        targetBuf = oiio.ImageBuf(targetSpec)

        # perform the resize across all available cores
        oiio.ImageBufAlgo.resize(
            targetBuf, sourceBuf, nthreads=multiprocessing.cpu_count())

        # optionally force an RGBA output
        if self.option('convertToRGBA'):
            rgbaBuf = oiio.ImageBuf()
            oiio.ImageBufAlgo.channels(
                rgbaBuf, targetBuf, ("R", "G", "B", "A"))
            targetBuf = rgbaBuf

        # saving target resized image
        targetBuf.write(targetFilePath)

    # default result based on the target filePath
    return super(ResizeImageTask, self)._perform()
def doCompare(imgA='', imgB=''):
    """Return True when imgA and imgB match within a tiny tolerance."""
    # Strip any extra channels from the di-matte image before comparing.
    rgbOnly = oiio.ImageBuf()
    oiio.ImageBufAlgo.channels(rgbOnly, oiio.ImageBuf(imgB), (0, 1, 2))
    results = oiio.CompareResults()
    oiio.ImageBufAlgo.compare(oiio.ImageBuf(imgA), rgbOnly,
                              1.0e-6, 1.0e-6, results)
    # Identical when no pixel warned or failed the thresholds.
    return results.nwarn == 0 and results.nfail == 0
def createMasterBuf(maxwidth=2048, maxheight=858, printFormat=True, nrow=5,
                    ncol=13, space=40, seq='s0265', task='compo_comp',
                    topBar=True, bottomBar=True):
    # create the master buffer
    #
    # Lays out a contact-sheet "master" image: room for an nrow x ncol grid
    # of shot thumbnails (each up to maxwidth x maxheight, separated by
    # `space`), framed by a white border, with optional top/bottom bars and
    # the task/sequence names rendered as large text.
    # Returns (masterBuf, masterBufwidth, masterBufHeight).

    # area containing the images of sequence
    widthShotArea = (maxwidth * nrow) + (space * (nrow + 1))
    heightShotArea = (maxheight * ncol) + (space * (ncol + 1))
    # 578 px margin on each side; extra vertical room for the title blocks
    masterBufwidth = widthShotArea + (578 * 2)
    masterBufHeight = int(heightShotArea + (4 * maxheight) + (2 * 578))
    if printFormat:
        # force an ISO-paper-like (1:1.414) aspect ratio for printing
        masterBufHeight = int(masterBufwidth * 1.414)
    masterBuf = oiio.ImageBuf(
        oiio.ImageSpec(masterBufwidth, masterBufHeight, 3, oiio.FLOAT))
    # create the white border: a filled white box, then a black box inset
    # 60 px further so only a white frame remains visible
    oiio.ImageBufAlgo.render_box(masterBuf, 518, 518, masterBufwidth - 518,
                                 masterBufHeight - 518, (1, 1, 1, 0), True)
    oiio.ImageBufAlgo.render_box(masterBuf, 578, 578, masterBufwidth - 578,
                                 masterBufHeight - 578, (0, 0, 0, 0), True)
    # top bar
    if topBar:
        oiio.ImageBufAlgo.render_box(masterBuf, 578,
                                     (578 + (2 * maxheight)) - 60,
                                     masterBufwidth - 578,
                                     578 + (2 * maxheight),
                                     (1, 1, 1, 0), True)
    # bottom bar
    if bottomBar:
        oiio.ImageBufAlgo.render_box(masterBuf, 578,
                                     578 + heightShotArea + (2 * maxheight),
                                     masterBufwidth - 578,
                                     578 + heightShotArea + (2 * maxheight) + 60,
                                     (1, 1, 1, 1), True)
    # adding the task,seq number
    oiio.ImageBufAlgo.render_text(masterBuf, int(masterBufwidth / 2) + 200,
                                  int(masterBufHeight - (maxheight + 200)),
                                  task.upper(), 400, fontname=_FONT_,
                                  textcolor=(1, 1, 1, 1))
    oiio.ImageBufAlgo.render_text(masterBuf,
                                  x=int((masterBufwidth / 2) - (1.35 * maxwidth)),
                                  y=int(masterBufHeight - (maxheight + 200)),
                                  text=seq, fontsize=1050, fontname=_FONT_,
                                  textcolor=(1, 1, 1, 1))
    return masterBuf, masterBufwidth, masterBufHeight
def create_test_img_array(self):
    """Build a 6x2x3 half-float edge-case test pattern and matching spec.

    Returns (img_array, img_spec).  An ImageBuf is created from the spec
    but not returned; the pixel upload loop is left commented out.
    """
    # Extremes of the float16 range plus values near the denormal floor.
    neg_clip = np.finfo(np.dtype('f16')).min
    neg_tiniest = -4 * np.finfo(np.float16).tiny
    zero = 0.0
    pos_tiniest = 4 * np.finfo(np.float16).tiny
    pos_clip = np.finfo(np.dtype('f16')).max
    img_array = np.array(
        [[[zero, pos_clip, zero], [neg_clip, neg_tiniest, pos_clip]],
         [[neg_clip, pos_tiniest, pos_clip], [zero, zero, zero]],
         [[pos_clip, zero, zero], [neg_tiniest, pos_clip, neg_clip]],
         [[pos_tiniest, pos_clip, neg_clip], [zero, zero, zero]],
         [[zero, zero, pos_clip], [pos_clip, neg_clip, neg_tiniest]],
         [[pos_clip, neg_clip, pos_tiniest], [zero, zero, zero]]])
    rows, cols, chans = img_array.shape
    img_spec = oiio.ImageSpec(cols, rows, chans, oiio.TypeHalf)
    buf = oiio.ImageBuf(img_spec)
    # for row in range(rows):
    #     for col in range(cols):
    #         buf.setpixel(col, row, img_array[row][col])
    return img_array, img_spec
def findMinMaxBuff(filename='', output='both'): imgBuff = oiio.ImageBuf(filename) imgBuff.read(0, 0, True) spec = imgBuff.spec() maxValue = 0.0 minValue = 10.0 #for z in range(spec.depth): for y in range(spec.height): for x in range(spec.width): pixValue = imgBuff.getchannel(x, y, 0, 1) #pixValue = imgBuff.getpixel(x,y)[0] if output == 'both': if pixValue < minValue: minValue = pixValue if pixValue > maxValue: maxValue = pixValue elif output == 'min': if pixValue < minValue: minValue = pixValue elif output == 'max': if pixValue > maxValue: maxValue = pixValue else: print 'wrong type of ouput' return imgBuff.clear() if output == 'min': return minValue elif output == 'max': return maxValue else: return minValue, maxValue
def minMaxOIIO(filename='', output='both'):
    # Debug/scratch utility: probe luminance extremes of an image.
    # Reads the image twice (once via ImageInput into a PIL float image,
    # once as an ImageBuf), samples the ImageBuf at normalized coordinates
    # listed in a hard-coded temp file, prints the largest sampled value,
    # then returns the PIL extrema as min, max or (min, max) per `output`.
    file = oiio.ImageInput.open(filename)
    pixels = file.read_image(0, 0, oiio.FLOAT)
    spec = file.spec()
    size = (spec.width, spec.height)
    # PIL "F" mode: single-channel 32-bit float image from the raw pixels
    rgbf = Image.frombytes("F", size, pixels)
    buf = oiio.ImageBuf(filename)
    valuePix = []  # unused scratch list kept from earlier experiments
    # for x in range(0,spec.width/3):
    #     for y in range(0,spec.height/3):
    #         valuePix.append(rgbf.getpixel((x,y)))
    # print rgbf.getpixel((0,0))
    # print rgbf.getpixel((0,spec.height-1))
    # print rgbf.getpixel((spec.width-1,0))
    # HACK: hard-coded scratch file of normalized (x, y) sample positions.
    inputFile = open('/tmp/1673033690.tx', 'r')
    start = -10.0
    for line in inputFile.readlines():
        # SECURITY NOTE(review): eval() on file content — acceptable only
        # because the file is produced locally; never point this at
        # untrusted data.
        maxLum = buf.getchannel(int(eval(line)[0] * spec.width),
                                int(eval(line)[1] * spec.height), 0, 1)
        #maxLum = rgbf.getpixel((eval(line)[0]*spec.width,eval(line)[1]*spec.height))
        # maxLum = rgbf.getpixel(eval(line))
        if maxLum > start:
            start = maxLum
    print start
    extrema = rgbf.getextrema()
    if output == 'min':
        return extrema[0]
    elif output == 'max':
        return extrema[1]
    else:
        return extrema
def process(self):
    """Receive an image plus target width/height and emit the resized image."""
    image_port = self.input("image").receive()
    if image_port.isEOP():
        return False
    source = image_port.value()

    width_port = self.input("width").receive()
    if width_port.isEOP():
        return False
    target_w = width_port.value()
    width_port.drop()

    height_port = self.input("height").receive()
    if height_port.isEOP():
        return False
    target_h = height_port.value()
    height_port.drop()

    # Output buffer keeps the source's channel count and base type.
    src_spec = source.spec()
    resized = OpenImageIO.ImageBuf(
        OpenImageIO.ImageSpec(target_w, target_h, src_spec.nchannels,
                              src_spec.format.basetype))
    OpenImageIO.ImageBufAlgo.resize(resized, source)
    image_port.drop()
    self.output("resized").send(resized)
    return True
def frame_thread(self):
    """Worker: pull stitch jobs from the queue and assemble tiles into frames.

    Each queue item is (frame, tiles_path, images_path).  The worker loops
    until the queue is empty, then exits.
    """
    while True:
        # Single poll point instead of the original duplicated
        # get_nowait/except blocks before and inside the loop.
        try:
            stitch_args = self.stitch_queue.get_nowait()
        except queue.Empty:
            print("Worker exiting")
            break

        frame = stitch_args[0]
        tiles_path = Path(stitch_args[1])
        images_path = Path(stitch_args[2])
        frame_path = str(os.path.join(images_path, frame["outfile"]))
        print("Assembling frame {}".format(frame_path))

        # The first tile supplies the channel layout/format for the frame.
        first_tile_path = os.path.join(tiles_path,
                                       frame["tiles"][0]["outfile"])
        if not os.path.isfile(first_tile_path):
            print("Could not find first tile. Aborting frame stitch")
            continue

        first_tile = oiio.ImageBuf(str(first_tile_path))
        spec = first_tile.spec()
        frame_buf = oiio.ImageBuf(
            oiio.ImageSpec(frame["res_x"], frame["res_y"],
                           spec.nchannels, spec.format))

        for tile in frame["tiles"]:
            tile_path = os.path.join(tiles_path, tile["outfile"])
            if not os.path.isfile(tile_path):
                continue  # skip tiles that were never rendered
            tile_buf = oiio.ImageBuf(str(tile_path))
            oiio.ImageBufAlgo.paste(frame_buf, tile["coords"][0],
                                    tile["coords"][1], 0, 0, tile_buf)

        print("Writing {}".format(frame_path))
        # makedirs instead of mkdir: the original os.mkdir failed when any
        # parent directory was missing; exist_ok replaces the try/except.
        os.makedirs(os.path.dirname(frame_path), exist_ok=True)
        frame_buf.write(str(frame_path))
def process(self):
    """Load the image at the received path and emit it as an ImageBuf."""
    path_port = self.input("path").receive()
    if path_port.isEOP():
        return False
    image = OpenImageIO.ImageBuf(path_port.value())
    self.output("image").send(image)
    path_port.drop()
    return True
def NumpyArrayToImageBuf(self):
    """ Converts a np.ndarray to an OIIO ImageBuf image.

    :returns: ``oiio.ImageBuf`` object
    """
    # Output is always a 4-channel uint16 buffer; only the array's
    # height/width drive the spec.
    height, width = self._img.shape[:2]
    image_spec = oiio.ImageSpec(width, height, 4, "uint16")
    image_buf = oiio.ImageBuf(image_spec)
    # An all-default ROI means "the whole image".
    image_buf.set_pixels(oiio.ROI(), self._img)
    return image_buf
def tiles_from_heatmap(frame, depth, threshold, heatmap_dir):
    """Load a rendered heatmap, scale it to the final frame resolution and
    split it into render tiles with a quadtree driven by 'raycount'."""
    heatmap_path = os.path.join(heatmap_dir, frame["outfile"])
    print("Frame path: {}".format(heatmap_path))
    heatmap_buf = oiio.ImageBuf(str(heatmap_path))

    # Copy the heatmap's spec but stretch it to the frame resolution.
    target_spec = oiio.ImageSpec(heatmap_buf.spec())
    target_spec.width = frame["res_x"]
    target_spec.full_width = frame["res_x"]
    target_spec.height = frame["res_y"]
    target_spec.full_height = frame["res_y"]
    resized_buf = oiio.ImageBuf(target_spec)
    oiio.ImageBufAlgo.resize(resized_buf, heatmap_buf)

    # Recursively split the resized heatmap into quads down to `depth`,
    # testing the 'raycount' channel against the threshold.
    resized_spec = resized_buf.spec()
    quadtree = QuadSplit(Rect(0, resized_spec.width, 0, resized_spec.height),
                         1, depth, ["raycount"], threshold)
    quadtree.test_image(resized_buf)
    return quadtree.get_quads()
def test_deep():
    """Test write and read of deep data."""
    # Let's try writing one
    print("\nWriting deep buffer...")
    deepbufout_spec = oiio.ImageSpec(2, 2, 5, oiio.FLOAT)
    deepbufout_spec.channelnames = ("R", "G", "B", "A", "Z")
    deepbufout_spec.deep = True
    deepbufout = oiio.ImageBuf(deepbufout_spec)
    deepbufout.set_deep_samples(x=1, y=0, z=0, nsamples=2)
    deepbufout.set_deep_value(x=1, y=0, z=0, channel=0, sample=0, value=0.42)
    deepbufout.set_deep_value(x=1, y=0, z=0, channel=4, sample=0, value=42.0)
    deepbufout.set_deep_value(x=1, y=0, z=0, channel=0, sample=1, value=0.47)
    deepbufout.set_deep_value(x=1, y=0, z=0, channel=4, sample=1, value=43.0)
    # Also insert some new samples
    deepbufout.deep_insert_samples(x=1, y=0, z=0, samplepos=1, nsamples=2)
    deepbufout.set_deep_value(x=1, y=0, z=0, channel=0, sample=1, value=1.1)
    deepbufout.set_deep_value(x=1, y=0, z=0, channel=1, sample=1, value=2.2)
    deepbufout.set_deep_value(x=1, y=0, z=0, channel=2, sample=1, value=2.3)
    deepbufout.set_deep_value(x=1, y=0, z=0, channel=3, sample=1, value=1.0)
    # BUG FIX: the Z value (42.25) belongs in channel 4; the original wrote
    # channel=3 a second time, clobbering the alpha just set above.
    deepbufout.set_deep_value(x=1, y=0, z=0, channel=4, sample=1, value=42.25)
    deepbufout.set_deep_value(x=1, y=0, z=0, channel=0, sample=2, value=0.1)
    deepbufout.set_deep_value(x=1, y=0, z=0, channel=1, sample=2, value=0.2)
    deepbufout.set_deep_value(x=1, y=0, z=0, channel=2, sample=2, value=0.3)
    deepbufout.set_deep_value(x=1, y=0, z=0, channel=3, sample=2, value=1.0)
    # BUG FIX: same duplicate-channel mistake for sample 2 (Z = 42.5).
    deepbufout.set_deep_value(x=1, y=0, z=0, channel=4, sample=2, value=42.5)
    # But delete the first one
    deepbufout.deep_erase_samples(x=1, y=0, z=0, samplepos=1, nsamples=1)
    # Save
    deepbufout.write("deepbuf.exr")

    # And read it back
    print("\nReading back deep buffer:")
    deepbufin = oiio.ImageBuf("deepbuf.exr")
    deepbufin_spec = deepbufin.spec()
    dd = deepbufin.deepdata()
    for p in range(dd.pixels):
        ns = dd.samples(p)
        if ns > 1:
            print("Pixel", p // deepbufin_spec.width,
                  p % deepbufin_spec.width, "had", ns, "samples")
            for s in range(ns):
                print("Sample", s)
                for c in range(dd.channels):
                    print("\tc {0} : {1:.3f}".format(c, dd.deep_value(p, c, s)))
def main():
    """Build CornField.usd: camera, ground plane and a point instancer that
    scatters corn models according to the pixels of cropMap.png."""
    stage = Usd.Stage.CreateNew('CornField.usd')
    world = UsdGeom.Xform.Define(stage, '/World')
    _addCamera(stage)
    buf = oiio.ImageBuf('cropMap.png')
    spec = buf.spec()
    divisor = 1
    # Integer division: these values feed range() and getpixel() and must
    # stay ints (plain / yields a float on Python 3 and breaks range()).
    width = spec.width // divisor
    depth = spec.height // divisor
    print("image dimensions {} {}".format(width, depth))
    instancer = UsdGeom.PointInstancer.Define(
        stage, world.GetPath().AppendChild('TreePointInstance'))
    prototypesPrim = stage.DefinePrim(
        instancer.GetPath().AppendChild('prototypes'))
    prototypesPrimPath = prototypesPrim.GetPath()
    _addGround(stage, width, depth)
    models = ['corn.usd', 'crushedCorn.usd']
    modelTargets = []
    for m in models:
        modelTargets.append(_addModel(m, prototypesPrimPath, stage))
    positions = []
    indices = []
    rotations = []
    rot = Gf.Rotation()
    # center the grid around the origin
    xstart = -width / 2.0
    ystart = -depth / 2.0
    print("x/y start {}{}".format(xstart, ystart))
    for y in range(0, depth // divisor):
        for x in range(0, width // divisor):
            pixel = buf.getpixel(x, y)
            # jitter each instance slightly off the grid, then scale down
            xpos = (xstart + (x) + random.uniform(-0.2, 0.2)) * 0.3
            ypos = (ystart + (y) + random.uniform(-0.2, 0.2)) * 0.3
            positions.append(Gf.Vec3f(xpos, 0, ypos))
            # random heading around the vertical axis
            rot = Gf.Rotation(Gf.Vec3d(0, 1, 0), random.uniform(0, 360))
            r = rot.GetQuaternion().GetReal()
            img = rot.GetQuaternion().GetImaginary()
            rotations.append(Gf.Quath(r, img[0], img[1], img[2]))
            # bright map pixels get prototype 0, dark ones prototype 1
            if pixel[0] > 0.0:
                indices.append(0)
            else:
                indices.append(1)
    instancer.CreatePositionsAttr(positions)
    instancer.CreateProtoIndicesAttr(indices)
    instancer.CreateOrientationsAttr(rotations)
    instancer.CreatePrototypesRel().SetTargets(modelTargets)
    stage.GetRootLayer().Save()
def main(): image = "Tictactoe-X.png" buf = oiio.ImageBuf(image) if buf.spec().alpha_channel < 0: raise RuntimeError( f"{image} does not have an identifiable alpha channel") root = generate_x(buf) with open("x-tree.csv", "w") as out_f: output(root, out_f)
def minMaxOIIO(filename='', output='both'):
    """Return the per-channel min, max or (min, max) stats for an image."""
    image = oiio.ImageBuf(filename)
    stats = oiio.ImageBufAlgo.computePixelStats(image)
    if output == 'min':
        return stats.min
    if output == 'max':
        return stats.max
    return (stats.min, stats.max)
def writeLayers( img=oiio.ImageBuf(), frameNb='0101', path='/tmp', channelDict={}): print 'writing layers for ' + path + ' frane number: ' + frameNb for key in sorted(channelDict.keys()): frameTmp = oiio.ImageBuf(img.spec()) oiio.ImageBufAlgo.channels(frameTmp, img, channelDict[key]) # create the path for the image and set it to be 8bit (suffisant for matte) shotPathLayer = path + '/' + key imageName = '/' + key + '.' + frameNb + '.exr' frameTmp.set_write_format(oiio.UINT8) # if it's the primary set the path and the output to be exr half float if key == 'd0': frameTmp.set_write_format(oiio.HALF) #frameTmp.set_write_tiles(0,0) shotPathLayer = path + '/primary' imageName = '/primary.' + frameNb + '.exr' # create the output path if it doesn't exist if not os.path.isdir(shotPathLayer): os.makedirs(shotPathLayer) frameTmp.write(shotPathLayer + imageName)
def test_tiff_remembering_config():
    """Check that unassociated-alpha read hints are honored for TIFFs."""
    # Write a TIFF whose alpha is flagged as unassociated.
    print("Testing write and read of unassociated:")
    spec = oiio.ImageSpec(2, 2, 4, "float")
    spec.attribute("oiio:UnassociatedAlpha", 1)
    wbuf = oiio.ImageBuf(spec)
    oiio.ImageBufAlgo.fill(wbuf, (0.5, 0.5, 0.5, 0.5))
    print(" writing: ", wbuf.get_pixels())
    wbuf.write("test_unassoc.tif")

    # Plain read, no hint.
    rbuf = oiio.ImageBuf("test_unassoc.tif")
    print("\n default reading as IB: ", rbuf.get_pixels())

    # Read again with a config hint asking to keep alpha unassociated.
    config = oiio.ImageSpec()
    config.attribute("oiio:UnassociatedAlpha", 1)
    rbuf = oiio.ImageBuf("test_unassoc.tif", 0, 0, config)
    print("\n reading as IB with unassoc hint: ", rbuf.get_pixels())

    # Same hint through ImageInput, reading scanlines out of order.
    print("\n reading as II with hint, read scanlines backward: ")
    ii = oiio.ImageInput.open("test_unassoc.tif", config)
    print(" [1] = ", ii.read_scanline(1))
    print(" [0] = ", ii.read_scanline(0))
    print("\n")
def check_criteria(self, criteria, dirname, identifier, files):
    """Check that the passed files match the timecode and naming we're
    looking for. Alternatively it will attempt to match frame number
    against source_range

    :param criteria: `dict` containing regex and timecode tests
    :param dirname: `str` with dirname of file location
    :param identifier: `str` filename with hashed frame number
    :param files: `list` of first and last file found
    :return: `bool` reflecting a match or not
    """
    # Prefer timecode matching; fall back to frame-number matching per file
    # when a file carries no timecode metadata.
    testname = 'timecode'
    for index, filename in enumerate(files):
        fullpath = os.path.join(dirname, filename)
        valid_path = criteria['regex'].search(fullpath)
        if not valid_path:
            return False

        # Unreadable image -> no match.
        buf = oiio.ImageBuf(fullpath)
        if buf.has_error:
            return False

        if index == 0:
            # First file of the pair: cache tc_in/fps for this sequence
            # (setdefault keeps any values cached by an earlier pass).
            value = self.file_cache[dirname][identifier].setdefault(
                'tc_in', get_timecode_str(buf))
            self.file_cache[dirname][identifier].setdefault(
                'fps', get_fps(buf))
        else:
            value = get_timecode_str(buf)

        # No TimeCode found. Try using frame number
        if not value:
            testname = 'frames'
            try:
                if index == 0:
                    # Cache the first frame number alongside tc_in/fps.
                    value = self.file_cache[dirname][
                        identifier].setdefault(
                            'first_frame',
                            int(frame_regex.search(filename).group()))
                else:
                    value = int(frame_regex.search(filename).group())
            except (ValueError, AttributeError):
                # No parsable frame number either; let the test decide.
                value = None

        # Run the configured test (timecode or frames) for this position.
        func, test_value = criteria['tests'][testname][index]
        if not func(test_value, value):
            return False
    return True
def openPSD():
    # Scratch/debug routine: load a multi-subimage Photoshop-derived TIFF
    # with unassociated alpha, color-convert each subimage towards the
    # compositing working space, tag it as acescg and write each subimage
    # out as a numbered float EXR.
    config = oiio.ImageSpec()
    config.attribute("oiio:UnassociatedAlpha", 1)
    inputFile = '/s/prodanim/ta/assets/Mattepaint/ldev_skydome_night/mattepaint/mattepaint_deliver/work/images/photoshop/migeotp_testaces/v003/ldev_skydome_night-mattepaint_deliver-migeotp_testaces-OL_02-v003.tif'
    Input = oiio.ImageBuf(
        '/s/prodanim/ta/assets/Mattepaint/ldev_skydome_night/mattepaint/mattepaint_deliver/work/images/photoshop/migeotp_testaces/v003/ldev_skydome_night-mattepaint_deliver-migeotp_testaces-OL_02-v003.tif'
    )
    Input.specmod().attribute("oiio:UnassociatedAlpha", 1)
    OutputName = '/s/prodanim/ta/_sandbox/duda/tmp/crap/donuts'
    BG = oiio.ImageBuf(
        '/s/prodanim/ta/assets/Mattepaint/ldev_skydome_night/mattepaint/mattepaint_deliver/work/images/photoshop/migeotp_testaces/v003/ldev_skydome_night-mattepaint_deliver-migeotp_testaces-BG-v003.tif'
    )
    for i in range(Input.nsubimages):
        # Re-read the buffer pointing at subimage i, keeping alpha
        # unassociated via the read-time config.
        config = oiio.ImageSpec()
        config.attribute("oiio:UnassociatedAlpha", 1)
        Input.reset(inputFile, i, 0, config)
        # Kept commented experiments: compositing over BG, un/premult and
        # channel shuffles that were being evaluated.
        # RGB = oiio.ImageBuf()
        # Alpha = oiio.ImageBuf()
        # tmp = oiio.ImageBuf()
        # print oiio.ImageBufAlgo.channels(BG, BG, ('R', 'G', 'B', 1.0))
        # print oiio.ImageBufAlgo.over(tmp,Input,BG)
        # print(oiio.geterror())
        #print oiio.ImageBufAlgo.unpremult(Input, Input)
        #print oiio.ImageBufAlgo.channels(RGB, Input, ('R', 'G', 'B', 1.0))
        #print oiio.ImageBufAlgo.channels(Alpha, Input, ('A', ))
        ##print oiio.ImageBufAlgo.colorconvert(tmp, tmp, 'matte_paint', 'acescg')
        #print oiio.ImageBufAlgo.channels(BG, RGB, (0,1,2))
        #print oiio.ImageBufAlgo.channel_append(Input, RGB,Alpha)
        # NOTE(review): two successive colorconverts are applied in a row;
        # presumably only one of the two source spaces is correct — confirm.
        print oiio.ImageBufAlgo.colorconvert(Input, Input, 'scene_linear', 'compositing_linear')
        print oiio.ImageBufAlgo.colorconvert(Input, Input, 'matte_paint', 'compositing_linear')
        Input.specmod().attribute("oiio:ColorSpace", 'acescg')
        #print oiio.ImageBufAlgo.premult(Input, Input)
        #print oiio.ImageBufAlgo.unpremult(Input,Input)
        #print oiio.ImageBufAlgo.premult(Input, Input)
        Input.set_write_format(oiio.FLOAT)
        # numbered output path: <OutputName>.<0000>.exr
        bla = OutputName + '.' + str(i).zfill(4) + '.exr'
        Input.write(bla)
        print(bla)
def convertImageOIIO(filename = '',fileOutName = '',format='tif'): inFile = oiio.ImageBuf(filename) if not inFile.has_error : oiio.ImageBufAlgo.colorconvert(inFile,inFile,'linear_srgb','acescg') if format != 'tif': inFile.set_write_format(oiio.UINT16) else: inFile.set_write_format(oiio.UINT8) inFile.write(fileOutName) print 'done' if inFile.has_error : print "Error writing ",fileOutName, ": ", inFile.geterror()
def convertImageOIIO(filename = '',fileOutName = '',format='tif'): inFile = oiio.ImageBuf(filename) if not inFile.has_error : oiio.ImageBufAlgo.ociofiletransform(inFile,inFile,"acescg") # if format != 'tif': # inFile.set_write_format(oiio.UINT16) # else: # inFile.set_write_format(oiio.UINT8) inFile.write(fileOutName) print 'done' if inFile.has_error : print "Error writing ",fileOutName, ": ", inFile.geterror()
def test_tiff_cmyk():
    """Exercise TIFF CMYK writing and the automatic CMYK->RGB translation."""
    filename = "test_cmyk.tif"
    print("Testing write and read of TIFF CMYK with auto RGB translation:")
    # Author a 2x2 file explicitly tagged as CMYK.
    spec = oiio.ImageSpec(2, 2, 4, "uint8")
    spec.attribute("tiff:ColorSpace", "CMYK")
    spec.channelnames = ("C", "M", "Y", "K")
    wbuf = oiio.ImageBuf(spec)
    oiio.ImageBufAlgo.fill(wbuf, (0.5, 0.0, 0.0, 0.5))
    print(" writing: ", wbuf.get_pixels())
    wbuf.write(filename)

    # Default read path translates CMYK to RGB automatically.
    rbuf = oiio.ImageBuf(filename)
    print("\n default reading as IB: ", rbuf.get_pixels())

    # rawcolor=1 asks for the untranslated CMYK channel data.
    config = oiio.ImageSpec()
    config.attribute("oiio:RawColor", 1)
    rbuf = oiio.ImageBuf(filename, 0, 0, config)
    print("\n reading as IB with rawcolor=1: ", rbuf.get_pixels())

    # ImageInput without the hint, reading scanlines in reverse order.
    print("\n reading as II with rawcolor=0, read scanlines backward: ")
    ii = oiio.ImageInput.open(filename)
    print(" [1] = ", ii.read_scanline(1))
    print(" [0] = ", ii.read_scanline(0))
    print("\n")
def read_image_cubemap(filename):
    # Read the 6 faces of a cubemap file into OpenCL float4 numpy images.
    # Each subimage is forced to RGBA (alpha filled with the constant 1.0),
    # reinterpreted as a float4 array and appended to the result list.
    image_array = []
    input = oiio.ImageInput.open(filename)
    spec = input.spec()
    width = spec.width
    height = spec.height
    print "reading {} - {}x{}x{}".format(filename, width, height, spec.nchannels)
    for sub in range(6):
        # open face `sub` directly as its own buffer
        buf = oiio.ImageBuf(filename, sub, 0)
        RGBA = oiio.ImageBuf()
        # channel shuffle: keep R,G,B and synthesize A=1.0
        oiio.ImageBufAlgo.channels(RGBA, buf, ("R", "G", "B", 1.0),
                                   ("R", "G", "B", "A"))
        pixels = RGBA.get_pixels(oiio.FLOAT)
        # reinterpret the flat float buffer as an array of cl float4 vectors
        # (numpy.getbuffer is Python-2-only; it was removed in Python 3)
        image = numpy.frombuffer(numpy.getbuffer(numpy.float32(pixels)),
                                 dtype=cl_array.vec.float4)
        # NOTE(review): reshape(width, height) is only row/column-correct
        # for square faces; for non-square images the axes would be
        # swapped — confirm against callers.
        image = image.reshape(width, height)
        image_array.append(image)
    # defensive check; the loop above always appends exactly 6 faces
    if len(image_array) != 6:
        print "not all faces have been read"
    return image_array
def main(): #convertExr() buf = oiio.ImageBuf( '/s/prodanim/ta/_sandbox/duda/render/granMaHouseInt/GranMaIntHouse.mov' ) print buf.nsubimages # spec = a.spec() # for i in range(len(spec.extra_attribs)): # print spec.extra_attribs[i].name # b = a.spec().extra_attribs # for i in b: # print a.spec().extra_attribs[i].name buf.clear()
def make_test_pattern1(filename, xres=288, yres=216):
    """Write a procedural test chart.

    Blue channel: a 16-pixel checkerboard with a one-pixel black frame,
    black squares near each corner and a single white dot inside each
    square; R and G encode x/1000 and y/1000 respectively.
    """
    buf = oiio.ImageBuf(oiio.ImageSpec(xres, yres, 3, oiio.FLOAT))
    for y in range(yres):
        for x in range(xres):
            # 16x16 checker.  Floor division is required: on Python 3,
            # x/16 is a float and the bitwise '&' would raise TypeError.
            b = 0.25 + 0.5 * float(((x // 16) & 1) ^ ((y // 16) & 1))
            # one-pixel black frame just inside the border
            if x == 1 or y == 1 or x == xres - 2 or y == yres - 2:
                b = 0.0
            # solid black squares near each corner
            if (((x >= 10 and x <= 20) or (x >= xres - 20 and x <= xres - 10))
                    and ((y >= 10 and y <= 20)
                         or (y >= yres - 20 and y <= yres - 10))):
                b = 0.0
            # single white marker dot inside each corner square
            if (x == 15 or x == xres - 15) and (y == 15 or y == yres - 15):
                b = 1.0
            buf.setpixel(x, y, (float(x) / 1000.0, float(y) / 1000.0, b))
    buf.write(filename)
def extractLayer(res={}, path='/tmp'): framePathRight = res['framePathCompoCompRight'] framePathLeft = res['framePathCompoCompLeft'] cutIn = res['cutIn'] cutOut = res['cutOut'] shotName = res['name'] # path to output the layers shotPath = path + '/' + shotName shotPathRight = shotPath + '/right' shotPathLeft = shotPath + '/left' # dictionary for the channels channelDict = { 'd0': (0, 1, 2), 'd1': (4, 5, 6), 'd2': (8, 9, 10), 'd3': (12, 13, 14), 'd4': (16, 17, 18), 'd5': (20, 21, 22), 'd6': (24, 25, 26) } for frameNb in range(cutIn, cutOut + 1): frameNbStr = str(frameNb).zfill(4) # replace the frame nb for the left and right images framePathNbLeft = framePathLeft.replace('%04d', frameNbStr) framePathNbRight = framePathRight.replace('%04d', frameNbStr) # open the left and right image frameInLeft = oiio.ImageBuf(framePathNbLeft) frameInLeft.read() if frameInLeft.has_error: print 'donuts', frameInLeft.geterror() frameInRight = oiio.ImageBuf(framePathNbRight) writeLayers(frameInLeft, frameNbStr, shotPathLeft, channelDict) writeLayers(frameInRight, frameNbStr, shotPathRight, channelDict)
def ingest(self, vendor):
    """Ingest this image sequence to psyop pipeline.

    Copies the frames to the psyop-side location (creating shot/step
    workspaces when needed), registers the sequence in shotgun, and
    submits a transgen — each stage is skipped when already done.

    Args:
        vendor (str): source vendor
    """
    _out = self.to_psy_file_seq()
    print ' - OUT', _out.path

    # Create images on psy side
    if not _out.exists():

        # Check asset/shot + step exists
        _root = tk2.TTRoot(_out.path)
        _step = tk2.TTStepRoot(_out.path)
        if not _step.exists():
            _root.create_workspaces(force=True)
            assert _step.exists()
        print ' - STEP EXISTS', _step.path

        # Copy images, remembering where they came from
        self.copy_to(_out)
        _out.cache_write('vendor_source', self.path)
        print ' - COPIED IMAGES'

    # Publish in shotgun
    if not _out.get_sg_data():
        _comment = 'From {} {}'.format(vendor, time.strftime('%m/%d/%y'))
        print ' - COMMENT', _comment
        _start, _end = self.find_range()
        # image dimensions are read from the first frame of the range
        _img = oiio.ImageBuf(self[_start])
        _data = {
            'width': _img.spec().width,
            'height': _img.spec().height,
            'start_frame': _start,
            'end_frame': _end}
        _out.register_in_shotgun(
            comment=_comment, complete=True, metadata=_data)

    # Transgen (dailies transcode) if no shotgun version exists yet
    if not self.has_sg_version():
        _start, _end = self.find_range()
        helper.process_submission_preset(
            _out.path, _start, _end, 'dailies-scene-referred',
            submit_to_farm=True)
        _out.cache_write('submitted transgen', True)