Exemplo n.º 1
0
    def updateTexture(self, main):
        """Load/refresh the full-resolution texture for the selected model.

        *main* is the currently selected model node; every other entry of
        self.models is reverted to its cached full texture (if present in
        tcache) or to its original base texture.  For *main* itself, the
        full-resolution source image is located on disk, preprocessed
        (vertical flip, optional histogram matching, vignette correction,
        rescale to video-card limits, channel filtering/equalization) and
        installed as a Panda3D Texture.  Results are stored in the
        module-level tcache, which is pruned to a fixed size at the end.

        NOTE(review): relies on module-level names tcache, args,
        do_histogram, histogram and clahe that are defined elsewhere in
        the file — confirm against the surrounding module.
        """
        # NOTE(review): dir_node is never used below.
        dir_node = getNode('/config/directories', True)
        
        # reset base textures
        for i, m in enumerate(self.models):
            if m != main:
                # Not the selected model: restore the cached full texture
                # if one exists, otherwise fall back to the base texture.
                if m.getName() in tcache:
                    fulltex = tcache[m.getName()][1]
                    self.models[i].setTexture(fulltex, 1)
                else:
                    if self.base_textures[i] != None:
                        self.models[i].setTexture(self.base_textures[i], 1)
            else:
                print(m.getName())
                # Selected model: reuse a cached texture when available.
                if m.getName() in tcache:
                    fulltex = tcache[m.getName()][1]
                    self.models[i].setTexture(fulltex, 1)
                    continue
                # Locate the full-resolution source image (.JPG or .jpg)
                # in the project directory or its images/ subdirectory.
                base, ext = os.path.splitext(m.getName())
                image_file = None
                search = [ args.project, os.path.join(args.project, 'images') ]
                for dir in search:
                    tmp1 = os.path.join(dir, base + '.JPG')
                    tmp2 = os.path.join(dir, base + '.jpg')
                    if os.path.isfile(tmp1):
                        image_file = tmp1
                    elif os.path.isfile(tmp2):
                        image_file = tmp2
                if not image_file:
                    print('Warning: no full resolution image source file found:', base)
                else:
                    # NOTE(review): 'if True' is a developer toggle; the
                    # else-branch (direct panda texture loading) is dead.
                    if True:
                        # example of passing an opencv image as a
                        # panda texture
                        print(base, image_file)
                        #image = proj.findImageByName(base)
                        #print(image)
                        rgb = cv2.imread(image_file, flags=cv2.IMREAD_ANYCOLOR|cv2.IMREAD_ANYDEPTH|cv2.IMREAD_IGNORE_ORIENTATION)
                        # flip vertically: OpenCV rows are top-down while
                        # texture coordinates run bottom-up
                        rgb = np.flipud(rgb)
                        if do_histogram:
                            rgb = histogram.match_neighbors(rgb, base)
                        # vignette correction
                        if not self.vignette_mask is None:
                            # widen to uint16 so the addition cannot wrap,
                            # then clip back into uint8 range
                            rgb = rgb.astype('uint16') + self.vignette_mask
                            rgb = np.clip(rgb, 0, 255).astype('uint8')

                        # Clamp the texture dimensions to what the video
                        # card supports (and to powers of two if needed).
                        h, w = rgb.shape[:2]
                        print('shape: (%d,%d)' % (w, h))
                        rescale = False
                        if h > self.max_texture_dimension:
                            h = self.max_texture_dimension
                            rescale = True
                        if w > self.max_texture_dimension:
                            w = self.max_texture_dimension
                            rescale = True
                        if self.needs_pow2:
                            # round each dimension down to the nearest
                            # power of two
                            h2 = 2**math.floor(math.log(h,2))
                            w2 = 2**math.floor(math.log(w,2))
                            if h2 != h:
                                h = h2
                                rescale = True
                            if w2 != w:
                                w = w2
                                rescale = True
                        if rescale:
                            print("Notice: rescaling texture to (%d,%d) to honor video card capability." % (w, h))
                            rgb = cv2.resize(rgb, (w,h))

                        # Visualization filter selection; only
                        # 'equalize_value' is active, the rest are kept
                        # around for experimentation.
                        # filter_by = 'none'
                        filter_by = 'equalize_value'
                        # filter_by = 'equalize_rgb'
                        # filter_by = 'equalize_blue'
                        # filter_by = 'equalize_green'
                        # filter_by = 'equalize_blue'
                        # filter_by = 'equalize_red'
                        # filter_by = 'red/green'
                        if filter_by == 'none':
                            # pass-through (split/merge is a no-op copy)
                            b, g, r = cv2.split(rgb)
                            result = cv2.merge((b, g, r))
                        if filter_by == 'equalize_value':
                            # equalize val (essentially gray scale level)
                            hsv = cv2.cvtColor(rgb, cv2.COLOR_BGR2HSV)
                            hue, sat, val = cv2.split(hsv)
                            aeq = clahe.apply(val)
                            # recombine
                            hsv = cv2.merge((hue,sat,aeq))
                            # convert back to rgb
                            result = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
                        elif filter_by == 'equalize_rgb':
                            # equalize individual b, g, r channels
                            b, g, r = cv2.split(rgb)
                            b = clahe.apply(b)
                            g = clahe.apply(g)
                            r = clahe.apply(r)
                            result = cv2.merge((b,g,r))
                        elif filter_by == 'equalize_blue':
                            # highlight blue content: weight saturation by
                            # hue-distance from blue
                            hsv = cv2.cvtColor(rgb, cv2.COLOR_BGR2HSV)
                            hue, sat, val = cv2.split(hsv)
                            # blue hue = 120
                            
                            # slide 120 -> 90 (center of 0-180 range
                            # with mod() roll over)
                            diff = np.mod(hue.astype('float64') - 30, 180)
                            # move this center point to 0 (-90 to +90
                            # range) and take absolute value
                            # (distance)
                            diff = np.abs(diff - 90)
                            # scale to 0 to 1 (1 being the closest to
                            # our target hue)
                            diff = 1.0 - diff / 90
                            print('hue:', np.amin(hue), np.amax(hue))
                            print('sat:', np.amin(sat), np.amax(sat))
                            print('diff:', np.amin(diff), np.amax(diff))
                            #print(diff)
                            #g = (256 - (256.0/90.0)*diff).astype('uint8')
                            b = (diff * sat).astype('uint8')
                            g = np.zeros(hue.shape, dtype='uint8')
                            r = np.zeros(hue.shape, dtype='uint8')
                            #g = clahe.apply(g)
                            result = cv2.merge((b,g,r))
                            print(result.shape, result.dtype)
                        elif filter_by == 'equalize_green':
                            # highlight green content: weight saturation by
                            # hue-distance from green
                            hsv = cv2.cvtColor(rgb, cv2.COLOR_BGR2HSV)
                            hue, sat, val = cv2.split(hsv)
                            # green hue = 60
                            
                            # slide 60 -> 90 (center of 0-180 range
                            # with mod() roll over)
                            diff = np.mod(hue.astype('float64') + 30, 180)
                            # move this center point to 0 (-90 to +90
                            # range) and take absolute value
                            # (distance)
                            diff = np.abs(diff - 90)
                            # scale to 0 to 1 (1 being the closest to
                            # our target hue)
                            diff = 1.0 - diff / 90
                            print('hue:', np.amin(hue), np.amax(hue))
                            print('sat:', np.amin(sat), np.amax(sat))
                            print('diff:', np.amin(diff), np.amax(diff))
                            #print(diff)
                            b = np.zeros(hue.shape, dtype='uint8')
                            g = (diff * sat).astype('uint8')
                            r = np.zeros(hue.shape, dtype='uint8')
                            #g = clahe.apply(g)
                            result = cv2.merge((b,g,r))
                            print(result.shape, result.dtype)
                        elif filter_by == 'equalize_red':
                            # highlight red content: weight saturation by
                            # hue-distance from red
                            hsv = cv2.cvtColor(rgb, cv2.COLOR_BGR2HSV)
                            hue, sat, val = cv2.split(hsv)
                            # red hue = 0
                            
                            # slide 0 -> 90 (center of 0-180 range
                            # with mod() roll over)
                            diff = np.mod(hue.astype('float64') + 90, 180)
                            # move this center point to 0 (-90 to +90
                            # range) and take absolute value
                            # (distance)
                            diff = np.abs(diff - 90)
                            # scale to 0 to 1 (1 being the closest to
                            # our target hue)
                            diff = 1.0 - diff / 90
                            print('hue:', np.amin(hue), np.amax(hue))
                            print('sat:', np.amin(sat), np.amax(sat))
                            print('diff:', np.amin(diff), np.amax(diff))
                            b = np.zeros(hue.shape, dtype='uint8')
                            g = np.zeros(hue.shape, dtype='uint8')
                            r = (diff * sat).astype('uint8')
                            result = cv2.merge((b,g,r))
                            print(result.shape, result.dtype)
                        elif filter_by == 'red/green':
                            # visualize red/green ratio (and its inverse),
                            # clipped to *max*, in the r and g channels
                            max = 4.0
                            b, g, r = cv2.split(rgb)
                            ratio = r / (g.astype('float64')+1.0)
                            ratio = np.clip(ratio, 0, max)
                            inv = g / (r.astype('float64')+1.0)
                            inv = np.clip(inv, 0, max)
                            max_ratio = np.amax(ratio)
                            max_inv = np.amax(inv)
                            print(max_ratio, max_inv)
                            b[:] = 0
                            g = (inv * (255/max)).astype('uint8')
                            r = (ratio * (255/max)).astype('uint8')
                            result = cv2.merge((b,g,r))
                            print(result.shape, result.dtype)
                            
                        # Wrap the processed image in a Panda3D texture,
                        # clamp the edges, attach it, and cache it with a
                        # timestamp for eviction below.
                        fulltex = Texture(base)
                        fulltex.setCompression(Texture.CMOff)
                        fulltex.setup2dTexture(w, h, Texture.TUnsignedByte, Texture.FRgb)
                        fulltex.setRamImage(result)
                        # fulltex.load(rgb) # for loading a pnm image
                        fulltex.setWrapU(Texture.WM_clamp)
                        fulltex.setWrapV(Texture.WM_clamp)
                        m.setTexture(fulltex, 1)
                        tcache[m.getName()] = [m, fulltex, time.time()]
                    else:
                        # disabled alternative: let panda load the texture
                        # file directly
                        print(image_file)
                        fulltex = self.loader.loadTexture(image_file)
                        fulltex.setWrapU(Texture.WM_clamp)
                        fulltex.setWrapV(Texture.WM_clamp)
                        #print('fulltex:', fulltex)
                        m.setTexture(fulltex, 1)
                        tcache[m.getName()] = [m, fulltex, time.time()]
        # Evict the oldest cache entries (by stored timestamp) until the
        # cache is back under its size limit.
        cachesize = 10
        while len(tcache) > cachesize:
            oldest_time = time.time()
            oldest_name = ""
            for name in tcache:
                if tcache[name][2] < oldest_time:
                    oldest_time = tcache[name][2]
                    oldest_name = name
            del tcache[oldest_name]
Exemplo n.º 2
0
    # take the cumsum of the counts and normalize by the number of pixels to
    # get the empirical cumulative distribution functions for the source and
    # template images (maps pixel value --> quantile)
    s_quantiles = np.cumsum(s_counts).astype(np.float64)
    s_quantiles /= s_quantiles[-1]
    t_quantiles = np.cumsum(t_counts).astype(np.float64)
    t_quantiles /= t_quantiles[-1]

    # interpolate linearly to find the pixel values in the template image
    # that correspond most closely to the quantiles in the source image
    interp_t_values = np.interp(s_quantiles, t_quantiles, t_values)

    return interp_t_values[bin_idx].reshape(oldshape)


# Reuse previously computed histograms when available; otherwise build
# them from scratch for every image in the project.
if not histogram.load(proj.analysis_dir):
    histogram.make_histograms(proj.image_list)

histogram.make_templates(proj.image_list, dist_cutoff=50, self_weight=1.0)
histogram.save(proj.analysis_dir)

histograms = histogram.histograms
templates = histogram.templates

# Preview: show each image (quarter resolution) next to its
# neighbor-matched version, advancing on any key press.
for img in proj.image_list:
    raw = img.load_rgb()
    scaled = cv2.resize(raw, (0, 0), fx=0.25, fy=0.25)
    result = histogram.match_neighbors(scaled, img.name)
    cv2.imshow('scaled', scaled)
    cv2.imshow('result', result)
    cv2.waitKey()
Exemplo n.º 3
0
    def load(self, path):
        """Load every .egg model from *path* and attach base textures.

        Side effects: populates self.models and self.base_textures,
        reparents each model under self.render, and optionally loads a
        vignette correction mask from "vignette-mask.jpg" in *path*.
        Finishes by re-sorting images and rebuilding annotations.

        NOTE(review): relies on module-level names proj, do_histogram,
        histogram and clahe defined elsewhere in the file.
        """
        # Optional vignette correction mask; keep both the full-resolution
        # mask and a 512x512 copy matching the base-texture size.
        self.vignette_mask = None
        self.vignette_mask_small = None
        vfile = os.path.join(path, "vignette-mask.jpg")
        if os.path.exists(vfile):
            print("loading vignette correction mask:", vfile)
            self.vignette_mask = cv2.imread(vfile, flags=cv2.IMREAD_ANYCOLOR|cv2.IMREAD_ANYDEPTH|cv2.IMREAD_IGNORE_ORIENTATION)
            self.vignette_mask_small = cv2.resize(self.vignette_mask, (512, 512))

        # Collect the .egg model files in deterministic (sorted) order.
        files = [file for file in sorted(os.listdir(path))
                 if fnmatch.fnmatch(file, '*.egg')]
        print('Loading models:')
        for file in tqdm(files, smoothing=0.05, ascii=(os.name=='nt')):
            # load and reparent each egg file
            pandafile = Filename.fromOsSpecific(os.path.join(path, file))
            model = self.loader.loadModel(pandafile)
            model.reparentTo(self.render)
            self.models.append(model)
            # Clamp any texture already attached so edges don't wrap.
            tex = model.findTexture('*')
            if tex is not None:
                tex.setWrapU(Texture.WM_clamp)
                tex.setWrapV(Texture.WM_clamp)
            self.base_textures.append(tex)

        # The egg model lists "dummy.jpg" as the texture model which
        # doesn't exist.  Here we load the actual textures and
        # possibly apply vignette correction and adaptive histogram
        # equalization.
        print('Loading base textures:')
        for i, model in enumerate(tqdm(self.models, smoothing=0.05, ascii=(os.name=='nt'))):
            base, ext = os.path.splitext(model.getName())
            # Prefer the upper-case extension, then lower-case.
            image_file = None
            dir = os.path.join(proj.analysis_dir, 'models')
            for candidate in (os.path.join(dir, base + '.JPG'),
                              os.path.join(dir, base + '.jpg')):
                if os.path.isfile(candidate):
                    image_file = candidate
                    break
            if image_file is None:
                # Fix: previously cv2.imread(None) raised TypeError when
                # neither candidate existed; skip this model instead.
                print('Warning: no base texture image found for:', base)
                continue
            rgb = cv2.imread(image_file, flags=cv2.IMREAD_ANYCOLOR|cv2.IMREAD_ANYDEPTH|cv2.IMREAD_IGNORE_ORIENTATION)
            # flip vertically: OpenCV rows are top-down while texture
            # coordinates run bottom-up
            rgb = np.flipud(rgb)
            # histogram matching
            if do_histogram:
                rgb = histogram.match_neighbors(rgb, base)
            # vignette correction (widen to uint16 so the addition
            # cannot wrap, then clip back into uint8 range)
            if self.vignette_mask_small is not None:
                rgb = rgb.astype('uint16') + self.vignette_mask_small
                rgb = np.clip(rgb, 0, 255).astype('uint8')
            # adaptive equalization of the value (brightness) channel
            hsv = cv2.cvtColor(rgb, cv2.COLOR_BGR2HSV)
            hue, sat, val = cv2.split(hsv)
            aeq = clahe.apply(val)
            # recombine
            hsv = cv2.merge((hue, sat, aeq))
            # convert back to rgb
            rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
            tex = Texture(base)
            tex.setCompression(Texture.CMOff)
            tex.setup2dTexture(512, 512, Texture.TUnsignedByte,
                               Texture.FRgb)
            tex.setRamImage(rgb)
            tex.setWrapU(Texture.WM_clamp)
            tex.setWrapV(Texture.WM_clamp)
            model.setTexture(tex, 1)
            self.base_textures[i] = tex
        self.sortImages()
        self.annotations.rebuild(self.view_size)