def __init__(self, input_, copy_=True, max_l=None, norm=4):
    """
    Projects `input_` to its spherical harmonics basis up to degree `max_l`.
    norm = 4 means orthonormal harmonics.
    For more details, please see https://shtools.oca.eu/shtools/pyshexpanddh.html
    """
    if copy_:
        self.spatial = input_.copy()
    else:
        self.spatial = input_

    if not isinstance(self.spatial, EnvironmentMap):
        self.spatial = EnvironmentMap(self.spatial, 'LatLong')

    if self.spatial.format_ != "latlong":
        self.spatial = self.spatial.convertTo("latlong")

    self.norm = norm

    self.coeffs = []
    for i in range(self.spatial.data.shape[2]):
        self.coeffs.append(
            SHExpandDH(self.spatial.data[:, :, i], norm=norm, sampling=2,
                       lmax_calc=max_l))
def extract_perspective_image(self, frame_data):
    """
    Extract perspective images from panoramic images.
    The aspect ratio of the image is the ratio of its width to its height.

    :param frame_data: frame data
    """
    # collect the parameters
    aspect_ratio = float(self.persp_resolution_width) / self.persp_resolution_height
    resolution = (self.persp_resolution_width, self.persp_resolution_height)
    # self.show_info("generate perspective images with rotation, {}".format(persp_forward))

    # get forward rotation matrix
    rotation_matrix = np.zeros([3, 3])
    self.spherical2dcm(self.persp_forward, rotation_matrix)

    persp_frame_data = np.ones((np.shape(frame_data)[0],
                                self.persp_resolution_height,
                                self.persp_resolution_width, 3),
                               dtype=np.uint8)

    for idx in range(0, np.shape(frame_data)[0]):
        try:
            envmap = EnvironmentMap(frame_data[idx], format_='latlong')
        except AssertionError as error:
            msg = "ERROR! extract perspective image error \" {} \"".format(error)
            print(msg)
            raise

        # get perspective images
        new_image = envmap.project(vfov=self.persp_fov_v,
                                   rotation_matrix=rotation_matrix,
                                   ar=aspect_ratio,
                                   resolution=resolution)
        persp_frame_data[idx, :, :, :] = new_image.astype(np.uint8)

    return persp_frame_data
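
# Minimal usage sketch for the projection step above, outside the class
# (an assumption-laden sketch: it presumes the skylibs EnvironmentMap.project
# API used in the snippet, with a synthetic panorama in place of real frames):
import numpy as np
from envmap import EnvironmentMap

pano = np.random.rand(256, 512, 3)              # hypothetical latlong panorama
crop = EnvironmentMap(pano, format_='latlong').project(
    vfov=60.,                                   # vertical field of view, degrees
    rotation_matrix=np.eye(3),                  # identity: default forward axis
    ar=640. / 480.,                             # aspect ratio = width / height
    resolution=(640, 480))                      # (width, height) of the crop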
def iFSHT(coeffs, envmap_size, envmap_format='latlong', reduction_type='right'):
    if reduction_type != 'right':
        raise NotImplementedError()

    degrees = int(np.sqrt(8 * coeffs.shape[0]) / 2. - 1)
    ch = coeffs.shape[1]

    envmap = EnvironmentMap(np.zeros((envmap_size, envmap_size * 2, ch)),
                            envmap_format)
    envmap.data = envmap.data.astype(np.complex128)

    P, _ = _getP(envmap, degrees)

    i = 0
    for l in tqdm(range(degrees + 1)):
        for m in range(0, l + 1):
            for c in range(ch):
                envmap.data[:, m, c] += P[:, i] * coeffs[i, c]
            i += 1

    envmap.data = np.fft.ifft(envmap.data, axis=1).real

    return envmap
def inverseSphericalHarmonicTransform(coeffs, envmap_height=512, format_='latlong',
                                      reduction_type='right'):
    """
    Recovers an EnvironmentMap from a list of coefficients.
    """
    degrees = int(np.sqrt(coeffs.shape[0])) - 1
    coeffs = addRedundantCoeffs(coeffs, reduction_type)[..., np.newaxis]
    ch = coeffs.shape[1]

    retval = EnvironmentMap(envmap_height, format_)
    retval.data = np.zeros((retval.data.shape[0], retval.data.shape[1], ch),
                           dtype=np.float32)

    x, y, z, valid = retval.worldCoordinates()
    theta = np.arctan2(x, -z)
    phi = np.arccos(y)

    for l in range(degrees + 1):
        for col, m in enumerate(range(-l, l + 1)):
            Y = sph_harm(m, l, theta, phi)
            for c in range(ch):
                retval.data[..., c] += (coeffs[l**2 + col, c] * Y).real

    return retval
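
# A small sketch of the angle convention used above: evaluate the single
# orthonormal harmonic Y_1^0 over a latlong grid (assumes scipy's sph_harm
# and the skylibs worldCoordinates() axis order, with y pointing up):
import numpy as np
from scipy.special import sph_harm
from envmap import EnvironmentMap

e = EnvironmentMap(64, 'latlong')
x, y, z, valid = e.worldCoordinates()
theta = np.arctan2(x, -z)              # azimuth
phi = np.arccos(y)                     # polar angle from the +y axis
Y10 = sph_harm(0, 1, theta, phi).real  # proportional to cos(phi)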
def environment_map(self):
    """
    :returns: EnvironmentMap object.
    """
    if self.format_:
        return EnvironmentMap(self.path, self.format_)
    else:
        return EnvironmentMap(self.path)
def __call__(self, sample):
    from envmap import EnvironmentMap

    image = EnvironmentMap(64, 'LatLong')
    image.data = sample
    rotation = self.random_direction()
    img_hdr = image.rotate('DCM', rotation).data.astype('float32')
    return img_hdr
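
# Hypothetical usage of the transform above in a data pipeline; the class
# name RandomRotationTransform and its random_direction() helper are
# assumptions from context, and the sample shape matches the 64-pixel-high
# latlong map the snippet constructs:
import numpy as np

transform = RandomRotationTransform()   # hypothetical class defining __call__
sample = np.random.rand(64, 128, 3)
rotated = transform(sample)             # randomly rotated panorama, float32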
def reproject(self, image, uvs):
    """
    Reprojects an image using the EnvironmentMap.interpolate function.

    image: the image to reproject, in latlong format
    uvs: the uv coordinates to use for reprojection

    returns the reprojected image with the same dimensions as the input image
    """
    envmap = EnvironmentMap(image, "latlong")
    u, v = np.split(uvs, 2, axis=2)
    envmap.interpolate(u, v)
    return envmap.data
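
# Identity-reprojection sketch using the same interpolate() call (assumes
# skylibs samples the map at u, v in [0, 1]); sampling every pixel at its
# own uv coordinates should give back (approximately) the input image:
import numpy as np
from envmap import EnvironmentMap

img = np.random.rand(128, 256, 3)
h, w = img.shape[:2]
u, v = np.meshgrid((np.arange(w) + 0.5) / w, (np.arange(h) + 0.5) / h)
envmap = EnvironmentMap(img, "latlong")
envmap.interpolate(u, v)
roundtrip = envmap.data                # ~= img, up to interpolation error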
def rotate_image(data, rotation):
    """
    Rotate images with skylibs.

    :param data: image data to be rotated; 4 dimensions: [x, width, height, 3]
    :return: whether the images were rotated
    """
    if [0.0, 0.0] == rotation:
        return False

    rotation_matrix = np.zeros([3, 3])
    spherical2dcm(rotation, rotation_matrix)

    envmap = EnvironmentMap(data, format_='latlong')
    new_image = envmap.rotate("DCM", rotation_matrix).data
    data[:] = new_image.astype(np.uint8)
    return True
def xyz_from_depth(width, height, filename):
    os.system('mitsuba scene.xml')
    _, _, depth = load_hdr_multichannel('scene.exr', color=True, depth=True)
    # depth = np.flip(depth, 1)  # flip horizontally
    # depth = shift_image(depth, int(depth.shape[0]/2), 0)
    # depth = np.roll(depth, depth.shape[0], axis=1)
    depth = cv2.resize(depth, (width, height))
    cv2.imwrite(filename, depth / depth.max() * 255)

    xyz_surface_S = np.array(
        EnvironmentMap(depth.shape[0], 'latlong').worldCoordinates())
    x = xyz_surface_S[0] * depth
    y = xyz_surface_S[1] * depth
    z = xyz_surface_S[2] * depth

    # fill NaNs from the pixel to the right (assumes the last column has no NaN)
    for i in np.argwhere(np.isnan(x)):
        x[i[0]][i[1]] = x[i[0]][i[1] + 1]
    for i in np.argwhere(np.isnan(y)):
        y[i[0]][i[1]] = y[i[0]][i[1] + 1]
    for i in np.argwhere(np.isnan(z)):
        z[i[0]][i[1]] = z[i[0]][i[1] + 1]
    assert not np.isnan(np.sum(x))
    assert not np.isnan(np.sum(y))
    assert not np.isnan(np.sum(z))

    x = x.reshape(-1)
    y = y.reshape(-1)
    z = z.reshape(-1)
    return np.array([x, y, z]).transpose()
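
# The core idea of xyz_from_depth in isolation: scale each unit direction
# of a latlong grid by its per-pixel depth to get a point cloud (a sketch
# with synthetic depth in place of the Mitsuba render):
import numpy as np
from envmap import EnvironmentMap

depth = np.full((64, 128), 2.0)        # hypothetical depth map
x, y, z, _ = EnvironmentMap(64, 'latlong').worldCoordinates()
points = np.stack([(x * depth).ravel(),
                   (y * depth).ravel(),
                   (z * depth).ravel()], axis=1)   # N x 3, all at radius 2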
def __init__(self):
    super(AutoEncoderNet, self).__init__()
    # encoder
    self.en_conv1 = nn.Conv2d(3, 64, 9, stride=1, padding=1)  # 64 x 64 x 32
    self.en_conv1_bn = nn.BatchNorm2d(64)
    self.en_conv2 = nn.Conv2d(64, 128, 3, stride=2, padding=1)  # 128 x 32 x 16
    self.en_conv2_bn = nn.BatchNorm2d(128)
    self.res1 = self.make_layer(ResidualBlock, 128, 256, 2)
    self.res2 = self.make_layer(ResidualBlock, 256, 256, 2, 2)
    self.res3 = self.make_layer(ResidualBlock, 256, 256, 2, 2)
    self.res4 = self.make_layer(ResidualBlock, 256, 256, 2, 2)
    self.en_fc1 = nn.Linear(8192, 128)

    # decoder
    self.de_fc1 = nn.Linear(128, 8192)
    self.de_conv1 = UpsampleConvLayer(256, 256, kernel_size=3, stride=1, upsample=2)
    self.de_conv1_bn = nn.BatchNorm2d(256)
    self.de_conv2 = UpsampleConvLayer(256, 256, kernel_size=3, stride=1, upsample=2)
    self.de_conv2_bn = nn.BatchNorm2d(256)
    self.de_conv3 = UpsampleConvLayer(256, 128, kernel_size=3, stride=1, upsample=2)
    self.de_conv3_bn = nn.BatchNorm2d(128)
    self.de_conv4 = UpsampleConvLayer(128, 64, kernel_size=3, stride=1, upsample=2)
    self.de_conv4_bn = nn.BatchNorm2d(64)
    self.de_conv5 = UpsampleConvLayer(64, 3, kernel_size=3, stride=1)

    self.sa = EnvironmentMap(64, 'LatLong').solidAngles()

    self.view_size = [256, 4, 8]
    self.saved_input = []
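
# Why solidAngles() is cached above: in a latlong panorama each pixel covers
# a different solid angle, so per-pixel losses should be weighted accordingly.
# A sketch of such a weighted MSE (assumes PyTorch and 64x128 latlong tensors;
# the loss itself is an illustration, not necessarily this model's objective):
import torch
from envmap import EnvironmentMap

sa = torch.from_numpy(EnvironmentMap(64, 'LatLong').solidAngles()).float()

def solid_angle_mse(pred, target):
    # pred, target: (N, 3, 64, 128); the (64, 128) weights broadcast over N, C
    return ((pred - target) ** 2 * sa).mean()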
class SphericalHarmonic:
    def __init__(self, input_, copy_=True, max_l=None, norm=4):
        """
        Projects `input_` to its spherical harmonics basis up to degree `max_l`.
        norm = 4 means orthonormal harmonics.
        For more details, please see https://shtools.oca.eu/shtools/pyshexpanddh.html
        """
        if copy_:
            self.spatial = input_.copy()
        else:
            self.spatial = input_

        if not isinstance(self.spatial, EnvironmentMap):
            self.spatial = EnvironmentMap(self.spatial, 'LatLong')

        if self.spatial.format_ != "latlong":
            self.spatial = self.spatial.convertTo("latlong")

        self.norm = norm

        self.coeffs = []
        for i in range(self.spatial.data.shape[2]):
            self.coeffs.append(
                SHExpandDH(self.spatial.data[:, :, i], norm=norm, sampling=2,
                           lmax_calc=max_l))

    def reconstruct(self, height=None, max_l=None, clamp_negative=True):
        """
        :height: height of the reconstructed image
        :clamp_negative: remove reconstructed values under 0
        """
        retval = []
        for i in range(len(self.coeffs)):
            retval.append(
                MakeGridDH(self.coeffs[i], norm=self.norm, sampling=2,
                           lmax=height, lmax_calc=max_l))
        retval = np.asarray(retval).transpose((1, 2, 0))

        if clamp_negative:
            retval = np.maximum(retval, 0)

        return retval
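
# Round-trip sketch for the class above (assumes pyshtools' SHExpandDH /
# MakeGridDH as imported by the snippet; a low max_l yields a blurry,
# low-frequency approximation, with the output grid size chosen by pyshtools
# unless height is given):
import numpy as np
from envmap import EnvironmentMap

e = EnvironmentMap(np.random.rand(64, 128, 3), 'latlong')
sh = SphericalHarmonic(e, max_l=8)
approx = sh.reconstruct(max_l=8)       # low-frequency approximation of e.data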
def rotate_image(self, data):
    """
    Rotate images with skylibs.

    :param data: image data to be rotated; 4 dimensions: [x, width, height, 3]
    :return: the (possibly rotated) image data
    """
    if [0.0, 0.0] == self.rotation:
        return data

    rotation_matrix = np.zeros([3, 3])
    self.spherical2dcm(self.rotation, rotation_matrix)

    for i in range(0, np.shape(data)[0]):
        if i % self.show_infor_interval == 0:
            self.show_info(
                "Rotating image with 'rotate_image' function, index is {}."
                .format(i))

        # show_image(image)
        image = data[i]
        envmap = EnvironmentMap(image, format_='latlong')
        new_image = envmap.rotate("DCM", rotation_matrix).data
        new_image = new_image.astype(np.uint8)
        data[i] = new_image

    return data
def __init__(self, input_, copy_=True, max_l=None, norm=4):
    """
    Projects `input_` to its spherical harmonics basis up to degree `max_l`.
    norm = 4 means orthonormal harmonics.
    For more details, please see https://shtools.oca.eu/shtools/pyshexpanddh.html
    """
    if copy_:
        self.spatial = input_.copy()
    else:
        self.spatial = input_

    if not isinstance(self.spatial, EnvironmentMap):
        self.spatial = EnvironmentMap(self.spatial, 'LatLong')

    if self.spatial.format_ != "latlong":
        self.spatial = self.spatial.convertTo("latlong")

    self.norm = norm

    # from cffi import FFI
    # ffi = FFI()
    # ffi.cdef("""
    #     void generateAssociatedLegendreFactors(const float N, float *data_out,
    #                                            const float *nodes,
    #                                            const unsigned int num_nodes);
    # """)
    # if os.name == 'nt':
    #     C = ffi.dlopen(os.path.join(os.path.dirname(os.path.realpath(__file__)),
    #                                 "spharm_tools.dll"))
    # else:
    #     C = ffi.dlopen(os.path.join(os.path.dirname(os.path.realpath(__file__)),
    #                                 "spharm_tools.so"))

    self.coeffs = []
    for i in range(self.spatial.data.shape[2]):
        self.coeffs.append(
            SHExpandDH(self.spatial.data[:, :, i], norm=norm, sampling=2,
                       lmax_calc=max_l))
def generateLDRfromHDR(im_path, out_prefix):
    """Convert an HDR image into a clipped 0-255 value ("simulating" a camera)"""
    print('Processing: ', im_path)
    im = imread(im_path)
    h, w, c = im.shape
    im = im[:, w//2 - h//2:w//2 + h//2]
    envmap = EnvironmentMap(im, 'SkyAngular').convertTo('LatLong', TARGET_SIZE[0])
    im = envmap.data

    valid = (im > 0) & (~np.isnan(im))
    im_median = np.median(im[valid])
    im_low = np.percentile(im[valid], 3)
    im_high = np.percentile(im[valid], 95)

    #scales = (TARGET_SIZE[0]/im.shape[0], TARGET_SIZE[1]/im.shape[1])
    #im = zoom(im, [scales[0], scales[1], 1])

    with open(out_prefix + "_hdr.pkl", 'wb') as fhdl:
        pickle.dump(im, fhdl, pickle.HIGHEST_PROTOCOL)
    imsave(out_prefix + '_hdr.exr', im)

    # 3rd percentile -> value 5
    # 95th percentile -> value 255 (the clip ceiling)
    #print("Ratio:", (im_high - im_low))
    ratio = im_high - im_low
    if ratio < 0.1:
        ratio = 0.1
    im_ldr = (im - im_low) * 250. / ratio + 5
    im_ldr = np.clip(im_ldr, 0, 255).astype('uint8')
    imsave(out_prefix + '_ldr.jpg', im_ldr)

    plt.figure()
    plt.subplot(1, 2, 1)
    plt.hist(im.ravel()[im.ravel() < im_high], 50)
    plt.subplot(1, 2, 2)
    plt.hist(im_ldr.ravel()[im_ldr.ravel() > 0], 50)
    plt.savefig(out_prefix + 'debug.png')
    plt.close()
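
# Numeric sanity check of the linear mapping above: the 3rd-percentile value
# lands on 5 and the 95th-percentile value on 255, the top of the uint8 range:
im_low, im_high = 0.2, 1.7             # hypothetical percentile values
ratio = im_high - im_low
assert (im_low - im_low) * 250. / ratio + 5 == 5
assert abs((im_high - im_low) * 250. / ratio + 5 - 255.) < 1e-9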
class SphericalHarmonic:
    def __init__(self, input_, copy_=True, max_l=None, norm=4):
        """
        Projects `input_` to its spherical harmonics basis up to degree `max_l`.
        norm = 4 means orthonormal harmonics.
        For more details, please see https://shtools.oca.eu/shtools/pyshexpanddh.html
        """
        if copy_:
            self.spatial = input_.copy()
        else:
            self.spatial = input_

        if not isinstance(self.spatial, EnvironmentMap):
            self.spatial = EnvironmentMap(self.spatial, 'LatLong')

        if self.spatial.format_ != "latlong":
            self.spatial = self.spatial.convertTo("latlong")

        self.norm = norm

        # from cffi import FFI
        # ffi = FFI()
        # ffi.cdef("""
        #     void generateAssociatedLegendreFactors(const float N, float *data_out,
        #                                            const float *nodes,
        #                                            const unsigned int num_nodes);
        # """)
        # if os.name == 'nt':
        #     C = ffi.dlopen(os.path.join(os.path.dirname(os.path.realpath(__file__)),
        #                                 "spharm_tools.dll"))
        # else:
        #     C = ffi.dlopen(os.path.join(os.path.dirname(os.path.realpath(__file__)),
        #                                 "spharm_tools.so"))

        self.coeffs = []
        for i in range(self.spatial.data.shape[2]):
            self.coeffs.append(
                SHExpandDH(self.spatial.data[:, :, i], norm=norm, sampling=2,
                           lmax_calc=max_l))

    def reconstruct(self, height=None, max_l=None, clamp_negative=True):
        """
        :height: height of the reconstructed image
        :clamp_negative: remove reconstructed values under 0
        """
        retval = []
        for i in range(len(self.coeffs)):
            retval.append(
                MakeGridDH(self.coeffs[i], norm=self.norm, sampling=2,
                           lmax=height, lmax_calc=max_l))
        retval = np.asarray(retval).transpose((1, 2, 0))

        if clamp_negative:
            retval = np.maximum(retval, 0)

        # original fork returned here!
        return retval

        # Experimental forward-transform code kept from the fork; unreachable
        # after the return above, and it references names (degrees, ch, envmap,
        # fm) that are not defined in this scope.
        # nodes = []
        # weights = []
        # for d in range(1, degrees + 2):
        #     x, y = np.polynomial.legendre.leggauss(d)
        #     nodes.extend(x)
        #     weights.extend(y)
        retval = np.zeros((int((2 * (degrees + 1) + 1)**2 / 8), ch),
                          dtype=np.complex128)

        P, nodes = _getP(envmap, degrees)
        print(degrees, P.shape)

        # Gauss-Legendre / Gauss-Chebyshev quadrature to speed up?
        # Perform in C
        # for j in range(envmap.data.shape[1]):
        #     for k in range(ch):
        #         # fmi = np.interp(nodes, np.linspace(-np.pi/2, np.pi/2, fm.shape[0]),
        #         #                 np.squeeze(fm[:, j, k]))
        #         i = 0
        #         for l in range(degrees + 1):
        #             for m in range(0, l + 1):
        #                 retval[l, m, k] += fm * P[l, m]
        #                 i += 1

        import operator
        i = 0
        for l in tqdm(range(degrees + 1)):
            for m in range(0, l + 1):
                # coef = np.sqrt(((2.*l + 1.) / (4.*np.pi)) *
                #                (1. / functools.reduce(operator.mul,
                #                                       range(l - m + 1, l + m + 1), 1)))
                for c in range(ch):
                    # retval[i, c] = np.nansum(coef * P[:, i] * np.squeeze(fm[:, m, c]))
                    retval[i, c] = np.nansum(P[:, i] * np.squeeze(fm[:, m, c]))
                    # retval[i, c] = np.nansum(coef * P[:, i] * np.exp(1j*m*theta.ravel())
                    #                          * envmap.data.reshape([-1, ch])[:, c])
                    # retval[i, c] = np.nansum(coef * ref[:, i] * np.exp(-1j*m*theta.ravel())
                    #                          * f[:, c])
                    # print("2: ", coef * P[:, i] * np.exp(1j*m*theta.ravel()) * f[:, c])
                i += 1

        from matplotlib import pyplot as plt
        # plt.scatter(nodes_cos, ref); plt.show()
        return retval
def flow_blend_image(self, precision=2):
    """
    "Flow-based blending"
    Synthesize the image using flow-based blending:
    For each pixel, gets the two closest (by deviation angle) viewpoints A and B
    on _either_ side of the synthesized point, uses 1DoF interpolation to
    interpolate the viewpoint v_AB that is at the intersection of SP and AB,
    then reprojects v_AB to the position of the synthesized point.

    precision: the precision with which to interpolate, i.e., the number of
        decimal points to round to when determining the interpolation distance.
        1: [0.0, 0.1, .., 1.0] (11 images),
        2: [0.00, 0.01, .., 0.99, 1.00] (101 images),
        >2 will lead to extremely long computation times and should be avoided

    returns: the synthesized image

    Note: This function uses information stored by the interpolation function
    and also adds information
    """
    # get the deviation angles (which are in [-180, 180]) and shift the angles < 0
    # by +360 degrees so that the largest negative angles (closest to 0) are now
    # closest to 360
    mod_dev = np.copy(self.dev_angles)
    mod_dev[mod_dev < 0] += 2 * np.pi

    # sort the modified angles and take the angles closest to 0 and closest to
    # 360, which yields the two viewpoints with the smallest deviation angle
    # _on either side_
    sorted_indices = np.argsort(mod_dev, axis=-1)
    best_indices = np.dstack((sorted_indices[:, :, 0], sorted_indices[:, :, -1]))
    self.best_indices_flow = np.copy(best_indices)

    # get 2D vectors between best two indices for the best indices for each pixel
    unique_indices = np.unique(np.reshape(
        best_indices,
        (best_indices.shape[0] * best_indices.shape[1], best_indices.shape[2])),
        axis=0)
    best_indices = best_indices.reshape(
        (best_indices.shape[0] * best_indices.shape[1], best_indices.shape[2]))
    vectors_A = np.zeros_like(best_indices).astype(np.float64)
    vectors_B = np.zeros_like(best_indices).astype(np.float64)
    for pair in unique_indices:
        pos = self.capture_set.get_positions(
            [self.indices[pair[0]], self.indices[pair[1]]])
        vec_A = pos[0][:2]
        vec_B = pos[1][:2]
        mask = np.equal(best_indices, pair)
        mask = np.logical_and(mask[:, 0], mask[:, 1])
        mask = np.dstack((mask, mask))
        np.putmask(vectors_A, mask, vec_A)
        np.putmask(vectors_B, mask, vec_B)
    vectors_A = vectors_A.reshape(self.best_indices_flow.shape)
    vectors_B = vectors_B.reshape(self.best_indices_flow.shape)

    '''
    intersect line AB with line point+targets SP (in 2D)
    given:
        line AB: A + t * (B - A) | A is viewpoint A, B is viewpoint B
            (selected by best indices)
        line SP: S + u * (P - S) | S is synthesized point, P is target point
    formula: (from https://en.wikipedia.org/wiki/Line%E2%80%93line_intersection#Given_two_points_on_each_line)

            (x_A - x_S)(y_S - y_P) - (y_A - y_S)(x_S - x_P)
        t = -----------------------------------------------
            (x_A - x_B)(y_S - y_P) - (y_A - y_B)(x_S - x_P)

    encoding:
        xAS * ySP - yAS * xSP
        ---------------------
        xAB * ySP - yAB * xSP

    if t < 0 or t > 1 --> no intersection
    else dist = t
    '''
    targets = np.repeat(
        self.intersections[np.newaxis, int(self.intersections.shape[0] / 2)],
        self.intersections.shape[0], axis=0)[:, :, :2]
    AS = vectors_A - self.point[:2]
    SP = self.point[:2] - targets
    AB = vectors_A - vectors_B
    self.interpolation_distances = (
        AS[:, :, 0] * SP[:, :, 1] - AS[:, :, 1] * SP[:, :, 0]) / (
        AB[:, :, 0] * SP[:, :, 1] - AB[:, :, 1] * SP[:, :, 0])

    # for the points that for some reason are outside of the line segment AB,
    # use the closer viewpoint
    self.interpolation_distances[self.interpolation_distances < 0] = 0
    self.interpolation_distances[self.interpolation_distances > 1] = 1

    # if the s_point is exactly on the line between A and B (resulting in
    # t = nan), use t = |AS| / |AB|
    nan_indices = np.isnan(self.interpolation_distances)
    AS_len = np.sqrt(np.power(AS[nan_indices][:, 0], 2) +
                     np.power(AS[nan_indices][:, 1], 2))
    AB_len = np.sqrt(np.power(AB[nan_indices][:, 0], 2) +
                     np.power(AB[nan_indices][:, 1], 2))
    self.interpolation_distances[nan_indices] = AS_len / AB_len

    # in the case that AB_len was zero, there are still nan numbers, so filter
    # again, this time replacing nan with 0
    self.interpolation_distances[np.isnan(self.interpolation_distances)] = 0

    # visualize where the points and lines etc. are to debug
    # elevation has no impact, since only points on a plane are used
    # for l in range(0, self.interpolation_distances.shape[1], 60):
    #     uvs = (10, l)
    #     print(self.interpolation_distances[uvs])
    #     self.show_lr_points(uvs, targets[uvs[0], uvs[1]],
    #                         self.interpolation_distances[uvs],
    #                         saveas=utils.OUT + "flow_pos" + str(l) + ".jpg")

    # round in order to reduce the number of different sets
    # (reduces accuracy but also compute time)
    np.round(self.interpolation_distances, precision, self.interpolation_distances)

    # find the distinct pairs of best indices & interpolation distances that
    # will be used, so that the interpolated, reprojected images for these
    # pixels only have to be calculated once
    sets = np.dstack((self.best_indices_flow, self.interpolation_distances))
    unique_pairs = np.unique(np.reshape(
        sets,
        (self.best_indices_flow.shape[0] * self.best_indices_flow.shape[1],
         sets.shape[2])),
        axis=0)
    masks = {}
    image = np.zeros((self.dev_angles.shape[0], self.dev_angles.shape[1], 3))
    for u_pair in unique_pairs:
        # get the actual indices
        pair = (self.indices[u_pair[0].astype(np.uint8)],
                self.indices[u_pair[1].astype(np.uint8)])
        # print("calculating image at ", u_pair[2], " between ", pair[0], "and", pair[1])
        dist = u_pair[2]
        # where best_indices and distance is u_pair -> 1 else 0
        mask = (sets == u_pair)
        mask = np.logical_and(np.logical_and(mask[:, :, 0], mask[:, :, 1]),
                              mask[:, :, 2])

        # retrieve the flow from the capture set for this image pair
        flow = self.capture_set.get_flow(pair)
        interpolator = Interpolator1DoF(
            self.capture_set.get_capture(pair[0]).img,
            self.capture_set.get_capture(pair[1]).img,
            flow=flow)
        shifted_cube = interpolator.interpolate(dist)
        shifted_latlong = EnvironmentMap(shifted_cube, "cube").convertTo("latlong").data

        positions = self.capture_set.get_positions(pair)
        new_pos = positions[0] + dist * (positions[1] - positions[0])

        # get the rays from this viewpoint that hit the intersection points
        # this step is the same as in self.interpolate
        theta, phi = calc_ray_angles(new_pos, self.intersections)
        rays = calc_uvector_from_angle(theta, phi, self.capture_set.radius)
        # switch y and z because EnvironmentMap has a different representation
        u, v = projections.world2latlong(rays[:, :, 0], rays[:, :, 2], rays[:, :, 1])
        image += mask[:, :, np.newaxis] * self.reproject(shifted_latlong,
                                                         np.dstack((u, v)))

    self.flow_blend = image  # store for later visualization
    return self.flow_blend
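
# Quick numeric check of the intersection formula documented above:
# with A=(0,0), B=(2,0), S=(1,-1), P=(1,1), line SP crosses AB at its
# midpoint, so t = 0.5:
import numpy as np

A, B, S, P = map(np.array, ([0., 0.], [2., 0.], [1., -1.], [1., 1.]))
AS, SP, AB = A - S, S - P, A - B
t = (AS[0] * SP[1] - AS[1] * SP[0]) / (AB[0] * SP[1] - AB[1] * SP[0])
assert abs(t - 0.5) < 1e-12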
elif reduction_type == 'image_real':
    raise NotImplementedError()
elif reduction_type == 'right':
    retval[_triangleRightSide(degrees)] = coeffs
    for i in set(range((degrees + 1)**2)) - set(_triangleRightSide(degrees)):
        l = np.sqrt(i).astype('int')
        m = abs(l - (i - l**2))
        retval[i] = (-1)**m * np.conj(retval[l**2 + l + m])
return retval


if __name__ == '__main__':
    from matplotlib import pyplot as plt

    e = EnvironmentMap('envmap.exr', 'angular')
    e.resize((64, 64))
    e.convertTo('latlong')

    se = SphericalHarmonic(e)

    err = []
    from tqdm import tqdm
    for i in tqdm(range(32)):
        recons = se.reconstruct(max_l=i)
        err.append(np.sum((recons - e.data)**2))
    plt.plot(err)
    plt.figure()
    plt.imshow(recons)
    raise NotImplementedError()
elif reduction_type == 'right':
    retval[_triangleRightSide(degrees)] = coeffs
    for i in set(range((degrees + 1)**2)) - set(_triangleRightSide(degrees)):
        l = np.sqrt(i).astype('int')
        m = abs(l - (i - l**2))
        retval[i] = (-1)**m * np.conj(retval[l**2 + l + m])
    return retval

raise Exception('unknown reduction_type')


if __name__ == '__main__':
    from matplotlib import pyplot as plt

    e = EnvironmentMap('envmap.exr', 'angular')
    e.resize((256, 256))
    e.convertTo('latlong')

    # P, nodes = _getP(e, 15)
    # refP = _getRefP(np.cos(nodes), 15)
    # for i in range(P.shape[1] - 5, P.shape[1]):
    #     plt.plot(np.linspace(-1, 1, P.shape[0]), P[:, i], label="{}".format(i))
    #     plt.plot(np.linspace(-1, 1, P.shape[0]), refP[:, i], label="ref{}".format(i))
    # plt.legend()
    # plt.show()

    coeffs_fsht = FSHT(e.copy(), 25)
def rotate(self, rotation):
    envmap = EnvironmentMap(self.img, 'latlong')
    envmap.rotate('DCM', rotation.as_matrix())
    self.img = envmap.data
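
# Usage sketch for the method above: rotation.as_matrix() suggests a
# scipy.spatial.transform.Rotation, so a caller might look like this
# (an assumption; `holder` is a stand-in for whatever instance defines rotate):
from scipy.spatial.transform import Rotation

r = Rotation.from_euler('y', 90, degrees=True)  # quarter turn around the up-axis
holder.rotate(r)                                # hypothetical instance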
def latlong2cube(latlong):
    '''
    Converts an image in latlong format to an image in cubemap format
    '''
    return EnvironmentMap(latlong, "latlong").convertTo("cube").data
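
# The inverse direction, for symmetry (a sketch; assumes skylibs accepts a
# cubemap image in its "cube" layout, as in the flow_blend_image snippet):
def cube2latlong(cube):
    '''
    Converts an image in cubemap format to an image in latlong format
    '''
    return EnvironmentMap(cube, "cube").convertTo("latlong").data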