def prewittgrad(self):
    """ prewitt gradient """
    gradx = np.array([1.0, 1.0, 1.0]).reshape(3, 1) * np.array([1.0, 0.0, -1.0]) / 3.0
    grady = np.array([1.0, 0.0, -1.0]).reshape(3, 1) * np.array([1.0, 1.0, 1.0]) / 3.0
    return gradx, grady
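# Quick sanity check (a standalone NumPy sketch, independent of the class
# above): the two column-times-row products expand to the usual 3x3 Prewitt
# kernels, scaled by 1/3.
import numpy as np

gradx = np.array([1.0, 1.0, 1.0]).reshape(3, 1) * np.array([1.0, 0.0, -1.0]) / 3.0
# gradx == [[ 1/3, 0, -1/3],
#           [ 1/3, 0, -1/3],
#           [ 1/3, 0, -1/3]]
assert np.allclose(gradx * 3.0, np.outer([1, 1, 1], [1, 0, -1]))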
def matchColorToImage(color, img):
    """ match the color to the image mode """
    imgChan = img.shape[-1]
    if isinstance(color, (float, int)):
        if isFloat(img):
            if isinstance(color, int):
                color = color / 255.0
        elif isinstance(color, float):
            # convert a float color to the 0-255 range of an integer image
            color = int(color * 255)
    else:
        colChan = len(color)
        if imgChan > colChan:
            msg = "Don't know how to convert color[%d] to image pixel[%d]" % (
                len(color), img.shape[-1])
            raise NotImplementedError(msg)
        if imgChan < colChan:
            color = color[0:imgChan]
        imgFloat = isFloat(img)
        colFloat = isFloat(color)
        if imgFloat != colFloat:
            if imgFloat:
                color = np.array(color) / 255.0
            else:
                # scale then cast; calling np.int() on an array is invalid
                color = (np.array(color) * 255).astype(int)
    return color
def has_ext():
    try:
        a = bh.array([[76, 25, 11], [27, 89, 51], [18, 60, 32]], dtype=bh.float32)
        b = bh.array([[10], [7], [43]], dtype=bh.float32)
        bh.lapack.gesv(a, b)
        return True
    except Exception as e:
        print("\n\033[31m[ext] Cannot test LAPACK extension methods.\033[0m")
        print(e)
        return False
def has_ext():
    try:
        src = bh.array([[1, 1, 1], [1, 1, 1], [1, 1, 0]], dtype=bh.float32)
        kernel = bh.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], dtype=bh.float32)
        bh.opencv.erode(src, kernel)
        return True
    except Exception as e:
        print("\n\033[31m[ext] Cannot test OpenCV extension methods.\033[0m")
        print(e)
        return False
def getChannel(self, channelId: str):
    """ get a channel in floating point form """
    if channelId not in self._activeChannels:
        if channelId in ['r', 'g', 'b'] or (channelId == 'a' and len(self._activeChannels) < 2):
            if 'v' in self._activeChannels:
                return self._activeChannels['v']
            res = numpyArray(self.image, floatingPoint=True, loader=None)
            if len(res.shape) < 3:  # L
                self._activeChannels['v'] = res
                return self._activeChannels['v']
            if res.shape[2] == 2:  # LA
                self._activeChannels['v'] = res[:, :, 0]
                self._activeChannels['a'] = res[:, :, 1]
            elif res.shape[2] == 3:  # RGB
                self._activeChannels['r'] = res[:, :, 0]
                self._activeChannels['g'] = res[:, :, 1]
                self._activeChannels['b'] = res[:, :, 2]
            elif res.shape[2] == 4:  # RGBA
                self._activeChannels['r'] = res[:, :, 0]
                self._activeChannels['g'] = res[:, :, 1]
                self._activeChannels['b'] = res[:, :, 2]
                self._activeChannels['a'] = res[:, :, 3]
        elif channelId == 'a':
            # no alpha channel present -- synthesize a fully-opaque plane
            ret = np.ones(self.image.size)
            self._activeChannels['a'] = ret
            return ret
    return self._activeChannels[channelId]
def random_galaxy(N):
    """ Generate a galaxy of random bodies """
    m = np.array((numpy.arange(0.0, 1.0, step=1.0 / N) + np.float64(10)) * np.float64(m_sol / 10))
    x = np.array((numpy.arange(0.0, 1.0, step=1.0 / N) - np.float64(0.5)) * np.float64(r_ly / 100))
    y = np.array((numpy.arange(0.0, 1.0, step=1.0 / N) - np.float64(0.5)) * np.float64(r_ly / 100))
    z = np.array((numpy.arange(0.0, 1.0, step=1.0 / N) - np.float64(0.5)) * np.float64(r_ly / 100))
    vx = np.zeros(N, dtype=np.float64)
    vy = np.zeros(N, dtype=np.float64)
    vz = np.zeros(N, dtype=np.float64)
    assert len(m) == N
    return m, x, y, z, vx, vy, vz
def prepare_inputs(*inputs, device):
    out = [bh.array(k) for k in inputs]
    for o in out:
        # force allocation on target device
        tmp = o * 1  # noqa: F841
    bh.flush()
    return out
def gaussianBlur(img, sizeX, sizeY=None, edge='clamp'):
    """
    perform a gaussian blur on an image

    :param img: can be a pil image, numpy array, etc
    :param sizeX: the x radius of the blur
    :param sizeY: the y radius of the blur (if None, use same as X)
    :param edge: what to do for more pixels when we reach an edge
        can be: "clamp","mirror","wrap", or a color
        default is "clamp"
    :returns: image in a numpy array
    """
    if sizeY is None:
        sizeY = sizeX
    if edge == 'clamp':
        mode = 'nearest'
        cval = 0.0
    elif edge == 'mirror':
        mode = 'mirror'
        cval = 0.0
    elif edge == 'wrap':
        mode = 'wrap'
        cval = 0.0
    else:
        mode = 'constant'
        cval = np.array(strToColor(edge))
    img = numpyArray(img)
    img = scipy.ndimage.gaussian_filter(img, (sizeY, sizeX), mode=mode, cval=cval)
    return img
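# A minimal usage sketch of the same idea with plain SciPy (assumes you
# already have a numpy array, so the library's numpyArray/strToColor helpers
# are not needed):
import numpy as np
import scipy.ndimage

img = np.random.random((64, 64))
# 'nearest' corresponds to the "clamp" edge mode above; note the
# (sigmaY, sigmaX) ordering, matching gaussianBlur's (sizeY, sizeX)
blurred = scipy.ndimage.gaussian_filter(img, sigma=(2, 5), mode='nearest')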
def dilate(a, b=None, c=None):
    if b is None:
        b = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], dtype=a.dtype)
    if c is None:
        c = np.empty_like(a)
    ufuncs.extmethod("opencv_dilate", c, a, b)
    return c
def my_kmax(R, k):
    N = len(R)
    f = np.ravel(R)
    #print(f)
    indices = np.array(heapq.nlargest(k, range(len(f)), f.__getitem__))
    j = np.mod(indices, N)
    i = np.floor(indices / N)
    #print(indices)
    return [i, j]
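# A vectorized alternative sketch using np.argpartition instead of a heap
# (assumed helper name, not part of the original code). Like the row/column
# recovery above, it assumes R is square (N x N); the k results are not
# returned in sorted order.
import numpy as np

def kmax_argpartition(R, k):
    N = R.shape[0]
    flat = np.ravel(R)
    idx = np.argpartition(flat, -k)[-k:]  # flat indices of the k largest values
    return [idx // N, idx % N]            # (row, column) indices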
def generating_kernel(a):
    """ generate a 5x5 kernel

    Comes from:
        https://compvisionlab.wordpress.com/2013/05/13/image-blending-using-pyramid/
    """
    w_1d = np.array([0.25 - a/2.0, 0.25, a, 0.25, 0.25 - a/2.0])
    return np.outer(w_1d, w_1d)
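# Worked example: a = 0.4 gives the classic binomial-like pyramid weighting,
# and the resulting 5x5 kernel sums to 1 (standalone check, plain NumPy).
import numpy as np

w_1d = np.array([0.25 - 0.4 / 2.0, 0.25, 0.4, 0.25, 0.25 - 0.4 / 2.0])
# w_1d == [0.05, 0.25, 0.4, 0.25, 0.05]
kernel = np.outer(w_1d, w_1d)
assert abs(kernel.sum() - 1.0) < 1e-12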
def compressed_copy(ary, param):
    a_min = ary.min()
    a_range = ary.max() - ary.min() + 1
    # Normalize `ary` into uint8
    a = (ary - a_min) * 256 / a_range
    assert (a.min() >= 0)
    assert (a.max() < 256)
    a = np.array(a, dtype=np.uint8)
    # Copy `a` using `param`
    a = _bh.mem_copy(a, param=param)
    # un-normalize and convert back to the original dtype of `ary`
    # (inverse of the affine map above: scale back first, then add the offset)
    a = array_create.array(a, dtype=ary.dtype)
    return a * a_range / 256.0 + a_min
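# Worked check of the normalize/un-normalize round trip (standalone sketch,
# plain NumPy, no Bohrium): quantizing to uint8 and inverting the same affine
# map recovers the data to within one quantization step (a_range / 256).
import numpy as np

ary = np.linspace(-5.0, 20.0, 100)
a_min = ary.min()
a_range = ary.max() - ary.min() + 1
q = ((ary - a_min) * 256 / a_range).astype(np.uint8)
restored = q * a_range / 256.0 + a_min
assert np.max(np.abs(restored - ary)) <= a_range / 256.0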
def directionToNormalColor(azimuth, elevation):
    """
    given a direction, convert it into an RGB normal color

    :param azimuth: compass direction
    :param elevation: up/down direction
    """
    azimuth = math.radians(float(azimuth))
    elevation = math.radians(float(elevation))
    return (np.array([math.sin(azimuth), math.cos(azimuth), math.cos(elevation)]) + 1) / 2
def run(self):
    for _ in range(100):
        a, b, c, d = (np.random.randn(self.nz) for _ in range(4))
        out_legacy = self.veros_legacy.fortran.solve_tridiag(a=a, b=b, c=c, d=d, n=self.nz)
        if self.veros_new.backend_name == "bohrium":
            a, b, c, d = (bh.array(v) for v in (a, b, c, d))
        out_new = numerics.solve_tridiag(self.veros_new, a, b, c, d)
        if not np.allclose(out_legacy, out_new):
            return False
    return True
def run(self):
    a, b, c, d = (np.random.randn(self.nx, self.ny, self.nz) for _ in range(4))
    out_legacy = np.zeros((self.nx, self.ny, self.nz))
    for i in range(self.nx):
        for j in range(self.ny):
            out_legacy[i, j] = self.veros_legacy.call_fortran_routine(
                'solve_tridiag', a=a[i, j], b=b[i, j], c=c[i, j], d=d[i, j], n=self.nz
            )
    if rs.backend == 'bohrium':
        import bohrium as bh
        a, b, c, d = (bh.array(v) for v in (a, b, c, d))
    out_new = numerics.solve_tridiag(self.veros_new.state, a, b, c, d)
    np.testing.assert_allclose(out_legacy, out_new)
def run(self):
    a, b, c, d = (np.random.randn(self.nx, self.ny, self.nz) for _ in range(4))
    out_legacy = np.zeros((self.nx, self.ny, self.nz))
    for i in range(self.nx):
        for j in range(self.ny):
            out_legacy[i, j] = self.veros_legacy.fortran.solve_tridiag(
                a=a[i, j], b=b[i, j], c=c[i, j], d=d[i, j], n=self.nz)
    if self.veros_new.backend_name == "bohrium":
        a, b, c, d = (bh.array(v) for v in (a, b, c, d))
    out_new = numerics.solve_tridiag(self.veros_new, a, b, c, d)
    passed = np.allclose(out_legacy, out_new)
    return passed
def rgb2hsvArray(rgb):
    """
    Transform an rgb array to an hsv array

    :param rgb: the input image. can be pil image or numpy array
    :return hsv: the output array

    This comes from scikit-image:
        https://github.com/scikit-image/scikit-image/blob/master/skimage/color/colorconv.py
    """
    rgb = numpyArray(rgb)
    singleColor = len(rgb.shape) == 1
    if singleColor:  # convert to one-pixel image
        rgb = np.array([[rgb]])
    out = np.empty_like(rgb)
    # -- V channel
    out_v = rgb.max(-1)
    # -- S channel
    delta = rgb.ptp(-1)
    # Ignore warning for zero divided by zero
    old_settings = np.seterr(invalid='ignore')
    out_s = delta / out_v
    out_s[delta == 0.] = 0.
    # -- H channel
    # red is max
    idx = (rgb[:, :, 0] == out_v)
    out[idx, 0] = (rgb[idx, 1] - rgb[idx, 2]) / delta[idx]
    # green is max
    idx = (rgb[:, :, 1] == out_v)
    out[idx, 0] = 2. + (rgb[idx, 2] - rgb[idx, 0]) / delta[idx]
    # blue is max
    idx = (rgb[:, :, 2] == out_v)
    out[idx, 0] = 4. + (rgb[idx, 0] - rgb[idx, 1]) / delta[idx]
    out_h = (out[:, :, 0] / 6.) % 1.
    out_h[delta == 0.] = 0.
    np.seterr(**old_settings)
    # -- output
    out[:, :, 0] = out_h
    out[:, :, 1] = out_s
    out[:, :, 2] = out_v
    # remove NaN
    out[np.isnan(out)] = 0
    if singleColor:
        out = out[0, 0]
    return out
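# Single-pixel sanity-check idea (standalone sketch): the conversion above
# should agree with the standard library's colorsys for an in-range pixel,
# since both return h, s, v in the 0-1 range.
import colorsys
import numpy as np

rgb = np.array([0.2, 0.6, 0.4])
h, s, v = colorsys.rgb_to_hsv(*rgb)
# expected: h == 5/12, s == 2/3, v == 0.6
assert np.allclose([h, s, v], [5.0 / 12.0, 2.0 / 3.0, 0.6])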
def load_data(self):
    """Load the npz archive specified by --inputfn, or None if not set"""
    if self.args.inputfn is None:
        return None
    else:
        nobh_data = numpy.load(self.args.inputfn)
        bhary_keys = nobh_data["_bhary_keys"].tolist()
        ret = {}
        for k in nobh_data.keys():
            if k == "_bhary_keys":
                continue
            # Convert numpy arrays into bohrium arrays
            if bh_is_loaded_as_np and k in bhary_keys:
                a = nobh_data[k]
                ret[k] = bohrium.array(a, bohrium=True)
            else:
                ret[k] = nobh_data[k]
        return ret
def smoothNoise(size=(256,256),undersize=0.5,scaleMode=None,seed=None):
    """
    create smooth noise by starting with undersized random noise
    and then scaling it up

    :param size: finished size
    :param undersize: start undersized (percent) then scale up
    :param scaleMode: PIL scale mode (default is BILINEAR)
    :param seed: random seed to permit repeatability
    """
    if seed is not None:
        np.random.seed(seed)
    if scaleMode is None:
        scaleMode=Image.BILINEAR
    noise=np.random.random((int(size[0]*undersize),int(size[1]*undersize)))
    if undersize!=0:
        size=(int(size[0]),int(size[1]))
        noise=np.array(Image.fromarray(noise).resize(size,scaleMode))/255.0
    return noise
def toWavelet(img,wavelet='haar',mode='symmetric',level=None):
    """
    :param img: any supported image type to transform into wavelet space
    :param wavelet: any common, named wavelet, including
        'Haar' (default)
        'Daubechies'
        'Symlet'
        'Coiflet'
        'Biorthogonal'
        'ReverseBiorthogonal'
        'DiscreteMeyer'
        'Gaussian'
        'MexicanHat'
        'Morlet'
        'ComplexGaussian'
        'Shannon'
        'FrequencyBSpline'
        'ComplexMorlet'
        or a custom
        [[lowpass_decomposition],[highpass_decomposition],
         [lowpass_reconstruction],[highpass_reconstruction]]
        where each is a pair of floating point values
    :param mode: str or 2-tuple of str, optional
        Signal extension mode, see Modes (default: "symmetric").
        This can also be a tuple containing a mode to apply along each axis in axes.
    :param level: int, optional
        Decomposition level (must be >= 0). If level is None (default) then it
        will be calculated using the dwt_max_level function.

    See also:
        https://pywavelets.readthedocs.io/en/latest/ref/index.html
    """
    if mode is None:
        mode='symmetric'
    img=numpyArray(img)
    colorMode=imageMode(img)
    if len(colorMode)==1:
        return pywt.wavedec2(img,_wavelet(wavelet),mode,level)
    ret=[]
    for ch in range(len(colorMode)):
        ret.append(np.array(pywt.wavedec2(img[:,:,ch],_wavelet(wavelet),mode,level)))
    ret=np.dstack(ret)
    return ret
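# A minimal pywt usage sketch for the single-channel case, showing the
# structure wavedec2 returns: one approximation array followed by per-level
# (horizontal, vertical, diagonal) detail tuples, coarsest level first.
import numpy as np
import pywt

img = np.random.random((64, 64))
coeffs = pywt.wavedec2(img, 'haar', mode='symmetric', level=2)
cA2, (cH2, cV2, cD2), (cH1, cV1, cD1) = coeffs
assert cA2.shape == (16, 16)  # 64 -> 32 -> 16 after two haar levels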
def getAlpha(image,alwaysCreate:bool=True):
    """
    gets the alpha channel regardless of image type

    :param image: the image whose mask to get
    :param alwaysCreate: always returns a numpy array (otherwise, may return None)
    :return: alpha channel as a PIL image, or numpy array, or possibly None,
        depending on alwaysCreate
    """
    ret=None
    if image is None or not hasAlpha(image):
        if alwaysCreate:
            # no alpha channel -- create a fully-opaque plane the size of the image
            ret=np.ones(numpyArray(image).shape[0:2])
    elif isinstance(image,Image.Image):
        # PIL images expose the alpha band via getchannel()
        ret=image.getchannel('A')
    else:
        ret=image[:,:,-1]
    return ret
def get(self, type='smooth', mode='reflect'):
    """
    type can be 'smooth' or 'fine'
    mode can be 'reflect','constant','nearest','mirror', 'wrap'
        for handling borders
    """
    gradfn = {'smooth': self.prewittgrad, 'fine': self.basicgrad}[type]
    gradx, grady = gradfn()
    # x, y and z below are now the gradient matrices,
    # each entry from x,y,z is a gradient vector at an image point
    x = filters.convolve(self.img, gradx, mode=mode)
    y = filters.convolve(self.img, grady, mode=mode)
    # norm is the magnitude of the x,y,z vectors,
    # each entry is the magnitude of the gradient at an image point and z*z = 1
    norm = np.sqrt(x * x + y * y + 1)
    # divide by the magnitude to normalise
    # as well scale to an image: negative 0-127, positive 127-255
    x, y = [a / norm * 127.0 + 128.0 for a in (x, y)]
    z = np.ones(self.shape) / norm  # generate z, matrix of ones, then normalise
    z = z * 255.0  # all positive
    # x, -y gives blender form
    # convert to int, transpose to rgb and return the normal map
    return np.array([x, -y, z]).transpose(1, 2, 0).astype(np.uint8)
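# A self-contained sketch of the same normal-map idea (assumed demo, not part
# of the class above): turn a height field into an RGB normal map, with
# np.gradient standing in for the convolution-based derivatives.
import numpy as np

height = np.random.random((64, 64))
dy, dx = np.gradient(height)            # derivatives along rows and columns
norm = np.sqrt(dx * dx + dy * dy + 1)
r = dx / norm * 127.0 + 128.0
g = -dy / norm * 127.0 + 128.0          # flip y for the "blender form" noted above
b = 1.0 / norm * 255.0
normal_map = np.dstack([r, g, b]).astype(np.uint8)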
def voronoi(size=(256,256),npoints=30,mode='squared',invert=False,seed=None):
    """
    :param mode: 'simple','squared','twoNearestDiff','twoNearestMult'

    see also:
        http://blackpawn.com/texts/cellular/default.html

    TODO: this could be optimized using a cKDTree
        https://stackoverflow.com/questions/10818546/finding-index-of-nearest-point-in-numpy-arrays-of-x-and-y-coordinates
    """
    size=(int(size[0]),int(size[1]))
    img=np.ndarray(size)
    size=np.array(size)
    if seed is not None:
        np.random.seed(seed)
    # random points where voronoi cells will appear
    points=np.random.rand(int(npoints),2)*size
    # flattened list of xy points for every pixel
    xy=xypoints(img).reshape((-1,2))
    # for each pixel, determine distance to all voronoi points
    dist=scipy.spatial.distance.cdist(xy,points)
    if mode=='simple':
        img=np.min(dist,axis=1)
    elif mode=='squared':
        img=np.min(dist,axis=1)**2
    elif mode=='twoNearestDiff':
        #smallest=np.argsort(dist,axis=1)
        # argpartition is faster than argsort because it stops sorting after
        # the first two; kth=1 puts the nearest in column 0, second nearest in column 1
        smallest=np.argpartition(dist,1,axis=1)[:,0:2]
        rows=np.arange(dist.shape[0])
        img=dist[rows,smallest[:,1]]-dist[rows,smallest[:,0]]
    elif mode=='twoNearestMult':
        #smallest=np.argsort(dist,axis=1)
        smallest=np.argpartition(dist,1,axis=1)[:,0:2]
        rows=np.arange(dist.shape[0])
        img=dist[rows,smallest[:,1]]*dist[rows,smallest[:,0]]
    else:
        raise NotImplementedError('mode="'+mode+'"')
    # convert pixel list back into 2d array
    img=normalize(img.reshape((size[0],size[1])))
    if invert:
        img=1.0-img
    return img
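# Sketch of the cKDTree optimization mentioned in the TODO above (assumed
# helper name, not part of the library): query the two nearest seed points
# per pixel in one call instead of building the full pixel-by-seed distance
# matrix with cdist.
import numpy as np
import scipy.spatial

def _voronoiDistances(size=(256, 256), npoints=30, seed=None):
    if seed is not None:
        np.random.seed(seed)
    points = np.random.rand(npoints, 2) * np.array(size, dtype=float)
    grid = np.indices(size).reshape(2, -1).T.astype(float)  # one row per pixel
    tree = scipy.spatial.cKDTree(points)
    dist, _ = tree.query(grid, k=2)  # distances to the two nearest seeds
    nearest = dist[:, 0].reshape(size)
    secondNearest = dist[:, 1].reshape(size)
    return nearest, secondNearest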
def run(args):
    print("*** Testing the equivalency of Bohrium-NumPy and NumPy ***")
    test_suite_start_time = time.time()

    for f in args.file:
        if f.startswith("test_") and f.endswith("py") and f not in args.exclude:
            m = __import__(f[:-3])  # Remove ".py"
            # All test classes start with "test_"
            for cls in [o for o in dir(m) if o.startswith("test_") and
                        (True if args.test and o in args.test or not args.test else False)]:
                # Exclude specific test
                if cls in args.exclude_test:
                    continue
                cls_obj = getattr(m, cls)
                cls_inst = cls_obj()
                cls_inst.args = args

                # Exclude benchmarks
                import inspect
                is_benchmark = BenchHelper.__name__ in [c.__name__ for c in inspect.getmro(cls_obj)]
                if args.exclude_benchmarks and is_benchmark:
                    continue

                test_okay = True
                test_start_time = time.time()

                # All test methods start with "test_"
                for mth in [o for o in dir(cls_obj) if o.startswith("test_")]:
                    name = "%s/%s/%s" % (f, cls[5:], mth[5:])
                    print("Testing " + _C.OKGREEN + str(name) + _C.ENDC, end=" ")
                    sys.stdout.flush()
                    for (np_arys, cmd) in getattr(cls_inst, "init")():
                        # Exclude complex
                        if args.exclude_complex_dtype:
                            complex_nptypes = [eval(dtype) for dtype in TYPES.COMPLEX]
                            index = 0
                            non_complex = {}
                            for ary in np_arys.values():
                                if ary.dtype not in complex_nptypes:
                                    non_complex[index] = ary
                                    index += 1
                            np_arys = non_complex

                        # Get Bohrium arrays
                        bh_arys = []
                        for a in np_arys.values():
                            bh_arys.append(bh.array(a))

                        # Execute using NumPy
                        (res1, cmd1) = getattr(cls_inst, mth)(np_arys)
                        res1 = res1.copy()

                        # Execute using Bohrium
                        (res2, cmd2) = getattr(cls_inst, mth)(bh_arys)
                        cmd += cmd1

                        # Compare
                        try:
                            if not np.isscalar(res2) and bh.check(res2):
                                res2 = res2.copy2numpy()
                        except RuntimeError as error_msg:
                            test_okay = False
                            print()
                            print("  " + _C.OKBLUE + "[CMD] %s" % cmd + _C.ENDC)
                            print("  " + _C.FAIL + str(error_msg) + _C.ENDC)
                        else:
                            rtol = cls_inst.config['maxerror']
                            atol = rtol * 0.1
                            if not np.allclose(res1, res2, rtol=rtol, atol=atol):
                                test_okay = False
                                if 'warn_on_err' in cls_inst.config:
                                    print()
                                    print(_C.WARNING + "  [Warning] %s" % (name) + _C.ENDC)
                                    print(_C.OKBLUE + "  [CMD] %s" % cmd + _C.ENDC)
                                    print(_C.OKGREEN + "  NumPy result: %s" % (res1) + _C.ENDC)
                                    print(_C.FAIL + "  Bohrium result: %s" % (res2) + _C.ENDC)
                                    print(_C.WARNING + "  " + str(cls_inst.config['warn_on_err']) + _C.ENDC)
                                    print(_C.OKBLUE + "  Manual verification is needed." + _C.ENDC)
                                else:
                                    print()
                                    print(_C.FAIL + "  [Error] %s" % (name) + _C.ENDC)
                                    print(_C.OKBLUE + "  [CMD] %s" % cmd + _C.ENDC)
                                    print(_C.OKGREEN + "  NumPy result: %s" % (res1) + _C.ENDC)
                                    print(_C.FAIL + "  Bohrium result: %s" % (res2) + _C.ENDC)
                                    sys.exit(1)
                    if test_okay:
                        print(_C.OKBLUE + "({:.2f}s)".format(time.time() - test_start_time) + _C.ENDC, "✓")

    print("*** Finished in: " + _C.OKBLUE + "{:.2f}s".format(time.time() - test_suite_start_time) + _C.ENDC + " ***")
def run(self, pseudo_input):
    """Run the Benchmark script and return the result.

    Benchmarks are assumed to be installed along with the Bohrium module.
    """
    (target, dtype) = self.get_meta(pseudo_input)

    # Setup output filename
    outputfn = "/tmp/%s_%s_%s_output_%s.npz" % (self.script, dtype, target, self.uuid)

    # Locate the benchmark script
    import benchpress
    benchmark_path = benchpress.suite_util.benchmark_path(self.script, "python_numpy", ".py")
    sys_exec = [sys.executable] if target.lower() == "none" else [sys.executable, "-m", "bohrium"]

    # Setup command
    cmd = sys_exec + [
        benchmark_path,
        '--size=' + self.sizetxt,
        '--dtype=' + str(dtype),
        '--target=' + target,
        '--outputfn=' + outputfn
    ]

    # Setup the inputfn if one is needed/provided
    if self.inputfn:
        npt_path = os.path.dirname(sys.argv[0])
        if not npt_path:
            npt_path = "./"
        inputfn = "%s/datasets/%s" % (npt_path, self.inputfn.format(dtype))
        cmd.append('--inputfn')
        cmd.append(inputfn)
        if not os.path.exists(inputfn):
            raise Exception('File does not exist: %s\n' % inputfn)

    env = os.environ.copy()
    env['BH_PROXY_PORT'] = "4201"

    # SIP on macOS won't allow passing on DYLD_LIBRARY_PATH in env, so
    # we attach it to the command instead.
    if "DYLD_LIBRARY_PATH" in env:
        cmd = ["DYLD_LIBRARY_PATH=" + env["DYLD_LIBRARY_PATH"]] + cmd

    # Execute the benchmark
    (out, err) = shell_cmd(cmd, verbose=self.args.verbose, env=env)

    if 'elapsed-time' not in out:
        raise Exception(_C.FAIL + "Cannot find elapsed time, output:\n%s\nWith error:\n%s\n" % (out, err) + _C.ENDC)
    if not os.path.exists(outputfn):
        raise Exception(_C.FAIL + 'Benchmark did not produce any output, expected: %s\n' % outputfn + _C.ENDC)

    # Load the result from disk
    npzs = np.load(outputfn)
    res = {}
    for k in npzs:
        res[k] = npzs[k]
    # Delete npz
    del npzs
    # Delete the result from disk
    if os.path.exists(outputfn):
        os.remove(outputfn)

    # Convert to whatever namespace it ought to be in
    res['res'] = bh.array(res['res'], bohrium=target != "None")
    return (res['res'], ' '.join(cmd))
def __threshold(name, a, thresh, maxval):
    b = np.array([thresh, maxval], dtype=a.dtype)
    c = np.empty_like(a)
    ufuncs.extmethod(name, c, a, b)
    return c
def run(self, pseudo_input):
    """Run the Benchmark script and return the result.

    Benchmarks are assumed to be installed along with the Bohrium module.
    """
    (target, dtype) = self.get_meta(pseudo_input)

    # Setup output filename
    outputfn = "/tmp/%s_%s_%s_output_%s.npz" % (self.script, dtype, target, self.uuid)

    # Find the benchmark directory
    benchmarks_dir, err = subprocess.Popen(
        ['bp-info', '--benchmarks'],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    ).communicate()

    sys_exec = [sys.executable] if target.lower() == "none" else [sys.executable, "-m", "bohrium"]
    benchmark_path = os.sep.join([benchmarks_dir.strip(), self.script, "python_numpy", self.script + ".py"])

    # Setup command
    cmd = sys_exec + [
        benchmark_path,
        '--size=' + self.sizetxt,
        '--dtype=' + str(dtype),
        '--target=' + target,
        '--outputfn=' + outputfn
    ]

    # Setup the inputfn if one is needed/provided
    if self.inputfn:
        npt_path = os.path.dirname(sys.argv[0])
        if not npt_path:
            npt_path = "./"
        inputfn = "%s/datasets/%s" % (npt_path, self.inputfn.format(dtype))
        cmd.append('--inputfn')
        cmd.append(inputfn)
        if not os.path.exists(inputfn):
            raise Exception('File does not exist: %s\n' % inputfn)

    env = os.environ.copy()
    env['BH_PROXY_PORT'] = "4201"

    # Execute the benchmark
    out = shell_cmd(cmd, verbose=self.args.verbose, env=env)

    if 'elapsed-time' not in out:
        raise Exception("Cannot find elapsed time, output:\n%s\n" % out)
    if not os.path.exists(outputfn):
        raise Exception('Benchmark did not produce any output, expected: %s\n' % outputfn)

    # Load the result from disk
    npzs = np.load(outputfn)
    res = {}
    for k in npzs:
        res[k] = npzs[k]
    # Delete npz
    del npzs
    # Delete the result from disk
    if os.path.exists(outputfn):
        os.remove(outputfn)

    # Convert to whatever namespace it ought to be in
    res['res'] = bh.array(res['res'], bohrium=target != "None")
    return (res['res'], ' '.join(cmd))
for (np_arys, cmd) in getattr(cls_inst, "init")():
    # Exclude complex
    if args.exclude_complex_dtype:
        complex_nptypes = [eval(dtype) for dtype in TYPES.COMPLEX]
        index = 0
        non_complex = {}
        for ary in np_arys.values():
            if ary.dtype not in complex_nptypes:
                non_complex[index] = ary
                index += 1
        np_arys = non_complex

    # Get Bohrium arrays
    bh_arys = []
    for a in np_arys.values():
        bh_arys.append(bh.array(a))

    # Execute using NumPy
    (res1, cmd1) = getattr(cls_inst, mth)(np_arys)
    res1 = res1.copy()

    # Execute using Bohrium
    (res2, cmd2) = getattr(cls_inst, mth)(bh_arys)
    cmd += cmd1

    # Compare
    try:
        if not np.isscalar(res2) and bh.check(res2):
            res2 = res2.copy2numpy()
    except RuntimeError as error_msg:
        test_okay = False
        print()
        print("  " + _C.OKBLUE + "[CMD] %s" % cmd + _C.ENDC)
        print("  " + _C.FAIL + str(error_msg) + _C.ENDC)
    else:
def colormap(img, colors=None):
    """
    apply the colors to a grayscale image
    if a color image is provided, convert it (thus, acts like a "colorize" function)

    :param img: a grayscale image
    :param colors: [(decimalPercent,color),(...)]
        if no colors are given, then [(0.0,black),(1.0,white)]
        if a single color and no percent is given,
        assume [(0.0,black),(0.5,theColor),(1.0,white)]
    :return: the resulting image
    """
    img = grayscale(img)
    if not isFloat(img):
        img = img / 255.0
    if colors is None:
        colors = [(0.0, (0.0, 0.0, 0.0)), (1.0, (1.0, 1.0, 1.0))]
    elif not isinstance(colors[0], (tuple, list, np.ndarray)):
        # a single color was given -- build a black->color->white ramp
        white = []
        black = []
        if isinstance(colors, str):
            colors = strToColor(colors)
        if isFloat(colors):
            imax = 1.0
            imin = 0.0
        else:
            imax = 255
            imin = 0
        for _ in range(len(colors)):
            white.append(imax)
            black.append(imin)
        if len(colors) in [2, 4]:
            # keep same alpha value
            black[-1] = colors[-1]
            white[-1] = colors[-1]
        colors = [(0.0, black), (0.5, colors), (1.0, white)]
    else:
        colors.sort()  # make sure we go from low to high
    # make sure colors are in the shape we need
    colors = [[matchColorToImage(color[0], img), np.array(strToColor(color[1]))]
              for color in colors]
    shape = (img.shape[0], img.shape[1], len(colors[0][1]))
    img2 = np.zeros(shape)
    img = img[..., None]
    if True:
        lastColor = None
        for color in colors:
            if lastColor is None:
                img2 += color[1]
            else:
                # linear interpolation weight within this pair of color stops
                percent = (img - lastColor[0]) / (color[0] - lastColor[0])
                img2 = np.where(
                    np.logical_and(img > lastColor[0], img <= color[0]),
                    (color[1] * percent) + (lastColor[1] * (1 - percent)),
                    img2)
            lastColor = color
        img2 = np.where(img > lastColor[0], lastColor[1], img2)
    else:
        # alternate per-pixel implementation (currently disabled)
        def gradMap(c):
            lastColor = None
            for color in colors:
                if c < color[0]:
                    if lastColor is None:
                        return color[1]
                    percent = (c - lastColor[0]) / color[0]
                    return (lastColor[1] * percent + color[1]) / (2 * percent)
                lastColor = color
            return lastColor[1]
        img2 = perPixel(gradMap, img, clamp=False)
    return img2
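# Alternative sketch of the same gradient-map idea using np.interp per channel
# (assumed demo, independent of the helpers above): interpolate each output
# channel over the color-stop positions.
import numpy as np

gray = np.random.random((32, 32))     # grayscale image in 0-1
stops = [0.0, 0.5, 1.0]               # decimal-percent positions of the color stops
stopColors = np.array([[0, 0, 0], [1, 0.5, 0], [1, 1, 1]], dtype=float)
mapped = np.dstack([np.interp(gray, stops, stopColors[:, ch]) for ch in range(3)])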
def run(self, pseudo_input):
    """Run the Benchmark script and return the result.

    Benchmarks are assumed to be installed along with the Bohrium module.
    """
    (target, dtype) = self.get_meta(pseudo_input)

    # Setup output filename
    outputfn = "/tmp/%s_%s_%s_output_%s.npz" % (self.script, dtype, target, self.uuid)

    # Find the benchmark directory
    benchmarks_dir, err = subprocess.Popen(
        ['bp-info', '--benchmarks'],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    ).communicate()

    sys_exec = [sys.executable, "-m", "bohrium"] if target else [sys.executable]
    benchmark_path = os.sep.join([benchmarks_dir.strip(), self.script, "python_numpy", self.script + ".py"])

    # Setup command
    cmd = sys_exec + [
        benchmark_path,
        '--size=' + self.sizetxt,
        '--dtype=' + str(dtype),
        '--target=' + target,
        '--outputfn=' + outputfn
    ]

    # Setup the inputfn if one is needed/provided
    if self.inputfn:
        npt_path = os.path.dirname(sys.argv[0])
        if not npt_path:
            npt_path = "./"
        inputfn = "%s/datasets/%s" % (npt_path, self.inputfn.format(dtype))
        cmd.append('--inputfn')
        cmd.append(inputfn)
        if not os.path.exists(inputfn):
            raise Exception('File does not exist: %s' % inputfn)

    # Execute the benchmark
    p = subprocess.Popen(
        cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    out, err = p.communicate()

    if 'elapsed-time' not in out:
        raise Exception("Cannot find elapsed time, got stdout(%s) stderr(%s)" % (out, err))
    if not os.path.exists(outputfn):
        raise Exception('Benchmark did not produce any output, expected: %s' % outputfn)

    # We silently accept these errors when output to stderr:
    #
    #   * The Python object count
    #   * Unsupported fuse model
    #   * Unknown fuse model
    #
    if err and not re.match("\[[0-9]+ refs\]", err) \
            and 'unsupported fuse model' not in err \
            and 'unknown fuse model' not in err:
        err_chunked = ", ".join(err.split("\n"))
        print(_C.OKBLUE + "[CMD] %s" % " ".join(cmd) + _C.ENDC)
        print(_C.WARNING + "[Warning] The CMD above wrote the following to stderr: [%s]" % err_chunked + _C.ENDC)

    # Load the result from disk
    npzs = np.load(outputfn)
    res = {}
    for k in npzs:
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            res[k] = npzs[k]
    # Delete npz
    del npzs
    # Delete the result from disk
    if os.path.exists(outputfn):
        os.remove(outputfn)

    # Convert to whatever namespace it ought to be in
    res['res'] = bh.array(res['res'], bohrium=target != "None")
    return (res['res'], ' '.join(cmd))
def tensordot(a, b, axes=2):
    """
    Compute tensor dot product along specified axes for arrays >= 1-D.

    Given two tensors (arrays of dimension greater than or equal to one),
    `a` and `b`, and an array_like object containing two array_like
    objects, ``(a_axes, b_axes)``, sum the products of `a`'s and `b`'s
    elements (components) over the axes specified by ``a_axes`` and
    ``b_axes``. The third argument can be a single non-negative
    integer_like scalar, ``N``; if it is such, then the last ``N``
    dimensions of `a` and the first ``N`` dimensions of `b` are summed over.

    Parameters
    ----------
    a, b : array_like, len(shape) >= 1
        Tensors to "dot".
    axes : variable type
        * integer_like scalar
          Number of axes to sum over (applies to both arrays); or
        * (2,) array_like, both elements array_like of the same length
          List of axes to be summed over, first sequence applying to `a`,
          second to `b`.

    See Also
    --------
    dot, einsum

    Notes
    -----
    When there is more than one axis to sum over - and they are not the last
    (first) axes of `a` (`b`) - the argument `axes` should consist of
    two sequences of the same length, with the first axis to sum over given
    first in both sequences, the second axis second, and so forth.

    Examples
    --------
    A "traditional" example:

    >>> a = np.arange(60.).reshape(3,4,5)
    >>> b = np.arange(24.).reshape(4,3,2)
    >>> c = np.tensordot(a,b, axes=([1,0],[0,1]))
    >>> c.shape
    (5, 2)
    >>> c
    array([[ 4400.,  4730.],
           [ 4532.,  4874.],
           [ 4664.,  5018.],
           [ 4796.,  5162.],
           [ 4928.,  5306.]])
    >>> # A slower but equivalent way of computing the same...
    >>> d = np.zeros((5,2))
    >>> for i in range(5):
    ...   for j in range(2):
    ...     for k in range(3):
    ...       for n in range(4):
    ...         d[i,j] += a[k,n,i] * b[n,k,j]
    >>> c == d
    array([[ True,  True],
           [ True,  True],
           [ True,  True],
           [ True,  True],
           [ True,  True]], dtype=bool)

    An extended example taking advantage of the overloading of + and \\*:

    >>> a = np.array(range(1, 9))
    >>> a.shape = (2, 2, 2)
    >>> A = np.array(('a', 'b', 'c', 'd'), dtype=object)
    >>> A.shape = (2, 2)
    >>> a; A
    array([[[1, 2],
            [3, 4]],
           [[5, 6],
            [7, 8]]])
    array([[a, b],
           [c, d]], dtype=object)

    >>> np.tensordot(a, A) # third argument default is 2
    array([abbcccdddd, aaaaabbbbbbcccccccdddddddd], dtype=object)

    >>> np.tensordot(a, A, 1)
    array([[[acc, bdd],
            [aaacccc, bbbdddd]],
           [[aaaaacccccc, bbbbbdddddd],
            [aaaaaaacccccccc, bbbbbbbdddddddd]]], dtype=object)

    >>> np.tensordot(a, A, 0) # "Left for reader" (result too long to incl.)
    array([[[[[a, b],
              [c, d]],
              ...

    >>> np.tensordot(a, A, (0, 1))
    array([[[abbbbb, cddddd],
            [aabbbbbb, ccdddddd]],
           [[aaabbbbbbb, cccddddddd],
            [aaaabbbbbbbb, ccccdddddddd]]], dtype=object)

    >>> np.tensordot(a, A, (2, 1))
    array([[[abb, cdd],
            [aaabbbb, cccdddd]],
           [[aaaaabbbbbb, cccccdddddd],
            [aaaaaaabbbbbbbb, cccccccdddddddd]]], dtype=object)

    >>> np.tensordot(a, A, ((0, 1), (0, 1)))
    array([abbbcccccddddddd, aabbbbccccccdddddddd], dtype=object)

    >>> np.tensordot(a, A, ((2, 1), (1, 0)))
    array([acccbbdddd, aaaaacccccccbbbbbbdddddddd], dtype=object)

    """
    try:
        iter(axes)
    except Exception:
        axes_a = list(range(-axes, 0))
        axes_b = list(range(0, axes))
    else:
        axes_a, axes_b = axes
    try:
        na = len(axes_a)
        axes_a = list(axes_a)
    except TypeError:
        axes_a = [axes_a]
        na = 1
    try:
        nb = len(axes_b)
        axes_b = list(axes_b)
    except TypeError:
        axes_b = [axes_b]
        nb = 1

    a, b = np.array(a), np.array(b)
    as_ = a.shape
    nda = len(a.shape)
    bs = b.shape
    ndb = len(b.shape)
    equal = True
    if na != nb:
        equal = False
    else:
        for k in range(na):
            if as_[axes_a[k]] != bs[axes_b[k]]:
                equal = False
                break
            if axes_a[k] < 0:
                axes_a[k] += nda
            if axes_b[k] < 0:
                axes_b[k] += ndb
    if not equal:
        raise ValueError("shape-mismatch for sum")

    # Move the axes to sum over to the end of "a"
    # and to the front of "b"
    notin = [k for k in range(nda) if k not in axes_a]
    newaxes_a = notin + axes_a
    N2 = 1
    for axis in axes_a:
        N2 *= as_[axis]
    newshape_a = (-1, N2)
    olda = [as_[axis] for axis in notin]

    notin = [k for k in range(ndb) if k not in axes_b]
    newaxes_b = axes_b + notin
    N2 = 1
    for axis in axes_b:
        N2 *= bs[axis]
    newshape_b = (N2, -1)
    oldb = [bs[axis] for axis in notin]

    at = a.transpose(newaxes_a).reshape(newshape_a)
    bt = b.transpose(newaxes_b).reshape(newshape_b)
    res = dot(at, bt)
    return res.reshape(olda + oldb)
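# A small worked check of the reduce-to-matmul trick used above (plain NumPy
# sketch; the @ operator stands in for the module's dot()): summing a's axes
# (1, 0) against b's axes (0, 1) is exactly one (5,12) x (12,2) matrix product.
import numpy as np

a = np.arange(60.).reshape(3, 4, 5)
b = np.arange(24.).reshape(4, 3, 2)
at = a.transpose(2, 1, 0).reshape(5, 12)   # free axis first, summed axes last
bt = b.reshape(12, 2)                      # summed axes first, free axis last
assert np.allclose(at @ bt, np.tensordot(a, b, axes=([1, 0], [0, 1])))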
def init(self, grid, material, steps):
    assert cfg['bh_stack'] in ['openmp', 'opencl', 'cuda']
    os.environ["BH_STACK"] = self.cfg['bh_stack']
    grid.T1 = bohrium.array(grid.T1)
    grid.T2 = bohrium.array(grid.T2)
    grid.T3 = bohrium.array(grid.T3)
    grid.T4 = bohrium.array(grid.T4)
    grid.T5 = bohrium.array(grid.T5)
    grid.T6 = bohrium.array(grid.T6)
    grid.ux_new = bohrium.array(grid.ux_new)
    grid.uy_new = bohrium.array(grid.uy_new)
    grid.uz_new = bohrium.array(grid.uz_new)
    material.C = bohrium.array(material.C)
    material.P = bohrium.array(material.P)
    super().init(grid, material, steps)
def run(self, pseudo_input):
    """Run the Benchmark script and return the result.

    Benchmarks are assumed to be installed along with the Bohrium module.
    """
    (target, dtype) = self.get_meta(pseudo_input)

    # Setup output filename
    outputfn = "/tmp/%s_%s_%s_output_%s.npz" % (self.script, dtype, target, self.uuid)

    # Find the benchmark directory
    benchmarks_dir, err = subprocess.Popen(
        ['bp-info', '--benchmarks'],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    ).communicate()

    sys_exec = [sys.executable, "-m", "bohrium"] if target else [sys.executable]
    benchmark_path = os.sep.join([benchmarks_dir.strip(), self.script, "python_numpy", self.script + ".py"])

    # Setup command
    cmd = sys_exec + [
        benchmark_path,
        '--size=' + self.sizetxt,
        '--dtype=' + str(dtype),
        '--target=' + target,
        '--outputfn=' + outputfn
    ]

    # Setup the inputfn if one is needed/provided
    if self.inputfn:
        npt_path = os.path.dirname(sys.argv[0])
        if not npt_path:
            npt_path = "./"
        inputfn = "%s/datasets/%s" % (npt_path, self.inputfn.format(dtype))
        cmd.append('--inputfn')
        cmd.append(inputfn)
        if not os.path.exists(inputfn):
            raise Exception('File does not exist: %s' % inputfn)

    # Execute the benchmark
    p = subprocess.Popen(
        cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    out, err = p.communicate()

    if 'elapsed-time' not in out:
        raise Exception("Benchmark error [stdout:%s,stderr:%s]" % (out, err))
    if err and not re.match("\[[0-9]+ refs\]", err):
        # We accept the Python object count on stderr
        raise Exception("Benchmark error[%s]" % err)
    if not os.path.exists(outputfn):
        raise Exception('Benchmark did not produce the output: %s' % outputfn)

    # Load the result from disk
    npzs = np.load(outputfn)
    res = {}
    for k in npzs:
        res[k] = npzs[k]
    # Delete npz
    del npzs
    # Delete the result from disk
    if os.path.exists(outputfn):
        os.remove(outputfn)

    # Convert to whatever namespace it ought to be in
    res['res'] = bh.array(res['res'], bohrium=target != "None")
    return (res['res'], ' '.join(cmd))
#!/usr/bin/python2
import bohrium as bh

a = bh.array(range(9 * 100000))
a = a + a
b = a + 2
a = a + b
print(a)
def connected_components(a, connectivity=8):
    b = np.array([connectivity], dtype=a.dtype)
    c = np.zeros_like(a)
    ufuncs.extmethod("opencv_connected_components", c, a, b)
    return c