def isBalanced(root: TreeNode) -> bool: if not root: return True if not isBalanced(root.left) or not isBalanced(root.right): return False return abs(depth(root.left) - depth(root.right)) <= 1
def depth(self, pair='btc_eur'):
    """Fetch the order book for *pair* from the exchange's public API.

    Parameters:
        pair: internal pair name; must be a key of ``self.pairs``.

    Returns the parsed depth object and caches it, with a timestamp, in
    ``self.curdepth[pair]``.

    Raises:
        Exception: for an unknown pair, or when the API reports errors;
        API transport errors are logged and re-raised.

    NOTE(review): the ``depth(**v)`` call below refers to a module-level
    factory (class/namedtuple) that this method shadows — confirm.
    """
    # Entry count is overridable via the global config dict (idiomatic
    # membership test; the original used `'depth_count' in gv.keys()`).
    if 'depth_count' in gv:
        count = int(gv['depth_count'])
    else:
        count = 20
    if pair not in self.pairs:  # was the non-idiomatic `not pair in ...keys()`
        raise Exception('invalid pair', pair)
    try:
        s = self.api.query_public('Depth',
                                  {'pair': self.pairs[pair], 'count': count})
    except Exception as e:
        log.exception(e)
        raise
    if s['error']:
        print("an error occured %s" % s['error'])
        raise Exception(s['error'])
    # The API returns one result entry keyed by its own pair code; take it.
    d = [depth(**v) for k, v in s['result'].items()][0]
    self.curdepth[pair] = [d, time.time()]
    return d
def depth(self, pair='btc_eur'):
    """Fetch the order book for *pair*, cache it in ``self.curdepth``
    with a timestamp, and return it.

    Bug fix: the original swallowed fetch errors with a bare ``print``
    and then crashed with ``NameError`` on the unbound ``s``; the
    exception is now re-raised so callers see the real failure.
    """
    try:
        s = self.api.get_depth({'pair': pair})
    except Exception as e:
        print(e)
        raise  # was silently swallowed -> NameError on `s` below
    # NOTE(review): module-level `depth` factory, shadowed by this method.
    d = depth(**s)
    self.curdepth[pair] = [d, time.time()]
    return d
def rendering(dir, eng):
    """Rebuild a surface from the data under *dir* and return its depth
    map and rendered images.

    NOTE(review): parameter name ``dir`` shadows the builtin; kept for
    backward compatibility with existing callers.
    """
    # z has the same scale as x and y; its size equals the test image size,
    # and its positions correspond one-to-one with the test image pixels.
    # imgs is the rendering result; its size equals the test image size,
    # and its positions correspond one-to-one with the test image pixels.
    vector, b = rebuild(dir)
    #evaluate(b, dir)  # evaluation on the test set
    z = depth(vector, eng)
    imgs = render(b, dir)
    #visualize(z, b, imgs)  # show the reconstructed image and depth
    return z, imgs
def rendering(dir): #z的尺度与x和y相同,大小等同于测试图像大小,位置与测试图像像素点一一对应 #imgs为渲染结果,大小等同于测试图像大小,位置与测试图像像素点一一对应 train_lvectors = np.zeros([7, 3]) # the direction of light for line in open(dir + '/train.txt'): i, ang1, ang2 = line.strip().split(",") i = int(i) ang1 = int(ang1) ang2 = int(ang2) train_lvectors[i - 1] = (np.sin(np.pi * ang1 / 180) * np.cos(np.pi * ang2 / 180), np.sin(np.pi * ang2 / 180), np.cos(np.pi * ang1 / 180) * np.cos(np.pi * ang2 / 180)) train_lvectors = -train_lvectors test_lvectors = np.zeros([10, 3]) # the direction of light for line in open(dir + '/test.txt'): i, ang1, ang2 = line.strip().split(",") i = int(i) ang1 = int(ang1) ang2 = int(ang2) test_lvectors[i - 1] = (np.sin(np.pi * ang1 / 180) * np.cos(np.pi * ang2 / 180), np.sin(np.pi * ang2 / 180), np.cos(np.pi * ang1 / 180) * np.cos(np.pi * ang2 / 180)) test_lvectors = -test_lvectors train_images = np.zeros([7, 168, 168]) for num in range(7): image = Image.open(dir + '/train/' + str(num + 1) + '.bmp') train_images[num] = np.asarray(image) n_s = 3 alpha, beta, s, X, Y, Z, vector = rebuild(train_images, train_lvectors, n_s) evaluate(alpha, beta, s, X, Y, Z, n_s, train_lvectors, train_images) imgs = render(alpha, beta, s, X, Y, Z, n_s, test_lvectors) z = depth(vector) return z, imgs
def load_molecuel(sequence):
    """
    Prompts user to choose a method to load a molecule with the chosen
    sequence, and loads the molecule via that method.

    Fixes vs. original: re-prompts in a loop instead of unbounded
    recursion; the three branches that all called
    ``Molecule(sequence, method)`` are merged; the dead ``molecule = 0``
    sentinel is removed.  (Function name typo kept — callers use it.)
    """
    while True:
        # prompt user for molecule loading method and validate input
        method = input("Molecule loading method (direct, acids, depth, random): ")
        if method in ('direct', 'acids', 'random'):
            return Molecule(sequence, method)
        if method == "depth":
            return depth(sequence)
        print('No valid loading method.')
def order():
    """Place a BUY_LIMIT order at the current best bid via the btcdo.com
    REST API, signing the request with HMAC-SHA256.

    The signature payload is built from the sorted query params and the
    sorted upper-cased headers joined by newlines; changing any of that
    ordering breaks the signature, so the construction below is
    order-sensitive.
    """
    # WARNING(security): disables TLS certificate verification for the
    # whole process, not just this request — review before production use.
    ssl._create_default_https_context = ssl._create_unverified_context
    url = config.api_url+"/v1/trade/orders"
    # Best bid/ask from the sibling depth module; only the bid is used.
    buy_one_price,buy_one_amount,sell_one_price,sell_one_amount = depth.depth()
    params = {}
    params['amount'] = config.amount
    params['orderType'] = 'BUY_LIMIT'
    params['price'] = buy_one_price
    params['symbol'] = config.symbol
    # Params are serialized and sorted — part of the signed payload.
    param_list = ['%s=%s' % (k, v) for k, v in params.items()]
    param_list.sort()
    payload = ['POST', 'api.btcdo.com', '/v1/trade/orders', '&'.join(param_list)]
    headers = {
        'API-Key': config.api_key,
        'API-Signature-Method': 'HmacSHA256',
        'API-Signature-Version': '1',
        'API-Timestamp': str(int(time.time() * 1000))
    }
    headers['User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'
    headers['API-Unique-ID'] = uuid.uuid4().hex
    # Headers are upper-cased and sorted — also part of the signed payload.
    headers_list = ['%s: %s' % (k.upper(), v) for k, v in headers.items()]
    headers_list.sort()
    payload.extend(headers_list)
    payload.append(json.dumps(params))
    payload_str = '\n'.join(payload)
    # payload_str += '\n<json body data>'
    # signature:
    # signature = HmacSHA256(payload_str.encode("UTF-8"), config.secret_key)
    sign = hmac.new(config.secret_key.encode('utf-8'), payload_str.encode('utf-8'), hashlib.sha256).hexdigest()
    print('payload:\n----\n' + payload_str + '----\nsignature: ' + sign)
    headers['API-Signature'] = sign
    # NOTE(review): params are sent as a query string (`params=`), while the
    # signature included them as the JSON body — confirm against the API docs.
    response = config.session.post(url=url,params=params,headers=headers)
    print(response.text)
def depth(self, pair='btc_eur'):
    """Fetch the order book for *pair*, normalising the inverted
    ``btc_ltc`` alias to ``ltc_btc`` and trimming to the configured
    number of entries.

    Raises:
        Exception('could not get depth'): if the API call fails (the
        underlying error is logged and chained).

    Fixes vs. original: idiomatic ``in gv`` membership test,
    ``list(map(lambda ...))`` replaced by comprehensions, and exception
    chaining via ``from e``.
    """
    # Depth size is configurable through the global config dict.
    if 'depth_count' in gv:
        count = int(gv['depth_count'])
    else:
        count = 20
    try:
        # The exchange only lists ltc_btc; translate the inverted alias.
        if pair == 'btc_ltc':
            pair = 'ltc_btc'
        s = self.api.get_param(pair, 'depth')
    except Exception as e:
        log.exception(e)
        raise Exception('could not get depth') from e
    # NOTE(review): module-level `depth` factory, shadowed by this method.
    d = depth(**s)
    if pair == 'ltc_btc':
        # Quotes come back for the inverted pair; flip each price.
        d.asks = [trade(1 / t.value, t.volume) for t in d.asks]
        d.bids = [trade(1 / t.value, t.volume) for t in d.bids]
    d.asks = d.asks[:count]
    d.bids = d.bids[-count:]
    # NOTE(review): writes to the btce class-level cache, not self.curdepth
    # as the sibling implementations do — confirm this is intentional.
    btce.curdepth[pair] = [d, time.time()]
    return d
def main(inlistrefl):
    """This is the main commandline program. It takes a text list of the
    surface reflectance Planet Dove mosaic tiles
    (ls -1 L*.tif > inlistrefl.txt). The tiles are run to infer the Chl-a
    value. After removing spurious Chl-a values, the average Chl-a of the
    tiles is used for all of the tiles that are processed in this batch.
    That Chl-a value is then used in the subsequent depth and bottom
    reflectance steps.

    Fixes vs. original: bare ``except:`` clauses narrowed to
    ``except Exception``; when no tile yields a valid Chl-a the function
    now reports the problem and returns instead of proceeding with an
    unbound/NaN ``chla_global``.
    """
    with open(inlistrefl, 'r') as f:
        inlist = [thisfile.strip() for thisfile in f.readlines()]

    # Pass 1: estimate Chl-a per tile from an HSV transform of the tile.
    chlavals = np.zeros(len(inlist), dtype=np.float32)
    for k, infile in enumerate(inlist):
        outhsvfile = os.path.splitext(os.path.basename(
            infile.strip()))[0] + '_hsv'
        myrgb2hsv(infile, outhsvfile)
        print(("Processed RGB to HSV: %s") % (infile))
        chlavals[k] = getchla(outhsvfile, infile)
        print(("Chla: %7.4f for %s") % (chlavals[k], infile))
        # Clean up the temporary HSV raster and its ENVI header.
        if (os.path.isfile(outhsvfile)):
            os.remove(outhsvfile)
        if (os.path.isfile(outhsvfile + ".hdr")):
            os.remove(outhsvfile + ".hdr")

    ## filter out any NaNs, negatives and values > 1.0 to get valid Chl-a values
    good1 = np.less(chlavals, 1.0)
    good2 = np.greater(chlavals, 0.0)
    good3 = np.logical_not(np.isnan(chlavals))
    goodchla = np.all(np.stack((good1, good2, good3)), axis=0)
    # Bug fix: with zero valid values the original's nanmean produced NaN
    # (it raises no ValueError), silently poisoning every later step.
    if not np.any(goodchla):
        print("Problem with values in Chl-a array.")
        return
    chla_global = np.nanmean(chlavals[goodchla])
    print(("\nMean Chla for: %7.4f\n") % (chla_global))

    ## process the files to Depth and bottom reflectance using the global Chl-a value
    for k, infile in enumerate(inlist):
        depthfile = os.path.splitext(infile)[0] + "_depth.tif"
        rbfile = os.path.splitext(infile)[0] + "_rb.tif"
        try:
            depth(infile, chla_global, depthfile)
        except Exception:  # was a bare except
            print("Error: Could not create Depth data.")
            continue
        print(("Processed Depth: %s") % (depthfile))
        if (os.path.isfile(depthfile)):
            try:
                rb(infile, chla_global, depthfile, rbfile)
            except Exception:  # was a bare except
                print("Error: Could not create Rb data.")
                continue
        else:
            print((
                "Error: Could not find depth data file %s, so could not do bottom reflectance."
            ) % (depthfile))
            continue
        print(("Processed Rb: %s, %d of %d") % (infile, k, len(inlist)))
    print(("All tiles done!"))
def main(ulx, uly, lrx, lry, outputdir):
    """For every reflectance tile listed in ``atmos_params.csv`` under
    *outputdir*: estimate Chl-a, derive depth and bottom-reflectance
    rasters, then clip the reflectance, Rb and depth rasters to the
    UTM bounding box of the (ulx, uly)-(lrx, lry) lat/lon rectangle,
    writing ``*_subset.tif`` files with GDAL.

    Parameters:
        ulx, uly: upper-left longitude and latitude of the focal box.
        lrx, lry: lower-right longitude and latitude of the focal box.
        outputdir: directory holding the imagery and CSV (note: joined
            by string concatenation, so it must end with a separator —
            TODO confirm callers pass a trailing '/').
    """
    # Corner coordinates packed as [lon, lat] lists.
    ullatlon = []
    lrlatlon = []
    ullatlon.append(ulx)
    ullatlon.append(uly)
    lrlatlon.append(lrx)
    lrlatlon.append(lry)
    ## done = download_planet(ullatlon, lrlatlon, outputdir)
    ## check to see how many files are there
    imglist = []
    rawlist = os.listdir(outputdir)
    for filename in rawlist:
        if (os.path.isfile(outputdir + filename)
                and fnmatch.fnmatch(filename, '*_AnalyticMS.tif')):
            imglist.append(filename)
    numimgs = len(imglist)
    ## add stuff to build reflectance lookup tables and apply them
    ## success = subprocess.call(["extract_atmos_params_spatial_interp.py", outputdir, outputdir+"atmos_params.csv"])
    ##
    # NOTE(review): assumes atmos_params.csv already exists (the step that
    # builds it is commented out above) — file handle is not closed on error.
    f = open(outputdir + "atmos_params.csv", 'r')
    x = f.readlines()
    f.close()
    ##
    ## for row in x:
    ##     vals = row.split(',')
    ##     success = subprocess.call(["generate_rad_to_refl_lut.py", outputdir+vals[0].strip(), vals[1].strip(), vals[2].strip(), vals[3].strip(), outputdir])
    ##
    refllist = []
    ##
    ##
    ## for each image, apply its lut to make reflectance image
    ##
    # Builds the expected reflectance filenames from the CSV rows; the
    # actual LUT application step is commented out below.
    for row in x:
        vals = row.split(',')
        inradfile = os.path.join(outputdir, vals[0].strip())
        inlutfile = os.path.join(outputdir, vals[0].split('.')[0] + "_luts.npz")
        outreflfile = os.path.join(outputdir, vals[0].split('.')[0] + "_refl")
        refllist.append(outreflfile)
        ## success = subprocess.call(["apply_refl_lut.py", inradfile, inlutfile, outreflfile])
    ##
    drv = gdal.GetDriverByName('GTiff')
    ## for each reflectance image run the steps to get depth and bottom reflectance
    ## make and empty list that is same length to hold Chla values
    chlavals = [None] * len(refllist)
    for i, infile in enumerate(refllist):
        outhsvfile = os.path.splitext(os.path.basename(
            infile.strip()))[0] + '_hsv'
        # [0:-5] strips the trailing "_refl" to recover the image stem.
        depthfile = os.path.splitext(infile)[0][0:-5] + "_depth.tif"
        rbfile = os.path.splitext(infile)[0][0:-5] + "_rb.tif"
        myrgb2hsv(infile, outhsvfile)
        print(("Processed RGB to HSV: %s") % (infile))
        chlavals[i] = getchla(outhsvfile, infile)
        print(("Chla: %7.4f for %s") % (chlavals[i], infile))
        # Remove the temporary HSV raster and its ENVI header.
        if (os.path.isfile(outhsvfile)):
            os.remove(outhsvfile)
        if (os.path.isfile(outhsvfile + ".hdr")):
            os.remove(outhsvfile + ".hdr")
        ## filter out an Nans, negatives and values > 1.0 to get valid Chl-a values
        ## good1 = np.less(chlavals, 1.0)
        ## good2 = np.greater(chlavals, 0.0)
        ## good3 = np.logical_not(np.isnan(chlavals))
        ## goodchla = np.all(np.stack((good1, good2, good3)), axis=0)
        # NOTE(review): a list index never raises ValueError, so this
        # handler is dead code — likely left over from the batch-mean
        # version of this routine; per-tile Chl-a is used here instead.
        try:
            chla_global = chlavals[i]
        except ValueError:
            print("Problem with value in Chl-a array.")
        print(("\n%s Chla for: %7.4f\n") % (infile, chla_global))
        try:
            depth(infile, chla_global, depthfile)
        except:
            print("Error: Could not create Depth data.")
            continue
        print(("Processed Depth: %s") % (depthfile))
        if (os.path.isfile(depthfile)):
            try:
                rb(infile, chla_global, depthfile, rbfile)
            except:
                print("Error: Could not create Rb data.")
                continue
        else:
            print((
                "Error: Could not find depth data file %s, so could not do bottom reflectance."
            ) % (depthfile))
            continue
        print(("Processed Bottom Reflectance: %s") % (rbfile))
        print(("%d") % (i))
        if (os.path.isfile(infile)):
            # Open data
            rasterDS = gdal.Open(infile, gdal.GA_ReadOnly)
        else:
            print(
                ("File: %s does not exist....skipping") % (outputdir + infile))
            continue
        # Get raster georeference info
        gt = rasterDS.GetGeoTransform()
        xOrigin = gt[0]
        yOrigin = gt[3]
        pixelWidth = gt[1]
        pixelHeight = gt[5]
        # (xmin, xmax, ymin, ymax) of the image footprint in map units.
        img_bounds = (gt[0], gt[0] + (rasterDS.RasterXSize * gt[1]),
                      gt[3] + (rasterDS.RasterYSize * gt[5]), gt[3])
        # Project all four corners of the lat/lon box to UTM and take the
        # enclosing axis-aligned bounds.
        x1, y1 = utm.from_latlon(ullatlon[1], ullatlon[0])[0:2]
        x2, y2 = utm.from_latlon(lrlatlon[1], ullatlon[0])[0:2]
        x3, y3 = utm.from_latlon(lrlatlon[1], lrlatlon[0])[0:2]
        x4, y4 = utm.from_latlon(ullatlon[1], lrlatlon[0])[0:2]
        xmin = min([x1, x2, x3, x4])
        xmax = max([x1, x2, x3, x4])
        ymin = min([y1, y2, y3, y4])
        ymax = max([y1, y2, y3, y4])
        focal_bounds = (xmin, xmax, ymin, ymax)
        (focalinfo, imginfo) = get_overlap_info(focal_bounds, gt[1],
                                                img_bounds, gt[1])
        # Specify offset and rows and columns to read
        xoff = int((xmin - xOrigin) / pixelWidth)
        yoff = int((ymax - yOrigin) / pixelHeight)
        xcount = int((xmax - xmin) / pixelWidth) + 1
        ycount = int((ymin - ymax) / pixelHeight) + 1
        ## check to make sure we don't go out of bounds on image
        ## if (xoff < 0) or (yoff < 0) or ((xcount+xoff) > rasterDS.RasterXSize) or ((ycount+yoff) > rasterDS.RasterYSize):
        ##     print("Skipping %s out of bounds for this polygon"% (infile))
        ##     continue
        ncols = rasterDS.RasterXSize
        nrows = rasterDS.RasterYSize
        ## subset the water-leaving reflectance file
        outDS = drv.Create(os.path.splitext(infile)[0]+"_subset.tif", xsize=xcount, \
            ysize=ycount, bands=rasterDS.RasterCount, eType=rasterDS.GetRasterBand(1).DataType)
        outDS.SetGeoTransform((xmin, gt[1], gt[2], ymax, gt[4], gt[5]))
        outRasterSRS = osr.SpatialReference()
        outRasterSRS.ImportFromWkt(rasterDS.GetProjectionRef())
        outDS.SetProjection(outRasterSRS.ExportToWkt())
        # Copy the 4 reflectance bands through the overlap window.
        for bandnum in range(1, 5):
            thisBand = rasterDS.GetRasterBand(bandnum)
            thisData = thisBand.ReadAsArray(imginfo[0], imginfo[1],
                                            imginfo[2], imginfo[3])
            outBand = outDS.GetRasterBand(bandnum)
            outBand.WriteArray(thisData)
            outBand.FlushCache()
            del outBand
        # Rebinding the dataset handles to None flushes/closes them (GDAL).
        inDS = None
        outDS = None
        rasterDS = None
        ## subset the bottom reflectance file
        if (os.path.isfile(rbfile)):
            # Open data
            rasterDS = gdal.Open(rbfile, gdal.GA_ReadOnly)
        else:
            print(
                ("File: %s does not exist....skipping") % (outputdir + rbfile))
            continue
        rbDS = drv.Create(os.path.splitext(rbfile)[0]+"_subset.tif", xsize=xcount, \
            ysize=ycount, bands=rasterDS.RasterCount, eType=rasterDS.GetRasterBand(1).DataType)
        rbDS.SetGeoTransform((xmin, gt[1], gt[2], ymax, gt[4], gt[5]))
        rbDS.SetProjection(outRasterSRS.ExportToWkt())
        # NOTE(review): only bands 1-3 are copied here even though the
        # output was created with rasterDS.RasterCount bands — confirm.
        for bandnum in range(1, 4):
            thisBand = rasterDS.GetRasterBand(bandnum)
            thisData = thisBand.ReadAsArray(imginfo[0], imginfo[1],
                                            imginfo[2], imginfo[3])
            outBand = rbDS.GetRasterBand(bandnum)
            outBand.WriteArray(thisData)
            outBand.FlushCache()
            del outBand
        rasterDS = None
        rbDS = None
        ## subset the depth file
        if (os.path.isfile(depthfile)):
            # Open data
            rasterDS = gdal.Open(depthfile, gdal.GA_ReadOnly)
        else:
            print(("File: %s does not exist....skipping") %
                  (outputdir + depthfile))
            continue
        depthDS = drv.Create(os.path.splitext(depthfile)[0]+"_subset.tif", xsize=xcount, \
            ysize=ycount, bands=1, eType=gdal.GDT_Float32)
        depthDS.SetGeoTransform((xmin, gt[1], gt[2], ymax, gt[4], gt[5]))
        depthDS.SetProjection(outRasterSRS.ExportToWkt())
        thisBand = rasterDS.GetRasterBand(1)
        thisData = thisBand.ReadAsArray(imginfo[0], imginfo[1],
                                        imginfo[2], imginfo[3])
        outBand = depthDS.GetRasterBand(1)
        outBand.WriteArray(thisData)
        outBand.FlushCache()
        del outBand
        rasterDS = None
        depthDS = None