def cloud_height(img1, img2, layer=0, distance=None):
    """
    Determine the cloud height for each cloud layer in img1
    Input: Two image objects
    Output: Cloud height, and max correlation
    """
    if img1.layers <= 0 or layer <= 0:
        return []

    if img1.max_theta != img2.max_theta:
        print("The max_theta of the two cameras is different.")
        return np.nan, np.nan

    if distance is None:
        distance = 6367e3 * geo.distance_sphere(img1.lat, img1.lon, img2.lat, img2.lon)

    max_tan = np.tan(img1.max_theta * deg2rad)

    im1 = img1.red.astype(np.float32)
    im2 = img2.red.astype(np.float32)
    # im1=img1.rgb[:,:,0].astype(np.float32); im2=img2.rgb[:,:,0].astype(np.float32)

    # mask_tmpl=(img1.cm==layer)
    mask_tmpl = (img1.cm == 1) if layer == 1 else (~(img1.cm == 1) & (im1 > 0))

    res = np.nan
    try:
        corr = mncc.mncc(im2, im1, mask1=im2 > 0, mask2=mask_tmpl, ratio_thresh=0.5)
        if np.any(corr > 0):
            max_idx = np.nanargmax(corr)
            deltay = max_idx // len(corr) - img2.ny + 1
            deltax = max_idx % len(corr) - img2.nx + 1
            deltar = np.sqrt(deltax**2 + deltay**2)
            height = distance / deltar * img1.nx / (2 * max_tan)
            score = st.shift_2d(im1, deltax, deltay)
            score[score <= 0] = np.nan
            score -= im2
            score = np.nanmean(np.abs(score[(im2 > 0)]))
            score0 = np.abs(im2 - im1)
            score0 = np.nanmean(score0[(im2 > 0) & (im1 > 0)])
            # print('Height',img1.camID,img2.camID,deltay,deltax,height,score0,score)
            # fig,ax=plt.subplots(1,2,sharex=True,sharey=True); ax[0].set_title(str(deltax)+','+str(deltay));
            # ax[0].imshow(im2); ax[1].imshow(im1); plt.show();
            if score0 - score <= 0.3 * score0:
                res = np.nan
            else:
                res = min(13000, height)
    except:
        print('Cannot determine cloud height.')
    # print(np.nanmax(corr),height,deltay, deltax)
    return res
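##### Usage sketch (illustrative, not part of the original code): estimate the layer-1
##### cloud height from two cameras that imaged the sky at the same time. The camera
##### objects and file name are hypothetical placeholders; the image construction calls
##### mirror those used in preprocess() further down, and it is assumed that cloud_mask()
##### leaves at least one cloud layer in img1.layers.
def example_cloud_height(camera1, camera2, fname):
    img1 = cam.image(camera1, fname)   # hypothetical camera object and file name
    img1.undistort(rgb=True)
    img1.cloud_mask()
    img2 = cam.image(camera2, fname)
    img2.undistort(rgb=True)
    img2.cloud_mask()
    # returns np.nan when the match is too weak; heights are capped at 13 km
    return cloud_height(img1, img2, layer=1)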
def cost_sun_match(params, dx0, dy0, nx0, ny0):
    cost = 0
    rotation = params
    dx0 = int(dx0); dy0 = int(dy0)
    nr0 = (nx0 + ny0) / 4.0  ##### radius of the valid image

    ##### compute the zenith and azimuth angles for each pixel
    x0, y0 = np.meshgrid(np.arange(nx0), np.arange(ny0))
    r0 = np.sqrt((x0 - nx0 / 2)**2 + (y0 - ny0 / 2)**2)
    # theta=np.pi/2*(r0/nr0);
    theta = 2 * np.arcsin(r0 / (np.sqrt(2) * nr0))
    phi = rotation + np.arctan2(1 - x0 / nr0, y0 / nr0 - 1)
    #### phi (i.e., azimuth) is reckoned with -pi corresponding to north, increasing clockwise.
    #### NOTE: pysolar uses a non-standard azimuth definition.
    phi = phi % (2 * np.pi)
    theta_filter = theta > max_theta
    theta[theta_filter] = np.nan

    # for f in sorted(glob.glob(inpath+'HD*2018010317*jpg')):
    # for f in sorted(glob.glob(inpath+'HD1*201802141908*jpg')):
    # for f in sorted(glob.glob(inpath+camera+'*20180214185005*jpg')):
    for f in sorted(glob.glob(inpath + camera + '*20180219173840*jpg')):
        #### get the image acquisition time; this needs to be adjusted whenever the naming convention changes
        t_cur = datetime.strptime(f[-18:-4], '%Y%m%d%H%M%S')
        t_std = t_cur - timedelta(hours=5)  ##### adjust UTC time into daylight saving time or standard time

        ##### solar zenith and azimuth angles. NOTE: the azimuth from pysolar is
        ##### reckoned with 0 corresponding to south, increasing counterclockwise
        sz, saz = 90 - ps.get_altitude(lat, lon, t_std), ps.get_azimuth(lat, lon, t_std)
        sz *= deg2rad
        saz = (saz % 360) * deg2rad

        im0 = plt.imread(f).astype(np.float32)

        #### get the spatial pattern of sky radiance from an empirical sky radiance model
        cos_g = np.cos(sz) * np.cos(theta) + np.sin(sz) * np.sin(theta) * np.cos(phi - saz)
        gamma = np.arccos(cos_g)
        denom = (0.91 + 10 * np.exp(-3 * sz) + 0.45 * np.cos(sz)**2) * (1 - np.exp(-0.32))
        rad = (0.91 + 10 * np.exp(-3 * gamma) + 0.45 * cos_g**2) * (1 - np.exp(-0.32 / np.cos(theta))) / denom

        ###### read the image to array
        im0 = st.shift_2d(im0, -dx0, -dy0)
        im0 = im0[:ny0, :nx0]  #### cut the appropriate subset of the original image
        im0[theta_filter, :] = np.nan

        ##### sun-glare region from the radiance model
        glare = rad > (np.nanmean(rad) + np.nanstd(rad) * 2.5)
        # im0[glare,:]=np.nan;
        # plt.figure(); plt.imshow(im0[:,:,0])
        cost += np.nanmean(im0[glare, 0])

    print(dx0, dy0, rotation / deg2rad, cost)
    return -cost
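##### Calibration sketch (an assumption, not the original driver): cost_sun_match returns
##### the negative mean brightness inside the modeled sun-glare region, so minimizing it
##### over the rotation angle aligns the modeled sun position with the observed one. It
##### reads module-level settings (inpath, camera, lat, lon, max_theta); SciPy availability
##### and the shift/center arguments below are assumptions.
from scipy import optimize

def example_fit_rotation(dx0, dy0, nx0, ny0):
    res = optimize.minimize_scalar(cost_sun_match, bounds=(0, 2 * np.pi),
                                   method='bounded', args=(dx0, dy0, nx0, ny0))
    return res.x   # rotation (radians) that best matches the sun position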
fft[-1], ratio=0.7)
vy *= flag
vx *= flag
fft.popleft()

##### record the dominant-layer motion in the results table
##### (single-step .at indexing; chained .loc[i][col] assignment does not reliably write back)
index = len(ResultTable)
ResultTable.at[index - 1, 'LayerNum'] = 1
ResultTable.at[index - 1, 'V1'] = (vy, vx)
ResultTable.at[index - 1, 'MaxCorr1'] = max_corr
ResultTable.to_csv(outpath_DataFrame + CamID + '_' + Date + '_' + 'Table.csv')

##### put the error image into the queue, for use in the multi-layer cloud algorithm
red1 = st.shift_2d(q[-1].rgb[:, :, 0].astype(np.float32), -vx, -vy)
red1[red1 <= 0] = np.nan
red2 = q[-2].rgb[:, :, 0].astype(np.float32)
red2[red2 <= 0] = np.nan
# red2-=np.nanmean(red2-q[-1].rgb[:,:,0])
er = red2 - red1  ### difference image after cloud motion adjustment
er[(red1 == 0) | (red2 == 0)] = np.nan
a = er.copy()
a[a > 0] = 0
er -= st.rolling_mean2(a, 500)
err.append(-st.shift_2d(er, vx, vy))
if len(err) <= 1:  #### secondary-layer processing requires three frames
    continue
cm3 = np.all((rb3 > -43, ot3 > 2.0), axis=0)
cm3[rb3 > -32] = 1

##### cost surface over integer shifts in [-20, 20) between the two frames
c = np.zeros((40, 40))
for ix in range(40):
    for iy in range(40):
        c[ix, iy] = ig.cost_fun((ix - 20, iy - 20), r1, r2)
# t0=time.time();
c = ndimage.filters.gaussian_filter(c, 2, mode='nearest')
# print(time.time()-t0)

# neighborhood = ndimage.morphology.generate_binary_structure(2,2)
# apply the local minimum filter; all pixels of minimal value in their neighborhood are set to 1
# local_min = ndimage.filters.minimum_filter(c, footprint=neighborhood);
is_min = (c == ndimage.filters.minimum_filter(c, 20))
iy_min, ix_min = np.nonzero(is_min)

##### shift frame 1 by the two motion candidates and compare the residuals
r1_s1 = st.shift_2d(r1, 20 - ix_min[0], 20 - iy_min[0])
rdf = abs(r2 - r1_s1)
foo = np.all((rdf > 20, cm2), axis=0)
r1_s2 = st.shift_2d(r1, 20 - ix_min[1], 20 - iy_min[1])
rdf2 = abs(r2 - r1_s2)

##### assign each cloudy pixel to the layer whose motion gives the smaller residual
foo = np.argmin((rdf, rdf2), axis=0)
foo[abs(rdf - rdf2) < 5] = -2   # ambiguous: the two residuals are too close
foo[np.isnan(rdf)] = -1
foo[np.isnan(rdf2)] = -1
foo[cm2 < 0.5] = -1
# foo[np.all((foo==0,abs(st.shift_2d(foo,20-ix_min[0],20-iy_min[0],constant=-2))>0.5),axis=0)]=-1;

r3_s1 = st.shift_2d(r3, ix_min[0] - 20, iy_min[0] - 20)
r3_s2 = st.shift_2d(r3, ix_min[1] - 20, iy_min[1] - 20)
plt.figure()
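##### Helper sketch (an assumption, not in the original fragment): rank the local minima
##### of the smoothed cost surface by their cost so that the first two entries returned
##### are the two strongest motion candidates, rather than relying on np.nonzero's
##### row-major ordering as the code above does.
def ranked_minima(c, is_min):
    iy, ix = np.nonzero(is_min)        # candidate shift indices (rows, columns)
    order = np.argsort(c[iy, ix])      # smallest cost first
    return iy[order], ix[order]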
def preprocess(camera, f, q, err, fft, convolver, flag):
    img = cam.image(camera, f)  ### img object contains four data fields: rgb, red, rbr, and cm
    img.undistort(rgb=True)  ### undistortion
    if img.red is None:
        return
    # ims = Image.fromarray(img.rgb); ims.save(outpath+camID+'/'+os.path.basename(f), "PNG"); continue
    img.cloud_mask()  ### one-layer cloud masking
    q.append(img)
    if len(q) <= 1:
        return
    #### len(q) is always 2 beyond this point
    if (q[-1].time - q[-2].time).seconds >= MAX_INTERVAL:
        q.popleft()
        return

    ##### cloud motion for the dominant layer
    # im1=q[-2].red.copy().astype(np.float32); im2=q[-1].red.copy().astype(np.float32);
    # vy,vx,max_corr = cam.cloud_motion(im1,im2,mask1=im1>5,mask2=np.abs(im1-im2)>15, ratio=0.7, threads=4)
    # print(camera.camID+', ', f[-18:-4]+', first layer2:',max_corr,vy,vx)
    if convolver is None:
        shape = (camera.nx, camera.ny)
        convolver = mncc.Convolver(shape, shape, threads=4, dtype=np.float32)
    for ii in range(len(fft) - 2, 0):
        im = q[ii].red.astype(np.float32)
        mask = im > 0  # im[~mask]=0
        fft.append(convolver.FFT(im, mask, reverse=flag[0] > 0))
        flag[0] *= -1
    vy, vx, max_corr = cam.cloud_motion_fft(convolver, fft[-2], fft[-1], ratio=0.8)
    if vx is None or vy is None:  ##### invalid cloud motion
        q.popleft()
        fft.popleft()
        return
    vy *= flag[0]
    vx *= flag[0]
    fft.popleft()
    print(camera.camID+', ', f[-18:-4]+', first layer:', max_corr, vy, vx)
    # plt.figure(); plt.imshow(q[-2].red); plt.colorbar(); plt.show();
    # return;

    q[-2].v += [[vy, vx]]

    red1 = st.shift_2d(q[-1].rgb[:, :, 0].astype(np.float32), -vx, -vy)
    red1[red1 <= 0] = np.nan
    red2 = q[-2].rgb[:, :, 0].astype(np.float32)
    red2[red2 <= 0] = np.nan
    # red2-=np.nanmean(red2-q[-1].rgb[:,:,0])
    er = red2 - red1  ### difference image after cloud motion adjustment
    er[(red1 == 0) | (red2 == 0)] = np.nan
    a = er.copy()
    a[a > 0] = 0
    er -= st.rolling_mean2(a, 500)
    if len(err) <= 0:
        err += [-st.shift_2d(er, vx, vy)]
        return
    err_2 = err[0].copy()
    err[0] = (-st.shift_2d(er, vx, vy))
    # if vy**2+vx**2>=50**2:  ###### The motion of the dominant layer is fast, likely low clouds.
    #                         ###### Do NOT trigger the second layer algorithm.
    #     return

    ##### process the secondary layer
    ert = er + err_2
    # cm2=(er>15) | (er_p>15); cm2=remove_small_objects(cm2, min_size=500, connectivity=4);
    scale = red2 / np.nanmean(red2)
    nopen = max(5, int(np.sqrt(vx**2 + vy**2) / 3))
    cm2 = (ert > 15 * scale) & (q[-2].cm)
    cm2 = morphology.binary_opening(cm2, np.ones((nopen, nopen)))
    cm2 = remove_small_objects(cm2, min_size=500, connectivity=4)
    # sec_layer=np.sum(cm2)/len(cm2.ravel())  ### the amount of clouds in the secondary layer
    sec_layer = np.sum(cm2) / np.sum(q[-2].cm)  ### fraction of cloudy pixels that belong to the secondary layer
    if sec_layer < 5e-2:  ### too few pixels in the second layer; ignore the second cloud layer
        print('Second layer is small:', sec_layer * 100, '%')
        return

    ##### cloud motion for the secondary layer
    mask2 = np.abs(err_2) > 5
    mask2 = remove_small_objects(mask2, min_size=500, connectivity=4)
    mask2 = filters.maximum_filter(mask2, 20)
    vy, vx, max_corr = cam.cloud_motion(err[0], err_2, mask1=None, mask2=mask2, ratio=None, threads=4)
    if vx is None or vy is None:
        return
    q[-2].v += [[vy, vx]]
    print(camera.camID+', ', f[-18:-4]+', second layer:', max_corr, vy, vx)

    if np.abs(vy - q[-2].v[0][0]) + np.abs(vx - q[-2].v[0][1]) > 10:
    # if np.abs(vy)+np.abs(vx)>0:
        ##### obtain the mask for the secondary cloud layer using a watershed-like algorithm
        mred = q[-2].rgb[:, :, 0].astype(np.float32) - st.fill_by_mean2(q[-2].rgb[:, :, 0], 200, mask=~cm2)
        mrbr = q[-2].rbr - st.fill_by_mean2(q[-2].rbr, 200, mask=~cm2)
        merr = st.rolling_mean2(ert, 200, ignore=np.nan)
        var_err = (st.rolling_mean2(ert**2, 200, ignore=np.nan) - merr**2)
        # mk=(np.abs(q[-2].rgb[:,:,0].astype(np.float32)-mred)<3) & ((total_err)>-2) & (np.abs(q[-2].rbr-mrbr)<0.05)
        mk = (np.abs(mred) < 3) & (ert > -15) & (np.abs(mrbr) < 0.05) & (var_err > 20 * 20)
        cm2 = morphology.binary_opening(mk | cm2, np.ones((nopen, nopen)))  #### remove line objects produced by cloud deformation
        cm2 = remove_small_objects(cm2, min_size=500, connectivity=4)
        q[-2].layers = 2
        q[-2].cm[cm2] = 2  ##### update the cloud mask with the secondary cloud layer

    # fig,ax=plt.subplots(2,2, sharex=True,sharey=True);  #### visualize the cloud masking results
    # ax[0,0].imshow(q[-2].rgb); ax[0,1].imshow(q[-2].cm)
    # ax[1,0].imshow(st.shift_2d(q[-1].rgb,-vx,-vy))
    # ax[1,1].imshow(er,vmin=-25,vmax=25); plt.show();
    return
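##### Driver sketch (illustrative): how preprocess() might be called over a sequence of
##### images from one camera. The deque sizes, the initial flag value, and the inpath/day
##### file pattern are assumptions, not taken from the original script.
from collections import deque
import glob

def example_run(camera, inpath, day):
    q, fft = deque(maxlen=2), deque(maxlen=2)
    err = deque()
    flag = [-1]   # sign toggle used by the FFT-based motion estimator (initial value assumed)
    convolver = mncc.Convolver((camera.nx, camera.ny), (camera.nx, camera.ny),
                               threads=4, dtype=np.float32)
    for f in sorted(glob.glob(inpath + camera.camID + '*' + day + '*jpg')):
        preprocess(camera, f, q, err, fft, convolver, flag)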
valid = xgrid**2 + ygrid**2 <= max_tan**2
# theta=np.arctan(np.sqrt(xgrid**2+ygrid**2))
# plt.figure(); plt.imshow(j2-j0);
# print(sth)

cnt = 0
# for f in sorted(glob.glob(inpath+camera+'*2018022117*jpg')):
for f in sorted(glob.glob(inpath + camera + '*20180219171940*jpg')):
# for f in sorted(glob.glob(inpath+camera+'*20180214192235*jpg')):
    print(f)
    cnt += 1

    ###### read the image to array
    im0 = plt.imread(f).astype(np.float32)
    im0 = st.shift_2d(im0, -di0, -dj0)
    im0 = im0[:nj0, :ni0]  #### cut the appropriate subset of the original image
    im0[theta_filter, :] = np.nan

    #### perform undistortion
    im = np.zeros((nj, ni, 3), dtype=np.float32)
    for ic in range(3):
        im[:, :, ic] = st.bin_average2_reg(im0[:, :, ic], x, y, xbin, ybin, mask=valid)
        im[:, :, ic] = st.fill_by_mean2(im[:, :, ic], 7, mask=(np.isnan(im[:, :, ic])) & valid)

    im0 = st.shift_2d(im0, 5, 0)
    im2 = np.zeros((nj, ni, 3), dtype=np.float32)
    for ic in range(3):
        im2[:, :, ic] = st.bin_average2_reg(im0[:, :, ic], x2, y2, xbin, ybin, mask=valid)
        im2[:, :, ic] = st.fill_by_mean2(im2[:, :, ic], 7, mask=(np.isnan(im2[:, :, ic])) & valid)

    plt.figure()
    plt.imshow(im / 255)  ### undistorted image
lat, lon, t_std)
sz *= deg2rad
saz = (saz % 360) * deg2rad

#### get the spatial pattern of sky radiance from an empirical sky radiance model
cos_g = np.cos(sz) * np.cos(theta) + np.sin(sz) * np.sin(theta) * np.cos(phi - saz)
gamma = np.arccos(cos_g)
denom = (0.91 + 10 * np.exp(-3 * sz) + 0.45 * np.cos(sz)**2) * (1 - np.exp(-0.32))
rad = (0.91 + 10 * np.exp(-3 * gamma) + 0.45 * cos_g**2) * (1 - np.exp(-0.32 / np.cos(theta))) / denom

###### read the image to array
im0 = plt.imread(f).astype(np.float32)
im0 = st.shift_2d(im0, -dx0, -dy0)
im0 = im0[:ny0, :nx0]  #### cut the appropriate subset of the original image
im0[theta_filter, :] = np.nan

#### the next few lines are for parameter tuning purposes only; comment them out once tuning is finished.
#### If the rotation, center, and shifting parameters are set correctly, the black dot will coincide with the sun.
im0[cos_g > 0.997, :] = 0
fig, ax = plt.subplots(1, 1, sharex=True, sharey=True)
ax.imshow(im0 / 255)
if cnt > 1:
    break
# continue

##### sun glare removal
glare = rad > (np.nanmean(rad) + np.nanstd(rad) * 3)
im0[glare, :] = 0
i] = st.fill_by_mean2(im[cnt, :, :, i], 7, mask=(np.isnan(im[cnt, :, :, i])) & valid)
cnt += 1

# fig,ax=plt.subplots(1,2,sharex=True,sharey=True);
# ax[0].imshow(im[0,:,:,0]);  ### undistorted image
# ax[1].imshow(im[1,:,:,0]);  ### undistorted image

im[np.isnan(im)] = -255
im[:, invalid, :] = -255

##### masked normalized cross-correlation between the two undistorted red channels
corr = mncc.mncc(im[0, :, :, 0], im[1, :, :, 0],
                 mask1=im[0, :, :, 0] > -250, mask2=im[1, :, :, 0] > -250)
max_idx = np.nanargmax(corr)
deltay = max_idx // len(corr) - ny + 1
deltax = max_idx % len(corr) - nx + 1
deltar = np.sqrt(deltax**2 + deltay**2)
height = distance / deltar * nx / (2 * max_tan)
print(np.nanmax(corr), height, deltay, deltax)

# plt.figure(); plt.imshow(corr)
im[im < -250] = np.nan
fig, ax = plt.subplots(1, 2, sharex=True, sharey=True)
ax[0].imshow(im[0, :, :, 0], vmin=-20, vmax=30)  ### undistorted image
ax[1].imshow(st.shift_2d(im[1, :, :, 0], deltax, deltay), vmin=-20, vmax=30)
# fig,ax=plt.subplots(1,2,sharex=True,sharey=True); ax[0].imshow(im[0,:,:,0],vmin=20);  ### undistorted image
# ax[1].imshow(st.shift_2d(im[1,:,:,0],deltax,deltay),vmin=20);  ### undistorted image
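##### Geometry note (sketch restating the formula used above): the undistorted image spans
##### 2*max_tan in tangent units across nx pixels, so a displacement of deltar pixels
##### between the two views corresponds to deltar*(2*max_tan/nx) in tangent space, and the
##### known baseline `distance` between the cameras converts it into a layer height.
def height_from_shift(deltax, deltay, distance, nx, max_tan):
    deltar = np.sqrt(deltax**2 + deltay**2)    # pixel displacement magnitude
    return distance / deltar * nx / (2 * max_tan)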