def PSetDir(newDir, disk, err, URL=None):
    """Replace a FITS directory path.

    Returns the FITS disk number.

    * newDir = new directory path
    * disk   = FITS disk number to replace
    * err    = Python Obit Error/message stack
    * URL    = URL if on a remote host (Only if using OTObit/ParselTongue)
    """
    ################################################################
    global FITSdisks, nFITS
    # Checks
    if not OErr.OErrIsA(err):
        raise TypeError("err MUST be an OErr")
    #
    retDisk = Obit.FITSSetDir(newDir, disk, err.me)
    FITSdisks[disk] = newDir
    nFITS = len(FITSdisks)
    if err.isErr:
        # Fixed typo in message ("replacinging" -> "replacing").
        OErr.printErrMsg(err, "Error replacing FITS directory")
    # Update ObitTalk bookkeeping; best-effort since the FITS module's
    # FITSDisk class is only available under OTObit/ParselTongue.
    try:
        FITS.FITS.disks[disk] = FITS.FITSDisk(URL, disk, newDir)
    except Exception:
        pass
    # Return the disk number, as the docstring has always promised.
    return retDisk
def PAddDir(newDir, err, URL=None):
    """Add a new FITS directory.

    Returns the FITS disk number.

    * newDir = new directory path
    * err    = Python Obit Error/message stack
    * URL    = URL if on a remote host (Only if using OTObit/ParselTongue)
    """
    ################################################################
    global FITSdisks, nFITS
    # Checks
    if not OErr.OErrIsA(err):
        # Call-style raise (consistent with PSetDir; the old
        # "raise TypeError, ..." form is Python-2-only syntax).
        raise TypeError("err MUST be an OErr")
    #
    retDisk = Obit.FITSAddDir(newDir, err.me)
    FITSdisks.append(newDir)
    nFITS = len(FITSdisks)
    if err.isErr:
        OErr.printErrMsg(err, "Error adding FITS directory")
    # Update ObitTalk bookkeeping; best-effort since the FITS module's
    # FITSDisk class is only available under OTObit/ParselTongue.
    try:
        FITS.FITS.disks.append(FITS.FITSDisk(URL, retDisk, newDir))
    except Exception:
        pass
    return retDisk
def checkArray(self, val, shape, dtype, raiseShape=0):
    """Coerce val to a numpy array of the given shape and dtype.

    * val   = list, ndarray, or the filename of a FITS file to load
    * shape = expected shape tuple (a bare int is wrapped into a 1-tuple),
              or None to skip the shape check/reshape
    * dtype = dtype to cast to, or None to leave the dtype unchanged
    * raiseShape = if nonzero, a shape mismatch raises instead of only warning

    Raises Exception if val is a non-existent filename, is not
    array-like, or (with raiseShape set) has the wrong shape.
    """
    if isinstance(val, list):
        val = numpy.array(val)
    elif isinstance(val, str):
        if os.path.exists(val):
            print("Loading %s" % val)
            val = FITS.Read(val)[1]
        else:
            raise Exception("File not found %s" % val)
    if type(val) == numpy.ndarray:
        if dtype is not None:
            val = val.astype(dtype)
        if shape is not None:
            if type(shape) != type(()):
                shape = (shape,)  # allow a bare dimension
            if val.shape != shape:
                print("Warning - shape not quite right (expecting %s, got %s)?" % (str(shape), str(val.shape)))
                if raiseShape:
                    raise Exception("checkArray shape")
            # Reshape in place; raises if the element count doesn't match.
            try:
                val.shape = shape
            except Exception:
                print(val.shape, shape)
                raise
    else:
        raise Exception("checkArray")
    return val
def load_filter(filter, gen=2):
    '''Given the filter name and generation, load the mean and std maps
    from FITS files and store interpolators in the module-level caches.

    * filter = filter name (used to build the FITS file names)
    * gen    = generation number (default 2)

    Populates mean[(filter, gen)] and std[(filter, gen)].
    Note: "filter" shadows the builtin but is kept for interface
    compatibility with existing keyword callers.
    '''
    # Renamed the local from "file" to avoid shadowing the builtin.
    path = os.path.join(base, "%s_mean%d.fits" % (filter, gen))
    f = FITS.FITS(path)
    # Reconstruct world coordinates of each pixel from the WCS keywords
    # (FITS pixel indices are 1-based, hence the +1).
    xs = f['CRVAL1'] + (num.arange(f['NAXIS1']) + 1 - f['CRPIX1']) * f['CDELT1']
    ys = f['CRVAL2'] + (num.arange(f['NAXIS2']) + 1 - f['CRPIX2']) * f['CDELT2']
    xx, yy = num.meshgrid(xs, ys)
    points = num.array([num.ravel(xx), num.ravel(yy)]).T
    values = f.data()
    mean[(filter, gen)] = interp2d(points, num.ravel(values))
    f.close()
    # The std map shares the same grid, so "points" is reused.
    f = FITS.FITS(path.replace('mean', 'std'))
    values = f.data()
    std[(filter, gen)] = interp2d(points, num.ravel(values))
    f.close()
def DarcInit(self, data):
    """Send a darc init command over the socket.

    data can be a FITS filename, or the contents of the FITS file
    (already loaded, with .size and .itemsize attributes).
    """
    # Only treat data as a path when it is actually a string:
    # calling os.path.exists on raw file contents would raise.
    if isinstance(data, str) and os.path.exists(data):
        data = FITS.Read(data)[1]
    nbytes = data.size * data.itemsize
    # Header: magic word, command code, then the 64-bit payload size
    # split into low and high 32-bit words.
    self.sock.send(
        numpy.array(
            [0x55555555, DARCINIT, nbytes & 0xffffffff,
             nbytes >> 32]).astype(numpy.int32))
    self.sock.sendall(data)
    self.checkReply()
def __init__(self, name):
    """For extracting data from a large FITS file.

    Opens the file, parses the primary header to get dimensions and
    data type, then attempts to read the frame-number and timestamp
    HDUs that follow the data.

    * name = path of the FITS file to open
    """
    # Map FITS BITPIX codes to numpy dtypes.
    self.bpdict = {
        8: numpy.uint8,
        16: numpy.int16,
        32: numpy.int32,
        -32: numpy.float32,
        -64: numpy.float64,
        -16: numpy.uint16
    }
    self.name = name
    # FITS is a binary format - open in binary mode ("rb"), not text mode.
    self.fd = open(self.name, "rb")
    self.HDUoffset = 0
    self.hdr = FITS.ReadHeader(self.fd)["parsed"]
    self.nd = int(self.hdr["NAXIS"])
    dims = []
    for i in range(self.nd):
        dims.append(int(self.hdr["NAXIS%d" % (i + 1)]))
    # FITS stores the fastest-varying axis first; reverse for numpy order.
    dims.reverse()
    self.dims = numpy.array(dims)
    self.bitpix = int(self.hdr["BITPIX"])
    self.dataOffset = self.fd.tell()
    dsize = self.getDataSize(self.hdr)
    #Now get the frame list - move to the frame list HDU
    self.fd.seek(dsize, 1)
    try:
        self.frameno = FITS.Read(self.fd, allHDU=0)[1]
    except Exception:
        print("Unable to read frame numbers")
        traceback.print_exc()
    try:
        self.timestamp = FITS.Read(self.fd, allHDU=0)[1]
    except Exception:
        print("Unable to read timestamps")
        traceback.print_exc()
    self.nextHDUoffset = self.fd.tell()
def checkNoneOrArray(self, val, size, dtype):
    """Coerce val to a 1-D numpy array of the given size/dtype, or pass None through.

    * val   = None, list, ndarray, or the filename of a FITS file to load
    * size  = required number of elements (None to skip the reshape)
    * dtype = dtype to cast to

    Raises Exception if val names a missing file or is not None/array-like.
    """
    if isinstance(val, list):
        val = numpy.array(val)
    elif isinstance(val, str):
        if os.path.exists(val):
            print("Loading %s" % val)
            val = FITS.Read(val)[1]
        else:
            print("File %s not found" % val)
            raise Exception("File %s not found" % val)
    if val is None:
        pass
    elif type(val) == numpy.ndarray:
        val = val.astype(dtype)
        if size is not None:
            val.shape = size,
    else:
        # %s rather than %d: size may legitimately be None here, and the
        # old %d formatting raised a TypeError instead of this message.
        raise Exception("checkNoneOrArray size requested %s" % size)
    return val
def loadSineData(self, fname, stdThresh=0.25):
    """Load sinusoidal poke data saved by e.g. doSinePokeGTC().

    fname is the file to read.  stdThresh: slopes whose standard
    deviation exceeds max(std)*stdThresh are zeroed, which
    automatically removes subapertures that receive no light.
    Returns (vList, sList): the actuator records and slope records.
    """
    slopeRecords = []
    actRecords = []
    data = FITS.Read(fname)
    nrecords = len(data) // 12
    for rec in range(nrecords):
        s = data[1 + rec * 12]     # the slopes
        vmes = data[7 + rec * 12]  # the actuators (sinusoidal)
        if stdThresh > 0:
            # Build a validity mask: keep only subaps whose rms is
            # below the threshold fraction of the maximum rms.
            sstd = s.std(0)
            mask = numpy.where(sstd < sstd.max() * stdThresh, 1, 0)
            s *= mask
        slopeRecords.append(s)
        actRecords.append(vmes)
    return actRecords, slopeRecords
def fitsFinalise(self):
    """Finalise a FITS file that was saved on the fly.

    Fixes up the last-axis keyword of each header, pads every section
    to a multiple of the 2880-byte FITS block size, and appends the
    frame-number and timestamp HDUs accumulated in the side files.
    """
    if self.asfits and self.finalise:
        self.finalise = 0
        self.fd.seek(0, 2)
        pos = self.fd.tell()
        self.fd.seek(self.hdustart)
        nbytes = pos - 2880 - self.hdustart
        # Integer division: n is a frame count (plain / would give a
        # float under Python 3 semantics).
        n = nbytes // self.datasize
        FITS.updateLastAxis(None, n, self.fd)
        self.fd.seek(0, 2)  #go to end
        extra = 2880 - pos % 2880
        if extra < 2880:
            self.fd.write(" " * extra)
        #Now add the frame numbers and timestamps.
        self.fdfno.seek(0)
        FITS.updateLastAxis(None, n, self.fdfno)
        self.fdfno.seek(0)
        self.fd.write(self.fdfno.read())
        pos = self.fd.tell()
        extra = 2880 - pos % 2880
        if extra < 2880:
            self.fd.write(" " * extra)
        self.fdtme.seek(0)
        FITS.updateLastAxis(None, n, self.fdtme)
        self.fdtme.seek(0)
        self.fd.write(self.fdtme.read())
        pos = self.fd.tell()
        extra = 2880 - pos % 2880
        if extra < 2880:
            self.fd.write(" " * extra)
        self.fdtme.close()
        self.fdfno.close()
        # Best-effort removal of the temporary side files.
        try:
            os.unlink(self.name + "fno")
            os.unlink(self.name + "tme")
        except OSError:
            pass
def fitsFinalise(self):
    """Finalise a FITS file that was saved on the fly.

    Fixes up the last-axis keyword of each header, pads every section
    to a multiple of the 2880-byte FITS block size, and appends the
    frame-number and timestamp HDUs accumulated in the side files.
    """
    if self.asfits and self.finalise:
        self.finalise = 0
        self.fd.seek(0, 2)
        pos = self.fd.tell()
        self.fd.seek(self.hdustart)
        nbytes = pos - 2880 - self.hdustart
        # Integer division: n is a frame count (plain / would give a
        # float under Python 3 semantics).
        n = nbytes // self.datasize
        FITS.updateLastAxis(None, n, self.fd)
        self.fd.seek(0, 2)  #go to end
        extra = 2880 - pos % 2880
        if extra < 2880:
            self.fd.write(" " * extra)
        #Now add the frame numbers and timestamps.
        self.fdfno.seek(0)
        FITS.updateLastAxis(None, n, self.fdfno)
        self.fdfno.seek(0)
        self.fd.write(self.fdfno.read())
        pos = self.fd.tell()
        extra = 2880 - pos % 2880
        if extra < 2880:
            self.fd.write(" " * extra)
        self.fdtme.seek(0)
        FITS.updateLastAxis(None, n, self.fdtme)
        self.fdtme.seek(0)
        self.fd.write(self.fdtme.read())
        pos = self.fd.tell()
        extra = 2880 - pos % 2880
        if extra < 2880:
            self.fd.write(" " * extra)
        self.fdtme.close()
        self.fdfno.close()
        # Best-effort removal of the temporary side files.
        try:
            os.unlink(self.name + "fno")
            os.unlink(self.name + "tme")
        except OSError:
            pass
def tofits(self,fname,ffrom=None,fto=None,tfrom=None,tto=None):
    """Convert the raw frame stream to a FITS file.

    Frames are filtered by frame number (ffrom..fto) and by timestamp
    (tfrom..tto); None means unbounded.  Consecutive frames of the same
    size/dtype share one HDU; each HDU is followed by HDUs holding the
    frame numbers and timestamps of its frames.

    * fname = output FITS filename
    * ffrom, fto = inclusive frame-number range, or None
    * tfrom, tto = inclusive timestamp range, or None
    """
    curshape=None       # byte size of frames in the current HDU
    curdtype=None       # dtype char of frames in the current HDU
    fheader=None        # memmap over the current HDU header, for later fix-up
    nentries=0          # frames written into the current HDU
    tlist=[]            # timestamps for the current HDU
    flist=[]            # frame numbers for the current HDU
    ffits=open(fname,"w")
    firstHeader=1
    while 1:
        # Each record starts with a fixed-size binary header.
        hdr=self.fd.read(self.info.size*self.info.itemsize)
        if hdr=="":
            break
        elif len(hdr)<self.info.size*self.info.itemsize:
            print "Didn't read all of header"
            break
        info=numpy.fromstring(hdr,numpy.int32)
        fno=int(info[1])
        # Timestamp is a double stored in two int32 slots.
        ftime=float(info[2:4].view("d"))
        databytes=info[0]-(self.info.size-1)*self.info.itemsize
        fok=tok=0
        if (ffrom==None or fno>=ffrom) and (fto==None or fno<=fto):
            #print fno
            fok=1
        if (tfrom==None or ftime>=tfrom) and (tto==None or ftime<=tto):
            tok=1
        if fok==1 and tok==1:
            frame=self.fd.read(databytes)
            if len(frame)!=databytes:
                print "Didn't read all of frame"
                break
            frame=numpy.fromstring(frame,chr(info[4])).byteswap()
            #can it be put into the existing HDU? If not, finalise current, and start a new one.
            if curshape!=databytes or curdtype!=chr(info[4]):
                #end the current HDU
                FITS.End(ffits)
                #Update FITS header
                if fheader!=None:
                    FITS.updateLastAxis(None,nentries,fheader)
                    del(fheader)
                    #fheader.close()
                    fheader=None
                #now write the frame number and time.
                ffits.close()
                if firstHeader==0:
                    FITS.Write(numpy.array(flist).astype("i"),fname,writeMode="a")
                    FITS.Write(numpy.array(tlist),fname,writeMode="a")
                ffits=open(fname,"a+")
                FITS.WriteHeader(ffits,[1,databytes/numpy.zeros((1,),chr(info[4])).itemsize],chr(info[4]),firstHeader=firstHeader)
                ffits.flush()
                # memmap the 2880-byte header just written so the axis
                # count can be patched once this HDU's length is known.
                fheader=numpy.memmap(fname,dtype="c",mode="r+",offset=ffits.tell()-2880)
                firstHeader=0
                flist=[]
                tlist=[]
                nentries=0
                curshape=databytes
                curdtype=chr(info[4])
            #now write the data
            ffits.write(frame)
            nentries+=1
            flist.append(fno)
            tlist.append(ftime)
        else:
            #skip the data
            self.fd.seek(databytes-1,1)
            # Read the final byte to detect a truncated last frame.
            if self.rd.read(1)=="":
                print "Didn't read all of the frame"
                break
    #now finalise the file.
    FITS.End(ffits)
    if fheader!=None:
        FITS.updateLastAxis(None,nentries,fheader)
        #fheader.close()
        del(fheader)
        fheader=None
    #now write the frame number and time.
    ffits.close()
    FITS.Write(numpy.array(flist).astype("i"),fname,writeMode="a")
    FITS.Write(numpy.array(tlist),fname,writeMode="a")
"nacts":nacts, "ncam":ncam, "nsub":nsub, "npxly":npxly, "npxlx":npxlx, "ncamThreads":ncamThreads, "pxlCnt":pxlCnt, "subapLocation":subapLocation, "bgImage":bgImage, "darkNoise":darkNoise, "closeLoop":1, "flatField":flatField,#numpy.random.random((npxls,)).astype("f"), "thresholdValue":1.0, "powerFactor":1.,#raise pixel values to this power. "subapFlag":subapFlag, "fakeCCDImage":FITS.Read("/home/ali/pyr.fits")[1],#None,#(numpy.random.random((npxls,))*20).astype("f"), "printTime":0,#whether to print time/Hz "rmx":rmx,#numpy.random.random((nacts,ncents)).astype("f"), "gain":numpy.ones((nacts,),"f"), "E":numpy.zeros((nacts,nacts),"f"),#E from the tomoalgo in openloop. "threadAffinity":None, "threadPriority":numpy.ones((ncamThreads.sum()+1,),numpy.int32)*10, "delay":10000,#will usually be zero (except if you want to set your own frame rate, e.g. if loading images from a file) "clearErrors":0, "camerasOpen":0, "cameraName":"libcamfile.so",#"libsl240Int32cam.so",#"camfile", "cameraParams":cameraParams, "mirrorName":"libmirrorSL240.so", "mirrorParams":mirrorParams, "mirrorOpen":0, "frameno":0,
def loadBuf(fname, hdu=0):
    """Load a buffer previously saved with save() from FITS file fname.

    hdu selects which saved buffer to read (data HDUs alternate with
    header entries, hence the 2*hdu+1 index).  Returns a Buffer object.
    """
    contents = FITS.Read(fname)[2 * hdu + 1]
    buf = Buffer(None, size=contents.size)
    buf.assign(contents)
    return buf
def tofits(self, fname, ffrom=None, fto=None, tfrom=None, tto=None):
    """Convert the raw frame stream to a FITS file.

    Frames are filtered by frame number (ffrom..fto) and by timestamp
    (tfrom..tto); None means unbounded.  Consecutive frames of the same
    size/dtype share one HDU; each HDU is followed by HDUs holding the
    frame numbers and timestamps of its frames.  Byte swapping is
    applied on write when self.doByteSwap is set on a little-endian host.

    * fname = output FITS filename
    * ffrom, fto = inclusive frame-number range, or None
    * tfrom, tto = inclusive timestamp range, or None
    """
    curshape = None   # byte size of frames in the current HDU
    curdtype = None   # dtype char of frames in the current HDU
    fheader = None    # memmap over the current HDU header, for later fix-up
    nentries = 0      # frames written into the current HDU
    tlist = []        # timestamps for the current HDU
    flist = []        # frame numbers for the current HDU
    ffits = open(fname, "w")
    firstHeader = 1
    while 1:
        # Each record starts with a fixed-size binary header.
        hdr = self.fd.read(self.info.size * self.info.itemsize)
        if hdr == "":
            break
        elif len(hdr) < self.info.size * self.info.itemsize:
            print "Didn't read all of header"
            break
        info = numpy.fromstring(hdr, numpy.int32)
        fno = int(info[1])
        # Timestamp is a double stored in two int32 slots.
        ftime = float(info[2:4].view("d"))
        databytes = info[0] - (self.info.size - 1) * self.info.itemsize
        fok = tok = 0
        if (ffrom == None or fno >= ffrom) and (fto == None or fno <= fto):
            #print fno
            fok = 1
        if (tfrom == None or ftime >= tfrom) and (tto == None
                                                  or ftime <= tto):
            tok = 1
        if fok == 1 and tok == 1:
            frame = self.fd.read(databytes)
            if len(frame) != databytes:
                print "Didn't read all of frame"
                break
            frame = numpy.fromstring(frame, chr(info[4]))
            #can it be put into the existing HDU? If not, finalise current, and start a new one.
            if curshape != databytes or curdtype != chr(info[4]):
                #end the current HDU
                FITS.End(ffits)
                #Update FITS header
                if fheader != None:
                    FITS.updateLastAxis(None, nentries, fheader)
                    del (fheader)
                    #fheader.close()
                    fheader = None
                #now write the frame number and time.
                ffits.close()
                if firstHeader == 0:
                    FITS.Write(numpy.array(flist).astype("i"),
                               fname,
                               writeMode="a",
                               doByteSwap=self.doByteSwap)
                    FITS.Write(numpy.array(tlist),
                               fname,
                               writeMode="a",
                               doByteSwap=self.doByteSwap)
                ffits = open(fname, "a+")
                FITS.WriteHeader(ffits, [
                    1, databytes / numpy.zeros(
                        (1, ), chr(info[4])).itemsize
                ],
                                 chr(info[4]),
                                 firstHeader=firstHeader,
                                 doByteSwap=self.doByteSwap)
                ffits.flush()
                firstHeader = 0
                # memmap the 2880-byte header just written so the axis
                # count can be patched once this HDU's length is known.
                fheader = numpy.memmap(fname,
                                       dtype="c",
                                       mode="r+",
                                       offset=ffits.tell() - 2880)
                flist = []
                tlist = []
                nentries = 0
                curshape = databytes
                curdtype = chr(info[4])
            #now write the data
            if self.doByteSwap and numpy.little_endian:
                ffits.write(frame.byteswap().data)
            else:
                ffits.write(frame)
            flist.append(fno)
            tlist.append(ftime)
            nentries += 1
        else:
            #skip the data
            self.fd.seek(databytes - 1, 1)
            # Read the final byte to detect a truncated last frame.
            if self.rd.read(1) == "":
                print "Didn't read all of the frame"
                break
    #now finalise the file.
    FITS.End(ffits)
    if fheader is not None:
        FITS.updateLastAxis(None, nentries, fheader)
        #fheader.close()
        del (fheader)
        fheader = None
    #now write the frame number and time.
    ffits.close()
    FITS.Write(numpy.array(flist).astype("i"),
               fname,
               writeMode="a",
               doByteSwap=self.doByteSwap)
    FITS.Write(numpy.array(tlist),
               fname,
               writeMode="a",
               doByteSwap=self.doByteSwap)
def pokeSine(self,nFrames,pokeVal=1000.,dmNo=0,baseFreq=5,nrec=2,fname="sinePoke.fits",fno=-20,order=None,nRepeats=1,repeatIfErr=10):
    """Poke all actuators of one DM simultaneously with sinusoids of
    different frequencies, recording slopes and actuators for later
    interaction-matrix estimation.

    nFrames - number of iterations over which to poke.
    pokeVal - amplitude of the sinusoidal poke.
    dmNo - index into self.nactList selecting the DM.
    baseFreq - minimum number of sine waves to fit within nFrames.
    nrec - number of cycles to record (for averaging purposes)
    fname - output FITS file (None to skip saving), or appended to.
    fno - number of frames to allow for sync purposes.  Make this more negative for faster systems (or closer to zero for simulation!).
    order - if None, will increase the freq of neighbouring actuators in a linear fashion.  Otherwise, can be the index order in which this should be done (not yet implemented).
    nRepeats - number of times to repeat the recording, with an offset added to the frequency of each actuator each time (so that different dynamics can be explored).
    repeatIfErr - number of times to repeat if an error is obtained, before giving up.  If -ve, will repeat indefinitely

    Returns (dataList, errList).  Use processSine() function below to
    process this data...
    """
    if order is not None:
        raise Exception("order not yet implemented - sorry!")
    d=darc.Control(self.prefix)
    nslopes=d.Get("subapFlag").sum()*2
    nacts=d.Get("nacts")#self.nactList[dmNo]
    actOrig=d.Get("actuators")
    # Save the flattened/original actuators so they can be restored.
    FITS.Write(actOrig,"tmpActOrig.fits")
    print("Writing original actuators to tmpActOrig.fits")
    pokeArr=numpy.zeros((nFrames,nacts),numpy.float32)
    nactDm=self.nactList[dmNo]
    offset=sum(self.nactList[:dmNo])  # first actuator of this DM
    writeMode="w"
    freqOffset=0.
    dataList=[]
    errList=[]
    extraHeader=["POKEVAL = %d"%pokeVal,"NFRAMES = %d"%nFrames,"DMNUMBER= %d"%dmNo,"BASEFREQ= %d"%baseFreq,"NRECORD = %d"%nrec]
    for rpt in range(nRepeats):
        err=0
        pokeArr[:]=actOrig
        # Each actuator gets its own frequency so responses separate in
        # the Fourier domain.
        for i in range(nactDm):
            freq=baseFreq+(freqOffset+i)%nactDm
            pokeArr[:,offset+i]+=numpy.sin(numpy.arange(nFrames)/float(nFrames)*2.*numpy.pi*freq)*pokeVal
        d.Set("actuators",pokeArr)
        time.sleep(1.)
        takeData=repeatIfErr
        if takeData==0:
            takeData=1
        while takeData:
            err=0
            data=d.GetStreamBlock(["rtcActuatorBuf","rtcCentBuf"],nFrames*nrec,asArray=1,fno=fno)
            #check the data is okay.
            # Frame numbers (index 2 of each stream) must be contiguous.
            for key in ["rtcCentBuf","rtcActuatorBuf"]:
                f=data[key][2]
                if not numpy.alltrue((f[1:]-f[:-1])==1):
                    print "Cycle %d: Some frames missing from %s"%(rpt,key)
                    err=1
            if not numpy.alltrue(data["rtcCentBuf"][2]==data["rtcActuatorBuf"][2]):
                allframenumbersequal=0
                print "Cycle %d: actuator and slope frame numbers don't agree"%rpt
                err=1
            # Frame times (index 1) should be roughly regular.
            allframetimesequal=1
            for key in ["rtcCentBuf","rtcActuatorBuf"]:
                ftime=data[key][1]
                fdiff=ftime[1:]-ftime[:-1]
                if not numpy.alltrue(fdiff<=numpy.median(fdiff)*3):
                    allframetimesequal=0
                    err=1
                    print "Cycle %d: Not all frame times within 3x median frame time"%rpt
            if err==0:#got data okay
                takeData=0
            elif takeData>1:
                print "Error in data - repeating acquisition (another %d times to try)"%takeData
                takeData-=1
            elif takeData==1:
                takeData=0
                if repeatIfErr==0:
                    print "Error in data - continuing anyway"
                else:
                    print "Error in data - cannot acquire (is your network fast enough? Is the frame rate too high?)"
            else:
                print "Error in data - repeating acquisition (will continue until successful...!)"
        if repeatIfErr!=0 and err!=0:
            raise Exception("Unable to capture data")
        #subtract the actuator offset (midrange)
        data["rtcActuatorBuf"][0]=data["rtcActuatorBuf"][0]-actOrig
        dataList.append(data)
        if fname is not None:
            # Six HDUs per repeat: slopes (data, times, frame numbers)
            # then actuators (data, times, frame numbers).
            FITS.Write(data["rtcCentBuf"][0],fname,writeMode=writeMode,extraHeader=extraHeader)
            writeMode="a"
            extraHeader=None
            FITS.Write(data["rtcCentBuf"][1],fname,writeMode="a")#times
            FITS.Write(data["rtcCentBuf"][2],fname,writeMode="a")#frameno
            FITS.Write(data["rtcActuatorBuf"][0],fname,writeMode="a")
            FITS.Write(data["rtcActuatorBuf"][1],fname,writeMode="a")
            FITS.Write(data["rtcActuatorBuf"][2],fname,writeMode="a")
        errList.append(err)
        # Shift frequencies for the next repeat to explore different dynamics.
        freqOffset+=nactDm//nRepeats
    # Restore the original actuator values.
    d.Set("actuators",actOrig)
    return dataList,errList
dm.pokeSine(2000, pokeVal=pokeVal, nrec=2, dmNo=1, fname=fname, fno=-300, nRepeats=2, repeatIfErr=repeatIfErr) return fname, dm if __name__ == "__main__": rcond = 0.1 thresh = 0.25 if len(sys.argv) > 1: rcond = float(sys.argv[1]) if len(sys.argv) > 2: #1 for simulation, 1000 for bench. amp = float(sys.argv[2]) if len(sys.argv) > 3: #0 for simulation, 0.25 for bench thresh = float(sys.argv[3]) fname, dm = doSinePokeGTC(pokeVal=amp) #dm=calibrate.DMInteraction([373,2],"") # fname="pokeSineBench180705_121816.fits" # fname="pokeSineBench180705_160328.fits" rmx, pmxList, rmxList = dm.processSine(fname, rcond=rcond, stdThresh=thresh) rname = "rmx_%f_%s" % (rcond, fname) FITS.Write(rmx, rname) print "Written %s" % rname
if rmx.shape[1] != gainmx.shape[0]: transpose = 1 rmx = rmx.T gainmx = numpy.dot(rmx, gainmx) if transpose: gainmx = gainmx.T return gainmx if __name__ == "__main__": import sys import FITS if len(sys.argv) < 5: print( "Usage: %s globalGain [modal,gain,list] output.fits slopeToZernMx.fits optionalRmx" % sys.argv[0]) sys.exit(0) gain = sys.argv[1] modalGainList = eval(sys.argv[2]) outMx = sys.argv[3] slopeToZernMx = FITS.Read(sys.argv[4])[1] if slopeToZernMx[0].std() == 0: #first row all zeros - probably piston print("Stripping piston from %s" % sys.argv[4]) slopeToZernMx = slopeToZernMx[1:] rmx = None if len(sys.argv) > 5: rmx = FITS.Read(sys.argv[5])[1] gmx = computeModalGainMx(gain, modalGainList, slopeToZernMx, rmx) FITS.Write(gmx, outMx)
# Poke-matrix measurement script: pokes each actuator in turn, records the
# averaged slope response, builds the poke matrix and its pseudo-inverse
# reconstructor, and loads the reconstructor into darc.
# NOTE(review): nfr (frames to average) is assumed to be defined earlier
# in this script - confirm against the surrounding file.
pokeval = 10.
if len(sys.argv) > 2:
    pokeval = float(sys.argv[2])
print "Have you set into calibration mode and taken reference slopes?"
d = darc.Control("main")
rmx = d.Get("rmx")
nacts, nslopes = rmx.shape
pmx = numpy.zeros(rmx.shape, numpy.float32)
d.Set("addActuators", 0)
actuators = numpy.zeros((nacts, ), numpy.float32)
actuators[:2] = 32768  #the tip-tilt mirror.
for i in range(nacts):
    print "Poking %d" % i
    actuators[i] = pokeval
    if i < 2:
        # Tip-tilt channels sit at midrange; poke relative to that.
        actuators[i] += 32768
    d.Set("actuators", actuators)
    time.sleep(1.5)  # allow the system to settle before measuring
    # Average nfr frames of slopes, normalised by the poke amplitude.
    sl = d.SumData("rtcCentBuf", nfr)[0] / nfr / pokeval
    pmx[i] = sl
    actuators[i] = 0
    if i < 2:
        actuators[i] = 32768
    d.Set("actuators", actuators)
FITS.Write(pmx, "pmx.fits")
# Reconstructor: negative pseudo-inverse (rcond 0.1), transposed.
rmx = -numpy.linalg.pinv(pmx, 0.1).T
FITS.Write(rmx, "rmx.fits")
d.Set("rmx", rmx)
print "Saved pmx.fits, rmx.fits and set rmx into darc"
# Script: exercises the RTC's centroiding with a fake CCD image, saving
# averaged centroids to a FITS file under several threshold settings.
import controlCorba
import FITS
import sys
import time

# Output file for the averaged centroid measurements.
file="tmp.fits"
if len(sys.argv)>1:
    file=sys.argv[1]
#Create the corba client
c=controlCorba.controlClient()
def set(name,val,com="",swap=1,check=1):
    # Set one parameter on the RTC via the corba object.
    # NOTE(review): name "set" shadows the builtin; kept for compatibility.
    c.obj.Set(controlCorba.sdata(name),controlCorba.encode([val]),controlCorba.sdata(com),swap,check)
    #time.sleep(0.01)
# Feed a known image, then record centroids under each threshold algorithm.
set("fakeCCDImage",FITS.Read("shimage.fits")[1][0])
FITS.Write(controlCorba.decode(c.obj.AverageCentroids(1)),file)
set("thresholdAlgorithm",2)
FITS.Write(controlCorba.decode(c.obj.AverageCentroids(1)),file,writeMode="a")
set("thresholdAlgorithm",0)
FITS.Write(controlCorba.decode(c.obj.AverageCentroids(1)),file,writeMode="a")
set("thresholdValue",0)
FITS.Write(controlCorba.decode(c.obj.AverageCentroids(1)),file,writeMode="a")
set("thresholdAlgorithm",1)
FITS.Write(controlCorba.decode(c.obj.AverageCentroids(1)),file,writeMode="a")
set("thresholdAlgorithm",2)
def valid(self,label,val,buf):
    """Checks a value is valid. buf is the buffer that contains all the other parameters

    Dispatches on the parameter name (label), coercing val to the
    expected dtype/shape where possible and raising Exception (usually
    with the label as message) when the value cannot be made valid.
    Returns the (possibly coerced) value.  Unknown labels are passed to
    CustomCheck.valid if available, otherwise reported as unchecked.
    """
    #buf=self.guibuf
    if label=="reconstructMode":
        if(val not in ["simple","truth","open","offset"]):
            raise Exception(label)
    elif label=="windowMode":
        if val not in ["basic","adaptive","global"]:
            raise Exception(label)
    elif label in ["cameraName","mirrorName","comment","slopeName","figureName","version","configfile"]:
        if type(val)!=type(""):
            raise Exception(label)
    elif label in ["reconName","calibrateName","bufferName"]:
        if type(val) not in [type(""),type(None)]:
            raise Exception(label)
    elif label=="centroidMode":
        if type(val)==numpy.ndarray:
            # Per-subaperture centroid mode array.
            nsubaps=buf.get("nsub").sum()
            try:
                val=self.checkArray(val,nsubaps,"i")
            except:
                print "centroidMode array wrong"
                traceback.print_exc()
                raise
        elif val not in ["WPU","CoG","Gaussian","CorrelationCoG","CorrelationGaussian",0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]:
            print "centroidMode not correct (%s)"%str(val)
            raise Exception(label)
    elif label in ["cameraParams","mirrorParams","slopeParams","figureParams","reconParams","calibrateParams","bufferParams"]:
        if type(val)==type(""):
            # Strings are padded to a 4-byte boundary and reinterpreted
            # as an int32 array for the module parameter blocks.
            l=4-len(val)%4
            if l<4:
                val+="\0"*l
            val=numpy.fromstring(val,dtype=numpy.int32)
        if type(val)==type([]):
            val=numpy.array(val)
        if type(val)!=type(None) and type(val)!=numpy.ndarray:
            print "ERROR in val for %s: %s"%(label,str(val))
            raise Exception(label)
    elif label in ["closeLoop","nacts","thresholdAlgo","delay","maxClipped","camerasFraming","camerasOpen","mirrorOpen","clearErrors","frameno","corrThreshType","nsubapsTogether","nsteps","addActuators","recordCents","averageImg","averageCent","kalmanPhaseSize","figureOpen","printUnused","reconlibOpen","currentErrors","xenicsExposure","calibrateOpen","iterSource","bufferOpen","bufferUseSeq","subapLocType","noPrePostThread","asyncReset","openLoopIfClip","threadAffElSize","mirrorStep","mirrorUpdate","mirrorReset","mirrorGetPos","mirrorDoMidRange","lqgPhaseSize","lqgActSize"]:
        # Plain integer parameters.
        val=int(val)
    elif label in ["dmDescription"]:
        if val.dtype.char!="h":
            raise Exception("dmDescription type not int16")
    elif label in ["actuators"]:
        val=self.checkNoneOrArray(val,None,"f")
        if val is not None:
            if val.size%buf.get("nacts")==0:
                # shape okay... multiple of nacts.
                pass
            else:
                raise Exception("actuators should be array (nacts,) or (X,nacts)")
    elif label in ["actMax","actMin"]:
        nact=buf.get("nacts")
        try:
            val=self.checkArray(val,nact,None)
        except:
            print "WARNING (Check.py) - actMax/actMin as int now depreciated... this may not work (depending on which version of RTC you're running).  The error was"
            #traceback.print_exc()
            #val=int(val)
            print "Continuing... using %s"%str(val)
    #elif label in["lastActs"]:
    #    nact=buf.get("nacts")
    #    val=self.checkNoneOrArray(val,nact,"H")
    elif label in ["actInit"]:
        val=self.checkNoneOrArray(val,None,"H")
    elif label in ["actMapping"]:
        val=self.checkNoneOrArray(val,None,"i")
    elif label in ["figureActSource"]:
        val=self.checkNoneOrArray(val,None,"i")
    elif label in ["figureActScale","figureActOffset","actScale","actOffset"]:
        val=self.checkNoneOrArray(val,None,"f")
    elif label in ["actuatorMask"]:
        val=self.checkNoneOrArray(val,None,"f")
        if val is not None:
            if val.size==buf.get("nacts"):
                # shape okay... multiple of nacts.
                pass
            else:
                raise Exception("actuatorMask should be array (nacts,)")
    elif label in ["actSequence"]:
        actuators=buf.get("actuators")
        if actuators is None:
            size=None
        else:
            size=actuators.size/buf.get("nacts")
        val=self.checkNoneOrArray(val,size,"i")
    elif label in ["bgImage","flatField","darkNoise","pxlWeight","calmult","calsub","calthr"]:
        # Per-pixel calibration arrays: one float per camera pixel.
        val=self.checkNoneOrArray(val,(buf.get("npxlx")*buf.get("npxly")).sum(),"f")
    elif label in ["thresholdValue"]:
        # May be given as a filename, an eval-able string, an array
        # (per-pixel or per-subap) or a scalar.
        if type(val)==type(""):
            if os.path.exists(val):
                val=FITS.Read(val)[1]
            else:
                val=eval(val)
        if type(val)==numpy.ndarray:
            npxls=(buf.get("npxlx")*buf.get("npxly")).sum()
            if val.size==npxls:
                val=self.checkArray(val,(npxls,),"f")
            else:
                val=self.checkArray(val,(buf.get("nsub").sum(),),"f")
        else:
            try:
                val=float(val)
            except:
                print "thresholdValue: %s"%str(type(val))
                print "thresholdValue should be float or array of floats"
                raise
    elif label in ["useBrightest"]:
        if type(val)==type(""):
            if os.path.exists(val):
                val=FITS.Read(val)[1]
            else:
                val=eval(val)
        if type(val)==numpy.ndarray:
            val=self.checkArray(val,(buf.get("nsub").sum(),),"i")
        else:
            try:
                val=int(val)
            except:
                print "%s: %s"%(label,str(type(val)))
                print "%s should be int or array of ints size equal to total number of subapertures (valid and invalid)"%label
                raise
    elif label in ["fluxThreshold"]:
        if type(val)==type(""):
            if os.path.exists(val):
                val=FITS.Read(val)[1]
            else:
                val=eval(val)
        if type(val)==numpy.ndarray:
            val=self.checkArray(val,buf.get("subapFlag").sum(),"f")
        else:
            try:
                val=float(val)
            except:
                print "%s: %s"%(label,str(type(val)))
                print "%s should be float or array of floats size equal to tutal number of used subapertures"
                raise
    elif label in ["maxAdapOffset"]:
        if type(val)==type(""):
            if os.path.exists(val):
                val=FITS.Read(val)[1]
            else:
                val=eval(val)
        if type(val)==numpy.ndarray:
            val=self.checkArray(val,buf.get("subapFlag").sum(),"i")
        else:
            try:
                val=int(val)
            except:
                print "maxAdapOffset",val
                print "maxAdapOffset should be int or array of ints of size equal to number of valid subaps %s"%str(type(val))
                raise
    elif label in ["powerFactor","adaptiveWinGain","corrThresh","figureGain","uEyeFrameRate","uEyeExpTime"]:
        # Plain float parameters.
        val=float(val)
    elif label=="bleedGain":
        if type(val)==numpy.ndarray:
            if val.dtype.char!='f':
                val=val.astype('f')
        else:
            val=float(val)
    elif label=="bleedGroups":
        val=self.checkNoneOrArray(val,buf.get("nacts"),"i")
    elif label in ["switchTime"]:
        val=self.checkDouble(val)
    elif label in ["fakeCCDImage"]:
        val=self.checkNoneOrArray(val,(buf.get("npxlx")*buf.get("npxly")).sum(),"f")
    elif label in ["centroidWeight"]:
        val=self.checkNoneOrFloat(val)
    elif label in ["gainE","E"]:
        if val is None:
            pass
        else:
            if type(val)==numpy.ndarray:
                if val.dtype!=numpy.float32:
                    val=val.astype(numpy.float32)
                if val.shape!=(buf.get("nacts"),buf.get("nacts")) and val.shape!=(buf.get("subapFlag").sum()*2,buf.get("nacts")):
                    raise Exception("E should be shape nacts,nacts or nslopes,nacts")
            else:#lazy - this doesn't check for nslopes,nacts...
                val=self.checkArray(val,(buf.get("nacts"),buf.get("nacts")),"f")
    elif label in ["gainReconmxT"]:
        val=self.checkArray(val,(buf.get("subapFlag").sum()*2,buf.get("nacts")),"f")
    elif label in ["kalmanAtur"]:
        val=self.checkArray(val,(buf.get("kalmanPhaseSize"),buf.get("kalmanPhaseSize")),"f")
    elif label in ["kalmanInvN"]:
        val=self.checkNoneOrArray(val,buf.get("nacts")*buf.get("kalmanPhaseSize"),"f")
        if val is not None:#now check shape
            val=self.checkArray(val,(buf.get("nacts"),buf.get("kalmanPhaseSize")),"f")
    elif label in ["kalmanHinfDM"]:
        val=self.checkArray(val,(buf.get("kalmanPhaseSize")*3,buf.get("kalmanPhaseSize")),"f")
    elif label in ["kalmanHinfT"]:
        val=self.checkArray(val,(buf.get("subapFlag").sum()*2,buf.get("kalmanPhaseSize")*3),"f")
    elif label in ["kalmanReset","kalmanUsed","printTime","usingDMC","go","pause","switchRequested","startCamerasFraming","stopCamerasFraming","openCameras","closeCameras","centFraming","slopeOpen"]:
        # Boolean-ish flags.
        val=self.checkFlag(val)
    elif label in ["nsub","ncamThreads","npxlx","npxly"]:
        # One entry per camera.
        val=self.checkArray(val,buf.get("ncam"),"i")
    elif label in ["pxlCnt","subapFlag"]:
        val=self.checkArray(val,buf.get("nsub").sum(),"i")
    elif label in ["refCentroids"]:
        val=self.checkNoneOrArray(val,buf.get("subapFlag").sum()*2,"f")
    elif label in ["centCalBounds"]:
        val=self.checkNoneOrArray(val,buf.get("subapFlag").sum()*2*2,"f")
    elif label in ["centCalSteps","centCalData"]:
        ncents=buf.get("subapFlag").sum()*2
        val=self.checkNoneOrArray(val,None,"f")
        if val is not None:
            nsteps=val.size/ncents
            if val.size!=nsteps*ncents:
                raise Exception("%s wrong shape - should be multiple of %d, is %d"%(label,ncents,val.size))
    elif label in ["subapLocation"]:
        slt=buf.get("subapLocType")
        if slt==0:
            # Standard 6-int-per-subap layout.
            val=self.checkArray(val,(buf.get("nsub").sum(),6),"i")
        else:
            val=self.checkArray(val,None,"i")
            n=val.size//buf.get("nsub").sum()#get the size and test again
            val=self.checkArray(val,(buf.get("nsub").sum(),n),"i")
    elif label in ["subapAllocation"]:
        val=self.checkNoneOrArray(val,buf.get("nsub").sum(),"i")
    elif label in ["gain"]:
        val=self.checkArray(val,buf.get("nacts"),"f")
    elif label in ["v0"]:
        val=self.checkNoneOrArray(val,buf.get("nacts"),"f")
    elif label in ["asyncInitState","asyncScales","asyncOffsets"]:
        val=self.checkNoneOrArray(val,buf.get("nacts"),"f")
    elif label in ["asyncCombines","asyncUpdates","asyncStarts","asyncTypes"]:
        val=self.checkNoneOrArray(val,None,"i")
    elif label in ["decayFactor"]:
        val=self.checkNoneOrArray(val,buf.get("nacts"),"f")
    elif label in ["rmx"]:
        # rmx may only be None for reconstruction libraries that don't use it.
        if val is None and buf.get("reconName") not in ["libreconpcg.so","libreconneural.so","libreconLQG.so","libreconcure.so"]:
            raise Exception("rmx is None")
        elif val is not None:
            val=self.checkArray(val,(buf.get("nacts"),buf.get("subapFlag").sum()*2),"f",raiseShape=1)
    elif label in ["slopeSumMatrix"]:
        val=self.checkNoneOrArray(val,None,"f")
        if (val is not None) and val.size%buf.get("nacts")!=0:
            raise Exception("slopeSumMatrix wrong size")
    elif label in ["slopeSumGroup"]:
        val=self.checkNoneOrArray(val,buf.get("subapFlag").sum()*2,"i")
        if (val is not None) and numpy.max(val)+1!=(buf.get("slopeSumMatrix").size/buf.get("nacts")):
            raise Exception("Groupings in slopeSumGroup not consistent with size of slopeSumMatrix")
    elif label in ["ncam"]:
        val=int(val)
        if val<1:
            raise Exception("Illegal ncam")
    elif label in ["threadAffinity"]:
        if val is None:
            pass
        elif type(val)==numpy.ndarray:
            if val.dtype!="i":
                val=val.astype("i")
            # One affinity record per thread plus one for the control thread.
            if val.size%(buf.get("ncamThreads").sum()+1)!=0:
                raise Exception("threadAffinity error (size not multiple of %d)"%(buf.get("ncamThreads").sum()+1))
        else:
            raise Exception("threadAffinity error (should be an array, or None)")
    elif label in ["threadPriority"]:
        val=self.checkNoneOrArray(val,buf.get("ncamThreads").sum()+1,"i")
    elif label in ["corrFFTPattern","corrPSF"]:
        if type(val)==numpy.ndarray:
            val=val.astype(numpy.float32)
        elif val is not None:
            raise Exception("corrFFTPattern error")
        #val=self.checkNoneOrArray(val,(buf.get("npxlx")*buf.get("npxly")).sum(),"f")
    elif label in ["adaptiveGroup"]:
        val=self.checkNoneOrArray(val,buf.get("subapFlag").sum(),"i")
    elif label in ["asyncNames"]:
        pass#no checking needed...
    elif label in ["adapWinShiftCnt"]:
        val=self.checkNoneOrArray(val,buf.get("nsub").sum()*2,"i")
    elif label in ["centIndexArray"]:
        if type(val)==type([]):
            val=numpy.array(val)
        elif type(val)==type(""):
            if os.path.exists(val):
                print "Loading %s"%val
                val=FITS.Read(val)[1]
            else:
                print "File %s not found"%val
                raise Exception("File %s not found"%val)
        if val is None:
            pass
        elif type(val)==numpy.ndarray:
            val=val.astype("f")
            # Allowed sizes are 1-4x the pixel count (or FFT pattern size).
            fft=buf.get("corrFFTPattern",None)
            if fft is None:
                npxls=(buf.get("npxlx")*buf.get("npxly")).sum()
            else:
                npxls=fft.size
            if val.size not in [npxls,npxls*2,npxls*3,npxls*4]:
                raise Exception("centIndexArray wrong size")
        else:
            raise Exception("centIndexArray")
    elif label=="actsToSend":
        val=self.checkNoneOrArray(val,None,"i")
    elif label in ["mirrorSteps","mirrorMidRange"]:#used for mirrorLLS.c
        val=self.checkArray(val,buf.get("nacts"),"i")
    elif label in ["lqgAHwfs"]:
        val=self.checkArray(val,(buf.get("lqgPhaseSize")*2,buf.get("lqgPhaseSize")),"f",raiseShape=1)
    elif label in ["lqgAtur"]:
        val=self.checkArray(val,(buf.get("lqgPhaseSize"),buf.get("lqgPhaseSize")),"f",raiseShape=1)
    elif label in ["lqgHT"]:
        val=self.checkArray(val,(buf.get("subapFlag").sum()*2,2*buf.get("lqgPhaseSize")),"f",raiseShape=1)
    elif label in ["lqgHdm"]:
        # Accepts either a single matrix or a pair (leading axis 2).
        try:
            val=self.checkArray(val,(2*buf.get("lqgPhaseSize"),buf.get("nacts")),"f",raiseShape=1)
        except:
            val=self.checkArray(val,(2,2*buf.get("lqgPhaseSize"),buf.get("nacts")),"f",raiseShape=1)
    elif label in ["lqgInvN"]:
        try:
            val=self.checkArray(val,(buf.get("nacts"),buf.get("lqgPhaseSize")),"f",raiseShape=1)
        except:
            val=self.checkArray(val,(2,buf.get("nacts"),buf.get("lqgPhaseSize")),"f",raiseShape=1)
    elif label in ["lqgInvNHT"]:
        val=self.checkArray(val,(buf.get("subapFlag").sum()*2,buf.get("nacts")),"f",raiseShape=1)
    elif CustomCheck is not None:
        # Unknown label: delegate to the site-specific checker.
        val=CustomCheck.valid(label,val,buf)
    else:
        print "Unchecked parameter %s"%label
    return val
def save(self, fname):
    """Write the buffer's backing array to FITS file *fname*.

    The file produced here is the one read back by loadBuf.
    """
    # Serialisation itself is handled entirely by the FITS helper module.
    contents = self.arr
    FITS.Write(contents, fname)
# Interactive pixel-calibration helper: connects to the darc control object
# over CORBA (using `prefix`, assumed bound earlier in the script) and walks
# the user through dark-map, flat-field and background calibrations.  Each
# measured map is averaged over a user-chosen number of frames, written to a
# FITS file and set live in darc.  Before each measurement the existing
# calibration arrays are cleared so the new average is raw.
# NOTE(review): collapsed multi-statement span - original line breaks and
# indentation were lost; code left byte-identical.
# NOTE(review): the dark-map prompt contains a typo ("turn of light sources"
# should read "turn off") - it is a runtime string, so not altered here.
ctrl = controlCorba.controlClient(controlName=prefix + "Control", debug=0) y = raw_input("Do you want to do pixel calibration? y/n") if y == "y": y = raw_input( "Do you want to do dark map calibration? If so, please turn of light sources. y/n" ) if y == "y": n = raw_input("Number of frames to average?") n = int(n) ctrl.set("bgImage", None) ctrl.set("flatField", None) ctrl.set("darkNoise", None) ctrl.set("thresholdType", 0) dark = ctrl.AverageImage(n, whole=1) ctrl.set("darkNoise", dark) FITS.Write(dark, "darkNoise.fits") y = raw_input( "Do you want to do flat field calibration? If so, please illuminate with a flat field. y/n" ) if y == "y": n = raw_input("Number of frames to average?") n = int(n) ctrl.set("bgImage", None) ctrl.set("flatField", None) ctrl.set("thresholdType", 0) ff = ctrl.AverageImage(n, whole=1) ctrl.set("flatField", ff) FITS.Write(ff, "flatField.fits") y = raw_input( "Do you want to do a background image calibration? If so, please turn off light sources and get your background ready. y/n" )
# Builds a synthetic correlation test set-up for each camera i: the central
# 2x2 pixels of every (ny[i] x nx[i]) subaperture are marked both in the raw
# camera image slab `tmp` (value 100, then scaled by the flat-field `fftmp`
# and offset by the background `bgtmp`) and in the per-camera slice of
# `correlationPSF` (value 1), after which the camera image is saved to
# camImage.fits for use by the file-based camera library.
# NOTE(review): `nsubaps` is used to size subapLocation and only afterwards
# rebound to nsuby*nsubx - presumably a scalar total is already in scope from
# earlier in the script; verify against the preceding lines.
# The HinfT / kalmanHinfDM / Atur matrices started below are filled with
# random values, so this is clearly test data rather than a real Kalman
# reconstructor.
# NOTE(review): collapsed multi-statement span - code left byte-identical.
tmp[:, ny[i] / 2 - 1:npxly[i]:ny[i], nx[i] / 2 - 1:npxlx[i]:nx[i]] = 100 tmp[:, ny[i] / 2:npxly[i]:ny[i], nx[i] / 2 - 1:npxlx[i]:nx[i]] = 100 tmp[:, ny[i] / 2 - 1:npxly[i]:ny[i], nx[i] / 2:npxlx[i]:nx[i]] = 100 tmp[:, ny[i] / 2:npxly[i]:ny[i], nx[i] / 2:npxlx[i]:nx[i]] = 100 tmp *= fftmp tmp += bgtmp corrImg = correlationPSF[indx:indx + npxlx[i] * npxly[i]] corrImg.shape = npxly[i], npxlx[i] corrImg[ny[i] / 2 - 1:npxly[i]:ny[i], nx[i] / 2 - 1:npxlx[i]:nx[i]] = 1 corrImg[ny[i] / 2:npxly[i]:ny[i], nx[i] / 2 - 1:npxlx[i]:nx[i]] = 1 corrImg[ny[i] / 2 - 1:npxly[i]:ny[i], nx[i] / 2:npxlx[i]:nx[i]] = 1 corrImg[ny[i] / 2:npxly[i]:ny[i], nx[i] / 2:npxlx[i]:nx[i]] = 1 indx += npxlx[i] * npxly[i] FITS.Write(camimg, "camImage.fits") #file used when reading from file, subapLocation = numpy.zeros((nsubaps, 6), "i") nsubaps = nsuby * nsubx #cumulative subap nsubapsCum = numpy.zeros((ncam + 1, ), numpy.int32) ncentsCum = numpy.zeros((ncam + 1, ), numpy.int32) for i in range(ncam): nsubapsCum[i + 1] = nsubapsCum[i] + nsubaps[i] ncentsCum[ i + 1] = ncentsCum[i] + subapFlag[nsubapsCum[i]:nsubapsCum[i + 1]].sum() * 2 kalmanPhaseSize = nacts #assume single layer turbulence... HinfT = numpy.random.random((ncents, kalmanPhaseSize * 3)).astype("f") - 0.5 kalmanHinfDM = numpy.random.random( (kalmanPhaseSize * 3, kalmanPhaseSize)).astype("f") - 0.5 Atur = numpy.random.random(
# Reads a per-filter FITS image named on the command line, reconstructs its
# world-coordinate axes from the CRVAL/CRPIX/CDELT header keywords, and
# builds CloughTocher 2-D interpolators over those coordinates for both the
# mean map (`fl`) and its companion std map (`efl`, same filename with
# "mean" replaced by "std").  Photometry is then loaded via getdata() and
# the normalised x/y coordinates are mapped back to physical units.
# NOTE(review): `file` shadows the Python builtin; `string.strip` /
# `string.split` with map() is Python 2 only.
# NOTE(review): collapsed multi-statement span - code left byte-identical.
from scipy.interpolate import CloughTocher2DInterpolator as interp2D import FITS import re pat = re.compile(r'([ugriYJHBV])_mean2.fits') f = open('BV_max.dat') lines = f.readlines() lines = map(string.strip, lines) lines = map(string.split, lines) names = [line[0] for line in lines] name_sts = array([float(line[7]) / 30. for line in lines]) file = sys.argv[1] filt = pat.search(file).group(1) f = FITS.FITS(file) xs = f['CRVAL1'] + (arange(1, f['NAXIS1'] + 1) - f['CRPIX1']) * f['CDELT1'] ys = f['CRVAL2'] + (arange(1, f['NAXIS2'] + 1) - f['CRPIX2']) * f['CDELT2'] N = f['NAXIS1'] * f['NAXIS2'] X = dstack(meshgrid(xs, ys)).reshape((N, 2)) fl = interp2D(X, ravel(f.data())) efl = interp2D(X, ravel(FITS.qread(file.replace('mean', 'std')))) x, y, v, efluxes, mesh, xoff, xscale, yoff, yscale, mean_flux = getdata( filt, False) x = x * xscale + xoff y = y * yscale + yoff ids = concatenate([nonzero(diff(y))[0], array([len(y) - 1])]) sts = y[ids]
# Finishes reading (day, wave, flux) columns from an input file (the append
# calls at the start belong to a read loop whose header precedes this span),
# then reshapes the flux column into a 2-D image of ny epochs x nx
# wavelengths and writes it to a FITS file with linear WCS keywords on both
# axes.  The wavelength step dx and epoch step dy are inferred from the
# first two samples of each axis.
# NOTE(review): the final statement `of.close` is missing parentheses, so
# the close method is never actually called and the file is only closed
# implicitly at interpreter exit - should very likely be `of.close()`.
# NOTE(review): the inskey() key arguments appear shifted by one (e.g.
# CRPIX1 inserted after NAXIS2) - presumably inskey(after, key, value);
# verify against the FITS class API.
# NOTE(review): collapsed multi-statement span - code left byte-identical.
wave.append(float(fs[1])) flux.append(float(fs[2])) f.close() day = array(day) wave = array(wave) flux = array(flux) # figure out the dimensions val1 = day[0] numwaves = len(nonzero(equal(day, day[0]))) nx = numwaves x0 = wave[0] dx = wave[1] - wave[0] dy = day[numwaves] - day[0] ny = len(day) / nx y0 = day[0] flux.shape = (ny, nx) of = FITS.FITS(outfile, create=1) of.newhead("T", -32, 2, [nx, ny]) of.putdata(flux) print 1, x0, dx, 1, y0, dy of.inskey("NAXIS2", "CRPIX1", 1) of.inskey("CRPIX1", "CRVAL1", x0) of.inskey("CRVAL1", "CDELT1", dx) of.inskey("CDELT1", "CRPIX2", 1) of.inskey("CRPIX2", "CRVAL2", y0) of.inskey("CRVAL2", "CDELT2", dy) of.close
# Append one frame (plus its frame number and timestamp) to the open log.
# In FITS mode (self.asfits) three streams are maintained: the main data
# file self.fd, and sidecar files <name>fno / <name>tme holding the int32
# frame numbers and float64 timestamps.  A FITS HDU is started lazily on
# the first write, and a fresh HDU (at end-of-file) is started whenever the
# incoming frame's shape or dtype differs from the current one, after
# finalising the previous HDU via self.fitsFinalise().  The leading axis of
# each HDU is written as 1 and is presumably patched up at finalise time -
# TODO confirm against fitsFinalise.
# NOTE(review): collapsed multi-statement span - code left byte-identical.
def write(self, data, ftime, fno): if self.asfits: if self.initialised == 0: #Initialise the header self.finalise = 1 self.initialised = 1 self.hdustart = self.fd.tell() shape = [1] + list(data.shape) FITS.WriteHeader(self.fd, shape, data.dtype.char, firstHeader=(self.hdustart == 0), doByteSwap=self.doByteSwap) self.fdfno = open(self.name + "fno", "w+") self.fdtme = open(self.name + "tme", "w+") FITS.WriteHeader(self.fdfno, [ 1, ], "i", firstHeader=0, doByteSwap=self.doByteSwap) FITS.WriteHeader(self.fdtme, [ 1, ], "d", firstHeader=0, doByteSwap=self.doByteSwap) self.dtype = data.dtype.char self.shape = data.shape self.datasize = data.size * data.itemsize if self.shape != data.shape or self.dtype != data.dtype.char: #Have to start a new fits HDU self.fitsFinalise() #So, finalise existing self.finalise = 1 self.fd.seek(0, 2) #move to end of file. self.hdustart = self.fd.tell() shape = [1] + list(data.shape) FITS.WriteHeader(self.fd, shape, data.dtype.char, firstHeader=0, doByteSwap=self.doByteSwap) self.fdfno = open(self.name + "fno", "w+") self.fdtme = open(self.name + "tme", "w+") FITS.WriteHeader(self.fdfno, [ 1, ], "i", firstHeader=0, doByteSwap=self.doByteSwap) FITS.WriteHeader(self.fdtme, [ 1, ], "d", firstHeader=0, doByteSwap=self.doByteSwap) self.dtype = data.dtype.char self.shape = data.shape #and now write the data. 
# FITS branch: byte-swap only when doByteSwap is requested AND the host is
# little-endian, i.e. the data lands on disk big-endian.  Raw (non-FITS)
# branch: a small binary header in self.info carries the byte count, frame
# number, timestamp (as two int32 words viewed as float64) and dtype char,
# followed by the raw frame bytes.
if self.doByteSwap and numpy.little_endian: self.fd.write(data.byteswap().data) self.fdfno.write( numpy.array([fno]).astype(numpy.int32).byteswap().data) self.fdtme.write( numpy.array([ftime]).astype(numpy.float64).byteswap().data) else: self.fd.write(data.data) self.fdfno.write(numpy.array([fno]).astype(numpy.int32).data) self.fdtme.write( numpy.array([ftime]).astype(numpy.float64).data) else: self.info[0] = ( self.info.size - 1 ) * self.info.itemsize + data.size * data.itemsize #number of bytes to follow (excluding these 4) self.info[1] = fno self.info[2:4].view(numpy.float64)[0] = ftime self.info.view("c")[16] = data.dtype.char self.fd.write(self.info.data) self.fd.write(data.data)
import sys import numpy import darc import FITS rcond=float(sys.argv[1]) pmx=FITS.Read("pmx.fits")[1]#This contains high order DM and TT. rmx=-numpy.linalg.pinv(pmx,rcond).T#separating out the TT from rest would be better - but I'm too lazy... FITS.Write(rmx,"rmx%g.fits"%rcond) d=darc.Control("main") d.Set("rmx",rmx) print "Saved rmx%g.fits and set in darc"%rcond
# Start the Obit system using the AIPS/FITS directory lists (adirs, fdirs),
# the AIPS user number (user) and the error stack (err), all bound earlier
# in this script, then register those directories with ObitTalk.
ObitSys=OSystem.OSystem ("debug", 1, user, len(adirs), adirs, len(fdirs), fdirs, True, False, err)
OErr.printErrMsg(err, "Error with Obit startup")
print("NewDebug")
import AIPS, FITS, AIPSData, FITSData
from OTObit import newDisplay, tvlod, AMcat
# Register each AIPS data directory as a numbered AIPS disk.
AIPS.AIPS.userno = user
disk = 0
for ad in adirs:
    AIPS.AIPS.disks.append(AIPS.AIPSDisk(None, disk, ad))
    disk += 1
# Register each FITS data directory as a numbered FITS disk.
disk = 0
for fd in fdirs:
    # BUGFIX: this previously appended `ad` (the leftover loop variable from
    # the AIPS loop above) instead of `fd`, so every FITS disk was registered
    # with the last AIPS directory rather than its own FITS directory.
    FITS.FITS.disks.append(FITS.FITSDisk(None, disk, fd))
    disk += 1
# List directories
#ObitTalkUtil.ListAIPSDirs()
#ObitTalkUtil.ListFITSDirs()
print("AIPS.AIPS.disks",AIPS.AIPS.disks)
#DAMN print "AIPSData",help(AIPSData.AIPSCat)
#DAMN AMcat(1)
import Image,FitModel,FitRegion, Obit, ODisplay, OErr
err=OErr.OErr()
from OTObit import newDisplay, tvlod, AMcat, getFITS
AMcat(1)
##disp=ODisplay.ODisplay(8765)
import sys import numpy import darc import FITS nfr = 10 if len(sys.argv) > 1: nfr = int(sys.argv[1]) print "Have you set into calibration mode?" d = darc.Control("main") d.Set("refCentroids", None) refslopes = d.SumData("rtcCentBuf", nfr)[0] / nfr d.Set("refCentroids", refslopes) FITS.Write(refslopes, "refslopes.fits") print "Saved refslopes.fits and set in darc"
# Builds the DM description (a single DM with an 8x8 actuator grid, masked
# by a circular pupil so 52 actuators are active and numbered 0..51) and
# the correlation-tracking configuration: the reference PSF is taken from
# the mean of the camera file image (background subtracted, negative values
# clipped to zero) and transformed into the FFT-space pattern and revised
# subap geometry that darc needs, via correlation.transformPSF().
# NOTE(review): fname is stripped of the trailing NULs that were added to
# pack it into an int32 parameter array elsewhere in this script.
# NOTE(review): the final `control = {` statement is truncated in this view
# and continues beyond this span.
# NOTE(review): collapsed multi-statement span - code left byte-identical.
dmDescription[0] = 1 #1 DM dmDescription[1] = 8 #1st DM has nacts linear actuators tmp = dmDescription[2:] tmp[:] = -1 tmp.shape = 8, 8 dmflag = tel.Pupil(8, 4, 0).fn.ravel() numpy.put(tmp, dmflag.nonzero()[0], numpy.arange(52)) corrClip = 1 corrNStore = 20 corrThresh = 0.0 corrThreshType = 2 #CORR_FRAC_SUB corrUpdateGain = 0. centroidMode = "CorrelationCoG" psf = FITS.Read(fname.strip("\0"))[1].mean(0).ravel() - bgImage psf = numpy.where(psf < 0, 0, psf) import correlation corrdata = correlation.transformPSF( psf, ncam, npxlx, npxly, nsub, subapLocation, subapFlag, pad=8 ) #Note - to get identical slope estimates when doing auto correlation update and using the correlation result for shift and add (corrUpdateToCoG=0), probably need pad to be larger, to avoid wrapping. But in practice, the difference in results is small. corrFFTPattern = corrdata["corrFFTPattern"] corrSubapLoc = corrdata["corrSubapLoc"] corrNpxlx = corrdata["corrNpxlx"] corrNpxlCum = corrdata["corrNpxlCum"] #Now populate the control structure - this is what gets used. control = { "switchRequested":
# Completes the per-subaperture pixel-count table pxlCnt (how many pixels
# must have arrived before each subaperture can be processed - here one
# full row of the camera image per subap), creates a random reconstruction
# matrix rmx of shape (nacts, ncents), and prepares the parameter arrays
# handed to the camera and mirror dynamic libraries.  A random int16 test
# image is written to camfileimg.fits if it does not already exist; the
# filename is then NUL-padded to a multiple of 4 bytes so it packs into the
# int32 cameraParams array as the camfile library expects.
# NOTE(review): this span begins inside nested loops over k/i/j whose outer
# headers precede it.
# NOTE(review): collapsed multi-statement span - code left byte-identical.
for j in range(nsubx[k]): indx=nsubapsCum[k]+i*nsubx[k]+j n=(subapLocation[indx,1])*npxlx[k] ##use full row. pxlCnt[indx]=n #pxlCnt[-3:]=npxls#not necessary, but means the RTC reads in all of the pixels... so that the display shows whole image #create a reconstruction matrix rmx=numpy.random.normal(0,1.,(nacts,ncents)).astype("f")#FITS.Read("rmxRTC.fits")[1].transpose().astype("f") print("rmx: %s"%str(rmx.shape)) #Parameters passed to the dynamic libraries upon loading. These will vary depending on what library is in use. fname="camfileimg.fits" if not os.path.exists("camfileimg.fits"): img=numpy.random.normal(10,1,(3,npxly[0],npxlx[0])).astype(numpy.int16) FITS.Write(img,fname) while len(fname)%4!=0:#zero pad to it fits into 32bit int array fname+="\0" cameraParams=numpy.fromstring(fname,dtype="i") slopeParams=None mirrorParams=numpy.zeros((5,),"i") mirrorParams[0]=1000#timeout/ms mirrorParams[1]=1#port mirrorParams[2]=1#thread affinity el size mirrorParams[4]=-1#thread affinity mirrorParams[3]=1#thread prioirty #Now populate the control structure - this is what gets used.
# Example darc client script: connects to the darc controller over CORBA,
# averages 10 calibrated camera frames, reshapes the decoded result to
# 128x128 pixels and saves it to tmp.fits.
# NOTE(review): the helper `set` shadows the Python builtin of the same
# name; it is not called in this span (kept, presumably, for interactive
# use) - renaming it would be an interface change, so it is left as-is.
# NOTE(review): the 128x128 shape is hard-coded and must match the
# connected camera - TODO confirm.
# NOTE(review): collapsed multi-statement span - code left byte-identical.
#darc, the Durham Adaptive optics Real-time Controller. #Copyright (C) 2010 Alastair Basden. #This program is free software: you can redistribute it and/or modify #it under the terms of the GNU Affero General Public License as #published by the Free Software Foundation, either version 3 of the #License, or (at your option) any later version. #This program is distributed in the hope that it will be useful, #but WITHOUT ANY WARRANTY; without even the implied warranty of #MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #GNU Affero General Public License for more details. #You should have received a copy of the GNU Affero General Public License #along with this program. If not, see <http://www.gnu.org/licenses/>. import controlCorba import FITS #Create the corba client c=controlCorba.controlClient() def set(name,val,com="",swap=1,check=1): c.obj.Set(controlCorba.sdata(name),controlCorba.encode([val]),controlCorba.sdata(com),swap,check) #Acquire a calibrated image, averaged over 10 frames data=c.obj.AverageImage(10,0) #Decode and reshape the return data=controlCorba.decode(data) data.shape=128,128 FITS.Write(data,"tmp.fits")