Example #1
File: process.py Project: rovor/RedROVOR
def makeFlat(*fnames, **kwargs):
    '''
    Take the input frames (which we assume to be flat frames of the same filter) as strings containing filenames
    and combine them into a master Flat. The exact behaviour depends on the following optional keyword arguments:

    output -- if provided, the path to write the resulting flat to; if absent, makeFlat returns the resulting PrimaryHDU
    minmax -- how many data points to remove from the top and bottom of the distribution, defaults to 2
    zero -- if provided, the filename of the zero frame to apply first; otherwise assumes that zero correction has already been done
    dark -- if provided, the filename of the dark frame to apply first; otherwise assumes that dark correction has already been done
    '''
    minmax = kwargs.get('minmax',2)
    with ImageList(*fnames) as imlist:
        if 'zero' in kwargs:
            imlist.subZero(kwargs['zero'])
        if 'dark' in kwargs:
            imlist.subDark(kwargs['dark'])
        imlist.normalize()  #normalize the flats
        Flat = imlist.avCombine(minmax=minmax)
    Flat.header.update('imagetyp','flat')
    if 'zero' in kwargs:
        Flat.header.update('ZEROCOR','{0} Zero Image is {1}'.format(getTimeString('%x %X'),kwargs['zero']))
    if 'dark' in kwargs:
        Flat.header.update('DARKCOR','{0} Dark Image is {1}'.format(getTimeString('%x %X'),kwargs['dark']))
    if 'output' in kwargs:
        Flat.writeto(kwargs['output'],clobber=True)
    else:
        return Flat
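A hypothetical call (every path below is a placeholder, not a file from the repository):

    # build a master flat from three filter-matched flats and write it to disk
    makeFlat('flat_V_1.fits', 'flat_V_2.fits', 'flat_V_3.fits',
             zero='masterZero.fits',      # subtract this zero frame first
             dark='masterDark.fits',      # then subtract this (exposure-scaled) dark
             output='masterFlat_V.fits')  # write the result instead of returning it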
Example #2
File: process.py Project: rovor/RedROVOR
 def avCombine(self,minmax=2):
     '''
     Combine all frames in the ImageList into a single frame using an arithmetic mean with optional minmax rejection.
     minmax defaults to 2; set it to 0 for no minmax rejection. There must be at least 2*minmax+1 frames in the ImageList.

     At the moment this simply copies the header from the first frame, but more sophisticated header manipulation may be added later.
     Returns a pyfits.PrimaryHDU
     '''
     result = pyfits.PrimaryHDU( self.averageAll(minmax), self._list[0][0].header)
     #NOTE: NAXIS, NAXIS1, NAXIS2, BITPIX, etc. should be updated to match the data portion
     result.header.update('NCOMBINE', len(self._list)) #store the number of images combined
     result.header.update('IRAF-TLM', getTimeString()) #store the time of last modification
     result.header.update('DATE',getTimeString(),'Date FITS file was generated')
     #TODO add code to modify header, at least mark the average time of observations, possibly the total exposure time
     # etc. 
     return result
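averageAll itself is not included in this listing; as a rough sketch of what a minmax-rejected arithmetic mean does (an assumption about averageAll, not the repository's code), with the frames stacked into one 3-D numpy array:

    import numpy as np

    def minmax_rejected_mean(stack, minmax=2):
        # stack has shape (nframes, ny, nx); requires nframes >= 2*minmax + 1
        if minmax == 0:
            return stack.mean(axis=0)
        ordered = np.sort(stack, axis=0)             # sort each pixel's stack of values
        return ordered[minmax:-minmax].mean(axis=0)  # drop the extremes, then average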
Example #3
File: process.py Project: rovor/RedROVOR
def makeDark(*fnames, **kwargs):
    '''
    Take the input frames (which we assume to be dark frames) as strings containing filenames
    and combine them into a master Dark. The exact behaviour depends on the following optional keyword arguments:

    output -- if provided, the path to write the resulting dark to; if absent, makeDark returns the resulting PrimaryHDU
    minmax -- how many data points to remove from the top and bottom of the distribution, defaults to 2
    zero -- if provided, the filename of the zero frame to apply first; otherwise assumes that zero correction has already been done
    '''
    minmax = kwargs.get('minmax',2)
    with ImageList(*fnames) as imlist:
        if 'zero' in kwargs:
            #subtract zeroFrame if supplied
            imlist.subZero(kwargs['zero'])
        #now divide all images by their exposure time for scaling
        for frame in imlist:
            frame.data /= float(frame.header['EXPTIME'])
        Dark = imlist.avCombine(minmax=minmax)
    #now update the headers
    Dark.header.update('imagetyp','dark')
    if 'zero' in kwargs:
        #add header for zero
        Dark.header.update('ZEROCOR','{0} Zero Image is {1}'.format(getTimeString('%x %X'), kwargs['zero']))
    if 'output' in kwargs:
        Dark.writeto(kwargs['output'],clobber=True)
    else:
        return Dark
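A hypothetical call (placeholder paths). With the default minmax=2, at least five dark frames are needed; and because each frame is divided by its EXPTIME before combining, the master dark is in counts per second, matching how subDark rescales it by exposure time:

    makeDark('dark1.fits', 'dark2.fits', 'dark3.fits', 'dark4.fits', 'dark5.fits',
             zero='masterZero.fits',
             output='masterDark.fits')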
Example #4
 def __init__(self, sknames, datnames, Markname, round:int=1):
     c_names =  [skname+"_"+datname for skname in sknames for datname in datnames]
     super(OnlyLastSampler, self).__init__("OnlyLast", True, round, len(c_names), c_names)
     self.sknames = sknames
     self.datnames = datnames
     self.filename = "_".join([utl.RunStr, self.name, Markname, utl.getTimeString()])
     self.index_dict = {c_name: None for c_name in c_names}
     self.hasattr_dict = {c_name: False for c_name in c_names}
Example #5
 def __init__(self, skname, datname, round:int=1, scale:int=6, split:int=100, base=10):
     super(LogScaleSampler, self).__init__("LogSampling", False, scale*split+1, round, list(range(round)))
     self.filename = "_".join([utl.RunStr, self.name, datname, skname, utl.getTimeString()])
     self.skname = skname
     self.datname = datname
     self.samplvec = utl.exp_vector(scale, split, base)
     self.dat_index = None
     self.t_index = None
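utl.exp_vector is not shown in these examples; given that the sampler registers scale*split+1 sample points, a plausible sketch (an assumption, not the project's actual code) is a log-spaced vector:

    import numpy as np

    def exp_vector(scale, split, base=10):
        # scale*split + 1 values spaced evenly in log space:
        # base**0, base**(1/split), ..., base**scale
        return np.power(float(base), np.arange(scale * split + 1) / split)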
Example #6
File: process.py Project: rovor/RedROVOR
 def subZero(self,zero):
     '''subtract a zero from all of the images in place and return self
     zero should be the path to a zero frame
     NOTE: also updates the ZEROCOR header'''
     datestr = getTimeString("%B %d %H:%M")  #get string of current date
     with pyfits.open(zero) as zeroFrame:
         for frame in self:  
             frame.data -= zeroFrame[0].data
             frame.header.update('ZEROCOR','{0} Zero Image is {1}'.format(datestr,zero))
     return self
Example #7
File: process.py Project: rovor/RedROVOR
    def divFlat(self,flat):
        '''divide all the images by a flat in place and return self

        flat should be the path to a flat frame
        NOTE: also updates the FLATCOR header'''
        datestr = getTimeString("%B %d %H:%M")
        with pyfits.open(flat) as flatFrame:
            for frame in self:
                frame.data /= flatFrame[0].data
                frame.header.update('FLATCOR','{0} with Flat frame {1}'.format(datestr,flat))
        return self
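Since subZero, subDark, and divFlat each return self, the calibration steps chain; a hypothetical sequence (placeholder paths):

    with ImageList('obj1.fits', 'obj2.fits') as imlist:
        imlist.subZero('masterZero.fits').subDark('masterDark.fits').divFlat('masterFlat.fits')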
Example #8
def exportToJson(temperature):
	import utils
	import json
	reading = {
		'measure': 'Shed Air Temperature',
		'unit': 'degrees celsius',
		'unitPrefix': '°C',
		'value': temperature,
		'lastUpdated': utils.getTimeString()
	}
	# the with-statement closes the file even if the write fails
	with open('sensors/temperature.json', 'w') as f:
		f.write(json.dumps(reading))
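An illustrative round trip (the reading value is made up):

    import json

    exportToJson(21.4)                      # write the reading
    with open('sensors/temperature.json') as f:
        print(json.load(f)['value'])        # -> 21.4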
Example #9
File: process.py Project: rovor/RedROVOR
    def subDark(self, dark):
        '''subtract dark from all the images in place and return self

        dark should be the path to a dark frame
        NOTE: also updates the DARKCOR header'''
        datestr = getTimeString("%B %d %H:%M")  #get string of current date
        with pyfits.open(dark) as darkFrame:
            for frame in self:
                #scale dark to the exposure time
                #and subtract from frame for all frames
                frame.data -= darkFrame[0].data * float(frame.header['EXPTIME'])
                frame.header.update('DARKCOR','{0} with Dark frame {1}'.format(datestr,dark))
        return self
Example #10
File: firstpass.py Project: rovor/RedROVOR
 def zero_and_dark_subtract(self):
     '''subtract zeros and darks from image files
     and save in the processed folder'''
     self.logger.info("Subtracting zeros and darks")
     #ensure we have everything we need
     self.ensure_frameTypes()
     self.ensure_zero()
     self.ensure_dark()
     for (obj,flist) in self.objects.items():
         #iterate over each object
         for (filt, frames) in splitByHeader(flist,'filter').items():
             baseName = "{0}/{1}{2}-".format(self.processedFolder, obj.replace(' ','_'),filt)
             with process.ImageList(*frames) as imlist:
                 imlist.subZero(self.zeroFrame)
                 imlist.subDark(self.darkFrame)
                 imlist.updateHeaders(ccdproc='{0} CCD Processing done'.format(getTimeString("%x %X")))
                 imlist.saveIndexed(baseName) #save the processed images
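splitByHeader is not among these examples; a plausible sketch of a helper that groups filenames by a FITS header value (an assumption about its behavior, using pyfits as the other snippets do):

    from collections import defaultdict
    import pyfits

    def splitByHeader(fnames, key):
        '''group filenames by the value of header keyword `key`
        in each file's primary HDU'''
        groups = defaultdict(list)
        for fname in fnames:
            with pyfits.open(fname) as hdus:
                groups[hdus[0].header[key]].append(fname)
        return dict(groups)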
Example #11
File: process.py Project: rovor/RedROVOR
def applyFlat(flat_path,*fnames, **kwargs):
    '''
    Apply a flat to one or more frames. flat_path and fnames should both be
    filenames. If save_path is supplied and not None, all the frames are
    saved into the folder save_path with the same basename they had before.
    If save_inplace is supplied and not false, the images are saved in
    place with the flat correction applied.
    '''
    imlist = ImageList(*fnames)
    imlist.divFlat(flat_path)
    datestr = getTimeString("%x %X")
    imlist.updateHeaders(ccdproc='{0} CCD Processing done'.format(datestr))
    if 'save_path' in kwargs and kwargs['save_path']:
        imlist.saveToPath(kwargs['save_path'])
        imlist.closeAll() #clean up
    elif 'save_inplace' in kwargs and kwargs['save_inplace']:
        imlist.saveInPlace()
        imlist.closeAll()
    else:
        return imlist
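A hypothetical call (placeholder paths):

    # flat-correct two frames and save them under processed/ with their old basenames
    applyFlat('masterFlat.fits', 'obj1.fits', 'obj2.fits', save_path='processed/')
    # or: applyFlat(..., save_inplace=True); omit both to get the open ImageList back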
Example #12
    plt.plot(base + M_range/M_splits, counter[i], label=names[i], color=colors[i])
    plt.hist(datas[i]/1000000, bins=M_splits, range=(M_center - M_range, M_center + M_range), alpha=0.3, color=colors[i])
plt.xlabel(r"$\hat{\lambda}/\lambda$")
plt.ylabel("Counts")
plt.xlim((M_center - M_range, M_center + M_range))
plt.ylim((0, 4250))
plt.title(r"$\lambda=10^6$" + ", after 100,000 runs")
plt.legend(loc="upper right")
plt.grid()
plt.savefig("figs/" + utl.VersionStr + "_" + datname + "_" + utl.getTimeString() + ".png")
plt.show()
Example #13
		# discard readings more than filterBracket degrees C away from the previous one
		if ((temp > lastTemp + filterBracket or temp < lastTemp - filterBracket) and not firstReading):
			print 'EXTRANEOUS READING! Reading differs from the previous reading by more than', filterBracket, 'degrees.'
		else:	
			utils.logDataPoint("temperature.rrd",temp)
			# store the last temperature reading if it's good
			lastTemp = temp
			
		# export the graphs, json
		temperature.makePngGraph('Temp C 12 Hours',twelveHours,'web/temperature-12hrs.png')
		temperature.makePngGraph('Temp C 2 Hours',twoHours,'web/temperature-2hrs.png')
		temperature.makePngGraph('Temp C 1 Week',oneWeek,'web/temperature-1wk.png')
		temperature.exportToJson(temp)
		
		# terminal output
		print utils.getTimeString(),'| tempC:',temp
		
		firstReading = False
		
	except Exception as e:
		# terminal output
		print utils.getTimeString(), e
	
	# sleep
	time.sleep(loggingInterval)
Example #14
def start():
    begin = msg('Started processing files')

    # read the various parameters
    configFile = open('./config.json', 'r', encoding='utf-8')
    config = json.loads(configFile.read())
    table = config['table']
    source = config['source']
    data = config['files']['data']
    include = config['files']['include']
    exclude = config['files']['exclude']

    # connect to the database
    connect = mysql.connector.connect(**config['mysql'])

    # drop the old table and create a new one
    cursor = connect.cursor()
    cursor.execute('DROP TABLE IF EXISTS {}'.format(table))
    sql = '''CREATE TABLE {} (
        `id` int(11) NOT NULL AUTO_INCREMENT,
        `author` text COLLATE utf8mb4_unicode_ci DEFAULT NULL,
        `dynasty` text COLLATE utf8mb4_unicode_ci NOT NULL,
        `title` text COLLATE utf8mb4_unicode_ci DEFAULT NULL,
        `rhythmic` text COLLATE utf8mb4_unicode_ci DEFAULT NULL,
        `chapter` text COLLATE utf8mb4_unicode_ci DEFAULT NULL,
        `paragraphs` text COLLATE utf8mb4_unicode_ci NOT NULL,
        `notes` text COLLATE utf8mb4_unicode_ci DEFAULT NULL,
        `collection` text COLLATE utf8mb4_unicode_ci NOT NULL,
        `section` text COLLATE utf8mb4_unicode_ci DEFAULT NULL,
        `content` text COLLATE utf8mb4_unicode_ci DEFAULT NULL,
        `comment` text COLLATE utf8mb4_unicode_ci DEFAULT NULL,
        `tags` text COLLATE utf8mb4_unicode_ci DEFAULT NULL,
        PRIMARY KEY (`id`)
    ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;'''
    cursor.execute(sql.format(table))

    # process the JSON files in a loop
    arr = []
    l = 0
    total = 0
    for d in data:
        if len(include) and d['collection'] not in include:
            continue
        if len(exclude) and d['collection'] in exclude:
            continue
        res = importData(connect, source, table, d['folder'], d['pattern'],
                         d['dynasty'], d['collection'])
        l = max(l, len(d['collection']))
        if res['count'] is None:
            arr.append({
                'collection': d['collection'],
                'time': res['time'],
                'count': 'failed'
            })
        else:
            arr.append({
                'collection': d['collection'],
                'time': res['time'],
                'count': res['count']
            })
            total += res['count']
    cursor.close()
    connect.close()

    # finally, print summary statistics
    end = msg('All files processed, total records: ' + str(total))
    msg()
    for v in arr:
        count = v['count']
        msg('{}  elapsed  {}  {}'.format(
            v['collection'].ljust(l + l - len(v['collection'])), v['time'],
            count))
    msg('Total elapsed time  ' + getTimeString(begin, end))
    msg()
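msg and getTimeString are not part of this listing; a minimal sketch, assuming msg prints a timestamped line and returns the current time, and that this two-argument getTimeString formats the elapsed interval (the zero-argument utl.getTimeString in the other examples is a different helper):

    import datetime

    def msg(text=''):
        # hypothetical helper: print a timestamped message, return the current time
        now = datetime.datetime.now()
        if text:
            print('[{}] {}'.format(now.strftime('%H:%M:%S'), text))
        return now

    def getTimeString(begin, end):
        # hypothetical helper: render the elapsed time between two datetimes
        return str(end - begin)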
Example #15
 def __init__(self, sknames, datname, Markname):
     super(FailureSampler, self).__init__("Failure", True, 1, len(sknames), sknames)
     self.sknames = sknames
     self.datname = datname
     self.filename = "_".join([utl.RunStr, self.name, Markname, utl.getTimeString()])
Example #16
base = np.array(np.arange(100)/100) + 0.5
counter = np.zeros(base.shape, dtype=int)

InsertionInx = 600 # 1e10
InsertionT = 1e6

AllDf = pd.DataFrame(np.array(np.zeros(5000), dtype=np.float64), columns=["0"])

filename = "results/T17/V07131204_LastRatio_1000000.0_AdaLazyCtnPCSA-2.91-1.5-2.csv"

newdf = pd.read_csv(filename)
newdfn = np.array(newdf)
for i in range(5000):
    AllDf["0"][i] = newdfn[0][i+1]

AllDf.to_csv("cong/"+utl.VersionStr+"/"+datname+"_AdaLazyCtnPCSA_1.5_2_ratio_1e6_"+utl.getTimeString()+".csv")
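The element-wise copy loop above can be replaced by a single vectorized assignment, which also avoids pandas chained-assignment warnings (assuming newdfn[0] has at least 5001 entries, as the loop already requires):

    AllDf["0"] = newdfn[0][1:5001]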