def BPlocAndRunsArray(CLM, minNumIMI):
    """Tabulate breakpoint locations in CLM and the run of movements
    following each one.

    A breakpoint is any row of CLM with a nonzero column 8.

    Returns an (n, 4) float array with columns:
      0 - row index in CLM of each breakpoint
      1 - number of movements until the next breakpoint (the last entry
          runs to the end of CLM)
      2 - 1.0 when that run exceeds minNumIMI, else 0.0
      3 - the run length when it qualifies (col 2 == 1), else 0.0
    """
    # Vectorized replacement for the project-level find(..., x != 0) loop.
    bp_rows = np.nonzero(CLM[:, 8])[0]
    if bp_rows.size == 0:
        # No breakpoints: the original indexed BPloc[-1, 0] and crashed.
        # Return an empty, correctly-shaped table instead.
        return np.empty((0, 4))

    col_1 = bp_rows.astype(float)
    # Movements until the next breakpoint; the final run extends to the
    # end of the CLM array.
    col_2 = np.empty_like(col_1)
    col_2[:-1] = np.diff(col_1)
    col_2[-1] = CLM.shape[0] - col_1[-1]

    # Mark whether a run of LM meets the minimum requirement for number of IMI.
    col_3 = (col_2 > minNumIMI).astype(float)
    # Number of movements in each PLM series (0 when the run is too short).
    col_4 = np.where(col_3 == 1, col_2, 0.0)

    return np.column_stack((col_1, col_2, col_3, col_4))
def tryShrinking(LM1, dsEMG, fs, min):
    """Shrink low-amplitude movements from the right and refresh their
    col-3 median.

    For each row of LM1 whose column 3 is below `min`, walk the stop index
    backwards in fs/10 steps while the window is longer than 0.6 s and its
    median EMG is still below `min`, then store the final window median in
    column 3 (in place).

    Returns the indices of the rows that were below `min`.
    (`min` shadows the builtin but is kept for caller compatibility.)
    """
    # Vectorized replacement for the project-level find(..., x < min) loop.
    short = np.nonzero(LM1[:, 3] < min)[0]
    for i in range(len(short)):
        start = int(LM1[short[i]][0])
        stop = int(LM1[short[i]][1])
        # BUGFIX: original tested (start - stop)/fs > 0.6, which is always
        # negative for start < stop, so the shrink loop never executed.
        # The window length is (stop - start).
        while (stop - start) / fs > 0.6 and np.median(dsEMG[start:stop]) < min:
            # int(): fs/10 may be fractional and slice indices must be ints.
            stop = int(stop - fs / 10)
        LM1[short[i], 3] = np.median(dsEMG[start:stop])
    return short
def returnRuns(vals, duration):
    """Find runs of consecutive integers in `vals` at least `duration` long.

    `vals` is an increasing sequence of indices; a "run" is a maximal
    stretch where successive values differ by exactly 1.

    Returns [startIndices, stopIndices] where both are arrays and the stop
    values are inclusive.
    """
    vals = np.asarray(vals)
    # 1 marks the start of a new run: where the gap to the previous value
    # is not exactly 1, plus a leading 1 for the first element.
    k = (np.diff(vals) != 1).astype(int)
    k = np.insert(k, 0, 1, axis=0)
    # Cumulative sum turns the start markers into a 1-based run id per element.
    s = np.cumsum(k)
    # Histogram of run ids -> length of each run.
    x = np.histogram(s, np.arange(1, s[-1] + 2))[0]
    # Element index where each run starts (vectorized `find(k, != 0)`).
    idx = np.nonzero(k)[0]
    startIndices = vals[idx[x >= duration]]
    stopIndices = startIndices + x[x >= duration] - 1
    return [startIndices, stopIndices]
def findIndices(data, lowThreshold, highThreshold, minLowDuration, minHighDuration, fs):
    """Locate runs of `data` above highThreshold, delimited by sufficiently
    long runs below lowThreshold.

    Durations are given in seconds and converted to samples via fs.
    Returns an array of [start, stop] index pairs; in the degenerate cases
    (no high samples, or no low samples) returns a 2x2 status array whose
    first row is [0, 0] / [1, 0] respectively, matching the original
    sentinel scheme.
    """
    # zeros, not np.empty: the original could return uninitialized memory
    # in its second row.
    fullRuns = np.zeros([2, 2])
    minLowDuration = minLowDuration * fs
    minHighDuration = minHighDuration * fs
    # Vectorized replacements for the project-level find(...) loops.
    lowValues = np.nonzero(data < lowThreshold)[0]
    highValues = np.nonzero(data > highThreshold)[0]
    if len(highValues) < 1:
        fullRuns[0][0] = 0
        fullRuns[0][1] = 0
        # BUGFIX: the original fell through and crashed on highValues[0].
        return fullRuns
    elif len(lowValues) < 1:
        fullRuns[0][0] = 1
        fullRuns[0][1] = 0
        # BUGFIX: the original fell through and crashed inside returnRuns
        # on the empty lowValues.
        return fullRuns

    lowRuns = returnRuns(lowValues, minLowDuration)
    highRuns = []
    numHighRuns = 0
    searchIndex = highValues[0]
    while searchIndex < data.shape[0]:
        distToNextLowRun, lengthOfNextLowRun = calcDistToRun(lowRuns, searchIndex)
        if distToNextLowRun == -1:
            # We have hit the end; record our data and stop.
            highRuns.append([searchIndex, data.shape[0]])
            searchIndex = data.shape[0]
        else:
            # We have hit another low run: record this high run, then jump
            # past the low run to the next high sample.
            highRuns.append([searchIndex, searchIndex + distToNextLowRun - 1])
            searchIndex = searchIndex + distToNextLowRun + lengthOfNextLowRun
            temp = np.argwhere(highValues > searchIndex)
            if temp.size != 0:
                searchIndex = highValues[int(temp[0])]
        numHighRuns = numHighRuns + 1

    # Quality control: only keep high runs longer than minHighDuration.
    highRuns = np.array(highRuns)
    runLengths = highRuns[:, 1] - highRuns[:, 0]
    ind = np.nonzero(runLengths > minHighDuration)[0]
    fullRuns = highRuns[ind]
    return fullRuns
def shrinkWindow(LM, dsEMG, fs, min):
    """Re-score low-median movements by sliding a half-second window.

    For each row of LM whose column 3 is below `min`, start from the full
    [start, stop) window median; while it stays below `min`, slide a
    fs/2-sample window forward in fs/10 steps and recompute the median,
    stopping when the median reaches `min` or the window hits the original
    stop. The last median is written back to column 3 (in place).

    Returns LM (modified in place).
    (`min` shadows the builtin but is kept for caller compatibility.)
    """
    # Vectorized replacement for the project-level find(..., x < min) loop.
    empty = np.nonzero(LM[:, 3] < min)[0]
    for i in range(len(empty)):
        initstart = int(LM[empty[i], 0])
        initstop = int(LM[empty[i], 1])
        a = np.median(dsEMG[initstart:initstop])
        start = int(LM[empty[i]][0])
        stop = int(start + fs / 2)
        while a < min and stop < initstop:
            a = np.median(dsEMG[start:stop])
            start = int(start + fs / 10)
            stop = int(start + fs / 2)
        LM[empty[i], 3] = a
    return LM
def PlotAbs(clusterName, starData, ions, fileTag='', plotTitle='',
            filterBlends=False, referenceCorrect=False, labelPoints=False,
            useDAOSpec=False, modelAtms=None, pradks=None):
    """Plot abundance vs. line strength for one star, one plot per ion.

    Looks up the measured lines for the star named in starData[0] (with
    atmospheric parameters in starData[1:6]) and hands each ion's points
    to ps.AbSTRPlot, which writes a .png file.

    Parameters:
      fileTag, plotTitle  -- passed through to the plotting helper.
      filterBlends, referenceCorrect -- enable line filtering / solar
          corrections via RC.SortAndFilterLines.
      labelPoints -- annotate each point (with col 2 of each data row,
          the line wavelength).
      useDAOSpec  -- forwarded to the line-measurement routine.
      modelAtms, pradks -- model atmospheres; loaded from disk when None.
    """
    pointLabels = None
    # Lookup the abundance(s) for the passed star, and plot them
    # (into a .png file). One element per plot.
    starName = starData[0]
    starParmTuple = tuple(starData[1:6])

    isGiant = RC.isGiantStar(starParmTuple)

    if isGiant:
        modelPath = k.GiantModelPath
    else:
        modelPath = k.DwarfModelPath

    # Lazy-load the model atmospheres only if the caller did not supply them.
    if modelAtms == None or pradks == None:
        modelFiles = mk.findDataFiles(modelPath)
        modelAtms, pradks = mk.LoadModels(modelFiles)

    # uncorrLines format:
    # {elem.ion:[[Wavelength, Ex.Pot., logGf, eqw, logRW, abund],...]}
    # abDict format:
    # {elem.ion:[abundance mean, ab. std.dev., # lines]}
    abdict, uncorrLines, unusedMin, unusedMax = \
        AB.CalcAbsAndLines(clusterName+' '+starName,
                           starParmTuple, ionList=ions, modelAtms=modelAtms,
                           pradks=pradks, useDAOlines=useDAOSpec)

    if isGiant:
        # Obtain the reference corrections for a giant star - Note: this is
        # badly broken! So, we're just going to use the Solar corrections for
        # now:
        correctDict, referenceLines, lineWeights = \
            RC.GetDwarfCorrections(ionList=ions,
                                   modelAtms=modelAtms,
                                   pradks=pradks,
                                   useDAOSpec=useDAOSpec)
#        correctDict, referenceLines, lineWeights = \
#            GetGiantCorrections(ionList=ions,
#                                modelAtms=modelAtms,
#                                pradks=pradks)
    else:
        # ...or for a dwarf star.
        correctDict, referenceLines, lineWeights = \
            RC.GetDwarfCorrections(ionList=ions,
                                   modelAtms=modelAtms,
                                   pradks=pradks,
                                   useDAOSpec=useDAOSpec)

    for ion in ions:
        # We'll make one plot per ion.
        pointLabels = []
        # NOTE(review): the three names are bound to the same empty list
        # here, but each is only ever re-bound (never mutated), so no
        # aliasing bug results.
        redData = greenData = blueData = []
        if ion not in uncorrLines.keys():
            print('No {0:2.1f} lines for {1}.'.format(ion, starName))
            continue
        if (referenceCorrect or filterBlends) and \
           ion in referenceLines.keys() and ion in correctDict.keys() and \
           ion in lineWeights.keys():
            adjustedLines, dataPoints = \
                RC.SortAndFilterLines(uncorrLines[ion], ion, starParmTuple,
                                      filterBlends=filterBlends,
                                      solarCorrect=referenceCorrect,
                                      solarLines=referenceLines[ion],
                                      solarCorrs=correctDict[ion],
                                      lineWeights=lineWeights[ion])
            # Build the "red" set: corrected points where a correction
            # exists, otherwise the uncorrected [abund, STR, wavelength].
            tempData = []
            for line in uncorrLines[ion]:
                corrLine = u.find(lambda l: l[0] == line[0], dataPoints)
                if corrLine is not None:
                    tempData.append(corrLine)
                else:
                    tempData.append([line[5], el.STR(line[2], line[1], \
                                     starParmTuple[0]),line[0]])
            redData = np.array(tempData)
        else:
            # No corrections requested/available: plot the raw points as
            # [abundance, STR, wavelength] rows.
            dataPoints = np.array([[line[5], el.STR(line[2], line[1], starParmTuple[0]), line[0]] \
                                   for line in uncorrLines[ion]])

        if labelPoints:
            # Label each plotted point with col 2 of its data row (the
            # line wavelength), across all four point sets.
            pointLabels = ['{0:4.3f}'.format(line[2]) for line in dataPoints]
            pointLabels.extend(['{0:4.3f}'.\
                format(line[2]) for line in redData])
            pointLabels.extend(['{0:4.3f}'.\
                format(line[2]) for line in greenData])
            pointLabels.extend(['{0:4.3f}'.\
                format(line[2]) for line in blueData])

        # Detection-limit and curve-of-growth-limit lines for the plot.
        loSlope, loIntercept, rv, pv, err = \
            ps.GetDetectionLimit(starParmTuple, ion, modelAtms=modelAtms,
                                 pradks=pradks)
        hiSlope, hiIntercept, rv, pv, err = \
            ps.GetCoGLimit(starParmTuple, ion, modelAtms=modelAtms,
                           pradks=pradks)

        ps.AbSTRPlot(starName, ion, dataPoints, redSet=redData,
                     greenSet=greenData, blueSet=blueData,
                     lowLimit=(loSlope, loIntercept),
                     hiLimit=(hiSlope, hiIntercept), fileTag=fileTag,
                     plotTitle=plotTitle, labelPoints=pointLabels)
def candidate_lms(rLM, lLM, params):
    """Combine left- and right-leg movement arrays into candidate leg
    movements (CLM) and assign breakpoint codes in column 8.

    Column meanings are taken from the inline comments below (0/1 start
    and stop samples, 2 duration, 3 IMI/combined-count, 6 start minute,
    7 epoch, 8 breakpoint code); confirm against getLMiPod/rOV2.
    Returns the CLM array, or [] when nothing survives filtering.
    """
    CLM = []
    if rLM.size != 0 and lLM.size != 0:
        #print("both full")
        # Reduce left and right LM arrays to exclude too long movements, but add
        # breakpoints to the following movement
        rLM[:, 2] = (rLM[:, 1] - rLM[:, 0]) / params.fs
        lLM[:, 2] = (lLM[:, 1] - lLM[:, 0]) / params.fs

        rLM = rLM[rLM[:, 2] >= 0.5, :]
        lLM = lLM[lLM[:, 2] >= 0.5, :]

        rLM[rLM[:, 2] > params.maxCLMDuration, 8] = 4
        lLM[lLM[:, 2] > params.maxCLMDuration, 8] = 4

        # Combine left and right and sort.
        CLM = rOV2(lLM, rLM, params.fs)
    elif lLM.size != 0:
        print("left is full")
        lLM[:, 2] = (lLM[:, 1] - lLM[:, 0]) / params.fs
        lLM = lLM[lLM[:, 2] > 0.5, :]
        lLM[lLM[0:lLM.shape[0] - 1, 2] > params.maxCLMDuration, 8] = 4 #too long mclm
        CLM = lLM
        # NOTE(review): rLM is empty in this branch, so np.zeros(rLM.shape[0])
        # is almost certainly the wrong length for the new columns;
        # CLM.shape[0] looks intended — verify before relying on this path.
        CLM = np.insert(CLM, 10, values=np.zeros(rLM.shape[0]), axis=1)
        CLM = np.insert(CLM, 11, values=np.zeros(rLM.shape[0]), axis=1)
        CLM = np.insert(CLM, 12, values=np.zeros(rLM.shape[0]), axis=1)
    elif rLM.size != 0:
        print("right is full")
        rLM[:, 2] = (rLM[:, 1] - rLM[:, 0]) / params.fs
        rLM = rLM[rLM[:, 2] > 0.5, :]
        rLM[rLM[0:rLM.shape[0] - 1, 2] > params.maxCLMDuration, 8] = 4 #too long mclm
        CLM = rLM
        # Pad columns 10-12 with zeros (arousal/area placeholders).
        CLM = np.insert(CLM, 10, values=np.zeros(rLM.shape[0]), axis=1)
        CLM = np.insert(CLM, 11, values=np.zeros(rLM.shape[0]), axis=1)
        CLM = np.insert(CLM, 12, values=np.zeros(rLM.shape[0]), axis=1)
    else:
        CLM = []

    # Empty-or-all-zero guard: np.sum([]) is 0, so an empty CLM bails out
    # here. NOTE(review): an all-zero non-empty CLM would also bail.
    if np.sum(CLM) == 0:
        return []

    # if a bilateral movement consists of one or more monolateral movements
    # that are longer than 10 seconds (standard), the entire combined movement
    # is rejected, and a breakpoint is placed on the next movement. When
    # inspecting IMI of CLM later, movements with the bp code 4 will be
    # excluded because IMI is disrupted by a too-long LM
    contains_too_long = find(CLM[:, 8], lambda x: x == 4)
    # NOTE(review): range(len(...) - 1) skips the last flagged row —
    # presumably to avoid indexing past the end with +1; confirm.
    for i in range(len(contains_too_long) - 1):
        CLM[contains_too_long[i] + 1, 8] = 4
    CLM = np.delete(CLM, contains_too_long, 0)

    # add breakpoints if the duration of the combined movement is greater
    # than 15 seconds (standard) or if a bilateral movement is made up of
    # greater than 4 (standard) monolateral movements. These breakpoints
    # are actually added to the subsequent movement, and the un-CLM is
    # removed.
    CLM[:, 2] = (CLM[:, 1] - CLM[:, 0]) / params.fs
    col_9_bclm = find(CLM[0:CLM.shape[0] - 1, 2], lambda x: x > params.maxbCLMDuration)
    # Shift each hit forward one row: the breakpoint goes on the movement
    # AFTER the too-long one.
    for index in range(len(col_9_bclm)):
        col_9_bclm[index] = col_9_bclm[index] + 1
    CLM[col_9_bclm, 8] = 3  # too long bclm
    col_9_cmbd = find(CLM[0:CLM.shape[0] - 1, 3], lambda x: x > params.maxbCLMOverlap)
    for index in range(len(col_9_cmbd)):
        col_9_cmbd[index] = col_9_cmbd[index] + 1
    CLM[col_9_cmbd, 8] = 5  # too many cmbd mvmts

    for value in range(CLM.shape[0]):
        if CLM[value, 3] > params.maxbCLMOverlap or CLM[value, 2] > params.maxbCLMDuration:
            # NOTE(review): np.delete returns a new array and the result is
            # discarded here, so this removal currently has no effect.
            np.delete(CLM, value, 0)

    CLM[:, 3] = np.zeros(CLM.shape[0])  # clear out the #combined mCLM

    # If there are no CLM, return an empty vector
    if CLM.size != 0:
        # Add IMI (col 4), sleep stage (col
        # 6). Col 5 is reserved for PLM marks later
        CLM = getIMI(CLM, params.fs)

        # add breakpoints if IMI < minIMI. This is according to new standards.
        # I believe we also need a breakpoint after this movement, so that a
        # short IMI cannot begin a run of PLM
        if params.iLMbp == 'on':
            CLM[CLM[:, 3] < params.minIMIDuration, 8] = 2 #short IMI
        else:
            CLM = removeShortIMI(CLM, params)

        # Add movement start time in minutes (col 7) and sleep epoch number
        # (col 8)
        CLM[:, 6] = CLM[:, 0] / (params.fs * 60)
        CLM[:, 7] = np.rint(CLM[:, 6] * 2 + 0.5)

        # The area of the leg movement should go here. However, it is not
        # currently well defined in the literature for combined legs, and we
        # have omitted it temporarily
        CLM[:, 9] = np.zeros(CLM.shape[0])
        CLM[:, 10] = np.zeros(CLM.shape[0])
        CLM[:, 11] = np.zeros(CLM.shape[0])

        # 3 add breakpoints if IMI > 90 seconds (standard)
        CLM[CLM[:, 3] > params.maxIMIDuration, 8] = 1

    #print("CLM out of candidate_lms(): ",CLM.shape)
    return CLM
def MakeTeffPlots(clusterName='NGC-0752', starDataList=None, fileTag='',
                  referenceCorrect=False):
    """Plot, per star, the slope of Fe I abundance vs. excitation potential
    over a grid of effective temperatures, saving one .png per star.

    The zero crossing of the slope (found by interpolation below) is
    annotated on the plot as the spectroscopic Teff with +/- ranges taken
    at slopes of +/-0.02.
    """
    # Function plots Teff vs. slope of (XP vs. [Fe/H]) for a range of temperatures.
    # Intended to provide a spectroscopic confirmation/adjustment
    # for photometrically-determined parameters. We assume that at the "correct"
    # temperature, the slope of XP vs. [Fe/H] would be zero.
    if starDataList is None:
        starDataList = STP.GetAllStarParms(clusterName=clusterName)

    # Load both dwarf and giant model grids up front; each star picks one.
    dModelFiles = mk.findDataFiles(k.DwarfModelPath)
    dModelAtms, dPradks = mk.LoadModels(dModelFiles)
    gModelFiles = mk.findDataFiles(k.GiantModelPath)
    gModelAtms, gPradks = mk.LoadModels(gModelFiles)

    # Map Fe I Ab vs. Ex pot.
    ions = [26.0]
    abDict = {}  # NOTE(review): assigned but never used below.

    # Temperature grids: dwarfs 5000-7000 K, giants 4000-6000 K, 25 K steps.
    dTeffRange = np.linspace(5000, 7000, 81)
    gTeffRange = np.linspace(4000, 6000, 81)

    for star in starDataList:
        starName = star[0]
        isGiant = RC.isGiantStar(star[1:])
        if isGiant:
            modelAtms = gModelAtms
            pradks = gPradks
            tRange = gTeffRange
            if referenceCorrect:
                correctDict, referenceLines, lineWeights = \
                    RC.GetGiantCorrections(ionList=ions,
                                           modelAtms=modelAtms,
                                           pradks=pradks)
        else:
            modelAtms = dModelAtms
            pradks = dPradks
            tRange = dTeffRange
            if referenceCorrect:
                correctDict, referenceLines, lineWeights = \
                    RC.GetDwarfCorrections(ionList=ions,
                                           modelAtms=modelAtms,
                                           pradks=pradks)

        # NOTE(review): logG/vTurb/met are assigned but unused below
        # (star[2:5] are passed directly instead).
        logG = star[2]
        vTurb = star[3]
        met = star[4]
        slopes = []
        for Teff in tRange:
            # Recompute line abundances at this trial temperature, keeping
            # the star's other parameters fixed.
            unusedDict, uncorrLines, unusedMin, unusedMax = \
                AB.CalcAbsAndLines(clusterName+' '+starName,
                                   tuple([Teff, star[2], star[3], star[4], star[5]]),\
                                   ionList=ions, modelAtms=modelAtms,
                                   pradks=pradks)
            XPs = []
            Abs = []
            if referenceCorrect:
                adjLines, allLines = RC.SortAndFilterLines(
                    uncorrLines[26.0], 26.0,
                    tuple([Teff, star[2], star[3], star[4], star[5]]),
                    solarCorrect=True,
                    solarLines=referenceLines[26.0],
                    solarCorrs=correctDict[26.0],
                    lineWeights=lineWeights[26.0])
                if len(adjLines) > 10:
                    # Assume 10 Fe I lines needed for nice plots
                    for line in adjLines:
                        # XP isn't returned, so have to do a lookup into the
                        # original list.
                        XPs.append(
                            u.find(lambda l: l[0] == line[2],
                                   uncorrLines[26.0])[1])
                        Abs.append(line[0])
            if len(XPs) == 0 or len(Abs) == 0:
                # Either no corrections, or corrections failed
                XPs = [line[1] for line in uncorrLines[26.0]]
                Abs = [line[5] for line in uncorrLines[26.0]]

            slope, intercept, rVal, pVal, err = sp.stats.linregress(XPs, Abs)
            slopes.append(slope)

        # We have to flip because interp expects the xvalues to be
        # monotomically increasing
        interpTs = np.interp([-0.02, 0., 0.02], slopes[::-1], tRange[::-1])
        # Asymmetric error bars around the zero-slope temperature.
        if interpTs[0] > interpTs[1]:
            minusR = interpTs[1] - interpTs[2]
            plusR = interpTs[0] - interpTs[1]
        else:
            minusR = interpTs[1] - interpTs[0]
            plusR = interpTs[2] - interpTs[1]

        fig = pyplot.figure()
        TeffLabel = r'$T_{{eff}} = {0:4.0f}^{{+{1:4.0f}}}_{{-{2:4.0f}}}$)'.\
            format(interpTs[1],minusR,plusR)
        pyplot.scatter(tRange, slopes, label=TeffLabel)
        pyplot.axhline(0.0, linestyle=':')
        pyplot.legend()
        pyplot.savefig(k.ParmPlotDir + 'Teff/' + star[0] + fileTag +
                       '_FeSlope.png')
        pyplot.close()
def resteaze_dash(left, right, subjectid): np.seterr(divide='ignore', invalid='ignore') params = init_params() output = init_output(subjectid) if os.path.exists(left): leftPath = os.path.splitext(left)[0] #leftFileNames = os.path.splitext(left)[1] ext = os.path.splitext(left)[1] print("leftpath: " + leftPath) else: print("lefterror") if os.path.exists(right): rightPath = os.path.splitext(right)[0] #rightFileNames = os.path.splitext(right)[1] right_ext = os.path.splitext(right)[1] print("rightpath: " + rightPath) else: print("righterror") #add code to read and process multiple csv's """ read data from csv files """ bandData_left = np.genfromtxt(left, delimiter=',', skip_header=1) bandData_right = np.genfromtxt(right, delimiter=',', skip_header=1) """synching signals from two legs""" leftLeg, rightLeg = syncRE(bandData_left, bandData_right) output.up2Down1 = np.ones((leftLeg.shape[0], 1)) ################################################# """ calculating root-mean-square of the acclerometer movements for both legs """ output.lRMS = rms(leftLeg[:, [1, 2, 3]]) output.rRMS = rms(rightLeg[:, [1, 2, 3]]) ################################################# """ compute LM(leg movements) """ rLM = getLMiPod(params, output.rRMS, output.up2Down1) lLM = getLMiPod(params, output.lRMS, output.up2Down1) ################################################# """ Start Patrick's standard scoring stuff """ bCLM = candidate_lms(rLM, lLM, params) Arousal = calculateArousal(bCLM, leftLeg, rightLeg) output.Arousal = Arousal[Arousal[:, 2] == 1, :] bCLM[:, 10] = Arousal[:, 2] PLM, bCLM = periodic_lms(bCLM, params) PLM = np.asarray(PLM) ################################################# """ score sleep/wake """ output.wake = scoreSleep(params.fs, output.lRMS, PLM, bCLM) WASO = calculateWASO_RE( output.wake, params.minSleepTime, params.fs) # NEED TO UPDATE OUTPUT MATRICES AFTER HERE rLM[:, 5] = output.wake[np.array( rLM[:, 0], dtype=int)] + 1 # still using up2down1 format lLM[:, 5] = 
output.wake[np.array( lLM[:, 0], dtype=int)] + 1 # still using up2down1 format bCLM[:, 5] = output.wake[np.array( bCLM[:, 0], dtype=int)] + 1 # still using up2down1 format if len(PLM) != 0: PLM[:, 5] = output.wake[np.array( PLM[:, 0], dtype=int)] + 1 # still using up2down1 format else: PLM = [] ################################################# """ Matrices to output """ output.rLM = rLM output.lLM = lLM output.bCLM = bCLM output.PLM = PLM output.PLMS = [] if len(PLM) != 0: xx = find(PLM[:, 5], lambda x: x == 1) output.PLMS = PLM[xx, :] ################################################# """ Quantitative measures to output """ output.TRT = (output.up2Down1.shape[0] / params.fs) / 60 output.TST = output.TRT - WASO.dur output.sleepEff = output.TST / output.TRT output.WASOnum = WASO.num output.WASOdur = WASO.dur output.WASOavgdur = WASO.avgdur if len(output.PLMS) != 0: #change this to !=0 output.PI = sum(np.array(output.PLMS[:, 8] == 0, dtype=int)) / (output.bCLM.shape[0] - 1) output.PLMSArI = sum(output.PLMS[:, 10]) / ( output.TST / 60) # num plms arousals per hr of sleep if len(PLM) != 0: plm_5 = PLM[:, 5] == 1 plm_8 = PLM[:, 8] == 0 plm_combi = PLM[plm_5 | plm_8, 3] output.avglogPLMSIMI = np.mean(np.log(plm_combi)) output.stdlogPLMSIMI = np.std(np.log(plm_combi)) output.avgPLMSDuration = np.mean(PLM[plm_5, 2]) output.stdPLMSDuration = np.std(PLM[plm_5, 2]) output.PLMhr = PLM.shape[0] / (output.TRT / 60) output.PLMShr = sum(PLM[:, 5] == 1) / (output.TST / 60) output.PLMWhr = sum(PLM[:, 5] == 2) / ((output.TRT - output.TST) / 60) output.PLMnum = len(PLM) output.PLMSnum = sum(PLM[:, 5] == 1) output.PLMWnum = sum(PLM[:, 6] == 2) if len(bCLM) != 0: output.avglogCLMSIMI = np.mean(np.log(bCLM[bCLM[:, 5] == 1, 3])) output.stdlogCLMSIMI = np.std(np.log(bCLM[bCLM[:, 5] == 1, 3])) output.avgCLMSDuration = np.mean(bCLM[bCLM[:, 5] == 1, 2]) output.stdCLMSDuration = np.std(bCLM[bCLM[:, 5] == 1, 2]) output.CLMhr = bCLM.shape[0] / (output.TRT / 60) output.CLMShr = sum(bCLM[:, 5] == 
1) / (output.TST / 60) output.CLMWhr = sum(bCLM[:, 5] == 2) / ((output.TRT - output.TST) / 60) output.CLMnum = bCLM.shape[0] output.CLMSnum = sum(bCLM[:, 5] == 1) output.CLMWnum = sum(bCLM[:, 5] == 2) output.GLM = bCLM[bCLM[:, 4] == 0, :] #print("avglogCLMSIMI ",output.avglogCLMSIMI) #print("stdlogCLMSIMI ",output.stdlogCLMSIMI) #print("avgCLMSDuration ",output.avgCLMSDuration) #print("stdCLMSDuration ",output.stdCLMSDuration) #print("avglogPLMSIMI ",output.avglogPLMSIMI) #print("stdlogPLMSIMI ",output.stdlogPLMSIMI) #print("avglogPLMSIMI ",output.avgPLMSDuration) #print("stdlogPLMSIMI ",output.stdPLMSDuration) #print("CLMhr ",output.CLMhr) #print("CLMShr ",output.CLMShr) #print("CLMWhr ",output.CLMWhr) #print("PLMhr ",output.PLMhr) #print("PLMShr ",output.PLMShr) #print("PLMWhr ",output.PLMWhr) #print("CLMnum ",output.CLMnum) #print("CLMSnum ",output.CLMSnum) #print("CLMWnum ",output.CLMWnum) #print("PLMSArI ",output.PLMSArI) #print("GLM ",output.GLM.shape) #some more stuff needed for report output.intervalSize = 1 output.fs = params.fs output.pos = 2 * np.ones( leftLeg.shape[0]) # ALL BACKSIDE RIGHT NOW SINCE NO POS VECTOR YET output.ArI = output.Arousal.shape[0] / (output.TST / 60) output.PLMSI = output.PLMShr leftStart = datetime.datetime.fromtimestamp(leftLeg[0, 3] / 1000) leftStart = [ leftStart.year, leftStart.month, leftStart.day, leftStart.hour, leftStart.minute, leftStart.second ] output.sleepStart = np.mod(leftStart[3] * 60 + leftStart[4] - 12 * 60, 24 * 60) output.sleepEnd = output.sleepStart + output.TRT output.date = str(leftStart[1]) + '/' + str(leftStart[2]) + '/' + str( np.mod(leftStart[0], 100)) output.SQ = 0 output.SQhrs = [] nightData = output fileID = open(output.fileName + '.txt', 'w') fileID.write('PatientID: ' + output.fileName + '\n') fileID.write('Record Start: ' + sleepText(nightData.sleepStart) + '\n') fileID.write('Record Stop: ' + sleepText(nightData.sleepEnd) + '\n') fileID.write('Sleep Efficiency: ' + 
'{:.2f}'.format(nightData.sleepEff) + '\n') fileID.write('Total Sleep Time: ' + '{:.2f}'.format(nightData.TST) + '\n') fileID.write('PLMS/hr: ' + '{:.2f}'.format(nightData.PLMShr) + '\n') fileID.write('Arousals/hr: ' + '{:.2f}'.format(nightData.ArI) + '\n') fileID.write('Sleep Quality: ' + '{:.2f}'.format(nightData.SQ) + '\n') fileID.write('WASO: ' + '{:.2f}'.format(nightData.WASOdur) + '\n') fileID.write('Sleep_quality_per_hr :') for i in range(len(nightData.SQhrs)): fileID.write(fileID + ': ' + '{:.2f}'.format(nightData.SQhrs[0][i]) + '\n') fileID.close() print('PatientID: ' + output.fileName) print('Record Start: ' + sleepText(nightData.sleepStart)) print('Record Stop: ' + sleepText(nightData.sleepEnd)) print('Sleep Efficiency: ' + '{:.2f}'.format(nightData.sleepEff)) print('Total Sleep Time: ' + '{:.2f}'.format(nightData.TST)) print('PLMS/hr: ' + '{:.2f}'.format(nightData.PLMShr)) print('Arousals/hr: ' + '{:.2f}'.format(nightData.ArI)) print('Sleep Quality: ' + '{:.2f}'.format(nightData.SQ)) print('WASO: ' + '{:.2f}'.format(nightData.WASOdur)) print('Sleep_quality_per_hr :') for i in range(len(nightData.SQhrs)): print(fileID + ': ' + '{:.2f}'.format(nightData.SQhrs[0][i]) + '\n') """