def combineLists():
    """Merge the longList and shortList FRA files into a single CSV.

    Keeps only the name/RA/DEC columns, strips all blanks from the 'NOM'
    column, concatenates both lists and writes the result to outList.

    Relies on module-level names: longList, shortList, outList,
    readFRAFile, csvFree, np.
    """
    keepColumns = ['NOM', 'AD:(J2000)', 'DEC (J2000)']
    print('keepColumns = ', keepColumns)
    csvAll = None
    for iList in [longList, shortList]:
        csv = readFRAFile(iList)
        print('csv.header = ', csv.header)
        # BUG FIX: iterate over a COPY of the header. Removing columns while
        # iterating the live list skips the column right after each removed
        # one, so unwanted columns could survive.
        for keyword in list(csv.header):
            print('checking keyword <' + keyword + '>')
            if keyword not in keepColumns:
                print('keyword <' + keyword +
                      '> not found in keepColumns => removing column')
                csv.removeColumn(keyword)
        print('csv.header = ', csv.header)
        print('csv.data = ', csv.data)

        # Remove all blanks from the object names.
        for i in np.arange(0, csv.size(), 1):
            print('changing ', csv.getData('NOM', i))
            csv.setData('NOM', i, csv.getData('NOM', i).replace(' ', ''))
            print('... to ', csv.getData('NOM', i))

        # BUG FIX: compare against None explicitly. 'if not csvAll' would
        # also be True for an empty (falsy) CSVData object and silently
        # discard previously accumulated data.
        if csvAll is None:
            csvAll = csv
        else:
            csvAll.append(csv)
        print('csvAll.size() = ', csvAll.size())

    csvFree.writeCSVFile(csvAll, outList)
Example #2
0
def changePAs(inFileNameData, morphClassHeader, morphClass, GPAHeader,
              angleMean, angleSDev, percentage, outFileName):
    """Randomly re-draw the GPA of a percentage of PNe per morphological class.

    For each class in morphClass, selects `percentage` percent of the
    matching rows at random and replaces their GPA column with angles drawn
    from a truncated normal distribution centred on angleMean[iM] with
    standard deviation angleSDev[iM] (degrees), wrapped into [0, 180).
    Writes the modified table to outFileName.

    Relies on module-level names: csvFree, stats (scipy), plt, np, randint.
    """
    csv = csvFree.readCSVFile(inFileNameData)
    morphClasses = csv.getData(morphClassHeader)

    for iM in range(len(morphClass)):
        # collect the row indices belonging to this morphological class
        nPNeWithMorphClass = 0
        pNeWithMorphClass = []
        for i in range(csv.size()):
            if morphClasses[i] == morphClass[iM]:
                pNeWithMorphClass.append(i)
                nPNeWithMorphClass += 1

        # draw distinct random positions into pNeWithMorphClass
        randomIDs = []
        nPNeToChangeGPA = int(nPNeWithMorphClass * percentage / 100)
        while len(randomIDs) < nPNeToChangeGPA:
            # BUG FIX: sample over ALL rows of this class. The original used
            # randint(0, nPNeToChangeGPA - 1), which restricted the selection
            # to the first nPNeToChangeGPA objects instead of choosing from
            # all nPNeWithMorphClass of them.
            rand = randint(0, nPNeWithMorphClass - 1)
            if rand not in randomIDs:
                randomIDs.append(rand)
        print('nPNeToChangeGPA = ', nPNeToChangeGPA)
        print('randomIDs = ', len(randomIDs), ': ', randomIDs)

        # for the circular problem rescale sigma and use random range [-1,1]
        a, b = -1., 1.
        mu, sigma = 0., angleSDev[iM] / 90.
        dist = stats.truncnorm((a - mu) / sigma, (b - mu) / sigma,
                               loc=mu,
                               scale=sigma)
        randAngles = angleMean[iM] + (dist.rvs(nPNeToChangeGPA) * 90.)
        # wrap the drawn angles into [0, 180)
        for iR in range(len(randAngles)):
            if randAngles[iR] < 0.:
                randAngles[iR] += 180.
            if randAngles[iR] >= 180.:
                randAngles[iR] -= 180.
        plt.hist(randAngles)
        plt.show()
        randAngles = np.array(randAngles)
        print('mean(randAngles) = ', randAngles.mean(),
              ', sDev(randAngles) = ', randAngles.std())

        # apply the new angles, logging each row before and after the change
        for i in range(len(randomIDs)):
            print(
                'i = ', i, ': HASH ID = ',
                csv.getData("idPNMain",
                            pNeWithMorphClass[randomIDs[i]]), ', mainClass=',
                csv.getData(morphClassHeader, pNeWithMorphClass[randomIDs[i]]),
                ', GPA = ',
                csv.getData(GPAHeader, pNeWithMorphClass[randomIDs[i]]))
            csv.setData(GPAHeader, pNeWithMorphClass[randomIDs[i]],
                        str(randAngles[i]))
            print(
                'i = ', i, ': HASH ID = ',
                csv.getData("idPNMain",
                            pNeWithMorphClass[randomIDs[i]]), ', mainClass=',
                csv.getData(morphClassHeader, pNeWithMorphClass[randomIDs[i]]),
                ', GPA = ',
                csv.getData(GPAHeader, pNeWithMorphClass[randomIDs[i]]))
    csvFree.writeCSVFile(csv, outFileName)
Example #3
0
def checkDistances(inFileName, angDiamFileName, outFileName):
    """Cross-check HASH match distances against angular diameters.

    For every row of inFileName (HASH search results), keeps the match only
    if its distance lies within the in-use major diameter taken from
    angDiamFileName; otherwise the match columns are blanked.  Rows whose
    position falls within 800 arcsec of the hard-coded reference position
    are re-assigned to idPNMain '3238'.  Writes the result to outFileName.

    NOTE(review): reads the module-level name hashSearchFileName rather
    than any parameter for the '_with_header' table — confirm intended.
    """
    hashSearch = csvFree.readCSVFile(
        hashSearchFileName[:hashSearchFileName.rfind('.')] +
        '_with_header.csv')
    hashFound = csvFree.readCSVFile(inFileName)
    angDiams = csvFree.readCSVFile(angDiamFileName)
    csvOut = csvData.CSVData()
    csvOut.header = hashFound.header
    nNotFound = 0
    # reference position used for the 800-arcsec re-assignment below
    ra_3238 = hmsToDeg('17:59:45.20')
    dec_3238 = dmsToDeg('-33:21:13.00')
    toDelete = []
    for i in range(hashFound.size()):
        name = hashFound.getData('id', i)
        idPNMain = hashFound.getData('pndb', i)
        if idPNMain == '':
            # no HASH counterpart: copy the row through unchanged
            csvOut.append(hashFound.getData(i))
        else:
            dist = float(hashFound.getData('dist[arcsec]', i))
            found = False
            for j in range(angDiams.size()):
                if angDiams.getData('idPNMain', j) == idPNMain:
                    if angDiams.getData('InUse', j) == '1':
                        found = True
                        # match farther away than the major diameter =>
                        # treat as no match (name kept, other columns blank)
                        if dist > float(angDiams.getData('MajDiam', j)):
                            csvOut.append([name, '', ''])
                        else:
                            csvOut.append(hashFound.getData(i))
            if not found:
                nNotFound += 1
                print(
                    'Problem: did not find an angular diameter for <' + name +
                    '>: idPNMain = ', idPNMain, ', dist = ', dist)
                # fall back to a fixed 50-arcsec acceptance radius
                if dist > 50.:
                    csvOut.append([name, '', ''])
                else:
                    csvOut.append(hashFound.getData(i))
        # re-assign the most recently appended row to idPNMain 3238 if its
        # catalogued position is within 800 arcsec of the reference position
        for j in range(hashSearch.size()):
            if hashSearch.getData('id',
                                  j) == csvOut.getData('id',
                                                       csvOut.size() - 1):
                ra = hmsToDeg(hashSearch.getData('ra', j))
                dec = dmsToDeg(hashSearch.getData('dec', j))
                angDist = angularDistance(ra, dec, ra_3238, dec_3238)
                print('ra = ', ra, ', dec = ', dec, ': angDist = ', angDist)
                # second distance estimate (degrees -> arcsec) for comparison
                angDistPyAsl = pyasl.getAngDist(ra, dec, ra_3238,
                                                dec_3238) * 3600.
                if angDist < 800:
                    if csvOut.getData('pndb', csvOut.size() - 1) != '3238':
                        toDelete.append([
                            csvOut.getData('pndb',
                                           csvOut.size() - 1), ra, dec,
                            angDist, angDistPyAsl
                        ])
                        csvOut.setData('pndb', csvOut.size() - 1, '3238')
    csvFree.writeCSVFile(csvOut, outFileName)
    # log the ids that were overwritten with '3238'
    for i in toDelete:
        print('toDelete : ', i)
Example #4
0
def randomizePAs(inFileNameData, GPAHeader, outFileName):
    """Overwrite the GPA column with reproducible random angles in [0, 180)."""
    seed(1)  # fixed seed => reproducible sequence of random draws
    table = csvFree.readCSVFile(inFileNameData)
    angles = list(range(180))
    shuffle(angles)
    for row in range(table.size()):
        picked = choice(angles)  #random()
        print('rand = ', picked)
        table.setData(GPAHeader, row, str(int(picked)))
    csvFree.writeCSVFile(table, outFileName)
Example #5
0
def findClosestMatch(hashFoundFile, outFileName):
    """Reduce a HASH search-result table to one (closest) match per object.

    Objects can appear several times in hashFoundFile; for each name the
    row with the smallest 'dist[arcsec]' is kept (the first matching row if
    no distance is given).  The de-duplicated table is written to
    outFileName.
    """
    hashFound = csvFree.readCSVFile(hashFoundFile)
    closestMatch = csvData.CSVData()
    closestMatch.header = hashFound.header

    hashFoundNames = np.array([str(n) for n in hashFound.getData('id')])
    print('hashFoundNames = ', hashFoundNames)
    nDoubles = 0
    for name in hashFoundNames:
        name = str(name)
        idx = np.where(hashFoundNames == name)[0]
        # NOTE(review): this branch looks unreachable — 'name' is drawn from
        # hashFoundNames itself, so at least one index always matches.  It
        # appears to be leftover byte-level encoding debugging.
        if len(idx) == 0:
            arr = bytes(name, 'utf-8')
            arr2 = bytes(hashFoundNames[0], 'utf-8')
            for byte in arr:
                print(byte, end=' ')
            print("\n")
            for byte in arr2:
                print(byte, end=' ')
            print("\n")
            if name == hashFoundNames[0]:
                print('both are the same')
            else:
                print('both are still not the same')
            if str(name) == str(hashFoundNames[0]):
                print('both are the same if using unicode')
            else:
                print('both are still not the same if using unicode')

        print('found name <' + name + '> ', len(idx), ' times in indices ',
              idx)
        lineToAdd = ''
        if len(idx) == 1:
            lineToAdd = hashFound.getData(idx[0])
        else:
            # several rows share this name: pick the one with the smallest
            # distance; rows with an empty distance are skipped
            dists = []
            for i in range(len(idx)):
                dist = hashFound.getData('dist[arcsec]', idx[i])
                if dist != '':
                    dists.append(float(dist))
            if len(dists) > 0:
                # NOTE(review): minId is a position in the FILTERED dists
                # list but is used to index idx — if some rows had empty
                # distances the two can disagree; confirm.
                minId = np.where(dists == np.min(dists))[0][0]
            else:
                minId = 0
            print('idx[', minId, '] = ', idx[minId])
            lineToAdd = hashFound.getData(idx[minId])
        # only add each name once; count duplicates in nDoubles
        closestMatchNames = closestMatch.getData('id')
        if name not in closestMatchNames:
            print('lineToAdd = ', lineToAdd)
            closestMatch.append(lineToAdd)
        else:
            print('name <' + name + '> already in closestMatch')
            nDoubles += 1
    print('nDoubles = ', nDoubles)
    csvFree.writeCSVFile(closestMatch, outFileName)
Example #6
0
def checkCombinedDiscrepancies(combinedDiscrepancies, allHASHobjects, hashCommonNames):
    """Check objects with discrepant coordinates against the HASH database.

    For each row of combinedDiscrepancies, first tries an exact
    (whitespace-insensitive) name match in allHASHobjects.  If the name is
    unknown, searches by position within maxAngularDistance; positional
    matches whose common names do not include the object's name are
    recorded in a discrepancy table written to disrepanciesOutFileName.

    Relies on module-level names: maxAngularDistance,
    disrepanciesOutFileName, getName, getCommonNames, writeCSVFile and the
    coordinate helpers (hmsToDeg, dmsToDeg, degToArcsec,
    angularDistancePyAsl).
    """
    csvOut = csvData.CSVData()
    csvOut.header = ['Name','idPNMain','HASH common names', 'RA FRA', 'RA HASH', 'DEC FRA', 'DEC HASH', 'angular distance [arcsec]']
    csvOut.data = []
    emptyData = ['','','', '', '', '', '', '']
    for i in range(combinedDiscrepancies.size()):
        ra = combinedDiscrepancies.getData('RA',i)
        dec = combinedDiscrepancies.getData('DEC',i)
        raDeg = hmsToDeg(ra)
        decDeg = dmsToDeg(dec)
        name = getName(combinedDiscrepancies,'Name',i)
        print('name = <'+name+'>')
        found = False
        # 1) exact name match (blanks stripped from the HASH name)
        for j in range(allHASHobjects.size()):
            if name == allHASHobjects.getData('Name',j).replace(' ',''):
                hashID = allHASHobjects.getData('idPNMain',j)
                angDist = degToArcsec(angularDistancePyAsl(raDeg,decDeg,hmsToDeg(allHASHobjects.getData('RAJ2000',j)),dmsToDeg(allHASHobjects.getData('DECJ2000',j))))
                commonNames = getCommonNames(hashCommonNames,'idPNMain',hashID)
                print('Name = '+name+': HASH ID = ',hashID,': commonNames = ',commonNames,': ra = ',ra,', RA = ',allHASHobjects.getData('RAJ2000',j),', dec = ',dec,', DEC = ',allHASHobjects.getData('DECJ2000',j),', angDist = ',angDist)
                found = True
        if not found:
            print('ERROR: object with name <'+name+'> not found in HASH')
            # 2) positional search within maxAngularDistance
            for j in range(allHASHobjects.size()):
                angDist = degToArcsec(angularDistancePyAsl(raDeg,decDeg,hmsToDeg(allHASHobjects.getData('RAJ2000',j)),dmsToDeg(allHASHobjects.getData('DECJ2000',j))))
                if angDist < maxAngularDistance:
                    hashID = allHASHobjects.getData('idPNMain',j)
                    commonNames = getCommonNames(hashCommonNames,'idPNMain',hashID)
                    if name in commonNames:
                        print('Name = '+name+': HASH ID = ',hashID,': commonNames = ',commonNames,': ra = ',ra,', RA = ',allHASHobjects.getData('RAJ2000',j),', dec = ',dec,', DEC = ',allHASHobjects.getData('DECJ2000',j),', angDist = ',angDist)
                        found = True
                    else:
                        print('Name = <'+name+'>: object found within ',maxAngularDistance,' arcsec: angDist = ',angDist,': ra = ',ra,', RA = ',allHASHobjects.getData('RAJ2000',j),', dec = ',dec,', DEC = ',allHASHobjects.getData('DECJ2000',j),', HASH name = <'+allHASHobjects.getData('Name',j))
                        # record the candidate in the discrepancy table
                        csvOut.append(emptyData)
                        csvOut.setData('Name',csvOut.size()-1,name)
                        csvOut.setData('idPNMain',csvOut.size()-1,hashID)
                        # BUG FIX: join handles an empty commonNames list; the
                        # original 'commonNames[0]' raised IndexError when an
                        # object had no common names.
                        cNames = ';'.join(commonNames)
                        csvOut.setData('HASH common names',csvOut.size()-1,cNames)
                        csvOut.setData('RA FRA',csvOut.size()-1,ra)
                        csvOut.setData('RA HASH',csvOut.size()-1,allHASHobjects.getData('RAJ2000',j))
                        csvOut.setData('DEC FRA',csvOut.size()-1,dec)
                        csvOut.setData('DEC HASH',csvOut.size()-1,allHASHobjects.getData('DECJ2000',j))
                        csvOut.setData('angular distance [arcsec]',csvOut.size()-1,str(angDist))
    writeCSVFile(csvOut,disrepanciesOutFileName)
Example #7
0
# Split the HASH 'no show' table into three categories by PNstat and write
# one CSV per category next to the input file.
inFileName = '/Users/azuri/daten/uni/HKU/HASH/hash-no-show.csv'

inData = csvFree.readCSVFile(inFileName)
print('inData.size() = ', inData.size())

pnStat = inData.getData('PNstat')
print('pnStat = ', pnStat)

# one output table per category, all sharing the input header
tlps = csvData.CSVData()
tlps.header = inData.header

newCandidates = csvData.CSVData()
newCandidates.header = inData.header

others = csvData.CSVData()
others.header = inData.header

for i in range(inData.size()):
    pnStat = inData.getData('PNstat', i)
    # 'T'/'L'/'P' go to one table, 'c' (presumably new candidates —
    # confirm against the HASH status codes) to another, the rest elsewhere
    if pnStat in ('T', 'L', 'P'):
        target = tlps
    elif pnStat == 'c':
        target = newCandidates
    else:
        target = others
    target.append(inData.getData(i))

csvFree.writeCSVFile(tlps, inFileName[:-4] + '_TLPs.csv')
csvFree.writeCSVFile(newCandidates, inFileName[:-4] + '_newCandidates.csv')
csvFree.writeCSVFile(others, inFileName[:-4] + '_others.csv')
Example #8
0
# Build one Google-contacts row per unique guest in the event registration
# table.  Relies on names defined earlier in the file: names, newLine,
# iContact, csvContactsToImport, csvEventRegistration, contactsToImport.
for i in range(csvEventRegistration.size()):
    # guest contact name: "<First> <Last> Wild Wood Guest"
    name = csvEventRegistration.getData(
        'First Name (Billing)', i) + ' ' + csvEventRegistration.getData(
            'Last Name (Billing)', i) + ' Wild Wood Guest'
    print('name = ', name)
    if name not in names:
        print(name + ' not found')
        names.append(name)
        csvContactsToImport.append(newLine)
        # BUG FIX: 'name' already ends in ' Wild Wood Guest'; the original
        # appended the suffix a second time, producing
        # "... Wild Wood Guest Wild Wood Guest".
        csvContactsToImport.setData('Name', iContact, name)
        csvContactsToImport.setData('Group Membership', iContact,
                                    '* myContacts')
        csvContactsToImport.setData('Phone 1 - Type', iContact, 'Mobile')
        csvContactsToImport.setData(
            'Phone 1 - Value', iContact,
            csvEventRegistration.getData('Phone (Billing)', i))
        iContact += 1
        print('csvContactsToImport.size() = ', csvContactsToImport.size())
    else:
        print(name + ' found')
        print('csvContactsToImport.size() = ', csvContactsToImport.size())

#for i in np.arange(csvContactsToImport.size()-1,-1,-1):
#    print('checking contact ',i)
#    if existingContacts.find('Phone 1 - Value',csvContactsToImport.getData('Phone 1 - Value',i),0)[0] > -1:
#        print('found ',csvContactsToImport.getData('Name',i))
#        csvContactsToImport.removeRow(i)

csvFree.writeCSVFile(csvContactsToImport, contactsToImport)
    data = []
    lines[iLine] = lines[iLine].replace('\r', '')
    lines[iLine] = lines[iLine].replace('\n', '')
    lines[iLine] = lines[iLine].replace('}', '+-')
    lines[iLine] = lines[iLine].replace(' v ', ' v')
    lines[iLine] = lines[iLine].replace(' r ', ' r')
    lines[iLine] = lines[iLine].replace(' b ', ' b')
    lines[iLine] = lines[iLine].replace(' i ', ' i')
    data = lines[iLine].split(' ')
    if len(data) < len(csv2.header):
        data.insert(3, ' ')
    print('line[', iLine, '] = ', lines[iLine])
    print('data = ', data)
    csv2.append(data)
print('csv2.size() = ', csv2.size())
csvFree.writeCSVFile(csv2, f2name[:-3] + 'csv')

# Read the first table, rename its last header column to 'Ref. spectra',
# then strip stray carriage returns from that column's values.
csv1 = csvFree.readCSVFile(f1name)
print('csv1 = ', csv1)
print(csv1.header)
head = csv1.header
last = len(head) - 1
print('head[', last, '] = ', head[last])
head[last] = 'Ref. spectra'
csv1.header = head
print('csv1.header[',
      len(csv1.header) - 1, '] = ', csv1.header[len(csv1.header) - 1])
print(csv1.header)
for i in range(csv1.size()):
    cleaned = csv1.getData('Ref. spectra', i).replace('\r', '')
    csv1.setData('Ref. spectra', i, cleaned)
Example #10
0
def calculateErrors(spectrumFileName, idPNMain, csvLinesFileName, show=False):
    """Estimate 1-sigma errors on the measured line areas of one spectrum.

    For every line of interest with a positive measured area, rebuilds a
    Gaussian from the area and peak flux, then runs 100 Monte-Carlo
    realisations adding noise drawn from the fitted sigma-vs-wavelength
    curve, and stores the standard deviation of the re-measured areas in
    the '<line>e' column.  csvLinesFileName (tab-separated) is updated in
    place.

    Relies on module-level names: imPath, linesOfInterest and the helpers
    getSigmaVsWLen, getImageData, applyVRadCorrection, gauss, getAreaGauss.
    """
    print('spectrumFileName = <' + spectrumFileName + '>')
    wLen, sigmaFit = getSigmaVsWLen(spectrumFileName, show=False)
    print('len(wLen) = ', len(wLen), ', len(sigmaFit) = ', len(sigmaFit))
    flux = getImageData(spectrumFileName, 0)
    print('len(flux) = ', len(flux))
    csvLines = csvFree.readCSVFile(csvLinesFileName, '\t', False)
    # per-file radial velocities, stored one directory above imPath
    csvVRad = csvFree.readCSVFile(
        os.path.join(imPath[:imPath.rfind('/')], 'vrad.csv'))
    print('csvVRad.header = ', csvVRad.header)
    filenames = csvVRad.getData('fileName')
    print('filenames = ', filenames)
    # look up this spectrum's radial velocity by its base file name
    vradPos = csvVRad.find('fileName',
                           spectrumFileName[spectrumFileName.rfind('/') + 1:],
                           0)[0]
    if vradPos < 0:
        print('error: did not find spectrumFileName <' + spectrumFileName +
              '>')
        #STOP
    else:
        vrad = float(csvVRad.getData('vrad', vradPos))
        print('vrad = ', type(vrad), ': ', vrad)
        # shift the wavelength axis by the radial velocity
        wLen = applyVRadCorrection(wLen, vrad)
        print('vradPos = ', vradPos)
        header = csvLines.header
        keys = list(linesOfInterest.keys())
        for i in range(csvLines.size()):
            if idPNMain == csvLines.getData('NAME', i):
                for iLine in range(len(linesOfInterest)):
                    area = float(csvLines.getData(keys[iLine], i))
                    print('key = ', keys[iLine], ': area = ', area)
                    if area > 0.:
                        # x0: catalogue wavelength of this line;
                        # x: +-20 wavelength-unit window used for fitting;
                        # peak taken from a narrow +-3 window around x0
                        x0 = linesOfInterest[keys[iLine]]
                        x = wLen[np.where(np.abs(wLen - x0) < 20.)[0]]
                        thisFlux = flux[np.where(np.abs(wLen - x0) < 3.)[0]]
                        maxFlux = np.max(thisFlux)
                        # sigma from area and peak; 2.13*sqrt(2 ln 2) ~= 2.507
                        # ~= sqrt(2*pi), i.e. sigma ~= area / (peak*sqrt(2*pi))
                        sigma = area / (maxFlux * 2.13 *
                                        np.sqrt(2. * np.log(2.)))
                        print('x = ', x0, ', a = ', maxFlux, ', sigma = ',
                              sigma)
                        thisFlux = flux[np.where(np.abs(wLen - x0) < 20.)[0]]
                        thisSDev = sigmaFit[np.where(
                            np.abs(wLen - x0) < 20.)[0]]
                        gaussFit = gauss(x, maxFlux, x0, sigma)
                        if show:
                            plt.plot(x, thisFlux, label='flux')
                            plt.plot(x, thisSDev, label='sigma')
                            plt.plot(x, gaussFit, label='fit')
                            plt.legend()
                            plt.show()
                        newArea = getAreaGauss(x,
                                               thisFlux,
                                               maxFlux,
                                               x0,
                                               sigma,
                                               addOnBothSidesOfX=0.,
                                               show=False,
                                               save=None)
                        print('old area = ', area, ', newly fitted area = ',
                              newArea)
                        if show:
                            plt.plot(x, gaussFit, label='fit')
                            plt.plot(x, thisFlux, label='flux')
                        # Monte-Carlo: perturb the fitted Gaussian with noise
                        # from the sigma curve and re-measure the area
                        newAreas = []
                        for iRun in range(100):
                            thisFluxWithErr = np.zeros(x.shape,
                                                       dtype='float32')
                            for thisFluxPos in range(x.shape[0]):
                                thisFluxWithErr[thisFluxPos] = gaussFit[
                                    thisFluxPos] + np.random.normal(
                                        0., np.abs(thisSDev[thisFluxPos]))
                            if show:
                                plt.plot(x,
                                         thisFluxWithErr,
                                         label='%d' % (iRun))
                            try:
                                newAreas.append(
                                    getAreaGauss(x,
                                                 thisFluxWithErr,
                                                 maxFlux,
                                                 x0,
                                                 sigma,
                                                 addOnBothSidesOfX=0.,
                                                 show=False,
                                                 save=None)[0])
                            except Exception as e:
                                # fit failed: plot the offending realisation,
                                # keep the original area, then halt (STOP is
                                # an undefined name used as a crash sentinel)
                                plt.plot(x, thisFlux, label='original')
                                plt.plot(x,
                                         thisFluxWithErr,
                                         label='with errors')
                                plt.show()
                                newAreas.append(area)
                                STOP
                        if show:
                            plt.legend()
                            plt.show()
                        newAreas = np.array(newAreas)
                        print('newAreas = ', len(newAreas), ': ', newAreas)
                        if show:
                            plt.hist(newAreas)
                            plt.show()
                        # 1-sigma scatter of the re-measured areas -> error
                        sDev = np.std(newAreas)
                        print('sDev = ', sDev)
                        csvLines.setData(keys[iLine] + 'e', i, '%.3E' % (sDev))
        csvFree.writeCSVFile(csvLines, csvLinesFileName, '\t')
def findPNeNotInHASH():
    """Collect FRA objects without a HASH counterpart and write them out.

    Reads hashOutList, drops every object that already has a HASH id
    ('pndb'), then rebuilds the remaining objects from longList/shortList:
    keeps only the name/coordinate/diameter columns, computes the PNG
    designation from the galactic coordinates, converts diameters from
    arcmin to arcsec, and writes <hashOutList>_not_in_HASH.csv.

    Relies on module-level names: hashOutList, longList, shortList,
    readFRAFile, raDecToLonLat, hmsToDeg, dmsToDeg, csvFree, np.
    """
    csvHASH = csvFree.readCSVFile(hashOutList)
    print('csvAll.size() = ', csvHASH.size())
    print('csvAll.header = ', csvHASH.header)

    # remove rows that DO have a HASH counterpart; delete from the end so
    # the remaining indices stay valid
    found = []
    for i in np.arange(0, csvHASH.size(), 1):
        if csvHASH.getData('pndb', i) != '':
            found.append(i)
    print('found = ', found)
    found.reverse()
    print('found = ', found)
    for i in found:
        csvHASH.removeRow(i)
    print('csvAll.size() = ', csvHASH.size())

    keepColumns = [
        "NOM", "AD:(J2000)", "DEC (J2000)", "Dimension en minute d'arc (')",
        "Coordonnées galactiques"
    ]
    print('keepColumns = ', keepColumns)
    csvOut = None
    for iList in [longList, shortList]:
        csv = readFRAFile(iList)
        # BUG FIX: iterate over a COPY of the header; removing columns while
        # iterating the live list skips the column after each removed one.
        for keyword in list(csv.header):
            print('checking keyword <' + keyword + '>')
            if keyword not in keepColumns:
                print('keyword <' + keyword +
                      '> not found in keepColumns => removing column')
                csv.removeColumn(keyword)
        print('csv.header = ', csv.header)
        print('csv.data = ', csv.data)

        # keep only the objects that survived the pndb filter above
        hashNames = csvHASH.getData('id')
        remove = []
        for i in np.arange(0, csv.size(), 1):
            if csv.getData('NOM', i).replace(' ', '') not in hashNames:
                remove.append(i)
        remove.reverse()
        for i in remove:
            csv.removeRow(i)

        if "Coordonnées galactiques" not in csv.header:
            csv.addColumn("Coordonnées galactiques")
        # build the PNG designation (lll.l+bb.b) from galactic lon/lat
        for i in np.arange(0, csv.size(), 1):
            lon, lat = raDecToLonLat(hmsToDeg(csv.getData("AD:(J2000)", i)),
                                     dmsToDeg(csv.getData("DEC (J2000)", i)))
            print('lon=', lon, ', lat=', lat)
            png = ''
            # zero-pad the longitude to 3 digits before the decimal point
            if lon < 100:
                png += '0'
            if lon < 10:
                png += '0'
            png += str(lon)
            # truncate to one decimal place
            png = png[:png.rfind('.') + 2]
            if lat >= 0.:
                png += '+'
            png += str(lat)
            png = png[:png.rfind('.') + 2]

            # zero-pad the latitude to 2 digits before the decimal point
            if (lat < 10.) and (lat >= 0.):
                png = png[:png.rfind('+') + 1] + '0' + png[png.rfind('+') + 1:]
            if (lat > -10.) and (lat < 0.):
                png = png[:png.rfind('-') + 1] + '0' + png[png.rfind('-') + 1:]

            print('PNG ' + png)
            csv.setData("Coordonnées galactiques", i, png)
        # BUG FIX: compare against None explicitly; 'if not csvOut' would
        # also be True for an empty (falsy) CSVData object and discard the
        # previously accumulated rows.
        if csvOut is None:
            csvOut = csv
        else:
            csvOut.append(csv)

    # convert diameters from arcmin to arcsec
    for i in np.arange(0, csvOut.size(), 1):
        diamStr = csvOut.getData("Dimension en minute d'arc (')",
                                 i).rstrip(' ')
        print('diamStr = <' + diamStr + '>')
        diamStrOut = ''
        if 'x' in diamStr:
            # elliptical object: "A x B" (arcmin)
            diamA = float(diamStr[:diamStr.find(' ')]) * 60.
            diamB = float(diamStr[diamStr.rfind(' ') + 1:]) * 60.
            diamStrOut = '%.1f x %.1f' % (diamA, diamB)
        else:
            diamStrOut = '%.1f' % (float(diamStr) * 60.)
        csvOut.setData("Dimension en minute d'arc (')", i, diamStrOut)

    # write output
    csvFree.writeCSVFile(
        csvOut, hashOutList[:hashOutList.rfind('.')] + '_not_in_HASH.csv')
Example #12
0
# Combine the three Calern candidate lists into one table sorted by idPNMain.
csvA = csvFree.readCSVFile(
    '/Users/azuri/daten/uni/HKU/HASH/ziggy_Calern_PN_candidates_May2019.csv')
csvA.append(
    csvFree.readCSVFile(
        '/Users/azuri/daten/uni/HKU/HASH/ziggy_Calern_PN_candidates_May2019_II.csv'
    ))
csvA.append(
    csvFree.readCSVFile(
        '/Users/azuri/daten/uni/HKU/HASH/ziggy_Calern_PN_candidates_May2019_III.csv'
    ))

# BUG FIX: renamed 'sorted' -> 'sortedIDs' so the builtin sorted() is no
# longer shadowed at module level.
sortedIDs = np.sort(
    csvFree.convertStringVectorToUnsignedVector(csvA.getData('idPNMain')))

print('sorted = ', sortedIDs)

fNameOut = '/Users/azuri/daten/uni/HKU/HASH/ziggy_Calern_PN_candidates_May2019_out.csv'

csvOut = csvData.CSVData()
csvOut.header = csvA.header

# copy the rows over in ascending idPNMain order
for i in np.arange(0, len(sortedIDs), 1):
    found = csvA.find('idPNMain', str(sortedIDs[i]))
    print('i = ', i, ': found = ', found)
    csvOut.append(csvA.getData(found))

print('csvOut.size() = ', csvOut.size())

csvFree.writeCSVFile(csvOut, fNameOut)
Example #13
0
{'fiber': 71, 'centerDistanceX' : -9.85, 'centerDistanceY' : -16.89, 'Halpha6563a': [0.0,0.0,0.0], 'Halpha6563b': [0.0,0.0,0.0], 'OIII5007a': [0.0,0.0,0.0], 'OIII5007b': [0.0,0.0,0.0], 'SII6716a': [6697.64,6347.0,6.761], 'SII6731a' : [6711.26,2755.0,4.769], 'SII6716b' : [0.0,0.0,0.0], 'SII6731b' : [0.0,0.0,0.0], 'ArIII7136' : [0.0,0.0,0.0]},
{'fiber': 72, 'centerDistanceX' : -29.56, 'centerDistanceY' : -33.77, 'Halpha6563a': [0.0,0.0,0.0], 'Halpha6563b': [0.0,0.0,0.0], 'OIII5007a': [0.0,0.0,0.0], 'OIII5007b': [0.0,0.0,0.0], 'SII6716a': [6701.61,2018.0,4.635], 'SII6731a' : [6714.72,1485.0,3.686], 'SII6716b' : [0.0,0.0,0.0], 'SII6731b' : [0.0,0.0,0.0], 'ArIII7136' : [0.0,0.0,0.0]},
{'fiber': 73, 'centerDistanceX' : -29.56, 'centerDistanceY' : 0.0, 'Halpha6563a': [0.0,0.0,0.0], 'Halpha6563b': [0.0,0.0,0.0], 'OIII5007a': [0.0,0.0,0.0], 'OIII5007b': [0.0,0.0,0.0], 'SII6716a': [0.0,0.0,0.0], 'SII6731a' : [0.0,0.0,0.0], 'SII6716b' : [6738.43,9211.0,3.504], 'SII6731b' : [6752.36,5277.0,4.732], 'ArIII7136' : [0.0,0.0,0.0]},
{'fiber': 74, 'centerDistanceX' : -34.49, 'centerDistanceY' : -8.44, 'Halpha6563a': [0.0,0.0,0.0], 'Halpha6563b': [0.0,0.0,0.0], 'OIII5007a': [0.0,0.0,0.0], 'OIII5007b': [0.0,0.0,0.0], 'SII6716a': [6701.5,5182.0,6.961], 'SII6731a' : [6717.77,5797.0,7.725], 'SII6716b' : [0.0,0.0,0.0], 'SII6731b' : [0.0,0.0,0.0], 'ArIII7136' : [0.0,0.0,0.0]},
{'fiber': 75, 'centerDistanceX' : -29.56, 'centerDistanceY' : 16.89, 'Halpha6563a': [0.0,0.0,0.0], 'Halpha6563b': [0.0,0.0,0.0], 'OIII5007a': [0.0,0.0,0.0], 'OIII5007b': [0.0,0.0,0.0], 'SII6716a': [6702.62,2895.0,4.994], 'SII6731a' : [6716.83,5538.0,6.046], 'SII6716b' : [6741.37,722.0,2.374], 'SII6731b' : [6754.7,2541.0,5.616], 'ArIII7136' : [0.0,0.0,0.0]},
{'fiber': 76, 'centerDistanceX' : -34.49, 'centerDistanceY' : 8.44, 'Halpha6563a': [0.0,0.0,0.0], 'Halpha6563b': [0.0,0.0,0.0], 'OIII5007a': [0.0,0.0,0.0], 'OIII5007b': [0.0,0.0,0.0], 'SII6716a': [6698.82,8039.0,6.714], 'SII6731a' : [6713.97,7082.0,5.429], 'SII6716b' : [6740.03,7786.0,6.309], 'SII6731b' : [6755.38,8260.0,7.74], 'ArIII7136' : [0.0,0.0,0.0]},
{'fiber': 77, 'centerDistanceX' : -29.56, 'centerDistanceY' : -16.89, 'Halpha6563a': [0.0,0.0,0.0], 'Halpha6563b': [0.0,0.0,0.0], 'OIII5007a': [0.0,0.0,0.0], 'OIII5007b': [0.0,0.0,0.0], 'SII6716a': [0.0,0.0,0.0], 'SII6731a' : [0.0,0.0,0.0], 'SII6716b' : [0.0,0.0,0.0], 'SII6731b' : [0.0,0.0,0.0], 'ArIII7136' : [0.0,0.0,0.0]},
{'fiber': 78, 'centerDistanceX' : -34.49, 'centerDistanceY' : -25.33, 'Halpha6563a': [0.0,0.0,0.0], 'Halpha6563b': [0.0,0.0,0.0], 'OIII5007a': [0.0,0.0,0.0], 'OIII5007b': [0.0,0.0,0.0], 'SII6716a': [6702.77,1952.0,2.199], 'SII6731a' : [6709.09,1772.0,2.871], 'SII6716b' : [6737.29,6487.0,4.756], 'SII6731b' : [0.0,0.0,0.0], 'ArIII7136' : [0.0,0.0,0.0]},
{'fiber': 79, 'centerDistanceX' : 0.0, 'centerDistanceY' : -16.89, 'Halpha6563a': [0.0,0.0,0.0], 'Halpha6563b': [0.0,0.0,0.0], 'OIII5007a': [0.0,0.0,0.0], 'OIII5007b': [0.0,0.0,0.0], 'SII6716a': [6702.74,1318.0,3.651], 'SII6731a' : [6718.44,2504.0,3.964], 'SII6716b' : [6738.8,1210.0,2.705], 'SII6731b' : [6753.06,823.0,2.725], 'ArIII7136' : [0.0,0.0,0.0]},
{'fiber': 80, 'centerDistanceX' : -64.05, 'centerDistanceY' : 2.81, 'Halpha6563a': [0.0,0.0,0.0], 'Halpha6563b': [0.0,0.0,0.0], 'OIII5007a': [0.0,0.0,0.0], 'OIII5007b': [0.0,0.0,0.0], 'SII6716a': [6705.44,7949.0,7.032], 'SII6731a' : [6721.96,16228.0,8.417], 'SII6716b' : [0.0,0.0,0.0], 'SII6731b' : [0.0,0.0,0.0], 'ArIII7136' : [0.0,0.0,0.0]},
{'fiber': 81, 'centerDistanceX' : -24.64, 'centerDistanceY' : -25.33, 'Halpha6563a': [0.0,0.0,0.0], 'Halpha6563b': [0.0,0.0,0.0], 'OIII5007a': [0.0,0.0,0.0], 'OIII5007b': [0.0,0.0,0.0], 'SII6716a': [0.0,0.0,0.0], 'SII6731a' : [0.0,0.0,0.0], 'SII6716b' : [6732.72,6155.0,7.314], 'SII6731b' : [6747.07,8696.0,10.28], 'ArIII7136' : [0.0,0.0,0.0]},
{'fiber': 82, 'centerDistanceX' : -19.71, 'centerDistanceY' : 0.0, 'Halpha6563a': [0.0,0.0,0.0], 'Halpha6563b': [0.0,0.0,0.0], 'OIII5007a': [0.0,0.0,0.0], 'OIII5007b': [0.0,0.0,0.0], 'SII6716a': [0.0,0.0,0.0], 'SII6731a' : [0.0,0.0,0.0], 'SII6716b' : [0.0,0.0,0.0], 'SII6731b' : [0.0,0.0,0.0], 'ArIII7136' : [0.0,0.0,0.0]},
]

# Flatten the per-fiber measurements in 'tab' into CSV rows: fiber id, the
# two centre offsets, then the first entry of each line's triple
# (presumably the fitted line centre — confirm against the fit code).
lineKeys = ('Halpha6563a', 'Halpha6563b', 'OIII5007a', 'OIII5007b',
            'SII6716a', 'SII6731a', 'SII6716b', 'SII6731b', 'ArIII7136')
for a in tab:
    data = [str(a['fiber']),
            '%.2f' % a['centerDistanceX'],
            '%.2f' % a['centerDistanceY']]
    data.extend('%.2f' % a[key][0] for key in lineKeys)
    csv.append(data)

csvFree.writeCSVFile(csv,'/Users/azuri/daten/uni/HKU/Pa30/pa30_sparsepak_data.csv')
import os
import csvFree,csvData

# Select likely PNe ('L') whose galactic coordinates fall in the bulge
# window (longitude within 10 deg of 0/360, 0 < |latitude| < 10) and save
# them to a '_bulge' CSV next to the input file.
path = '/Users/azuri/daten/uni/HKU/PN alignment'
fNameIn = 'HASH_bipolar_likely_PN.csv'
csv = csvFree.readCSVFile(os.path.join(path, fNameIn))

csvOut = csvData.CSVData()
csvOut.header = csv.header
for i in range(csv.size()):
    if csv.getData('PNstat', i) != 'L':
        continue
    lon = float(csv.getData('Glon', i))
    lat = float(csv.getData('Glat', i))
    inLonWindow = (lon < 10.) or (lon > 350.)
    inLatWindow = (0. < lat < 10.) or (-10. < lat < 0.)
    if inLonWindow and inLatWindow:
        print('lon = ', lon, ', lat = ', lat)
        csvOut.append(csv.getData(i))
csvFree.writeCSVFile(csvOut, os.path.join(path, fNameIn[:-4] + '_bulge.csv'))
# Match R-Z PNG identifiers against my own HASH data and collect one output
# row per bipolar object found in both lists.
# NOTE(review): rzPNGs, rzMorph, rzData, myPNGs, myHashData, csvOut, nFound,
# nNotFound and dataFileOut are defined earlier in the file (outside this
# view).
for i in np.arange(0, len(rzPNGs), 1):
    lineOut = []
    p = rzPNGs[i]
    found = False
    if rzMorph[i] in ['Bipolar']:
        for j in np.arange(0, len(myPNGs), 1):
            if p == myPNGs[j]:
                print('PNG ' + p + ' found in my data')
                found = True
                nFound += 1
                # output row: HASH id, PA, a literal '1' flag, telescope,
                # GPA value (error stripped at the '±'), coordinates,
                # full GPA string, empty trailing column
                lineOut.append(myHashData.getData('idPNMain', j))
                lineOut.append(rzData.getData('PA', i))
                lineOut.append('1')
                lineOut.append(rzData.getData('Telescope', i))
                lineOut.append(rzData.getData('GPA', i).split('±')[0])
                lineOut.append(myHashData.getData('DRAJ2000', j))
                lineOut.append(myHashData.getData('DDECJ2000', j))
                lineOut.append(myHashData.getData('Glon', j))
                lineOut.append(myHashData.getData('Glat', j))
                lineOut.append(rzData.getData('GPA', i))
                lineOut.append('')
                csvOut.append(lineOut)

        # NOTE(review): lineOut is not reset between matches, so if a PNG
        # occurred more than once in myPNGs the second appended row would
        # carry 22 columns — confirm whether duplicates can occur.
        if not found:
            print('PNG ' + p + ' not found in my data')
            nNotFound += 1
print('found ', nFound, ' PNe in my data')
print('did not find ', nNotFound, ' PNe in my data')

csvFree.writeCSVFile(csvOut, dataFileOut)