Esempio n. 1
0
def findInsidePN():
    """Classify each IPHAS candidate as inside/outside its closest HASH PN.

    For every row of the IPHAS input catalogue the closest PN in PNMain is
    located via getMinDistPN; the candidate counts as 'inside' when its
    distance is less than half the PN's in-use major diameter.  Results go
    to ``minDistFile`` as CSV with an 'inside' status column of
    'isInside' / 'notInside' / 'unsure' (no in-use diameter found).

    Uses the module-level file names hashAngDiamFileName,
    hashPNMainFileName, inFileName and minDistFile.
    """
    angDiams = csvFree.readCSVFile(hashAngDiamFileName)
    PNMain = csvFree.readCSVFile(hashPNMainFileName)
    csvIphas = csvFree.readCSVFile(inFileName)

    with open(minDistFile, 'w') as f:
        f.write('iphasRow,RAJ2000,DECJ2000,closestHASHPNid,distance,inside\n')
        for i in range(csvIphas.size()):
            # assemble sexagesimal coordinate strings from the split columns
            ra = ':'.join([csvIphas.getData('RA_H', i),
                           csvIphas.getData('RA_M', i),
                           csvIphas.getData('RA_S', i)])
            dec = ':'.join([csvIphas.getData('DEC_D', i),
                            csvIphas.getData('DEC_M', i),
                            csvIphas.getData('DEC_S', i)])
            # renamed from `id` to avoid shadowing the builtin
            closestId, dist = getMinDistPN(ra, dec, PNMain)
            print('i = ', i, ': closest PN id = ', closestId, ': dist = ', dist)
            majDiamFound = False
            inside = False
            for j in range(angDiams.size()):
                if (angDiams.getData('idPNMain', j) == closestId
                        and angDiams.getData('InUse', j) == '1'):
                    majDiamFound = True
                    # inside when closer than the semi-major axis (MajDiam/2)
                    if dist < (float(angDiams.getData('MajDiam', j)) / 2.):
                        inside = True
                        print('i = ', i, ' is inside idPNMain ', closestId)
                    else:
                        print('i = ', i, ' is NOT inside idPNMain ', closestId)
            f.write('%d,%s,%s,%s,%d,' % (i, ra, dec, closestId, int(dist)))
            if majDiamFound and inside:
                f.write('isInside')
            elif majDiamFound and not inside:
                f.write('notInside')
            else:
                f.write('unsure')
            f.write('\n')
Esempio n. 2
0
def checkDistances(inFileName, angDiamFileName, outFileName):
    """Filter HASH matches by angular diameter and re-assign close PN-3238 hits.

    Reads the search-result table (inFileName) and the angular-diameter
    table (angDiamFileName); matches farther away than the PN's in-use
    major diameter keep their name but lose the match.  Rows whose search
    position lies within 800 arcsec of PN idPNMain 3238 are forced to
    pndb = '3238'.  The result is written to outFileName.

    NOTE(review): also reads the module-level `hashSearchFileName`
    despite taking file names as parameters — confirm this is intended.
    """
    hashSearch = csvFree.readCSVFile(
        hashSearchFileName[:hashSearchFileName.rfind('.')] +
        '_with_header.csv')
    hashFound = csvFree.readCSVFile(inFileName)
    angDiams = csvFree.readCSVFile(angDiamFileName)
    csvOut = csvData.CSVData()
    csvOut.header = hashFound.header
    nNotFound = 0
    # reference position of PN idPNMain 3238
    ra_3238 = hmsToDeg('17:59:45.20')
    dec_3238 = dmsToDeg('-33:21:13.00')
    toDelete = []
    for i in range(hashFound.size()):
        name = hashFound.getData('id', i)
        idPNMain = hashFound.getData('pndb', i)
        if idPNMain == '':
            # no HASH counterpart was matched — keep the row unchanged
            csvOut.append(hashFound.getData(i))
        else:
            dist = float(hashFound.getData('dist[arcsec]', i))
            found = False
            # NOTE(review): if several InUse diameter rows exist for this
            # idPNMain, one output row is appended per such entry —
            # verify the single-InUse-row assumption holds for the data
            for j in range(angDiams.size()):
                if angDiams.getData('idPNMain', j) == idPNMain:
                    if angDiams.getData('InUse', j) == '1':
                        found = True
                        if dist > float(angDiams.getData('MajDiam', j)):
                            # too far away: keep the name, clear the match
                            csvOut.append([name, '', ''])
                        else:
                            csvOut.append(hashFound.getData(i))
            if not found:
                nNotFound += 1
                print(
                    'Problem: did not find an angular diameter for <' + name +
                    '>: idPNMain = ', idPNMain, ', dist = ', dist)
                # fallback threshold of 50 arcsec when no diameter is known
                if dist > 50.:
                    csvOut.append([name, '', ''])
                else:
                    csvOut.append(hashFound.getData(i))
        # check the row just appended against the search coordinates and
        # re-assign it to PN 3238 when it lies within 800 arcsec
        for j in range(hashSearch.size()):
            if hashSearch.getData('id',
                                  j) == csvOut.getData('id',
                                                       csvOut.size() - 1):
                ra = hmsToDeg(hashSearch.getData('ra', j))
                dec = dmsToDeg(hashSearch.getData('dec', j))
                angDist = angularDistance(ra, dec, ra_3238, dec_3238)
                print('ra = ', ra, ', dec = ', dec, ': angDist = ', angDist)
                # cross-check distance via PyAstronomy (degrees -> arcsec)
                angDistPyAsl = pyasl.getAngDist(ra, dec, ra_3238,
                                                dec_3238) * 3600.
                if angDist < 800:
                    if csvOut.getData('pndb', csvOut.size() - 1) != '3238':
                        # remember the overridden assignment for the report
                        toDelete.append([
                            csvOut.getData('pndb',
                                           csvOut.size() - 1), ra, dec,
                            angDist, angDistPyAsl
                        ])
                        csvOut.setData('pndb', csvOut.size() - 1, '3238')
    csvFree.writeCSVFile(csvOut, outFileName)
    for i in toDelete:
        print('toDelete : ', i)
Esempio n. 3
0
def combineDiscrepancies():
    """Concatenate both discrepancy tables into one Name/RA/DEC CSV object."""
    csvOut = csvData.CSVData()
    csvOut.header = ['Name','RA','DEC']

    # read table A first, then table B, preserving row order
    for table in (readCSVFile(discrepanciesAFile),
                  readCSVFile(discrepanciesBFile)):
        for row in range(table.size()):
            csvOut.append([table.getData('NOM', row),
                           table.getData('AD:(J2000)', row),
                           table.getData('DEC (J2000)', row)])
    print('csvOut.size() = ',csvOut.size())
    return csvOut
Esempio n. 4
0
def liuClustersMean():
    """Count PNe whose coordinates fall within 3 sigma of a cluster mean.

    Reads the cluster means/sigmas (meanFileName) and the full PN list
    (allPNeFileName), prints every (PN, cluster) match and returns the
    total number of matches found.
    """
    nFound = 0
    means = csvFree.readCSVFile(meanFileName,',',False)
    print('means.header = ',means.header)
    allPNe = csvFree.readCSVFile(allPNeFileName,',',False)
    print('allPNe.header = ',allPNe.header)
    # pre-compute each cluster's 3-sigma RA/DEC window once instead of
    # re-parsing the strings for every PN (original was O(N*M) parses)
    windows = []
    for iSC in range(means.size()):
        raMean = float(means.getData('mean(ra)', iSC))
        raHalf = 3. * float(means.getData('sigma(ra)', iSC))
        decMean = float(means.getData('mean(dec)', iSC))
        decHalf = 3. * float(means.getData('sigma(dec)', iSC))
        windows.append((iSC, raMean - raHalf, raMean + raHalf,
                        decMean - decHalf, decMean + decHalf))
    for iPN in range(allPNe.size()):
        ra = float(allPNe.getData('DRAJ2000', iPN))
        dec = float(allPNe.getData('DDECJ2000', iPN))
        for iSC, raLo, raHi, decLo, decHi in windows:
            # strict inequalities, exactly as in the original comparison
            if raLo < ra < raHi and decLo < dec < decHi:
                print(allPNe.getData('idPNMain', iPN), ' found in ',
                      means.getData('scName', iSC))
                nFound += 1
    return nFound
Esempio n. 5
0
def getIdFromSearchFile(targetName, hashFoundFile):
    """Return the HASH 'pndb' id for targetName from a search-result CSV.

    Names are compared after stripping '_', '-' and ' ' and truncating at
    the first 'BLUE' / 'RED' / 'cspn' suffix.  Returns -1 when no row of
    hashFoundFile matches.
    """
    def normalize(name):
        # strip separators, then cut at any BLUE/RED/cspn arm suffix
        # (checked in this order, on the progressively truncated string,
        # exactly as the original duplicated code did)
        name = name.replace('_', '').replace('-', '').replace(' ', '')
        for marker in ('BLUE', 'RED', 'cspn'):
            if marker in name:
                name = name[:name.find(marker)]
        return name

    targetN = normalize(targetName)
    print('targetName = <' + targetName + '>')
    dat = csvFree.readCSVFile(hashFoundFile)
    for i in range(dat.size()):
        if normalize(dat.getData('id', i)) == targetN:
            return dat.getData('pndb', i)
    return -1
Esempio n. 6
0
def changePAs(inFileNameData, morphClassHeader, morphClass, GPAHeader,
              angleMean, angleSDev, percentage, outFileName):
    """Re-draw the GPA of a random subset of PNe per morphological class.

    For each class in morphClass, `percentage` percent of the PNe of that
    class are chosen at random and their GPA column is overwritten with
    angles drawn from a truncated normal (mean angleMean[iM], sigma
    angleSDev[iM]), wrapped into [0, 180).  The modified table is written
    to outFileName.
    """
    csv = csvFree.readCSVFile(inFileNameData)
    morphClasses = csv.getData(morphClassHeader)

    for iM in range(len(morphClass)):
        # collect the row indices of all PNe belonging to this class
        nPNeWithMorphClass = 0
        pNeWithMorphClass = []
        for i in range(csv.size()):
            if morphClasses[i] == morphClass[iM]:
                pNeWithMorphClass.append(i)
                nPNeWithMorphClass += 1

        randomIDs = []
        nPNeToChangeGPA = int(nPNeWithMorphClass * percentage / 100)
        while len(randomIDs) < nPNeToChangeGPA:
            # BUGFIX: sample from the whole class membership; the original
            # randint(0, nPNeToChangeGPA - 1) could only ever select the
            # first nPNeToChangeGPA PNe of the class
            rand = randint(0, nPNeWithMorphClass - 1)
            if rand not in randomIDs:
                randomIDs.append(rand)
        print('nPNeToChangeGPA = ', nPNeToChangeGPA)
        print('randomIDs = ', len(randomIDs), ': ', randomIDs)

        # for the circular problem rescale sigma and use random range [-1,1]
        a, b = -1., 1.
        mu, sigma = 0., angleSDev[iM] / 90.
        dist = stats.truncnorm((a - mu) / sigma, (b - mu) / sigma,
                               loc=mu,
                               scale=sigma)
        randAngles = angleMean[iM] + (dist.rvs(nPNeToChangeGPA) * 90.)
        # wrap the drawn angles into [0, 180)
        for iR in range(len(randAngles)):
            if randAngles[iR] < 0.:
                randAngles[iR] += 180.
            if randAngles[iR] >= 180.:
                randAngles[iR] -= 180.
        plt.hist(randAngles)
        plt.show()
        randAngles = np.array(randAngles)
        print('mean(randAngles) = ', randAngles.mean(),
              ', sDev(randAngles) = ', randAngles.std())

        # overwrite the GPA of the selected PNe, logging before and after
        for i in range(len(randomIDs)):
            print(
                'i = ', i, ': HASH ID = ',
                csv.getData("idPNMain",
                            pNeWithMorphClass[randomIDs[i]]), ', mainClass=',
                csv.getData(morphClassHeader, pNeWithMorphClass[randomIDs[i]]),
                ', GPA = ',
                csv.getData(GPAHeader, pNeWithMorphClass[randomIDs[i]]))
            csv.setData(GPAHeader, pNeWithMorphClass[randomIDs[i]],
                        str(randAngles[i]))
            print(
                'i = ', i, ': HASH ID = ',
                csv.getData("idPNMain",
                            pNeWithMorphClass[randomIDs[i]]), ', mainClass=',
                csv.getData(morphClassHeader, pNeWithMorphClass[randomIDs[i]]),
                ', GPA = ',
                csv.getData(GPAHeader, pNeWithMorphClass[randomIDs[i]]))
    csvFree.writeCSVFile(csv, outFileName)
Esempio n. 7
0
def randomizePAs(inFileNameData, GPAHeader, outFileName):
    """Overwrite every row's GPA column with a random integer angle in [0, 180).

    The random stream is seeded (seed(1)), so repeated runs produce the
    same angles.
    """
    seed(1)
    csv = csvFree.readCSVFile(inFileNameData)
    # shuffle is kept before the draws so the seeded random sequence is
    # consumed exactly as before
    angles = list(range(180))
    shuffle(angles)
    for row in range(csv.size()):
        rand = choice(angles)  #random()
        print('rand = ', rand)
        csv.setData(GPAHeader, row, str(int(rand)))
    csvFree.writeCSVFile(csv, outFileName)
Esempio n. 8
0
def getIds(filename):
    """Print the unique 'pndb' ids from filename as a comma-separated list.

    First-appearance order is preserved; a trailing comma and a blank line
    follow the list (matching the original output format).
    """
    lis = csvFree.readCSVFile(filename)
    # dict.fromkeys dedupes in O(n) while keeping insertion order
    # (original used an O(n^2) membership scan on a list)
    for pndb in dict.fromkeys(lis.getData('pndb')):
        print(pndb, end=',')
    print('\n')
Esempio n. 9
0
def findClosestMatch(hashFoundFile, outFileName):
    """Reduce a HASH search-result table to one row per unique name.

    When a name occurs several times, the row with the smallest
    'dist[arcsec]' (ignoring rows with an empty distance) is kept; if no
    row of the group has a distance, the first occurrence is kept.
    Duplicate names are counted and reported as doubles.
    """
    hashFound = csvFree.readCSVFile(hashFoundFile)
    closestMatch = csvData.CSVData()
    closestMatch.header = hashFound.header

    hashFoundNames = np.array([str(n) for n in hashFound.getData('id')])
    print('hashFoundNames = ', hashFoundNames)
    nDoubles = 0
    for name in hashFoundNames:
        name = str(name)
        idx = np.where(hashFoundNames == name)[0]
        # (the original byte-comparison debug block for len(idx) == 0 was
        # unreachable — `name` always matches at least its own row — and
        # has been removed)
        print('found name <' + name + '> ', len(idx), ' times in indices ',
              idx)
        lineToAdd = ''
        if len(idx) == 1:
            lineToAdd = hashFound.getData(idx[0])
        else:
            # BUGFIX: keep (dist, row) pairs together; the original built
            # a dists list that skipped empty entries and then used the
            # position of the minimum as an index into idx, which drifts
            # out of alignment whenever some rows have an empty distance
            candidates = []
            for rowIdx in idx:
                dist = hashFound.getData('dist[arcsec]', rowIdx)
                if dist != '':
                    candidates.append((float(dist), rowIdx))
            if candidates:
                bestRow = min(candidates)[1]
            else:
                bestRow = idx[0]
            print('keeping row ', bestRow)
            lineToAdd = hashFound.getData(bestRow)
        closestMatchNames = closestMatch.getData('id')
        if name not in closestMatchNames:
            print('lineToAdd = ', lineToAdd)
            closestMatch.append(lineToAdd)
        else:
            print('name <' + name + '> already in closestMatch')
            nDoubles += 1
    print('nDoubles = ', nDoubles)
    csvFree.writeCSVFile(closestMatch, outFileName)
def readFRAFile(fname):
    """Read a French-Amateurs CSV and repair header artefacts.

    Removes the UTF-8 BOM from the first column name and the stray
    carriage return from the last column name, when present.
    """
    csv = csvFree.readCSVFile(fname,',',False)
    if csv.header[0] == '\ufeffNOM':
        csv.renameColumn('\ufeffNOM','NOM')
        print('renamed column "\ufeffNOM" to <'+csv.header[0]+'>')
    if csv.header[-1] == 'HASH ID\r':
        csv.renameColumn('HASH ID\r','HASH ID')
        print('renamed column "HASH ID\r" to <'+csv.header[-1]+'>')
    print('csv = ',csv)
    print('csv.header = ',csv.header)
    return csv
Esempio n. 11
0
def makeHashFile():
    """Write a '.hash' companion file (PNG,RA,DEC per row) for newPNeFile."""
    inputData = csvFree.readCSVFile(newPNeFile)
    hashName = newPNeFile[:newPNeFile.rfind('.')] + '.hash'
    with open(hashName, 'w') as fh:
        for row in range(inputData.size()):
            dra = float(inputData.getData('DRAJ2000', row))
            ddec = float(inputData.getData('DDECJ2000', row))
            lon, lat = raDecToLonLat(dra, ddec)
            ra = degToHMS(dra)
            dec = degToDMS(ddec)
            print('lon = ', lon, ', lat = ', lat, ', dra = ', dra, ', ddec = ',
                  ddec, ', ra = ', ra, ', dec = ', dec)
            png = getPNG(lon, lat)
            fh.write(png + ',' + ra + ',' + dec + '\n')
def getIDfromName(inputFileName):
    """Resolve a list of PN names (one per line, '#' comment lines allowed)
    to HASH idPNMain values.

    Lines starting with 'PNG' are matched against the PNG designation, all
    others against the (blank-stripped) common name.  Comparison is
    case-insensitive and ignores '-' and '+'.  When several catalogue rows
    match, the LAST one wins (preserving the original scan order).

    Raises ValueError when a name cannot be resolved (the original crashed
    deliberately via the undefined name STOP).
    """
    def canonical(s):
        # comparison key: lower-cased, with '-', '+' and 'J' stripped.
        # lower() runs first, so the 'J' removal is effectively a no-op —
        # kept to mirror the comparison used in the original code exactly.
        return s.lower().replace('-', '').replace('+', '').replace('J', '')

    with open(inputFileName, 'r') as f:
        names = f.readlines()
    names = [name.rstrip('\n') for name in names]
    allHASHObjectsFName = '/Users/azuri//daten/uni/HKU/observing/all_HASH_objects_full_table.csv'
    allHASHObjects = csvFree.readCSVFile(allHASHObjectsFName)
    allPNeNames = allHASHObjects.getData('Name')
    allPNeNames = [name.replace(' ', '') for name in allPNeNames]
    allPNGNames = allHASHObjects.getData('PNG')
    ids = []
    for name in names:
        if name == '':
            # skip blank lines (the original raised IndexError on name[0])
            continue
        pos = -1
        if name[0] != '#':
            if name[0:3] == 'PNG':
                print('checking PNG ' + name[3:])
                key = canonical(name[3:])
                # scan to the end so the last matching row wins
                for iPos in range(allHASHObjects.size()):
                    if key == canonical(allPNGNames[iPos]):
                        pos = iPos
            else:
                key = canonical(name)
                for iPos in range(allHASHObjects.size()):
                    if key == canonical(allPNeNames[iPos]):
                        pos = iPos
            if pos == -1:
                print('ERROR: could not find name <' + name + '>')
                raise ValueError('could not find name <' + name + '>')
            else:
                ids.append(allHASHObjects.getData('idPNMain', pos))
    return ids
Esempio n. 13
0
def createHashInFile():
    """Write the HASH search input file: one 'row,ra,dec' line per IPHAS source."""
    csvIphas = csvFree.readCSVFile(inFileName)
    # only referenced by the commented-out distance filter below
    prevRA = '10:10:10.0'
    prevDEC = '10:10:10.0'
    with open(hashInFileName, 'w') as f:
        for row in range(csvIphas.size()):
            # build sexagesimal strings from the split H/M/S and D/M/S columns
            ra = ':'.join([csvIphas.getData('RA_H', row),
                           csvIphas.getData('RA_M', row),
                           csvIphas.getData('RA_S', row)])
            dec = ':'.join([csvIphas.getData('DEC_D', row),
                            csvIphas.getData('DEC_M', row),
                            csvIphas.getData('DEC_S', row)])
            print(row, ': ra = ', ra, ', dec = ', dec)
            #            dist = angularDistance(hmsToDeg(ra), dmsToDeg(dec), hmsToDeg(prevRA), dmsToDeg(prevDEC))
            #            distA = pyasl.getAngDist(hmsToDeg(ra), dmsToDeg(dec), hmsToDeg(prevRA), dmsToDeg(prevDEC))
            #            if dist > minDist:
            f.write(str(row) + ',' + ra + ',' + dec + '\n')  #+','+str(dist)
Esempio n. 14
0
def checkHashOutFileRadii():
    """Report the in-use angular diameter and extent of every matched PN.

    NOTE(review): the whole body is disabled behind `if False:` — the
    function is currently a no-op.  When enabled it would also depend on
    the module-level `angDiams` and `angExt` tables, which are not
    defined inside this function.
    """
    if False:
        csvHashList = csvFree.readCSVFile(hashOutFileName)
        for i in range(csvHashList.size()):
            idPNMain = csvHashList.getData('pndb', i)
            diam = 0.
            ext = 0.
            # look up the in-use major diameter for this PN
            for j in range(angDiams.size()):
                if angDiams.getData('idPNMain', j) == idPNMain:
                    if angDiams.getData('InUse', j) == '1':
                        diam = float(angDiams.getData('MajDiam', j))
            # look up the in-use major extent for this PN
            for j in range(angExt.size()):
                if angExt.getData('idPNMain', j) == idPNMain:
                    if angExt.getData('InUse', j) == '1':
                        ext = float(angExt.getData('MajExt', j))
            print('idPNMain = ', idPNMain, ': diam = ', diam, ', ext = ', ext)
Esempio n. 15
0
def groupNewPNeAndAverageCoordinates():
    """Group nearby new-PN detections and write their mean coordinates.

    Detections from minDistFile that are not inside a known PN and are
    farther than `minDist` from the closest one are clustered: all later
    rows within `minDist` (angular distance) of the seed row join its
    group, and the group's mean RA/DEC (degrees) is written to newPNeFile.

    NOTE(review): `minDist` (module level) doubles as both the minimum
    distance from a known PN and the grouping radius — confirm intended.
    NOTE(review): `group` first receives the row index `i`, later the
    'iphasRow' values of the grouped rows; only len(group) is used, so
    this inconsistency is currently harmless.
    """
    minDists = csvFree.readCSVFile(minDistFile)

    idsAlreadyGrouped = []
    with open(newPNeFile, 'w') as f:
        f.write('DRAJ2000,DDECJ2000\n')
        for i in range(minDists.size()):
            group = []
            dras = []
            ddecs = []
            # candidate seed: not inside a known PN and far from all of them
            if (minDists.getData('inside', i) != 'isInside') and (float(
                    minDists.getData('distance', i)) > minDist):
                if minDists.getData('iphasRow', i) not in idsAlreadyGrouped:
                    idsAlreadyGrouped.append(minDists.getData('iphasRow', i))
                    ra = minDists.getData('RAJ2000', i)
                    dec = minDists.getData('DECJ2000', i)
                    dra = hmsToDeg(ra)
                    ddec = dmsToDeg(dec)
                    group.append(i)
                    dras.append(dra)
                    ddecs.append(ddec)
                    # absorb all later, still-ungrouped rows close to the seed
                    for j in np.arange(i + 1, minDists.size(), 1):
                        if minDists.getData('iphasRow',
                                            j) not in idsAlreadyGrouped:
                            ra = minDists.getData('RAJ2000', j)
                            dec = minDists.getData('DECJ2000', j)
                            thisdra = hmsToDeg(ra)
                            thisddec = dmsToDeg(dec)
                            if angularDistance(dra, ddec, thisdra,
                                               thisddec) < minDist:
                                group.append(minDists.getData('iphasRow', j))
                                idsAlreadyGrouped.append(
                                    minDists.getData('iphasRow', j))
                                dras.append(thisdra)
                                ddecs.append(thisddec)
                    print('found ', len(group),
                          ' different coordinates for apparently the same PN')
                    # one output row per group: the mean position
                    f.write('%.5f,%.5f\n' % (np.mean(dras), np.mean(ddecs)))
Esempio n. 16
0
def createMySQLCommands():
    """Generate SQL INSERTs (plus hashpn fetch/brew scripts) adding every PN
    from `inList` to the HASH MainGPN / MainPNData tables.

    Uses the module-level id counters (idPNMainStart, idtbCNamesStart,
    idtbAngDiamStart, idtbUsrCommStart, idDSHPNeStart) and appends the new
    HASH ids to the module-level `hashIDs` list.  Output files are written
    next to `inList` with '.sql' and '.fetch' extensions.
    """
    csv = csvFree.readCSVFile(inList)
    with open(inList[:inList.rfind('.')] + '.sql', 'w') as f:
        for i in np.arange(0, csv.size(), 1):
            f.write("USE `MainGPN`;\n")
            hashIDs.append(idPNMainStart + i)
            raHMS = csv.getData("RAJ2000", i)
            ra = hmsToDeg(raHMS)
            decDMS = csv.getData("DECJ2000", i)
            dec = dmsToDeg(decDMS)
            # manually refined coordinates (if any) override the catalogue ones
            refined = csv.getData('Refined coordinates', i)
            refined = refined.replace('  ', ' ')
            if refined != '':
                print('refined = <' + refined + '>')
                raH, raM, raS, decD, decM, decS = refined.split(' ')
                raHMS = raH + ':' + raM + ':' + raS
                ra = hmsToDeg(raHMS)
                decDMS = decD + ':' + decM + ':' + decS
                dec = dmsToDeg(decDMS)
                print('refined coordinates: raHMS = <' + raHMS +
                      '> decDMS = <' + decDMS + '>')
                print('refined coordinates: ra = ', ra, ' dec = ', dec)
            print('raHMS = <' + raHMS + '> decDMS = <' + decDMS + '>')
            print('ra = ', ra, ' dec = ', dec)
            lon, lat = raDecToLonLat(ra, dec)
            print('lon=', lon, ', lat=', lat)
            # build the PNG designation: zero-padded Glon to one decimal,
            # then explicitly signed, zero-padded Glat to one decimal
            png = ''
            if lon < 100:
                png += '0'
            if lon < 10:
                png += '0'
            png += str(lon)
            png = png[:png.rfind('.') + 2]
            if lat >= 0.:
                png += '+'
            png += str(lat)
            png = png[:png.rfind('.') + 2]
            # zero-pad single-digit latitudes after the sign
            if (lat < 10.) and (lat >= 0.):
                png = png[:png.rfind('+') + 1] + '0' + png[png.rfind('+') + 1:]
            if (lat > -10.) and (lat < 0.):
                png = png[:png.rfind('-') + 1] + '0' + png[png.rfind('-') + 1:]
            print('PNG ' + png)
            f.write(
                "INSERT INTO `PNMain`(`idPNMain`,`PNG`,`refPNG`,`RAJ2000`,`DECJ2000`,`DRAJ2000`,`DDecJ2000`,"
            )
            f.write(
                "`Glon`,`Glat`,`refCoord`,`Catalogue`,`refCatalogue`,`userRecord`,`domain`,`refDomain`,`PNstat`,`refPNstat`,`refSimbadID`,`show`) "
            )
            f.write(
                "VALUES (%d,'%s','%s','%s','%s',%.5f,%.5f,%.5f,%.5f,'%s','%s','%s','%s','%s','%s','%s','%s','%s','%s');\n"
                % (idPNMainStart + i, png, 'sys', raHMS, decDMS, ra, dec, lon,
                   lat, 'DSHPNe', 'DSHPNe', 'ziggy', 'ziggy', 'Galaxy',
                   'ziggy', csv.getData('PNstat', i), 'ziggy', 'sys', 'y'))

            f.write(
                "INSERT INTO `tbCNames`(`idtbCNames`,`Name`,`reference`,`InUse`,`refInUse`,`userRecord`,`idPNMain`,`simbadID`,`flag`)"
            )
            f.write("VALUES (%d,'%s','%s',%d,'%s','%s',%d,'%s','%s');\n" %
                    (idtbCNamesStart + i, csv.getData('Name', i), 'DSHPNe', 1,
                     'sys', 'ziggy', idPNMainStart + i, 'n', 'n'))

            f.write(
                "INSERT INTO `PNMain_tbCNames`(`idPNMain`,`idtbCNames`) VALUES (%d,%d);\n"
                % (idPNMainStart + i, idtbCNamesStart + i))

            # clean decoration characters from the diameter strings
            majDiamStr = csv.getData("MajDiam",
                                     i).strip('~').rstrip('"').rstrip(':')
            minDiamStr = csv.getData("MinDiam",
                                     i).strip('~').rstrip('"').rstrip(':')
            if majDiamStr in ['stellar?', 'stellar']:
                majDiamStr = '1'
                minDiamStr = '1'

            f.write(
                "INSERT INTO `tbAngDiam`(`idtbAngDiam`,`MajDiam`,`MinDiam`,`reference`,`InUse`,`refInUse`,`userRecord`,`idPNMain`,`tempflag`) "
            )
            f.write(
                "VALUES (%d,%.1f,%.1f,'%s',%d,'%s','%s',%d,'%s');\n" %
                (idtbAngDiamStart + i, float(majDiamStr), float(minDiamStr),
                 'DSHPNe', 1, 'sys', 'ziggy', idPNMainStart + i, 'n'))

            f.write(
                "INSERT INTO `PNMain_tbAngDiam`(`idPNMain`,`idtbAngDiam`) VALUES (%d,%d);\n"
                % (idPNMainStart + i, idtbAngDiamStart + i))

            name = csv.getData('Name', i)
            if name[:4] == 'Pa J':
                name = name[3:]
            notes = csv.getData('comment1', i)
            # BUGFIX: getData returns a string, so the original test
            # `notes == []` was always False and comment2 was always
            # appended with a leading ', ' even when comment1 was empty
            if notes == '':
                notes = csv.getData('comment2', i)
            else:
                notes += ', ' + csv.getData('comment2', i)
            print('notes = <' + notes + '>')
            f.write(
                "INSERT INTO `tbUsrComm`(`idtbUsrComm`,`idPNMain`,`user`,`public`,`comment`,`date`) "
            )
            f.write("VALUES (%d,%d,'%s','%s','%s','%s');\n" %
                    (idtbUsrCommStart + i, idPNMainStart + i, 'ziggy', 'y',
                     notes, '2020-03-31 19:30:00'))

            f.write("USE `MainPNData`;\n")
            f.write(
                "INSERT INTO `DSHPNe`(`idDSHPNe`,`Discoverer`,`ID`,`PNG`,`RAJ2000`,`DECJ2000`,`DRAJ2000`,`DDECJ2000`,`Glon`,`Glat`,`MajDiam`,`MinDiam`,`status`,`discovery`,`narrowImag`,`broadband`,`echelle`,`notes`,`PNMainDist`,`mapFlag`,`idPNMain`) "
            )
            f.write(
                "VALUES (%d,'%s','%s','%s','%s','%s',%.4f,%.4f,%.2f,%.2f,%d,%d,'%s','%s','%s','%s','%s','%s',%d,'%s',%d);\n"
                % (idDSHPNeStart + i, 'Pa', name, png, raHMS,
                   decDMS, ra, dec, lon, lat, float(majDiamStr),
                   float(minDiamStr), 'New Candidates', '', '', '', '', notes,
                   -1, 'y', idPNMainStart + i))

    # companion shell scripts to fetch and brew the new ids in HASH
    with open(inList[:inList.rfind('.')] + '.fetch', 'w') as f:
        f.write('hashpn fetch all ' + str(hashIDs[0]))
        for id in np.arange(1, len(hashIDs), 1):
            f.write(',' + str(hashIDs[id]))
        f.write(' -w force\n')
        f.write('hashpn brew all ' + str(hashIDs[0]))
        for id in np.arange(1, len(hashIDs), 1):
            f.write(',' + str(hashIDs[id]))
        f.write(' -w\n')
Esempio n. 17
0
import csvData,csvFree

inFileName = '/Users/azuri/daten/uni/HKU/HASH/hash-no-show.csv'

inData = csvFree.readCSVFile(inFileName)
print('inData.size() = ',inData.size())

pnStat = inData.getData('PNstat')
print('pnStat = ',pnStat)

# one output table per status group, all sharing the input header
tlps = csvData.CSVData()
newCandidates = csvData.CSVData()
others = csvData.CSVData()
for table in (tlps, newCandidates, others):
    table.header = inData.header

# split rows by PNstat: True/Likely/Possible, candidates ('c'), and the rest
for i in range(inData.size()):
    status = inData.getData('PNstat',i)
    if status in ('T','L','P'):
        tlps.append(inData.getData(i))
    elif status == 'c':
        newCandidates.append(inData.getData(i))
    else:
        others.append(inData.getData(i))

baseName = inFileName[:-4]
csvFree.writeCSVFile(tlps, baseName+'_TLPs.csv')
csvFree.writeCSVFile(newCandidates, baseName+'_newCandidates.csv')
csvFree.writeCSVFile(others, baseName+'_others.csv')
Esempio n. 18
0
import numpy as np
import csvFree, csvData

eventRegistrationFile = '/Users/azuri/daten/parties/Wild Wood/contacts/Completed orders export.csv'
oldContactsFile = '/Users/azuri/daten/parties/Wild Wood/contacts/contacts.csv'
contactsToImport = '/Users/azuri/daten/parties/Wild Wood/contacts/contacts_to_import.csv'
existingContactsFile = '/Users/azuri/daten/parties/Wild Wood/existing_contacts.csv'
volunteersFile = '/Users/azuri/daten/parties/Wild Wood/Wild Woods Volunteers Questionnaire.csv'

csvEventRegistration = csvFree.readCSVFile(eventRegistrationFile)
csvOldContacts = csvFree.readCSVFile(oldContactsFile)
existingContacts = csvFree.readCSVFile(existingContactsFile)
volunteers = csvFree.readCSVFile(volunteersFile)

csvContactsToImport = csvData.CSVData()
csvContactsToImport.header = csvOldContacts.header

# empty template row matching the contacts header
newLine = ['' for _ in csvOldContacts.header]

print('csvEventRegistration.header = ', csvEventRegistration.header)

# collect the unique guest names from the event-registration export
iContact = 0
names = []
for i in range(csvEventRegistration.size()):
    firstName = csvEventRegistration.getData('First Name (Billing)', i)
    lastName = csvEventRegistration.getData('Last Name (Billing)', i)
    name = firstName + ' ' + lastName + ' Wild Wood Guest'
    print('name = ', name)
    if name not in names:
        print(name + ' not found')
        names.append(name)
Esempio n. 19
0
import numpy as np

import csvData
import csvFree
from myUtils import hmsToDeg, dmsToDeg, raDecToLonLat

# Input catalogues: two Barlow tables plus the full HASH PNMain dump and
# the HASH common-names table.
barlowTables = [
    '/Users/azuri/daten/uni/HKU/HASH/Barlow1.csv',
    '/Users/azuri/daten/uni/HKU/HASH/Barlow2.csv'
]
hashTable = '/Users/azuri/daten/uni/HKU/HASH/PNMain_full_June-25-2020.csv'  #April-28-2020.csv'#
cNamesTable = '/Users/azuri/daten/uni/HKU/HASH/hash-commonNames.csv'

csvHash = csvFree.readCSVFile(hashTable)
print('csvHash = ', csvHash)

cNames = csvFree.readCSVFile(cNamesTable)

def getRAandDECFromIPHAS(iphas):
    """Split an IPHAS designation into its RA and DEC substrings.

    NOTE(review): this function is truncated in the present view (no
    return statement visible), so its final output cannot be confirmed
    here.  Presumably the input looks like 'IPHAS Jhhmmss.ss+ddmmss.s' —
    verify against callers.
    """
    # drop the survey prefix up to and including the first blank
    iphas = iphas[iphas.find(' ') + 1:]
    iphasTmp = iphas.replace('J', '')
    iphasTmp = iphasTmp.replace('X', '')
    # `iphasmp` (note the name) maps '-' to '+' so find('+') locates the
    # DEC sign position regardless of hemisphere; decStr is then sliced
    # from iphasTmp, which still carries the original sign
    iphasmp = iphasTmp.replace('-', '+')
    raStr = iphasmp[:iphasmp.find('+')]
    decStr = iphasTmp[iphasmp.find('+'):]
    print('iphas = <' + iphas + ': raStr = <' + raStr + '>, decStr = <' +
          decStr + '>')
    # peel the seconds (the two digits before the '.') off the RA string,
    # then take the minutes from what remains
    ras = raStr[raStr.find('.') - 2:]
    raStr = raStr[:raStr.find('.') - 2]
    ram = raStr[-2:]
Esempio n. 20
0

def rFromDAndTheta(D, theta):
    """Convert a distance D and angular radius theta into a physical radius.

    Computes D * theta / 206.265.  NOTE(review): 206.265 = 206265 / 1000,
    consistent with D in kpc and theta in arcsec yielding a radius in pc —
    confirm the units against the callers.
    """
    radius = D * theta / 206.265
    return radius


# Frew surface-brightness/radius supplementary tables ('&'-separated,
# LaTeX-style, read without a header row).
inFileName3 = '/Users/azuri/daten/uni/HKU/PNe/Frew - The H-alpha surface brightness - radius relation- a robust statistical distance indicator for planetary nebulae_Supplementary_Data/Table_A3_new.csv'
inFileName4 = '/Users/azuri/daten/uni/HKU/PNe/Frew - The H-alpha surface brightness - radius relation- a robust statistical distance indicator for planetary nebulae_Supplementary_Data/Table_A4_new.csv'

r = None
# first pass (i == 0) reads Table A4, second pass Table A3
# NOTE(review): the loop body appears truncated in this view — the A3
# branch prepares strvecD but its further processing is not visible.
for i in range(2):
    if i == 0:
        inFileName = inFileName4
    else:
        inFileName = inFileName3
    data = csvFree.readCSVFile(inFileName, '&', False)
    #    print('header = ',len(data.header),': ',data.header)

    if i == 0:
        strvecD = data.getData('D_mean [kpc]')
        # drop the first character (presumably a '$') and everything from
        # the first blank onwards — TODO confirm against the table format
        strvecD = [d[1:d.find(' ')] for d in strvecD]
        #        print('strvecD = ',strvecD)
        distances = np.array(
            csvFree.convertStringVectorToDoubleVector(strvecD))
        strvecLogR = data.getData('log r[pc]')
    else:
        strvecD = data.getData('D [pc]')
        # strip blanks and tabs before cutting at the '$' delimiter
        strvecD = [
            d.replace(' ', '').strip('\t').rstrip('\t') for d in strvecD
        ]
        strvecD = [d[1:d.find('$')] for d in strvecD]
Esempio n. 21
0
import numpy as np
import astropy.units as u
import csvFree, csvData
from myUtils import angularDistance, hmsToDeg, dmsToDeg

# Input/output locations: Weidmann central-star tables, the HASH PNMain
# dump and the intern's object list.
f1name = '/Users/azuri/daten/uni/HKU/interns_projects/simba/Weidman-table1.csv'
f2name = '/Users/azuri/daten/uni/HKU/interns_projects/simba/Weidmann2020-cs-parameters.txt'
sqlFileOut = '/Users/azuri/daten/uni/HKU/interns_projects/simba/Weidman-table1.sql'
hashFile = '/Users/azuri/daten/uni/HKU/HASH/PNMain_full_June-30-2020.csv'
csvHash = csvFree.readCSVFile(hashFile)
simbaFile = '/Users/azuri/daten/uni/HKU/interns_projects/simba/All list v1.csv'
#simbaFile = '/Users/azuri/daten/uni/HKU/interns_projects/simba/simba_table.csv'
csvSimba = csvFree.readCSVFile(simbaFile)

# the Weidmann 2020 parameter table is plain text; ignore undecodable bytes
with open(f2name, encoding="utf8", errors='ignore') as f:
    lines = f.readlines()

csv2 = csvData.CSVData()
csv2.header = [
    'PNG', 'log g', 'ref log g', 'met', 'log T', 'ref log T',
    'log (L_star/L_sun)', 'ref log(L_star/L_sun', 'mag', 'ref mag'
]
# normalise each data line in place before parsing
# NOTE(review): the loop body continues beyond this view (truncated here).
for iLine in np.arange(1, len(lines), 1):
    data = []
    lines[iLine] = lines[iLine].replace('\r', '')
    lines[iLine] = lines[iLine].replace('\n', '')
    lines[iLine] = lines[iLine].replace('}', '+-')
    lines[iLine] = lines[iLine].replace(' v ', ' v')
    lines[iLine] = lines[iLine].replace(' r ', ' r')
    lines[iLine] = lines[iLine].replace(' b ', ' b')
    lines[iLine] = lines[iLine].replace(' i ', ' i')
Esempio n. 22
0
# Pie chart 1: which catalogue each galactic PN came from.
# Counts (nESO, nK, ...) are computed earlier in the original script.
labels = 'ESO','Kohoutek','Amateurs','IPHAS','MASH','New HASH','Others'
explode = (0, 0, 0, 0.01, 0.01, 0.01, 0)
fig1,ax1 = plt.subplots()
ax1.pie([nESO,nK-nESO,nFrenchAmateurs+nDSH,nIPHAS,nMASH,nHASHgPNe-nK-nOthers-nMASH,nOthers-nFrenchAmateurs-nDSH-nIPHAS],
        labels=labels,
        autopct='%1.1f%%',
        shadow=False,
        #explode=explode, 
        startangle=90)
ax1.axis('equal')  # Equal aspect ratio ensures that pie is drawn as a circle.
plt.savefig('/Users/azuri/daten/uni/HKU/APN8e/catalogues_gPNe.png', bbox_inches='tight', transparent=True)
plt.show()

# Pie chart 2: morphological main-class distribution of the true/likely/
# possible PNe in HASH.
hashCSVFile = '/Users/azuri/daten/uni/HKU/APN8e/hash_tlp.csv'

# Renamed from `hash`, which shadowed the Python builtin of the same name.
hashTlp = csvFree.readCSVFile(hashCSVFile)

nR = len(hashTlp.find('mainClass','R'))
nE = len(hashTlp.find('mainClass','E'))
nB = len(hashTlp.find('mainClass','B'))
nI = len(hashTlp.find('mainClass','I'))
nA = len(hashTlp.find('mainClass','A'))
nS = len(hashTlp.find('mainClass','S'))

labels = 'R','E','B','I','A','S'

fig1,ax1 = plt.subplots()
ax1.pie([nR,nE,nB,nI,nA,nS], labels=labels, autopct='%1.1f%%',
        shadow=False, startangle=90)
ax1.axis('equal')  # Equal aspect ratio ensures that pie is drawn as a circle.
plt.savefig('/Users/azuri/daten/uni/HKU/APN8e/morphologies_gPNe.png', bbox_inches='tight', transparent=True)
Esempio n. 23
0
import csvFree, csvData

# HASH spectra bookkeeping tables (FITS file list and per-spectrum info).
fitsFilesFile = '/Users/azuri/daten/uni/HKU/HASH/HASH_spectra/vanessa/hash_FitsFiles.csv'
spectraInfoFile = '/Users/azuri/daten/uni/HKU/HASH/HASH_spectra/vanessa/hash_spectraInfo.csv'

csvFiles = csvFree.readCSVFile(fitsFilesFile)
csvInfo = csvFree.readCSVFile(spectraInfoFile)

# Earlier input sets, kept for reference:
#idFiles = ['/Users/azuri/daten/uni/HKU/HASH/HASH_spectra/vanessa/getSpectraID_bipolar.dat',
#           '/Users/azuri/daten/uni/HKU/HASH/HASH_spectra/vanessa/getSpectraID_elliptical.dat',
#           '/Users/azuri/daten/uni/HKU/HASH/HASH_spectra/vanessa/getSpectraID_round.dat',
#           ]
#idFiles = ['/Users/azuri/daten/uni/HKU/HASH/HASH_spectra/burton/bipolar.txt',
#           '/Users/azuri/daten/uni/HKU/HASH/HASH_spectra/burton/elliptical.txt',
#           '/Users/azuri/daten/uni/HKU/HASH/HASH_spectra/burton/round.txt',
#           ]
idFiles = [
    '/Users/azuri/daten/uni/HKU/HASH/HASH_spectra/helen/round.txt',
    #           '/Users/azuri/daten/uni/HKU/HASH/HASH_spectra/helen/bipolar.txt',
    #           '/Users/azuri/daten/uni/HKU/HASH/HASH_spectra/helen/elliptical.txt',
]

for idFile in idFiles:
    # Morphology label taken from the file name, e.g. 'round'.
    prefix = idFile[idFile.rfind('/') + 1:idFile.rfind('.')]
    print('prefix = <' + prefix + '>')
    with open(idFile, 'r') as f:
        ids = f.readlines()
    # Strip newlines/whitespace from each ID.
    for i in range(len(ids)):
        ids[i] = ids[i].strip()
    print('ids = ', ids)
    # Write a <prefix>_fetch.bat script next to the input file.
    # NOTE(review): this excerpt ends mid-statement below.
    with open(idFile[:idFile.rfind('/') + 1] + prefix + '_fetch.bat',
Esempio n. 24
0
                    hashID = allHASHobjects.getData('idPNMain',j)
                    # Common names registered in HASH for this object.
                    commonNames = getCommonNames(hashCommonNames,'idPNMain',hashID)
                    if name in commonNames:
                        # Positional match AND the name is already a known
                        # HASH common name: nothing to record.
                        print('Name = '+name+': HASH ID = ',hashID,': commonNames = ',commonNames,': ra = ',ra,', RA = ',allHASHobjects.getData('RAJ2000',j),', dec = ',dec,', DEC = ',allHASHobjects.getData('DECJ2000',j),', angDist = ',angDist)
                        found = True
                    else:
                        # Close on sky but the name is not registered in HASH:
                        # append a row to the discrepancy table.
                        print('Name = <'+name+'>: object found within ',maxAngularDistance,' arcsec: angDist = ',angDist,': ra = ',ra,', RA = ',allHASHobjects.getData('RAJ2000',j),', dec = ',dec,', DEC = ',allHASHobjects.getData('DECJ2000',j),', HASH name = <'+allHASHobjects.getData('Name',j))
                        csvOut.append(emptyData)
                        csvOut.setData('Name',csvOut.size()-1,name)
                        csvOut.setData('idPNMain',csvOut.size()-1,hashID)
                        # Join all common names into a ';'-separated string.
                        cNames = commonNames[0]
                        for k in np.arange(1,len(commonNames),1):
                            cNames += ';'+commonNames[k]
                        csvOut.setData('HASH common names',csvOut.size()-1,cNames)
                        csvOut.setData('RA FRA',csvOut.size()-1,ra)
                        csvOut.setData('RA HASH',csvOut.size()-1,allHASHobjects.getData('RAJ2000',j))
                        csvOut.setData('DEC FRA',csvOut.size()-1,dec)
                        csvOut.setData('DEC HASH',csvOut.size()-1,allHASHobjects.getData('DECJ2000',j))
                        csvOut.setData('angular distance [arcsec]',csvOut.size()-1,str(angDist))
    # Persist the collected discrepancies (variable name keeps the original
    # 'disrepancies' spelling -- defined outside this excerpt).
    writeCSVFile(csvOut,disrepanciesOutFileName)

if __name__ == "__main__":
    # Load the HASH and FRA tables, then cross-check the combined
    # discrepancies between the two catalogues.
    allHASHobjects = readCSVFile(allHASHobjectsFile)
    allFRAobjects = readCSVFile(allFRAobjectsFile)
    hashOut = readCSVFile(hashOutFile)
    hashCommonNames = readCSVFile(hashCommonNamesFile)

    combinedDiscrepancies = combineDiscrepancies()
    checkCombinedDiscrepancies(combinedDiscrepancies, allHASHobjects, hashCommonNames)

Esempio n. 25
0
# Apparent magnitude at the given distance: 2.5*log10(d^2/100) is the
# distance modulus 5*log10(d/10pc) written with the square inside the log.
add = (2.5 * np.log10(np.square(distance) / 100.))
mv_max = Mv_max + add
mv_min = Mv_min + add
print('mv_max = ', mv_max, ', mv_min = ', mv_min)

# Light-curve decline rates over 15 days (literature values).
delta_m_15_B_max = 2.4  # mag / 15 days (Stritzinger 2015)
delta_m_15_B_min = 1.25  # mag / 15 days (Stritzinger 2015)

delta_m_15_R_max = 1.0  # mag / 15 days (Jha 2017)
delta_m_15_R_min = 0.2  # mag / 15 days (Jha 2017)

time = np.arange(0, 300, 0.1)

# Stritzinger (2015) photometry table, tab-separated.
table3Name = '/Users/azuri/daten/uni/HKU/Pa30/Stritzinger2015/table3.dat'
table3 = csvFree.readCSVFile(table3Name, '\t', False)

print('table3.header = ', table3.header)
# Convert the truncated Julian dates to full JD.
dates = 2450000. + np.array([float(i) for i in table3.getData('JD2450000+')])

Bmag = np.array(table3.getData('B (mag)'))
print('Bmag = ', Bmag)
print('Bmag[1] = ', Bmag[1])
# Rows that actually have a B measurement ('-' marks missing data).
idx = np.where(Bmag != '-')
print('idx = ', idx)
print('idx[0] = ', idx[0])
Bdates = dates[idx]
BmagPlot = Bmag[idx]
# Parse "mag(err)" strings; keep the result as an ndarray so argmin works.
BmagPlot = np.array([float(b[:b.find('(')]) for b in BmagPlot])
# BUG FIX: the original compared a Python *list* to a scalar
# (BmagPlot == np.min(BmagPlot) evaluates to False), so np.where returned
# an empty index array and the date shift was broken. Use argmin to put
# t = 0 at minimum magnitude (= maximum brightness).
Bdates = Bdates - Bdates[np.argmin(BmagPlot)]
plt.scatter(Bdates, BmagPlot, c='b', label='B')
Esempio n. 26
0
                        newAreas = np.array(newAreas)
                        print('newAreas = ', len(newAreas), ': ', newAreas)
                        if show:
                            plt.hist(newAreas)
                            plt.show()
                        # 1-sigma spread of the Monte-Carlo areas is taken as
                        # the flux error and stored in column '<line>e'.
                        sDev = np.std(newAreas)
                        print('sDev = ', sDev)
                        csvLines.setData(keys[iLine] + 'e', i, '%.3E' % (sDev))
        # Rewrite the line table in place (tab-separated).
        csvFree.writeCSVFile(csvLines, csvLinesFileName, '\t')


if __name__ == '__main__':
    # Run the error calculation for every reduced GTC spectrum in spectraDir.
    spectraDir = '/Users/azuri/spectra/GTC'
    # Files in the top level of spectraDir only (no recursion).
    (_, _, filenames) = next(os.walk(spectraDir))
    csvLinesFileName = '/Users/azuri/daten/uni/HKU/IPHAS-GTC/observation.dat'
    hash_fitsFiles = csvFree.readCSVFile(
        '/Users/azuri/daten/uni/HKU/IPHAS-GTC/fitsfiles.csv')
    #    spectrumFileName = '/Users/azuri/spectra/GTC/LDu1_sum.fits'
    for spectrumFileName in filenames:
        print('spectrumFileName[-5:] = <' + spectrumFileName[-5:] + '>')
        # Only process .fits spectra, skipping known problem files and
        # signal-to-noise ('SNR') products.
        if ((spectrumFileName[-5:] == '.fits')
                and (spectrumFileName != 'strange_blue_star_GT220816.fits')
                and ('SNR' not in spectrumFileName)
                and (spectrumFileName != 'K1-6a_GT160516.fits')):
            print('starting')
            spectrumFileName = os.path.join(spectraDir, spectrumFileName)
            idPNMain = None
            # Look up the HASH ID for this bare file name.
            for i in range(hash_fitsFiles.size()):
                if spectrumFileName[spectrumFileName.rfind('/') +
                                    1:] == hash_fitsFiles.getData(
                                        'fileName', i):
                    idPNMain = hash_fitsFiles.getData('idPNMain', i)
Esempio n. 27
0
def calculateErrors(spectrumFileName, idPNMain, csvLinesFileName, show=False):
    """Monte-Carlo error estimate for the line fluxes of one spectrum.

    For each emission line in `linesOfInterest` that has a measured area in
    the line table, a Gaussian with that area is reconstructed at the line
    position, 100 noise realisations (per-pixel noise drawn from the fitted
    sigma-vs-wavelength relation) are re-fitted, and the standard deviation
    of the re-fitted areas is written back into the table as column
    '<line>e'.  The table file is rewritten in place (tab-separated).

    Parameters
    ----------
    spectrumFileName : str
        Path to the FITS spectrum.
    idPNMain : str
        HASH ID of the object; matched against the table's 'NAME' column.
    csvLinesFileName : str
        Tab-separated line-flux table, read and rewritten in place.
    show : bool, optional
        If True, display diagnostic plots for each line.
    """
    print('spectrumFileName = <' + spectrumFileName + '>')
    # Noise (sigma) as a function of wavelength for this spectrum.
    wLen, sigmaFit = getSigmaVsWLen(spectrumFileName, show=False)
    print('len(wLen) = ', len(wLen), ', len(sigmaFit) = ', len(sigmaFit))
    flux = getImageData(spectrumFileName, 0)
    print('len(flux) = ', len(flux))
    csvLines = csvFree.readCSVFile(csvLinesFileName, '\t', False)
    # Radial velocities live in vrad.csv one level above imPath.
    csvVRad = csvFree.readCSVFile(
        os.path.join(imPath[:imPath.rfind('/')], 'vrad.csv'))
    print('csvVRad.header = ', csvVRad.header)
    filenames = csvVRad.getData('fileName')
    print('filenames = ', filenames)
    # Row of this spectrum (matched on bare file name) in the vrad table;
    # presumably find() yields -1 when absent -- see the check below.
    vradPos = csvVRad.find('fileName',
                           spectrumFileName[spectrumFileName.rfind('/') + 1:],
                           0)[0]
    if vradPos < 0:
        print('error: did not find spectrumFileName <' + spectrumFileName +
              '>')
        #STOP
    else:
        vrad = float(csvVRad.getData('vrad', vradPos))
        print('vrad = ', type(vrad), ': ', vrad)
        # Shift wavelengths to the rest frame before locating lines.
        wLen = applyVRadCorrection(wLen, vrad)
        print('vradPos = ', vradPos)
        header = csvLines.header  # NOTE(review): unused local
        keys = list(linesOfInterest.keys())
        for i in range(csvLines.size()):
            if idPNMain == csvLines.getData('NAME', i):
                for iLine in range(len(linesOfInterest)):
                    area = float(csvLines.getData(keys[iLine], i))
                    print('key = ', keys[iLine], ': area = ', area)
                    # Only lines that were actually measured (area > 0).
                    if area > 0.:
                        # Rest wavelength of this line.
                        x0 = linesOfInterest[keys[iLine]]
                        # +-20 window for the fit, +-3 window for the peak.
                        x = wLen[np.where(np.abs(wLen - x0) < 20.)[0]]
                        thisFlux = flux[np.where(np.abs(wLen - x0) < 3.)[0]]
                        maxFlux = np.max(thisFlux)
                        # Gaussian sigma that reproduces the measured area at
                        # the observed peak height.
                        sigma = area / (maxFlux * 2.13 *
                                        np.sqrt(2. * np.log(2.)))
                        print('x = ', x0, ', a = ', maxFlux, ', sigma = ',
                              sigma)
                        thisFlux = flux[np.where(np.abs(wLen - x0) < 20.)[0]]
                        thisSDev = sigmaFit[np.where(
                            np.abs(wLen - x0) < 20.)[0]]
                        # Model Gaussian used as the noise-free template.
                        gaussFit = gauss(x, maxFlux, x0, sigma)
                        if show:
                            plt.plot(x, thisFlux, label='flux')
                            plt.plot(x, thisSDev, label='sigma')
                            plt.plot(x, gaussFit, label='fit')
                            plt.legend()
                            plt.show()
                        # Re-fit the actual data once for comparison.
                        newArea = getAreaGauss(x,
                                               thisFlux,
                                               maxFlux,
                                               x0,
                                               sigma,
                                               addOnBothSidesOfX=0.,
                                               show=False,
                                               save=None)
                        print('old area = ', area, ', newly fitted area = ',
                              newArea)
                        if show:
                            plt.plot(x, gaussFit, label='fit')
                            plt.plot(x, thisFlux, label='flux')
                        # Monte Carlo: perturb the template with per-pixel
                        # Gaussian noise and re-fit 100 times.
                        newAreas = []
                        for iRun in range(100):
                            thisFluxWithErr = np.zeros(x.shape,
                                                       dtype='float32')
                            for thisFluxPos in range(x.shape[0]):
                                thisFluxWithErr[thisFluxPos] = gaussFit[
                                    thisFluxPos] + np.random.normal(
                                        0., np.abs(thisSDev[thisFluxPos]))
                            if show:
                                plt.plot(x,
                                         thisFluxWithErr,
                                         label='%d' % (iRun))
                            try:
                                newAreas.append(
                                    getAreaGauss(x,
                                                 thisFluxWithErr,
                                                 maxFlux,
                                                 x0,
                                                 sigma,
                                                 addOnBothSidesOfX=0.,
                                                 show=False,
                                                 save=None)[0])
                            except Exception as e:
                                # Fit failed: show diagnostics, fall back to
                                # the measured area, then halt via the
                                # undefined name STOP (deliberate debug trap
                                # that raises NameError).
                                plt.plot(x, thisFlux, label='original')
                                plt.plot(x,
                                         thisFluxWithErr,
                                         label='with errors')
                                plt.show()
                                newAreas.append(area)
                                STOP
                        if show:
                            plt.legend()
                            plt.show()
                        newAreas = np.array(newAreas)
                        print('newAreas = ', len(newAreas), ': ', newAreas)
                        if show:
                            plt.hist(newAreas)
                            plt.show()
                        # 1-sigma spread of the re-fitted areas = flux error.
                        sDev = np.std(newAreas)
                        print('sDev = ', sDev)
                        csvLines.setData(keys[iLine] + 'e', i, '%.3E' % (sDev))
        csvFree.writeCSVFile(csvLines, csvLinesFileName, '\t')
Esempio n. 28
0
def addNewPNeToHASH():
    """Insert the new PNe from `newPNeFile` into the HASH database.

    Writes two SQL scripts -- `sqlCommandsFile` with INSERTs into MainGPN
    (PNMain, common names, angular diameters) and `iphasTable` with the
    per-catalogue mapping table for MainPNData -- plus `hashpnFile` with
    hashpn shell commands to fetch/brew the newly created HASH IDs.
    Objects whose PNG already has a confirmed HASH counterpart (non-empty
    'pndb' in `newHashOutFile`) reuse the existing idPNMain instead of
    creating a new PNMain record.
    """
    inputData = csvFree.readCSVFile(newPNeFile)
    PNMain = csvFree.readCSVFile(hashPNMainFileName)
    hashOut = csvFree.readCSVFile(newHashOutFile)
    pneInHash = hashOut.getData('id')
    pngs = PNMain.getData('PNG')
    pngsInHash = []
    with open(sqlCommandsFile, 'w') as f:
        with open(iphasTable, 'w') as ft:
            # Create the catalogue's mapping table in MainPNData.
            ft.write('CREATE TABLE IF NOT EXISTS MainPNData.' + catalogName +
                     ' (\n')
            ft.write('id' + catalogName +
                     ' INT AUTO_INCREMENT PRIMARY KEY UNIQUE,\n')
            ft.write('idPNMain INT,\n')
            ft.write('mapFlag VARCHAR(1) NOT NULL\n')
            ft.write(');\n')

            ft.write("USE `MainPNData`;\n")

            ids = []  # newly created idPNMain values, used for hashpnFile
            f.write("USE `MainGPN`;\n")
            for i in range(inputData.size()):
                dra = float(inputData.getData('DRAJ2000', i))
                ddec = float(inputData.getData('DDECJ2000', i))
                lon, lat = raDecToLonLat(dra, ddec)
                ra = degToHMS(dra)
                dec = degToDMS(ddec)
                print('lon = ', lon, ', lat = ', lat)
                png = getPNG(lon, lat)
                print('png = <' + png + '>')
                # If the PNG designation is already taken, disambiguate by
                # appending a suffix a..e.  (Replaces the original repeated
                # if-chain with a loop; behaviour is identical.)
                basePNG = png
                for suffix in 'abcde':
                    if png not in pngs:
                        break
                    pngsInHash.append(png)
                    print('PNG ' + png + ' already in HASH')
                    png = basePNG + suffix

                if (png in pneInHash) and (hashOut.getData(
                        'pndb', pneInHash.index(png)) != ''):
                    print('PNG ' + png,
                          ' found in pneInHash: pneInHash.index(', png, ') = ',
                          pneInHash.index(png))
                    # Object already in HASH: reuse its idPNMain.
                    idPNMain = int(
                        hashOut.getData('pndb', pneInHash.index(png)))
                    # add IPHAS name to common names
                else:
                    # New object: allocate the next free idPNMain.
                    idPNMain = idPNMainStart + i
                    ids.append(idPNMain)
                    f.write(
                        "INSERT INTO `PNMain`(`idPNMain`,`PNG`,`refPNG`,`RAJ2000`,`DECJ2000`,`DRAJ2000`,`DDecJ2000`,"
                    )
                    f.write(
                        "`Glon`,`Glat`,`refCoord`,`Catalogue`,`refCatalogue`,`userRecord`,`domain`,`refDomain`,`PNstat`,`refPNstat`,`refSimbadID`,`show`) "
                    )
                    f.write(
                        "VALUES (%d,'%s','%s','%s','%s',%.5f,%.5f,%.5f,%.5f,'%s','%s','%s','%s','%s','%s','%s','%s','%s','%s');\n"
                        % (
                            idPNMain,
                            png,  #csvBarlow.getData('PNG',iRow),
                            'sys',
                            ra,
                            dec,
                            dra,
                            ddec,
                            lon,
                            lat,
                            'ziggy',
                            'ziggy',
                            'ziggy',
                            'ziggy',
                            'Galaxy',
                            'ziggy',
                            'c',  # PNstat -- presumably 'c' = candidate; confirm
                            'ziggy',
                            'sys',
                            'y'))

                # Link the object into the catalogue mapping table.
                ft.write("INSERT INTO `" + catalogName +
                         "`(`idPNMain`,`mapflag`) ")
                ft.write("VALUES (%d,'%s');\n" % (idPNMain, 'y'))
                iphasName = getIPHASName(ra, dec)
                f.write(
                    "INSERT INTO `PNMain_tbCNames`(`idPNMain`,`idtbCnames`) VALUES (%d,%d);\n"
                    % (idPNMain, idtbCNamesStart + i))
                f.write(
                    "INSERT INTO `tbCNames`(`idtbCNames`,`Name`,`InUse`,`refInUse`,`userRecord`,`idPNMain`,`simbadID`,`flag`) "
                )
                # BUG FIX: the original wrote `idtbCNamesStart + 1` here, so
                # every tbCNames row reused one id and never matched the link
                # rows written above with `idtbCNamesStart + i`.
                f.write("VALUES (%d,'%s',%d,'%s','%s',%d,'%s','%s');\n" %
                        (idtbCNamesStart + i, iphasName, 1, 'sys', 'sys',
                         idPNMain, 'n', 'n'))
                f.write(
                    "INSERT INTO `PNMain_tbAngDiam`(`idPNMain`,`idtbAngDiam`) VALUES (%d,%d);\n"
                    % (idPNMain, idtbAngDiamStart + i))
                f.write(
                    "INSERT INTO `tbAngDiam`(`idtbAngDiam`,`MajDiam`,`InUse`,`userRecord`,`idPNMain`,`tempflag`) "
                )
                f.write("VALUES (%d,%.0f,%d,'%s',%d,'%s');\n" %
                        (idtbAngDiamStart + i, 300., 1, 'sys', idPNMain, 'n'))

    # Shell script to fetch and brew images for all newly added objects.
    # (loop variable renamed from `id`, which shadowed the builtin)
    with open(hashpnFile, 'w') as hf:
        for newId in ids:
            hf.write('hashpn fetch all ' + str(newId) + ' -w force\n')
            hf.write('hashpn brew all ' + str(newId) + ' -w\n')
            hf.write('echo "finished HASH ID %d" >> logfile_IPHAS.log\n' %
                     newId)

    print('pngsInHash = ', pngsInHash)
Esempio n. 29
0
import os
import csvFree,csvData

# Compare nebular orientation angles (GPA) between the Rees & Zijlstra
# table, the HASH table, and my own measurements, matched on HASH ID.
path = '/Users/azuri/daten/uni/HKU/PN alignment'
rzFile = os.path.join(path, 'Rees_Zijlstra_table_with_HASH-ID.csv')
hashFile = os.path.join(path, 'HASH_true_PNe+004.2-05.9+005.9-02.6.csv')
myFile = os.path.join(path, 'PN-alignments.csv')

rzData = csvFree.readCSVFile(rzFile)
hashData = csvFree.readCSVFile(hashFile)
myData = csvFree.readCSVFile(myFile)

for i in range(rzData.size()):
    hashID = rzData.getData('HASH ID',i)
    rzGPA = float(rzData.getData('GPA',i))
    for j in range(hashData.size()):
        if hashID == hashData.getData('idPNMain',j):
            print('HASH ID = ',hashID,': rzGPA = ',rzGPA,', HASH GPA = ',hashData.getData('GPA',j))
            # BUG FIX: the original ended this branch with a no-op
            # `continue`; `break` (stop after the first idPNMain match)
            # is the evident intent.  Also removed the dead `found` flag.
            break
    for j in range(myData.size()):
        if hashID == myData.getData('HASH ID',j):
            myGPA = float(myData.getData('GPA',j))
            # Normalise my GPA into [0, 180) before comparing.
            if myGPA > 180.:
                myGPA -= 180.
            if myGPA < 0.:
                myGPA += 180.
            print('HASH ID = ',hashID,': rzGPA = ',rzGPA,', my GPA = ',myGPA,', difference = ',myGPA-rzGPA)
Esempio n. 30
0
# CSV dumps of the relevant HASH database tables (dated exports).
hashAngDiamFileName = '/Users/azuri/daten/uni/HKU/HASH/hash_tbAngDiam_111120.csv'
hashPNMain_CNamesFileName = '/Users/azuri/daten/uni/HKU/HASH/hash_PNMain_tbCNames_111120.csv'
hashCNamesFileName = '/Users/azuri/daten/uni/HKU/HASH/hash_tbCNames_111120.csv'
hashAngExtFileName = '/Users/azuri/daten/uni/HKU/HASH/hash_tbAngExt_290920.csv'
hashPNMainFileName = '/Users/azuri/daten/uni/HKU/HASH/hash_PNMain_111120.csv'

# Input/output files for the IPHAS candidate matching and import run.
minDistFile = '/Users/azuri/daten/uni/HKU/HASH/IPHAS_listALL2_MASPN_sort_hashDists.csv'
newPNeFile = '/Users/azuri/daten/uni/HKU/HASH/IPHAS_listALL2_MASPN_sort_new_grouped.csv'
newHashOutFile = '/Users/azuri/daten/uni/HKU/HASH/IPHAS_listALL2_MASPN_sort_new_grouped_hashout.csv'
sqlCommandsFile = '/Users/azuri/daten/uni/HKU/HASH/IPHAS_listALL2_MASPN_sort_new_grouped_add_to_HASH.sql'
iphasTable = '/Users/azuri/daten/uni/HKU/HASH/IPHAS_listALL2_MASPN_sort_new_grouped_table.sql'
catalogName = 'Sabin_IPHASPNe_Nov2020'
hashpnFile = '/Users/azuri/daten/uni/HKU/HASH/IPHAS_listALL2_MASPN_sort_new_grouped_table.hash'

minDist = 100.  # matching radius -- presumably arcsec; confirm against usage
# Next free primary keys: last id in each table dump + 1
# (assumes the dumps are ordered by id -- TODO confirm).
csv = csvFree.readCSVFile(hashPNMainFileName)
idPNMainStart = int(csv.getData('idPNMain', csv.size() - 1)) + 1
csv = csvFree.readCSVFile(hashPNMain_CNamesFileName)
idtbCNamesStart = int(csv.getData('idtbCNames', csv.size() - 1)) + 1
# NOTE(review): hashPNMain_tbAngDiamFileName is not defined anywhere in this
# excerpt -- if it is not defined elsewhere this line raises NameError.
csv = csvFree.readCSVFile(hashPNMain_tbAngDiamFileName)
idtbAngDiamStart = int(csv.getData('idtbAngDiam', csv.size() - 1)) + 1


def createHashInFile():
    csvIphas = csvFree.readCSVFile(inFileName)
    prevRA = '10:10:10.0'
    prevDEC = '10:10:10.0'
    with open(hashInFileName, 'w') as f:
        for i in range(csvIphas.size()):
            ra = csvIphas.getData('RA_H', i) + ':' + csvIphas.getData(
                'RA_M', i) + ':' + csvIphas.getData('RA_S', i)