def genAscii2(cfg, products):
    """Convert each raster in *products* to an ASCII grid (<grid>.txt).

    Rasters normally come from [Main] inpath; grids listed in [Main]
    coordlist are taken from [Main] coordpath instead. Output goes to
    [Main] outpath (resolved via checkPath) using the geoprocessor's
    RasterToASCII_conversion tool.

    Args:
        cfg: ConfigParser with a [Main] section providing 'outpath',
            'inpath', 'coordpath' and 'coordlist' (comma-separated).
        products: iterable of grid names to convert.
    """
    import logging
    logger = logging.getLogger(__name__)

    gp.OverWriteOutput = 1  # allow the geoprocessor to replace existing output
    outpath = checkPath('/'.join([startpath, cfg.get('Main', 'outpath')]))
    logger.info('OUTPATH %s exists: %s', outpath, os.path.exists(outpath))
    # checkPath is called again here as in the original (it may create the
    # directory as a side effect) -- TODO confirm it is idempotent.
    logger.info('Making %s', checkPath(outpath))

    for grid in products:
        # Coordinate grids live in a separate source directory.
        if grid in cfg.get('Main', 'coordlist').split(','):
            inpath = cfg.get('Main', 'coordpath')
        else:
            inpath = cfg.get('Main', 'inpath')
        InRaster = '/'.join([startpath, inpath, grid])
        OutAsciiFile = '/'.join([outpath, grid + '.txt'])
        logger.info("Processing %s, InRaster %s, Out %s" % (grid, InRaster, OutAsciiFile))
        try:
            logger.info('genAscii, IN: %s OUT: %s' % (InRaster, OutAsciiFile))
            # Process: RasterToASCII_conversion
            gp.RasterToASCII_conversion(InRaster, OutAsciiFile)
        except Exception:
            # Narrowed from a bare except; surface the geoprocessor's
            # diagnostics instead of printing to stdout.
            logger.error(gp.GetMessages())
def main():
    """Detect changes between the previous and current catalog CSVs,
    record a results file for changed rows, then rotate current -> previous
    and emit a tab-delimited copy for downstream consumption."""
    # Full path -- must end with a forward slash!
    path = '/mnt/c/Users/DrewNicolette/Desktop/testCatalog/testNew/'
    # Column names are case sensitive!
    column_names = ['Id','First','Last','Add','City','State','Zip','Married']
    prev_file = 'cdfDataCatalog_prev.csv'
    curr_file = 'cdfDataCatalog_curr.csv'
    key = 'Id'

    # Preconditions: working dir, previous snapshot, archive dirs.
    f.checkPath(path)
    f.checkPrevFileExists(path, prev_file)
    f.checkArchiveDirsExist(path)

    # Identical checksums: nothing changed, just rotate the files and exit.
    if f.checkSum(path) == True:
        print("\nNo change in files")
        f.movePrevFile(path, prev_file)
        time.sleep(1)
        f.RenameCurrtoPrev(path, curr_file, prev_file)
        os._exit(0)

    # Something changed: write the diff results, then rotate and convert.
    c.addResultsFile(path, column_names, key)
    f.movePrevFile(path, prev_file)
    time.sleep(1)
    f.RenameCurrtoPrev(path, curr_file, prev_file)
    time.sleep(1)
    f.checkIfTSVExists(path, prev_file)
    time.sleep(1)
    f.convertToTabDelimitted(path, prev_file)
def createReference(cfg):
    """Write comboevt.asc: an ASCII grid whose data cells hold their
    1-based running cell index, with NODATA cells kept as -9999.

    Reads the merged output (outfile.txt) from the <outpath>/clean
    directory produced by mergeFiles(). Note the index counter advances
    on NODATA cells too (original behavior, preserved).
    """
    import logging
    logger = logging.getLogger(__name__)

    outpath = cfg.get('Main', 'outpath')
    inpath = checkPath('/'.join([startpath, outpath, 'clean']))

    outputmerged = open('/'.join([inpath, 'outfile.txt']), 'r')
    combo = open('/'.join([inpath, 'comboevt.asc']), 'wb')
    # ESRI ASCII grid header. Bug fix: the original omitted the trailing
    # newline, fusing the first data row onto the NODATA_value line.
    combo.write("""ncols 5226
nrows 5449
xllcorner -37215
yllcorner 1879835
cellsize 100
NODATA_value -9999
""")
    x = 0  # running 1-based cell index across the whole grid
    for item in outputmerged:
        for blow in item.split():
            x += 1
            if blow == '-9999':
                combo.write('-9999' + ' ')
            else:
                combo.write(str(x) + ' ')
        combo.write('\n')
    combo.close()
    outputmerged.close()  # bug fix: input handle was never closed
def delHeader(cfg, products, headerlines):
    """Strip header lines from each transposed .asc file.

    Copies <outpath>/<p>.asc to <outpath>/clean/<p>.asc, dropping the
    first ``headerlines + 1`` lines. The ``x > headerlines`` comparison
    (rather than ``>=``) is deliberate: transposeAscii writes one token
    per line, so a 6-line ASCII-grid header becomes 12 token lines and
    ``headerlines`` is the index of the last header token.

    Improvements over the original: writes directly to the destination
    instead of round-tripping through a hard-coded c:/tmp/file.txt, and
    closes every file handle (the originals leaked all three).
    """
    inpath = '/'.join([startpath, cfg.get('Main', 'outpath')])
    outpath = checkPath('/'.join([inpath, 'clean']))
    for p in products:  # start grid products loop
        ascfile = '/'.join([inpath, p + '.asc'])
        print(ascfile)
        src = open(ascfile, 'r')
        outfile = open('/'.join([outpath, p + '.asc']), 'wb')
        x = 0
        for line in src:
            if x > headerlines:  # skip the header token lines
                outfile.write(line)
            x += 1
        src.close()
        outfile.close()
        print('%s header has been stripped clean' % ascfile)
def main():
    """Compare previous/current CSV snapshots, record a results file when
    they differ, then rotate current -> previous."""
    path = '/home/drewnicolette/Desktop/compare/'
    column_names = ['id','first','last','salary']

    # Preconditions: working dir, previous snapshot, archive dirs.
    f.checkPath(path)
    f.checkPrevFileExists(path)
    f.checkArchiveDirsExist(path)

    # Identical checksums: nothing changed, rotate the files and exit.
    if f.checkSum(path) == True:
        print("\nNo change in files")
        f.movePrevFile(path)
        time.sleep(1)
        f.RenameCurrtoPrev(path)
        os._exit(0)

    c.addResultsFile(path, column_names)
    # Check if results file has zero or one file in it.
    # NiFi should be running and consuming those files so there should be
    # more than one.
    f.movePrevFile(path)
    time.sleep(1)
    f.RenameCurrtoPrev(path)
def transposeAscii(cfg, products):
    """Explode each <grid>.txt ASCII export into <grid>.asc with one
    whitespace-separated token per output line.

    Args:
        cfg: ConfigParser providing [Main] 'outpath'.
        products: iterable of grid names; <outpath>/<grid>.txt is read and
            <outpath>/<grid>.asc is written.
    """
    outpath = checkPath('/'.join([startpath, cfg.get('Main', 'outpath')]))
    for grid in products:
        infile = '/'.join([outpath, grid + '.txt'])
        # NOTE: the .asc file is created before the read is attempted, so a
        # failed read still leaves an empty output file (original behavior).
        outfile = open('/'.join([outpath, grid + '.asc']), 'wb')
        print(infile)
        try:
            f = open(infile, 'r')
            try:
                for line in f:
                    # split the line on whitespace, one token per output line
                    outfile.writelines([tok + '\n' for tok in line.split()])
            finally:
                f.close()  # bug fix: guaranteed even if the read raises
        except IOError:
            print("Error opening %s" % infile)
        outfile.close()  # bug fix: the output handle was never closed
def mergeFiles(cfg, products):
    """Zip the per-product single-column .asc files into one
    space-delimited outfile.txt, one row per grid cell.

    'siter' drives the row count; every other product file is read in
    lockstep, one line per row, in FIREHARM column order:
    site evt nfdr fb40 flm treel dem asp slp lat lon lai sdep sand silt
    clay rshd dbh bcf lcr ch cbd

    Args:
        cfg: ConfigParser providing [Main] 'outpath'.
        products: iterable of product names; each <name>.asc must exist in
            <outpath>/clean and must include every name in the column order.

    Returns:
        None.
    """
    import logging
    logger = logging.getLogger(__name__)

    outpath = cfg.get('Main', 'outpath')
    inpath = checkPath('/'.join([startpath, outpath, 'clean']))

    fD = {}
    for p in products:
        fD[p] = open('/'.join([inpath, '%s.asc' % (p)]), 'r')

    outputmerged = open('/'.join([inpath, 'outfile.txt']), 'wb')
    logger.debug('open products: %s', fD.keys())

    # Column order after the leading 'siter' value (see docstring).
    order = ['evt', 'nfdrr', 'fbfm', 'flm', 'tlg', 'dem', 'asp', 'slp',
             'latr', 'lonr', 'lair', 'sdepr', 'sand', 'silt', 'clay',
             'm3kgd', 'dbhr', 'bcfr', 'lcrr', 'ch', 'cbd']
    for line in fD['siter']:
        fields = [line.rstrip()]
        for name in order:
            fields.append(fD[name].readline().strip())
        outputmerged.write(' '.join(fields))
        outputmerged.write('\n')

    outputmerged.close()
    for fh in fD.values():
        fh.close()  # bug fix: input handles were never closed
    return None
def timesFixer(cfg, ten, hundred, thousand, million):
    """Rescale the cleaned single-column .asc files in place.

    Products were exported as scaled integers; divide every value by the
    factor its list implies (10, 100, 1000 or 1000000). Pass-through
    rules, preserved from the original:
      * any '-9999' NODATA line is copied unchanged;
      * for 'lair' (million tier) the sentinel line '1' is kept as-is;
      * for 'lcrr' (thousand tier) the sentinel line '1000' is kept as-is.

    Improvement over the original: the four copy-pasted loops are folded
    into one helper, and the hard-coded c:/tmp/calctmp.txt round trip is
    replaced by an in-memory rewrite.

    NOTE(review): relies on a module-level ``logger`` (as the original
    did) -- confirm one is defined at file scope.
    """
    outpath = checkPath('/'.join(
        [startpath, cfg.get('Main', 'outpath'), 'clean']))

    def _rescale(p, divisor, keep=None):
        # Divide every value in <outpath>/<p>.asc by *divisor*, rewriting
        # the file in place; '-9999\n' and *keep* lines pass through.
        fn = '/'.join([outpath, '%s.asc' % (p)])
        src = open(fn, 'r')
        fixed = []
        for item in src:
            if item == '-9999\n':
                fixed.append(item)
            elif keep is not None and item == keep:
                fixed.append(item)
            else:
                fixed.append(str(float(item) / divisor) + '\n')
        src.close()
        out = open(fn, 'w')
        out.writelines(fixed)
        out.close()

    for p in million:
        logger.debug('Processing %s for million' % p)
        _rescale(p, 1000000, keep='1\n' if p == 'lair' else None)
    for p in thousand:
        logger.debug('Processing %s for thousand' % p)
        _rescale(p, 1000, keep='1000\n' if p == 'lcrr' else None)
    for p in hundred:
        logger.debug('Processing %s for hundred' % p)
        _rescale(p, 100)
    for p in ten:
        logger.debug('Processing %s for ten' % p)
        _rescale(p, 10)