Example #1
    def process(self, filename, csv_folder):
        # unzip file
        with zipfile.ZipFile(filename) as myzip:
            for name in myzip.namelist():
                myzip.extract(name, csv_folder)
        # get the protocol file
        protocol_path = os.path.join(csv_folder, "protocol.obsprot")
        fgdb_folder = self.root_folder
        database, protocol_json = CsvLoader.DatabaseCreator.database_for_protocol_file(protocol_path, fgdb_folder)
        # CSVLoad file
        CsvLoader.process_csv_folder(csv_folder, protocol_json, database)
Example #2
import os
import shutil
import tempfile
import zipfile

import CsvLoader


def process(archive):
    extraction_folder = tempfile.mkdtemp()
    try:
        # unzip file
        with zipfile.ZipFile(archive) as myzip:
            for name in myzip.namelist():
                myzip.extract(name, extraction_folder)
        # get the protocol file
        protocol_path = os.path.join(extraction_folder, "protocol.obsprot")
        fgdb_folder = os.path.dirname(archive)
        database, protocol_json = CsvLoader.DatabaseCreator.database_for_protocol_file(protocol_path, fgdb_folder)
        # CSVLoad file
        CsvLoader.process_csv_folder(extraction_folder, protocol_json, database)
    finally:
        shutil.rmtree(extraction_folder)
Example #3
import json

import CsvLoader


def beat_loc_and_signal(address, begin, end):
    with open(address + '/beat_loc.json') as f:
        file_beat = json.load(f)[begin:end]

    # raw = CsvLoader.load_dummy(1)
    file_raw = CsvLoader.load(address + '/record.csv')[begin:end]

    return file_beat, file_raw
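A hypothetical call, assuming a record folder laid out like the TA-Data/MIT_BIH/<record> folders used in later examples (one containing beat_loc.json and record.csv):

# hypothetical usage: annotations and raw signal for samples 100..600 of one record
beats, signal = beat_loc_and_signal('TA-Data/MIT_BIH/106', 100, 600)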
Example #4
    def upload_data(self, file, extension):
        if extension.upper() == ".JSON" or extension.upper() == ".GEOJSON":
            self._json_dict = json.loads(file)
        elif extension.upper() == ".CSV":
             self._json_dict = CsvLoader.loadAsJson(file)

        if not self._json_dict:
            self._status += "Can't convert data into the right JSON format"
        else:
            if self._check_case_variant():
                self._parse_features()

        logging.debug(self._status)
        return self._status
Example #5
import CsvLoader
import matplotlib.pyplot as plt
import numpy as np
from scipy.signal import lfilter, convolve

data_source = CsvLoader.load()[:600]

'''
LOW PASS FILTERING
'''
b = [1, 0, 0, 0, 0, 0, -2, 0, 0, 0, 0, 0, 1]
a = [1, -2, 1]
lowed = lfilter(b, a, data_source)

'''
HIGH PASS FILTERING
'''
b = [-1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, -32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
a = [1, -1]
highed = lfilter(b, a, lowed)

'''
DERIVATIVE
'''
h = [x/8. for x in [-1, -2, 0, 2, 1]]
derr = convolve(highed, h)[2:]  # cancel delay
abss = [abs(x) for x in derr]
max_abs = max(abss)
derr = [x/max_abs for x in derr]

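The listing breaks off at an unterminated comment block. In the standard Pan-Tompkins pipeline the derivative stage is typically followed by squaring and moving-window integration; the sketch below is an assumed continuation, not part of the original listing.

'''
SQUARING AND MOVING-WINDOW INTEGRATION (assumed continuation)
'''
squared = [x * x for x in derr]  # emphasize large derivative values
window = 30                      # ~150 ms at 200 samples/second (assumed rate)
integrated = np.convolve(squared, np.ones(window) / window)[:len(squared)]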
Example #6
  def evaluateSample(self, myInput, samplerType, kwargs):
    """
        This will evaluate an individual sample on this model. Note, parameters
        are needed by createNewInput and thus descriptions are copied from there.
        @ In, myInput, list, the inputs (list) to start from to generate the new one
        @ In, samplerType, string, is the type of sampler that is calling to generate a new input
        @ In, kwargs, dict,  is a dictionary that contains the information coming from the sampler,
           a mandatory key is the sampledVars that contains a dictionary {'name variable':value}
        @ Out, returnValue, tuple, This will hold two pieces of information,
          the first item will be the input data used to generate this sample,
          the second item will be the output of this model given the specified
          inputs
    """
    inputFiles = self.createNewInput(myInput, samplerType, **kwargs)
    self.currentInputFiles, metaData = (copy.deepcopy(inputFiles[0]),inputFiles[1]) if type(inputFiles).__name__ == 'tuple' else (inputFiles, None)
    returnedCommand = self.code.genCommand(self.currentInputFiles,self.executable, flags=self.clargs, fileArgs=self.fargs, preExec=self.preExec)

    ## Given that createNewInput can only return a tuple, I don't think these
    ## checks are necessary (keeping commented out until someone else can verify):
    # if type(returnedCommand).__name__ != 'tuple':
    #   self.raiseAnError(IOError, "the generateCommand method in code interface must return a tuple")
    # if type(returnedCommand[0]).__name__ != 'list':
    #   self.raiseAnError(IOError, "the first entry in tuple returned by generateCommand method needs to be a list of tuples!")
    executeCommand, self.outFileRoot = returnedCommand

    precommand = kwargs['precommand']
    postcommand = kwargs['postcommand']
    bufferSize = kwargs['bufferSize']
    fileExtensionsToDelete = kwargs['deleteOutExtension']
    deleteSuccessfulLogFiles = kwargs['delSucLogFiles']

    codeLogFile = self.outFileRoot
    if codeLogFile is None:
      codeLogFile = os.path.join(metaData['subDirectory'],'generalOut')

    ## Before we were temporarily changing directories in order to copy the
    ## correct directory to the subprocess. Instead, we can just set the
    ## directory after we copy it over. -- DPM 5/5/2017
    sampleDirectory = os.path.join(os.getcwd(),metaData['subDirectory'])
    localenv = dict(os.environ)
    localenv['PWD'] = str(sampleDirectory)
    outFileObject = open(os.path.join(sampleDirectory,codeLogFile), 'w', bufferSize)

    found = False
    for index, inputFile in enumerate(self.currentInputFiles):
      if inputFile.getExt() in self.code.getInputExtension():
        found = True
        break
    if not found:
      self.raiseAnError(IOError,'None of the input files has one of the extensions requested by code '
                                  + self.subType +': ' + ' '.join(self.getInputExtension()))
    commands=[]
    for runtype,cmd in executeCommand:
      newCommand=''
      if runtype.lower() == 'parallel':
        newCommand += precommand
        newCommand += cmd+' '
        newCommand += postcommand
        commands.append(newCommand)
      elif runtype.lower() == 'serial':
        commands.append(cmd)
      else:
        self.raiseAnError(IOError,'For execution command <'+cmd+'> the run type was neither "serial" nor "parallel"!  Instead received: ',runtype,'\nPlease check the code interface.')

    command = ' && '.join(commands)+' '

    command = command.replace("%INDEX%",kwargs['INDEX'])
    command = command.replace("%INDEX1%",kwargs['INDEX1'])
    command = command.replace("%CURRENT_ID%",kwargs['CURRENT_ID'])
    command = command.replace("%CURRENT_ID1%",kwargs['CURRENT_ID1'])
    command = command.replace("%SCRIPT_DIR%",kwargs['SCRIPT_DIR'])
    command = command.replace("%FRAMEWORK_DIR%",kwargs['FRAMEWORK_DIR'])
    ## Note this is the working directory that the subprocess will use, it is
    ## not the directory I am currently working. This bit me as I moved the code
    ## from the old ExternalRunner because in that case this was filled in after
    ## the process was submitted by the process itself. -- DPM 5/4/17
    command = command.replace("%WORKING_DIR%",sampleDirectory)
    command = command.replace("%BASE_WORKING_DIR%",kwargs['BASE_WORKING_DIR'])
    command = command.replace("%METHOD%",kwargs['METHOD'])
    command = command.replace("%NUM_CPUS%",kwargs['NUM_CPUS'])

    self.raiseAMessage('Execution command submitted:',command)
    if platform.system() == 'Windows':
      command = self._expandForWindows(command)
      self.raiseAMessage("modified command to", repr(command))
      for key, value in localenv.items():
        localenv[key]=str(value)
    elif not self.code.getRunOnShell():
      command = self._expandCommand(command)
    ## reset python path
    localenv.pop('PYTHONPATH',None)
    ## This code should be evaluated by the job handler, so it is fine to wait
    ## until the execution of the external subprocess completes.
    process = utils.pickleSafeSubprocessPopen(command, shell=self.code.getRunOnShell(), stdout=outFileObject, stderr=outFileObject, cwd=localenv['PWD'], env=localenv)
    if self.maxWallTime is not None:
      timeout = time.time() + self.maxWallTime
      while True:
        time.sleep(0.5)
        process.poll()
        if time.time() > timeout and process.returncode is None:
          self.raiseAWarning('walltime exceeded in run in working dir: '+str(metaData['subDirectory'])+'. Killing the run...')
          process.kill()
          process.returncode = -1
        if process.returncode is not None or time.time() > timeout:
          break
    else:
      process.wait()

    returnCode = process.returncode
    # procOutput = process.communicate()[0]

    ## If the returnCode is already non-zero, we should maintain our current
    ## value as it may have some meaning that can be parsed at some point, so
    ## only set the returnCode to -1 in here if we did not already catch the
    ## failure.
    if returnCode == 0 and 'checkForOutputFailure' in dir(self.code):
      codeFailed = self.code.checkForOutputFailure(codeLogFile, metaData['subDirectory'])
      if codeFailed:
        returnCode = -1
    # close the log file
    outFileObject.close()
    ## We should try and use the output the code interface gives us first, but
    ## in lieu of that we should fall back on the standard output of the code
    ## (Which was deleted above in some cases, so I am not sure if this was
    ##  an intentional design by the original developer or accidental and should
    ##  be revised).
    ## My guess is that every code interface implements this given that the code
    ## below always adds .csv to the filename and the standard output file does
    ## not have an extension. - (DPM 4/6/2017)
    outputFile = codeLogFile
    ## Special case for RAVEN interface --ALFOA 09/17/17
    ## (ravenCase is initialized before the conditional so the "not ravenCase"
    ##  checks below cannot hit an undefined name when finalizeCodeOutput is absent)
    ravenCase = False
    if 'finalizeCodeOutput' in dir(self.code) and returnCode == 0:
      finalCodeOutputFile = self.code.finalizeCodeOutput(command, codeLogFile, metaData['subDirectory'])
      if type(finalCodeOutputFile).__name__ == 'dict':
        ravenCase = True
      if ravenCase and self.code.__class__.__name__ != 'RAVEN':
        self.raiseAnError(RuntimeError, 'The return argument from "finalizeCodeOutput" must be a str containing the new output file root!')
      if finalCodeOutputFile and not ravenCase:
        outputFile = finalCodeOutputFile

    ## If the run was successful
    if returnCode == 0:
      ## This may be a tautology at this point --DPM 4/12/17
      ## Special case for RAVEN interface. Added ravenCase flag --ALFOA 09/17/17
      if outputFile is not None and not ravenCase:
        outFile = Files.CSV()
        ## Should we be adding the file extension here?
        outFile.initialize(outputFile+'.csv',self.messageHandler,path=metaData['subDirectory'])

        csvLoader = CsvLoader.CsvLoader(self.messageHandler)
        # does this CodeInterface have sufficiently intense (or limited) CSV files that
        #   it needs to assume floats and use numpy, or can we use pandas?
        loadUtility = self.code.getCsvLoadUtil()
        csvData = csvLoader.loadCsvFile(outFile.getAbsFile(), nullOK=False, utility=loadUtility)
        returnDict = csvLoader.toRealization(csvData)

      if not ravenCase:
        self._replaceVariablesNamesWithAliasSystem(returnDict, 'inout', True)
        returnDict.update(kwargs)
        returnValue = (kwargs['SampledVars'],returnDict)
        exportDict = self.createExportDictionary(returnValue)
      else:
        # we have the DataObjects -> raven-runs-raven case only so far
        # we have two tasks to do: collect the input/output/meta/indexes from the INNER raven run, and ALSO the input from the OUTER raven run.
        #  -> in addition, we have to fix the probability weights.
        ## get the number of realizations
        ### we already checked consistency in the CodeInterface, so just get the length of the first data object
        numRlz = len(utils.first(finalCodeOutputFile.values()))
        ## set up the return container
        exportDict = {'RAVEN_isBatch':True,'realizations':[]}
        ## set up each realization
        for n in range(numRlz):
          rlz = {}
          ## collect the results from INNER, both point set and history set
          for dataObj in finalCodeOutputFile.values():
            # TODO FIXME check for overwriting data.  For now just replace data if it's duplicate!
            new = dict((var,np.atleast_1d(val)) for var,val in dataObj.realization(index=n,unpackXArray=True).items())
            rlz.update( new )
          ## add OUTER input space
          # TODO FIXME check for overwriting data.  For now just replace data if it's duplicate!
          new = dict((var,np.atleast_1d(val)) for var,val in kwargs['SampledVars'].items())
          rlz.update( new )
          ## combine ProbabilityWeights # TODO FIXME these are a rough attempt at getting it right!
          rlz['ProbabilityWeight'] = np.atleast_1d(rlz.get('ProbabilityWeight',1.0) * kwargs.get('ProbabilityWeight',1.0))
          rlz['PointProbability'] = np.atleast_1d(rlz.get('PointProbability',1.0) * kwargs.get('PointProbability',1.0))
          # FIXME: adding "_n" to Optimizer samples scrambles its ability to find evaluations!
          ## temporary fix: only append if there's multiple realizations, and error out if sampler is an optimizer.
          if numRlz > 1:
            if '_' in kwargs['prefix']:
              self.raiseAnError(RuntimeError,'OUTER RAVEN is using an OPTIMIZER, but INNER RAVEN is returning multiple realizations!')
            addon = '_{}'.format(n)
          else:
            addon = ''
          rlz['prefix'] = np.atleast_1d(kwargs['prefix']+addon)
          ## add the rest of the metadata # TODO slow
          for var,val in kwargs.items():
            if var not in rlz.keys():
              rlz[var] = np.atleast_1d(val)
          self._replaceVariablesNamesWithAliasSystem(rlz,'inout',True)
          exportDict['realizations'].append(rlz)

      ## The last thing before returning should be to delete the temporary log
      ## file and any other file the user requests to be cleared
      if deleteSuccessfulLogFiles:
        self.raiseAMessage(' Run "' +kwargs['prefix']+'" ended smoothly, removing log file!')
        codeLogFileFullPath = os.path.join(metaData['subDirectory'],codeLogFile)
        if os.path.exists(codeLogFileFullPath):
          os.remove(codeLogFileFullPath)

      ## Check if the user specified any file extensions for clean up
      for fileExt in fileExtensionsToDelete:
        fileList = [ os.path.join(metaData['subDirectory'],f) for f in os.listdir(metaData['subDirectory']) if f.endswith(fileExt) ]
        for f in fileList:
          os.remove(f)

      return exportDict

    else:
      self.raiseAMessage(" Process Failed "+str(command)+" returnCode "+str(returnCode))
      absOutputFile = os.path.join(sampleDirectory,outputFile)
      if os.path.exists(absOutputFile):
        self.raiseAMessage(repr(open(absOutputFile,"r").read()).replace("\\n","\n"))
      else:
        self.raiseAMessage(" No output " + absOutputFile)

      ## If you made it here, then the run must have failed
      return None
Example #7
import CsvLoader
source = CsvLoader.load()

"""
Delay data: 12 second (2400 sample)
Sample Rate: 200/second (5ms)

Using PanTom
1. import PanTom.py
2. instantiate Detector object
3. call function add_data(raw)

return:
check the returned[0] if
['filling']:
    function return 'filling', detection step is skipped, algorithm still filling buffer
[Filtered Data, PVC, PAC, BUNDLE_BRANCH, AtrialTachycardia, VentricularTachycardia, BundleBranchBlock] :
    [0] is filtered data, send to chart via MQTT
    [1..6] is classification
"""

from PanTom import *
detector = Detector()
for raw in source:
    # detector.add_data(raw, True)  # view plot
    detector.add_data(raw)  # deployment

    # dat = detector.add_data(raw)  # print test, return
    # if dat[0] != 'filling':
    #     print(dat[0], dat[1])
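A hedged consumer sketch for the non-'filling' case; the slot order of [1..6] is assumed from the docstring above:

LABELS = ['PVC', 'PAC', 'BUNDLE_BRANCH', 'AtrialTachycardia',
          'VentricularTachycardia', 'BundleBranchBlock']
for raw in source:
    result = detector.add_data(raw)
    if result[0] != 'filling':
        filtered = result[0]                    # filtered samples for the chart
        flags = dict(zip(LABELS, result[1:7]))  # classification slots [1..6]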
Example #8
import paho.mqtt.client as mqtt
import CsvLoader
from time import sleep

CLIENT_ID = '02WXO01'  # not defined in the original snippet; assumed here to match Example #10

# Test sample
# numbers = [100]  # normal
numbers = [106]  # many PVC

INDEX_LIMIT = 1000


def build_message(index, sample):
    next_index = 1 if index + 1 > INDEX_LIMIT else index + 1
    return next_index, str(index) + ':' + str(sample)


if __name__ == '__main__':
    client = mqtt.Client(client_id=CLIENT_ID)
    client.connect("localhost", 1883, 60)
    for number in numbers:
        number = 'TA-Data/MIT_BIH/' + str(number)
        index = 0

        print(number)

        # raw = CsvLoader.load(number + '/record.csv')[:2935]  # 8 second
        # raw = CsvLoader.load(number + '/record.csv')[:12000] # 1 minute
        raw = CsvLoader.load(number + '/record.csv')[9360:]  # cut up
        # raw = CsvLoader.load(number + '/record.csv') # till end
        for data in raw:
            index, message = build_message(index, data)
            client.publish(CLIENT_ID + "/sensor", message)
            sleep(0.005)
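A quick check of build_message's wrap-around behavior, following directly from the code above:

# the index advances by one, wrapping back to 1 once it would exceed INDEX_LIMIT
assert build_message(5, 0.42) == (6, '5:0.42')
assert build_message(INDEX_LIMIT, 0.42) == (1, str(INDEX_LIMIT) + ':0.42')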
Example #9
# # for ecg in ecg_h:
# #     print ecg
#
# # # ax(3) = subplot(323)
# # fig, (ax_orig, ax_filter) = plt.subplots(2, 1, sharex=True)
# # ax_orig.plot(data_source)
# # ax_orig.set_title('Original pulse')
# # ax_filter.plot(ecg_h)
# # ax_filter.set_title('Band Pass Filtered')
# # fig.tight_layout()
# # plt.show()

import matplotlib.pyplot as plt
import CsvLoader
from PanTom import *

detector = Detector()
source = CsvLoader.load()

for data in source:
    dat = detector.add_data(data, True)
    if dat[0] != 'filling':
        print(dat[0], dat[1])


# fig, (ax_orig, ax_filter) = plt.subplots(2, 1, sharex=True)
# ax_orig.plot(data_source[:200])
# ax_filter.plot(res)
# plt.tight_layout()
# plt.show()
Example #10
import paho.mqtt.client as mqtt
import CsvLoader
import time

from PanTom import Detector
detector = Detector()

CLIENT_ID = '02WXO01'

data_csv = CsvLoader.load()
client = mqtt.Client(client_id=CLIENT_ID)
client.connect("localhost", 1883, 60)

# device_id = msg.topic.split('/')[1]
device_id = "02WXO01"

for raw in data_csv:
    data = detector.add_data(float(raw))
    if data[0] != 'filling':
        # send the bpm first
        bpm = round(float(sum(data[7])) / len(data[7]), 2)
        # client.publish("bpm/"+device_id, bpm, qos=2)  # this caused an error
        client.publish("bpm/" + device_id, bpm)

        for filtered_data in data[0]:
            print "forwarding data to dashboard", filtered_data
            # client.publish("visual/"+device_id, filtered_data, qos=2)
            client.publish("visual/" + device_id, filtered_data)

            time.sleep(0.010)
Example #11
  def evaluateSample(self, myInput, samplerType, kwargs):
    """
        This will evaluate an individual sample on this model. Note, parameters
        are needed by createNewInput and thus descriptions are copied from there.
        @ In, myInput, list, the inputs (list) to start from to generate the new one
        @ In, samplerType, string, is the type of sampler that is calling to generate a new input
        @ In, kwargs, dict,  is a dictionary that contains the information coming from the sampler,
           a mandatory key is the sampledVars that contains a dictionary {'name variable':value}
        @ Out, returnValue, tuple, This will hold two pieces of information,
          the first item will be the input data used to generate this sample,
          the second item will be the output of this model given the specified
          inputs
    """
    inputFiles = self.createNewInput(myInput, samplerType, **kwargs)
    self.currentInputFiles, metaData = (copy.deepcopy(inputFiles[0]),inputFiles[1]) if type(inputFiles).__name__ == 'tuple' else (inputFiles, None)
    returnedCommand = self.code.genCommand(self.currentInputFiles,self.executable, flags=self.clargs, fileArgs=self.fargs, preExec=self.preExec)

    ## Given that createNewInput can only return a tuple, I don't think these
    ## checks are necessary (keeping commented out until someone else can verify):
    # if type(returnedCommand).__name__ != 'tuple':
    #   self.raiseAnError(IOError, "the generateCommand method in code interface must return a tuple")
    # if type(returnedCommand[0]).__name__ != 'list':
    #   self.raiseAnError(IOError, "the first entry in tuple returned by generateCommand method needs to be a list of tuples!")
    executeCommand, self.outFileRoot = returnedCommand

    precommand = kwargs['precommand']
    postcommand = kwargs['postcommand']
    bufferSize = kwargs['bufferSize']
    fileExtensionsToDelete = kwargs['deleteOutExtension']
    deleteSuccessfulLogFiles = kwargs['delSucLogFiles']

    codeLogFile = self.outFileRoot
    if codeLogFile is None:
      codeLogFile = os.path.join(metaData['subDirectory'],'generalOut')

    ## Before we were temporarily changing directories in order to copy the
    ## correct directory to the subprocess. Instead, we can just set the
    ## directory after we copy it over. -- DPM 5/5/2017
    sampleDirectory = os.path.join(os.getcwd(),metaData['subDirectory'])
    localenv = dict(os.environ)
    localenv['PWD'] = str(sampleDirectory)

    outFileObject = open(os.path.join(sampleDirectory,codeLogFile), 'w', bufferSize)

    found = False
    for index, inputFile in enumerate(self.currentInputFiles):
      if inputFile.getExt() in self.code.getInputExtension():
        found = True
        break
    if not found:
      self.raiseAnError(IOError,'None of the input files has one of the extensions requested by code '
                                  + self.subType +': ' + ' '.join(self.getInputExtension()))
    commands=[]
    for runtype,cmd in executeCommand:
      newCommand=''
      if runtype.lower() == 'parallel':
        newCommand += precommand
        newCommand += cmd+' '
        newCommand += postcommand
        commands.append(newCommand)
      elif runtype.lower() == 'serial':
        commands.append(cmd)
      else:
        self.raiseAnError(IOError,'For execution command <'+cmd+'> the run type was neither "serial" nor "parallel"!  Instead received: ',runtype,'\nPlease check the code interface.')

    command = ' && '.join(commands)+' '

    command = command.replace("%INDEX%",kwargs['INDEX'])
    command = command.replace("%INDEX1%",kwargs['INDEX1'])
    command = command.replace("%CURRENT_ID%",kwargs['CURRENT_ID'])
    command = command.replace("%CURRENT_ID1%",kwargs['CURRENT_ID1'])
    command = command.replace("%SCRIPT_DIR%",kwargs['SCRIPT_DIR'])
    command = command.replace("%FRAMEWORK_DIR%",kwargs['FRAMEWORK_DIR'])
    ## Note this is the working directory that the subprocess will use, it is
    ## not the directory I am currently working. This bit me as I moved the code
    ## from the old ExternalRunner because in that case this was filled in after
    ## the process was submitted by the process itself. -- DPM 5/4/17
    command = command.replace("%WORKING_DIR%",sampleDirectory)
    command = command.replace("%BASE_WORKING_DIR%",kwargs['BASE_WORKING_DIR'])
    command = command.replace("%METHOD%",kwargs['METHOD'])
    command = command.replace("%NUM_CPUS%",kwargs['NUM_CPUS'])

    self.raiseAMessage('Execution command submitted:',command)
    if platform.system() == 'Windows':
      command = self._expandForWindows(command)
      self.raiseAMessage("modified command to" + repr(command))

    ## This code should be evaluated by the job handler, so it is fine to wait
    ## until the execution of the external subprocess completes.
    process = utils.pickleSafeSubprocessPopen(command, shell=True, stdout=outFileObject, stderr=outFileObject, cwd=localenv['PWD'], env=localenv)
    process.wait()

    returnCode = process.returncode
    # procOutput = process.communicate()[0]

    ## If the returnCode is already non-zero, we should maintain our current
    ## value as it may have some meaning that can be parsed at some point, so
    ## only set the returnCode to -1 in here if we did not already catch the
    ## failure.
    if returnCode == 0 and 'checkForOutputFailure' in dir(self.code):
      codeFailed = self.code.checkForOutputFailure(codeLogFile, metaData['subDirectory'])
      if codeFailed:
        returnCode = -1
    # close the log file
    outFileObject.close()
    ## We should try and use the output the code interface gives us first, but
    ## in lieu of that we should fall back on the standard output of the code
    ## (Which was deleted above in some cases, so I am not sure if this was
    ##  an intentional design by the original developer or accidental and should
    ##  be revised).
    ## My guess is that every code interface implements this given that the code
    ## below always adds .csv to the filename and the standard output file does
    ## not have an extension. - (DPM 4/6/2017)
    outputFile = codeLogFile
    if 'finalizeCodeOutput' in dir(self.code):
      finalCodeOutputFile = self.code.finalizeCodeOutput(command, codeLogFile, metaData['subDirectory'])
      if finalCodeOutputFile:
        outputFile = finalCodeOutputFile

    ## If the run was successful
    if returnCode == 0:

      returnDict = {}
      ## This may be a tautology at this point --DPM 4/12/17
      if outputFile is not None:
        outFile = Files.CSV()
        ## Should we be adding the file extension here?
        outFile.initialize(outputFile+'.csv',self.messageHandler,path=metaData['subDirectory'])

        csvLoader = CsvLoader.CsvLoader(self.messageHandler)
        csvData = csvLoader.loadCsvFile(outFile)
        headers = csvLoader.getAllFieldNames()

        ## Numpy by default iterates over rows, thus we transpose the data and
        ## zip it with the headers in order to do store it very cleanly into a
        ## dictionary.
        for header,data in zip(headers, csvData.T):
          returnDict[header] = data

      self._replaceVariablesNamesWithAliasSystem(returnDict, 'input', True)
      self._replaceVariablesNamesWithAliasSystem(returnDict, 'output', True)

      ## The last thing before returning should be to delete the temporary log
      ## file and any other file the user requests to be cleared
      if deleteSuccessfulLogFiles:
        self.raiseAMessage(' Run "' +kwargs['prefix']+'" ended smoothly, removing log file!')
        codeLogFileFullPath = os.path.join(metaData['subDirectory'],codeLogFile)
        if os.path.exists(codeLogFileFullPath):
          os.remove(codeLogFileFullPath)

      ## Check if the user specified any file extensions for clean up
      for fileExt in fileExtensionsToDelete:
        if not fileExt.startswith("."):
          fileExt = "." + fileExt

        fileList = [ os.path.join(metaData['subDirectory'],f) for f in os.listdir(metaData['subDirectory']) if f.endswith(fileExt) ]

        for f in fileList:
          os.remove(f)

      returnValue = (kwargs['SampledVars'],returnDict)
      return returnValue
    else:
      self.raiseAMessage(" Process Failed "+str(command)+" returnCode "+str(returnCode))
      absOutputFile = os.path.join(sampleDirectory,outputFile)
      if os.path.exists(absOutputFile):
        self.raiseAMessage(repr(open(absOutputFile,"r").read()).replace("\\n","\n"))
      else:
        self.raiseAMessage(" No output " + absOutputFile)

      ## If you made it here, then the run must have failed
      return None