def runSequentially(self):
        """Process every file in self.files one at a time in this process.

        Progress and errors are logged to a <jobname>.run status file in
        self.dstDir; when processing finishes the file is renamed to
        <jobname>.done, or <jobname>.error if more than self.errorNum
        errors occurred. Processing stops early once the error limit is
        exceeded.
        """
        # set up everything needed for logging the errors
        root = logging.getLogger()
        root.setLevel(logging.INFO)
        statusFileName = path_helpers.createPathWExtention(self.dstDir, self.jobname, ".run")
        fileHandler = logging.FileHandler(statusFileName)
        logFormat = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
        fileHandler.setFormatter(logFormat)
        root.addHandler(fileHandler)

        numberOfFiles = len(self.files)
        numberOfErrors = 0
        bTime = time.time()

        # The file processing occurs here
        logQueue = None  # sequential run: no multiprocessing queue needed
        for i, (filename, infodict) in enumerate(zip(self.files, self.infoDicts)):
            if numberOfErrors > self.errorNum:
                root.info("The job encountered %d errors and the max number of them allowed is %d" % (numberOfErrors, self.errorNum))
                break
            try:
                # FileRunner returns an object whose exitSuccess attribute
                # tells whether the file was processed (True) or skipped
                exitcode = filerunner.FileRunner(logQueue, filename, self.version,
                                                 self.lastVersion, self.modname, self.updatemod,
                                                 self.params, self.funcDicts, self.srcDir,
                                                 self.dstDir, self.rawDataDir, infodict['reference_Eo'], infodict['technique_name'])

                if exitcode.exitSuccess:
                    root.info('File %s completed  %d/%d' % (os.path.basename(filename), i + 1, numberOfFiles))
            except Exception as someException:
                # root.exception would log an ERROR with printed traceback;
                # root.error logs an ERROR without the traceback
                root.error('Exception raised in file %s:\n' % filename + repr(someException))
                numberOfErrors += 1
                exitcode = -1

        eTime = time.time()
        root.info("Processed for %s H:M:S" % (str(datetime.timedelta(seconds=eTime - bTime)),))
        timeStamp = time.strftime('%Y%m%d%H%M%S', time.gmtime())

        # closing the fileHandler is important or else we cannot rename the file
        root.removeHandler(fileHandler)
        fileHandler.close()

        # rename the run file to reflect how processing ended; if the plain
        # name cannot be used (e.g. the target already exists on Windows),
        # fall back to a timestamped name
        extension = ".error" if numberOfErrors > self.errorNum else ".done"
        try:
            os.rename(statusFileName, path_helpers.createPathWExtention(self.dstDir, self.jobname, extension))
        except OSError:
            os.rename(statusFileName, path_helpers.createPathWExtention(self.dstDir, self.jobname + timeStamp, extension))
    def runParallel(self):
        """Process self.files in parallel with a multiprocessing Pool.

        Worker processes send log records through a managed Queue; a
        QueueListener in this main process drains the queue into a
        <jobname>.run status file (only this process writes to the file
        handler, so logging stays safe). When all workers finish the
        status file is renamed to <jobname>.done, or <jobname>.error if
        the listener counted more than self.errorNum errors.
        """
        # the path to which to log - will change depending on the way
        #   processing ends and if a statusFile with the same
        #   name already exists
        statusFileName = path_helpers.createPathWExtention(self.dstDir, self.jobname, ".run")

        # set up the manager and objects required for logging due to multiprocessing
        pmanager = Manager()
        # this queue takes messages from individual processes and passes them
        #   to the QueueListener
        loggingQueue = pmanager.Queue()
        processPool = Pool()
        # handler for the logging file
        fileHandler = logging.FileHandler(statusFileName)
        logFormat = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
        fileHandler.setFormatter(logFormat)
        # the QueueListener takes messages from the logging queue and passes
        #   them to the fileHandler (logs safely because only this main
        #   process writes to the fileHandler)
        fileLogger = QueueListener(loggingQueue, fileHandler)
        fileLogger.start()

        # keep track of when processing started
        bTime = time.time()

        # one argument tuple per file, consumed by makeFileRunner in a worker
        jobs = [(loggingQueue, filename, self.version, self.lastVersion,
                 self.modname, self.updatemod, self.params, self.funcDicts,
                 self.srcDir, self.dstDir, self.rawDataDir,
                 infodict['reference_Eo'], infodict['technique_name'])
                for (filename, infodict) in zip(self.files, self.infoDicts)]

        # map blocks until every job has been processed
        processPool.map(makeFileRunner, jobs)
        # keep track of when processing ended
        eTime = time.time()
        timeStamp = time.strftime('%Y%m%d%H%M%S', time.gmtime())

        # clean up the pool
        processPool.close()
        processPool.join()

        root = logging.getLogger()
        if fileLogger.errorCount > self.errorNum:
            root.info("The job encountered %d errors and the max number of them allowed is %d" % (fileLogger.errorCount, self.errorNum))
        root.info("Processed for %s H:M:S" % (str(datetime.timedelta(seconds=eTime - bTime)),))

        # stop the listener and close the handler so the file can be renamed
        fileLogger.stop()
        fileHandler.close()

        # rename the run file to reflect how processing ended; if the plain
        # name cannot be used (e.g. the target already exists on Windows),
        # fall back to a timestamped name
        extension = ".error" if fileLogger.errorCount > self.errorNum else ".done"
        try:
            os.rename(statusFileName, path_helpers.createPathWExtention(self.dstDir, self.jobname, extension))
        except OSError:
            os.rename(statusFileName, path_helpers.createPathWExtention(self.dstDir, self.jobname + timeStamp, extension))