Example 1
    def report(self, available):

        print '  :'
        print '  :   LEVEL    : ' + ' '.join(
            [helpers.intf(level, table=1) for level in self.levels])
        print '  :--------------' + '-'.join(
            [helpers.intf(None, table=1, bar=1) for level in self.levels])

        print '  : Computed   :',
        for level in self.levels:
            print helpers.intf(self.available()[level], table=1),
        print

        if available:

            print '  : Optimal    :',
            for level in self.levels:
                print helpers.intf(self.optimal[level], table=1),
            print

            print '  : Updated    :',
            for level in self.levels:
                print helpers.intf(self.available()[level] +
                                   self.additional[level],
                                   table=1),
            print

            print '  : Additional :',
            for level in self.levels:
                print helpers.intf(self.additional[level], table=1),
            print
Example 2
    def run(self):

        # create MC simulations
        self.create_MCs(self.config.samples.indices.additional,
                        self.config.iteration)

        # report samples that will be computed
        if not self.config.deterministic:
            print
            print ' :: SAMPLES TO COMPUTE:',
            for count in self.config.samples.counts.additional:
                print helpers.intf(count, table=0),
            print

        # validate MC simulations
        for mc in self.mcs:
            mc.validate()

        header = '  :  LEVEL  |'
        separator = '  :----------'
        if not self.config.recycle:
            header += '   TYPE   |'
            separator += '-----------'
        header += '  RESOLUTION  |  SAMPLES  |  HARDWARE  |'
        separator += '---------------------------------------|'
        if local.cluster:
            header += '  WALLTIME  |  BATCH  ->  JOBS   |'
            separator += '---------------------------------|'
            if local.ensembles and not self.config.deterministic:
                header += '  MERGE  ->  ENSEMBLES  '
                separator += '------------------------'
        print header
        print separator

        # initialize submission file
        if self.config.deterministic:
            f = open(self.submission_file, 'w')
        else:
            f = open(self.submission_file + '.%d' % self.config.iteration,
                     'w')

        f.write(header + '\n')
        f.write(separator + '\n')

        # run MC simulations and update submission file
        for mc in self.mcs:
            info = mc.run()
            f.write(info + '\n')

        # finalize submission file
        f.write('\n')
        f.close()
Example 3
    def speedup(self, indicators, counts, forecast=False):

        if not self.available or self.total_error == 0:
            helpers.warning(
                'Speedup can not be estimated since total sampling error is not available'
            )
            self.speedup_mlmc = None
            self.speedup_ocv = None
            return

        if forecast:
            counts = counts.loaded + counts.additional
        else:
            counts = counts.loaded

        error = self.total(
            self.errors(indicators.variance_diff_opt['infered'], counts))

        # compute MLMC vs. MC speedup
        FINEST = numpy.max(
            [level for level in self.levels if counts[level] > 0])
        work_mlmc = sum([
            indicators.pairworks[level] * counts[level]
            for level in self.levels
        ])
        #variance_mc = numpy.max ( [ indicators.variance [0] ['infered'] [level] for level in self.levels [0 : FINEST + 1] ] )
        variance_mc = indicators.variance[0]['infered'][FINEST]
        samples_mc = numpy.ceil(variance_mc / error**2)
        work_mc = indicators.works[FINEST] * samples_mc
        self.speedup_mlmc = work_mc / work_mlmc

        # avoid round-off errors for pure MC runs
        if len(self.levels) == 1:
            self.speedup_mlmc = 1.0
            samples_mc = counts[0]

        # report
        print
        if forecast: print ' :: FORECAST'
        print ' :: SPEEDUP (MLMC vs. MC): %.1f' % self.speedup_mlmc + (
            ' [finest level: %d]' % FINEST if FINEST != self.L else '')
        print '  : -> MLMC budget : %s CPU hours' % helpers.intf(
            numpy.ceil(work_mlmc))
        print '  : ->   MC budget : %s CPU hours' % helpers.intf(
            numpy.ceil(work_mc))
        print '  : ->   MC samples: %s' % helpers.intf(samples_mc)

        # compute and report OCV MLMC vs. PLAIN MLMC speedup
        # REMARK: since samples were optimized for OCV, this is not an accurate measure for speedup
        # REMARK: speedup of the variance reduction cost functional is already reported in indicators.optimize()
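The speedup estimate above compares the total MLMC work with the work a plain MC estimator would need on the finest populated level to reach the same sampling error. A minimal standalone sketch of that arithmetic (all numbers are made-up placeholders, not values from a real run):

# Minimal sketch of the MLMC vs. MC speedup arithmetic used above
# (all inputs are hypothetical placeholders).
import numpy

levels      = [0, 1, 2]
counts      = [1000, 200, 50]      # samples computed per level
pairworks   = [1.0, 5.0, 25.0]     # CPU hours per (fine, coarse) sample pair
works       = [0.8, 4.0, 20.0]     # CPU hours per single fine sample
error       = 0.05                 # required total sampling error
variance_mc = 2.5                  # variance of the QoI on the finest level

FINEST     = numpy.max([level for level in levels if counts[level] > 0])
work_mlmc  = sum([pairworks[level] * counts[level] for level in levels])
samples_mc = numpy.ceil(variance_mc / error ** 2)   # MC samples needed for 'error'
work_mc    = works[FINEST] * samples_mc
print('speedup (MLMC vs. MC): %.1f' % (work_mc / work_mlmc))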
Example 4
    def report_budget(self):

        print
        print ' :: BUDGET:'

        budget_used = float(
            sum([
                self.pairworks[level] * self.counts.available()[level]
                for level in self.levels
            ]))
        budget_left = float(self.budget - budget_used)
        if self.available:
            budget_reqd = float(
                sum([
                    self.pairworks[level] * self.counts.additional[level]
                    for level in self.levels
                ]))

        print '  : -> Specified budget: %s CPU hours [%s NODE hours]' % (
            helpers.intf(numpy.ceil(self.budget), table=1),
            helpers.intf(numpy.ceil(self.budget / local.cores), table=1))
        print '  : -> Consumed  budget: %s CPU hours [%s NODE hours]' % (
            helpers.intf(numpy.ceil(budget_used), table=1),
            helpers.intf(numpy.ceil(budget_used / local.cores), table=1))
        print '  : -> Remaining budget: %s CPU hours [%s NODE hours]' % (
            helpers.intf(numpy.ceil(budget_left), table=1),
            helpers.intf(numpy.ceil(budget_left / local.cores), table=1))
        if self.available:
            print '  : -> Requested budget: %s CPU hours [%s NODE hours]' % (
                helpers.intf(numpy.ceil(budget_reqd), table=1),
                helpers.intf(numpy.ceil(budget_reqd / local.cores), table=1))
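The budget report above is plain bookkeeping: consumed budget is the pairwise work times the number of available samples, summed over levels, and CPU hours are converted to node hours by dividing by the cores per node. A small sketch with placeholder values (cores plays the role of local.cores in the code above):

# Sketch of the budget bookkeeping above (placeholder values).
import numpy

budget    = 10000.0                 # specified budget in CPU hours
cores     = 12                      # cores per node (local.cores in the code above)
pairworks = [1.0, 5.0, 25.0]        # CPU hours per (fine, coarse) sample pair
available = [1000, 200, 50]         # samples available per level

budget_used = float(sum(pairworks[level] * available[level] for level in range(3)))
budget_left = budget - budget_used

print('Consumed  budget: %d CPU hours [%d NODE hours]' %
      (numpy.ceil(budget_used), numpy.ceil(budget_used / cores)))
print('Remaining budget: %d CPU hours [%d NODE hours]' %
      (numpy.ceil(budget_left), numpy.ceil(budget_left / cores)))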
Example 5
    def load(self):

        config = self.config

        prefix = '  :      %d  |  %s  |    %s  | ' % (config.level, [
            ' FINE ', 'COARSE'
        ][config.type], intf(len(config.samples), table=1))
        progress = Progress(prefix=prefix,
                            steps=len(config.samples),
                            length=33)
        progress.init()

        for i, sample in enumerate(config.samples):
            if self.params.verbose >= 2:
                self.results[i] = config.solver.load(config.level, config.type,
                                                     sample)
            else:
                try:
                    self.results[i] = config.solver.load(
                        config.level, config.type, sample)
                except:
                    self.results[i] = None
            progress.update(i + 1)

        progress.reset()

        loaded = [i for i, result in enumerate(self.results) if result != None]

        self.available = (len(loaded) > 0)

        return loaded
Example 6
    def report(self):

        if not self.available:
            helpers.warning('ERRORS not available')
            return

        print
        print ' :: ERRORS: (normalized to %s [~%.1e])' % (helpers.scif(
            self.normalization), self.normalization)

        print '  :'
        print '  :  LEVEL  :' + ' '.join(
            ['  ' + helpers.intf(level, table=1) for level in self.levels])
        print '  :----------' + '-'.join(
            [helpers.scif(None, table=1, bar=1) for level in self.levels])
        print '  :  ERROR  :',
        for level in self.levels:
            print helpers.scif(self.relative_error[level], table=1),
        print

        print '  :'
        print '  : Total sampling error       : %s [~%.1e]' % (helpers.scif(
            self.total_relative_error), self.total_relative_error)

        print '  : Deterministic error (bias) : %s [~%.1e]' % (helpers.scif(
            self.relative_bias), self.relative_bias)

        if numpy.isnan(self.total_relative_error) or numpy.isinf(
                self.total_relative_error):
            self.available = 0
Example 7
    def query(self):

        message = 'specify the required computational budget'
        hint = 'press ENTER to leave %s CPU hours' % helpers.intf(self.budget)
        default = self.budget
        budget = helpers.query(message,
                               hint=hint,
                               type=float,
                               default=default,
                               format='intf',
                               exit=0)
        modified = budget != self.budget
        self.budget = budget
        return modified
Example 8
    def info(self):

        config = self.config

        typestr = [' FINE ', 'COARSE'][config.type]

        resolution = config.solver.resolution_string(config.discretization)
        resolution = resolution.rjust(len('RESOLUTION'))

        format = '  :  %5d  |'
        args = (config.level, )
        if not config.solver.recycle:
            format += '  %s  |'
            args += (typestr, )

        format += '  %s  |    %s  |  %s %s   |'
        args += (resolution, intf(len(config.samples), table=1))
        if self.parallelization.cores % local.cores == 0:
            args += (intf(self.parallelization.cores / local.cores,
                          table=1), 'N')
        else:
            args += (intf(self.parallelization.cores, table=1), 'C')

        if self.parallelization.walltime and local.cluster:
            format += '   %2dh %2dm  |  %s  ->  %s  |'
            if self.parallelization.batch and self.parallelization.batchmax != None:
                batch = intf(self.parallelization.batchmax, table=1)
                count = intf(math.ceil(
                    float(len(config.samples)) /
                    self.parallelization.batchmax),
                             table=1)
            else:
                batch = intf(0, table=1, empty=1)
                count = intf(len(config.samples), table=1, empty=1)
            args += (self.parallelization.hours, self.parallelization.minutes,
                     batch, count)
            if local.ensembles and self.parallelization.batch:
                merge = intf(self.parallelization.mergemax, table=1)
                format += '  %s  ->'
                args += (merge, )

        return format % args
Example 9
 def resolution_string (self, d):
   from helpers import intf
   return intf (d)
Example 10
  def dispatch (self, level, type, parallelization):

    # get directory
    directory = self.directory (level, type)

    # copy parallelization for further modifications
    parallelization = copy.deepcopy (parallelization)

    # if available, use scheduler's dispatch routine
    if self.scheduler.dispatch != None:

      # set label
      label = self.label (level, type)

      jobs = [ self.job (args, wrap=False) for args in self.batch ]

      # dispatch and get info
      info = self.scheduler.dispatch (self.batch, jobs, directory, label, parallelization)

      # empty queue
      self.batch = []

      return info

    # if batch mode -> submit batch job(s)
    if local.cluster and parallelization.batch:

      # suffix format for batch jobs and ensembles
      suffix_format = '.%s%03d'

      # split batch job into smaller batches according to 'parallelization.batchmax'
      if parallelization.batchmax:
        batches = helpers.chunks (self.batch, parallelization.batchmax)
      else:
        batches = [ self.batch [:] ]

      # if merging into ensembles is disabled
      if not local.ensembles:

        # submit each batch
        for index, batch in enumerate (batches):

          # set batch in parallelization (last batch might be smaller)
          parallelization.batch = len (batch)

          # construct batch job from all jobs in the current batch
          batch = '\n'.join ( [ self.wrap (self.job (args), args ['sample']) for args in batch ] )

          # set suffix
          suffix = suffix_format % ('b', index + 1)

          # set label
          label = self.label (level, type, suffix=suffix)

          # submit
          self.execute ( self.submit (batch, parallelization, label, directory, suffix=suffix, timer=1), directory )

        # empty queue
        self.batch = []

        return ''

      # else if merging into ensembles is enabled
      else:

        # check if blocks need to be split into subblocks
        subblocks = max (1, local.min_cores / parallelization.cores)

        # form blocks each containing grouped 'subblocks' batch jobs
        blocks = helpers.chunks (batches, subblocks)

        # warn if the first block is not fully utilized
        if len (blocks) > 1:
          utilized = parallelization.cores * len (blocks [0]) >= local.min_cores
        else:
          return 'SKIPPED'
        '''
        if not utilized:
          message = 'Requested number of cores and samples does not fully utilize the smallest block'
          details = '%s * %s < %s' % ( helpers.intf (parallelization.cores), helpers.intf (len (blocks [0])), helpers.intf (local.min_cores) )
          advice  = 'Increase parallelization ratio for this level'
          helpers.warning (message, details=details, advice=advice)
          # TODO: in such case, should batchsize be reduced (for all under-utilized blocks) to improve the utilization?
        '''

        # check if the number of sub-blocks does not exceed machine limit
        if local.max_ensemble != None and len (blocks) * subblocks > local.max_ensemble:
          message = 'Maximum number of ensemble jobs exceeded:'
          details = '%d > %d' % ( len (blocks) * subblocks, local.max_ensemble )
          advice  = 'Reduce the number of ensemble jobs or use more nodes per job and apply batching.'
          helpers.error (message, details, advice)

        # split blocks into ensembles (with ensemble sizes being powers of 2)
        binary = bin ( len (blocks) )
        decomposition = [ 2**(len(binary) - 1 - power) if flag == '1' else 0 for power, flag in enumerate(binary) ]
        decomposition = [ size for size in decomposition if size != 0 ]

        # respect parallelization.mergemax
        filtered = []
        for i, size in enumerate (decomposition):
          if parallelization.mergemax == None or size * subblocks <= parallelization.mergemax:
            filtered += [size]
          else:
            chunks = 2 ** int ( math.ceil ( math.log ( float (size * subblocks) / parallelization.mergemax, 2) ) )
            filtered += [ size / chunks ] * chunks
        decomposition = filtered

        # submit each ensemble
        index     = 0
        submitted = 0
        for i, merge in enumerate (decomposition):

          # set suffix
          suffix = suffix_format % ('e', i + 1)

          # set label
          label = self.label (level, type, suffix=suffix)

          # initialize ensemble job
          ensemble = ''

          # set batch and merge in parallelization
          parallelization.batch = len (blocks [0][0])
          parallelization.merge = merge

          # submit each block
          for block, batches in enumerate (blocks [submitted : submitted + merge]):

            # header for the subensemble job
            if local.block != None:
              ensemble += '\n# === BLOCK %d\n' % block

            # initialize subensemble job
            subensemble = ''

            # determine the shape of a subblock
            shape = local.get_shape (parallelization.nodes)

            # add corner initialization
            if shape != None:
              subensemble += local.corners % { 'block' : block, 'shape' : shape } + '\n'

            # submit each batch
            for corner, batch in enumerate (batches):

              # increment 'index' counter
              index += 1

              # header for the batch job
              subensemble += '\n# === BATCH JOB %d' % index

              # additional header information
              if local.block != None:
                subensemble += ' [block %d, corner %d]' % (block, corner)

              # end of header for the batch job
              subensemble += '\n'
              
              # construct wrapped jobs, passing the additional parameters (block, corner, shape) along with each 'args'
              jobs = []
              for args in batch:

                # add batch job of 'shape' to 'corner' within block which is part of an entire ensemble
                jobs.append ( self.wrap (self.job (args, block, corner, shape), args ['sample']) )

              # construct batch job
              batch = '\n'.join (jobs)

              # add timer
              if local.timer:
                batch = local.timer.rstrip() % { 'job' : '\n\n' + batch + '\n', 'timerfile' : self.timerfile + suffix_format % ('b', index) }

              # fork to background (such that other batch jobs in subensemble could proceed)
              if subblocks > 1:
                batch = self.fork (batch)

              # add batch job to the subensemble
              subensemble += batch
            
            # add synchronization
            if subblocks > 1:
              subensemble += self.sync ()

            # add block booting and block freeing
            subensemble = self.boot (subensemble, block)

            # fork to background (such that other subensemble jobs in ensemble could proceed)
            if merge > 1:
              subensemble = self.fork (subensemble)

            # add batch job to the ensemble
            ensemble += subensemble
          
          # add synchronization
          if merge > 1:
            ensemble += self.sync()

          # copy parallelization to prevent modifications
          submit_parallelization = copy.deepcopy (parallelization)

          # adjust parallelization according to the number of subblocks
          submit_parallelization.nodes *= subblocks
          submit_parallelization.cores *= subblocks

          # submit
          self.execute ( self.submit (ensemble, submit_parallelization, label, directory, suffix=suffix, boot=0, timer=0), directory )

          # update 'submitted' counter
          submitted += merge

        # empty queue
        self.batch = []

        # return information about ensembles
        from helpers import intf
        info = [ '%s (%s N)' % ( intf (subblocks * merge), intf (parallelization.nodes * subblocks * merge) ) for merge in decomposition ]
        return ' + '.join (info) + (' [not fully utilized]' if not utilized else '')
    
    return ''
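The ensemble construction in the branch above hinges on a binary decomposition: the number of blocks is split into powers of two, which become the ensemble sizes (e.g. 13 blocks become ensembles of 8 + 4 + 1 blocks) before being filtered against parallelization.mergemax. A standalone sketch of just that decomposition step, extracted from the code above:

# Sketch of the power-of-2 decomposition used for ensemble sizes above.
def decompose(nblocks):
    binary = bin(nblocks)   # e.g. '0b1101'; the '0b' prefix only contributes zeros below
    sizes = [2 ** (len(binary) - 1 - power) if flag == '1' else 0
             for power, flag in enumerate(binary)]
    return [size for size in sizes if size != 0]

print(decompose(13))   # [8, 4, 1]
print(decompose(7))    # [4, 2, 1]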
Example 11
 def report (self):
   
   print
   print   ' :: CONFIGURATION:    '
   print   '  : MACHINE      :    %-30s' % local.name                         + '    ' + '[TYPE: %s]' % ('cluster'       if local.cluster      else 'standalone')
   print   '  : SOLVER       :    %-30s' % self.solver    .__class__.__name__ + '    ' + '[MODE: %s]' % ('deterministic' if self.deterministic else 'stochastic')
   if self.levels > 0 and not self.deterministic:
     print '                 |->  %-30s' % 'WORK RATIOS'                      + '    ' + '%s' % ' '.join ( [ helpers.intf (ratio) for ratio in self.work_ratios ] )
     print '                 |->  %-30s' % 'CORE RATIOS'                      + '    ' + '%s' % ' '.join ( [ helpers.intf (ratio) for ratio in self.core_ratios ] )
   print   '                 |->  %-30s' % 'INIT SCRIPT'                      + '    ' + '%s' % (self.solver.init.__name__ if self.solver.init != None else 'None')
   print   '  : SAMPLES      :    %-30s' % self.samples   .__class__.__name__
   if not self.deterministic:
     print '  : SCHEDULER    :    %-30s' % self.scheduler .__class__.__name__
   print   '  : ROOT         :    %-30s' % self.root
   print   '  : RECYCLE      :    %-30s' % ( 'ENABLED' if self.recycle else 'DISABLED' )
   print   '  : INFERENCE    :    %-30s' % ( self.inference + (' [enforced]' if self.enforce else ' [not enforced]') )
   print   '  : OPTIMAL C.V. :    %-30s' % ( 'ENABLED' if self.ocv else 'DISABLED' )
Example 12
    def report(self):

        # === report measured values

        print
        print ' :: MEASURED INDICATORS: (normalized to %s)' % helpers.scif(
            self.normalization)
        print '  :'
        print '  :       LEVEL       : ' + ' '.join(
            ['  ' + helpers.intf(level, table=1) for level in self.levels])
        print '  :---------------------' + '-'.join(
            [helpers.scif(None, table=1, bar=1) for level in self.levels])

        # report 'correlation'
        self.correlation.report('measured')

        # splitter
        print '  :---------------------' + '-'.join(
            [helpers.scif(None, table=1, bar=1) for level in self.levels])

        # report 'mean (fine)'
        self.mean[self.FINE].report('measured', self.normalization)

        # report 'mean (coarse)'
        self.mean[self.COARSE].report('measured', self.normalization)

        # report 'variance (fine)'
        self.variance[self.FINE].report('measured', self.normalization**2)

        # report 'variance (coarse)'
        self.variance[self.COARSE].report('measured', self.normalization**2)

        # report 'mean diff'
        self.mean_diff.report('measured', self.normalization)

        # report 'variance diff'
        self.variance_diff.report('measured', self.normalization**2)

        # report 'covariance'
        self.covariance.report('measured', self.normalization**2)
        '''
    # splitter
    print '  :---------------------' + '-'.join ( [ helpers.scif (None, table=1, bar=1) for level in self.levels ] )

    # report 'coefficients' and OCV MLMC vs. PLAIN MLMC speedup from coefficient optimization
    print '  : %-18s:' % 'COEFFICIENT',
    for level in self.levels:
      print helpers.scif (self.coefficients.values [level], table=1),
    if self.coefficients.optimization != None:
      print '[OPTIMIZATION: %.2f]' % self.coefficients.optimization,
    print
    
    # report 'mean diff opt'
    self.mean_diff_opt.report ('measured', self.normalization)

    # report 'variance diff opt'
    self.variance_diff_opt.report ('measured', self.normalization ** 2)
    '''

        # === report infered values

        if not self.inference:
            print
            print ' :: INFERENCE OF INDICATORS IS DISABLED'
            return

        if not self.infered:
            print
            print ' :: INFERENCE OF INDICATORS IS NOT NECESSARY'
            return

        print
        print ' :: INFERED INDICATORS: (w.r.t. \'%s\'%s, normalized to %s)' % (
            self.inference, ' [enforced]' if self.enforce else
            ' [not enforced]', helpers.scif(self.normalization))
        print '  :'
        print '  :       LEVEL       : ' + ' '.join(
            ['  ' + helpers.intf(level, table=1) for level in self.levels])
        print '  :---------------------' + '-'.join(
            [helpers.scif(None, table=1, bar=1) for level in self.levels])

        # report 'correlation'
        self.correlation.report('infered')

        # splitter
        print '  :---------------------' + '-'.join(
            [helpers.scif(None, table=1, bar=1) for level in self.levels])

        # report 'mean (fine)'
        self.mean[self.FINE].report('infered', self.normalization)

        # report 'mean (coarse)'
        self.mean[self.COARSE].report('infered', self.normalization)

        # report 'variance (fine)'
        self.variance[self.FINE].report('infered', self.normalization**2)

        # report 'variance (coarse)'
        self.variance[self.COARSE].report('infered', self.normalization**2)

        # report 'mean diff'
        self.mean_diff.report('infered', self.normalization)

        # report 'variance diff'
        self.variance_diff.report('infered', self.normalization**2)

        # report 'covariance'
        self.covariance.report('infered', self.normalization**2)
Example 13
    def optimize(self, mcs, indices, L0, forecast=False):

        if self.ocv:
            print
            if forecast:
                print ' :: FORECAST'
            print ' :: Optimizing INDICATORS...',
            sys.stdout.flush()

        # === OPTIMAL control variate COEFFICIENTS

        # compute optimal control variate coefficients
        if self.ocv:
            if forecast:
                self.coefficients.optimize(self)
            else:
                counts = [len(indices[level]) for level in self.levels]
                self.coefficients.optimize(self, samples=counts)

        # re-evaluate distances between indicators for every two consecutive levels of each sample for the specified indices
        distances = self.distances(mcs, indices)

        # === MEAN DIFF and VARIANCE DIFF level distance indicators
        # (WITH optimal control variate coefficients computed above)

        self.mean_diff_opt = Indicator('MEAN     DIFF OPT',
                                       self.levels,
                                       start=self.L0 + 1)
        self.variance_diff_opt = Indicator('VARIANCE DIFF OPT',
                                           self.levels,
                                           start=self.L0 + 1)

        # compute optimized level distances (measured values)
        for level in self.levels:
            self.mean_diff_opt['measured'][level] = numpy.mean(
                numpy.abs(distances[level]))
            self.variance_diff_opt['measured'][level] = numpy.var(
                distances[level],
                ddof=1) if len(distances[level]) > 1 else float('nan')

        # compute magnitudes of optimized level distances (infered values)
        # remark: infering optimized differences from measured optimized differences could lead to inconsistencies with other infered indicators
        self.mean_diff_opt['infered'][self.L0] = self.coefficients.values[
            self.L0] * self.mean[self.FINE]['infered'][self.L0]
        self.variance_diff_opt['infered'][self.L0] = self.coefficients.values[
            self.L0]**2 * self.variance[self.FINE]['infered'][self.L0]
        for level in self.levels[self.L0 + 1:]:
            self.mean_diff_opt['infered'][level] = self.coefficients.values[
                level] * self.mean[self.FINE]['infered'][level]
            self.mean_diff_opt['infered'][level] -= self.coefficients.values[
                level - 1] * self.mean[self.COARSE]['infered'][level]
            self.mean_diff_opt['infered'][level] = numpy.abs(
                self.mean_diff_opt['infered'][level])
            self.variance_diff_opt['infered'][
                level] = self.coefficients.values[level]**2 * self.variance[
                    self.FINE]['infered'][level]
            self.variance_diff_opt['infered'][
                level] += self.coefficients.values[
                    level -
                    1]**2 * self.variance[self.COARSE]['infered'][level]
            self.variance_diff_opt['infered'][
                level] -= 2 * self.coefficients.values[
                    level] * self.coefficients.values[
                        level - 1] * self.covariance['infered'][level]
            self.variance_diff_opt['infered'][level] = numpy.maximum(
                0, self.variance_diff_opt['infered'][level])

        if self.ocv:
            print 'done.'

        # report only if OCV is enabled and successful
        if self.ocv:

            # === report measured values
            print
            if forecast:
                print ' :: FORECAST'
            print ' :: OPTIMIZED INDICATORS: (normalized to %s)' % helpers.scif(
                self.normalization)
            print '  :'
            print '  :       LEVEL       : ' + ' '.join(
                ['  ' + helpers.intf(level, table=1) for level in self.levels])
            print '  :---------------------' + '-'.join(
                [helpers.scif(None, table=1, bar=1) for level in self.levels])

            # report 'coefficients' and OCV MLMC vs. PLAIN MLMC speedup from coefficient optimization
            print '  : %-18s:' % 'COEFFICIENT',
            for level in self.levels:
                print helpers.scif(self.coefficients.values[level], table=1),
            print

            # report 'mean diff opt'
            self.mean_diff_opt.report('infered', self.normalization)

            # report 'variance diff opt'
            self.variance_diff_opt.report('infered', self.normalization**2)

            # report optimization performance
            if self.coefficients.optimization != None:
                print
                if forecast:
                    print ' :: FORECAST'
                print ' :: OPTIMIZATION (OCV vs. PLAIN): %.2f' % self.coefficients.optimization
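The 'infered' values of VARIANCE DIFF OPT above follow the standard identity for the variance of a weighted difference, Var(c_l * F - c_{l-1} * C) = c_l^2 Var(F) + c_{l-1}^2 Var(C) - 2 c_l c_{l-1} Cov(F, C), clipped at zero to guard against round-off. A tiny sketch with placeholder numbers:

# Sketch of the optimized level-difference variance identity used above
# (placeholder numbers, not inferred indicator values).
import numpy

c_fine, c_coarse     = 1.0, 0.95   # control variate coefficients c_l, c_{l-1}
var_fine, var_coarse = 4.0, 3.8    # level variances of fine and coarse estimators
covariance           = 3.7         # covariance between fine and coarse estimators

variance_diff_opt = (c_fine ** 2 * var_fine
                     + c_coarse ** 2 * var_coarse
                     - 2 * c_fine * c_coarse * covariance)
variance_diff_opt = numpy.maximum(0, variance_diff_opt)   # avoid negative round-off
print(variance_diff_opt)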
Example 14
    def load(self):

        # load status of MLMC simulation
        if self.params.verbose:
            self.status.load(self.config)
        else:
            try:
                self.status.load(self.config)
            except:
                message = 'MLMC status could not be loaded from'
                details = os.path.join(self.config.root,
                                       self.status.status_file + '*')
                advice = 'Run PyMLMC with \'-v 1\' option for verbose mode or with \'-r\' option to restart the simulation'
                helpers.error(message, details, advice)

        if not self.config.deterministic:

            # load samples history
            self.config.samples.load(self.config)

            # load indicators history
            self.indicators.load(self.config)

            # load coefficients history
            self.indicators.coefficients.load(self.config)

            # load errors history
            self.errors.load(self.config)

        # recreate MC simulations
        self.create_MCs(self.config.samples.indices.combined,
                        self.config.iteration)

        # if non-interactive session -> wait for jobs to finish
        if not self.params.interactive:
            self.join()

        # open progress file
        f = open(self.progress_file, 'w')

        # load the results from MC simulations and report
        from helpers import intf
        header = '\n :: LOADING RESULTS:'

        if self.config.recycle:
            header += '\n' + '  :  LEVEL  |  SAMPLES  |  LOADED  |  FAILED  |  PENDING  |  INVALID  |'
            header += '\n' + '  :-------------------------------------------------------------------|'
            format = '  :      %d  |    %s  |   %s  |   %s  |   %s   |   %s'

        else:
            header += '\n' + '  :  LEVEL  |   TYPE   |  SAMPLES  |  LOADED  |  FAILED  |  PENDING  |  INVALID  |'
            header += '\n' + '  :------------------------------------------------------------------------------|'
            format = '  :      %d  |  %s  |    %s  |   %s  |   %s  |   %s   |   %s'

        print header

        # candidate for the coarsest level
        self.L0 = None

        # buffer
        buffer = ''

        # load all levels
        for level in self.config.levels:

            loaded = [[], []]
            invalid = [[], []]

            # load both types
            for type in reversed(self.config.types(level)):

                mc = self.mcs[self.config.pick[level][type]]
                pending = mc.pending()
                loaded[type] = mc.load()
                invalid[type] = mc.invalid()

                # report
                typestr = [' FINE ', 'COARSE'][mc.config.type]
                samplesstr = intf(len(mc.config.samples), table=1)
                loadedstr = intf(len(loaded[type]), table=1, empty=1)
                failedstr = intf(len(mc.config.samples) - len(loaded[type]),
                                 table=1,
                                 empty=1)
                pendingstr = intf(pending, table=1, empty=1)
                invalidstr = intf(len(invalid[type]), table=1, empty=1)
                if len(invalid[type]) > 0 and len(invalid[type]) <= 16:
                    invalidstr += '   : ' + ' '.join(
                        ['%d' % index for index in invalid[type]])
                else:
                    invalidstr += '   |'
                if self.config.recycle:
                    string = format % (mc.config.level, samplesstr, loadedstr,
                                       failedstr, pendingstr, invalidstr)
                    print string
                    buffer += '\n' + string
                else:
                    string = format % (mc.config.level, typestr, samplesstr,
                                       loadedstr, failedstr, pendingstr,
                                       invalidstr)
                    print string
                    buffer += '\n' + string

                # remove invalid samples
                loaded[type] = list(set(loaded[type]) - set(invalid[type]))

                # check if at least one sample at some level and type
                if mc.available:
                    self.available = 1

            # loading is level-dependent (i.e. for non-coarsest levels, samples of both types should be loaded)
            if self.L0 == None:
                if len(loaded[self.config.FINE]) > 0:
                    self.config.samples.indices.loaded[level] = loaded[
                        self.config.FINE]
                    self.L0 = level
                else:
                    self.config.samples.indices.loaded[level] = []
            else:
                if self.config.recycle:
                    self.config.samples.indices.loaded[level] = list(
                        set(loaded[self.config.FINE])
                        & set(self.config.samples.indices.loaded[level - 1]))
                else:
                    self.config.samples.indices.loaded[level] = list(
                        set(loaded[self.config.FINE])
                        & set(loaded[self.config.COARSE]))

            # compute auxiliary counts
            self.config.samples.counts.loaded[level] = len(
                self.config.samples.indices.loaded[level])
            self.config.samples.counts.failed[
                level] = self.config.samples.counts.combined[
                    level] - self.config.samples.counts.loaded[level]

            # store invalid indices and counts
            self.config.samples.indices.invalid[level] = list(
                set(invalid[self.config.FINE])
                | set(invalid[self.config.COARSE]))
            self.config.samples.counts.invalid[level] = len(
                self.config.samples.indices.invalid[level])

        # save progress to a file
        f.write(header + buffer)

        # report how many pairs of fine and coarse samples were loaded
        header = '\n :: LOADED VALID PAIRS (FINE & COARSE):'
        header += '\n' + '  :  LEVEL  |  SAMPLES  |  INCLUDED  |  EXCLUDED  |'
        header += '\n' + '  :-----------------------------------------------|'
        format = '  :      %d  |    %s  |    %s   |    %s   |'

        print header

        # buffer
        buffer = ''

        for level in self.config.levels:
            loadedstr = intf(self.config.samples.counts.loaded[level],
                             table=1,
                             empty=1)
            failedstr = intf(self.config.samples.counts.failed[level],
                             table=1,
                             empty=1)
            string = format % (level,
                               intf(self.config.samples.counts.combined[level],
                                    table=1), loadedstr, failedstr)
            buffer += '\n' + string
            print string

        # save progress to a file
        f.write('\n')
        f.write(header + buffer)

        # close progress file
        f.close()

        # report detailed progress of individual samples
        self.progress()

        # update the computed number of samples
        self.config.samples.append()

        # check availability
        if not self.available:
            helpers.error('No results were loaded - exiting...')

        # query for progress
        else:
            helpers.query('Loading complete! Continue?')
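The level-wise bookkeeping above reduces to set arithmetic: on a non-coarsest level a sample index counts as loaded only if both the fine and the coarse run loaded successfully, and invalid indices are removed. A minimal sketch with hypothetical index lists:

# Sketch of the valid-pair bookkeeping above (hypothetical sample indices).
loaded_fine   = [0, 1, 2, 3, 5]
loaded_coarse = [0, 2, 3, 4, 5]
invalid       = [5]

# remove invalid samples, then keep only indices loaded for BOTH types
valid_fine   = set(loaded_fine)   - set(invalid)
valid_coarse = set(loaded_coarse) - set(invalid)
loaded_pairs = sorted(valid_fine & valid_coarse)
print(loaded_pairs)   # [0, 2, 3]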
Example 15
    def join(self):

        from helpers import intf

        print
        self.finished = 1

        # deterministic reporting
        if self.config.deterministic:

            print ' :: STATUS of simulation:'
            format = '  : %s'

            # for all MC simulations
            for mc in self.mcs:

                # check how many samples are still pending
                pending = mc.pending()

                # if simulation is finished, report runtime
                if pending == 0:

                    walltime = self.status.list['walltimes'][mc.config.level][
                        mc.config.type]
                    runtime = mc.timer(walltime, self.config.scheduler.batch)
                    if runtime['max'] != None:
                        runtimestr = time.strftime('%H:%M:%S',
                                                   time.gmtime(runtime['max']))
                        if walltime != 'unknown':
                            walltimestr = time.strftime(
                                '%H:%M:%S', time.gmtime(walltime * 3600))
                            percent = round(100 * (runtime['max'] / 3600) /
                                            walltime)
                            print format % ('Completed in ' + runtimestr +
                                            (' [%2d%% of %s]' %
                                             (percent, walltimestr)))
                        else:
                            print format % ('Completed in ' + runtimestr)
                    else:
                        print format % 'Completed in N/A'

                # report if some simulations are pending
                else:

                    self.finished = 0
                    print format % 'Pending'

        # stochastic reporting
        else:

            header = '  :  LEVEL  |'
            separator = '  :----------'
            format = '  :      %d  |'

            if not self.config.recycle:
                header += '   TYPE   |'
                separator += '-----------'
                format += '  %s  |'

            header += '  SAMPLES  |  FINISHED  |  PENDING  |  WALLTIME  |        RUNTIME        |     USAGE     |     BUDGET    |   EFFICIENCY  |'
            separator += '-------------------------------------------------------------------------------------------------------------------------|'
            format += '    %s  |    %s   |   %s   |  %s  |  %s - %s  |  %s - %s  |  %s - %s  |  %s - %s  |'

            header += '  BATCH  |     BATCH RUNTIME     |'
            separator += '---------------------------------|'
            format += '  %s  |  %s - %s  |'

            print ' :: STATUS of MC simulations:'
            print header
            print separator

            # for all MC simulations
            for mc in self.mcs:

                # skip type == 1 in recycle mode
                if self.config.recycle and mc.config.type == self.config.COARSE:
                    continue

                # check how many samples are already finished
                finished = mc.finished()

                # check how many samples are still pending
                pending = mc.pending()

                args = (mc.config.level, )
                if not self.config.recycle:
                    args += ([' FINE ', 'COARSE'][mc.config.type], )
                args += (intf(len(mc.config.samples),
                              table=1), intf(finished, table=1, empty=1),
                         intf(pending, table=1, empty=1))

                # we are not finished if at least one simulation is pending
                if pending > 0:
                    self.finished = 0

                # report walltime
                walltime_sample = self.status.list['walltimes'][
                    mc.config.level][mc.config.type]
                if walltime_sample != 'unknown':
                    walltime_sample_str = helpers.timef(walltime_sample * 3600)
                    args += (walltime_sample_str, )
                else:
                    args += ('   N/A  ', )

                # if some samples are finished, report runtimes, budget, efficiency, batching, etc.
                if finished > 0:

                    # parallelization
                    parallelization = self.status.list['parallelization'][
                        mc.config.level][mc.config.type]

                    # runtimes, walltime usage, budget usage, etc. of individual samples
                    runtime_sample = mc.timer(walltime_sample)
                    if runtime_sample['min'] != None and runtime_sample[
                            'max'] != None:

                        # runtimes
                        min_runtime_sample_str = helpers.timef(
                            runtime_sample['min'])
                        max_runtime_sample_str = helpers.timef(
                            runtime_sample['max'])
                        args += (min_runtime_sample_str,
                                 max_runtime_sample_str)

                        # walltime usage
                        if walltime_sample != 'unknown':
                            walltime_sample_percent_min = round(
                                100 * (runtime_sample['min'] / 3600) /
                                walltime_sample)
                            walltime_sample_percent_max = round(
                                100 * (runtime_sample['max'] / 3600) /
                                walltime_sample)
                            args += ('%3d%%' % walltime_sample_percent_min,
                                     '%3d%%' % walltime_sample_percent_max)
                        else:
                            args += ('    ', '    ')

                        # budget usage
                        budget_sample = float(self.config.works[
                            mc.config.level -
                            mc.config.type]) / parallelization
                        budget_sample_percent_min = min(
                            999,
                            round(100 * (runtime_sample['min'] / 3600) /
                                  budget_sample))
                        budget_sample_percent_max = min(
                            999,
                            round(100 * (runtime_sample['max'] / 3600) /
                                  budget_sample))
                        args += ('%3d%%' % budget_sample_percent_min,
                                 '%3d%%' % budget_sample_percent_max)

                    # default values if runtime measurements are not available
                    else:

                        args += ('   N/A  ', '   N/A  ')

                        # LEGACY: instead, report walltime and budget usage for entire batches
                        batch = self.status.list['batch'][mc.config.level][
                            mc.config.type]
                        runtime_batch = mc.timer(walltime_sample, batch=1)
                        if runtime_batch['min'] != None and runtime_batch[
                                'max'] != None:

                            # walltime usage
                            if walltime_sample != 'unknown':
                                walltime_batch = walltime_sample * batch if batch != None else walltime_sample
                                walltime_batch_percent_min = round(
                                    100 * (runtime_batch['min'] / 3600) /
                                    walltime_batch)
                                walltime_batch_percent_max = round(
                                    100 * (runtime_batch['max'] / 3600) /
                                    walltime_batch)
                                args += ('%3d%%' % walltime_batch_percent_min,
                                         '%3d%%' % walltime_batch_percent_max)
                            else:
                                args += ('    ', '    ')

                            # budget usage
                            budget_sample = float(self.config.works[
                                mc.config.level -
                                mc.config.type]) / parallelization
                            budget_batch = budget_sample * batch if batch != None else budget_sample
                            budget_batch_percent_min = round(
                                100 * (runtime_batch['min'] / 3600) /
                                budget_batch)
                            budget_batch_percent_max = round(
                                100 * (runtime_batch['max'] / 3600) /
                                budget_batch)
                            args += ('%3d%%' % budget_batch_percent_min,
                                     '%3d%%' % budget_batch_percent_max)

                        else:
                            args += ('    ', '    ')
                            args += ('    ', '    ')

                    # efficiency
                    efficiency_sample = mc.efficiency()
                    if efficiency_sample['min'] != None and efficiency_sample[
                            'max'] != None:
                        args += ('%3d%%' % efficiency_sample['min'],
                                 '%3d%%' % efficiency_sample['max'])

                    # LEGACY: instead, report efficiency for entire batches
                    else:
                        efficiency_batch = mc.efficiency(batch=1)
                        if efficiency_batch['min'] != None and efficiency_batch[
                                'max'] != None:
                            args += ('%3d%%' % efficiency_batch['min'],
                                     '%3d%%' % efficiency_batch['max'])
                        else:
                            args += ('    ', '    ')

                    # batch
                    batch = self.status.list['batch'][mc.config.level][
                        mc.config.type]
                    batch_str = helpers.intf(batch, table=1, empty=1)
                    args += (batch_str, )

                    # runtimes, usage, budget of the entire batches
                    runtime_batch = mc.timer(batch=1)
                    if runtime_batch['min'] != None and runtime_batch[
                            'max'] != None:

                        # runtimes
                        min_runtime_batch_str = helpers.timef(
                            runtime_batch['min'])
                        max_runtime_batch_str = helpers.timef(
                            runtime_batch['max'])
                        args += (min_runtime_batch_str, max_runtime_batch_str)
                        '''
            # walltime usage
            if walltime_sample != 'unknown':
              walltime_batch = walltime_sample * batch if batch != None else walltime_sample
              walltime_batch_percent_min  = round ( 100 * (runtime_batch  ['min'] / 3600) / walltime_batch )
              walltime_batch_percent_max  = round ( 100 * (runtime_batch  ['max'] / 3600) / walltime_batch )
            '''

                    # default values if runtime measurements are not available
                    else:
                        args += ('   N/A  ', '   N/A  ')

                    print format % args

                # report that all simulations are pending
                else:

                    args += ('        ', '        ', '    ', '    ', '    ',
                             '    ', '    ', '    ')
                    batch = self.status.list['batch'][mc.config.level][
                        mc.config.type]
                    batch_str = helpers.intf(batch, table=1, empty=1)
                    args += (batch_str, '        ', '        ')
                    print format % args

        if not self.finished:
            # issue a warning and query for progress
            helpers.query('Ignore and continue nevertheless?',
                          warning='Some simulations are still pending')
Example 16
 def resolution_string(self, d):
     from helpers import intf
     if d['NX'] == d['NY'] and d['NX'] == d['NZ']:
         return intf(d['NX']) + '^3'
     else:
         return intf(d['NX']) + 'x' + intf(d['NY']) + 'x' + intf(d['NZ'])
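A couple of hypothetical inputs illustrate the two branches above; plain str() stands in for helpers.intf here, so the exact digit grouping may differ from the real helper:

# Standalone sketch of the resolution-string logic above, with str() in place of helpers.intf.
def resolution_string(d):
    if d['NX'] == d['NY'] and d['NX'] == d['NZ']:
        return str(d['NX']) + '^3'
    else:
        return str(d['NX']) + 'x' + str(d['NY']) + 'x' + str(d['NZ'])

print(resolution_string({'NX': 64, 'NY': 64, 'NZ': 64}))    # 64^3
print(resolution_string({'NX': 128, 'NY': 64, 'NZ': 32}))   # 128x64x32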