Ejemplo n.º 1
0
def gp_eval(expression, vars, verbose=True, iteration=1):
    """Evaluate the algebraic string *expression* and return its value.

    vars      -- dictionary of user variables; returned directly when
                 *expression* is just a variable name, otherwise used as the
                 local namespace for eval().
    verbose   -- if True, report evaluation errors via gp_error before
                 re-raising them.
    iteration -- recursion depth; wrapped user functions re-enter gp_eval,
                 so depth is capped to catch self-referential definitions.

    Raises OverflowError when the recursion cap (20) is exceeded; any error
    raised by the expression itself is re-raised after optional reporting.
    NOTE: this uses eval(), so expressions must come from a trusted source.
    """
    if (iteration > 20):
        # Call-form raise: valid on both Python 2 and Python 3, unlike the
        # legacy 'raise Exc, "msg"' comma syntax used previously.
        raise OverflowError("Iteration depth exceeded in function evaluation.")
    try:
        # Quick escape route if we're just evaluating a variable name
        if expression in vars: return vars[expression]
        # And also for really simple numbers
        try:
            evalexp = float(expression)
        except:
            pass
        else:
            return evalexp

        # Stash the evaluation context where wrapped user functions can
        # retrieve it when eval() calls back into them.
        gp_userspace.passed_to_funcwrap = {
            'vars': vars,
            'iter': iteration,
            'verbose': verbose
        }
        return float(
            eval(expression, gp_userspace.function_namespace.copy(), vars))
    except KeyboardInterrupt:
        raise
    except:
        # Deliberately broad: report whatever the user's expression raised,
        # then propagate it to the caller.
        if (verbose):
            gp_error("Error evaluating expression %s" % expression)
            gp_error("Error:", sys.exc_info()[1], "(", sys.exc_info()[0], ")")
        raise
Ejemplo n.º 2
0
def gp_function_datagrid(xrast, functions, xname, usingrowcol, using_list, select_criterion, select_cont, every_list, vars, style, verb_errors=True, firsterror=None):
  """Evaluate *functions* over the raster *xrast* and return the resulting
  datagrid (as produced by make_datagrid).  When the grid comes back empty,
  probe each function once to surface any evaluation error, and otherwise
  warn that the select criterion (if any) removed all the data."""
  local_vars = vars.copy()

  # Build a human-readable description of what is being evaluated
  if len(functions) == 1:
    description = "in function %s" % functions[0]
  else:
    description = "in functions %s" % ', '.join(functions)

  # Obtain the set of functions
  datagrid = make_datagrid(iterate_function(xrast, functions, xname, local_vars), description, "x=", 0, usingrowcol, using_list, select_criterion, select_cont, every_list, local_vars, style, verb_errors, firsterror)

  # A length-1 grid means evaluation produced no data.  Either a function is
  # unevaluable (probed below, which re-raises its error), or the select
  # criterion was bad (user already has warnings), or it was merely overly
  # restrictive (their own choice) -- so warn rather than fail.
  if len(datagrid) == 1:
    local_vars[xname] = xrast[0]
    for funcstr in functions:
      try:
        gp_eval.gp_eval(funcstr, local_vars, verbose=False)
      except KeyboardInterrupt:
        raise
      except:
        if verb_errors: gp_error("Error evaluating expression '%s':" % funcstr)
        raise
    # If there *was* no select criterion then there is a bug here
    if verb_errors:
      if select_criterion == '':
        gp_warning("Warning: Evaluation of %s produced no data!" % (description[3:]))
      else:
        gp_warning("Warning: Evaluation of %s with select criterion %s produced no data!" % (description[3:], select_criterion))

  return datagrid
Ejemplo n.º 3
0
def get_bins(ranges, xmin, xmax, binwidth, binorigin):
  """Compute the lower edges of histogram bins.

  ranges    -- list of [min, max] pairs from the command line; only the
               first pair (the x range) is consulted, either end may be None.
  xmin,xmax -- extremes of the actual data.
  binwidth  -- width of each bin (assumed > 0; caller validates).
  binorigin -- a point with which some bin edge must be aligned.

  Returns the list of lower bin edges, or None (after reporting an error)
  if the request would create an unreasonable (> 1e6) number of bins.
  """
  # If we have limits to where we're binning data from then there's no point in considering data outside it.
  if len(ranges) == 0:
    xbinmin = xmin
    xbinmax = xmax
  else:
    if ranges[0][0] is None: xbinmin = xmin
    else:                    xbinmin = ranges[0][0]
    if ranges[0][1] is None: xbinmax = xmax
    else:                    xbinmax = ranges[0][1]

  # Turn the bin origin setting into something more useful: its offset within one bin width
  binorigin -= binwidth*floor(binorigin/binwidth)

  # Expand the ends of the outside bins to match the provided bin pattern
  xbinmin = floor((xbinmin-binorigin)/binwidth)*binwidth + binorigin
  xbinmax = ceil((xbinmax-binorigin)/binwidth)*binwidth + binorigin

  # Deal with the case where the minimum datum and the lower edge of the bottom bin are in exactly the same place by adding one more bin
  if (xmin == xbinmin) and (len(ranges) == 0 or ranges[0][0] is None): xbinmin -= binwidth

  Nbins = int((xbinmax-xbinmin)/binwidth) + 1  # int(), not the C-style (int)(...) cast

  # Check for a silly number of bins
  if (Nbins > 1e6):
    gp_error('Too many bins (%d) for histogram command!'%Nbins)
    return None
  if (Nbins > 1e4): gp_warning('You have specified a large number (%d) of bins in the histogram command.  This may require a large amount of memory and time...'%Nbins)

  return [i*binwidth+xbinmin for i in range(Nbins)]
Ejemplo n.º 4
0
def directive_arrow(command,commandtext,linestyles,vars,settings,interactive):
 """Handle the 'arrow' directive: add an arrow item to the multiplot
 description list and, when display is on, replot the whole canvas.

 command     -- parsed command dictionary; must contain x1,y1,x2,y2.
 commandtext -- raw command text, kept so the 'list' command can show it.
 interactive -- when True, report additions/removals to the terminal.
 Appends to (and on failure pops from) the module-global multiplot_plotdesc.
 """
 # Outside multiplot mode each new item replaces the existing plot list
 if (gp_settings.settings_global['MULTIPLOT'] != 'ON'): plotorder_clear()
 
 x0 = command['x1'] ; y0 = command['y1']
 x1 = command['x2'] ; y1 = command['y2']

 # Describe this arrow in the standard multiplot-item format
 this_plotdesc = {'itemtype':'arrow',
                  'number'  :len(multiplot_plotdesc),
                  'x_pos'   :x0,
                  'y_pos'   :y0,
                  'x2_pos'  :x1,
                  'y2_pos'  :y1,
                  'style'   :command,
                  'settings':settings.copy(),
                  'deleted' :'OFF',
                  'listdesc':commandtext
                  }
 multiplot_plotdesc.append(this_plotdesc)

 if (gp_settings.settings_global['MULTIPLOT'] == 'ON') and interactive:
  gp_report("Arrow added to multiplot with reference %d."%this_plotdesc['number'])

 # Replot everything; if this new item was the one that failed, withdraw it
 if (gp_settings.settings_global['DISPLAY'] == "ON"):
  try:
   unsuccessful_plot_operations = gp_plot.multiplot_plot(linestyles,vars,settings,multiplot_plotdesc)
  except KeyboardInterrupt: raise
  except:
   gp_error("Error:" , sys.exc_info()[1], "(" , sys.exc_info()[0] , ")")
   return

  if (gp_settings.settings_global['MULTIPLOT'] == 'ON'):
   if (this_plotdesc['number'] in unsuccessful_plot_operations) and (gp_settings.settings_global['DISPLAY'] == "ON"):
    multiplot_plotdesc.pop()
    if interactive:
     gp_report("Arrow has been removed from multiplot, because it generated an error.")
Ejemplo n.º 5
0
def parse(line, vars):
  """Match *line* against the grammar of each known command in turn.

  Returns the dictionary of variables captured by the first command whose
  grammar fits; None (after printing a marked-up syntax error) when a
  command began to match but then failed; or {'directive':'unrecognised'}
  when no command matched at all."""
  for command, vardict in commands:
    # Fresh parser state for this candidate.  'began' does not mean a full
    # fit -- only that the command started to fit, i.e. this was the right one.
    began     = False
    pos       = 0      # how far along the line we have parsed
    captured  = {}     # variables captured while parsing
    expecting = ""     # options that would have been valid at the failure point
    alg_pos   = None   # position of any algebra error within the line
    alg_msg   = ""
    [pos, ok, expecting, alg_pos, alg_msg, began, captured] = parse_descend(line, vars, pos, expecting, alg_pos, alg_msg, command, began, captured)
    if not began: continue  # This command didn't even begin to match
    if ok and (line[pos:].strip() == ""):
      return captured  # Full match: hand back the captured variables
    # Otherwise build a syntax-error report with arrows marking the spot
    if alg_pos is None: errorstr = "Syntax Error -- "
    else:               errorstr = "At this point, was "
    if not ok:  # User input didn't match the command specification
      errorstr += "expecting %s.\n" % expecting
    elif expecting == "":
      errorstr += "unexpected trailing matter at the end of command.\n"
    else:
      errorstr += "expecting %s or end of command.\n" % expecting
    errorstr += " " * pos + " |\n"
    errorstr += " " * pos + "\\|/\n " + line
    if alg_pos is not None:
      errorstr += "\n"
      errorstr += " " * alg_pos + "/|\\\n"
      errorstr += " " * alg_pos + " |\n"
      errorstr += alg_msg
    gp_error("\n" + errorstr + "\n")
    return None  # Return None in case of failure
  return {'directive':'unrecognised'}  # We did not recognise command
Ejemplo n.º 6
0
def child_support_agency_init():
 """Fork off the background 'child support agency' process.

 The parent returns immediately.  The forked child creates a private
 temporary working directory, verifies it really owns it, chdirs into it,
 runs csa_main(), and finally exits via os._exit so that no inherited
 atexit/cleanup handlers run in the child.
 """
 fork = os.fork()
 if (fork != 0):
  return # Parent process: nothing more to do here
 else:
  # 0o700 (owner-only) -- modern octal literal, valid on Python 2.6+ and 3,
  # unlike the legacy 0700 spelling.
  os.makedirs(gp_settings.tempdir,mode=0o700) # Create temporary working directory
  # Paranoia: refuse to run if the directory is missing or owned by another
  # user (guards against symlink/tmp-race attacks on shared /tmp).
  if ((not os.path.isdir(gp_settings.tempdir)) or (not (os.stat(gp_settings.tempdir)[stat.ST_UID] == os.getuid()))):
   gp_error("Fatal Error: Security error whilst trying to create temporary directory")
   sys.exit(0) # NOTE(review): exits with status 0 even on this error path -- confirm intended
  os.chdir(gp_settings.tempdir)
  csa_main() # Child process
  os._exit(0)
Ejemplo n.º 7
0
def do_pyxplot(filename):
  """Run the pyxplot interpreter on *filename*.

  In verbose mode (module-global `verbose`), log start/end and discard
  pyxplot's stdout.  In quiet mode, run silently and only log if pyxplot
  wrote anything to stderr.
  """
  if verbose:
    gp_report("[%s] Running %s."%(time.ctime(),filename)) # In verbose mode, we produce log output
    # NOTE(review): shell command built from filename -- fine for trusted
    # local scripts, but would need quoting for shell metacharacters.
    os.system("pyxplot %s > /dev/null"%filename) # Always discard stdout
    gp_report("[%s] Completed %s."%(time.ctime(),filename))
  else:
    # Pass an argument list, not a single string: with the default
    # shell=False, a single string would be taken as the program name
    # itself ("pyxplot file") and fail to execute on POSIX.
    sp = subprocess.Popen(["pyxplot", filename], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # communicate() drains stdout and stderr concurrently, avoiding the
    # deadlock that sequential .read() calls can hit when one pipe's OS
    # buffer fills while we are blocked reading the other.
    stringout, stringerr = sp.communicate()
    if (len(stringerr.strip()) > 0): # In quiet mode, we only produce log output if we encounter an error
      gp_report("[%s] Running %s."%(time.ctime(),filename))
      gp_error(stringerr.strip())
      gp_report("[%s] Completed %s."%(time.ctime(),filename))
Ejemplo n.º 8
0
def child_support_agency_init():
    """Fork off the background 'child support agency' process.

    The parent returns immediately.  The forked child creates a private
    temporary working directory, verifies it really owns it, chdirs into
    it, runs csa_main(), and exits via os._exit so that no inherited
    atexit/cleanup handlers run in the child.
    """
    fork = os.fork()
    if (fork != 0):
        return  # Parent process: nothing more to do here
    else:
        # 0o700 (owner-only) -- modern octal literal, valid on Python 2.6+
        # and Python 3, unlike the legacy 0700 spelling.
        os.makedirs(gp_settings.tempdir,
                    mode=0o700)  # Create temporary working directory
        # Paranoia: refuse to run if the directory is missing or owned by
        # another user (guards against symlink/tmp races on shared /tmp).
        if ((not os.path.isdir(gp_settings.tempdir)) or
            (not (os.stat(gp_settings.tempdir)[stat.ST_UID] == os.getuid()))):
            gp_error(
                "Fatal Error: Security error whilst trying to create temporary directory"
            )
            sys.exit(0)  # NOTE(review): exits with status 0 on error -- confirm intended
        os.chdir(gp_settings.tempdir)
        csa_main()  # Child process
        os._exit(0)
Ejemplo n.º 9
0
def parse(line, vars):
    """Match *line* against the grammar of each known command in turn.

    Returns the dictionary of variables captured by the first command whose
    grammar fits; None (after printing a marked-up syntax error) when a
    command began to match but then failed; or {'directive':'unrecognised'}
    when no command matched at all.
    """
    for command, vardict in commands:
        # Fresh parser state for this candidate.  'began' does not mean a
        # full fit -- only that the command started to fit, i.e. this was
        # the right command for the line.
        began = False
        pos = 0            # how far along the line we have parsed
        captured = {}      # variables captured while parsing
        expecting = ""     # options valid at the failure point
        alg_pos = None     # position of any algebra error within the line
        alg_msg = ""
        [pos, ok, expecting, alg_pos, alg_msg, began,
         captured] = parse_descend(line, vars, pos, expecting, alg_pos,
                                   alg_msg, command, began, captured)
        if not began:
            continue  # This command didn't even begin to match
        if ok and (line[pos:].strip() == ""):
            # Full match with no trailing matter: return captured variables
            return captured
        # Otherwise build a syntax-error report with arrows marking the spot
        if alg_pos is None: errorstr = "Syntax Error -- "
        else: errorstr = "At this point, was "
        if not ok:  # User input didn't match the command specification
            errorstr += "expecting %s.\n" % expecting
        elif expecting == "":
            errorstr += "unexpected trailing matter at the end of command.\n"
        else:
            errorstr += "expecting %s or end of command.\n" % expecting
        errorstr += " " * pos + " |\n"
        errorstr += " " * pos + "\\|/\n " + line
        if alg_pos is not None:
            errorstr += "\n"
            errorstr += " " * alg_pos + "/|\\\n"
            errorstr += " " * alg_pos + " |\n"
            errorstr += alg_msg
        gp_error("\n" + errorstr + "\n")
        return None  # Return None in case of failure
    return {'directive': 'unrecognised'}  # We did not recognise command
Ejemplo n.º 10
0
def directive_eps(command,commandtext,linestyles,vars,settings,interactive):
 """Handle the 'eps' directive: place an encapsulated-postscript image on
 the multiplot canvas (or a fresh canvas when multiplot is off) and replot.

 command     -- parsed command dictionary; must contain 'filename', and may
                supply 'x','y','rotation','width','height' overrides.
 commandtext -- raw command text, kept so the 'list' command can show it.
 interactive -- when True, report additions/removals to the terminal.
 Appends to (and on failure pops from) the module-global multiplot_plotdesc.
 """
 # Outside multiplot mode each new item replaces the existing plot list
 if (gp_settings.settings_global['MULTIPLOT'] != 'ON'): plotorder_clear()

 # Read position/size options, falling back to defaults from settings
 filename = command['filename']
 if 'x'        in command: x = command['x']
 else                    : x = settings['ORIGINX']
 if 'y'        in command: y = command['y']
 else                    : y = settings['ORIGINY']
 if 'rotation' in command: rotation = command['rotation']
 else                    : rotation = 0.0
 if 'width'    in command: width  = command['width']
 else                    : width  = None
 if 'height'   in command: height = command['height']
 else                    : height = None

 # Describe this image in the standard multiplot-item format
 this_plotdesc = {'itemtype':'eps',
                  'number'  :len(multiplot_plotdesc),
                  'filename':filename,
                  'x_pos'   :x,
                  'y_pos'   :y,
                  'deleted' :'OFF',
                  'rotation':rotation,
                  'width'   :width,
                  'height'  :height,
                  'listdesc':commandtext
                  }
 multiplot_plotdesc.append(this_plotdesc)

 if (gp_settings.settings_global['MULTIPLOT'] == 'ON') and interactive:
  gp_report("eps image added to multiplot with reference %d."%this_plotdesc['number'])

 # Replot everything; if this new item was the one that failed, withdraw it
 if (gp_settings.settings_global['DISPLAY'] == "ON"):
  try:
   unsuccessful_plot_operations = gp_plot.multiplot_plot(linestyles,vars,settings,multiplot_plotdesc)
  except KeyboardInterrupt: raise
  except:
   gp_error("Error:" , sys.exc_info()[1], "(" , sys.exc_info()[0] , ")")
   return

  if (gp_settings.settings_global['MULTIPLOT'] == 'ON'):
   if (this_plotdesc['number'] in unsuccessful_plot_operations) and (gp_settings.settings_global['DISPLAY'] == "ON"):
    multiplot_plotdesc.pop()
    if interactive:
     gp_report("eps image has been removed from multiplot, because it generated an error.")
Ejemplo n.º 11
0
def gp_eval(expression, vars, verbose=True, iteration=1):
  """Evaluate the algebraic string *expression* and return its value.

  vars      -- dictionary of user variables; returned directly when
               *expression* is a bare variable name, otherwise used as the
               local namespace for eval().
  verbose   -- if True, report evaluation errors via gp_error before re-raising.
  iteration -- recursion depth; wrapped user functions re-enter gp_eval, so
               depth is capped at 20 to catch self-referential definitions.

  Raises OverflowError when the recursion cap is exceeded.  NOTE: uses
  eval(), so expressions must come from a trusted source.
  """
  if (iteration > 20):
   # Call-form raise: valid on Python 2 and 3, unlike the comma syntax.
   raise OverflowError("Iteration depth exceeded in function evaluation.")
  try:
    # Quick escape route if we're just evaluating a variable name
    if expression in vars: return vars[expression]
    # And also for really simple numbers
    try: evalexp = float(expression)
    except: pass
    else: return evalexp

    # Stash the evaluation context where wrapped user functions can find it
    gp_userspace.passed_to_funcwrap = {'vars':vars,'iter':iteration,'verbose':verbose}
    return float(eval(expression, gp_userspace.function_namespace.copy(), vars))
  except KeyboardInterrupt: raise
  except:
   # Deliberately broad: report whatever the expression raised, then re-raise
   if (verbose):
    gp_error("Error evaluating expression %s"%expression)
    gp_error("Error:" , sys.exc_info()[1], "(" , sys.exc_info()[0] , ")")
   raise
Ejemplo n.º 12
0
def directive_text(command,commandtext,linestyles,vars,settings,interactive):
 """Handle the 'text' directive: place a text label on the multiplot canvas
 (or on a fresh canvas when multiplot is off) and replot.

 command     -- parsed command dictionary; must contain 'string', and may
                supply 'x','y','rotation','colour' overrides.
 commandtext -- raw command text, kept so the 'list' command can show it.
 interactive -- when True, report additions/removals to the terminal.
 Appends to (and on failure pops from) the module-global multiplot_plotdesc.
 """
 # Outside multiplot mode each new item replaces the existing plot list
 if (gp_settings.settings_global['MULTIPLOT'] != 'ON'): plotorder_clear()

 title = command['string']

 # Read position/style options, falling back to defaults
 if 'x'        in command: x = command['x']
 else                    : x = 0.0
 if 'y'        in command: y = command['y']
 else                    : y = 0.0
 if 'rotation' in command: rotation = command['rotation']
 else                    : rotation = 0.0
 if 'colour'   in command: colour = command['colour']
 else                    : colour = settings['TEXTCOLOUR']

 # Describe this label in the standard multiplot-item format
 this_plotdesc = {'itemtype':'text',
                  'number'  :len(multiplot_plotdesc),
                  'text'    :title,
                  'x_pos'   :x,
                  'y_pos'   :y,
                  'settings':settings.copy(),
                  'deleted' :'OFF',
                  'rotation':rotation,
                  'colour':colour,
                  'listdesc':commandtext
                  }
 multiplot_plotdesc.append(this_plotdesc)

 if (gp_settings.settings_global['MULTIPLOT'] == 'ON') and interactive:
  gp_report("Text label added to multiplot with reference %d."%this_plotdesc['number'])

 # Replot everything; if this new item was the one that failed, withdraw it
 if (gp_settings.settings_global['DISPLAY'] == "ON"):
  try:
   unsuccessful_plot_operations = gp_plot.multiplot_plot(linestyles,vars,settings,multiplot_plotdesc)
  except KeyboardInterrupt: raise
  except:
   gp_error("Error:" , sys.exc_info()[1], "(" , sys.exc_info()[0] , ")")
   return

  if (gp_settings.settings_global['MULTIPLOT'] == 'ON'):
   if (this_plotdesc['number'] in unsuccessful_plot_operations) and (gp_settings.settings_global['DISPLAY'] == "ON"):
    multiplot_plotdesc.pop()
    if interactive:
     gp_report("Text label has been removed from multiplot, because it generated an error.")
Ejemplo n.º 13
0
def get_bins(ranges, xmin, xmax, binwidth, binorigin):
    """Compute the lower edges of histogram bins.

    ranges    -- list of [min, max] pairs from the command line; only the
                 first pair (the x range) is consulted, either end may be None.
    xmin,xmax -- extremes of the actual data.
    binwidth  -- width of each bin (assumed > 0; caller validates).
    binorigin -- a point with which some bin edge must be aligned.

    Returns the list of lower bin edges, or None (after reporting an error)
    if the request would create an unreasonable (> 1e6) number of bins.
    """
    # If we have limits to where we're binning data from then there's no point in considering data outside it.
    if (len(ranges) == 0):
        xbinmin = xmin
        xbinmax = xmax
    else:
        if ranges[0][0] is None:
            xbinmin = xmin
        else:
            xbinmin = ranges[0][0]
        if ranges[0][1] is None:
            xbinmax = xmax
        else:
            xbinmax = ranges[0][1]

    # Turn the bin origin setting into something more useful: its offset within one bin width
    binorigin -= binwidth * floor(binorigin / binwidth)

    # Expand the ends of the outside bins to match the provided bin pattern
    xbinmin = floor((xbinmin - binorigin) / binwidth) * binwidth + binorigin
    xbinmax = ceil((xbinmax - binorigin) / binwidth) * binwidth + binorigin

    # Deal with the case where the minimum datum and the lower edge of the bottom bin are in exactly the same place by adding one more bin
    if (xmin == xbinmin and (len(ranges) == 0 or ranges[0][0] is None)):
        xbinmin -= binwidth

    # int(), not the C-style (int)(...) cast
    Nbins = int((xbinmax - xbinmin) / binwidth) + 1

    # Check for a silly number of bins
    if (Nbins > 1e6):
        gp_error('Too many bins (%d) for histogram command!' % Nbins)
        return None
    if (Nbins > 1e4):
        gp_warning(
            'You have specified a large number (%d) of bins in the histogram command.  This may require a large amount of memory and time...'
            % Nbins)

    return [i * binwidth + xbinmin for i in range(Nbins)]
Ejemplo n.º 14
0
def directive_histogram(command, vars, settings):
    """Handle the 'histogram' directive: bin data from a file and declare a
    user function named after the histogram that returns the bin counts.

    command  -- parsed command dictionary ('hist_function', 'filename', and
                optional range/index/select/every/using/bin options).
    vars     -- user variable dictionary, passed through to the data reader.
    settings -- current settings dict (supplies BINWIDTH/BINORIGIN defaults).
    Returns None; results are published via gp_userspace function declaration.
    """

    # Read ranges from RE++ input
    ranges = []
    for drange in command['range_list']:
        if 'min' in drange.keys(): range_min = drange['min']
        else: range_min = None
        if 'max' in drange.keys(): range_max = drange['max']
        else: range_max = None
        ranges.append([range_min, range_max])

    # Function name
    funcname = command['hist_function']

    # Read datafile filename
    datafile = command['filename']

    # every
    if 'every_list:' in command: every = command['every_list:']
    else: every = []

    # index
    if 'index' in command: index = int(command['index'])
    else: index = -1  # default

    # select
    if 'select_criterion' in command:
        select_criteria = command['select_criterion']
    else:
        select_criteria = ''

    # Using rows or columns
    if 'use_rows' in command: usingrowcol = "row"
    elif 'use_columns' in command: usingrowcol = "col"
    else: usingrowcol = "col"  # default

    # using
    if 'using_list:' in command:
        using = [item['using_item'] for item in command['using_list:']]
    else:
        using = ['1']

    # Histogramming is one-dimensional: exactly one using column allowed
    if (len(using) > 1):
        raise ValueError, "histogram command needs 1 column of input data; %d supplied." % len(
            using)

    # We have now read all of our commandline parameters, and are ready to get the data
    try:
        (rows, columns,
         datagrid) = gp_datafile.gp_dataread(datafile, index, usingrowcol,
                                             using, select_criteria, True,
                                             every, vars, "points")[0]
    except KeyboardInterrupt:
        raise
    except:
        gp_error("Error reading input datafile:",
                 sys.exc_info()[1], "(",
                 sys.exc_info()[0], ")")
        return  # Error

    # Check for a blank dataset: declare a constant-zero function and stop
    if (datagrid == []):
        gp_warning("Warning: No data provided to histogram command!")
        gp_userspace.gp_function_declare("%s(x) = 0." % funcname)
        return

    # Sort data points into a list in order of x-axis value
    # (Python 2 comparison-function sort)
    datagrid.sort(gp_math.sort_on_second_list_item)
    xmin = datagrid[0][1]
    xmax = datagrid[-1][1]

    # If the x1 axis is a log axis we want to do the binning in log space
    logaxis = False
    if (gp_settings.axes['x'][1]['LOG'] == 'ON'):
        logaxis = True
        base = gp_settings.axes['x'][1]['LOGBASE']
        logbase = log(base)  # The (natural) log of the base that we will use
        if (xmax <= 0):
            gp_error(
                'Error: trying to bin exclusively non-positive data into logrithmic histogram bins'
            )
            return
        if (xmin <= 0):
            gp_warning(
                'Warning: negative data will be rejected from logrithmically-binned histogram'
            )
        i = 0
        # Log all the data, deleting those that are negative.
        # NOTE: while-loop with explicit index because entries are deleted
        # in place during iteration; 'continue' skips the i += 1 so the
        # element that slid into slot i is examined next.
        while (i < len(datagrid)):
            try:
                datagrid[i][1] = log(datagrid[i][1]) / logbase
            except KeyboardInterrupt:
                raise
            except ValueError:
                del datagrid[i]
                continue
            except OverflowError:
                del datagrid[i]
                continue
            i += 1
        # Reset these to possible new values
        xmin = datagrid[0][1]
        xmax = datagrid[-1][1]

    # Now we need to work out our bins
    # Precedence: 1. Set of bins specified by user in command
    #             2. Binwidth,origin specified by user in command
    #             3. Set of bins set as a setting (not currently implemented)
    #             4. Binwidth,origin set as a setting

    if ('bin_list,' in command):
        bins = []
        for x in command['bin_list,']:
            bins.append(x['x'])

    else:
        if ('binwidth' in command): binwidth = command['binwidth']
        else: binwidth = settings['BINWIDTH']
        assert (binwidth >
                0.0), "Width of histogram bins must be greater than zero"
        if ('binorigin' in command): binorigin = command['binorigin']
        else: binorigin = settings['BINORIGIN']
        bins = get_bins(ranges, xmin, xmax, binwidth, binorigin)
        if (bins == None): return

    # Check for user-specified data ranges to consider, else use maximum range of data
    binrange = [xmin, xmax]
    if (len(ranges) > 0):
        if (ranges[0][0] != None): binrange[0] = ranges[0][0]
        if (ranges[0][1] != None): binrange[1] = ranges[0][1]

    counts = histcount(bins, datagrid, binrange)  # Bin the data up
    # For binning in log(x), convert the bins from log to linear space
    if (logaxis):
        for i in range(len(bins)):
            bins[i] = base**bins[i]
    make_histogram_function(datafile, funcname, bins,
                            counts)  # Turn the binned data into a function
    return
Ejemplo n.º 15
0
try:

# Read filenames from commandline

  helped = False
  for i in range(1,len(sys.argv)):
    if   (sys.argv[i] in ['-h','--help']   ): gp_report(help_string)    ; helped = True
    elif (sys.argv[i] in ['-V','--version']): gp_report(version_string) ; helped = True
    elif (sys.argv[i] in ['-q','--quiet']  ): verbose = False
    elif (sys.argv[i] in ['-v','--verbose']): verbose = True
    else                                    : watch_files.append([sys.argv[i], {}])

  if (len(watch_files) == 0):
    if not helped:
      gp_error("ERROR: No files to watch! Please supply a list of PyXPlot scripts on the commandline.")
      sys.exit(1)
    sys.exit(0) # In case we were called with --help

  if verbose:
    gp_report(gp_text.init)
    gp_report("This is PyXPlot watcher. Press CTRL-C to exit.\nNow watching the following files:")
    for filelist in watch_files: gp_report(filelist[0])
    gp_report("\n--- Activity Log ---")


# PyXPlot each supplied filename straight away

  for [filename,statvals] in watch_files:
   filelist = glob.glob(os.path.expanduser(filename))
   if (len(filelist) == 0): gp_warning("WARNING: Cannot find file '%s'. Will commence watching it when if it appears."%filename)
Ejemplo n.º 16
0
def directive_plot(command,commandtext,linestyles,vars,settings,axes,labels,arrows,replot_stat,interactive):
  """Handle the 'plot' and 'replot' directives.

  command     -- parsed command dictionary (range_list, plot_list, ...).
  commandtext -- raw command text, kept so the 'list' command can show it.
  replot_stat -- 0 for a fresh plot; non-zero for replot (reuses plotlist
                 and axes_this from the previous plot).
  Mutates the module globals plotlist, axes_this, replot_focus and
  multiplot_plotdesc; replots the canvas when display is on.
  """
  global plotlist, axes_this, replot_focus
  global multiplot_plotdesc

  x_position = settings['ORIGINX']
  y_position = settings['ORIGINY']

  # Outside multiplot mode each new plot replaces the existing plot list
  if (gp_settings.settings_global['MULTIPLOT'] != 'ON'): plotorder_clear()

  if (replot_stat == 0): plotlist = [] # If not replotting, wipe plot list
  else:
   if (gp_settings.settings_global['MULTIPLOT'] == 'ON'): # If replotting a multiplot, wipe last graph
    if (replot_focus != None):
     x_position = multiplot_plotdesc[replot_focus]['settings']['ORIGINX'] # Plot may have been moved with the 'move' command
     y_position = multiplot_plotdesc[replot_focus]['settings']['ORIGINY']

  # Now make a local copy of axes, and store it in multiplot_plotdesc[n]['axes']
  # We need a copy, because user may change axis ranges in the plot command, overriding settings
  # in gp_settings lists of axes.
  if (replot_stat == 0): axes_this = { 'x':{},'y':{},'z':{} } # NB: replot on same axes as before when replotting, so do not clear axes_this
  for [direction,axis_list_to] in axes_this.iteritems():
   for [number,axis] in axes[direction].iteritems():
    if number not in axis_list_to:
     axis_list_to[number] = {'SETTINGS':axis.copy(), 'MIN_USED':None, 'MAX_USED':None, 'AXIS':None}
     # 'AXIS' will become a PyX axis object in due course

  # Now read range specifications, modifying local copy of axes as required
  # NB: Any ranges that are set go into ['SETTINGS']['MIN/MAX'], not ['MIN_USED'] or ['MAX_USED']
  # Ranges alternate x,y,x2,y2,... in the order given on the command line
  for i in range(len(command['range_list'])):
   if ((i%2) == 0): direction='x'
   else           : direction='y'
   number=int(i/2)+1

   # Create axes if they don't already exist; linear autoscaling axes
   if (not number in axes_this[direction]): axes_this[direction][number] = {'SETTINGS':gp_settings.default_new_axis.copy(), 'MIN_USED':None, 'MAX_USED':None, 'AXIS':None}

   if 'min'     in command['range_list'][i]: axes_this[direction][number]['SETTINGS']['MIN'] = command['range_list'][i]['min']
   if 'max'     in command['range_list'][i]: axes_this[direction][number]['SETTINGS']['MAX'] = command['range_list'][i]['max']
   if 'minauto' in command['range_list'][i]: axes_this[direction][number]['SETTINGS']['MIN'] = None
   if 'maxauto' in command['range_list'][i]: axes_this[direction][number]['SETTINGS']['MAX'] = None

  # We leave the setting up of the key until a later date
  key = None

  # Add list of things to plot to plotlist (we use extend, as replot keeps the old list)
  if 'plot_list,' in command: plotlist.extend(command['plot_list,'])

  # Add plot to multiplot list (we do this even when not in multiplot mode, in which case the list has just been wiped, and will only have one member)
  this_plotdesc = {'itemtype':'plot',
                   'number'  :replot_focus, # This is filled in below if replot_focus is None
                   'plotlist':plotlist,
                   'key'     :key,
                   'settings':settings.copy(),
                   'labels'  :labels.copy(),
                   'arrows'  :arrows.copy(),
                   'deleted' :'OFF',
                   'axes'    :None, # Fill in this below
                   'listdesc':commandtext
                   }

  for item in this_plotdesc['plotlist']: # Turn string variables into filenames
    if ('expression_list:' in item):
     if len(item['expression_list:'])==1:
      filename_var = item['expression_list:'][0]['expression'].strip()
      if (filename_var in vars):
       if (type(vars[filename_var])==type("")):
        item['filename']=vars[filename_var]
        del item['expression_list:']

  if (replot_stat != 0) and (replot_focus != None): # If replotting a multiplot, overwrite last graph
   multiplot_plotdesc[replot_focus] = this_plotdesc
  else:
   replot_stat = 0 # We're not replotting, as there's nothing to replot
   multiplot_plotdesc.append( this_plotdesc )
   replot_focus = len(multiplot_plotdesc)-1
   multiplot_plotdesc[replot_focus]['number'] = replot_focus
  multiplot_plotdesc[replot_focus]['settings']['ORIGINX'] = x_position # Reset origin, bearing in mind that we may be replotting something which had been moved
  multiplot_plotdesc[replot_focus]['settings']['ORIGINY'] = y_position

  # Make a copy of axes_this and add it to multiplot catalogue of graph axes
  # We do a copy here, because 'set xlabel' will modify axes_this, in case we want to do a replot
  # But we don't want it to poke around with our latest multiplot addition (unless we replot that).
  axes_this_cpy = { 'x':{},'y':{},'z':{} }
  for [direction,axis_list_to] in axes_this_cpy.iteritems():
   for [number,axis] in axes_this[direction].iteritems():
    axis_list_to[number] = {'SETTINGS':axis['SETTINGS'].copy(), 'MIN_USED':None, 'MAX_USED':None, 'AXIS':None}
  multiplot_plotdesc[replot_focus]['axes'] = axes_this_cpy

  # Go ahead and make a canvas and plot everything!
  if interactive and (gp_settings.settings_global['MULTIPLOT'] == 'ON') and ((replot_stat == 0) or (replot_focus == None)):
   gp_report("Plot added to multiplot with reference %d."%this_plotdesc['number'])

  if (gp_settings.settings_global['DISPLAY'] == "ON"):
   try:
    unsuccessful_plot_operations = gp_plot.multiplot_plot(linestyles,vars,settings,multiplot_plotdesc)
   except KeyboardInterrupt: raise
   except:
    gp_error("Error:" , sys.exc_info()[1], "(" , sys.exc_info()[0] , ")")
    return

   # If our newly-added plot was the one that failed, withdraw it
   if (gp_settings.settings_global['MULTIPLOT'] == 'ON') and (replot_stat == 0) and (this_plotdesc['number'] in unsuccessful_plot_operations) and (gp_settings.settings_global['DISPLAY'] == "ON"):
    multiplot_plotdesc.pop()
    if interactive:
     gp_report("Plot has been removed from multiplot, because it generated an error.")
Ejemplo n.º 17
0
def output_table(datagrid, settings, format):
    """Tabulate *datagrid* to the file named by the OUTPUT setting.

    datagrid -- list of [rows, cols, block] triples; each block is a list of
                data lines (lists of numbers, with '' padding uneven grids).
    format   -- user-supplied printf-style format string, or '' to choose a
                per-column default format automatically.
    Returns [] on a bad format string, otherwise None.  Backs up any
    pre-existing output file when the BACKUP setting is on.
    """
    # Get the filename
    fname_out = os.path.expanduser(gp_settings.settings_global['OUTPUT'])
    if (fname_out == ""): fname_out = "pyxplot.dat"
    outfile = os.path.join(gp_settings.cwd, os.path.expanduser(fname_out))

    # Create backup of pre-existing datafile if necessary
    if (gp_settings.settings_global['BACKUP']
            == "ON") and os.path.exists(outfile):
        i = 0
        while os.path.exists("%s~%d" % (outfile, i)):
            i += 1
        os.rename(outfile, "%s~%d" % (outfile, i))

    f = open(outfile, 'w')  # Open output datafile
    # try/finally so the handle is closed on every exit path (it was
    # previously leaked on the bad-format early return and on write errors)
    try:
        # Write a header
        f.write("# Datafile produced by PyXPlot %s on %s\n" %
                (gp_version.VERSION,
                 time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime())))

        # Produce a format string.
        cols = datagrid[0][1]
        formats = []
        formatprefix = ''
        # Test for a user-supplied string
        if (format != ''):
            # Check that this is a format string and extract an optional prefix
            test = re.search(r'^([^%]*)%', format)
            if (test is None):
                gp_error("Error: Bad format string %s" % format)
                return []
            formatprefix = test.group(1)
            # Extract the format substrings, cycling them over all columns
            formatitems = re.findall(r'%[^%]*', format)
            Nformatitems = len(formatitems)
            formats = [formatitems[i % Nformatitems] for i in range(cols)]
        else:
            # Produce a default format string for each column of data; ensure that same format is used down whole column for easy readibility.
            allints = [True for i in range(cols)]
            allsmall = [True for i in range(cols)]
            for [rows, cols, block] in datagrid:
                for line in block:
                    for i in range(cols):
                        if (line[i] == ''):
                            continue  # Skip blank points (padding for uneven data grids)
                        if (line[i] != float(int(line[i]))
                                or (abs(line[i]) > 1000)):
                            allints[i] = False
                        if (abs(line[i]) >= 1000
                                or (abs(line[i]) < .0999999 and line[i] != 0.)):
                            allsmall[i] = False

            for i in range(cols):
                if (allints[i]): formats.append("%10d")
                elif (allsmall[i]): formats.append("%11f")
                else: formats.append("%15e")

        # Actually write the data file
        try:
            for [rows, cols, block] in datagrid:
                for line in block:
                    strs = [formatprefix]
                    for i in range(len(line)):
                        # Local renamed from 'format' so the parameter is not clobbered
                        colfmt = formats[i]
                        if (line[i] == ''):
                            # Change the format string from %XX(d/e/f) to %XXs (sick, eh?)
                            colfmt = "%ss" % colfmt[0:-1]
                        if (colfmt[-1] == 'd'): strs.append(colfmt % int(line[i]))
                        else: strs.append(colfmt % line[i])
                    # joined row (no 'str' local: don't shadow the builtin)
                    f.write("%s\n" % ' '.join(strs))
                f.write("\n")
        except:
            gp_error("Error whilst writing tabulated file.")
            raise
    finally:
        f.close()
    return
Ejemplo n.º 18
0
def directive_fit(command, vars):
    """Implement the `fit` command: least-squares fit of a user-declared
    function to a datafile using scipy's fmin simplex minimiser.

    command -- dictionary of parsed commandline items (function name, datafile,
               using/select/every/index modifiers, `via` variable list, ranges).
    vars    -- dictionary of user variables; the best-fit values of the `via`
               variables are written back into it on success.

    Returns None in all cases; errors are reported via gp_error()/gp_warning()
    and cause an early return.
    """
    # These module globals are the side channel through which fit_residual(),
    # sigma_logP() and hessian_get() (called back by scipy with only the
    # parameter vector) see the function, dataset and variable scope.
    global pass_function, no_arguments, len_vialist, pass_vialist, pass_vars, pass_dataset
    global yerrors, x_bestfit

    assert not SCIPY_ABSENT, "The fit command requires the scipy module for python, which is not installed. Please install and try again."

    # FIRST OF ALL, READ THE INPUT PARAMETERS FROM THE COMMANDLINE

    # Ranges: list of [min, max] pairs, None meaning "unbounded on this side"
    ranges = []
    for drange in command['range_list']:
        if 'min' in drange.keys(): range_min = drange['min']
        else: range_min = None
        if 'max' in drange.keys(): range_max = drange['max']
        else: range_max = None
        ranges.append([range_min, range_max])

    # Function name
    pass_function = command['fit_function']
    if not pass_function in gp_userspace.functions:
        gp_error(
            "Error: fit command requested to fit function '%s'; no such function."
            % pass_function)
        return
    no_arguments = gp_userspace.functions[pass_function]['no_args']

    # Read datafile filename
    datafile = command['filename']

    # every
    if 'every_list:' in command: every = command['every_list:']
    else: every = []

    # index
    if 'index' in command: index = int(command['index'])
    else: index = -1  # default

    # select
    if 'select_criterion' in command:
        select_criteria = command['select_criterion']
    else:
        select_criteria = ''

    # smooth
    # NOTE(review): smoothing is parsed here but never used below -- confirm
    # whether the fit command is supposed to honour a `smooth` modifier.
    if 'smooth' in command: smoothing = float(command['smooth'])
    else: smoothing = 0.0

    # Using rows or columns
    if 'use_rows' in command: usingrowcol = "row"
    elif 'use_columns' in command: usingrowcol = "col"
    else: usingrowcol = "col"  # default

    # using; default is one column per function argument plus the target value
    if 'using_list:' in command:
        using = [item['using_item'] for item in command['using_list:']]
    else:
        using = [str(i + 1) for i in range(no_arguments + 1)]

    # via: the list of variables whose values the fit adjusts
    vialist = [item['fit_variable'] for item in command['fit_variables,']]

    # We have now read all of our commandline parameters, and are ready to start fitting
    try:
        (rows, columns,
         datagrid) = gp_datafile.gp_dataread(datafile,
                                             index,
                                             usingrowcol,
                                             using,
                                             select_criteria,
                                             True,
                                             every,
                                             vars,
                                             "points",
                                             firsterror=no_arguments + 1)[0]

        # Discard any datapoint which falls outside a user-supplied range
        # (bounds are exclusive; None means unbounded on that side).
        datagrid_cpy = []
        for datapoint in datagrid:
            if False not in [
                (i >= columns) or
                (((ranges[i][0] == None) or (datapoint[i] > ranges[i][0])) and
                 ((ranges[i][1] == None) or (datapoint[i] < ranges[i][1])))
                    for i in range(len(ranges))
            ]:
                datagrid_cpy.append(datapoint)
        datagrid = datagrid_cpy
    except KeyboardInterrupt:
        raise
    except:
        gp_error("Error reading input datafile:",
                 sys.exc_info()[1], "(",
                 sys.exc_info()[0], ")")
        return  # Error

    if (rows == 0):
        gp_error("Error: fit command provided with empty data file")
        return  # Error

    # Decide whether the datafile supplies errorbars: no_arguments+1 columns
    # means bare data, no_arguments+2 means the last column is the errorbar.
    if (columns < (no_arguments + 1)):
        gp_error(
            "Error: fit command needs %d columns of input data to fit a %d-parameter function."
            % (no_arguments + 1, no_arguments))
        return  # Error
    elif (columns == (no_arguments + 1)):
        yerrors = False
    elif (columns == (no_arguments + 2)):
        yerrors = True
    else:
        yerrors = True
        gp_warning(
            "Warning: Too many columns supplied to fit command. Taking 1=first argument, .... , %d=%s(...), %d=errorbar on function output."
            % (no_arguments + 1, pass_function, no_arguments + 2))

    # Publish the fit context to the globals read by the scipy callbacks.
    pass_vialist = vialist
    len_vialist = len(vialist)
    pass_vars = vars.copy()
    pass_dataset = datagrid

    # Set up a list containing the values of all of the parameters that we're fitting
    x = []
    for i in range(len_vialist):
        if vialist[i] in pass_vars:
            x.append(
                pass_vars[vialist[i]])  # Use default value, if we have one
        else:
            x.append(1.0)  # otherwise use 1.0 as our starting value

    # Find best fitting parameter values
    try:
        x = fmin(fit_residual, x, disp=0)
        x_bestfit = x
    except KeyboardInterrupt:
        raise
    except:
        gp_error("Numerical Error:",
                 sys.exc_info()[1], "(",
                 sys.exc_info()[0], ")")
        return  # Error

    # Print best fitting parameter values
    try:
        gp_report("\n# Best fit parameters were:")
        gp_report("# -------------------------\n")
        for i in range(len_vialist):  # Set via variables
            gp_report("%s = %s" % (vialist[i], x[i]))
            vars[vialist[i]] = x[i]  # Set variables in global scope
    except KeyboardInterrupt:
        raise
    except:
        gp_error("Error whilst display best fit values:",
                 sys.exc_info()[1], "(",
                 sys.exc_info()[0], ")")
        return  # Error

    # Estimate error magnitude in supplied data, if not supplied (this is critical to error in fitted paramters).
    gp_report(
        "\n# Estimated error in supplied data values (based upon misfit to this function fit, assuming uniform error on all datapoints):"
    )
    if not yerrors:
        try:
            sigma_data = fmin(sigma_logP, [1.0], disp=0)
            gp_report("sigma_datafile = %s\n" % sigma_data)
        except KeyboardInterrupt:
            raise
        except:
            gp_error(
                "Error whilst estimating the uncertainties in parameter values.\nError:",
                sys.exc_info()[1], "(",
                sys.exc_info()[0], ")")
            sigma_data = 1.0
            # Keep going; we may as well print out the Hessian matrix anyway
    else:
        gp_report(
            "# Not calculated, because datafile already contained errorbars on data values.\n"
        )
        sigma_data = 0.0  # sigma_data is the errorbar on the supplied datapoints

    # Calculate and print the Hessian matrix
    try:
        hessian = scipy.mat([[
            hessian_get(x, yerrors, sigma_data, i, j)
            for i in range(len_vialist)
        ] for j in range(len_vialist)])
        matrix_print(hessian, vialist,
                     "Hessian matrix of log-probability distribution",
                     "hessian")

        # At a genuine maximum of the log-probability every entry checked here
        # should be non-positive; any positive entry flags a failed fit.
        hessian_negative = True
        for i in range(len_vialist):
            for j in range(len_vialist):
                if (hessian[i, j] > 0.0): hessian_negative = False
        if not hessian_negative:
            gp_warning(
                "\n# *** WARNING: ***\n# Non-negative components in Hessian matrix. This implies that fitting procedure has failed.\n"
            )
    except KeyboardInterrupt:
        raise
    except:
        gp_error("Error whilst evaluating Hessian matrix.\nError:",
                 sys.exc_info()[1], "(",
                 sys.exc_info()[0], ")")
        return  # Error

    # Print the Covariance matrix
    try:
        covar = linalg.inv(-hessian)  # Covariance matrix = inverse(-H)
        matrix_print(covar, vialist,
                     "Covariance matrix of probability distribution",
                     "covariance")
    except KeyboardInterrupt:
        raise
    except:
        gp_error("Error whilst evaluating covariance matrix.\nError:",
                 sys.exc_info()[1], "(",
                 sys.exc_info()[0], ")")
        return  # Error

    # Get the standard errors on each parameter, which are the squareroots of the leading diagonal terms
    try:
        standard_devs = []
        for i in range(len_vialist):
            assert (
                covar[i, i] >= 0.0
            ), "Negative terms in leading diagonal of covariance matrix imply negative variances in fitted parameters. The fitting procedure has probably failed."
            standard_devs += [sqrt(covar[i, i])]
    except KeyboardInterrupt:
        raise
    except:
        gp_error(
            "Error whilst evaluating uncertainties in best fit parameter values.\nError:",
            sys.exc_info()[1], "(",
            sys.exc_info()[0], ")")
        return  # Error

    # Print the Correlation matrix
    # (normalise the covariance matrix in place: C_ij / (sigma_i * sigma_j))
    try:
        for i in range(len_vialist):
            for j in range(len_vialist):
                covar[i, j] = covar[i, j] / standard_devs[i] / standard_devs[j]
        matrix_print(covar, vialist,
                     "Correlation matrix of probability distribution",
                     "correlation")
    except KeyboardInterrupt:
        raise
    except:
        gp_error("Error whilst evaluating correlation matrix.\nError:",
                 sys.exc_info()[1], "(",
                 sys.exc_info()[0], ")")
        return  # Error

    # Print the standard errors on variables
    try:
        gp_report("\n# Uncertainties in best fit parameter values are:")
        gp_report("# -----------------------------------------------\n")
        for i in range(len_vialist):
            gp_report("sigma_%s = %s" % (vialist[i], standard_devs[i]))

        # Print final summary
        gp_report("\n# Summary:")
        gp_report("# --------\n#")
        for i in range(len_vialist):
            gp_report("# %s = %s +/- %s" %
                      (vialist[i], x[i], standard_devs[i]))
    except KeyboardInterrupt:
        raise
    except:
        gp_error(
            "Error whilst displaying uncertainties in best fit parameter values.\nError:",
            sys.exc_info()[1], "(",
            sys.exc_info()[0], ")")
        return  # Error

    return  # Done!!!
Ejemplo n.º 19
0
def directive_histogram(command, vars, settings):
  """Implement the `histogram` command: bin one column of a datafile and
  declare the resulting histogram as a new userspace function.

  command  -- dictionary of parsed commandline items (function name, datafile,
              using/select/every/index modifiers, bin specification, ranges).
  vars     -- dictionary of user variables, passed to the datafile reader.
  settings -- current settings dictionary, supplying default BINWIDTH and
              BINORIGIN when the command does not specify them.

  Returns None; errors are reported via gp_error()/gp_warning() and cause an
  early return.
  """

  # Read ranges from RE++ input
  ranges = []
  for drange in command['range_list']:
   if 'min' in drange.keys(): range_min = drange['min']
   else                     : range_min = None
   if 'max' in drange.keys(): range_max = drange['max']
   else                     : range_max = None
   ranges.append([ range_min , range_max ])

  # Function name
  funcname = command['hist_function']

  # Read datafile filename
  datafile = command['filename']

  # every
  if 'every_list:' in command: every = command['every_list:']
  else                       : every = []

  # index
  if 'index' in command: index = int(command['index'])
  else                 : index = -1 # default

  # select
  if 'select_criterion' in command: select_criteria = command['select_criterion']
  else                            : select_criteria = ''

  # Using rows or columns
  if   'use_rows'    in command: usingrowcol = "row"
  elif 'use_columns' in command: usingrowcol = "col"
  else                         : usingrowcol = "col" # default

  # using
  if 'using_list:' in command: using = [item['using_item'] for item in command['using_list:']]
  else                       : using = ['1']

  if (len(using) > 1):
   raise ValueError, "histogram command needs 1 column of input data; %d supplied."%len(using)

  # We have now read all of our commandline parameters, and are ready to get the data
  try:
   (rows,columns,datagrid) = gp_datafile.gp_dataread(datafile, index, usingrowcol, using, select_criteria, True, every, vars, "points")[0]
  except KeyboardInterrupt: raise
  except:
   gp_error("Error reading input datafile:" , sys.exc_info()[1], "(" , sys.exc_info()[0] , ")")
   return # Error

  # Check for a blank dataset
  if (datagrid == []):
   gp_warning("Warning: No data provided to histogram command!")
   gp_userspace.gp_function_declare("%s(x) = 0."%funcname)
   return

  # Sort data points into a list in order of x-axis value
  # (each datapoint is a list whose second element is the x value)
  datagrid.sort(gp_math.sort_on_second_list_item)
  xmin = datagrid[ 0][1]
  xmax = datagrid[-1][1]

  # If the x1 axis is a log axis we want to do the binning in log space
  logaxis = False
  if (gp_settings.axes['x'][1]['LOG'] == 'ON'):
   logaxis = True
   base    = gp_settings.axes['x'][1]['LOGBASE']
   logbase = log(base)     # The (natural) log of the base that we will use
   if (xmax <= 0):
    gp_error('Error: trying to bin exclusively non-positive data into logrithmic histogram bins')
    return
   if (xmin <= 0):
    gp_warning('Warning: negative data will be rejected from logrithmically-binned histogram')
   i=0
   # Log all the data, deleting those that are negative
   # (log() raises ValueError/OverflowError for non-positive input; deleting
   # without incrementing i keeps the scan position correct)
   while (i<len(datagrid)):
    try: datagrid[i][1] = log(datagrid[i][1])/logbase
    except KeyboardInterrupt: raise
    except ValueError: 
     del datagrid[i]
     continue
    except OverflowError: 
     del datagrid[i]
     continue
    i += 1
   # Reset these to possible new values
   xmin = datagrid[ 0][1]
   xmax = datagrid[-1][1]

  # Now we need to work out our bins
  # Precedence: 1. Set of bins specified by user in command
  #             2. Binwidth,origin specified by user in command
  #             3. Set of bins set as a setting (not currently implemented)
  #             4. Binwidth,origin set as a setting

  if ('bin_list,' in command):
   bins = []
   for x in command['bin_list,']:
    bins.append(x['x'])

  else:
   if ('binwidth' in command)  : binwidth  = command['binwidth']
   else                        : binwidth  = settings['BINWIDTH']
   assert (binwidth > 0.0), "Width of histogram bins must be greater than zero"
   if ('binorigin' in command) : binorigin = command['binorigin']
   else                        : binorigin = settings['BINORIGIN']
   bins = get_bins(ranges, xmin, xmax, binwidth, binorigin)
   if (bins == None): return
  
  # Check for user-specified data ranges to consider, else use maximum range of data
  binrange = [xmin, xmax]
  if (len(ranges) > 0):
   if (ranges[0][0] != None): binrange[0] = ranges[0][0]
   if (ranges[0][1] != None): binrange[1] = ranges[0][1]

  counts = histcount(bins, datagrid, binrange)    # Bin the data up
  # For binning in log(x), convert the bins from log to linear space
  if (logaxis) : 
   for i in range(len(bins)) : 
    bins[i] = base**bins[i]  
  make_histogram_function(datafile, funcname, bins, counts) # Turn the binned data into a function
  return
Ejemplo n.º 20
0
def make_datagrid(iterator, description, lineunit, index, usingrowcol, using_list, select_criterion, select_cont, every_list, vars, style, verb_errors=True, firsterror=None):
  """Turn a raw stream of data lines into the grid of evaluated datapoints
  used by the plot/fit machinery.

  iterator         -- yields [data_list, fileline] pairs; data_list is a list
                      of string fields, with [''] marking a blank line.
  description      -- human-readable source description for error messages.
  lineunit         -- prefix used when reporting a datapoint's position
                      (e.g. "line " or "x=").
  index            -- index block to read, or negative for all blocks.
  usingrowcol      -- "row" or "col": whether using items refer to rows or
                      columns of the input.
  using_list       -- list of using expressions; [] means the style's default.
  select_criterion -- expression filtering datapoints ('' for none).
  select_cont      -- if False, a failed select breaks the line into blocks.
  every_list       -- parsed by parse_every() to subsample the data.
  vars             -- user variable dictionary (copied, not mutated).
  style            -- plot style name; determines defaults and arrow handling.
  verb_errors      -- whether to print per-point warnings (capped at
                      ERRORS_MAX).
  firsterror       -- index of first errorbar column, or None for the style
                      default.

  Returns outgrid: element 0 is [Npoints, columns, all_points] (every block
  concatenated), followed by one [Npoints, columns, points] entry per block.
  On a parse/read failure returns [[0,0,[]]].
  """
  # Note that from henceforth, "rc" means "row or column, as appropriate"
  index_no   = 0
  single_rc_datafile = True  # This will be falsified later should the datafile be multi-column/row (as appropriate)

  # Step 1. -- Work out what bits of data we want

  # Check for blank using list and replace with default, remembering that we did so
  if (using_list == []): 
   using_list = gp_settings.datastyleinfo[style][0].split(":")
   fudged_using_list = True
  else:
   fudged_using_list = False

  # Tidy stuff up and set default arrays
  using_list = using_list[:]
  for i in range(len(using_list)): using_list[i] = using_list[i].strip()
  columns    = len(using_list)
  columns_using = columns # The number of columns of data specified in the "using" statement
  if (columns == 1): columns=2 # The number of columns of data that we return.  If the user has only specified a single column we give them the row index too.
  totalgrid  = [] # List of [file linenumber, line number for spare x-axis, list of data strings]s for all of the blocks that we're going to plot
  vars_local = vars.copy()
  data_used = {}
  errcount = 0

  # Parse the "every" list
  every_dict = parse_every(every_list, verb_errors)

  try:
   # Parse using list
   error_str = 'Internal error while parsing using expressions'
   parse_using (using_list, data_used, vars_local, verb_errors)

   # Check for the case where the using list doesn't specify any variables
   if (data_used == {}):
    data_used[0] = "used" # We need a single set of data anyway so that we get the right number of points; think about p sin(x) u (1)

   # Parse select criterion
   error_str = "Internal error while parsing select criterion -- offending expression was '%s'."%select_criterion
   select_criterion = parse_select (select_criterion, data_used, vars_local)

  except KeyboardInterrupt: raise
  except:
   if (verb_errors): gp_error(error_str)
   if (verb_errors): gp_error("Error:" , sys.exc_info()[1], "(" , sys.exc_info()[0] , ")")
   return [[0,0,[]]]

  data_from_file = {'data':{}, 'lineNs':[{}]  }
  # OK, listen up.  We only ever need rows *or* columns.  So combine the two into one data structure, which looks like this:
  # data_from_file = { 'data'  : { <1st rc> : [ [ point 1, point 2, ...]  <-- values for the 1st row/column in the first block
  #                                             [ point 1, point 2, ...]  <-- "----------------------------------" second block
  #                                             [ .....................] ],
  #                                <2nd rc> : [ [ point 1, point 2, ...]   <-- values for the 2nd row/column in the first block
  #                                             [ point 1, point 2, ...]   <-- values for the 2nd row/column in the 2nd block
  #                                             [...                   ] ],
  #                                ... }
  #                    'lineNs': [  { <row/point 1> : <line N>, <row/point 2> : <line N> ...},   <-- Line numbers for the first block
  #                                 { <row/point 1> : <line N>, <row/point 2> : <line N> ...},   <-- Line numbers for the second block
  #                                                           ]
  # The part about data makes reasonable sense: data is stored by rc number first, then block by block as it comes in.  This is easiest from the readin POV.
  # 
  # Line numbers need a tad more explaining.  For ROWS, the line number of each ROW   must be stored within each block.  Each POINT then comes from the same row.
  #                                           For COLS, the line number of each POINT must be stored within each block.  Each COL   then comes from the same row.
  for rc, dummy in data_used.iteritems():
   data_from_file['data'][rc] = [[]]   # Include a blank list to put the first block of data into

  # Step 2 -- Get the required data from the file

  line_count = 0  # Count lines within a block
  block_count = 0 # Count blocks within an index block
  Nblocks = 0     # Count total number of blocks
  prev_blank = 10 # Skip opening blank lines 
  fileline = 0    # The line number within the file that the bit of data that we're looking at came from

  try:
   for line in iterator: # Iterate here, don't readlines() !
    fileline = line[1]
    data_list = line[0]

    # Check for a blank line; if found update block and index accounting as necessary
    if (data_list == ['']):
     prev_blank += 1
     block_count += 1
     if (line_count > 0): # Discontinuous line; we have a new block of data
      Nblocks += 1
      if (usingrowcol == "row") :
       for row in data_from_file['data']:
       # Fill in any rows that we didn't get with blank rows (though this data will be useless anyway)
        if len(data_from_file['data'][row]) < Nblocks:
         data_from_file['data'][row].append([])
       # Create blank lists to put the next block#'s worth of rows in
        data_from_file['data'][row].append([])
      else:
       # Create blank lists to put the next block#'s worth of rows in
       for col in data_from_file['data']:
        data_from_file['data'][col].append([])
      data_from_file['lineNs'].append({})

      line_count = 0
     if (prev_blank == 2): # Two blank lines means a new index
      index_no += 1
      block_count = 0
      if ((index >= 0) and (index_no > index)): break # No more data
     continue
    else: prev_blank = 0 # Reset blank lines counter

    if ((index >= 0) and (index_no != index)): continue # Still waiting for our index block; we don't want this line of data

    # This is the line number within the current block
    line_count += 1

    if usingrowcol == 'row':
     # See if data matches a requested row
     for row in data_from_file['data']:
      if (row == line_count):
       if (row != 1): single_rc_datafile = False
       # Check each data point against the 'every' statemetn
       for i in range(len(data_list)):
        if (check_every(block_count, i, every_dict)): data_from_file['data'][row][Nblocks].append(data_list[i]) # Insert into the already present blank list
       data_from_file['lineNs'][Nblocks][row] = fileline
       continue # Any given row can only match one row number
      
    else:
     # See if data matches a required column
     if check_every(block_count, line_count-1, every_dict):
      cols_read = len(data_list) # Number of cols we read from the file
      if cols_read > 1: single_rc_datafile = False
      for col in data_from_file['data']:
       if (col <= cols_read): data_from_file['data'][col][Nblocks].append(data_list[col-1]) # We have read the col that we're looking for; insert datum
       else                 : data_from_file['data'][col][Nblocks].append('') # We have not read the col that we're looking for; insert blank item
      data_from_file['lineNs'][Nblocks][len(data_from_file['data'][col][Nblocks])-1] = fileline # The last index is the item number in the data array
     
  except KeyboardInterrupt: raise
  except:
   if (verb_errors): gp_warning("Error encountered whilst reading '%s'."%description)
   gp_error("Error:" , sys.exc_info()[1], "(" , sys.exc_info()[0] , ")")
   return [[0,0,[]]]

  # Step 3 -- Get the set of data that we want from the data extracted from the file

  if (firsterror == None): firsterror = gp_settings.datastyleinfo[style][2]
  allgrid = []
  outgrid = [[]]

  # Deal with the special case where the user didn't supply a using list and the file only has one column/row
  if (fudged_using_list and single_rc_datafile):
    # In this case we want to just plot the first rc against the data item counter.
    using_list = ['1']
    parse_using (using_list, data_used, vars_local, verb_errors)
    del data_from_file['data'][2]
    columns_using = 1

  data_rcs = data_from_file['data'].keys()  # The list of rcs that we require

  # Nblocks should be the number of blocks (currently it is the index of the last block)
  Nblocks += 1

  # Assemble the blocks of data to return one by one
  for i in range(Nblocks):
   data_counter = 0
   outblockgrid = []
   # If some rcs have more data than others, then find the rc with the minimum number of points; we return that number of points
   # This is only ever an issue for rows, not columns, where it's dealt with when reading in the data (you can't do that for rows without double-passing)
   Npoints = min([len(data_from_file['data'][rc][i]) for rc in data_rcs])

   # Iterate over the points in this block
   for j in range(Npoints):
    data_item = []
    invalid_datapoint = False

    # Get line number / x co-ordinate / whatever for error message purposes
    if (lineunit != "line " and data_from_file['data'].has_key(1)): linenumber = data_from_file['data'][1][i][j]  # Extract the x value of the point, not the line number
    elif (usingrowcol == 'col'): linenumber = "%d"%data_from_file['lineNs'][i][j] # Just one line number in the columns case
    else: linenumber = ''.join(["%d, "%data_from_file['lineNs'][i][k] for k in data_rcs])

    # For each row / column that we need, place its value in the relevent _gp_param variable
    for rc in data_rcs:
     if (rc == 0):
      vars_local['_gp_param0'] = data_counter
     else:
      point = data_from_file['data'][rc][i][j]
      if point == '':  # Lack of a data point; silently ignore
       invalid_datapoint = True
       break
      else:
       try:
        vars_local['_gp_param'+str(rc)] = float(point)
       except: # Error float()ing the data point
        invalid_datapoint = True
        if (verb_errors): 
         gp_warning("Warning: Could not evaluate data at %s%s %s."%(lineunit, linenumber, description))
         errcount += 1
         if (errcount > ERRORS_MAX): gp_warning("Warning: Not displaying any more errors for %s."%description) ; verb_errors = False
        break
    if invalid_datapoint: continue

    try: # To evaluate this data point
     # Check whether this data point satisfies select() criteria
     if (select_criterion != ""):
      error_str = "Warning: Could not evaluate select criterion at %s%s %s."%(lineunit, linenumber, description)
      if (gp_eval.gp_eval(select_criterion, vars_local, verbose=False) == 0.0): 
       invalid_datapoint = True # gp_eval applies float() to result and turns False into 0.0
       if ((select_cont == False)and(len(outblockgrid) > 0)): # Break line by creating new block here if the user has asked for discontinuous breaking with select
        outgrid.append([len(outblockgrid), columns, outblockgrid])
        outblockgrid = []
       continue

     # Evaluate the using statement
     if (columns_using == 1): data_item.append(data_counter) # If the user asked for a single column, prepend the point number
     data_counter += 1

     error_str = "Warning: Could not parse data at %s%s %s."%(lineunit, linenumber, description)
     errcount += evaluate_using(data_item, using_list, vars_local, style, firsterror, verb_errors, lineunit, linenumber, description)
     if (verb_errors and (errcount > ERRORS_MAX)):
      gp_warning("Warning: Not displaying any more errors for %s."%description) 
      verb_errors = False

    except KeyboardInterrupt: raise
    except ValueError:
     if (verb_errors):
      gp_warning("%s"%error_str)
      errcount += 1
      if (verb_errors and (errcount > ERRORS_MAX)): gp_warning("Warning: Not displaying any more errors for %s."%description) ; verb_errors = False
    except:
     if (verb_errors):
      gp_warning("%s %s (%s)"%(error_str,sys.exc_info()[1],sys.exc_info()[0]))
      errcount += 1
      if (verb_errors and (errcount > ERRORS_MAX)): gp_warning("Warning: Not displaying any more errors for %s."%description) ; verb_errors = False
    else:
     # For arrow linestyles, we break up each datapoint into a line between two points
     if (style[0:6] == "arrows"): outgrid.append([2,columns,[data_item,data_item[2:4]+data_item[0:2]+data_item[4:]]]) # arrow linestyles
     else                       : outblockgrid.append(data_item) # Append this data item to the current block
     allgrid.append(data_item) # Append the data item to the list of all data items (all the blocks catenated)
   if (len(outblockgrid) > 0): outgrid.append([len(outblockgrid),columns,outblockgrid])

  outgrid[0] = [len(allgrid),columns,allgrid]
  return outgrid
Ejemplo n.º 21
0
default_new_axis = default_axis.copy() # This is the prototype of a new axis. It is modified by, e.g. set notics, acting on all axes.

# Module-level stores of user-defined plot state.
# (presumably keyed by the item number given in the corresponding "set"
# command -- TODO confirm against the directives that populate these)
linestyles = {} # User-defined linestyles
arrows     = {} # Arrows superposed on figure
labels     = {} # Text labels

# NOW IMPORT FUNCTIONS FROM CONFIGURATION FILE

# Declare every entry of the configuration file's [functions] section as a
# userspace function ("name(args)=expression"). A failure to declare one
# function is reported but does not stop the others from being imported.
try:
 for preconfigvar in config_files.items('functions'):
  try:
    gp_userspace.gp_function_declare(preconfigvar[0] + "=" + preconfigvar[1])
  except KeyboardInterrupt: raise
  except:
    gp_error("Error importing function %s from configuration file:"%(preconfigvar[0]))
    gp_error("Error:" , sys.exc_info()[1], "(" , sys.exc_info()[0] , ")")
except KeyboardInterrupt: raise
except:
 pass # Ignore if no functions section 


# NOW IMPORT VARIABLES FROM CONFIGURATION FILE

# Evaluate every entry of the configuration file's [variables] section and
# store it as a user variable; a bad expression is reported but skipped.
# NOTE(review): the outer try's except handler (to ignore a missing
# [variables] section, mirroring the [functions] block above) appears to be
# truncated in this excerpt -- confirm against the full source.
try:
 for preconfigvar in config_files.items('variables'):
  try:
    gp_userspace.gp_variable_set(preconfigvar[0], gp_eval.gp_eval(preconfigvar[1],gp_userspace.variables))
  except KeyboardInterrupt: raise
  except:
    gp_warning("Warning: Expression '%s' for variable %s in configuration file could not be evaluated."%(preconfigvar[1],preconfigvar[0]))
Ejemplo n.º 22
0
def directive_spline(command, vars):
    """Implement the `spline` command: fit a (smoothed) cubic spline through
    a datafile and install it as a new one-argument function in userspace.

    command -- dictionary of parsed commandline items (function name,
               datafile, using/select/every/index/smooth modifiers, ranges).
    vars    -- dictionary of user variables, used when evaluating the
               datafile's using/select expressions.

    Returns None; errors are reported via gp_error() and cause an early
    return.
    """

    assert not SCIPY_ABSENT, "The use of cubic splines requires the scipy module for python, which is not installed. Please install and try again."

    # FIRST OF ALL, READ THE INPUT PARAMETERS FROM THE COMMANDLINE

    # Ranges: list of [min, max] pairs, None meaning "unbounded on this side"
    ranges = []
    for drange in command['range_list']:
        if 'min' in drange.keys(): range_min = drange['min']
        else: range_min = None
        if 'max' in drange.keys(): range_max = drange['max']
        else: range_max = None
        ranges.append([range_min, range_max])

    # Function name under which the spline will be installed
    funcname = command['fit_function']

    # Read datafile filename
    datafile = command['filename']

    # every
    if 'every_list:' in command: every = command['every_list:']
    else: every = []

    # index
    if 'index' in command: index = int(command['index'])
    else: index = -1  # default

    # select
    if 'select_criterion' in command:
        select_criteria = command['select_criterion']
    else:
        select_criteria = ''
    select_cont = True
    if (('select_cont' in command) and (command['select_cont'][0] == 'd')):
        select_cont = False

    # smooth: smoothing factor passed through to make_spline_object
    if 'smooth' in command: smoothing = float(command['smooth'])
    else: smoothing = 0.0

    # Using rows or columns
    if 'use_rows' in command: usingrowcol = "row"
    elif 'use_columns' in command: usingrowcol = "col"
    else: usingrowcol = "col"  # default

    # using
    if 'using_list:' in command:
        using = [item['using_item'] for item in command['using_list:']]
    else:
        using = []

    # We have now read all of our commandline parameters, and are ready to start spline fitting
    try:
        (rows, columns, datagrid) = gp_datafile.gp_dataread(
            datafile, index, usingrowcol, using, select_criteria, select_cont,
            every, vars, "points")[0]
    except KeyboardInterrupt:
        raise
    except:
        gp_error("Error reading input datafile:",
                 sys.exc_info()[1], "(",
                 sys.exc_info()[0], ")")
        return  # Error

    if (rows == 0):
        gp_error("Error: empty data file %s provided to the spline command" %
                 datafile)
        return  # Error

    try:
        splineobj = make_spline_object(rows, columns, datagrid, smoothing,
                                       ranges)[2]
    except KeyboardInterrupt:
        raise
    except:
        gp_error("Error processing input datafile:",
                 sys.exc_info()[1], "(",
                 sys.exc_info()[0], ")")
        # BUGFIX: previously execution fell through here with splineobj
        # unbound, raising a NameError below (after deleting any variable
        # called funcname). Bail out like every other error path does.
        return  # Error

    # Install the spline as a userspace function, shadowing any variable of
    # the same name (core mathematical functions may not be overridden).
    assert funcname not in gp_userspace.math_functions.keys(
    ), "Cannot re-define a core mathematical function."
    if funcname in gp_userspace.variables: del gp_userspace.variables[funcname]
    gp_userspace.functions[funcname] = {
        'no_args': 1,
        'type': 'spline',
        'histogram': False,
        'fname': datafile,
        'splineobj': splineobj
    }
    expression_lambda = gp_userspace.make_function_lambda_wrapper(funcname)
    gp_userspace.function_namespace[funcname] = expression_lambda
    return  # Done
Ejemplo n.º 23
0
def output_table(datagrid, settings, format):
  """Write a tabulated datagrid to the configured output file.

  datagrid -- list of [rows, cols, block] triples; each block is a list of
              data lines, with blank cells represented by the empty string ''.
  settings -- accepted for interface compatibility; not used in this function.
  format   -- user-supplied %%-style format string, or '' to choose a format
              for each column automatically.

  Returns [] if the user-supplied format string is invalid; None otherwise.
  Re-raises any exception hit while writing, after reporting it.
  """
  # Get the filename; an empty OUTPUT setting falls back to pyxplot.dat
  fname_out = os.path.expanduser(gp_settings.settings_global['OUTPUT'])
  if fname_out == "":
    fname_out = "pyxplot.dat"
  outfile = os.path.join(gp_settings.cwd, os.path.expanduser(fname_out))

  # Create backup (outfile~0, outfile~1, ...) of pre-existing datafile if necessary
  if (gp_settings.settings_global['BACKUP'] == "ON") and os.path.exists(outfile):
    n = 0
    while os.path.exists("%s~%d" % (outfile, n)):
      n += 1
    os.rename(outfile, "%s~%d" % (outfile, n))

  f = open(outfile, 'w')  # Open output datafile

  # Write a header recording the producing version and timestamp
  f.write("# Datafile produced by PyXPlot %s on %s\n" % (gp_version.VERSION, time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime())))

  # Produce one format conversion per column.
  # FIX(review): previously the loop variable 'cols' clobbered this count, so
  # the default-format loop below ran over the LAST block's column count while
  # allints/allsmall were sized from the FIRST block's -- an IndexError (or a
  # short formats list) on grids whose blocks differ in width.  The first
  # block's count is now used consistently.
  ncols = datagrid[0][1]
  formats = []
  formatprefix = ''
  if format != '':
    # User-supplied format: any text before the first '%' is a per-line prefix
    test = re.search(r'^([^%]*)%', format)
    if test is None:
      gp_error("Error: Bad format string %s" % format)
      return []
    formatprefix = test.group(1)
    # Cycle the supplied conversion items across all of the columns
    formatitems = re.findall(r'%[^%]*', format)
    nitems = len(formatitems)
    formats = [formatitems[i % nitems] for i in range(ncols)]
  else:
    # Default: use the same format down a whole column for easy readability --
    # %10d if every value is a small integer, %11f if every value has moderate
    # magnitude, %15e otherwise.
    allints = [True] * ncols
    allsmall = [True] * ncols
    for [nrows, blockcols, block] in datagrid:
      for line in block:
        for i in range(blockcols):
          if line[i] == '':
            continue  # Skip blank points (padding for uneven data grids)
          if (line[i] != float(int(line[i]))) or (abs(line[i]) > 1000):
            allints[i] = False
          if (abs(line[i]) >= 1000) or (abs(line[i]) < .0999999 and line[i] != 0.):
            allsmall[i] = False
    for i in range(ncols):
      if allints[i]:
        formats.append("%10d")
      elif allsmall[i]:
        formats.append("%11f")
      else:
        formats.append("%15e")

  # Actually write the data file: one blank-line-separated section per block
  try:
    for [nrows, blockcols, block] in datagrid:
      for line in block:
        fields = [formatprefix]
        for i in range(len(line)):
          colfmt = formats[i]
          if line[i] == '':
            colfmt = "%ss" % colfmt[0:-1]  # Turn %XX(d/e/f) into %XXs so blanks print verbatim
          if colfmt[-1] == 'd':
            fields.append(colfmt % int(line[i]))
          else:
            fields.append(colfmt % line[i])
        f.write("%s\n" % ' '.join(fields))
      f.write("\n")
  except:
    gp_error("Error whilst writing tabulated file.")
    raise

  f.close()
  return
Ejemplo n.º 24
0
def directive_help(command, interactive):
    """Display the on-line help page requested by the help command.

    command     -- parsed command dictionary; command['topic'] holds the
                   space-separated topic words requested by the user.
    interactive -- passed through to wordwrap_display(); together with a
                   tty on stdin it also gates the "Press Q" footer.

    The requested words are matched (with abbreviation, via autocomplete)
    against the element path of gp_help.xml, rooted at the implicit word
    "help".  Returns None in all cases; results are reported via
    gp_report/gp_error and wordwrap_display.
    """
    # Topic path to look for: implicit root "help" plus the user's words.
    help_words = ["help"] + command['topic'].split()
    help_page = ""

    # Open XML help file
    try:
        help_filename = os.path.join(gp_version.SRCDIR, "gp_help.xml")
        help_file = xml.dom.pulldom.parse(help_filename)
    except KeyboardInterrupt:
        raise
    except:
        gp_error("Error: Cannot find help source file.")
        return

    # Parse XML file, extracting any text which matches the requested topic.
    # path is a stack of currently-open element names; match_list records,
    # per requested word, whether it abbreviates the corresponding element.
    path = []
    match_list = []
    match = False
    hits = []       # every full element path that matched the request
    subtopics = []  # elements nested one level below a matching page
    try:
        for event, node in help_file:
            if (event == 'START_ELEMENT'):
                path.append(node.nodeName)
                # Only paths at least as deep as the request can match.
                if (len(help_words) <= len(path)):
                    match_list = [
                        autocomplete(help_words[i], path[i], 1)
                        for i in range(len(help_words))
                    ]
                else:
                    match_list = [False]
                # Exact-depth, all-words match selects this page.
                match = (not False in match_list) and (len(help_words)
                                                       == len(path))
                if match and (not path in hits): hits.append(path[:])
                # One level deeper than a matching prefix: record a subtopic.
                if (not False in match_list) and (len(path)
                                                  == len(help_words) + 1):
                    subtopics.append(path[-1])
            if (event == 'END_ELEMENT'):
                try:
                    # Pop back to (and including) the element being closed,
                    # then recompute the match state for the shortened path.
                    while (path.pop() != node.nodeName):
                        pass
                    if (len(help_words) <= len(path)):
                        match_list = [
                            autocomplete(help_words[i], path[i], 1)
                            for i in range(len(help_words))
                        ]
                    else:
                        match_list = [False]
                    match = (not False in match_list) and (len(help_words)
                                                           == len(path))
                except KeyboardInterrupt:
                    raise
                except:
                    # path.pop() exhausted the stack: mismatched closing tag.
                    gp_error(
                        "Error in XML help file: closing tag '%s' found, but this tag wasn't open."
                        % node.nodeName)
                    # (Python 2 print statement) dump the parse path to aid
                    # debugging the help file.
                    print path
                    return
            # Accumulate the text content of the currently-matched page.
            if (event == 'CHARACTERS') and (len(help_words) == len(path)):
                if match: help_page += node.nodeValue
    except KeyboardInterrupt:
        raise
    except:
        gp_error(
            "Error found in help source XML file whilst reading section %s." %
            path)
        gp_error("Error:", sys.exc_info()[1], "(", sys.exc_info()[0], ")")
        return

    # Display help page
    if (len(help_page) == 0):
        gp_report("Sorry. No help was found on this topic.")
        return
    elif (len(hits) > 1):
        # Multiple pages fit request, but check whether one is shortened form of another, e.g. "grid" and "gridxaxis"
        for i in range(len(hits)):
            all_abbreviate_to_i = True
            for j in range(len(hits)):
                for k in range(len(hits[j])):
                    all_abbreviate_to_i = all_abbreviate_to_i and autocomplete(
                        hits[i][k], hits[j][k], 1)
            if all_abbreviate_to_i:
                hits = [hits[i]]
                break
    if (len(hits) > 1):  # STILL multiple pages fit request.
        gp_report(
            "Ambiguous help request. The following help topics were matched:")
        for topic in hits:
            gp_report(reduce(lambda a, b: a + b + " ", [""] + topic))
        gp_report(
            "Please make your help request more specific, and try again.")
    else:
        # Unique page: prepend a title banner and append the subtopic list.
        help_page = "\n**** Help Topic: %s****\n\n" % reduce(
            lambda a, b: a + b + " ", [""] + hits[0]) + help_page
        if (len(subtopics) < 1):
            help_page += "This help page has no subtopics.\n"
        else:
            help_page += "This help page has the following subtopics:\n\n"
            for i in range(len(subtopics)):
                if (i != 0): help_page += ", "
                help_page += subtopics[i]
        if (sys.stdin.isatty() and interactive):
            help_page += "\n\nPress the 'Q' key to exit this help page.\n"
        wordwrap_display(help_page, interactive)
    return
Ejemplo n.º 25
0
linestyles = {}  # User-defined linestyles
arrows = {}  # Arrows superposed on figure
labels = {}  # Text labels

# NOW IMPORT FUNCTIONS FROM CONFIGURATION FILE

# Each (name, expression) pair in the [functions] section is re-declared by
# joining it back into "name=expression" form for gp_function_declare.
# Failures are reported per-function so one bad entry doesn't stop the rest.
try:
    for preconfigvar in config_files.items('functions'):
        try:
            gp_userspace.gp_function_declare(preconfigvar[0] + "=" +
                                             preconfigvar[1])
        except KeyboardInterrupt:
            raise
        except:
            gp_error("Error importing function %s from configuration file:" %
                     (preconfigvar[0]))
            gp_error("Error:", sys.exc_info()[1], "(", sys.exc_info()[0], ")")
except KeyboardInterrupt:
    raise
except:
    pass  # Ignore if no functions section

# NOW IMPORT VARIABLES FROM CONFIGURATION FILE

try:
    for preconfigvar in config_files.items('variables'):
        try:
            gp_userspace.gp_variable_set(
                preconfigvar[0],
                gp_eval.gp_eval(preconfigvar[1], gp_userspace.variables))
        except KeyboardInterrupt:
Ejemplo n.º 26
0
def directive_help(command, interactive):
  """Display the on-line help page requested by the help command.

  command     -- parsed command dictionary; command['topic'] holds the
                 space-separated topic words requested by the user.
  interactive -- passed through to wordwrap_display(); together with a tty
                 on stdin it also gates the "Press Q" footer.

  Topic words are matched (with abbreviation, via autocomplete) against the
  element path of gp_help.xml, rooted at the implicit word "help".
  Returns None in all cases.
  """
  # Topic path to look for: implicit root "help" plus the user's words.
  help_words = ["help"] + command['topic'].split() 
  help_page   = ""

  # Open XML help file
  try:
    help_filename = os.path.join(gp_version.SRCDIR, "gp_help.xml")
    help_file = xml.dom.pulldom.parse(help_filename)
  except KeyboardInterrupt: raise
  except:
    gp_error("Error: Cannot find help source file.")
    return

  # Parse XML file, extracting any text which matches the requested topic.
  # path is a stack of currently-open element names; match_list records, per
  # requested word, whether it abbreviates the corresponding path element.
  path       = []
  match_list = []
  match      = False
  hits       = []  # every full element path that matched the request
  subtopics  = []  # elements nested one level below a matching page
  try:
   for event, node in help_file:
     if (event == 'START_ELEMENT'):
       path.append(node.nodeName)
       # Only paths at least as deep as the request can match.
       if (len(help_words) <= len(path)):
         match_list = [autocomplete(help_words[i],path[i],1) for i in range(len(help_words))]
       else:
         match_list = [False]
       # Exact-depth, all-words match selects this page.
       match = (not False in match_list) and (len(help_words) == len(path))
       if match and (not path in hits): hits.append(path[:])
       # One level deeper than a matching prefix: record a subtopic.
       if (not False in match_list) and (len(path) == len(help_words)+1): subtopics.append(path[-1])
     if (event == 'END_ELEMENT'):
       try:
         # Pop back to (and including) the element being closed, then
         # recompute the match state for the shortened path.
         while (path.pop() != node.nodeName): pass
         if (len(help_words) <= len(path)):
           match_list = [autocomplete(help_words[i],path[i],1) for i in range(len(help_words))]
         else:
           match_list = [False]
         match = (not False in match_list) and (len(help_words) == len(path))
       except KeyboardInterrupt: raise
       except:
         # path.pop() exhausted the stack: mismatched closing tag.
         gp_error("Error in XML help file: closing tag '%s' found, but this tag wasn't open."%node.nodeName)
         # (Python 2 print statement) dump the parse path for debugging.
         print path
         return
     # Accumulate the text content of the currently-matched page.
     if (event == 'CHARACTERS') and (len(help_words) == len(path)):
       if match: help_page += node.nodeValue
  except KeyboardInterrupt: raise
  except:
    gp_error("Error found in help source XML file whilst reading section %s."%path)
    gp_error("Error:" , sys.exc_info()[1], "(" , sys.exc_info()[0] , ")")
    return

  # Display help page
  if (len(help_page) == 0):
    gp_report("Sorry. No help was found on this topic.")
    return
  elif (len(hits) > 1):
    # Multiple pages fit request, but check whether one is shortened form of another, e.g. "grid" and "gridxaxis"
    for i in range(len(hits)):
     all_abbreviate_to_i = True
     for j in range(len(hits)):
      for k in range(len(hits[j])):
       all_abbreviate_to_i = all_abbreviate_to_i and autocomplete(hits[i][k],hits[j][k],1)
     if all_abbreviate_to_i:
      hits = [hits[i]]
      break
  if (len(hits) > 1): # STILL multiple pages fit request.
    gp_report("Ambiguous help request. The following help topics were matched:")
    for topic in hits: gp_report(reduce(lambda a,b: a+b+" ", [""]+topic))
    gp_report("Please make your help request more specific, and try again.")
  else:
    # Unique page: prepend a title banner and append the subtopic list.
    help_page = "\n**** Help Topic: %s****\n\n"%reduce(lambda a,b: a+b+" ", [""]+hits[0]) + help_page
    if (len(subtopics) < 1): help_page += "This help page has no subtopics.\n"
    else:
      help_page += "This help page has the following subtopics:\n\n"
      for i in range(len(subtopics)):
        if (i != 0): help_page += ", "
        help_page += subtopics[i]
    if (sys.stdin.isatty() and interactive):
      help_page += "\n\nPress the 'Q' key to exit this help page.\n"
    wordwrap_display(help_page, interactive)
  return
Ejemplo n.º 27
0
def directive_fit(command, vars):
  """Fit a user-defined function to a datafile using scipy's fmin.

  command -- parsed command dictionary: fit function name, datafile name,
             ranges, every/index/select/smooth/using modifiers and the
             'via' list of parameters to vary.
  vars    -- user variable dictionary; best-fit parameter values are
             written back into it on success.

  Results (best-fit values, Hessian, covariance and correlation matrices,
  and standard errors) are reported via gp_report/gp_warning/gp_error;
  the function itself always returns None.  Communication with the helper
  functions fit_residual/sigma_logP/hessian_get is via the module globals
  declared below.
  """
  global pass_function, no_arguments, len_vialist, pass_vialist, pass_vars, pass_dataset
  global yerrors, x_bestfit

  assert not SCIPY_ABSENT, "The fit command requires the scipy module for python, which is not installed. Please install and try again."

  # FIRST OF ALL, READ THE INPUT PARAMETERS FROM THE COMMANDLINE

  # Ranges: a [min, max] pair per dimension, with None meaning unbounded.
  ranges = []
  for drange in command['range_list']:
   if 'min' in drange.keys(): range_min = drange['min']
   else                     : range_min = None
   if 'max' in drange.keys(): range_max = drange['max']
   else                     : range_max = None
   ranges.append([ range_min , range_max ])

  # Function name (must already be declared in gp_userspace.functions)
  pass_function = command['fit_function']
  if not pass_function in gp_userspace.functions: gp_error("Error: fit command requested to fit function '%s'; no such function."%pass_function) ; return
  no_arguments  = gp_userspace.functions[pass_function]['no_args']

  # Read datafile filename
  datafile = command['filename']

  # every
  if 'every_list:' in command: every = command['every_list:']
  else                       : every = []

  # index
  if 'index' in command: index = int(command['index'])
  else                 : index = -1 # default

  # select
  if 'select_criterion' in command: select_criteria = command['select_criterion']
  else                            : select_criteria = ''

  # smooth
  if 'smooth' in command: smoothing = float(command['smooth'])
  else                  : smoothing = 0.0

  # Using rows or columns
  if   'use_rows'    in command: usingrowcol = "row"
  elif 'use_columns' in command: usingrowcol = "col"
  else                         : usingrowcol = "col" # default

  # using: default is columns 1..no_arguments+1 (arguments plus function value)
  if 'using_list:' in command: using = [item['using_item'] for item in command['using_list:']]
  else                       : using = [str(i+1) for i in range(no_arguments+1)]

  # via: the parameter names to be varied by the fit
  vialist = [item['fit_variable'] for item in command['fit_variables,']]

  # We have now read all of our commandline parameters, and are ready to start fitting
  try:
    (rows,columns,datagrid) = gp_datafile.gp_dataread(datafile, index, usingrowcol, using, select_criteria, True, every, vars, "points", firsterror=no_arguments+1)[0]

    # Keep only datapoints lying strictly inside every supplied range.
    datagrid_cpy = []
    for datapoint in datagrid:
     if False not in [(i>=columns) or (((ranges[i][0] == None) or (datapoint[i]>ranges[i][0])) and ((ranges[i][1] == None) or (datapoint[i]<ranges[i][1]))) for i in range(len(ranges)) ]:
      datagrid_cpy.append(datapoint)
    datagrid = datagrid_cpy
  except KeyboardInterrupt: raise
  except:
    gp_error("Error reading input datafile:" , sys.exc_info()[1], "(" , sys.exc_info()[0] , ")")
    return # Error

  if (rows == 0):
   gp_error("Error: fit command provided with empty data file")
   return # Error

  # Decide whether the datafile supplies errorbars: no_arguments+1 columns
  # means values only, no_arguments+2 means a final errorbar column.
  if   (columns <  (no_arguments+1)):
    gp_error("Error: fit command needs %d columns of input data to fit a %d-parameter function."%(no_arguments+1,no_arguments))
    return # Error
  elif (columns == (no_arguments+1)): yerrors = False
  elif (columns == (no_arguments+2)): yerrors = True
  else:
   yerrors = True
   gp_warning("Warning: Too many columns supplied to fit command. Taking 1=first argument, .... , %d=%s(...), %d=errorbar on function output."%(no_arguments+1,pass_function,no_arguments+2))

  # Publish the fit configuration to the module globals read by fit_residual.
  pass_vialist = vialist
  len_vialist  = len(vialist)
  pass_vars    = vars.copy()
  pass_dataset = datagrid

  # Set up a list containing the values of all of the parameters that we're fitting
  x = []
  for i in range(len_vialist):
    if vialist[i] in pass_vars: x.append(pass_vars[vialist[i]]) # Use default value, if we have one
    else                      : x.append(1.0                  ) # otherwise use 1.0 as our starting value

  # Find best fitting parameter values
  try:
    x = fmin(fit_residual, x, disp=0)
    x_bestfit = x
  except KeyboardInterrupt: raise
  except:
    gp_error("Numerical Error:" , sys.exc_info()[1], "(" , sys.exc_info()[0] , ")")
    return # Error

  # Print best fitting parameter values
  try:
    gp_report("\n# Best fit parameters were:")
    gp_report(  "# -------------------------\n")
    for i in range(len_vialist): # Set via variables
      gp_report("%s = %s"%(vialist[i],x[i]))
      vars[vialist[i]] = x[i] # Set variables in global scope
  except KeyboardInterrupt: raise
  except:
    gp_error("Error whilst display best fit values:" , sys.exc_info()[1], "(" , sys.exc_info()[0] , ")")
    return # Error

  # Estimate error magnitude in supplied data, if not supplied (this is critical to error in fitted paramters).
  gp_report("\n# Estimated error in supplied data values (based upon misfit to this function fit, assuming uniform error on all datapoints):")
  if not yerrors:
    try:
     sigma_data = fmin(sigma_logP, [1.0], disp=0)
     gp_report(  "sigma_datafile = %s\n"%sigma_data)
    except KeyboardInterrupt: raise
    except:
      gp_error("Error whilst estimating the uncertainties in parameter values.\nError:" , sys.exc_info()[1], "(" , sys.exc_info()[0] , ")")
      sigma_data = 1.0
      # Keep going; we may as well print out the Hessian matrix anyway
  else:
    gp_report("# Not calculated, because datafile already contained errorbars on data values.\n")
    sigma_data = 0.0 # sigma_data is the errorbar on the supplied datapoints

  # Calculate and print the Hessian matrix of the log-probability surface,
  # evaluated at the best-fit point.
  try:
    hessian = scipy.mat([[hessian_get(x, yerrors, sigma_data, i, j) for i in range(len_vialist)] for j in range(len_vialist)])
    matrix_print(hessian, vialist, "Hessian matrix of log-probability distribution", "hessian")

    # At a true maximum all Hessian components should be non-positive;
    # any positive entry indicates the minimiser has not converged.
    hessian_negative = True
    for i in range(len_vialist):
     for j in range(len_vialist):
      if (hessian[i,j] > 0.0): hessian_negative = False
    if not hessian_negative: gp_warning("\n# *** WARNING: ***\n# Non-negative components in Hessian matrix. This implies that fitting procedure has failed.\n")
  except KeyboardInterrupt: raise
  except:
    gp_error("Error whilst evaluating Hessian matrix.\nError:" , sys.exc_info()[1], "(" , sys.exc_info()[0] , ")")
    return # Error

  # Print the Covariance matrix
  try:
    covar = linalg.inv(-hessian) # Covariance matrix = inverse(-H)
    matrix_print(covar, vialist, "Covariance matrix of probability distribution", "covariance")
  except KeyboardInterrupt: raise
  except:
    gp_error("Error whilst evaluating covariance matrix.\nError:" , sys.exc_info()[1], "(" , sys.exc_info()[0] , ")")
    return # Error

  # Get the standard errors on each parameter, which are the squareroots of the leading diagonal terms
  try:
    standard_devs = []
    for i in range(len_vialist):
      assert (covar[i,i] >= 0.0), "Negative terms in leading diagonal of covariance matrix imply negative variances in fitted parameters. The fitting procedure has probably failed."
      standard_devs += [sqrt(covar[i,i])]
  except KeyboardInterrupt: raise
  except:
    gp_error("Error whilst evaluating uncertainties in best fit parameter values.\nError:" , sys.exc_info()[1], "(" , sys.exc_info()[0] , ")")
    return # Error

  # Print the Correlation matrix (covariance normalised by the standard
  # deviations; note this overwrites covar in place).
  try:
    for i in range(len_vialist):
     for j in range(len_vialist):
      covar[i,j] = covar[i,j] / standard_devs[i] / standard_devs[j]
    matrix_print(covar, vialist, "Correlation matrix of probability distribution", "correlation")
  except KeyboardInterrupt: raise
  except:
    gp_error("Error whilst evaluating correlation matrix.\nError:" , sys.exc_info()[1], "(" , sys.exc_info()[0] , ")")
    return # Error

  # Print the standard errors on variables
  try:
    gp_report("\n# Uncertainties in best fit parameter values are:")
    gp_report(  "# -----------------------------------------------\n")
    for i in range(len_vialist):
      gp_report("sigma_%s = %s"%(vialist[i],standard_devs[i]))

    # Print final summary
    gp_report("\n# Summary:")
    gp_report(  "# --------\n#")
    for i in range(len_vialist):
      gp_report("# %s = %s +/- %s"%(vialist[i],x[i],standard_devs[i]))
  except KeyboardInterrupt: raise
  except:
    gp_error("Error whilst displaying uncertainties in best fit parameter values.\nError:" , sys.exc_info()[1], "(" , sys.exc_info()[0] , ")")
    return # Error

  return # Done!!!