Example #1
  def __init__(self, paras):
    self.jobname         = paras['jobname']
    self.xVars           = paras['xVars']
    self.xInitialGuess   = paras['xInitialParaCombo'] # np.array
    self.targetVars      = paras['targetVars']
    self.target_fx       = np.array(paras['target_fx'])
    self.convCrit        = paras['convCrit'] 
    self.converged       = False
    self.makePlots       = paras['plotting']
    self.nUpdates        = 0
    self.optimFolder     = paras['optimFolder']

    self.x               = self.xInitialGuess
    self.fx              = np.array([])

    self.nIntpoints_max  = 4
    self.maxDist         = 10 # maximal distance from second best point
    self.protected_ix    = False
    self.optimRange      = (-1.,0.)
    self.plotRange       = (-1.,0.)

    # Set up logger
    self.logger = wrapLogger(loggerName = 'costlyOptimizationLogger' + self.optimFolder, streamVerb = 'DEBUG', 
                        logFile = os.path.join(self.optimFolder, 'costlyOpt.log'))
    self.logger.debug('initialized new instance of costlyOptimization for jobname = %s' % paras['jobname'])
Example #2
    def __init__(self, paras):
        self.jobname = paras["jobname"]
        self.xVars = paras["xVars"]
        self.xInitialGuess = paras["xInitialParaCombo"]  # np.array
        self.targetVars = paras["targetVars"]
        self.target_fx = np.array(paras["target_fx"])
        self.convCrit = paras["convCrit"]
        self.converged = False
        self.makePlots = paras["plotting"]
        self.nUpdates = 0
        self.optimFolder = paras["optimFolder"]

        self.x = self.xInitialGuess
        self.fx = np.array([])

        self.nIntpoints_max = 4
        self.maxDist = 10  # maximal distance from second best point
        self.protected_ix = False
        self.optimRange = (-1.0, 0.0)
        self.plotRange = (-1.0, 0.0)

        # Set up logger
        self.logger = wrapLogger(
            loggerName="costlyOptimizationLogger" + self.optimFolder,
            streamVerb="DEBUG",
            logFile=os.path.join(self.optimFolder, "costlyOpt.log"),
        )
        self.logger.debug("initialized new instance of costlyOptimization for jobname = %s" % paras["jobname"])
Example #3
def lucasOneAgent(shockMatrix, transMatrix, beta, g, Etg, PD, PB, markovFilePath, deterministic = False):
  '''
    markovFilePath: path to parameters.in file
    deterministic:  boolean indicating whether to compute the special deterministic or the stochastic case.

    One agent economy. Therefore we have:
      C_t = Y_t
      C_{t+1} = Y_{t+1}
    In the normalized world:
      c_t = 1
      c_{t+1} = 1
  '''
  
  logFile     = os.path.join(os.getcwd(), 'output/logs/lucasOneAgent.log')
  lucasLogger = wrapLogger(loggerName = 'lucasOneAgentLog', streamVerb = verb, logFile = logFile)

  gamma       = float(getParameter(markovFilePath, 'gamma', 'bar-separated'))
  psi         = float(getParameter(markovFilePath, 'psi', 'bar-separated'))
  cBar        = float(getParameter(markovFilePath, 'cBar', 'bar-separated'))
  mkov        = MkovM(shockMatrix, transMatrix)
  
  lucasLogger.debug('\n')
  lucasLogger.info('log file written to %s\n' % logFile)
  #np.set_printoptions(precision = 7, linewidth = 300)
  
  if deterministic:
    shockMatrix = np.array([[g]])
    transMatrix = np.array([[1]])
   
  states   = len(shockMatrix)
  c        = np.ones(states)
  rho      = 1. / psi
  gammaSeq = [ gamma ] * len(PD)

  if psi < 0: # CRRA case
    lucasLogger.debug('Using CRRA Euler Equations')
    # infinitely lived stock (lucas tree)
    qS_init = np.ones(states)
    def compute_qS(qS):
      euler_qS = eulerCRRA(g, beta, gamma, transMatrix, PD + qS) - qS
      return euler_qS  
    qS = scipy.optimize.newton_krylov(compute_qS, qS_init)
      
    # Price remaining assets
    for state in range(len(shockMatrix)):
      qD = eulerCRRA(g, beta, gamma, transMatrix, payoff = PD)
      qB = eulerCRRA(g, beta, gamma, transMatrix, payoff = PB)
  elif psi >= 0: # Epstein Zin case
    lucasLogger.debug('Using Epstein Zin Euler Equations')
    # guess initial value function
    V_init = g
    lucasLogger.debug('Initial V:      %s ' % V_init)
    V = V_init
    lucasLogger.info('starting backward recursion: ')
    dif = 1.
    counter = 0
    while dif > 1.e-10:
      newV = EpsteinZin(c - cBar, g, V, beta, gamma, psi, transMatrix)
      dif = np.sum(np.abs(newV - V))
      V = newV.copy()
      lucasLogger.debug('iteration: %d dif: %g V: %s' % ( counter, dif, V))
      counter += 1
    lucasLogger.info('converged after %d iterations: ' % counter)
    lucasLogger.info('converged value function: %s' % V)

    # infinitely lived stock 
    qS_init = np.ones(states)
    def compute_qS(qS):
      euler_qS = eulerEpsteinZin(g, V, beta, gamma, psi, transMatrix, payoff = PD + qS) - qS
      return euler_qS  
    qS = scipy.optimize.newton_krylov(compute_qS, qS_init)
    
    # Price remaining assets
    for state in range(len(shockMatrix)):
      qD = eulerEpsteinZin(g, V, beta, gamma, psi, transMatrix, payoff = PD)
      qB = eulerEpsteinZin(g, V, beta, gamma, psi, transMatrix, payoff = PB)
    
  # Generate output:   
  
  # stock
  lucasLogger.info('')
  lucasLogger.info('qS:                    %s' % qS)
  EtPS = inner1d(transMatrix, ( PD + qS ) * g)
  lucasLogger.info('EtPS:                  %s ' % EtPS)
  EtRS = EtPS / qS
  lucasLogger.info('EtRS:                  %s' % EtRS)
  if not deterministic:
    ERS = np.dot(mkov.getlmbda(), EtRS)
    lucasLogger.info('ERS:                   %s' % ERS)
  
  # bond
  lucasLogger.info('')
  lucasLogger.info('qB:                    %s' % qB)
#  EtPB = np.dot(transMatrix, PB * g)
  EtPB = inner1d(transMatrix, PB * g)
  lucasLogger.info('EtPB:                  %s' % EtPB)
  EtRB = EtPB / qB
  lucasLogger.info('EtRB:                  %s' % EtRB)
  if not deterministic:
    ERB = np.dot(mkov.getlmbda(), EtRB)
    lucasLogger.info('ERB:                   %s' % ERB)
    
  
  # dividend asset
  lucasLogger.info('')
  lucasLogger.info('qD:                    %s' % qD)
  EtPD = np.dot(transMatrix, PD * g)
  lucasLogger.info('EtPD:                  %s ' % EtPD)
  EtRD = EtPD / qD
  lucasLogger.info('EtRD:                  %s' % EtRD)
  if not deterministic:
    ERD = np.dot(mkov.getlmbda(), EtRD)
    lucasLogger.info('ERD:                   %s' % ERD)
  
  # risk premium
  c_rp = EtRS - EtRB
  lucasLogger.info('\ncond risk prem:        %s' % c_rp)
  if not deterministic:
    rp = np.dot(mkov.getlmbda(), c_rp)
    lucasLogger.info('unc risk premium:      %s' % rp)
    lucasLogger.debug('\ndone with riskpremium computation')
  
  # Write prices to file
  savePath = os.path.join(os.getcwd(), 'output')
  lucasLogger.debug('writing qS/qB to %s/lucasOneAgent_qS.in|lucasOneAgent_qB.in' % savePath)
  np.savetxt(os.path.join(savePath, 'lucasOneAgent_qS.in'), qS, fmt = '%15.10f')
  np.savetxt(os.path.join(savePath, 'lucasOneAgent_qB.in'), qB, fmt = '%15.10f')
  if psi > 0:
    np.savetxt(os.path.join(savePath, 'lucasOneAgent_V.in'), V, fmt = '%15.10f')
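
A hedged call sketch for lucasOneAgent follows. It is not standalone: the function reads the module-level verb and relies on eulerCRRA, EpsteinZin, eulerEpsteinZin, getParameter, MkovM and inner1d defined elsewhere in the project (see Example #4 and Example #19 for the imports). The numbers below are illustrative only.

# Illustrative values; parameters.in must provide gamma, psi and cBar (bar-separated).
import numpy as np

shockMatrix = np.array([[0.98], [1.04]])            # two growth states
transMatrix = np.array([[0.8, 0.2], [0.2, 0.8]])
g  = np.array([0.98, 1.04])                         # state-dependent gross growth
PD = np.array([0.1, 0.1])                           # dividend payoff per state
PB = np.array([1.0, 1.0])                           # bond payoff per state

lucasOneAgent(shockMatrix, transMatrix, beta=0.95, g=g, Etg=g.mean(),
              PD=PD, PB=PB, markovFilePath='input/parameters.in')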
Example #4
from pymods.markovChain.mcInterface import MkovM
from pymods.support.support import wrapLogger
from pymods.support.support import getParameter
from pymods.support.support import myArrayPrint

import numpy as np
import pandas
import scipy.optimize
from genMarkov import genMarkov

from numpy.core.umath_tests import inner1d

pprintFun = myArrayPrint(width = 12, prec = 7)
np.set_string_function(pprintFun, repr = False)

logger = wrapLogger(loggerName = 'lucasMainLog', streamVerb = 'DEBUG', logFile = None)

def lucasOneAgent(shockMatrix, transMatrix, beta, g, Etg, PD, PB, markovFilePath, deterministic = False):
  '''
    markovFilePath: path to parameters.in file
    deterministic:  boolean indicating whether to compute the special deterministic or the stochastic case.

    One agent economy. Therefore we have:
      C_t = Y_t
      C_{t+1} = Y_{t+1}
    In the normalized world:
      c_t = 1
      c_{t+1} = 1
  '''
  
  logFile     = os.path.join(os.getcwd(), 'output/logs/lucasOneAgent.log')
Example #5
    #     self.log.parent.addHandler(RedirectLoggingHandler())
    #     print self.log.handlers
    #     print self.log.parent.handlers
    #     print self.log.root.handlers

    #   self.log.critical('redirected gc3 log to ' + curFileName + '.log.')

    # interface to the GC3Libs main functionality
    self._core = self._get_core()

    # call hook methods from derived classes
    self.parse_args()


logger = wrapLogger(loggerName=curFileName + '.log',
                    streamVerb='INFO',
                    logFile=os.path.join(os.getcwd(), curFileName + '.log'))
gc3utilsLogger = wrapLogger(
    loggerName='gc3' + curFileName + '.log',
    streamVerb='INFO',
    logFile=os.path.join(os.getcwd(), curFileName + '.log'),
    streamFormat=
    '{record.time:%Y-%m-%d %H:%M:%S} - {record.channel}: {record.message}',
    fileFormat=
    '{record.time:%Y-%m-%d %H:%M:%S} - {record.channel}: {record.message}')
logger.debug('hello')


def dispatch_record(record):
    """Passes a record on to the handlers on the stack.  This is useful when
    log records are created programmatically and already have all the
Example #6
from gc3libs.cmdline import SessionBasedScript
from gc3libs.workflow import SequentialTaskCollection, ParallelTaskCollection
import gc3libs.utils

import gc3libs.debug

# logger
from pymods.support.support import wrapLogger
import pymods.support.support as support
from pymods.classes.tableDict import tableDict

from makePlots import momentPlots

# Set up logger
logger = wrapLogger(loggerName='idRiskParaSearchLogger',
                    streamVerb='DEBUG',
                    logFile=os.path.join(os.getcwd(), 'idRiskParaSearch.log'))


class solveParaCombination(SequentialTaskCollection):
    def __init__(self, substs, solverParas, **sessionParas):

        logger.debug('entering solveParaCombination.__init__ for job %s' %
                     sessionParas['jobname'])
        self.iter = 0

        self.jobname = sessionParas['jobname']
        self.substs = substs

        self.sessionParas = sessionParas
        self.pathToExecutable = sessionParas['pathToExecutable']
Example #7

# gc3 library imports
import gc3libs
from gc3libs import Application, Run, Task
from gc3libs.cmdline import SessionBasedScript, existing_file
import gc3libs.utils
import gc3libs.application.apppot


import gc3libs.debug


## custom application class

logger = wrapLogger(loggerName = 'gIdRisk.log', streamVerb = 'INFO', logFile = os.path.join(os.getcwd(), 'gParaSearch.log'))

class gIdRiskScript(SessionBasedScript, paraLoop):
    """
Read `.loop` files and execute the `forwardPremium` program accordingly.
    """

    def __init__(self):
        SessionBasedScript.__init__(
            self,
            version = '0.2',
            # only '.loop' files are considered as valid input
            input_filename_pattern = '*.loop',
        )
        paraLoop.__init__(self, 'INFO')
Example #8
from gc3libs.cmdline import SessionBasedScript, existing_file
from gc3libs.workflow import SequentialTaskCollection, ParallelTaskCollection
import gc3libs.utils

import gc3libs.application.apppot

import gc3libs.debug

# logger
from pymods.support.support import fillInputDir
from pymods.support.support import wrapLogger
import pymods.support.support as support
from pymods.classes.tableDict import tableDict

# Set up logger
logger = wrapLogger(loggerName = 'idRiskParaSearchLogger', streamVerb = 'DEBUG', logFile = os.path.join(os.getcwd(), 'idRiskParaSearch.log'))

# call -x /home/benjamin/workspace/idrisk/model/bin/idRiskOut -b /home/benjamin/workspace/idrisk/model/base para.loop -C 10 -N -A '/home/benjamin/apppot0+ben.diskUpd.img'

class solveParaCombination(SequentialTaskCollection):

    def __init__(self, substs, solverParas, **sessionParas):

        logger.debug('entering solveParaCombination.__init__ for job %s' % sessionParas['jobname'])
        self.iter    = 0

        self.jobname = 'solverParacombination' + sessionParas['jobname']
        self.substs = substs

        self.sessionParas     = sessionParas
        self.pathToExecutable = sessionParas['pathToExecutable']
Example #9
#!/usr/bin/env python

import numpy as n, pylab as p, time
import os

from pymods.support.support import wrapLogger

logger = wrapLogger(loggerName='convexHullLogger',
                    streamVerb='INFO',
                    logFile=os.path.join(os.getcwd(), 'convexHull.log'))


def _angle_to_points(points, centre):
    '''calculate angle in 2-D between points and x axis'''
    resOut = []
    for point in points:
        delta = point - centre
        res = n.arctan(delta[1] / delta[0])
        if delta[0] < 0:
            res += n.pi
        resOut.append(res)
    return n.array(resOut)


def _angle_to_point(point, centre):
    '''calculate angle in 2-D between points and x axis'''
    delta = point - centre
    res = n.arctan(delta[1] / delta[0])
    if delta[0] < 0:
        res += n.pi
    return res
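
A quick check of the two helpers above; the points are illustrative. Both use arctan plus a half-turn correction when delta[0] < 0, so the returned angles fall in (-pi/2, 3*pi/2).

# Illustrative only: angles of two points around the origin.
centre = n.array([0., 0.])
print _angle_to_point(n.array([1., 1.]), centre)                # ~0.785 (pi/4)
print _angle_to_points(n.array([[1., 1.], [-1., 1.]]), centre)  # [~0.785, ~2.356]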
Example #10
from pymods.markovChain.mcInterface import mcInterface
from pymods.markovChain.mcInterface import MkovM
from pymods.support.support import wrapLogger
from pymods.support.support import getParameter
import numpy as np
import pandas
import scikits.statsmodels.tsa.api
import scikits.statsmodels.tsa
import scipy.linalg
import scipy.optimize
from pymods.support.support import myArrayPrint

pprintFun = myArrayPrint(width = 12, prec = 7)
#np.set_string_function(pprintFun, repr = False)

logger = wrapLogger(loggerName = 'genMarkovLog', streamVerb = 'DEBUG', logFile = os.path.join(os.getcwd(), 'output/logs/genMarkov.log'))

def genMarkov(markovFilePath, verb = 'INFO', nSimulation = int(5.e+4)):

  logger.setStreamVerb(verb = verb)
  logger.info('')
  #os.system('cat ' + markovFilePath)
  
  logger.debug('markovFilePath is %s' % markovFilePath)
  
  # Read parameter file
  beta = float(getParameter(markovFilePath, 'beta', 'bar-separated'))
  muG = float(getParameter(markovFilePath, 'muG', 'bar-separated'))
  sigmaG = float(getParameter(markovFilePath, 'sigmaG', 'bar-separated'))
  p = float(getParameter(markovFilePath, 'p', 'bar-separated'))
  dy = float(getParameter(markovFilePath, 'dy', 'bar-separated'))
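
A hedged invocation sketch; the exact layout of parameters.in is defined by getParameter ('bar-separated') and is not shown in these snippets.

# Hypothetical call; the file must contain the bar-separated entries read
# above (beta, muG, sigmaG, p, dy, ...).
genMarkov('input/parameters.in', verb='DEBUG', nSimulation=int(1.e+4))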
Example #11
from pymods.support.support import fillInputDir
from pymods.support.support import wrapLogger

# gc3 library imports
import gc3libs
from gc3libs import Application, Run, Task
from gc3libs.cmdline import SessionBasedScript, existing_file
import gc3libs.utils
import gc3libs.application.apppot

import gc3libs.debug

## custom application class

logger = wrapLogger(loggerName='gIdRisk.log',
                    streamVerb='INFO',
                    logFile=os.path.join(os.getcwd(), 'gParaSearch.log'))


class gIdRiskScript(SessionBasedScript, paraLoop):
    """
Read `.loop` files and execute the `forwardPremium` program accordingly.
    """
    def __init__(self):
        SessionBasedScript.__init__(
            self,
            version='0.2',
            # only '.loop' files are considered as valid input
            input_filename_pattern='*.loop',
        )
        paraLoop.__init__(self, 'INFO')
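
gc3pie session-based scripts are normally started through their run() method; a minimal launch sketch, assuming the usual gc3libs entry-point convention (not shown in the original snippet):

# Assumed entry point -- an assumption, not part of the original snippet.
if __name__ == '__main__':
    gIdRiskScript().run()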
Example #12
def createOverviewTable(resultDir, outFile = 'simulation.out', exportFileName = 'overviewSimu', sortCols = [], orderCols = [], verb = 'DEBUG'):
  '''
    create simple overview tables for idRisk project
  '''
  #set up logger
  logger = wrapLogger(loggerName = 'createTableLog', streamVerb = verb, logFile = None)

  # Extract the relevant result folders
  resultFolders = [] 
  for (folder, dirnames, filenames) in os.walk(resultDir):
    # Walk through all subdirectories and look into those with para.* format
   # (head, folderTail) = os.path.split(os.path.normpath(folder))
    # if it's one of these output.1 folders, ignore
    if re.search(r'output\.', folder):
      continue
    if not re.search('para.*', folder):
      continue
    if not outFile in filenames:
      continue
    fileName = os.path.join(folder, outFile)
    if not os.path.getsize(fileName):
      continue
    resultFolder, pathToOutputFile = re.match('(.*para[^_]*_[^/]*)/(.*)', fileName).groups()
    logger.debug('%s found' % fileName)
    # Check if simulation.out ended properly. 
   # fileName = os.path.join(pathToFile[0], pathToFile[1], pathToFile[2])
    outFileHandle = open(fileName)
    lines = outFileHandle.read()
    if lines.rfind('simulation ended') >= 0:
      resultFolders.append((resultFolder, pathToOutputFile))
    else:
      logger.warning('simulation.out did not end properly. Check: %s' % (fileName))
  if not resultFolders:
    logger.debug('nothing to do, all folders empty')
    return None
  
  logger.debug('resultFolders: ')
  logger.debug(resultFolders)
  
  # initialize arrays
  headers = ['run']
  resultEntry = []
  
  # Set up result matrix 
  logger.debug('\n')
  logger.debug('Set up result matrix')
  for ixFolder, (resultFolder, pathToOutputFile) in enumerate(resultFolders):
    fileName = os.path.join(resultFolder, pathToOutputFile)
    logger.debug('Reading folder: ' + resultFolder)
   # (head, folderPath) = os.path.split(os.path.normpath(folderPath))
    resultEntry.append([])
    descr = re.match('.*para[^_]*_([^/]*).*', fileName).groups(1)
    logger.debug('descr is '  + descr[0])
    resultEntry[ixFolder].append(descr[0])
    # open specific simu.out file
    try: 
      outFileHandle = open(fileName)
    except: 
      continue
    lines = outFileHandle.readlines()
    for ixLine, line in enumerate(lines):
      line = line.rstrip()
      if line.find('simulation ended') >= 0: continue
      if line: 
        lineEles = line.rstrip().split(':')
        if lineEles[0] == 'Iteration': continue
        if re.search('iBar_Shock[2-9]Agent0', lineEles[0]): continue
        if len(lineEles) != 2: continue
        if len(lineEles[1].split()) > 1: # don't read vectors
          continue
        if len(lineEles[1].split('.')) > 2: 
          continue
        (head, element) = [ ele.strip() for ele in lineEles]
        logger.debug('head=' +  head + ' element=' + element)
        if ixFolder == 0:
          newHead = reformatString(head)
          logger.debug('head = ' + newHead)
          headers.append(newHead)
        if not element:
          logger.debug('element is none')
        elif is_int(element):
          resultEntry[ixFolder].append(int(element))
        elif is_float(element):
          resultEntry[ixFolder].append(float(element))
        else:
          resultEntry[ixFolder].append(element)
    logger.debug('headers = ')
    logger.debug(headers)
    logger.debug('')
    
  # Create a tableDict from the read data. 
  overviewTableDict = tableDict.fromRowList(headers, resultEntry)
  logger.debug(overviewTableDict)
  
  # split run field
  parameters = overviewTableDict['run'][0].split('_')
  keys = []
  for para in parameters:
    para = para.split('=')
    key = para[0]
    if key == 'beta':
      # handle the insane case where we loop over the time discount factor which has the same name
      # as the slope coefficient from the UIP regression
      key = 'beta_disc'
    keys.append(key)

  # Keys from the run field
  keys = getUniqueList(keys)
  logger.debug('keys = ' + str(keys))
  
  # Set up a runs dictionary from the keys obtained from the run field. 
  runs = {}
  runs = runs.fromkeys(keys)

  for key in keys:
    runs[key] = []
    
  # Set up the keys for the final table. 
  # Combine keys from run column with the other column labels. 
  # Delete run field from relevant keys
  #keys = keys + headers.tolist()
  #keys.remove('run')
  runs['keys'] = keys 
  
  # generate runs columns
  for ixRun, run in enumerate(overviewTableDict['run']):
    parameters = run.split('_')
    for para in parameters:
      para = para.split('=')
      key = para[0]
      if key == 'beta':
        # handle the insane case where we loop over the time discount factor which has the same name
        # as the slope coefficient from the UIP regression
        key = 'beta_disc'
      val = para[1]
##      if run.count('_' + key) > 1:
##        try:
##          runs[key][ixRun].append(val)
##        except IndexError:
##          runs[key].append([])
##          runs[key][ixRun].append(val)
##      else:
      runs[key].append(val)
  
  splitRunField = tableDict.fromDict(runs)
  logger.debug('\nsplit Run Field: ')
  logger.debug(splitRunField)
  
  
  # Add newly obtained run fields to full dictionary. 
  overviewTableDict.hzcatTableDict(splitRunField, append = False)
  #splitRunField.hzcatTableDict(overviewTableDict)
  logger.debug(overviewTableDict.cols)
  logger.debug(overviewTableDict)
  
  # Drop run field
  overviewTableDict.drop('run')

  # create some additional columns for analysis
  overviewTableDict['eR_a_sc'] = overviewTableDict['eR_a']**4
  overviewTableDict['eR_b_sc'] = overviewTableDict['eR_b']**4
  overviewTableDict['rP_ana_sc'] = overviewTableDict['eR_a_sc'] - overviewTableDict['eR_b_sc']
  overviewTableDict['std_R_a_sc'] = overviewTableDict['std_R_a(quar)'] * 2
  overviewTableDict['std_R_b_sc'] = overviewTableDict['std_R_b(quar)'] * 2

  # place new columns in orderCols
  for ele in [ 'eR_a_sc', 'eR_b_sc', 'rP_ana_sc', 'std_R_a_sc', 'std_R_b_sc' ]:
    if ele in orderCols: continue
    orderCols.append(ele)


  # cols = list(overviewTableDict.cols)
  # print cols
  # cols.insert(cols.index('eR_a')         + 1, 'eR_a_sc')
  # cols.insert(cols.index('eR_b')         + 1, 'eR_b_sc')
  # cols.insert(cols.index('rP_ana')       + 1, 'rP_ana_sc')
  # cols.insert(cols.index('std_R_a(quar)' + 1), 'std_R_a_sc')
  # cols.insert(cols.index('std_R_b(quar)' + 1), 'std_R_b_sc')
  # overviewTableDict.cols = np.array(cols)
  
  if 'warning_____t_iteration_converged_no_more_than' in overviewTableDict.cols:
    overviewTableDict.rename('warning_____t_iteration_converged_no_more_than', 't_maxConv')    
    
  
##  if 'gamma' in overviewTableDict.cols:
##    overviewTableDict.sort(['gamma'])
    
  if sortCols:
    overviewTableDict.sort(sortCols)
  if orderCols:
    overviewTableDict.order(orderCols)
  
  overviewTableDict.setWidth(26)
  overviewTableDict.setPrec(10)   
  logger.debug(overviewTableDict)

  logger.debug('possible sort/order columns are')
  logger.debug(overviewTableDict.cols)
  logger.debug('syntax: createTable.py . [sortcols] [ordercols]')
  
  if exportFileName:
    exportFilePath = os.path.join(resultDir, exportFileName)
    logger.debug('Writing table to ' + exportFilePath)
    overviewSimu = open(exportFilePath, 'w')  

    print >> overviewSimu, overviewTableDict
    # flush the output in case this script is called within a larger project
    # by default output gets flushed when buffer full or program exits. 
    overviewSimu.flush()
    logger.debug('Done writing table')
    return overviewTableDict
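
A hedged invocation sketch; the resultDir layout (para.* folders containing simulation.out) follows the walk logic above, and the column names passed here are assumptions.

# Hypothetical call: scan a result tree and write <resultDir>/overviewSimu.
table = createOverviewTable('results', outFile='simulation.out',
                            exportFileName='overviewSimu',
                            sortCols=['gamma'],   # assumed column name
                            orderCols=[], verb='INFO')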
Example #13
#!/usr/bin/env python

import numpy as n, pylab as p, time
import os

from pymods.support.support import wrapLogger

logger = wrapLogger(loggerName = 'convexHullLogger', streamVerb = 'INFO', logFile = os.path.join(os.getcwd(), 'convexHull.log'))


def _angle_to_points(points, centre):
    '''calculate angle in 2-D between points and x axis'''
    resOut = []
    for point in points:
        delta = point - centre
        res = n.arctan(delta[1] / delta[0])
        if delta[0] < 0:
            res += n.pi
        resOut.append(res)
    return n.array(resOut)

def _angle_to_point(point, centre):
    '''calculate angle in 2-D between points and x axis'''
    delta = point - centre
    res = n.arctan(delta[1] / delta[0])
    if delta[0] < 0:
        res += n.pi
    return res


def _draw_triangle(p1, p2, p3, **kwargs):
Example #14
def createOverviewTable(resultDir,
                        outFile='simulation.out',
                        exportFileName='overviewSimu',
                        sortCols=[],
                        orderCols=[],
                        verb='DEBUG'):
    '''
    create simple overview tables for idRisk project
    '''
    #set up logger
    logger = wrapLogger(loggerName='createTableLog',
                        streamVerb=verb,
                        logFile=None)

    # Extract the relevant result folders
    resultFolders = []
    for (folder, dirnames, filenames) in os.walk(resultDir):
        # Walk through all subdirectories and look into those with para.* format
        # (head, folderTail) = os.path.split(os.path.normpath(folder))
        # if it's one of these output.1 folders, ignore
        if re.search(r'output\.', folder):
            continue
        if not re.search('para.*', folder):
            continue
        if not outFile in filenames:
            continue
        fileName = os.path.join(folder, outFile)
        if not os.path.getsize(fileName):
            continue
        resultFolder, pathToOutputFile = re.match('(.*para[^_]*_[^/]*)/(.*)',
                                                  fileName).groups()
        logger.debug('%s found' % fileName)
        # Check if simulation.out ended properly.
        # fileName = os.path.join(pathToFile[0], pathToFile[1], pathToFile[2])
        outFileHandle = open(fileName)
        lines = outFileHandle.read()
        if lines.rfind('simulation ended') >= 0:
            resultFolders.append((resultFolder, pathToOutputFile))
        else:
            logger.warning('simulation.out did not end properly. Check: %s' %
                           (fileName))
    if not resultFolders:
        logger.debug('nothing to do, all folders empty')
        return None

    logger.debug('resultFolders: ')
    logger.debug(resultFolders)

    # initialize arrays
    headers = ['run']
    resultEntry = []

    # Set up result matrix
    logger.debug('\n')
    logger.debug('Set up result matrix')
    for ixFolder, (resultFolder, pathToOutputFile) in enumerate(resultFolders):
        fileName = os.path.join(resultFolder, pathToOutputFile)
        logger.debug('Reading folder: ' + resultFolder)
        # (head, folderPath) = os.path.split(os.path.normpath(folderPath))
        resultEntry.append([])
        descr = re.match('.*para[^_]*_([^/]*).*', fileName).groups(1)
        logger.debug('descr is ' + descr[0])
        resultEntry[ixFolder].append(descr[0])
        # open specific simu.out file
        try:
            outFileHandle = open(fileName)
        except:
            continue
        lines = outFileHandle.readlines()
        for ixLine, line in enumerate(lines):
            line = line.rstrip()
            if line.find('simulation ended') >= 0: continue
            if line:
                lineEles = line.rstrip().split(':')
                if lineEles[0] == 'Iteration': continue
                if re.search('iBar_Shock[2-9]Agent0', lineEles[0]): continue
                if len(lineEles) != 2: continue
                if len(lineEles[1].split()) > 1:  # don't read vectors
                    continue
                if len(lineEles[1].split('.')) > 2:
                    continue
                (head, element) = [ele.strip() for ele in lineEles]
                logger.debug('head=' + head + ' element=' + element)
                if ixFolder == 0:
                    newHead = reformatString(head)
                    logger.debug('head = ' + newHead)
                    headers.append(newHead)
                if not element:
                    logger.debug('element is none')
                elif is_int(element):
                    resultEntry[ixFolder].append(int(element))
                elif is_float(element):
                    resultEntry[ixFolder].append(float(element))
                else:
                    resultEntry[ixFolder].append(element)
        logger.debug('headers = ')
        logger.debug(headers)
        logger.debug('')

    # Create a tableDict from the read data.
    overviewTableDict = tableDict.fromRowList(headers, resultEntry)
    logger.debug(overviewTableDict)

    # split run field
    parameters = overviewTableDict['run'][0].split('_')
    keys = []
    for para in parameters:
        para = para.split('=')
        key = para[0]
        if key == 'beta':
            # handle the insane case where we loop over the time discount factor which has the same name
            # as the slope coefficient from the UIP regression
            key = 'beta_disc'
        keys.append(key)

    # Keys from the run field
    keys = getUniqueList(keys)
    logger.debug('keys = ' + str(keys))

    # Set up a runs dictionary from the keys obtained from the run field.
    runs = {}
    runs = runs.fromkeys(keys)

    for key in keys:
        runs[key] = []

    # Set up the keys for the final table.
    # Combine keys from run column with the other column labels.
    # Delete run field from relevant keys
    #keys = keys + headers.tolist()
    #keys.remove('run')
    runs['keys'] = keys

    # generate runs columns
    for ixRun, run in enumerate(overviewTableDict['run']):
        parameters = run.split('_')
        for para in parameters:
            para = para.split('=')
            key = para[0]
            if key == 'beta':
                # handle the insane case where we loop over the time discount factor which has the same name
                # as the slope coefficient from the UIP regression
                key = 'beta_disc'
            val = para[1]
            ##      if run.count('_' + key) > 1:
            ##        try:
            ##          runs[key][ixRun].append(val)
            ##        except IndexError:
            ##          runs[key].append([])
            ##          runs[key][ixRun].append(val)
            ##      else:
            runs[key].append(val)

    splitRunField = tableDict.fromDict(runs)
    logger.debug('\nsplit Run Field: ')
    logger.debug(splitRunField)

    # Add newly obtained run fields to full dictionary.
    overviewTableDict.hzcatTableDict(splitRunField, append=False)
    #splitRunField.hzcatTableDict(overviewTableDict)
    logger.debug(overviewTableDict.cols)
    logger.debug(overviewTableDict)

    # Drop run field
    overviewTableDict.drop('run')

    # create some additional columns for analysis
    overviewTableDict['eR_a_sc'] = overviewTableDict['eR_a']**4
    overviewTableDict['eR_b_sc'] = overviewTableDict['eR_b']**4
    overviewTableDict['rP_ana_sc'] = overviewTableDict[
        'eR_a_sc'] - overviewTableDict['eR_b_sc']
    overviewTableDict['std_R_a_sc'] = overviewTableDict['std_R_a(quar)'] * 2
    overviewTableDict['std_R_b_sc'] = overviewTableDict['std_R_b(quar)'] * 2

    # place new columns in orderCols
    for ele in ['eR_a_sc', 'eR_b_sc', 'rP_ana_sc', 'std_R_a_sc', 'std_R_b_sc']:
        if ele in orderCols: continue
        orderCols.append(ele)

    # cols = list(overviewTableDict.cols)
    # print cols
    # cols.insert(cols.index('eR_a')         + 1, 'eR_a_sc')
    # cols.insert(cols.index('eR_b')         + 1, 'eR_b_sc')
    # cols.insert(cols.index('rP_ana')       + 1, 'rP_ana_sc')
    # cols.insert(cols.index('std_R_a(quar)' + 1), 'std_R_a_sc')
    # cols.insert(cols.index('std_R_b(quar)' + 1), 'std_R_b_sc')
    # overviewTableDict.cols = np.array(cols)

    if 'warning_____t_iteration_converged_no_more_than' in overviewTableDict.cols:
        overviewTableDict.rename(
            'warning_____t_iteration_converged_no_more_than', 't_maxConv')

##  if 'gamma' in overviewTableDict.cols:
##    overviewTableDict.sort(['gamma'])

    if sortCols:
        overviewTableDict.sort(sortCols)
    if orderCols:
        overviewTableDict.order(orderCols)

    overviewTableDict.setWidth(26)
    overviewTableDict.setPrec(10)
    logger.debug(overviewTableDict)

    logger.debug('possible sort/order columns are')
    logger.debug(overviewTableDict.cols)
    logger.debug('syntax: createTable.py . [sortcols] [ordercols]')

    if exportFileName:
        exportFilePath = os.path.join(resultDir, exportFileName)
        logger.debug('Writing table to ' + exportFilePath)
        overviewSimu = open(exportFilePath, 'w')

        print >> overviewSimu, overviewTableDict
        # flush the output in case this script is called within a larger project
        # by default output gets flushed when buffer full or program exits.
        overviewSimu.flush()
        logger.debug('Done writing table')
        return overviewTableDict
Example #15
    self.log.parent.handlers = []
    self.log.parent.addHandler(RedirectLoggingHandler())
    print self.log.handlers
    print self.log.parent.handlers
    print self.log.root.handlers

    self.log.critical("Successfully overridden gc3pie error handling. ")

    # interface to the GC3Libs main functionality
    self._core = self._make_core()

    # call hook methods from derived classes
    self.parse_args()


logger = wrapLogger(loggerName="ghousing.log", streamVerb="INFO", logFile=os.path.join(os.getcwd(), "ghousing.log"))
gc3utilsLogger = wrapLogger(
    loggerName="gc3ghousing.log",
    streamVerb="INFO",
    logFile=os.path.join(os.getcwd(), "ghousing.log"),
    streamFormat="{record.time:%Y-%m-%d %H:%M:%S} - {record.channel}: {record.message}",
    fileFormat="{record.time:%Y-%m-%d %H:%M:%S} - {record.channel}: {record.message}",
)


def dispatch_record(record):
    """Passes a record on to the handlers on the stack.  This is useful when
    log records are created programmatically and already have all the
    information attached and should be dispatched independent of a logger.
    """
    #    logbook.base._default_dispatcher.call_handlers(record)
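
The pattern above builds two wrapLogger instances that write to the same file, one for the script itself and one that receives gc3pie/logbook records via dispatch_record. A minimal sketch using only the wrapLogger signature seen throughout these examples:

# Minimal sketch; wrapLogger's keyword names are taken from the snippets above.
import os
from pymods.support.support import wrapLogger

log = wrapLogger(loggerName='demo.log', streamVerb='INFO',
                 logFile=os.path.join(os.getcwd(), 'demo.log'))
log.info('goes to both the stream and the file')
log.debug('suppressed on the stream at INFO verbosity')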
Example #16
    sys.path.append(path2Pymods)
from pymods.classes.tableDict import tableDict
from pymods.support.support import wrapLogger

# check matplotlib plot properties (kwargs) here:
# http://matplotlib.sourceforge.net/api/pyplot_api.html#matplotlib.pyplot.plot
import matplotlib.cm
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab

from createTable import createOverviewTable

verb = 'DEBUG'
logger = wrapLogger(loggerName='createTableLog', streamVerb=verb, logFile=None)

# list of characters for line style
# http://www.thetechrepo.com/main-articles/469


def momentPlots(baseName,
                path,
                xVar,
                overlay,
                conditions={},
                xVarRange=None,
                figureFile=None,
                type='presentation'):

    if os.path.isdir(path):
Example #17
def tauchen(F=np.matrix([[0.1, 0], [0, 0.05]]),
            F0=np.array([[0.9], [0.95]]),
            Sigma=np.matrix([[0.01, 0.0120], [0.0120, 0.09]]),
            Ns=3,
            bandwidth=1,
            verb='DEBUG'):
    '''
   function [P, y, x, MVndx, Moments0, p0] = tauchen(F, F0, Sigma, Ns, bandwidth)
   convert VAR(1) for y into Markov-Chain using Tauchen's method
   cool: Y can have arbitrary dimension!
   Y_t = F0 + F Y_{t-1} + e_t and E e_t e_t' = Sigma
   P is transition matrix
   y and x are midpoints of the Markov grid, x being the orthogonalized process (using a Cholesky factor of Sigma)
   Ns is number of states *per variable* ! (Nstar = Ns^Ny)
   bandwidth is multiple of standard deviations which will be covered per variable

   Note: this function imposes same bandwidth and number of gridpoints per element of Y (makes algorithm more straightforward)
  '''

    # Get logger
    logger = wrapLogger(loggerName='mcInterface', streamVerb=verb, logFile='')

    # Make sure that input matrices are not arrays to allow for * notation
    F = np.mat(F)
    Sigma = np.mat(Sigma)

    # construct the range over which each of the components may vary
    # Y_t = F0 + F Y_{t-1} + Q \varepsilon_t
    Ny = np.size(F, 0)
    Nstar = Ns**Ny
    Q = np.linalg.cholesky(
        Sigma
    )  # cholesky in Matlab is the transpose of numpy. -> Drop one transpose
    iQ = np.linalg.inv(Q)
    Iy = np.eye(Ny)
    EY = np.linalg.inv(Iy - F) * F0
    VarY = disclyap(F, Q * np.transpose(Q))

    # X_t = Q^{-1} Y_t = F0x + Fx X_{t-1} + \varepsilon_t
    Fx = iQ * F * Q
    F0x = iQ * F0
    EX = iQ * EY
    VarX = iQ * VarY * iQ.T  # = dlyap(F, Iy);
    StdX = np.sqrt(VarX.diagonal()).T

    # construct univariate grids for x (always midpoints!)
    griddy = np.tile(np.nan, (Ns, Ny))
    Ub = EX + bandwidth * StdX
    Lb = EX - bandwidth * StdX
    steps = (Ub - Lb) / (Ns - 1)
    for x in range(Ny):
        griddy[:, x] = np.linspace(Lb[x, 0], Ub[x, 0], Ns)
        #np.arange(Lb[x, 0], Ub[x, 0], steps[x, 0])

    # Index for Multivariate Grid
    MVndx = gridMVndx(Ns, Ny)
    #indexArr = np.array(list(getIndex([Ns, Ny], restr = None, direction = 'columnwise')))
    #indexArr = np.reshape(indexArr, (Ny, Ns, Ny))
    #midPattern = np.array(list(getIndex([Ns, Ns], restr = None, direction = 'columnwise')))

    # note: midpoints also used for conditional means! (see below)
    griddyRaveled = np.ravel(griddy, order='F')
    x = griddyRaveled[MVndx]
    y = x * Q.T

    endpoints = griddy[:-1, :] + np.tile(steps.T / 2, (Ns - 1, 1))

    # conditional distributions
    # note usage of griddy, not XuvgridMid!
    condmean = x * Fx.T + np.tile(F0x.T, (Nstar, 1))
    condstd = np.ones((1, Ny))
    P = np.tile(np.nan, (Nstar, Nstar))
    for s in range(Nstar):
        E = np.tile(condmean[s, :], (Ns - 1, 1))
        V = np.tile(condstd, (Ns - 1, 1))
        cdfValues = scipy.stats.norm.cdf(endpoints, E, V)
        probby = np.diff(np.vstack((np.zeros(
            (1, Ny)), cdfValues, np.ones((1, Ny)))),
                         axis=0)
        probbyRaveled = np.ravel(probby, order='F')
        P[s, :] = np.prod(probbyRaveled[MVndx], axis=1)

    # construct unconditional distribution -- diagonalize VarX !!

    if Ny > 1:
        logger.debug('p0 not correctly implemented! need MVgrid')

    colly = np.linalg.inv(np.linalg.cholesky(VarX).T)
    uncondmean = (colly * EX).T
    uncondstd = np.sqrt(np.diag(colly * VarX * colly.T)).T
    E = np.tile(uncondmean, (Ns - 1, 1))
    V = np.tile(uncondstd, (Ns - 1, 1))
    cdfValues = scipy.stats.norm.cdf(endpoints * colly.T, E, V)
    ProbUV = np.diff(np.vstack((np.zeros((1, Ny)), cdfValues, np.ones(
        (1, Ny)))),
                     axis=0)
    probUVRaveled = np.ravel(ProbUV, order='F')
    p0 = np.prod(probUVRaveled[MVndx], axis=1)

    Moments0 = {}
    Moments0['EY'] = EY
    Moments0['EX'] = EX
    Moments0['VarY'] = VarY
    Moments0['VarX'] = VarX

    returnDict = {}
    returnDict['P'] = P
    returnDict['y'] = y
    returnDict['x'] = x
    returnDict['MVndx'] = MVndx
    returnDict['Moments0'] = Moments0
    returnDict['p0'] = p0

    logger.debug('done tauchen')
    return returnDict
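
A usage sketch with the defaults from the signature above; note that tauchen also needs disclyap and gridMVndx, which are defined elsewhere in the module. The grid has Ns^Ny = 9 states here.

# Discretize a bivariate VAR(1) on 3 grid points per variable.
import numpy as np

out = tauchen(F=np.matrix([[0.1, 0.], [0., 0.05]]),
              F0=np.array([[0.9], [0.95]]),
              Sigma=np.matrix([[0.01, 0.012], [0.012, 0.09]]),
              Ns=3, bandwidth=1)
P = out['P']    # 9x9 transition matrix
y = out['y']    # grid midpoints for Y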
Example #18
#    redirect_logging() # does the same thing as adding a RedirectLoggingHandler... might as well be explicit
    self.log.parent.handlers = []
    self.log.parent.addHandler(RedirectLoggingHandler())
    print self.log.handlers
    print self.log.parent.handlers
    print self.log.root.handlers

    self.log.critical('Successfully overridden gc3pie error handling. ')

    # interface to the GC3Libs main functionality
    self._core = self._make_core()

    # call hook methods from derived classes
    self.parse_args()

logger = wrapLogger(loggerName = 'ghousing.log', streamVerb = 'INFO', logFile = os.path.join(os.getcwd(), 'ghousing.log'))
gc3utilsLogger = wrapLogger(loggerName = 'gc3ghousing.log', streamVerb = 'INFO', logFile = os.path.join(os.getcwd(), 'ghousing.log'),
                            streamFormat = '{record.time:%Y-%m-%d %H:%M:%S} - {record.channel}: {record.message}',
                            fileFormat = '{record.time:%Y-%m-%d %H:%M:%S} - {record.channel}: {record.message}')

def dispatch_record(record):
    """Passes a record on to the handlers on the stack.  This is useful when
    log records are created programmatically and already have all the
    information attached and should be dispatched independent of a logger.
    """
#    logbook.base._default_dispatcher.call_handlers(record)
    gc3utilsLogger.call_handlers(record)

import gc3libs.cmdline
#gc3libs.cmdline._Script.pre_run = pre_run
import logbook
Example #19
from pymods.markovChain.mcInterface import MkovM
from pymods.support.support import wrapLogger
from pymods.support.support import getParameter
from pymods.support.support import myArrayPrint

import numpy as np
import pandas
import scipy.optimize
from genMarkov import genMarkov

from numpy.core.umath_tests import inner1d

pprintFun = myArrayPrint(width = 12, prec = 7)
np.set_string_function(pprintFun, repr = False)

logger = wrapLogger(loggerName = 'lucasMainLog', streamVerb = 'DEBUG', logFile = None)

def lucasOneAgent(shockMatrix, transMatrix, beta, g, Etg, PD, PB, markovFilePath, deterministic = False):
  '''
    markovFilePath: path to parameters.in file
    deterministic:  boolean indicating whether to compute the special deterministic or the stochastic case.

    One agent economy. Therefore we have:
      C_t = Y_t
      C_{t+1} = Y_{t+1}
    In the normalized world:
      c_t = 1
      c_{t+1} = 1
  '''
  
  logFile     = os.path.join(os.getcwd(), 'output/logs/lucasOneAgent.log')
Example #20
from pymods.markovChain.mcInterface import MkovM
from pymods.support.support import wrapLogger
from pymods.support.support import getParameter
import numpy as np
import pandas
import scikits.statsmodels.tsa.api
import scikits.statsmodels.tsa
import scipy.linalg
import scipy.optimize
from pymods.support.support import myArrayPrint

pprintFun = myArrayPrint(width=12, prec=7)
#np.set_string_function(pprintFun, repr = False)

logger = wrapLogger(loggerName='genMarkovLog',
                    streamVerb='DEBUG',
                    logFile=os.path.join(os.getcwd(),
                                         'output/logs/genMarkov.log'))


def genMarkov(markovFilePath, verb='INFO', nSimulation=int(5.e+4)):

    logger.setStreamVerb(verb=verb)
    logger.info('')
    #os.system('cat ' + markovFilePath)

    logger.debug('markovFilePath is %s' % markovFilePath)

    # Read parameter file
    beta = float(getParameter(markovFilePath, 'beta', 'bar-separated'))
    muG = float(getParameter(markovFilePath, 'muG', 'bar-separated'))
    sigmaG = float(getParameter(markovFilePath, 'sigmaG', 'bar-separated'))
Example #21
def lucasOneAgent(shockMatrix, transMatrix, beta, g, Etg, PD, PB, markovFilePath, deterministic = False):
  '''
    markovFilePath: path to parameters.in file
    deterministic:  boolean indicating whether to compute the special deterministic or the stochastic case.

    One agent economy. Therefore we have:
      C_t = Y_t
      C_{t+1} = Y_{t+1}
    In the normalized world:
      c_t = 1
      c_{t+1} = 1
  '''
  
  logFile     = os.path.join(os.getcwd(), 'output/logs/lucasOneAgent.log')
  lucasLogger = wrapLogger(loggerName = 'lucasOneAgentLog', streamVerb = verb, logFile = logFile)

  gamma       = float(getParameter(markovFilePath, 'gamma', 'bar-separated'))
  psi         = float(getParameter(markovFilePath, 'psi', 'bar-separated'))
  cBar        = float(getParameter(markovFilePath, 'cBar', 'bar-separated'))
  mkov        = MkovM(shockMatrix, transMatrix)
  
  lucasLogger.debug('\n')
  lucasLogger.info('log file written to %s\n' % logFile)
  #np.set_printoptions(precision = 7, linewidth = 300)
  
  if deterministic:
    shockMatrix = np.array([[g]])
    transMatrix = np.array([[1]])
   
  states   = len(shockMatrix)
  c        = np.ones(states)
  rho      = 1. / psi
  gammaSeq = [ gamma ] * len(PD)

  if psi < 0: # CRRA case
    lucasLogger.debug('Using CRRA Euler Equations')
    # infinitely lived stock (lucas tree)
    qS_init = np.ones(states)
    def compute_qS(qS):
      euler_qS = eulerCRRA(g, beta, gamma, transMatrix, PD + qS) - qS
      return euler_qS  
    qS = scipy.optimize.newton_krylov(compute_qS, qS_init)
      
    # Price remaining assets
    for state in range(len(shockMatrix)):
      qD = eulerCRRA(g, beta, gamma, transMatrix, payoff = PD)
      qB = eulerCRRA(g, beta, gamma, transMatrix, payoff = PB)
  elif psi >= 0: # Epstein Zin case
    lucasLogger.debug('Using Epstein Zin Euler Equations')
    # guess initial value function
    V_init = g
    lucasLogger.debug('Initial V:      %s ' % V_init)
    V = V_init
    lucasLogger.info('starting backward recursion: ')
    dif = 1.
    counter = 0
    while dif > 1.e-10:
      newV = EpsteinZin(c - cBar, g, V, beta, gamma, psi, transMatrix)
      dif = np.sum(np.abs(newV - V))
      V = newV.copy()
      lucasLogger.debug('iteration: %d dif: %g V: %s' % ( counter, dif, V))
      counter += 1
    lucasLogger.info('converged after %d iterations: ' % counter)
    lucasLogger.info('converged value function: %s' % V)

    # infinitely lived stock 
    qS_init = np.ones(states)
    def compute_qS(qS):
      euler_qS = eulerEpsteinZin(g, V, beta, gamma, psi, transMatrix, payoff = PD + qS) - qS
      return euler_qS  
    qS = scipy.optimize.newton_krylov(compute_qS, qS_init)
    
    # Price remaining assets
    for state in range(len(shockMatrix)):
      qD = eulerEpsteinZin(g, V, beta, gamma, psi, transMatrix, payoff = PD)
      qB = eulerEpsteinZin(g, V, beta, gamma, psi, transMatrix, payoff = PB)
    
  # Generate output:   
  
  # stock
  lucasLogger.info('')
  lucasLogger.info('qS:                    %s' % qS)
  EtPS = inner1d(transMatrix, ( PD + qS ) * g)
  lucasLogger.info('EtPS:                  %s ' % EtPS)
  EtRS = EtPS / qS
  lucasLogger.info('EtRS:                  %s' % EtRS)
  if not deterministic:
    ERS = np.dot(mkov.getlmbda(), EtRS)
    lucasLogger.info('ERS:                   %s' % ERS)
  
  # bond
  lucasLogger.info('')
  lucasLogger.info('qB:                    %s' % qB)
#  EtPB = np.dot(transMatrix, PB * g)
  EtPB = inner1d(transMatrix, PB * g)
  lucasLogger.info('EtPB:                  %s' % EtPB)
  EtRB = EtPB / qB
  lucasLogger.info('EtRB:                  %s' % EtRB)
  if not deterministic:
    ERB = np.dot(mkov.getlmbda(), EtRB)
    lucasLogger.info('ERB:                   %s' % ERB)
    
  
  # dividend asset
  lucasLogger.info('')
  lucasLogger.info('qD:                    %s' % qD)
  EtPD = np.dot(transMatrix, PD * g)
  lucasLogger.info('EtPD:                  %s ' % EtPD)
  EtRD = EtPD / qD
  lucasLogger.info('EtRD:                  %s' % EtRD)
  if not deterministic:
    ERD = np.dot(mkov.getlmbda(), EtRD)
    lucasLogger.info('ERD:                   %s' % ERD)
  
  # risk premium
  c_rp = EtRS - EtRB
  lucasLogger.info('\ncond risk prem:        %s' % c_rp)
  if not deterministic:
    rp = np.dot(mkov.getlmbda(), c_rp)
    lucasLogger.info('unc risk premium:      %s' % rp)
    lucasLogger.debug('\ndone with riskpremium computation')
  
  # Write prices to file
  savePath = os.path.join(os.getcwd(), 'output')
  lucasLogger.debug('writing qS/qB to %s/lucasOneAgent_qS.in|lucasOneAgent_qB.in' % savePath)
  np.savetxt(os.path.join(savePath, 'lucasOneAgent_qS.in'), qS, fmt = '%15.10f')
  np.savetxt(os.path.join(savePath, 'lucasOneAgent_qB.in'), qB, fmt = '%15.10f')
  if psi > 0:
    np.savetxt(os.path.join(savePath, 'lucasOneAgent_V.in'), V, fmt = '%15.10f')
Example #22
  sys.path.append(path2Pymods)
from pymods.classes.tableDict import tableDict
from pymods.support.support import wrapLogger

# check matplotlib plot properties (kwargs) here: 
# http://matplotlib.sourceforge.net/api/pyplot_api.html#matplotlib.pyplot.plot
import matplotlib.cm
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab

from createTable import createOverviewTable

verb = 'DEBUG'
logger = wrapLogger(loggerName = 'createTableLog', streamVerb = verb, logFile = None)

# list of characters for line style
# http://www.thetechrepo.com/main-articles/469
  
  
  
def momentPlots(baseName, path, xVar, overlay, conditions = {}, xVarRange = None, figureFile = None, type = 'presentation'):
  
  if os.path.isdir(path):
#  if not tableFile:
    tableFile = os.path.join(path, 'overviewSimu')
  elif os.path.isfile(path):
    tableFile = path
  else: 
    logger.critical('path is not a directory or a table file.')
Example #23
def tauchen(F = np.matrix([[0.1, 0], [0, 0.05]]), F0 = np.array([[0.9], [0.95]]), Sigma = np.matrix([[0.01, 0.0120], [0.0120, 0.09]]), 
            Ns = 3, bandwidth = 1, verb = 'DEBUG'):
  '''
   function [P, y, x, MVndx, Moments0, p0] = tauchen(F, F0, Sigma, Ns, bandwidth)
   convert VAR(1) for y into Markov-Chain using Tauchen's method
   cool: Y can have arbitrary dimension!
   Y_t = F0 + F Y_{t-1} + e_t and E e_t e_t' = Sigma
   P is transition matrix
   y and x are midpoints of the Markov grid, x being the orthogonalized process (using a Cholesky factor of Sigma)
   Ns is number of states *per variable* ! (Nstar = Ns^Ny)
   bandwidth is multiple of standard deviations which will be covered per variable

   Note: this function imposes same bandwidth and number of gridpoints per element of Y (makes algorithm more straightforward)
  '''

  # Get logger
  logger = wrapLogger(loggerName = 'mcInterface', streamVerb = verb, logFile = '')
  
  # Make sure that input matrices are not arrays to allow for * notation
  F     = np.mat(F)
  Sigma = np.mat(Sigma)

  # construct the range over which each of the components may vary
  # Y_t = F0 + F Y_{t-1} + Q \varepsilon_t
  Ny       = np.size(F, 0)
  Nstar    = Ns**Ny
  Q        = np.linalg.cholesky(Sigma)   # cholesky in Matlab is the transpose of numpy. -> Drop one transpose
  iQ       = np.linalg.inv(Q)
  Iy       = np.eye(Ny)
  EY       = np.linalg.inv(Iy - F) * F0
  VarY     = disclyap(F, Q * np.transpose(Q));

  # X_t = Q^{-1} Y_t = F0x + Fx X_{t-1} + \varepsilon_t 
  Fx       = iQ * F * Q
  F0x      = iQ * F0
  EX       = iQ * EY
  VarX     = iQ * VarY * iQ.T # = dlyap(F, Iy);
  StdX     = np.sqrt(VarX.diagonal()).T


  # construct univariate grids for x (always midpoints!)
  griddy  = np.tile(np.nan, (Ns, Ny))
  Ub      = EX + bandwidth * StdX
  Lb      = EX - bandwidth * StdX
  steps   = (Ub - Lb) / (Ns - 1)
  for x in range(Ny):
    griddy[:, x] = np.linspace(Lb[x,0], Ub[x,0], Ns)
    #np.arange(Lb[x, 0], Ub[x, 0], steps[x, 0])


  # Index for Multivariate Grid
  MVndx                = gridMVndx(Ns, Ny);
  #indexArr = np.array(list(getIndex([Ns, Ny], restr = None, direction = 'columnwise')))
  #indexArr = np.reshape(indexArr, (Ny, Ns, Ny))
  #midPattern = np.array(list(getIndex([Ns, Ns], restr = None, direction = 'columnwise')))

  # note: midpoints also used for conditional means! (see below)
  griddyRaveled = np.ravel(griddy, order = 'F')
  x = griddyRaveled[MVndx]
  y = x * Q.T

  endpoints = griddy[:-1,:] + np.tile(steps.T / 2, ( Ns - 1, 1) )
  
  
  # conditional distributions
  # note usage of griddy, not XuvgridMid!
  condmean = x * Fx.T + np.tile(F0x.T, (Nstar,1) )
  condstd  = np.ones((1, Ny))
  P = np.tile(np.nan, (Nstar, Nstar))
  for s in range(Nstar):
    E = np.tile(condmean[s,:], (Ns-1, 1))
    V = np.tile(condstd, (Ns-1, 1))
    cdfValues = scipy.stats.norm.cdf(endpoints, E, V)
    probby = np.diff(np.vstack((np.zeros((1, Ny)), cdfValues, np.ones((1,Ny)))), axis = 0)
    probbyRaveled = np.ravel(probby, order = 'F')
    P[s,:] = np.prod(probbyRaveled[MVndx], axis = 1)
    
    
  # construct unconditional distribution -- diagonalize VarX !!

  if Ny > 1:
    logger.debug('p0 not correctly implemented! need MVgrid')
   
  colly         = np.linalg.inv(np.linalg.cholesky(VarX).T)
  uncondmean    = (colly * EX).T
  uncondstd     = np.sqrt(np.diag(colly * VarX * colly.T)).T
  E             = np.tile(uncondmean, (Ns-1, 1))
  V             = np.tile(uncondstd, (Ns-1, 1))
  cdfValues     = scipy.stats.norm.cdf(endpoints * colly.T, E, V)
  ProbUV        = np.diff(np.vstack((np.zeros((1, Ny)), cdfValues, np.ones((1,Ny)))), axis = 0)
  probUVRaveled = np.ravel(ProbUV, order = 'F')
  p0            = np.prod(probUVRaveled[MVndx], axis = 1)
  
  Moments0 = {}
  Moments0['EY']   = EY
  Moments0['EX']   = EX
  Moments0['VarY'] = VarY
  Moments0['VarX'] = VarX
  
  returnDict             = {}
  returnDict['P']        = P
  returnDict['y']        = y
  returnDict['x']        = x
  returnDict['MVndx']    = MVndx
  returnDict['Moments0'] = Moments0
  returnDict['p0']       = p0

  
  logger.debug('done tauchen')
  return returnDict
Example #24
#!/usr/bin/env python

import os
import numpy as np
import scipy.optimize
import scipy.interpolate as si
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import pylab as p

from pymods.support.support import wrapLogger
import convexHull

logger = wrapLogger(loggerName='iwdInterpolationLogger',
                    streamVerb='DEBUG',
                    logFile=os.path.join(os.getcwd(), 'iwdInterpolation.log'))

np.seterr(all='raise')


class iwdInterpolation(object):
    '''
    Inverse weighted density interpolation
    '''
    def __init__(self, xMat, fx, normExp=4, dx=1e-3, makePlot=False):
        logger.debug('initializing new instance of iwdInterpolation')
        self.center = xMat.mean(0)
        angles = convexHull._angle_to_points(xMat, self.center)
        sortIndices = angles.argsort()
        self.angles = angles[sortIndices]
Example #25
#!/usr/bin/env python

import os
import numpy as np
import scipy.optimize
import scipy.interpolate as si
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import pylab as p

from pymods.support.support import wrapLogger
import convexHull


logger = wrapLogger(loggerName = 'iwdInterpolationLogger', streamVerb = 'DEBUG', logFile = os.path.join(os.getcwd(), 'iwdInterpolation.log'))

np.seterr(all='raise')

class iwdInterpolation(object):
    '''
    Inverse weighted density interpolation
    '''
    def __init__(self, xMat, fx, normExp = 4, dx = 1e-3, makePlot=False):
        logger.debug('initializing new instance of iwdInterpolation')
        self.center = xMat.mean(0)
        angles = convexHull._angle_to_points(xMat, self.center)
        sortIndices = angles.argsort()
        self.angles = angles[sortIndices]
        self.xMat = xMat[sortIndices, :]
        self.fx = fx[sortIndices]
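
The class body is truncated here; below is a hedged instantiation sketch based only on the __init__ signature shown (xMat holds one sample point per row, fx the corresponding function values). The numbers are illustrative.

# Hypothetical instantiation -- values are illustrative, not project data.
import numpy as np

xMat = np.array([[0., 0.], [1., 0.], [0., 1.], [1., 1.]])
fx   = np.array([0., 1., 1., 2.])
interp = iwdInterpolation(xMat, fx, normExp=4, dx=1e-3, makePlot=False)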