Code example #1
def do_refund():
    """Merchant will use doRefund after complete Payment. This method (Mandatory
    Parameters)require Transaction ID, Transaction Type and Refund Amount. This
    will initiate the refund for the specific Transaction ID.
    :return: None
    """
    """ Unique order for each order request """
    # Order id for which refund request needs to be raised 
    order_id = "YOUR_ORDER_ID"
    """ Unique refund id """
    ref_id = "UNIQUE_REFUND_ID"
    """ Transaction ID returned in Paytm\pg\process\PaymentStatus Api """
    # some old txn_id corresponding to order_id
    txn_id = "PAYTM_TRANSACTION_ID"
    """ Transaction Type for refund """
    txn_type = "REFUND"

    """Refund Amount to be refunded (should not be greater than the Amount paid in the Transaction)"""
    refund_amount = "1"

    """refund object will have all the information required to make refund call"""
    refund = RefundDetailBuilder(order_id, ref_id, txn_id, txn_type, refund_amount)\
        .set_sub_wallet_amount(SampleData.get_sub_wallet_amount())\
        .set_extra_params_map(SampleData.get_extra_params_map()).build()
    """Making call to SDK method which will return a AsyncRefundResponseBody object
    that will contain the Refund Response regarding the Transaction Id
    """
    response = Refund.doRefund(refund)
    print(response.get_json_response())
    # print(response.get_response_object().get_body().get_result_info().get_result_msg())
    """ End of Function """
Code example #2
File: SampleRefiner.py Project: MAZamarripa/foqus
    def adaptiveSample(fname, y, nSamples0, nSamplesNew):

        # read data
        data = LocalExecutionModule.readSampleFromPsuadeFile(fname)
        nSamples = SampleData.getNumSamples(data)

        # warn user of prohibitively long runtime if nSamples > 1000
        if nSamples > 1000:
            msg = 'SampleRefiner: Adaptive sampling can take a long time for ensembles with more than 1000 sample points. Proceed?'
            QMessageBox.warning(None, 'SampleRefiner: Warning of Long Runtime',
                                msg, QMessageBox.Ok, QMessageBox.Ok)

        # write script
        suffix = '.refine_%d' % (nSamples + nSamplesNew)
        fnameOut = Common.getLocalFileName(SampleRefiner.dname, fname, suffix)
        f = tempfile.SpooledTemporaryFile()
        if platform.system() == 'Windows':
            import win32api
            fname = win32api.GetShortPathName(fname)
        f.write('load %s\n' % fname)
        cmd = 'a_refine'
        f.write('%s\n' % cmd)
        f.write('%d\n' % y)  # output to use for sample refinement
        f.write('%d\n' % nSamples0)  # original size of sample
        f.write('%d\n' % nSamplesNew)  # number of samples to be added
        if platform.system() == 'Windows':
            head, tail = os.path.split(fnameOut)
            head = win32api.GetShortPathName(head)
            fnameOut = os.path.join(head, tail)
        f.write('write %s\n' % fnameOut)
        nOutputs = SampleData.getNumOutputs(data)
        if nOutputs > 1:
            f.write('n\n')  # write all outputs
        f.write('quit\n')
        f.seek(0)

        # invoke psuade
        out, error = Common.invokePsuade(f)
        f.close()

        # process error
        if error:
            return None

        # check output file
        if not os.path.exists(fnameOut):
            error = 'SampleRefiner: %s does not exist.' % fnameOut
            Common.showError(error, out)
            return None

        return fnameOut
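
A hedged usage sketch (the file name and sample counts are illustrative, and invoking adaptiveSample directly on SampleRefiner is an assumption inferred from the SampleRefiner.dname reference above):

# Refine an existing PSUADE ensemble by 50 adaptively chosen points,
# using output 1 to guide the refinement.
fnameOut = SampleRefiner.adaptiveSample('ensemble.psuade', 1, 100, 50)
if fnameOut is not None:
    print('Refined sample written to %s' % fnameOut)
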
Code example #3
File: RSInference.py Project: elbashandy/FOQUS
 def xvarinfo(fname):
     # read data to get variable names and ranges
     data = LocalExecutionModule.readSampleFromPsuadeFile(fname)
     inVarNames = SampleData.getInputNames(data)
     inVarTypes = SampleData.getInputTypes(data)
     inVarLB = SampleData.getInputMins(data)
     inVarUB = SampleData.getInputMaxs(data)
     xnames = []
     xmin = []
     xmax = []
     for name, vartype, lower, upper in zip(inVarNames, inVarTypes, inVarLB,
                                            inVarUB):
         if vartype == Model.VARIABLE:
             xnames.append(name)
             xmin.append(lower)
             xmax.append(upper)
     return (xnames, xmin, xmax)
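
A minimal sketch of calling xvarinfo on a PSUADE sample file (the file name is hypothetical):

# Print the name and range of every variable (non-fixed) input.
xnames, xmin, xmax = xvarinfo('ensemble.psuade')
for name, lo, hi in zip(xnames, xmin, xmax):
    print('%s in [%g, %g]' % (name, lo, hi))
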
Code example #4
    def yScatterPlot(data, y, x, cmd, mfile):
        types = data.getInputTypes()
        variableIndices = []
        for i in xrange(len(types)):
            if types[i] == Model.VARIABLE:
                variableIndices.append(i + 1)

        # plot
        inVarNames = SampleData.getInputNames(data)
        outVarNames = SampleData.getOutputNames(data)
        outVarName = outVarNames[y-1]
        if cmd == 'splot':
            ftitle = '"1-Input to 1-Output" Scatter Plot of %s' % outVarName
            xdat = []
            xlabel = []
            ptitle = []
            for i in x:
                datvar = 'X%d' % (variableIndices.index(i) + 1)
                xdat.append(Plotter.getdata(mfile, datvar))
                inVarName = inVarNames[i-1]
                xlabel.append(inVarName)
                ptitle.append('%s vs. %s' % (outVarName, inVarName))
            nx = len(x)
            xdat = np.array(xdat)
            datvar = 'Y'
            y = Plotter.getdata(mfile, datvar)
            ylim = [min(y), max(y)]
            ydat = [y]*nx
            ydat = np.array(ydat)
            ylabel = [outVarName]*nx
            Plotter.plotscatter(xdat,ydat,ftitle,ptitle,xlabel,ylabel,ylim)
        elif cmd == 'splot2':
            datvar = 'X'
            dat = Plotter.getdata(mfile, datvar)
            xdat = dat[:,0]
            ydat = dat[:,1]
            zdat = dat[:,2]
            ftitle = '"2-Input to 1-Output" Scatter Plot of %s' % outVarName
            x1 = x[0]
            x2 = x[1]
            xlabel = inVarNames[x1-1]
            ylabel = inVarNames[x2-1]
            zlabel = outVarName
            ptitle = '%s vs. (%s, %s)' % (zlabel, xlabel, ylabel)
            Plotter.plotscatter3d(xdat,ydat,zdat,ftitle,ptitle,xlabel,ylabel,zlabel)
Code example #5
def create_txn_token_with_paytm_sso_token_and_payment_mode():
    """Merchant can change create_txn_token_with_paytm_sso_token_and_payment_mode according
    to his need.

    This method create a paymentDetails object with required parameters, payment
    modes and PaytmSSOToken. This method calls SDK's createTxnToken method to get
    the InitiateTransactionResponseBody object having token which will be used in
    future transactions such as getting payment options

    Merchant can only use payment modes for this transaction which he will
    specify in this call if these payment modes are applicable on the merchant
    :return: txn token
    """

    """Merchants who want to use PG with Wallet and configure payment-modes for
    accepting payments with paytmSSOTokenS
    """

    """ Channel through which call initiated [enum (APP, WEB, WAP, SYSTEM)] """
    channel_id = SampleData.get_channel_id()

    """ Unique order for each order request """
    order_id = SampleData.generate_random_string(6)

    """ Transaction amount and the currency value """
    txn_amount = SampleData.get_money()

    """User information contains user details cid : <Mandatory> user unique
    identification with respect to merchant
    """
    user_info = SampleData.get_user_info()

    """ Paytm Token for a user """
    paytm_sso_token = SampleData.get_paytm_sso_token()

    """list of the payment modes which needs to enable. If the value provided then
    only listed payment modes are available for transaction
    """
    enable_payment_mode = SampleData.get_enable_payment_modes()

    """list of the payment modes which need to disable. If the value provided then
    all the listed payment modes are unavailable for transaction
    """
    disable_payment_mode = SampleData.get_disable_payment_modes()

    """paymentDetails object will have all the information required to make
    createTxnToken call
    """
    payment_details = PaymentDetailsBuilder(channel_id, order_id, txn_amount, user_info).set_paytm_sso_token(paytm_sso_token)\
        .set_enable_payment_mode(enable_payment_mode).set_disable_payment_mode(disable_payment_mode).build()

    """Making call to SDK method which will return a InitiateTransactionResponse
    object that will contain a token which can be used for validation purpose for
    future transactions
    """
    response = Payment.createTxnToken(payment_details)
    print(response.get_json_response())
    # print(response.get_response_object().get_body().get_txn_token())
    """ End of Function """
Code example #6
 def plotUA(data, y, mfile, moments):
     # plot
     outVarNames = SampleData.getOutputNames(data)
     outVarName = outVarNames[y-1]
     datvar = 'Y'
     dat = Plotter.getdata(mfile, datvar)
     ftitle = 'Uncertainty Analysis on Ensemble Data'
     ptitle = 'Probability Distribution for %s' % outVarName
     xlabel = outVarName
     ylabel = 'Probabilities'
     ystd = None
     rsPDF = None
     Plotter.plothist(dat, moments, ftitle, ptitle, xlabel, ylabel)
Code example #7
def create_txn_token_with_required_params():
    """Merchant can change create_txn_token_with_required_params according to his need
    This method create a PaymentDetail object having all the required parameters
    and call SDK's create TxnToken method to get InitiateTransactionResponseBody
    object having token which will be used in future transactions such as getting
    payment options
    :return: txn_token
    """

    """Channel through which call initiated [enum (APP, WEB, WAP, SYSTEM)]"""
    channel_id = SampleData.get_channel_id()

    """Unique order for each order request"""
    order_id = SampleData.generate_random_string(6)

    """Transaction amount and the currency value"""
    txn_amount = SampleData.get_money()

    """User information contains user details cid : <Mandatory> user unique
    identification with respect to merchant
    """
    user_info = SampleData.get_user_info()

    """paymentDetails object will have all the information required to make
    createTxnToken call
    """
    payment_details = PaymentDetailsBuilder(channel_id, order_id, txn_amount, user_info).build()
    """Making call to SDK method which will return a InitiateTransactionResponse
    object that will contain a token which can be used for validation purpose for
    future transactions
    """
    response = Payment.createTxnToken(payment_details)
    print("Json response :: ", response.get_json_response())
    # print("token:: ", response.get_response_object().get_body().get_txn_token())
    # print("result-code:: ", response.get_response_object().get_body().get_result_info().get_result_code())
    # print("result_status:: ", response.get_response_object().get_body().get_result_info().get_result_status())
    # print("result_msg:: ", response.get_response_object().get_body().get_result_info().get_result_msg())
    """ End of Function """
Code example #8
 def plotCA(data, y, mfile):
     # plot
     outVarNames = SampleData.getOutputNames(data)
     outVarName = outVarNames[y-1]
     ftitle = 'Correlation Analysis on Ensemble Data'
     ptitle = 'Correlation Analysis for %s' % outVarName
     xlabel = 'Input Parameters'
     xticklabels = []
     inputNames = data.getInputNames()
     inputTypes = data.getInputTypes()
     for name, inType in zip(inputNames, inputTypes):
         if inType == Model.VARIABLE:
             xticklabels.append(name)
     ylabel = 'Correlation Coefficients'
     ylabel = ['Pearson ' + ylabel, 'Spearman ' + ylabel]
     dat_pcc = Plotter.getdata(mfile, 'PCC')
     dat_scc = Plotter.getdata(mfile, 'SPEA')
     dat = [dat_pcc, dat_scc]
     std = [None]*len(dat)
     Plotter.plotbar(dat, std, ftitle, ptitle,
                     xlabel, ylabel, xticklabels, barlabels=True)
Code example #9
    def plotSA(data, cmd, y, mfile):
        # plot
        datvar = 'Mids'
        figtitle = {'me': 'First-order',
                    'ie': 'Second-order',
                    'tsi': 'Total-order'}
        title = {'me': 'Sobol First Order Indices',
                 'ie': 'Sobol First and Second Order Indices',
                 'tsi': 'Sobol Total Order Indices'}
        ylabel = 'Sobol Indices'
        xlabel = 'Input Parameters'
        xticklabels = []
        inputNames = data.getInputNames()
        inputTypes = data.getInputTypes()
        for name, inType in zip(inputNames, inputTypes):
            if inType == Model.VARIABLE:
                xticklabels.append(name)
        dat = Plotter.getdata(mfile, datvar)
        std = None
        outVarNames = SampleData.getOutputNames(data)
        outVarName = outVarNames[y-1]
        ftitle = '%s Sensitivity Analysis on Ensemble Data' % figtitle[cmd]
        ptitle = '%s for %s' % (title[cmd], outVarName)
        if cmd == 'ie':
            import numpy as np   # numpy used here only
            import math          # math used here only
            L = len(dat)
            M = int(math.sqrt(L))
            dat = np.reshape(dat, [M,M], order='F')
            yticklabels = xticklabels
            Plotter.plotbar3d(dat, std, ftitle, ptitle,
                              xlabel, ylabel, xticklabels, yticklabels, barlabels=True)
        else:
            Plotter.plotbar(dat, std, ftitle, ptitle,
                            xlabel, ylabel, xticklabels, barlabels=True)

        return mfile
Code example #10
def create_txn_token_with_all_params():

    """Merchant can change createTxnTokenwithAllParams according to his need.

    This method create a PaymentDetails object having all the parameters and
    calls SDK's createTxnToken method to get the InitiateTransactionResponseBody
    object having token which will be used in future transactions such as getting
    payment options
    """
    """Merchants who want to use PG with Wallet, configure paymentmodes, send
    Order details and Extended Information for accepting
    payments
    """

    """ Channel through which call initiated [enum (APP, WEB, WAP, SYSTEM)] """
    channel_id = SampleData.get_channel_id()

    """ Unique order for each order request """
    order_id = SampleData.generate_random_string(6)

    """ Transaction amount and the currency value """
    txn_amount = SampleData.get_money()

    """User information contains user details cid : <Mandatory> user unique
    identification with respect to merchant
    """
    user_info = SampleData.get_user_info()

    """ Paytm Token for a user """
    paytm_sso_token = SampleData.get_paytm_sso_token()

    """list of the payment modes which needs to enable. If the value provided then
        only listed payment modes are available for transaction
    """
    enable_payment_mode = SampleData.get_enable_payment_modes()

    """list of the payment modes which need to disable. If the value provided then
    all the listed payment modes are unavailable for transaction
    """
    disable_payment_mode = SampleData.get_disable_payment_modes()

    """ This contain the Goods info for an order. """
    goods = SampleData.get_goods_info()

    """ This contain the shipping info for an order. """
    shipping_info = SampleData.get_shipping_info()

    """ promo_code that user is using for the payment """
    promo_code = SampleData.get_promo_code()

    """ This contain the set of parameters for some additional information """
    extend_info = SampleData.get_extend_info()

    emi_option = SampleData.get_emi_option()
    card_token_required = SampleData.get_card_token_required()

    """PaymentDetail object will have all the information required to make
    createTxnToken call
    """
    payment_details = PaymentDetailsBuilder(channel_id, order_id, txn_amount, user_info)\
        .set_paytm_sso_token(paytm_sso_token).set_enable_payment_mode(enable_payment_mode)\
        .set_disable_payment_mode(disable_payment_mode).set_goods(goods).set_shipping_info(shipping_info)\
        .set_promo_code(promo_code).set_extend_info(extend_info).set_emi_option(emi_option)\
        .set_card_token_required(card_token_required).build()

    """Making call to SDK method which will return a InitiateTransactionResponse
        object that will contain a token which can be used for validation purpose for
        future transactions
    """
    response = Payment.createTxnToken(payment_details)
    print(response.get_json_response())
    # print(response.get_response_object().get_body().get_txn_token())
    """ End of Function """
Code example #11
    def readSampleFromPsuadeFile(fileName, returnModelOnly = False):
        f = open(fileName, 'r')
        lines = f.readlines()
        f.close()

        model = Model()
        path, fname = os.path.split(fileName)  # exclude path from file name
        model.setName(fname)

        namesIncludeNodes = False
        hasSampleData = False
        readData = False
        readInputs = False
        readOutputs = False
        numInputs = None
        driverName = None
        optDriverName = None
        auxDriverName = None
        sampleType = None
        legendreOrder = None
        sampleMethod = None
        inputData = None
        outputData = None
        runState = None

        inputNames = []
        outputNames = []
        inputTypes = []
        inputMins = []
        inputMaxs = []
        inputDefaults = []
        inputDists = []
        inputDistParam1s = []
        inputDistParam2s = []
    
        for line in lines:
            if line[0] == '#' and 'NAMESHAVENODES' in line:
                namesIncludeNodes = True
            if len(line) > 0 and line[0] != '#': #Not comment
                if line.startswith('PSUADE_IO'):
                    readData = not readData
                    hasSampleData = True
                elif line.startswith('INPUT'):
                    readInputs = True
                elif line.startswith('OUTPUT'):
                    readOutputs = True
                elif line.startswith('END'):
                    if readInputs:
                        readInputs = False
                    elif readOutputs:
                        readOutputs = False
                elif readData: # Read samples
                    if numInputs is None: #Have not read number of inputs
                        nums = line.split()
                        numInputs = int(nums[0])
                        numOutputs = int(nums[1])
                        numSamples = int(nums[2])
                        runState = [False]*numSamples
                        inputData = [0]*numSamples
                        outputData = [0]*numSamples
                        readSampleData = False
                    elif not readSampleData: # Sample number and run state
                        nums = line.split()
                        sampleNum = int(nums[0]) - 1
                        runState[sampleNum] = bool(int(nums[1]))
                        readSampleData = True
                        numValuesRead = 0
                        sampleInputs = [0] * numInputs
                        sampleOutputs = [0] * numOutputs
                    else:
                        if numValuesRead < numInputs: #Input value
                            if line.strip() in ['9.9999999999999997e+34', '9.9999999999999997e+034']:
                                line = 'nan'
                            sampleInputs[numValuesRead] = float(line)
                            numValuesRead = numValuesRead + 1
                        else: #Output value
                            if line.strip() in ['9.9999999999999997e+34', '9.9999999999999997e+034']:
                                line = 'nan'
                            sampleOutputs[numValuesRead - numInputs] = float(line)
                            numValuesRead = numValuesRead + 1
                            if numValuesRead - numInputs == numOutputs:
                                inputData[sampleNum] = sampleInputs
                                outputData[sampleNum] = sampleOutputs
                                readSampleData = False
                elif readInputs: # Read inputs
                    stripped = line.strip()
                    values = stripped.split()
                    if values[0] == 'variable': # Variable name min max
                        inputNames = inputNames + [values[2]]
                        inputTypes = inputTypes + [Model.VARIABLE]
                        inputMins = inputMins + [float(values[4])]
                        inputMaxs = inputMaxs + [float(values[5])]
                        inputDefaults = inputDefaults + [(float(values[4]) + float(values[5])) / 2]
                        inputDists = inputDists + ['U']
                        inputDistParam1s = inputDistParam1s + [None]
                        inputDistParam2s = inputDistParam2s + [None]
                    elif values[0] == 'fixed': # Fixed variable
                        inputNames = inputNames + [values[2]]
                        inputTypes = inputTypes + [Model.FIXED]
                        fixedVal = float(values[4])
                        inputMins = inputMins + [fixedVal]
                        inputMaxs = inputMaxs + [fixedVal]
                        inputDefaults = inputDefaults + [fixedVal]
                        inputDists = inputDists + ['U']
                        inputDistParam1s = inputDistParam1s + [None]
                        inputDistParam2s = inputDistParam2s + [None]
                        # Insert input values
                        if hasSampleData:
                            for i in xrange(len(inputData)):
                                inputRow = inputData[i]
                                inputRow.insert(len(inputNames) - 1, fixedVal)
                                inputData[i] = inputRow
                    elif values[0] == 'PDF': # Distribution
                        index = int(values[1]) - 1
                        inputDists[index] = values[2]
                        if len(values) > 3:
                            if values[2] == Distribution.getPsuadeName(Distribution.SAMPLE):
                                inputDistParam1s[index] = values[3]
                            else:
                                inputDistParam1s[index] = float(values[3])
                            if len(values) > 4:
                                inputDistParam2s[index] = float(values[4])
                            
                elif readOutputs: # Read outputs
                    stripped = line.strip() # Variable name
                    if stripped.startswith('variable'):
                        values = stripped.split()
                        outputNames = outputNames + [values[2]]
                else:
                    stripped = line.strip()
                    values = stripped.split()
                    if values[0] == 'sampling': #Sampling method
                        sampleMethod = values[2]
                    elif values[0] == 'driver': #Driver
                        if values[2] == 'NONE':
                            values[2] = None
                        driverName = values[2]
                        if values[2] is not None and values[2] != 'PSUADE_LOCAL' and not os.path.exists(driverName):
                            # Check if driver exists in same directory as this file
                            if os.path.exists(os.path.join(path, driverName)):
                                driverName = os.path.join(path, driverName)
                            else: # Don't set the name because the file does not exist
                                driverName = None
                    elif values[0] == 'opt_driver': #Optimization driver
                        if values[2] == 'NONE':
                            values[2] = None
                        optDriverName = values[2]
                        if values[2] is not None and values[2] != 'PSUADE_LOCAL' and not os.path.exists(optDriverName):
                            # Check if driver exists in same directory as this file
                            if os.path.exists(os.path.join(path, optDriverName)):
                                optDriverName = os.path.join(path, optDriverName)
                            else: # Don't set the name because the file does not exist
                                optDriverName = None
                    elif values[0] == 'aux_opt_driver': #Batch simulation driver
                        if values[2] == 'NONE':
                            values[2] = None
                        auxDriverName = values[2]
                        if values[2] is not None and not os.path.exists(auxDriverName):
                            # Check if driver exists in same directory as this file
                            if os.path.exists(os.path.join(path, auxDriverName)):
                                auxDriverName = os.path.join(path, auxDriverName)
                            else: # Don't set the name because the file does not exist
                                auxDriverName = None
                    elif values[0] == 'num_samples': #Number of samples
                        numSamples = int(values[2])
                    elif values[0] == 'analyzer': # Analysis values
                        if values[1] == 'rstype':
                            sampleType = values[3]
                            sampleType = ResponseSurfaces.getEnumValue(sampleType)
                        elif values[1] == 'rs_legendre_order':
                            legendreOrder = int(values[3])
                            
                        
        model.setInputNames(inputNames)
        model.setOutputNames(outputNames)
        model.setNamesIncludeNodes(namesIncludeNodes)
        model.setInputTypes(inputTypes)
        model.setInputMins(inputMins)
        model.setInputMaxs(inputMaxs)
        model.setInputDefaults(inputDefaults)
        model.setSelectedOutputs(range(len(outputNames)))
        model.setDriverName(driverName)
        model.setOptDriverName(optDriverName)
        model.setAuxDriverName(auxDriverName)
        model.setRunType(Model.LOCAL) 
##        print model.getInputNames()
##        print model.getOutputNames()
##        print model.getInputDistributions()
##        print model.getNumSamples()
##        print model.getNumInputs()
##        print model.getNumOutputs()
##        print model.getInputMins()
##        print model.getInputMaxs()
##        from SamplingMethods import SamplingMethods
##        print SamplingMethods.getFullName(model.getSampleMethod())
##        print model.getDriver()
        if returnModelOnly:
            return model

        data = SampleData(model)
        data.setFromFile(True)
        data.setNumSamples(numSamples)
        if sampleMethod is None:
            data.setSampleMethod(SamplingMethods.MC)
        else:
            data.setSampleMethod(sampleMethod)
        data.setInputDistributions(inputDists, inputDistParam1s, inputDistParam2s)
        data.setSampleRSType(sampleType)
        data.setLegendreOrder(legendreOrder)
        if inputData:
            data.setInputData(inputData)
        if outputData:
            data.setOutputData(outputData)
        if runState:
            data.setRunState(runState)
        return data
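
A short usage sketch, consistent with how the other examples in this collection call the reader (the file name is hypothetical):

# Load a PSUADE sample file and inspect it through the SampleData getters
# used elsewhere in these examples.
data = LocalExecutionModule.readSampleFromPsuadeFile('ensemble.psuade')
print('%d samples, %d inputs, %d outputs' % (SampleData.getNumSamples(data),
                                             SampleData.getNumInputs(data),
                                             SampleData.getNumOutputs(data)))

# Pass returnModelOnly=True to get just the Model metadata, without sample data.
model = LocalExecutionModule.readSampleFromPsuadeFile('ensemble.psuade',
                                                      returnModelOnly=True)
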
Code example #12
    def readSampleFromCsvFile(fileName, askForNumInputs = True):
        inputVals, outputVals, inputNames, outputNames, runState = LocalExecutionModule.readDataFromCsvFile(fileName, askForNumInputs)
        numInputs = inputVals.shape[1]
        numOutputs = outputVals.shape[1]

        # Setup model
        model = Model()
        path, fname = os.path.split(fileName)  # exclude path from file name
        model.setName(fname)
        model.setInputNames(inputNames)
        model.setOutputNames(outputNames)
        model.setInputTypes([Model.VARIABLE] * numInputs)
        mins = list(inputVals[0])
        for rowVals in inputVals[1:]:
            for col in xrange(numInputs):
                if rowVals[col] < mins[col]:
                    mins[col] = rowVals[col]
        model.setInputMins(mins)
        maxs = list(inputVals[0])
        for rowVals in inputVals[1:]:
            for col in xrange(numInputs):
                if rowVals[col] > maxs[col]:  # keep the running maximum
                    maxs[col] = rowVals[col]
        model.setInputMaxs(maxs)
        model.setInputDefaults([(min + max)/2 for min,max in zip(mins,maxs)])
        model.setSelectedOutputs(range(numOutputs))
        model.setRunType(Model.LOCAL)

        data = SampleData(model)
        data.setFromFile(True)
        data.setNumSamples(len(inputVals))
        data.setSampleMethod(SamplingMethods.MC)
        data.setInputDistributions(['U'] * numInputs, [None] * numInputs, [None] * numInputs)
        data.setInputData(inputVals)
        if outputVals.size > 0:
            data.setOutputData(outputVals)
        data.setRunState(runState)
        return data
Code example #13
    def performSA(fname, y, cmd):
        
        # read data
        data = LocalExecutionModule.readSampleFromPsuadeFile(fname)

        # check sample size 
        nSamples = SampleData.getNumSamples(data)
        nSamplesLB = {'me': 1000,    # lower bound on number of samples
                      'ie': 1000,
                      'tsi': 10000}
        N = nSamplesLB[cmd]
        if nSamples < N:
            error = 'RawDataAnalyzer: In function performSA(), %s requires at least %d samples.' % (cmd.upper(), N)
            Common.showError(error)
            return None

        # check number of inputs (for tsi only)
        nInputs = SampleData.getNumInputs(data)
        if cmd == 'tsi':
            D = 10
            if nInputs > D:
                error = 'RawDataAnalyzer: In function performSA(), %s requires at most %d inputs for total sensitivity analysis.' % (cmd.upper(), D)
                Common.showError(error)
                return None

        # write script
        f = tempfile.SpooledTemporaryFile()
        if platform.system() == 'Windows':
            import win32api
            fname = win32api.GetShortPathName(fname)
        f.write('load %s\n' % fname)
        f.write('%s\n' % cmd)
        f.write('%d\n' % y)   # select output
        f.write('quit\n')
        f.seek(0)

        # invoke psuade
        out, error = Common.invokePsuade(f)
        f.close()

        # process error
        if error:
            return None

        # check output file
        outfile = {'me': 'matlabme.m',
                   'ie': 'matlabaie.m',
                   'tsi': 'matlabtsi.m'}
        
        mfile = outfile[cmd]
        if os.path.exists(mfile):
            mfile_ = RawDataAnalyzer.dname + os.path.sep + mfile
            os.rename(mfile, mfile_)
            mfile = mfile_
        else:
            error = 'RawDataAnalyzer: %s does not exist.' % mfile
            Common.showError(error, out)
            return None

        RawDataAnalyzer.plotSA(data, cmd, y, mfile)
        return mfile
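
A hedged usage sketch (calling performSA on RawDataAnalyzer is inferred from the RawDataAnalyzer.dname and RawDataAnalyzer.plotSA references inside the function; 'me' requests first-order sensitivity indices per the lower-bound table above):

# First-order sensitivity analysis on output 1 of an ensemble file.
mfile = RawDataAnalyzer.performSA('ensemble.psuade', 1, 'me')
if mfile is not None:
    print('Sensitivity plot data written to %s' % mfile)
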
Code example #14
    def parse_sample(self, sheet, job, service, document_id):
        """ Parses each tab in the sheet as a separate sample.
        
        Args:
            sheet (google sheet): The google sheet to process
            job (SRGJob): the pointer to the job object to hold all the results
            service (google sheets service): the google sheets api service
            document_id (str): the google sheets document id to fetch and process

        
        """

        #get the title of the sheet to be used for the ranges reference in batchGet()
        title = sheet.get('properties').get('title')

        #rowCount is taken here so we know how many rows to extract from the sheet
        row_count = sheet.get('properties').get('gridProperties').get(
            'rowCount')

        #get the first row with all the column headings as well as the first two
        #columns which contain the sample details
        result = service.spreadsheets().values().batchGet(
            spreadsheetId=document_id,
            ranges=['{0}!A2:B101'.format(title),
                    '{0}!A1:Z1'.format(title)]).execute()

        #The first element in this array is the sample details columns;
        #the second element is the column names, i.e. the first row of the sheet
        valueRanges = result.get('valueRanges', [])

        #create a sample data object to store all the extracted data
        sample_data = SampleData()

        #Sample details columns, first column is name of detail and second
        #column is the value for the detail
        values = valueRanges[0].get('values', [])
        for row in values:
            if len(row) == 2 and row[0] != '':
                sample_data.add_detail(row[0], row[1])

        #names of all the columns in the spreadsheet. Need to get the index
        #of the Test Name and Result columns, these are the data columns that
        #need to be extracted from the sheet
        values = valueRanges[1].get('values', [])
        try:
            tn_col_index = values[0].index("Test Name")
            res_col_index = values[0].index("Result")
        except ValueError:
            #don't add this sample because the required columns did not exist
            return

        #convert the index of these columns to the column letter, columns
        #are letters in spreadsheets not numbers
        alpha_codes = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
        tn_code = alpha_codes[tn_col_index]
        res_code = alpha_codes[res_col_index]

        #make another request to the sheets api to get the test result data
        #from the Test Name and Result columns found above
        data_result = service.spreadsheets().values().batchGet(
            spreadsheetId=document_id,
            ranges=[
                '{0}!{1}2:{1}{2}'.format(title, tn_code, row_count),
                '{0}!{1}2:{1}{2}'.format(title, res_code, row_count)
            ]).execute()

        #The first element in data_values will be the Test Name column array
        #the second element will be the Result column array
        data_values = data_result.get('valueRanges', [])
        tn_data = data_values[0].get('values', [])
        res_data = data_values[1].get('values', [])

        #go through each row in the extracted data and get the value for
        #Test Name and Result
        for i in range(len(tn_data)):
            #Add the Result for this Test Name to the sample_data test result array
            if len(tn_data[i]) > 0 and len(res_data[i]) > 0:
                sample_data.add_result(tn_data[i][0], res_data[i][0])

        #if this sample had some useable data then add it to the job object
        if len(sample_data.details) > 0 and len(sample_data.test_results) > 0:
            job.add_sample(sample_data)
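
A rough sketch of how this parser might be driven for every tab of a spreadsheet; the Sheets API call mirrors the v4 client usage inside parse_sample, but the parser, job and credentials objects are assumptions (SRGJob and the owning class are only referenced, not defined, in the snippet):

# Fetch the spreadsheet metadata and parse each tab as a separate sample.
spreadsheet = service.spreadsheets().get(spreadsheetId=document_id).execute()
for sheet in spreadsheet.get('sheets', []):
    parser.parse_sample(sheet, job, service, document_id)
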
Code example #15
File: RSInference.py Project: elbashandy/FOQUS
    def infer(self,
              fname,
              ytable,
              xtable,
              exptable,
              genPostSample=False,
              addDisc=None,
              show=None,
              endFunction=None,
              disableWhilePlotting=None,
              userRegressionFile=None):

        xtable = copy.deepcopy(xtable)
        self.stopInfer = False

        # Function to execute after inference has finished.
        # Function would enable button again and such things.
        self.endFunction = endFunction

        # Widget to disable while plotting
        # If widget is not disabled, if clicked while plotting,
        # it would cause inference to start again.
        self.disableWhilePlotting = disableWhilePlotting

        # read data
        data = LocalExecutionModule.readSampleFromPsuadeFile(
            fname)  # does not assume rstype/order written to data

        # process output RS info
        nOutputs = SampleData.getNumOutputs(data)
        if len(ytable) != nOutputs:
            error = 'RSInference: In function infer(), "ytable" is expected to be a list of length "nOutputs".'
            Common.showError(error)
            return None
        indices = [
        ]  # indices[i] stores the output index for the i-th observed output variable
        rsIndices = [
        ]  # rsIndices[i] stores the RS index for the i-th observed output variable
        legendreOrders = [
        ]  # legendreOrders[i] stores the Legendre order for the i-th observed output variable
        userRegressionFiles = [
        ]  # userRegressionFiles[i] stores the user driver file for the i-th observed output variable
        userRegressionArgs = [
        ]  # userRegressionArgs[i] stores the output index, an optional argument to the driver file,
        # ... for the i-th observed output variable
        marsOptions = [
            None
        ] * nOutputs  # marsOptions[i] stores customized settings for MARS and MARSBag
        setMARS = []
        ### ytable should be an array of length N, where N is the number of outputs, observed and unobserved.
        ### if output is unobserved, ytable[i] = None
        ### if output is observed, ytable[i] should contain the following fields:
        ###     {'rsIndex':%d, 'legendreOrder':%d, 'userRegressionFile':%s, 'userRegressionArg':%d}
        ### if MARS options are set, then ytable[i] should contain additional fields:
        ###     {'rsIndex':%d, 'legendreOrder':%d, 'userRegressionFile':%s, 'userRegressionArg':%d, 'marsBases':%d, 'marsInteractions':%d}
        for i in xrange(nOutputs):
            obs = ytable[i]
            if obs is not None:
                if 'rsIndex' not in obs:
                    error = 'RSInference: In function infer(), each observed output must have rsIndex key'
                    Common.showError(error)
                    return None
                rsIndex = obs['rsIndex']
                if rsIndex == ResponseSurfaces.LEGENDRE:
                    order = None
                    if 'legendreOrder' in obs:
                        order = obs['legendreOrder']
                    userRegressionFiles.append(None)
                    userRegressionArgs.append(None)
                    if order is not None:
                        legendreOrders.append(order)
                    else:
                        error = 'RSInference: In function infer(), "legendreOrder" is required for LEGENDRE response surface.'
                        Common.showError(error)
                        return None
                elif rsIndex == ResponseSurfaces.USER:
                    userFile = None
                    if 'userRegressionFile' in obs:
                        userFile = obs['userRegressionFile']
                    userArg = None
                    if 'userRegressionArg' in obs:
                        userArg = obs['userRegressionArg']
                    legendreOrders.append(None)
                    if userFile is not None and os.path.exists(userFile):
                        userRegressionFiles.append(userFile)
                    else:
                        error = 'RSInference: In function infer(), "userRegressionFile" is required for USER REGRESSION response surface.'
                        Common.showError(error)
                        return None
                    if userArg is not None and isinstance(
                            userArg, (int, long, str)):
                        userRegressionArgs.append(userArg)
                    else:
                        userRegressionArgs.append(
                            1
                        )  # if no output index or name is given, use 1 as default
                elif rsIndex in [
                        ResponseSurfaces.MARS, ResponseSurfaces.MARSBAG
                ]:  # check for MARS options
                    if 'marsBases' in obs and 'marsInteractions' in obs:
                        mopts = RSAnalyzer.checkMARS(data, obs)
                        if mopts is not None:
                            setMARS.append(i)
                            marsOptions[i] = mopts
                            # marsBases, marsInteractions, marsNormOutputs = marsOptions

                rsIndices.append(rsIndex)
                indices.append(i)

        # delete unobserved outputs from data
        odelete = [
            i + 1 for i in xrange(nOutputs) if i not in indices
        ]  # stores the (1-indexed) output variable indices that are unobserved
        if odelete:
            # ... write script
            nOutputs = SampleData.getNumOutputs(data) - len(odelete)
            outfile = Common.getLocalFileName(RSInferencer.dname, fname,
                                              '.infdat')
            f = tempfile.SpooledTemporaryFile()
            if platform.system() == 'Windows':
                import win32api
                fname = win32api.GetShortPathName(fname)
            f.write('load %s\n' % fname)
            odelete_reverse = odelete[::-1]  # reverse output indices to be deleted
            for y in odelete_reverse:  # at each odelete, the highest-indexed output needs to be deleted first
                f.write('odelete\n')
                f.write('%d\n' % y)
            if platform.system() == 'Windows':
                head, tail = os.path.split(outfile)
                head = win32api.GetShortPathName(head)
                outfile = os.path.join(head, tail)
            f.write('write %s\n' % outfile)
            f.write('n\n')  # write all outputs
            f.write('quit\n')
            f.seek(0)
            out, error = Common.invokePsuade(f)
            f.close()
            if out is None:
                return
            # ... operate on the new data file
            fname = outfile
            data = LocalExecutionModule.readSampleFromPsuadeFile(fname)
            marsOptions = [
                m for i, m in enumerate(marsOptions) if i in indices
            ]

        # process input prior info
        ### xtable should be an array of length N, where N is the number of inputs.
        ### xtable[i] should contain the following fields:
        ###     {'name':%s, 'type':%s, 'value':%s, 'min':%f, 'max':%f, 'pdf':%d, 'param1':%f, 'param2':%f}
        inputTypes = data.getInputTypes()
        nVariableInputs = inputTypes.count(Model.VARIABLE)
        # ... get design inputs
        designInVars = [
            i + 1 for i, e in enumerate(xtable) if e['type'] == 'Design'
        ]
        # ... get fixed inputs
        fixedInVars = [
            i + 1 for i, e in enumerate(xtable) if e['type'] == 'Fixed'
        ]
        indexfile = None
        if fixedInVars:
            # ... write index file based on fixed inputs
            indexfile = RSInferencer.dname + os.path.sep + 'indexfile'
            f = open(indexfile, 'w')
            f.write('%d\n' % nVariableInputs)
            for i in xrange(nVariableInputs):
                k = i + 1
                e = xtable[i]
                if e['type'] == 'Fixed':
                    f.write('%d %d %f\n' % (k, 0, e['value']))
                else:
                    f.write('%d %d %d\n' % (k, k, 0))
            f.close()
        # Expand xtable and show in cases of user regression where all inputs are used in data, not just the variable ones
        j = 0
        newTable = [None] * nVariableInputs
        inputNames = data.getInputNames()
        variableInputNames = [
            inputName for i, inputName in enumerate(inputNames)
            if inputTypes[i] == Model.VARIABLE
        ]
        newShow = []
        for i in xrange(nVariableInputs):
            e = xtable[j]
            if e['name'] == variableInputNames[i]:
                # ... nullify xtable's entries corresponding to design/fixed inputs
                if e['type'] not in ['Design', 'Fixed']:
                    newTable[i] = e
                if j in show:
                    newShow.append(i)
                j += 1
                if j == len(xtable):
                    break
        xtable = newTable
        show = newShow
        # ... write out RS data file with only variable inputs' prior info
        p = RSAnalyzer.parsePrior(data, xtable)
        if p is not None:
            outfile = Common.getLocalFileName(RSInferencer.dname, fname,
                                              '.infdat')
            y = 1
            RSAnalyzer.writeRSdata(
                outfile,
                y,
                data,
                indexfile=indexfile,
                randseed=1211319841,  ### TO DO: remove rand seed?
                inputLowerBounds=p['inputLB'],
                inputUpperBounds=p['inputUB'],
                inputPDF=p['dist'])
            # ... operate on the new data file
            fname = outfile
            data = LocalExecutionModule.readSampleFromPsuadeFile(fname)

        # write MCMC data file
        ### *** THE FORMAT OF THIS DATA FILE IS: (O1 means output 1,
        ### m - no. of design parameters, p - no. of experiments):
        ### PSUADE_BEGIN
        ### <p> <nOutputs> <m> <design parameter identifiers>
        ### 1 <design values...> <O1 mean> <O1 std dev> ... <On std dev>
        ### 2 <design values...> <O1 mean> <O1 std dev> ... <On std dev>
        ### ...
        ### p <design values...> <O1 mean> <O1 std dev> ... <On std dev>
        ### PSUADE_END
        ###
        ### exptable should be an array of length p.
        ### exptable[i] should be a numeric array:
        ###        [expIndex, designVal_1, ..., designVal_m, outputMean_1, outputStd_1, ..., outputMean_n, outputStd_n]
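        ### Illustrative example (not from the source): with m = 1 design
        ### parameter and n = 2 observed outputs, one exptable row could be
        ###     [1, 0.35, 1.20, 0.05, 7.80, 0.40]
        ### i.e. [expIndex, designVal_1, outputMean_1, outputStd_1,
        ###       outputMean_2, outputStd_2]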
        mcmcfile = RSInferencer.dname + os.path.sep + 'mcmcFile'
        f = open(mcmcfile, 'w')
        f.write('PSUADE_BEGIN\n')
        nOutputs = SampleData.getNumOutputs(
            data)  # number of (observed) outputs
        nExp = len(exptable)  # number of experiments
        nDesignInVars = len(designInVars)  # number of design parameters
        delim = ' '
        # ... write header
        dstr = ''
        if designInVars:
            dstr = [str(s) for s in designInVars]
            dstr = delim.join(dstr)
        f.write('%d %d %d %s\n' % (nExp, nOutputs, nDesignInVars, dstr))
        # ... write data
        nterms = nDesignInVars + 2 * nOutputs + 1
        for i in xrange(nExp):
            e = exptable[i]
            if len(e) == nterms:
                estr = [str(s) for s in e]
                estr = delim.join(estr)
                f.write('%s\n' % estr)
            else:
                error = 'RSInference: In function infer(), the %d-th row of exptable expects %d terms.' % (
                    i + 1, nterms)
                Common.showError(error, out)
                return None
        f.write('PSUADE_END\n')
        f.close()

        # write script
        cmd = 'rsmcmc'
        f = tempfile.SpooledTemporaryFile()
        if platform.system() == 'Windows':
            import win32api
            fname = win32api.GetShortPathName(fname)
        f.write('load %s\n' % fname)
        if len(setMARS) > 0:
            f.write('rs_expert\n')
        f.write(
            'ana_expert\n'
        )  # turn on analysis expert mode, required for non-uniform input priors
        f.write('%s\n' % cmd)
        if platform.system() == 'Windows':
            import win32api
            mcmcfile = win32api.GetShortPathName(mcmcfile)
        f.write('%s\n' %
                mcmcfile)  # spec file for building the likelihood function
        f.write('y\n')  # do include response surface uncertainties
        for i in xrange(
                nOutputs
        ):  # for each output, set RS; all outputs are observed in this data file
            rsIndex = rsIndices[i]
            f.write('%d\n' % rsIndex)
            if rsIndex == ResponseSurfaces.LEGENDRE:
                f.write('%d\n' % legendreOrders[i])
            elif rsIndex == ResponseSurfaces.USER:
                f.write('1\n')  # number of basis functions
                driverFile = userRegressionFiles[i]
                if platform.system() == 'Windows':
                    driverFile = win32api.GetShortPathName(driverFile)
                f.write('%s\n' % driverFile)  # driver file
                f.write('y\n')  # apply auxiliary arg (output index)
                arg = userRegressionArgs[i]
                if isinstance(arg, (int, long)):
                    formatString = '%d\n'
                else:
                    formatString = '%s\n'
                f.write(formatString % arg)  # output name
            elif indices[i] in setMARS:
                if rsIndex == ResponseSurfaces.MARSBAG:
                    f.write('0\n')  # mean (0) or median (1) mode
                    f.write(
                        '100\n'
                    )  # number of MARS instantiations [range:10-5000, default=100]
                    ### TO DO: revert back to 100 for deployment
                marsBases, marsInteractions, marsNormOutputs = marsOptions[i]
                f.write('%d\n' % marsBases)
                f.write('%d\n' % marsInteractions)
                if rsIndex == ResponseSurfaces.MARS:
                    f.write('%s\n' % marsNormOutputs)
        f.write(
            '5000\n'
        )  # MCMC sample increment [range: 5000 - 50000]; default = 10000
        f.write('20\n'
                )  # number of bins in histogram [range: 10 - 25]; default = 20
        f.write('-1\n')  # generate posterior plots for all inputs
        disc = 'n'
        if addDisc:
            disc = 'y'
        f.write('%s\n' % disc)  # add discrepancy function
        saveSample = 'n'
        if genPostSample:
            saveSample = 'y'
        f.write('%s\n' %
                saveSample)  # generate a sample from posterior distributions
        f.write(
            '60\n'
        )  # sample size to construct MCMC proposal distribution; default = 100
        if addDisc:
            f.write(
                '18\n')  # set RS for discrepancy function; default = kriging
            ### TO DO: allow user to customize RS for discrepancy function
            f.write('n\n')  # set nominal values for other inputs
            if len(setMARS) > 0:
                f.write('3\n')  # Kriging slow mode
                f.write('1e-4\n')  # Kriging tolerance
        f.write('3\n')  # number of MCMC chains; default = 3
        f.write(
            '1.05\n'
        )  # PSRF (convergence metric for MCMC) threshold; default = 1.05
        f.write('quit\n')
        f.seek(0)

        # invoke psuade
        self.fname = fname
        self.genPostSample = genPostSample
        self.addDisc = addDisc
        self.data = data
        self.show = show
        self.textDialog = Common.textDialog()
        self.thread = psuadeThread(self, f, self.finishInfer, self.textDialog)
        self.thread.start()
Code example #16
    def showRS(fnameRS, y, x, rsdim, rsMethodName, **kwargs):

        from ResponseSurfaces import ResponseSurfaces

        # read data
        data = LocalExecutionModule.readSampleFromPsuadeFile(fnameRS)   # rstype/order written to data
        nSamples = SampleData.getNumSamples(data)
        order = SampleData.getLegendreOrder(data)

        types = data.getInputTypes()
        inputNames = data.getInputNames()
        variableIndices = []
        variableNames = []
        for i in xrange(len(types)):
            if types[i] == Model.VARIABLE:
                variableIndices.append(i + 1)
                variableNames.append(inputNames[i])

        rsIndex = ResponseSurfaces.getEnumValue(rsMethodName)

        # process keyworded arguments
        vmax = np.inf
        vmin = -vmax
        userRegressionFile = None
        setMARS = False
        for key in kwargs:
            k = key.lower()
            if k == 'rsoptions': 
                rsOptions = kwargs[key]
                if rsIndex == ResponseSurfaces.LEGENDRE: 
                    if rsOptions is None:
                        error = 'Visualizer: In function showRS(), "legendreOrder" is required for LEGENDRE response surface.'
                        Common.showError(error)
                        return None
                    else:
                        if isinstance(rsOptions, dict):
                            legendreOrder = rsOptions['legendreOrder']
                        else:
                            legendreOrder = rsOptions
                        if type(legendreOrder) == int:
                            order = legendreOrder
                        else:
                            error = 'Visualizer: In function showRS(), "legendreOrder" is required for LEGENDRE response surface.'
                            Common.showError(error)
                            return None
                elif rsIndex in [ResponseSurfaces.MARS, ResponseSurfaces.MARSBAG]: # check for MARS options
                    if rsOptions is not None:
                        from RSAnalyzer import RSAnalyzer
                        marsOptions = RSAnalyzer.checkMARS(data, rsOptions)
                        if marsOptions is not None:
                            marsBases, marsInteractions, marsNormOutputs = marsOptions
                            setMARS = True
            elif k == 'vmin':
                vmin = kwargs[key]
            elif k == 'vmax':
                vmax = kwargs[key]
            elif k == 'userregressionfile':
                userRegressionFile = kwargs[key]

        # check user arguments
        if rsdim < 1 or rsdim > 3:
            error = 'Visualizer: showRS() supports only rs1, rs2 and rs3.'
            Common.showError(error)
            return None
        nx = len(x)
        if nx != rsdim:
            error = 'Visualizer: showRS() expects x to be a list of length %d.' % rsdim
            Common.showError(error)
            return None
        uniqx = list(set(x))
        if nx != len(uniqx):
            error = 'Visualizer: showRS() expects unique elements in x.'
            Common.showError(error)
            return None
        if vmin >= vmax:
            error = 'Visualizer: showRS() requires vmin to be less than vmax.'
            Common.showError(error)
            return None

        # write script
        cmd = 'rs%d' % rsdim
        f = tempfile.SpooledTemporaryFile()
        if setMARS:
            f.write('rs_expert\n')
        if platform.system() == 'Windows':
            import win32api
            fnameRS = win32api.GetShortPathName(fnameRS)
        f.write('load %s\n' % fnameRS)
        if nSamples > 4000:
            f.write('rsmax %d\n' % nSamples)
        f.write('%s\n' % cmd)
        ngrid = 0
        if rsdim == 2:
            ngrid = 256              # select grid resolution (32-256)
            f.write('%d\n' % ngrid)
        elif rsdim == 3:
            ngrid = 32               # select grid resolution (16-32)
            f.write('%d\n' % ngrid)
        f.write('%d\n' % rsIndex)    # select response surface
        if rsIndex == ResponseSurfaces.USER and userRegressionFile is not None:
            f.write('1\n')                         # number of basis functions
            f.write('%s\n' % userRegressionFile)   # surrogate file
            f.write('y\n')                         # apply auxiliary arg (output index)
            outVarNames = data.getOutputNames()
            outName = outVarNames[y - 1]
            outName = Common.getUserRegressionOutputName(outName, userRegressionFile, data)
            f.write('%s\n' % outName)              # output name
        elif setMARS:
            if rsIndex == ResponseSurfaces.MARSBAG:
                f.write('0\n')    # mean (0) or median (1) mode
                f.write('100\n')   # number of MARS instantiations [range:10-5000, default=100]
                                  ### TO DO: revert back to 100 for deployment
            f.write('%d\n' % marsBases)
            f.write('%d\n' % marsInteractions)
            if rsIndex == ResponseSurfaces.MARS:
                f.write('%s\n' % marsNormOutputs)
        nInputs = SampleData.getNumInputs(data)
        if nInputs > rsdim:
            for d in xrange(0,rsdim):
                #f.write('%d\n' % (variableIndices.index(x[d]) + 1))   # select input
                f.write('%d\n' % (variableNames.index(x[d]) + 1))   # select input
            f.write('y\n')               # set nominal values for other inputs
        elif nInputs == rsdim:
            for d in xrange(0,rsdim-1):  # psuade can infer last remaining input
                #f.write('%d\n' % (variableIndices.index(x[d]) + 1))   # select input
                f.write('%d\n' % (variableNames.index(x[d]) + 1))   # select input
        elif nInputs < rsdim:
            error = 'Visualizer: In showRS(), %s cannot be performed on a %d-input system.' % (cmd.upper(), nInputs)
            Common.showError(error)
            return None
        f.write('%d\n' % y)   # select output
        if rsIndex == ResponseSurfaces.LEGENDRE:
            f.write('%d\n' % order)
        f.write('n\n')            # select no for selecting lower threshold
        f.write('n\n')            # select no for selecting upper threshold
        f.write('quit\n')
        f.seek(0)

        # print the psuade script to screen
        #for line in f:
        #    print line.strip()
        #f.seek(0)

        # invoke psuade
        out, error = Common.invokePsuade(f)
        f.close()

        # process error
        if error:
            return None

        # check output file
        mfile = 'matlab' + cmd + '.m'
        if os.path.exists(mfile):
            mfile_ = Visualizer.dname + os.path.sep + mfile
            os.rename(mfile, mfile_)
            mfile = mfile_
        else:
            error = 'Visualizer: %s does not exist.' % mfile
            Common.showError(error, out)
            return None

        Visualizer.showRSPlot(data, y, x, rsdim, ngrid, rsMethodName, vmin, vmax, mfile)
        return mfile
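
A hedged usage sketch (showRS as a static member of Visualizer is inferred from the Visualizer.dname and Visualizer.showRSPlot references; per the variableNames.index(...) lookup above, x holds the names of the inputs to vary; the file name, input names and the 'MARS' method string accepted by ResponseSurfaces.getEnumValue are assumptions):

# 2-D response surface of output 1 over two named inputs, fit with MARS.
mfile = Visualizer.showRS('rsdata.psuade', 1, ['temperature', 'pressure'], 2, 'MARS')
if mfile is not None:
    print('Response surface plot data in %s' % mfile)
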
Code example #17
File: DataProcessor.py Project: MAZamarripa/foqus
    def filterdata(fname, **kwargs):

        # read data
        data = LocalExecutionModule.readSampleFromPsuadeFile(fname)

        # process keyworded arguments
        filterVar = None
        vmin = None
        vmax = None
        for key in kwargs:
            k = key.lower()
            if k == 'input':
                filterVar = kwargs[key]
                cmd = 'ifilter'

                # Get only input variables that are variable
                types = data.getInputTypes()
                inVarNames = SampleData.getInputNames(data)
                varNames = []
                for i in xrange(len(types)):
                    if types[i] == Model.VARIABLE:
                        varNames.append(inVarNames[i])

            elif k == 'output':
                filterVar = kwargs[key]
                cmd = 'ofilter'
                varNames = SampleData.getOutputNames(data)
            elif k == 'vmin':
                vmin = kwargs[key]
            elif k == 'vmax':
                vmax = kwargs[key]

        if filterVar is None:
            error = 'DataProcessor: In function filterData(), the filter variable is not specified.'
            Common.showError(error)
            return None
        if (vmin is None) or (vmax is None) or (vmin >= vmax):
            error = 'DataProcessor: filterData() requires a valid [min, max] range to filter the variable "%s".' % filterVar
            Common.showError(error)
            return None

        # check if the filter variable exists 
        if filterVar not in varNames:
            error = 'DataProcessor: In function filterData(), %s does not contain the filter variable "%s".' % (fname, filterVar)
            Common.showError(error)
            outfile = fname
            return outfile

        # get the output index to filter variable
        filterIndex = varNames.index(filterVar) + 1    # psuade is 1-indexed

        # write script
        outfile = Common.getLocalFileName(DataProcessor.dname, fname, '.filtered')
        f = tempfile.SpooledTemporaryFile()
        if platform.system() == 'Windows':
            import win32api
            fname = win32api.GetShortPathName(fname)        
        f.write('load %s\n' % fname)
        f.write('%s\n' % cmd)            # invoke ifilter or ofilter
        if (cmd == 'ifilter' and data.getNumInputs() > 1) or (cmd == 'ofilter' and data.getNumOutputs() > 1):
            f.write('%d\n' % filterIndex)    # select the filter variable
        f.write('%f\n' % vmin)           # extract points within range [vmin, vmax]
        f.write('%f\n' % vmax)
        if platform.system() == 'Windows':
            head, tail = os.path.split(outfile)
            head = win32api.GetShortPathName(head)
            outfile = os.path.join(head, tail)
        f.write('write %s\n' % outfile) 
        f.write('n\n')                   # write all outputs
        f.write('quit\n')
        f.seek(0)

        # invoke psuade
        out, error = Common.invokePsuade(f)
        f.close()

        # Process error
        if error:
            return None

        if not os.path.exists(outfile):
            error = 'DataProcessor: %s does not exist.' % outfile
            Common.showError(error, out)
            return None

        return outfile
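
A hedged usage sketch (filterdata as a static member of DataProcessor is inferred from the DataProcessor.dname reference; the file, variable name and bounds are illustrative):

# Keep only the sample points whose output 'removalCO2' lies in [85.0, 100.0].
outfile = DataProcessor.filterdata('ensemble.psuade', output='removalCO2',
                                   vmin=85.0, vmax=100.0)
if outfile is not None:
    print('Filtered ensemble written to %s' % outfile)
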
Code example #18
    def yScatter(fname, y, x, cmd):
        # x is 1-indexed

        # read data
        data = LocalExecutionModule.readSampleFromPsuadeFile(fname)
        nInputs = SampleData.getNumInputs(data)

        types = data.getInputTypes()
        variableIndices = []
        for i in xrange(len(types)):
            if types[i] == Model.VARIABLE:
                variableIndices.append(i + 1)

        # process user arguments
        if cmd == 'splot':
            validInputs = set(x).issubset(xrange(1,nInputs+1))
            if not validInputs:
                error = 'Visualizer: In function yScatter(), x is out of range for valid inputs.'
                Common.showError(error)
                return None
        elif cmd == 'splot2':
            if len(x) == 2:
                x1 = x[0]
                x2 = x[1]
                if x1 == x2:
                    error = 'Visualizer: In function yScatter(), %s requires unique elements in x.' % cmd.upper()
                    Common.showError(error)
                    return None
            else:
                error = 'Visualizer: In function yScatter(), %s expects x to be a list of length 2.' % cmd.upper()
                Common.showError(error)
                return None

        # write script
        f = tempfile.SpooledTemporaryFile()
        if platform.system() == 'Windows':
            import win32api
            fname = win32api.GetShortPathName(fname)
        f.write('load %s\n' % fname)
        f.write('%s\n' % cmd)
        if cmd == 'splot2':
            f.write('%d\n' % (variableIndices.index(x1) + 1))   # select input
            f.write('%d\n' % (variableIndices.index(x2) + 1))   # select input
        f.write('%d\n' % y)   # select output
        f.write('quit\n')
        f.seek(0)
        
        # invoke psuade
        out, error = Common.invokePsuade(f)
        f.close()

        # process error
        if error:
            return None

        # check output file
        outfile = {'splot': 'matlabsp.m',
                   'splot2': 'matlabsp2.m'}
        mfile = outfile[cmd]
        if os.path.exists(mfile):
            mfile_ = Visualizer.dname + os.path.sep + mfile
            os.rename(mfile, mfile_)
            mfile = mfile_
        else:
            error = 'Visualizer: %s does not exist.' % mfile
            Common.showError(error, out)
            return None

        Visualizer.yScatterPlot(data, y, x, cmd, mfile)

        return mfile
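
A hedged usage sketch (yScatter as a static member of Visualizer is inferred from the Visualizer.dname and Visualizer.yScatterPlot references; the file name and the 1-indexed input/output indices are illustrative):

# Scatter plots of output 1 against inputs 1 and 3 (one plot per input).
mfile = Visualizer.yScatter('ensemble.psuade', 1, [1, 3], 'splot')
# Single 2-input-to-1-output scatter plot of output 1 against inputs (1, 2).
mfile2 = Visualizer.yScatter('ensemble.psuade', 1, [1, 2], 'splot2')
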