Example #1
import numpy as np


def num_crl(wf_n):
    """Builds the complex wave function over time from an array of
    real/imaginary component vectors.

    Args:
        wf_n (numpy array): Time vector in row 0, followed by alternating
            real and imaginary parts of the wave function over time.

    Returns:
        numpy array, complex: The complex wave function over time.
        numpy array: The time vector.
    """

    # split off the time vector (row 0) from the data rows
    time_vc = wf_n[0]
    wf_n = np.delete(wf_n, [0], axis=0)

    # length of each vector and number of complex vectors
    # (rows come in real/imaginary pairs)
    t_wf = len(wf_n[0])
    p_wf = len(wf_n) // 2

    # combine each real/imaginary pair of rows into one complex row
    comp_vc = np.zeros([p_wf, t_wf], dtype=np.complex128)
    for n in range(p_wf):
        comp_vc[n] = wf_n[n * 2] + wf_n[n * 2 + 1] * 1j
    return comp_vc, time_vc
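A minimal usage sketch, assuming the input stacks a time row on top of one real/imaginary pair of rows:

import numpy as np

wf_n = np.array([[0.0, 1.0, 2.0],     # time
                 [1.0, 0.5, 0.25],    # real part
                 [0.0, 0.1, 0.2]])    # imaginary part
comp_vc, time_vc = num_crl(wf_n)
print(comp_vc)  # [[1.  +0.j   0.5 +0.1j  0.25+0.2j]]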
Example #2
    def __init__(self, objects, lights, camera, background_color, max_depth):
        self.objects = objects
        self.lights = lights
        self.camera = camera
        self.background_color = background_color
        self.max_depth = max_depth
        # one RGB byte triple per canvas pixel
        self.image = np.zeros(
            (self.camera.canvas_width, self.camera.canvas_height, 3),
            dtype=np.uint8)
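A minimal construction sketch, assuming the enclosing class is named Scene and that anything exposing canvas_width/canvas_height attributes can stand in for the camera:

from collections import namedtuple

# hypothetical stand-in camera with just the attributes the constructor reads
Camera = namedtuple("Camera", ["canvas_width", "canvas_height"])
scene = Scene(objects=[], lights=[], camera=Camera(640, 480),
              background_color=(0, 0, 0), max_depth=3)
print(scene.image.shape)  # (640, 480, 3)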
Example #3
import numpy as np


def predict_estimators(estimators, X):
    # Collect the predictions of a list of fitted classifiers, one column
    # per estimator, for use as meta-features
    X_meta = np.zeros((X.shape[0], len(estimators)))
    for i, m in enumerate(estimators):
        X_meta[:, i] = m.predict(X)
    return X_meta
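A usage sketch with two scikit-learn classifiers (an assumption; any fitted objects with a predict method will do):

import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier

X = np.array([[0.0], [1.0], [2.0], [3.0]])
y = np.array([0, 0, 1, 1])
models = [LogisticRegression().fit(X, y),
          DecisionTreeClassifier().fit(X, y)]
X_meta = predict_estimators(models, X)
print(X_meta.shape)  # (4, 2): one column of predictions per estimator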
Example #4
import numpy as np


def returnize0(nds):
    """
    @summary Computes stepwise (usually daily) returns relative to 0, where
    0 implies no change in value.
    @return None; the array is revised in place
    """
    s = np.shape(nds)
    if len(s) == 1:
        nds = np.expand_dims(nds, 1)
    nds[1:, :] = (nds[1:, :] / nds[0:-1, :]) - 1
    nds[0, :] = np.zeros(nds.shape[1])
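A worked example of the in-place conversion: prices 100, 110, 99 become returns 0, +10%, -10%.

import numpy as np

prices = np.array([100.0, 110.0, 99.0])
returnize0(prices)
print(prices)  # [ 0.   0.1 -0.1]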
Example #5
    def simulate(self, parameters, scan_rates):

        forward_sweep_pos = np.zeros(len(scan_rates))
        reverse_sweep_pos = np.zeros(len(scan_rates))
        for i in range(len(scan_rates)):
            self.dim_dict["v"] = scan_rates[i]
            self.nd_param = params(self.dim_dict)
            self.calculate_times()
            volts = self.define_voltages()
            current = super().simulate(parameters, [])
            if self.simulation_options["synthetic_noise"] != 0:
                current = self.add_noise(
                    current,
                    self.simulation_options["synthetic_noise"] * max(current))
                # current = self.rolling_window(current, 8)
            if self.simulation_options["record_exps"]:
                self.saved_sims["current"].append(current)
                self.saved_sims["voltage"].append(volts)
            forward_sweep_pos[i], reverse_sweep_pos[i] = \
                self.trumpet_positions(current, volts)
        if "dcv_sep" in self.optim_list:
            forward_sweep_pos += self.nd_param.nd_param_dict["dcv_sep"]
            reverse_sweep_pos -= self.nd_param.nd_param_dict["dcv_sep"]
        if self.simulation_options["trumpet_method"] == "both":
            if self.simulation_options["trumpet_test"]:
                # plot simulated peak positions against the stored test data
                print(parameters)
                log10_scans = np.log10(scan_rates)
                fig, ax = plt.subplots(1, 1)
                ax.scatter(log10_scans, forward_sweep_pos)
                ax.scatter(log10_scans, reverse_sweep_pos)
                ax.scatter(log10_scans, self.secret_data_trumpet[:, 0])
                ax.scatter(log10_scans, self.secret_data_trumpet[:, 1])
                plt.show()
            return np.column_stack((forward_sweep_pos, reverse_sweep_pos))
        elif self.simulation_options["trumpet_method"] == "forward":
            return forward_sweep_pos
        else:
            return reverse_sweep_pos
Example #6
    def simulate(self, parameters, frequency_range):
        maxes = np.zeros(len(frequency_range))
        if not self.simulation_options["amplitudes_set"]:
            raise ValueError("Need to define SW amplitudes")
        for j in range(len(frequency_range)):
            self.dim_dict["omega"] = frequency_range[j]
            delta_p = np.zeros(len(self.Esw_range))
            for q in range(len(self.Esw_range)):
                self.dim_dict["SW_amplitude"] = self.Esw_range[q]
                self.nd_param = params(self.dim_dict)
                current = super().simulate(parameters, [])
                f, b, net, potential = super().SW_peak_extractor(current)

                if self.simulation_options["synthetic_noise"] != 0:
                    current = self.add_noise(
                        net,
                        self.simulation_options["synthetic_noise"] * max(net))
                    # current = self.rolling_window(current, 8)
                else:
                    current = net

                if self.simulation_options["record_exps"]:
                    self.saved_sims["current"].append(current)
                    # `volts` was never defined here; record the SW potential
                    self.saved_sims["voltage"].append(potential)
                delta_p[q] = max(current) / self.Esw_range[q]
            # amplitude giving the largest normalised peak at this frequency
            maxes[j] = self.Esw_range[np.argmax(delta_p)]

        if self.simulation_options["SWVtest"]:
            plt.scatter(1 / frequency_range, maxes)
            plt.scatter(1 / frequency_range, self.test)
            plt.show()

        return maxes
Example #7
import numpy as np


def auto_corr(comp_vc, p_wf, t_wf):
    """Computes the autocorrelation function from the complex wave function.

    Args:
        comp_vc (numpy array, complex): Wave function over time.
        p_wf (int): Number of wave-function vectors (rows of comp_vc).
        t_wf (int): Number of time steps (columns of comp_vc).

    Returns:
        numpy array, complex: The autocorrelation function over time.
    """

    # autocorrelation function: overlap of the t = 0 column with each
    # later time column, summed over the p_wf vectors
    ac_file = np.zeros(t_wf, dtype=np.complex128)
    for n in range(t_wf):
        ac_file[n] = np.sum(comp_vc[:, 0] * np.conjugate(comp_vc[:, n]))
    return ac_file
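Chaining the two helpers (reusing the wf_n array from the sketch after Example #1); at t = 0 the autocorrelation is just the squared norm of the wave function:

comp_vc, time_vc = num_crl(wf_n)
ac = auto_corr(comp_vc, *comp_vc.shape)
print(ac[0])  # sum(|psi(0)|^2), purely real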
Example #8
    def train(self, X, Y, train_name, n_rounds=100, step_num=20):
        dataset_size = X.shape[0]
        # start from uniform sample weights (kept in one shared array)
        one_weight = np.full(dataset_size, 1.0 / dataset_size)
        classifier = WeakClassifier(X, Y, one_weight)
        for i in range(n_rounds):
            # dump_label: the stump's +/-1 predictions on X (assumed)
            dump_label, dump_tree, min_error = classifier.buildstump(step_num)
            # vote weight of this weak learner (natural log, per standard
            # AdaBoost; the original used log2)
            alpha = 0.5 * np.log((1.0 - min_error) / min_error)
            # re-weight samples: up-weight mistakes, down-weight correct
            # predictions, then renormalise; updated in place so the
            # WeakClassifier sees the new weights through its reference
            one_weight *= np.exp(-alpha * Y * dump_label)
            one_weight /= np.sum(one_weight)
            # keep the weak learner and its vote weight for prediction
            # (container layout assumed; the original left this unfinished)
            self.classfies[train_name].append((alpha, dump_tree))
        return
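For intuition on the vote weight: a stump with 20% weighted error gets alpha = 0.5 * ln(0.8 / 0.2) ≈ 0.693, and the weight shrinks to 0 as the error approaches 50% (barely better than chance):

import numpy as np

print(0.5 * np.log((1 - 0.2) / 0.2))      # 0.693...
print(0.5 * np.log((1 - 0.499) / 0.499))  # ~0.002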
Example #9
    def simulate(self, parameters, frequency_range):
        maxes = np.zeros(len(frequency_range))
        for j in range(len(frequency_range)):
            self.dim_dict["omega"] = frequency_range[j]
            self.nd_param = params(self.dim_dict)

            current = super().simulate(parameters, [])
            f, b, net, potential = super().SW_peak_extractor(current)

            if self.simulation_options["synthetic_noise"] != 0:
                current = self.add_noise(
                    net,
                    self.simulation_options["synthetic_noise"] * max(net))
                # current = self.rolling_window(current, 8)
            else:
                current = net
            if self.simulation_options["record_exps"]:
                self.saved_sims["current"].append(current)
                # `volts` was never defined here; record the SW potential
                self.saved_sims["voltage"].append(potential)
            # locate the peak on either side of the formal potential E_0
            first_half = np.where(potential < self.dim_dict["E_0"])
            second_half = np.where(potential > self.dim_dict["E_0"])
            data = [first_half, second_half]
            peak_pos = [0, 0]
            for i in range(2):
                peak_pos[i] = potential[data[i]][np.argmax(current[data[i]])]
            # peak-to-peak separation in mV
            maxes[j] = (peak_pos[1] - peak_pos[0]) * 1000
        if self.simulation_options["SWVtest"]:
            plt.scatter(1 / frequency_range, maxes)
            plt.scatter(1 / frequency_range, self.test)
            plt.show()
        return maxes
Example #10
    def fit(self, x, y, w=None):
        if w is None:
            w = np.ones(len(y)) / len(y)
        # sort samples by feature value, keeping labels and weights aligned
        data = sorted(zip(x, y, w), key=lambda s: s[0])
        x, y, w = zip(*data)
        y = np.array(y)
        w = np.array(w)
        # row 0: predict +1 for x < v, -1 for x >= v; row 1: the reverse
        correct = np.zeros((2, len(y)))
        for i in range(len(y)):
            w_front = w[:i]
            w_back = w[i:]
            correct[0, i] += np.sum(w_front[y[:i] == 1]) + np.sum(
                w_back[y[i:] == -1])
            correct[1, i] += np.sum(w_front[y[:i] == -1]) + np.sum(
                w_back[y[i:] == 1])
        # pick the split (and direction) with the largest correctly
        # classified weight
        idx = np.argmax(correct, axis=1)
        if correct[0, int(idx[0])] > correct[1, int(idx[1])]:
            self.sign = "smaller"
            self.thres = x[idx[0]]
        else:
            self.sign = "equal to or bigger"
            self.thres = x[idx[1]]
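A toy run, assuming the enclosing class is named DecisionStump and takes no constructor arguments; with uniform weights the stump should split the four points at the label boundary:

stump = DecisionStump()
stump.fit(x=[0.1, 0.4, 0.6, 0.9], y=[1, 1, -1, -1])
print(stump.sign, stump.thres)  # smaller 0.6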
Example #11
import numpy as np


def lm_series(M, design, ndups=2, spacing=1, block=None, correlation=None,
              weights=None):
    """ Fit linear model for each gene to a series of microarrays.
    Fit is by generalized least squares allowing for correlation between
    duplicate spots.

    """

    n_arrays = M.shape[1]
    n_beta = design.shape[1]
    coef_names = design.columns

    if getattr(weights, 'size', False):
        weights[np.isnan(weights)] = 0
        if weights.shape[0] == M.shape[0]:
            weights = np.tile(weights, (1, M.shape[1]))
        else:
            weights = np.tile(weights, (M.shape[0], 1))
        # :TODO need to refactor this
        M[weights < 1e-15] = np.nan
        weights[weights < 1e-15] = np.nan

    if ndups >= 1:
        M = unwrap_duplicates(M, ndups=ndups, spacing=spacing)
        # repeat each design row once per duplicate spot
        design = np.kron(design, np.ones((ndups, 1)))
        if getattr(weights, 'size', False):
            weights = unwrap_duplicates(weights, ndups=ndups, spacing=spacing)

    n_genes = M.shape[0]

    stdev_unscaled = np.full((n_genes, n_beta), np.nan)
    sigma = np.full(n_genes, np.nan)
    df_resid = np.zeros(n_genes)
    no_probe_wts = not np.isnan(M).any() and \
            (getattr(weights, 'size', False) or
             getattr(weights, 'array_weights', False))
    if no_probe_wts:
        if getattr(weights, 'size', False):
            # :TODO find best lm fit function
            fit = lm_wfit(design, M.T, weights[1, :])
            fit.weights = None
        else:
            fit = lm_fit(design, M.T)
        if fit.df_residual > 0:
            # :TODO placeholder; the original left this sqrt call empty
            fit.sigma = np.sqrt(np.mean(fit.resid ** 2))

        df_resid = fit.df_residual
        sigma = fit.sigma
        weights = fit.weights

    else:
        pass

    if not correlation:
        correlation = duplicate_correlation(M, design=design, ndups=ndups)

    if not block:
        if ndups < 2:
            ndups = 1
            correlation = 0
        # :TODO unfinished port; the original built this from an incomplete
        # expression, np.diag(np.repeat(correlation, ...)).dot(...)
        corr_matrix = np.diag(np.repeat(correlation, ndups))

    # :TODO Z (the duplicate/block indicator matrix) is never constructed,
    # so the final GLS correlation matrix remains unfinished:
    # corr_matrix = Z.dot(correlation * Z.T)
Example #12
import numpy as np


def optimizePortfolio(df_rets, list_min, list_max, list_price_target,
                      target_risk, direction="long"):

    naLower = np.array(list_min)
    naUpper = np.array(list_max)
    naExpected = np.array(list_price_target)

    b_same_flag = np.all(naExpected == naExpected[0])
    if b_same_flag and (naExpected[0] == 0):
        naExpected = naExpected + 0.1
    if b_same_flag:
        na_randomness = np.ones(naExpected.shape)
        target_risk = 0
        for i in range(len(na_randomness)):
            if i % 2 == 0:
                na_randomness[i] = -1
        naExpected = naExpected + naExpected * 0.0000001 * na_randomness

    (fMin, fMax) = getRetRange(df_rets.values, naLower, naUpper,
                               naExpected, direction)

    # Try to avoid intractable endpoints due to rounding errors
    fMin += abs(fMin) * 0.00000000001
    fMax -= abs(fMax) * 0.00000000001

    if target_risk == 1:
        (naPortWeights, fPortDev, b_error) = OptPort(df_rets.values, fMax, naLower, naUpper, naExpected, direction)
        allocations = _create_dict(df_rets, naPortWeights)
        return {'allocations': allocations, 'std_dev': fPortDev, 'expected_return': fMax, 'error': b_error}

    fStep = (fMax - fMin) / 50.0

    lfReturn = [fMin + x * fStep for x in range(51)]
    lfStd = []
    lnaPortfolios = []

    for fTarget in lfReturn:
        (naWeights, fStd, b_error) = OptPort(df_rets.values, fTarget, naLower, naUpper, naExpected, direction)
        if not b_error:
            lfStd.append(fStd)
            lnaPortfolios.append(naWeights)
        else:
            # Return error on ANY failed optimization
            allocations = _create_dict(df_rets, np.zeros(df_rets.shape[1]))
            return {'allocations': allocations, 'std_dev': 0.0,
                    'expected_return': fMax, 'error': True}

    if len(lfStd) == 0:
        (naPortWeights, fPortDev, b_error) = OptPort(df_rets.values, fMax, naLower, naUpper, naExpected, direction)
        allocations = _create_dict(df_rets, naPortWeights)
        return {'allocations': allocations, 'std_dev': fPortDev, 'expected_return': fMax, 'error': True}

    f_return = lfReturn[lfStd.index(min(lfStd))]

    if target_risk == 0:
        naPortWeights = lnaPortfolios[lfStd.index(min(lfStd))]
        allocations = _create_dict(df_rets, naPortWeights)
        return {'allocations': allocations, 'std_dev': min(lfStd), 'expected_return': f_return, 'error': False}

    # Otherwise try to hit custom target between 0-1 min-max risk
    fTarget = f_return + ((fMax - f_return) * target_risk)

    (naPortWeights, fPortDev, b_error) = OptPort(df_rets.values, fTarget, naLower, naUpper, naExpected, direction)
    allocations = _create_dict(df_rets, naPortWeights)
    return {'allocations': allocations, 'std_dev': fPortDev, 'expected_return': fTarget, 'error': b_error}
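A hypothetical call on synthetic returns; df_rets is assumed to be a pandas DataFrame, and the helpers used above (getRetRange, OptPort, _create_dict) must be importable from the same module:

import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
df_rets = pd.DataFrame(rng.normal(0.001, 0.02, size=(250, 3)),
                       columns=["AAA", "BBB", "CCC"])
result = optimizePortfolio(df_rets, list_min=[0.0, 0.0, 0.0],
                           list_max=[1.0, 1.0, 1.0],
                           list_price_target=[0.05, 0.04, 0.06],
                           target_risk=0.5)
print(result["allocations"], result["std_dev"])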
Example #13
import numpy as np
from copy import deepcopy


def OptPort(naData, fTarget, naLower=None, naUpper=None, naExpected=None, s_type="long"):
    """
    @summary Returns the Markowitz optimum portfolio for a specific return.
    @param naData: Daily returns of the various stocks (using returnize1)
    @param fTarget: Target return, i.e. 0.04 = 4% per period
    @param lPeriod: Period to compress the returns to, e.g. 7 = weekly
    @param naLower: List of floats which corresponds to lower portfolio% for each stock
    @param naUpper: List of floats which corresponds to upper portfolio% for each stock
    @return tuple: (weights of portfolio, min possible return, max possible return)
    """
    # Attempt to import library
    try:
        from cvxopt import matrix
        from cvxopt.solvers import qp, options
    except ImportError:
        print('Could not import CVX library')
        return ([], 0, True)

    # Get number of stocks
    length = naData.shape[1]
    b_error = False

    naLower = deepcopy(naLower)
    naUpper = deepcopy(naUpper)
    naExpected = deepcopy(naExpected)

    # Assuming AvgReturns as the expected returns if parameter is not specified
    if (naExpected is None):
        naExpected = np.average(naData, axis=0)

    na_signs = np.sign(naExpected)
    indices, = np.where(na_signs == 0)
    na_signs[indices] = 1
    if s_type == "long":
        na_signs = np.ones(len(na_signs))
    elif s_type == "short":
        na_signs = np.ones(len(na_signs)) * (-1)

    naData = na_signs * naData
    naExpected = na_signs * naExpected

    # Covariance matrix of the Data Set
    naCov = np.cov(naData, rowvar=False)

    # If length is one, just return 100% single symbol
    if length == 0:
        return ([], [0], False)
    elif length == 1:
        return (list(na_signs), np.std(naData, axis=0)[0], False)
    # If we have 0/1 "free" equity we can't optimize
    # We just use limits since we are stuck with 0 degrees of freedom

    # Special case for fTarget is None: simply return average returns and cov
    if fTarget is None:
        return (naExpected, np.std(naData, axis=0), b_error)

    # Upper bound of the weights of an equity; if not specified, assumed to be 1
    if naUpper is None:
        naUpper = np.ones(length)

    # Lower bound of the weights of an equity; if not specified, assumed to be 0 (no-shorting case)
    if naLower is None:
        naLower = np.zeros(length)

    if sum(naLower) == 1:
        fPortDev = np.std(np.dot(naData, naLower))
        return (naLower, fPortDev, False)

    if sum(naUpper) == 1:
        fPortDev = np.std(np.dot(naData, naUpper))
        return (naUpper, fPortDev, False)

    naFree = naUpper != naLower
    if naFree.sum() <= 1:
        lnaPortfolios = naUpper.copy()

        # If there is 1 free we need to modify it to make the total
        # Add up to 1
        if naFree.sum() == 1:
            f_rest = naUpper[~naFree].sum()
            lnaPortfolios[naFree] = 1.0 - f_rest

        lnaPortfolios = na_signs * lnaPortfolios
        fPortDev = np.std(np.dot(naData, lnaPortfolios))
        return (lnaPortfolios, fPortDev, False)

    # Double the covariance of the diagonal elements for calculating risk.
    for i in range(length):
        naCov[i][i] = 2 * naCov[i][i]

    # Note, returns are modified to all be long from here on out
    (fMin, fMax) = getRetRange(False, naLower, naUpper, naExpected, "long")
    #print (fTarget, fMin, fMax)
    if fTarget < fMin or fTarget > fMax:
        print "Target not possible", fTarget, fMin, fMax
        b_error = True

    naLower = naLower * (-1)

    # Set up the parameters for the CVXOPT library; it takes inputs in matrix
    # format. The risk-minimization problem is a standard quadratic
    # programming problem under Markowitz theory.
    S = matrix(naCov)
    #pbar=matrix(naExpected)
    naLower.shape = (length, 1)
    naUpper.shape = (length, 1)
    naExpected.shape = (1, length)
    zeo = matrix(0.0, (length, 1))
    I = np.eye(length)
    minusI = -1 * I
    G = matrix(np.vstack((I, minusI)))
    h = matrix(np.vstack((naUpper, naLower)))
    ones = matrix(1.0, (1, length))
    A = matrix(np.vstack((naExpected, ones)))
    b = matrix([float(fTarget), 1.0])

    # Optional Settings for CVXOPT
    options['show_progress'] = False
    options['abstol'] = 1e-25
    options['reltol'] = 1e-24
    options['feastol'] = 1e-25

    # Optimization Calls
    # Optimal Portfolio
    try:
        lnaPortfolios = qp(S, -zeo, G, h, A, b)['x']
    except Exception:
        b_error = True

    if b_error:
        print "Optimization not Possible"
        na_port = naLower * -1
        if sum(na_port) < 1:
            if sum(naUpper) == 1:
                na_port = naUpper
            else:
                i = 0
                while(sum(na_port) < 1 and i < 25):
                    naOrder = naUpper - na_port
                    i = i + 1
                    indices = np.where(naOrder > 0)
                    na_port[indices] = na_port[indices] + (1 - sum(na_port)) / len(indices[0])
                    naOrder = naUpper - na_port
                    indices = np.where(naOrder < 0)
                    na_port[indices] = naUpper[indices]

        lnaPortfolios = matrix(na_port)

    lnaPortfolios = na_signs * np.asarray(lnaPortfolios).reshape(-1)
    # Expected Return of the Portfolio
    # lfReturn = dot(pbar, lnaPortfolios)

    # Risk of the portfolio
    fPortDev = np.std(np.dot(naData, lnaPortfolios))
    return (lnaPortfolios, fPortDev, b_error)
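A minimal call sketch on synthetic returns (requires cvxopt, plus the getRetRange helper from the same module):

import numpy as np

rng = np.random.default_rng(1)
naData = rng.normal(0.001, 0.02, size=(250, 4))  # daily returns, 4 stocks
weights, dev, err = OptPort(naData, fTarget=0.001)
print(weights, dev, err)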
Example #14
import numpy as np


def getOptPort(rets, f_target, l_period=1, naLower=None, naUpper=None, lNagDebug=0):
    """
    @summary Returns the Markowitz optimum portfolio for a specific return.
    @param rets: Daily returns of the various stocks (using returnize1)
    @param f_target: Target return, i.e. 0.04 = 4% per period
    @param l_period: Period to compress the returns to, e.g. 7 = weekly
    @param naLower: List of floats which corresponds to lower portfolio% for each stock
    @param naUpper: List of floats which corresponds to upper portfolio% for each stock
    @return tuple: (weights of portfolio, min possible return, max possible return)
    """

    # Attempt to import library
    try:
        import nagint as nag
    except ImportError:
        print('Could not import NAG library')
        print('make sure nagint.so is in your python path')
        return ([], 0, 0)

    # Get number of stocks
    lStocks = rets.shape[1]

    # If period != 1 we need to restructure the data
    if l_period != 1:
        rets = getReindexedRets(rets, l_period)

    # Calculate means and covariance
    naAvgRets = np.average(rets, axis=0)
    naCov = np.cov(rets, rowvar=False)

    # Special case for f_target is None: simply return average returns and cov
    if f_target is None:
        return naAvgRets, np.std(rets, axis=0)

    # Calculate upper and lower limits of variables as well as constraints
    if naUpper is None:
        naUpper = np.ones(lStocks)  # max portfolio % is 1

    if naLower is None:
        naLower = np.zeros(lStocks)  # min is 0, set negative for shorting

    # Two extra constraints for linear conditions:
    # result = desired return, and sum of weights = 1
    naUpper = np.append(naUpper, [f_target, 1.0])
    naLower = np.append(naLower, [f_target, 1.0])

    # Initial estimate of portfolio
    naInitial = np.array([1.0 / lStocks] * lStocks)

    # Set up constraints matrix,
    # composed of expected returns in row one, unity row in row two
    naConstraints = np.vstack((naAvgRets, np.ones(lStocks)))

    # Get portfolio weights; the last entry in the array is actually variance
    try:
        naReturn = nag.optPort(naConstraints, naLower, naUpper,
                               naCov, naInitial, lNagDebug)
    except RuntimeError:
        print('NAG Runtime error with target: %.02lf' % f_target)
        # return semi-junk to not mess up the rest of the plot
        return (naInitial, np.sqrt(naCov[0][0]))

    # Calculate stdev of entire portfolio to return;
    # what NAG returns is slightly different
    fPortDev = np.std(np.dot(rets, naReturn[0, 0:-1]))

    # Show difference between above stdev and sqrt of NAG covariance,
    # possibly not taking correlation into account
    # print(fPortDev / np.sqrt(naReturn[0, -1]))

    # Return weights and stdDev of portfolio;
    # note again the last value of naReturn is NAG's reported variance
    return (naReturn[0, 0:-1], fPortDev)