Example #1
def pvbBound(n):
    # Parrondo-Van den Broek bound with growth function m_H(2n) = (2n)**50 and
    # delta = 0.05, solved for epsilon:
    # epsilon = 1/n + sqrt(1/n**2 + (1/n) * ln(6 * (2n)**50 / 0.05))
    a = multiply(2, n)
    b = power(a, 50)
    c = multiply(6, b)
    d = divide(c, 0.05)
    e = log(d)
    f = divide(1.0, n)
    return f + sqrt(divide(1.0, power(n, 2)) + multiply(f, e))
Example #2
def ma_multiply_test():
    XM = [[1, 0, 1, 0], [0, 1, 0, 0], [0, 0, 100, 0]]
    Coins = [[1], [2], [3]]
    from numpy import ma
    print(ma.multiply(XM, Coins))
    print(ma_multiply(XM, Coins))
    Coins = [1, 2, 3, 4]
    print(ma.multiply(XM, Coins))
    print(ma_multiply(XM, Coins))
Example #3
def ma_multiply_test():
    XM = [[1, 0, 1, 0], [0, 1, 0, 0], [0, 0, 100, 0]]
    Coins = [[1], [2], [3]]
    from numpy import ma
    print(ma.multiply(XM, Coins))
    print(ma_multiply(XM, Coins))
    Coins = [1, 2, 3, 4]
    print(ma.multiply(XM, Coins))
    print(ma_multiply(XM, Coins))
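The test exercises numpy broadcasting: a (3, 1) column vector scales each row of XM, while a length-4 vector scales each column. A minimal sketch with the same values:

import numpy as np
from numpy import ma

XM = np.array([[1, 0, 1, 0], [0, 1, 0, 0], [0, 0, 100, 0]])
print(ma.multiply(XM, [[1], [2], [3]]))  # row-wise scaling by the column vector
print(ma.multiply(XM, [1, 2, 3, 4]))     # column-wise scaling by the flat vector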
Example #4
    def runLogisticRegression(self):
        diff = 1
        while diff > 0.01:
            permutation = np.random.permutation(self.N)
            newWeights = self.w.copy()
            for i in permutation:
                x, y = self.trainingData[i]
                gradient = divide(
                    multiply(-1.0, multiply(x, y)),
                    (1.0 + exp(multiply(y, np.dot(transpose(self.w), x)))))
                newWeights = subtract(newWeights,
                                      multiply(self.learningRate, gradient))
            self.epoch += 1
            diff = norm(self.w - newWeights)
            self.w = newWeights
Example #5
    def normalize(self, info: PoseNormalizationInfo, scale_factor: float = 1):
        """
        Normalize the point to a fixed distance between two points
        """
        mask = self.body.data.mask
        transposed = self.body.zero_filled().points_perspective()

        p1s = transposed[info.p1]
        p2s = transposed[info.p2]

        if transposed.shape[1] == 0:
            p1s = p1s[0]
            p2s = p2s[0]
        else:
            p1s = ma.concatenate(p1s)
            p2s = ma.concatenate(p2s)

        # try:
        mean_distance = np.mean(distance_batch(p1s, p2s))
        # except FloatingPointError:
        #     print(self.body.data)
        #     print(p1s)
        #     print(p2s)

        scale = scale_factor / mean_distance  # scale all points to dist/scale

        if round(scale, 5) != 1:
            self.body.data = ma.multiply(self.body.data, scale)

        self.body.data = ma.array(self.body.data, mask=mask)

        return self
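distance_batch is a helper from the pose library; a minimal stand-in, assuming p1s and p2s are (N, dims) point arrays (a sketch, not the library's implementation):

import numpy as np

def distance_batch(p1s, p2s):
    # Euclidean distance between paired points along the last axis
    return np.sqrt(((p1s - p2s) ** 2).sum(axis=-1))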
Example #6
def b_test():
    from numpy import ma as ma
    td = 0.33333333333333333
    XM = [[-td, -td, 2 * td], [2 * td, 2 * td, -td], [-td, -td, -td]]
    Coins = [1000000] * 3
    print(ma.multiply(XM, Coins).T.dot(XM))
    print(dot(switch_row_cols(matrix_multiply(XM, Coins)), XM))
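matrix_multiply and switch_row_cols are project helpers; the printed comparison suggests elementwise broadcasting and a transpose, so plausible stand-ins (assumptions, not the project's code) are:

import numpy as np
from numpy import dot

def matrix_multiply(A, B):
    # assumed: elementwise multiply with broadcasting, mirroring ma.multiply
    return np.multiply(A, B)

def switch_row_cols(A):
    # assumed: plain transpose
    return np.transpose(A)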
Example #7
    def findEout(self, numSamples=1000):
        e_out = 0
        dataSamples = [self.createDataPoint() for _ in range(numSamples)]
        for x, y in dataSamples:
            e_out += log(1 +
                         exp(-1 * multiply(y, np.dot(transpose(self.w), x))))
        e_out /= float(numSamples)
        return e_out
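The loop above averages the cross-entropy error ln(1 + exp(-y * w.T x)) over fresh samples; a vectorized equivalent, assuming X is an (N, d) array, y an (N,) label vector and w a (d,) weight vector (a sketch, not the original class code):

import numpy as np

def cross_entropy_error(X, y, w):
    # mean over samples of ln(1 + exp(-y_i * (w . x_i))); log1p is numerically safer
    return np.mean(np.log1p(np.exp(-y * (X @ w))))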
Example #8
def logicreg_mle_iteration(Y, X, w):
    w_result = None
    #Terminating control params
    w_ctrl = 0.0001
    G_ctrl = 0.00001
    L_ctrl = 0.000001
    #Initialize looping params
    w_base = w
    P_base = logicreg_func(X, w_base)
    #Initialize monitoring params
    iteration_round = 1
    while True:
        #Log-likelihood gradients
        G = dot(transpose(X), subtract(Y, P_base))
        #Terminating control 1: G ~ 0
        if norm(G) < G_ctrl:
            print('G condition met, iteration_round:', iteration_round)
            w_result = w_base
            break
        else:
            #Sample_diag for Hessian matrix computation
            D = diagflat(multiply(P_base, subtract(1, P_base)))
            #Hessian matrix H
            H = multiply(-1, dot(dot(transpose(X), D), X))
            #Each new param W is approximated base on the latest w and sample data set Y, X
            w_new = subtract(w_base, dot(logicreg_matrix_inverse(H), G))
            #Terminating control 2: w_new ~ w_base
            if norm(subtract(w_new, w_base)) < w_ctrl:
                print('w condition met, iteration_round:', iteration_round)
                w_result = w_new
                break
            else:
                P_new = logicreg_func(X, w_new)
                #Terminating control 3: L_new ~ L_base
                if abs(logicreg_log_likelihood_func(Y, P_new) - logicreg_log_likelihood_func(Y, P_base)) < L_ctrl:
                    print('L condition met, iteration_round:', iteration_round)
                    w_result = w_new
                    break
                else:
                    #Prepare params for next loop
                    P_base = P_new
                    w_base = w_new
                    iteration_round += 1
                    continue
    return w_result
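The iteration relies on three project helpers that are not shown; plausible stand-ins (assumptions, not the original module's code) would be:

import numpy as np
from numpy import dot

def logicreg_func(X, w):
    # sigmoid of the linear predictor: P(y=1 | x) = 1 / (1 + exp(-Xw))
    return 1.0 / (1.0 + np.exp(-dot(X, w)))

def logicreg_log_likelihood_func(Y, P):
    eps = 1e-12  # guard against log(0)
    return np.sum(Y * np.log(P + eps) + (1 - Y) * np.log(1 - P + eps))

def logicreg_matrix_inverse(H):
    return np.linalg.inv(H)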
Example #9
    def plotSpectrogram(self, f, startTime=0):
        if self.filePath != '':
            samplingFrequency, signalData = f
            self.figureSpec.clear()

            # Plot the signal read from wav file

            ax = self.figureSpec.add_subplot(111)

            nfft = int(self.nfftComboBox.currentText())
            nover = int(self.noverlapComboBox.currentText())

            # All the supported windows share the same get_window call, so the
            # branch collapses to a single lookup of the selected name.
            window_name = self.windowComboBox.currentText()
            if window_name in ('boxcar', 'triang', 'blackman', 'hamming',
                               'hann', 'bartlett', 'flattop', 'parzen',
                               'nuttall', 'taylor'):
                win = signal.get_window(window_name, len(signalData))

            signalData = multiply(signalData, win)

            ax.specgram(signalData,
                        NFFT=nfft,
                        noverlap=nover,
                        Fs=samplingFrequency,
                        cmap=self.colorMap)

            ax.set_yscale(self.scaleComboBox.currentText())  #linear or symlog
            if self.maxFreqComboBox.currentText() != 'Auto':
                ax.set_ylim(
                    0, int(self.maxFreqComboBox.currentText().split(' ',
                                                                    1)[0]))
            ax.set_ylim(
                int(self.minFreqComboBox.currentText().split(' ', 1)[0]), )

            ax.set_title('Spectrogram', fontsize=12)
            ax.set_xlabel('Time [s]', fontsize=8)
            ax.set_ylabel('Frequency [Hz]', fontsize=8)
            ax.tick_params(labelsize=7)

            self.figureSpec.tight_layout(pad=0.3)

            self.figureSpecCanvas.draw()
Example #10
def b_test():
    from numpy import ma as ma
    td = 0.33333333333333333
    XM = [[-td, -td, 2 * td], [2 * td, 2 * td, -td], [-td, -td, -td]]
    Coins = [1000000] * 3
    print(ma.multiply(XM, Coins).T.dot(XM))
    print(dot(switch_row_cols(matrix_multiply(XM, Coins)), XM))
Example #11
def heat_equation_FE(n, m, e, umin, umax, theta, alpha, sigma, r):
    dx = (umax - umin) / n  # Price step
    dt = theta / m  # time step
    vam = initial_option(n, r, m, e, umin, dt, dx, alpha, sigma)
    F = dt / dx**2
    if F >= 0.5:
        print("Warning: dt/dx**2 >= 0.5, the explicit scheme is unstable!")
    # Implementing the explicit algorithm
    for i in range(1, m, 1):
        # Checks if early exercise is better for the American Option
        vam[1:n - 1, i] = fmax(
            vam[1:n - 1, i - 1] + multiply(
                F, vam[2:n, i - 1] + vam[0:n - 2, i - 1] -
                2 * vam[1:n - 1, i - 1]), vam[1:n - 1, 0])
    # Reversal of the time components in the matrix as the solution of the Black Scholes equation was performed
    # backwards
    vam = fliplr(vam)
    return vam
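A quick numeric check of the stability condition F = dt/dx**2 < 0.5 that the function warns about (illustrative parameter values, not taken from the original caller):

n, m, umin, umax, theta = 100, 10000, 0.0, 1.0, 1.0
dx = (umax - umin) / n  # 0.01
dt = theta / m  # 0.0001
print(dt / dx ** 2)  # 1.0 -> unstable; shrink dt or coarsen dx until F < 0.5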
Example #12
    def applyDownscaling(self, emissivity_image_100m, mean_emissivity_100m):

        #emissivity_image = self.calcEmissivitySobrino()

        # ******** MODIS LST (1km) ***********

        lst_image = Image(
            self.modis_image.lst.split(".")[0] + '_subdivided_100m.tif')

        modis_array = lst_image.getArray(masked=True,
                                         lower_valid_range=7500,
                                         upper_valid_range=65535)

        # convert to surface temperatures in Celsius
        lst_metadata = lst_image.getMetadata()

        # check whether scale_factor is present in the metadata (it is for
        # AppEEARS, not for EarthData)
        if 'scale_factor' in lst_metadata:
            scale_factor = float(
                lst_metadata['scale_factor'])  # multiply by 0.02
            add_offset = float(lst_metadata['add_offset'])
        else:
            scale_factor = float(0.02)
            add_offset = float(0)

        # conversion to Kelvin, then to Celsius
        kelvin_array = np.add(np.multiply(modis_array, scale_factor),
                              add_offset)
        lst_celsius_array = np.subtract(kelvin_array, 273.15)

        # apply PBIM formula (T_high = T_low * emissivity_high / emissivity_avg) for each pixel

        # ********** Emissivity (100m) *********
        emissivity = Image(emissivity_image_100m).getArray(masked=False)

        mean_emissivity = Image(mean_emissivity_100m).getArray(masked=False)

        # PBIM formula
        t_high = ma.divide(ma.multiply(lst_celsius_array, emissivity),
                           mean_emissivity)

        Image(emissivity_image_100m).save_band(
            t_high, r'secteur3/PBIM_100m_result.tif')
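A toy check of the PBIM relation T_high = T_low * emissivity_high / emissivity_avg applied above, with made-up values:

import numpy as np
import numpy.ma as ma

t_low = ma.array([[20.0, 21.0]])  # coarse LST in Celsius
e_high = np.array([[0.97, 0.99]])  # fine-scale emissivity
e_avg = 0.98  # mean emissivity over the coarse pixel
print(ma.divide(ma.multiply(t_low, e_high), e_avg))  # approx. [[19.80, 21.21]]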
Example #13
def forward_backward(evidence_sequence, prior):
    """Does the forwards-backwards-algorithm with the given evidence sequence and prior"""
    t = len(evidence_sequence)
    forward_messages = []  # forward messages
    b = matrix([1, 1])
    b = transpose(b)  # B needs to be as columns for matrix operations.
    smoothed_estimates = [0] * (t + 1)
    forward_messages.append(prior)
    for i in range(1, t + 1):
        forward_messages.append(forward(forward_messages[i - 1], evidence_sequence[i - 1]))

    print("forwarded= %s" % forward_messages)
    for j in range(t, 1, -1):
        normal = multiply(forward_messages[j], b)
        normal = normalize(transpose(normal))  # Normalizing requires rows.
        smoothed_estimates[j] = normal
        b = backward(b, evidence_sequence[j - 1])
        print("Backward message: %s" % b)

    return smoothed_estimates
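forward, backward and normalize come from the surrounding module; hypothetical 2-state versions consistent with the calls above (T is the transition matrix, O maps an evidence value to a diagonal observation matrix; all values made up):

import numpy as np

T = np.array([[0.7, 0.3], [0.3, 0.7]])  # transition model
O = {True: np.diag([0.9, 0.2]), False: np.diag([0.1, 0.8])}  # sensor model

def forward(f, e):
    f_new = O[e] @ T.T @ np.asarray(f).reshape(2, 1)
    return f_new / f_new.sum()  # normalized forward message

def backward(b, e):
    return T @ O[e] @ np.asarray(b).reshape(2, 1)

def normalize(v):
    return v / np.sum(v)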
Example #14
    def WeightedCov(self, votes_filled):
        """Weights are the number of coins people start with, so the aim of this
        weighting is to count 1 vote for each of their coins -- e.g., guy with 10
        coins effectively gets 10 votes, guy with 1 coin gets 1 vote, etc.

        http://stats.stackexchange.com/questions/61225/correct-equation-for-weighted-unbiased-sample-covariance

        """
        # Compute the weighted mean (of all voters) for each decision
        weighted_mean = ma.average(votes_filled,
                                   axis=0,
                                   weights=self.rep_coins.squeeze())

        # Each vote's difference from the mean of its decision (column)
        mean_deviation = np.matrix(votes_filled - weighted_mean)

        # Compute the unbiased weighted population covariance
        # (for uniform weights, equal to np.cov(votes_filled.T, bias=1))
        covariance_matrix = 1 / float(np.sum(self.rep_coins) - 1) * ma.multiply(
            mean_deviation, self.rep_coins).T.dot(mean_deviation)

        return covariance_matrix, mean_deviation
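A toy run of the same weighted-covariance formula with made-up votes and coin weights (names here are illustrative, not from the original class):

import numpy as np
import numpy.ma as ma

votes = ma.masked_invalid(np.array([[1.0, 0.0, 1.0],
                                    [0.0, 1.0, 1.0],
                                    [1.0, 1.0, 0.0]]))
coins = np.array([[10.0], [1.0], [5.0]])  # one weight per voter (row)

weighted_mean = ma.average(votes, axis=0, weights=coins.squeeze())
deviation = np.matrix(votes - weighted_mean)
cov = 1 / float(np.sum(coins) - 1) * ma.multiply(deviation, coins).T.dot(deviation)
print(cov)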
Example #15
def forward_backward(evidence_sequence, prior):
    """Does the forwards-backwards-algorithm with the given evidence sequence and prior"""
    t = len(evidence_sequence)
    forward_messages = []  # forward messages
    b = matrix([1, 1])
    b = transpose(b)  # B needs to be as columns for matrix operations.
    smoothed_estimates = [0] * (t + 1)
    forward_messages.append(prior)
    for i in range(1, t + 1):
        forward_messages.append(
            forward(forward_messages[i - 1], evidence_sequence[i - 1]))

    print("forwarded= %s" % forward_messages)
    for j in range(t, 1, -1):
        normal = multiply(forward_messages[j], b)
        normal = normalize(transpose(normal))  # Normalizing requires rows.
        smoothed_estimates[j] = normal
        b = backward(b, evidence_sequence[j - 1])
        print("Backward message: %s" % b)

    return smoothed_estimates
Example #16
def l2norm_weighted(values, overall_scale, term_weights):
    """Calculates scaled and weighted Euclidean distance.

    Calculated distance is of form: scale * sqrt((a1*a)**2 + (b1*b)**2 + ...)
    where a, b, ... are terms to be summed and a1, b1, ... are optional weights
    for the terms.

    Args:
        values (tuple): Tuple containing the values.
        overall_scale (float): Scale factor for the calculated
            Euclidean distance.
        term_weights (tuple): Weights for the terms. Must be single
            float or a list of numbers (one per term).

    Returns:
        float: Scaled and weighted Euclidean distance.

    TODO: Probably better use masked arrays instead of tuples.

    """
    weighted_values = ma.multiply(values, term_weights)
    return overall_scale * l2norm(*weighted_values)
Example #17
def l2norm_weighted(values: tuple, overall_scale: float,
                    term_weights: tuple) -> ma.MaskedArray:
    """Calculates scaled and weighted Euclidean distance.

    Calculated distance is of form: scale * sqrt((a1*a)**2 + (b1*b)**2 + ...)
    where a, b, ... are terms to be summed and a1, b1, ... are optional weights
    for the terms.

    Args:
        values: Tuple containing the values.
        overall_scale: Scale factor for the calculated Euclidean distance.
        term_weights: Weights for the terms. Must be single float or a list of numbers
            (one per term).

    Returns:
        Scaled and weighted Euclidean distance.

    TODO: Use masked arrays instead of tuples.

    """
    generic_values = ma.array(values, dtype=object)
    weighted_values = ma.multiply(generic_values, term_weights)
    return overall_scale * l2norm(*weighted_values)
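l2norm is defined elsewhere in the project; assuming it takes the root of the summed squares, a stand-in plus a sample call (a sketch under that assumption):

import numpy as np

def l2norm(*args):
    # stand-in: sqrt(a**2 + b**2 + ...)
    return np.sqrt(sum(a ** 2 for a in args))

# 2.0 * sqrt((0.5 * 3)**2 + (2.0 * 4)**2) ~= 16.28
print(l2norm_weighted((3.0, 4.0), overall_scale=2.0, term_weights=(0.5, 2.0)))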
Example #18
    def test_testArithmetic(self):
        # Test of basic arithmetic.
        (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
        a2d = array([[1, 2], [0, 4]])
        a2dm = masked_array(a2d, [[0, 0], [1, 0]])
        assert_(eq(a2d * a2d, a2d * a2dm))
        assert_(eq(a2d + a2d, a2d + a2dm))
        assert_(eq(a2d - a2d, a2d - a2dm))
        for s in [(12,), (4, 3), (2, 6)]:
            x = x.reshape(s)
            y = y.reshape(s)
            xm = xm.reshape(s)
            ym = ym.reshape(s)
            xf = xf.reshape(s)
            assert_(eq(-x, -xm))
            assert_(eq(x + y, xm + ym))
            assert_(eq(x - y, xm - ym))
            assert_(eq(x * y, xm * ym))
            with np.errstate(divide='ignore', invalid='ignore'):
                assert_(eq(x / y, xm / ym))
            assert_(eq(a10 + y, a10 + ym))
            assert_(eq(a10 - y, a10 - ym))
            assert_(eq(a10 * y, a10 * ym))
            with np.errstate(divide='ignore', invalid='ignore'):
                assert_(eq(a10 / y, a10 / ym))
            assert_(eq(x + a10, xm + a10))
            assert_(eq(x - a10, xm - a10))
            assert_(eq(x * a10, xm * a10))
            assert_(eq(x / a10, xm / a10))
            assert_(eq(x ** 2, xm ** 2))
            assert_(eq(abs(x) ** 2.5, abs(xm) ** 2.5))
            assert_(eq(x ** y, xm ** ym))
            assert_(eq(np.add(x, y), add(xm, ym)))
            assert_(eq(np.subtract(x, y), subtract(xm, ym)))
            assert_(eq(np.multiply(x, y), multiply(xm, ym)))
            with np.errstate(divide='ignore', invalid='ignore'):
                assert_(eq(np.divide(x, y), divide(xm, ym)))
Example #19
    def test_testArithmetic(self):
        # Test of basic arithmetic.
        (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
        a2d = array([[1, 2], [0, 4]])
        a2dm = masked_array(a2d, [[0, 0], [1, 0]])
        assert_(eq(a2d * a2d, a2d * a2dm))
        assert_(eq(a2d + a2d, a2d + a2dm))
        assert_(eq(a2d - a2d, a2d - a2dm))
        for s in [(12,), (4, 3), (2, 6)]:
            x = x.reshape(s)
            y = y.reshape(s)
            xm = xm.reshape(s)
            ym = ym.reshape(s)
            xf = xf.reshape(s)
            assert_(eq(-x, -xm))
            assert_(eq(x + y, xm + ym))
            assert_(eq(x - y, xm - ym))
            assert_(eq(x * y, xm * ym))
            with np.errstate(divide='ignore', invalid='ignore'):
                assert_(eq(x / y, xm / ym))
            assert_(eq(a10 + y, a10 + ym))
            assert_(eq(a10 - y, a10 - ym))
            assert_(eq(a10 * y, a10 * ym))
            with np.errstate(divide='ignore', invalid='ignore'):
                assert_(eq(a10 / y, a10 / ym))
            assert_(eq(x + a10, xm + a10))
            assert_(eq(x - a10, xm - a10))
            assert_(eq(x * a10, xm * a10))
            assert_(eq(x / a10, xm / a10))
            assert_(eq(x ** 2, xm ** 2))
            assert_(eq(abs(x) ** 2.5, abs(xm) ** 2.5))
            assert_(eq(x ** y, xm ** ym))
            assert_(eq(np.add(x, y), add(xm, ym)))
            assert_(eq(np.subtract(x, y), subtract(xm, ym)))
            assert_(eq(np.multiply(x, y), multiply(xm, ym)))
            with np.errstate(divide='ignore', invalid='ignore'):
                assert_(eq(np.divide(x, y), divide(xm, ym)))
Example #20
def lspolyfit(myTime,
              data,
              order,
              mode,
              thresholdMask=None,
              firstNan=None,
              fitColumnIndex=None,
              fitColumnMask=None,
              full=False,
              alpha=0.95,
              tryLowerOrders=False):

    if thresholdMask is None:
        # we'll use all data points regardless
        thresholdMask = np.zeros(data.shape, dtype=np.int8)
        firstNan = np.int64(np.ones(data.shape[1]) * len(myTime))

    if mode == naming.FAST_CAMERA_MODE:
        # fast mode does not use the first frame for fit
        data = data[1:, :]
        # time vector is shorter but must maintain the values
        orgMyTime = myTime
        myTime = myTime[1:]
    else:
        orgMyTime = myTime

    # masked arrays can be multi dimensional
    coef = np.zeros((order + 1, data.shape[1]))
    coefMask = np.ones((order + 1, data.shape[1]))
    if full:
        # diagonal has same mask as coef
        myDiag = np.zeros((order + 1, data.shape[1]))
        lengthVec = np.zeros(data.shape[1])

    if not tryLowerOrders:
        # Vandermonde matrices for length N of time series are
        # just the first N lines of the full matrix
        van = np.vander(myTime, order + 1)

        # first list entry contains array with all columns
        # that can fit full order
        # unmask everything that can do full order
        coefMask[:, fitColumnIndex[0]] = 0

        # now we need to loop through all the possible lengths
        # of time vectors
        goodLengths = np.int8(np.unique(firstNan[fitColumnIndex[0]]))

        # print(goodLengths)
        for k in goodLengths:
            # print('looping through all good lengths', k)
            tmpMask = np.where(firstNan == k,
                               np.zeros(firstNan.shape, dtype=np.int8), 1)
            # combine threshold and fitColumnMask
            tmpMask = thresholdMask + tmpMask
            tmpMask[tmpMask == 2] = 1
            # mask everything that cannot do full order
            yMa = ma.array(data, mask=tmpMask)
            cov = np.linalg.inv(np.dot(van[:k].T, van[:k]))
            coef = coef + ma.dot(cov, ma.dot(van[:k].T, yMa[:k])).data
            if full:
                tmpInd = np.where(firstNan == k)[0]
                myDiag[:, tmpInd] = np.repeat(np.diag(cov)[:, None],
                                              len(tmpInd),
                                              axis=1)
                lengthVec[tmpInd] = k

        # masked arrays can be multi dimensional
        coef = ma.array(coef, mask=coefMask)

        if not full:
            return coef
        myDiag = ma.array(myDiag, mask=coefMask)
        lengthVec = ma.array(lengthVec, mask=fitColumnMask[0])
        dataMask = thresholdMask + fitColumnMask[0]
        dataMask[dataMask == 2] = 1
        yMa = ma.array(data, mask=dataMask)

        # covariance matrix for parameters is (X.T X)-1 * sig2
        # where sig2 is variance of noise
        # use variance of residuals to estimate sig2
        # n number of points, p degree of polynomial + 1
        # sig2 = 1/(n-p)*sum(res_i^2)
        # coeffsig_i = c*sqrt(sig2*diag((X.T X)-1)_ii)
        # estimating variance for fit parameters
        fitX = np.tile(myTime, (yMa.shape[1], 1))
        fit = np.polyval(coef, fitX.T)
        res = (yMa - fit)
        if mode == naming.FAST_CAMERA_MODE:
            # for fast mode redo the fit so it matches input data shape
            # and add zeros to residuals
            fitX = np.tile(orgMyTime, (yMa.shape[1], 1))
            fit = np.polyval(coef, fitX.T)
            res = np.insert(res, 0, 0, axis=0)  # prepend a zero-residual row for the dropped frame
        yRes = yMa - ma.mean(yMa)
        # lengthVec - order -1 is effective degree of freedom
        effDf = lengthVec - (order + 1)
        rSquared = 1 - (ma.sum(res**2, axis=0)) / (ma.sum(yRes**2, axis=0))
        # r2Adj = 1 - (1-r2) * (lengthVec -1)/(effDf - 1)
        sig2 = 1 / (effDf) * ma.sum(res**2, axis=0)
        tValue = stats.t.ppf(alpha, effDf)
        tValue = ma.array(tValue, mask=fitColumnMask[0])
        coefSig = tValue * ma.sqrt(ma.multiply(myDiag, sig2))
        # tSquared = ma.divide(coef**2, (coefSig/tValue)**2)
        # significanHighestOrder = tSquared[0] < tValue**2
        # print(tValue**2)
        # print(tSquared[0])
        # print(significanHighestOrder)
        return coef, coefSig, fit, res, sig2, rSquared

    # else try for all other orders
    # Vandermonde matrices for length N of time series are
    # just the first N lines of the full matrix
    # we need different matrices for different lower orders
    van = []
    for i in range(order, 0, -1):
        print(i)
        van.append(np.vander(myTime, i + 1))
    print(van)
    return van
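The fit above solves the normal equations coef = inv(V.T V) V.T y column by column; a minimal standalone illustration with a synthetic order-2 polynomial:

import numpy as np

t = np.linspace(0.0, 1.0, 6)
y = 3.0 * t ** 2 - 2.0 * t + 1.0
V = np.vander(t, 3)  # columns: t**2, t, 1
coef = np.linalg.inv(V.T @ V) @ (V.T @ y)
print(coef)  # ~[3, -2, 1], highest order first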
Example #21
def validationExterne():
    """ Performs an external validation between the image produced by the
        downscaling and a land surface temperature image computed from bands 10
        and 11 of Landsat 8 (available on EarthData).

        The external validation results are quality metrics comparing the
        downscaling results with the surface temperature computed at 100m. These
        results are printed to the console (lines 129 to 144).
    """

    # Match prediction result extent
    landsat_b10 = Image(
        r'data/LC08_L1TP_014028_20200706_20200721_01_T1_B10.TIF')
    landsat_b10.reprojectMatch(
        r'data/MOD11_L2.clipped_test2.tif'.split(".")[0] +
        '_subdivided_100m.tif', False)
    landsat_b10.setNewFile(
        landsat_b10.filename.replace(".TIF", "_reproject.tif"))

    # Get TOA radiance
    b10_array = landsat_b10.getArray(masked=True,
                                     lower_valid_range=1,
                                     upper_valid_range=65535)
    b10_array_radiance = ma.add(ma.multiply(b10_array, 0.00033420), 0.10000)

    # Get Brightness Temperature
    b10_array_brightness_temp = (1321.0789 / (ma.log(
        (774.8853 / b10_array_radiance) + 1))) - 273.15

    # Get NDVI
    landsat_b4 = Image(
        r'data/LC08_L1TP_014028_20200706_20200721_01_T1_B4_reproject.tif')
    b4_DN = landsat_b4.getArray(masked=True,
                                lower_valid_range=1,
                                upper_valid_range=65535)
    b4 = np.add(np.multiply(b4_DN, float(0.00002)), float(-0.10))

    landsat_b5 = Image(
        r'data/LC08_L1TP_014028_20200706_20200721_01_T1_B5_reproject.tif')
    b5_DN = landsat_b5.getArray(masked=True,
                                lower_valid_range=1,
                                upper_valid_range=65535)
    b5 = np.add(np.multiply(b5_DN, float(0.00002)), float(-0.10))

    ndvi = np.divide(np.subtract(b5, b4),
                     np.add(b5, b4),
                     where=((np.add(b5, b4)) != 0))

    # Get proportion of vegetation
    min_ndvi = ma.amin(ndvi)
    max_ndvi = ma.amax(ndvi)

    pv = ma.power(
        ma.divide(ma.subtract(ndvi, min_ndvi),
                  (ma.subtract(max_ndvi, min_ndvi)),
                  where=(ma.subtract(max_ndvi, min_ndvi)) != 0), 2)

    # Get emissivity
    emissivity = 0.004 * pv + 0.986

    # Get Landsat 8 LST
    landsat_lst = b10_array_brightness_temp / (
        1 +
        (0.00115 * b10_array_brightness_temp / 1.4388) * ma.log(emissivity))

    # Save LST image for visualization
    landsat_b10.save_band(landsat_lst, r'data/landsat_lst.tif')

    # Validation between both arrays
    predicted_lst = ma.masked_invalid(
        Image(r'data/MODIS_predit_100m.tif').getArray())
    predicted_lst_with_residuals = ma.masked_invalid(
        Image(r'data/MODIS_predit_100m_avec_residus.tif').getArray())

    predicted_lst = ma.filled(predicted_lst, 0)
    predicted_lst_with_residuals = ma.filled(predicted_lst_with_residuals, 0)

    # Without residuals
    print('Without residual correction')
    print('Mean Absolute Error (MAE):',
          metrics.mean_absolute_error(predicted_lst, landsat_lst))
    print('Mean Squared Error:',
          metrics.mean_squared_error(predicted_lst, landsat_lst))
    print('Root Mean Squared Error:',
          np.sqrt(metrics.mean_squared_error(predicted_lst, landsat_lst)),
          "°C")
    print(
        'Accuracy:', 100 -
        np.mean(100 * ((abs(predicted_lst - landsat_lst)) / landsat_lst)), "%")
    print('Explained variance score (EVS):',
          metrics.explained_variance_score(predicted_lst, landsat_lst))

    # With residuals
    print("\n")
    print('With residual correction')
    print(
        'Mean Absolute Error (MAE):',
        metrics.mean_absolute_error(predicted_lst_with_residuals, landsat_lst))
    print(
        'Mean Squared Error:',
        metrics.mean_squared_error(predicted_lst_with_residuals, landsat_lst))
    print(
        'Root Mean Squared Error:',
        np.sqrt(
            metrics.mean_squared_error(predicted_lst_with_residuals,
                                       landsat_lst)), "°C")
    print(
        'Accuracy:', 100 - np.mean(100 * (
            (abs(predicted_lst_with_residuals - landsat_lst)) / landsat_lst)),
        "%")
    print(
        'Explained variance score (EVS):',
        metrics.explained_variance_score(predicted_lst_with_residuals,
                                         landsat_lst))
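The comparison metrics used above, demonstrated on toy arrays (illustrative values only):

import numpy as np
from sklearn import metrics

truth = np.array([20.0, 22.0, 25.0])
pred = np.array([19.5, 22.4, 24.1])
print(metrics.mean_absolute_error(pred, truth))          # MAE
print(np.sqrt(metrics.mean_squared_error(pred, truth)))  # RMSE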
Example #22
def rpBound(n):
    # Rademacher penalty bound with growth function m_H(n) = n**50 and delta = 0.05:
    # epsilon = sqrt((2/n) * ln(2n * n**50)) + sqrt((2/n) * ln(1/0.05)) + 1/n
    return sqrt(
        divide(multiply(2, log(multiply(multiply(
            2, n), power(n, 50)))), n)) + sqrt(
                multiply(divide(2, n), log(divide(1, 0.05)))) + divide(1, n)
Example #23
def vcBound(n):
    # Original VC bound with growth function m_H(2n) = (2n)**50 and delta = 0.05:
    # epsilon = sqrt((8/n) * ln(4 * (2n)**50 / 0.05))
    return sqrt(
        multiply(divide(8.0, n),
                 log(multiply(4, divide(power(multiply(2, n), 50), 0.05)))))
Example #24
def devroyeBound(n):
    # Devroye bound with growth function m_H(n**2) = n**100, solved for epsilon.
    # Ran into an overflow error performing the naive calculation, so the log is
    # decomposed: ln(4 * n**100 / 0.05) = ln(4) + 100*ln(n) - ln(0.05)
    return divide(1, (n - 2.0)) + sqrt(
        divide(1, power(n - 2.0, 2)) +
        multiply(divide(1, multiply(2, (n - 2.0))),
                 log(4) + multiply(100, log(n)) - log(0.05)))
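A minimal usage sketch comparing the four bounds above as n grows, assuming numpy's ufuncs are imported by name so the helper calls resolve (floats keep power(2n, 50) in float64 instead of overflowing int64):

from numpy import multiply, divide, power, log, sqrt

for n in (1000.0, 5000.0, 10000.0):
    print(n, vcBound(n), rpBound(n), pvbBound(n), devroyeBound(n))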