Example no. 1
import copy

import numpy as np

# lp.irls / lp.psi_huber are assumed to come from the linvpy package;
# util and gen are project-local helpers. The block below was quoted out
# in the original file (last_a and last_x are defined earlier there):
'''
last_y = np.dot(last_a, last_x)
last_y = np.reshape(last_y, (5, 1))
y_noise = 0.5 * np.random.rand(5, 1) + last_y

print("My m-estimator = ", lp.irls(last_a, y_noise, lp.psi_huber, clipping=1.5, lamb=0, scale=2))
'''

# Fix the number of measurements in the vector y
nmeasurements = 15

# Define the source vector x used to generate the matrix A and the vector y
x_base = np.ones((2, 1))  # fixed source

# Generate the sensing matrix A
a_base = util.getmatrix(2, 'random', nmeasurements)

# Generate the measurement vector y
y_base = util.getmeasurements(a_base, x_base, 'gaussian')

# Overwrite both with a small random test problem (5 measurements, 2 unknowns)
a_base, y_base = gen.generate_random(5, 2)

y_gui = copy.deepcopy(y_base)
a_gui = copy.deepcopy(a_base)

y_marta = copy.deepcopy(y_base.reshape(-1, 1))  # column vector
a_marta = copy.deepcopy(a_base)

# Define the parameters needed for basic tau...
lossfunction = 'optimal'
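
A minimal sketch of the tau fit this snippet sets up (an assumed
continuation, using the ln.basictau signature from the later examples):

# Assumed continuation: basic tau estimate on the generated problem.
clipping_opt = (0.4, 1.09)  # clipping pair for the 'optimal' loss
xhat, scale = ln.basictau(a_marta, y_marta, lossfunction, clipping_opt,
                          ninitialx=10, maxiter=100, nbest=1, lamb=0)
print('tau estimate =', xhat.ravel())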
Example no. 2
import os
import pickle

import matplotlib.pyplot as plt
import numpy as np

# util and ln are project-local modules; DATA_DIR and FIGURES_DIR are
# project-local path constants.


def experimentthree(nrealizations, outliers, measurementsize, sourcesize,
                    source):

    sourcetype = 'sparse'  # kind of source we want
    sparsity = 0.2
    matrixtype = 'illposed'  # type of sensing matrix
    conditionnumber = 1000  # desired condition number of the matrix
    noisetype = 'outliers'  # additive noise
    var = 3  # variance of the noise
    clippinghuber = 1.345  # clipping parameter for the Huber function
    clippingopt = (0.4, 1.09)  # clipping parameters for the optimal loss in the tau estimator
    ninitialsolutions = 10  # number of initial solutions for the tau estimator
    maxiter = 50
    nlmbd = 5  # how many different lambdas we try in each case
    realscale = 1
    x = source

    print('||x|| = ', np.linalg.norm(x))
    a = util.getmatrix(sourcesize, matrixtype, measurementsize,
                       conditionnumber)  # get the sensing matrix
    noutliers = outliers.size
    scaling = 1e-2
    realscale *= scaling

    if scaling == 1e-2:
        # Each proportion of outliers needs a different lambda grid.
        lmbdls = np.zeros((noutliers, nlmbd))  # lambdas for ls
        lmbdls[0, :] = np.logspace(-2, 3, nlmbd)
        lmbdls[1, :] = np.logspace(3, 5, nlmbd)
        lmbdls[2, :] = np.logspace(3.5, 5, nlmbd)
        lmbdls[3, :] = np.logspace(4, 5, nlmbd)
        lmbdls[4, :] = np.logspace(4, 5, nlmbd)

        lmbdlm = np.zeros((noutliers, nlmbd))  # lambdas for m
        lmbdlm[0, :] = np.logspace(-3, 3, nlmbd)
        lmbdlm[1, :] = np.logspace(-3, 3.5, nlmbd)
        lmbdlm[2, :] = np.logspace(-3, 4, nlmbd)
        lmbdlm[3, :] = np.logspace(-3, 4, nlmbd)
        lmbdlm[4, :] = np.logspace(-3, 4, nlmbd)

        lmbdlmes = np.zeros((noutliers, nlmbd))  # lambdas for m with estimated scale
        lmbdlmes[0, :] = np.logspace(-3, 3, nlmbd)
        lmbdlmes[1, :] = np.logspace(2, 3.5, nlmbd)
        lmbdlmes[2, :] = np.logspace(2.2, 3.5, nlmbd)
        lmbdlmes[3, :] = np.logspace(3, 4, nlmbd)
        lmbdlmes[4, :] = np.logspace(3.5, 4.5, nlmbd)

        lmbdltau = np.zeros((noutliers, nlmbd))  # lambdas for tau
        lmbdltau[0, :] = np.logspace(-3.5, 1, nlmbd)
        lmbdltau[1, :] = np.logspace(-3, 1, nlmbd)
        lmbdltau[2, :] = np.logspace(-3.5, 1, nlmbd)
        lmbdltau[3, :] = np.logspace(-3.5, 1, nlmbd)
        lmbdltau[4, :] = np.logspace(-1.5, 2, nlmbd)

    else:
        lmbdls = np.zeros((noutliers, nlmbd))  # lambdas for ls
        lmbdls[0, :] = np.logspace(2, 6, nlmbd)
        lmbdls[1, :] = np.logspace(7, 9, nlmbd)
        lmbdls[2, :] = np.logspace(7, 9, nlmbd)
        lmbdls[3, :] = np.logspace(7, 9, nlmbd)
        lmbdls[4, :] = np.logspace(6, 9, nlmbd)

        lmbdlm = np.zeros((noutliers, nlmbd))  # lambdas for m
        lmbdlm[0, :] = np.logspace(-2, 3.5, nlmbd)
        lmbdlm[1, :] = np.logspace(-2, 3.5, nlmbd)
        lmbdlm[2, :] = np.logspace(-2, 4, nlmbd)
        lmbdlm[3, :] = np.logspace(-2, 4, nlmbd)
        lmbdlm[4, :] = np.logspace(-2, 4, nlmbd)

        lmbdlmes = np.zeros((noutliers, nlmbd))  # lambdas for m with estimated scale
        lmbdlmes[0, :] = np.logspace(-1, 4, nlmbd)
        lmbdlmes[1, :] = np.logspace(-0.5, 4, nlmbd)
        lmbdlmes[2, :] = np.logspace(-1, 5, nlmbd)
        lmbdlmes[3, :] = np.logspace(-1, 5, nlmbd)
        lmbdlmes[4, :] = np.logspace(0, 6, nlmbd)

        lmbdltau = np.zeros((noutliers, nlmbd))  # lambdas for tau
        lmbdltau[0, :] = np.logspace(-2, 2, nlmbd)
        lmbdltau[1, :] = np.logspace(-2, 2, nlmbd)
        lmbdltau[2, :] = np.logspace(-1.5, 2, nlmbd)
        lmbdltau[3, :] = np.logspace(-1.5, 2.5, nlmbd)
        lmbdltau[4, :] = np.logspace(-1.5, 3, nlmbd)

    errorls = np.zeros((noutliers, nlmbd, nrealizations))  # results for ls
    errormes = np.zeros((noutliers, nlmbd, nrealizations))  # results for m with estimated scale
    errorm = np.zeros((noutliers, nlmbd, nrealizations))  # results for m
    errortau = np.zeros((noutliers, nlmbd, nrealizations))  # results for tau

    k = 0
    while k < noutliers:
        print('number of outliers %s' % k)
        t = 0
        while t < nlmbd:
            print('lambdas %s' % t)
            r = 0
            while r < nrealizations:
                y = util.getmeasurements(a, x, noisetype, var,
                                         outliers[k])  # get the measurements
                ascaled = a * scaling  # scale the data to avoid numerical problems with cvx
                yscaled = y * scaling

                # -------- ls solution
                # xhat = inv.lasso(yscaled, ascaled, lmbdls[k, t])
                xhat = ln.lasso_regularization(ascaled, yscaled,
                                               lambda_parameter=lmbdls[k, t])
                xhat = xhat.reshape(-1, 1)
                error = np.linalg.norm(x - xhat)
                errorls[k, t, r] = error

                # -------- m solution with estimated scale
                xpreliminary = xhat  # use the ls fit to estimate a preliminary scale
                respreliminary = yscaled - np.dot(ascaled, xpreliminary)
                estimatedscale = np.median(
                    np.abs(respreliminary)) / 0.6745  # robust MAD estimate of the scale
                # xhat = inv.mlasso(yscaled, ascaled, 'huber', clippinghuber, estimatedscale, lmbdlmes[k, t])
                xhat = ln.m_estimator(ascaled, yscaled, 'optimal',
                                      clippinghuber, estimatedscale,
                                      regularization=ln.lasso_regularization,
                                      lmbd=lmbdlmes[k, t])
                xhat = xhat.reshape(-1, 1)
                error = np.linalg.norm(x - xhat)
                errormes[k, t, r] = error

                # -------- m solution with the real scale
                # xhat = inv.mlasso(yscaled, ascaled, 'huber', clippinghuber, realscale, lmbdlm[k, t])
                xhat = ln.m_estimator(ascaled, yscaled, 'optimal',
                                      clippinghuber, realscale,
                                      regularization=ln.lasso_regularization,
                                      lmbd=lmbdlm[k, t])
                xhat = xhat.reshape(-1, 1)
                error = np.linalg.norm(x - xhat)
                errorm[k, t, r] = error

                # -------- tau solution
                # xhat, scale = inv.fasttaulasso(yscaled, ascaled, 'optimal', clippingopt, ninitialsolutions, lmbdltau[k, t], maxiter)
                xhat, scale = ln.basictau(ascaled, yscaled, 'optimal',
                                          clippingopt,
                                          ninitialx=ninitialsolutions,
                                          maxiter=maxiter, nbest=1,
                                          regularization=ln.lasso_regularization,
                                          lamb=lmbdltau[k, t])
                xhat = xhat.reshape(-1, 1)
                error = np.linalg.norm(x - xhat)
                errortau[k, t, r] = error

                print('error = ', error)
                print('error shape =', (x - xhat).shape)
                print('% of outliers =', outliers[k])
                print('lambda idx = ', t)
                print('---------------------')

                r += 1  # next realization
            t += 1  # next lambda
        k += 1  # next outlier proportion

    minls = np.min(errorls, 1)
    minm = np.min(errorm, 1)
    minmes = np.min(errormes, 1)
    mintau = np.min(errortau, 1)

    avgls = np.mean(minls, 1)
    avgm = np.mean(minm, 1)
    avgmes = np.mean(minmes, 1)
    avgtau = np.mean(mintau, 1)

    pickle.dump(avgls, open("ls.p", "wb"))
    pickle.dump(avgm, open("m.p", "wb"))
    pickle.dump(avgmes, open("mes.p", "wb"))
    pickle.dump(avgtau, open("tau.p", "wb"))

    fthree, axthree = plt.subplots(noutliers, sharex=True)  # check that we pick the best lambda
    cnt = 0
    while cnt < noutliers:
        axthree[cnt].plot(lmbdlmes[0, :], errormes[cnt, :, 1])
        axthree[cnt].set_xscale('log')
        cnt += 1
    axthree[0].set_title('M estimator estimated scale')
    plt.show()

    # Further diagnostic plots (errorm and errortau against the same lambda
    # grids, for other realizations) were disabled here; they follow the
    # same subplot pattern as above.

    # store results
    name_file = 'experiment_three.pkl'
    fl = os.path.join(DATA_DIR, name_file)
    f = open(fl, 'wb')
    pickle.dump([avgls, avgm, avgmes, avgtau], f)
    f.close()

    fig = plt.figure()
    plt.plot(outliers, avgls, 'r--', label='ls')
    plt.plot(outliers, avgm, 'bs--', label='m estimator')
    plt.plot(outliers, avgmes, 'g^-', label='m est. scale')
    plt.plot(outliers, avgtau, 'kd-', label='tau')
    plt.legend(loc=2)  # plot legend
    plt.xlabel('% outliers')  # plot x label
    plt.ylabel('error')  # plot y label

    name_file = 'experiment_three.eps'
    fl = os.path.join(FIGURES_DIR, name_file)
    fig.savefig(fl, format='eps')
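
A hypothetical driver for experimentthree (all sizes and the getsource
call are assumed; note that the lambda grids above hard-code exactly five
outlier proportions, rows 0 through 4):

# Hypothetical driver: sparse 10-dimensional source, 20 measurements.
outliers = np.array([0.0, 0.1, 0.2, 0.3, 0.4])  # must have 5 entries
x = util.getsource('sparse', 10)  # project helper, referenced in the comments above
experimentthree(nrealizations=10, outliers=outliers,
                measurementsize=20, sourcesize=10, source=x)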
Example no. 3
import os
import pickle
import sys

import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D  # 3-D surface plot

# util, inv and ln are project-local modules, as in the other examples.


def sensitivitycurve(estimator, lossfunction, regularization, yrange, arange,
                     nmeasurements, points):

    x = 1.5  # fixed source
    a = util.getmatrix(1, 'random', nmeasurements)  # get the sensing matrix
    y = util.getmeasurements(a, x, 'gaussian')
    if estimator == 'tau' and regularization == 'none':
        # xhat, shat = inv.fasttau(y, a, lossfunction, (0.4, 1.09), 10, 3, 3)  # solution without outliers
        xhat, shat = ln.basictau(a, y, 'optimal', [0.4, 1.09], ninitialx=30,
                                 maxiter=100, nbest=1, lamb=0)
    elif estimator == 'tau' and regularization == 'l2':
        # xhat, shat = inv.basictauridge(y, a, lossfunction, (0.4, 1.09), 20, 0.1)  # solution without outliers
        xhat, shat = ln.basictau(a, y, 'optimal', [0.4, 1.09], ninitialx=30,
                                 maxiter=100, nbest=1,
                                 regularization=ln.tikhonov_regularization,
                                 lamb=0.1)
    elif estimator == 'tau' and regularization == 'l1':
        # xhat, shat = inv.fasttaulasso(y, a, lossfunction, (0.4, 1.09), 30, 0.1)  # solution without outliers
        xhat, shat = ln.basictau(a, y, 'optimal', [0.4, 1.09], ninitialx=30,
                                 maxiter=100, nbest=1,
                                 regularization=ln.lasso_regularization,
                                 lamb=0.1)
    elif estimator == 'M' and regularization == 'none':
        xhat = inv.mestimator(y, a, lossfunction, 1.345, 1)  # solution without outliers
    else:
        sys.exit('I do not know the estimator %s' % estimator)  # die gracefully
    y0 = np.linspace(-yrange, yrange, points)
    a0 = np.linspace(-arange, arange, points)
    sc = np.zeros((points, points))
    for i in np.arange(points):
        for j in np.arange(points):
            yout = np.insert(y, nmeasurements, y0[i])  # append one contaminating measurement
            yout = np.expand_dims(yout, axis=1)
            aout = np.insert(a, nmeasurements, a0[j])
            aout = np.expand_dims(aout, axis=1)
            if estimator == 'tau' and regularization == 'none':
                # xout, sout = inv.fasttau(yout, aout, lossfunction, (0.4, 1.09), 10, 3, 3)  # solution with one outlier
                xout, sout = ln.basictau(aout, yout, 'optimal', [0.4, 1.09],
                                         ninitialx=10, maxiter=100, nbest=1,
                                         lamb=0)
                # change in the estimate divided by the contamination mass 1/(n+1)
                sc[i, j] = (xout - xhat) / (1 / (nmeasurements + 1))
            elif estimator == 'M' and regularization == 'none':
                xout = inv.mestimator(yout, aout, 'huber', 1.345, 1)  # solution with one outlier
                sc[i, j] = (xout - xhat) / (1 / (nmeasurements + 1))
            elif estimator == 'tau' and regularization == 'l2':
                xout, sout = inv.basictauridge(yout, aout, lossfunction,
                                               (0.4, 1.09), 20, 0.1)  # solution with one outlier
                sc[i, j] = (xout - xhat) / (1 / (nmeasurements + 1))
            elif estimator == 'tau' and regularization == 'l1':
                # xout, sout = inv.fasttaulasso(yout, aout, lossfunction, (0.4, 1.09), 30, 0.1)  # solution with one outlier
                xout, sout = ln.basictau(aout, yout, 'optimal', [0.4, 1.09],
                                         ninitialx=30, maxiter=100, nbest=1,
                                         regularization=ln.lasso_regularization,
                                         lamb=0.01)
                sc[i, j] = (xout - xhat) / (1 / (nmeasurements + 1))
            else:
                sys.exit('I do not know the estimator %s' % estimator)  # die gracefully

    x, y = np.mgrid[0:points, 0:points]

    name_file = 'sc_' + regularization + '.pkl'
    fl = os.path.join(DATA_DIR, name_file)
    f = open(fl, 'wb')
    pickle.dump(sc, f)
    f.close()

    f = sc.T  # transpose it, for correct visualization
    fig = plt.figure()
    ax = Axes3D(fig)
    ax.plot_surface(x, y, f, rstride=1, cstride=1)
    plt.xlabel('a0')
    plt.ylabel('y0')

    name_file = 'sc_' + regularization + '.eps'
    fl = os.path.join(FIGURES_DIR, name_file)
    fig.savefig(fl, format='eps')

    return sc
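
A hypothetical call (all values assumed): the sensitivity surface of the
unregularized tau estimator, scanning one contaminating measurement
(a0, y0) over a 25 x 25 grid around 20 clean measurements:

# Hypothetical call; returns the (points x points) sensitivity surface.
sc = sensitivitycurve(estimator='tau', lossfunction='optimal',
                      regularization='none', yrange=10, arange=10,
                      nmeasurements=20, points=25)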
Example no. 4
import os
import pickle

import matplotlib.pyplot as plt
import numpy as np

# util, inv and ln are project-local modules, as in the other examples.


def experimenttwo(nrealizations, outliers, measurementsize, sourcesize,
                  source):

    matrixtype = 'illposed'  # type of sensing matrix
    conditionnumber = 1000  # desired condition number of the matrix
    noisetype = 'outliers'  # additive noise
    clippinghuber = 1.345  # clipping parameter for the Huber function
    clippingopt = (0.4, 1.09)  # clipping parameters for the optimal loss in the tau estimator
    ninitialsolutions = 50  # number of initial solutions for the tau estimator
    realscale = 1
    var = 3
    x = source  # load stored source
    # x = util.getsource(sourcetype, sourcesize)  # get the ground truth
    a = util.getmatrix(sourcesize, matrixtype, measurementsize,
                       conditionnumber)  # get the sensing matrix
    noutliers = outliers.size
    nlmbd = 6  # how many different lambdas we try in each case

    # Each proportion of outliers needs a different lambda grid.
    lmbdls = np.zeros((noutliers, nlmbd))  # lambdas for ls
    lmbdls[0, :] = np.logspace(0, 3, nlmbd)
    lmbdls[1, :] = np.logspace(7, 10, nlmbd)
    lmbdls[2, :] = np.logspace(8, 11, nlmbd)
    lmbdls[3, :] = np.logspace(8, 11, nlmbd)
    lmbdls[4, :] = np.logspace(9, 11, nlmbd)

    lmbdm = np.zeros((noutliers, nlmbd))  # lambdas for m
    lmbdm[0, :] = np.logspace(-1, 1, nlmbd)
    lmbdm[1, :] = np.logspace(-1, 2, nlmbd)
    lmbdm[2, :] = np.logspace(-1, 2, nlmbd)
    lmbdm[3, :] = np.logspace(1, 3.5, nlmbd)
    lmbdm[4, :] = np.logspace(1, 4, nlmbd)

    lmbdmes = np.zeros((noutliers, nlmbd))  # lambdas for m with estimated scale
    lmbdmes[0, :] = np.logspace(1, 4, nlmbd)
    lmbdmes[1, :] = np.logspace(4, 6, nlmbd)
    lmbdmes[2, :] = np.logspace(4, 6, nlmbd)
    lmbdmes[3, :] = np.logspace(4, 6, nlmbd)
    lmbdmes[4, :] = np.logspace(4, 6, nlmbd)

    lmbdtau = np.zeros((noutliers, nlmbd))  # lambdas for tau
    lmbdtau[0, :] = np.logspace(-2, 1, nlmbd)
    lmbdtau[1, :] = np.logspace(-2, 2, nlmbd)
    lmbdtau[2, :] = np.logspace(-1, 2, nlmbd)
    lmbdtau[3, :] = np.logspace(0, 2, nlmbd)
    lmbdtau[4, :] = np.logspace(2, 4, nlmbd)

    errorls = np.zeros((noutliers, nlmbd, nrealizations))  # results for ls
    errormes = np.zeros((noutliers, nlmbd, nrealizations))  # results for m with estimated scale
    errorm = np.zeros((noutliers, nlmbd, nrealizations))  # results for m
    errortau = np.zeros((noutliers, nlmbd, nrealizations))  # results for tau
    k = 0
    while k < noutliers:
        t = 0
        print('outliers %s' % k)
        while t < nlmbd:
            print('lambda %s' % t)
            r = 0
            while r < nrealizations:
                y = util.getmeasurements(a, x, noisetype, var,
                                         outliers[k])  # get the measurements
                # -------- ls solution
                xhat = inv.ridge(y, a, lmbdls[k, t])  # solve the problem with ls
                error = np.linalg.norm(x - xhat)
                errorls[k, t, r] = error
                # -------- m solution with estimated scale
                xpreliminary = xhat  # use the ls fit to estimate a preliminary scale
                respreliminary = y - np.dot(a, xpreliminary)
                estimatedscale = np.median(
                    np.abs(respreliminary)) / 0.6745  # robust MAD estimate of the scale
                xhat = inv.mridge(y, a, 'huber', clippinghuber, estimatedscale,
                                  lmbdmes[k, t])  # solve the problem with m
                error = np.linalg.norm(x - xhat)
                errormes[k, t, r] = error
                # -------- m solution with the real scale
                xhat = inv.mridge(y, a, 'huber', clippinghuber, realscale,
                                  lmbdm[k, t])  # solve the problem with m
                error = np.linalg.norm(x - xhat)
                errorm[k, t, r] = error
                # -------- tau solution
                xhat, scale = ln.basictau(a, y, 'optimal', clippingopt,
                                          ninitialx=ninitialsolutions,
                                          maxiter=100, nbest=1,
                                          regularization=ln.tikhonov_regularization,
                                          lamb=lmbdtau[k, t])
                error = np.linalg.norm(x - xhat)
                errortau[k, t, r] = error
                r += 1  # next realization
            t += 1  # next lambda
        k += 1  # next outlier proportion

    minls = np.min(errorls, 1)
    minm = np.min(errorm, 1)
    minmes = np.min(errormes, 1)
    mintau = np.min(errortau, 1)

    avgls = np.mean(minls, 1)
    avgm = np.mean(minm, 1)
    avgmes = np.mean(minmes, 1)
    avgtau = np.mean(mintau, 1)

    fone, axone = plt.subplots(noutliers, sharex=True)  # check that we pick the best lambda
    cnt = 0
    while cnt < noutliers:
        axone[cnt].plot(lmbdls[0, :], errorls[cnt, :, 1])
        axone[cnt].set_xscale('log')
        cnt += 1
    axone[0].set_title('LS')
    # plt.show()

    ftwo, axtwo = plt.subplots(noutliers, sharex=True)
    cnt = 0
    while cnt < noutliers:
        axtwo[cnt].plot(lmbdm[0, :], errorm[cnt, :, 1])
        axtwo[cnt].set_xscale('log')
        cnt += 1
    axtwo[0].set_title('M estimator')
    # plt.show()

    fthree, axthree = plt.subplots(noutliers, sharex=True)
    cnt = 0
    while cnt < noutliers:
        axthree[cnt].plot(lmbdmes[0, :], errormes[cnt, :, 1])
        axthree[cnt].set_xscale('log')
        cnt += 1
    axthree[0].set_title('M estimator est. scale')
    # plt.show()

    ffour, axfour = plt.subplots(noutliers, sharex=True)
    cnt = 0
    while cnt < noutliers:
        axfour[cnt].plot(lmbdtau[0, :], errortau[cnt, :, 1])
        axfour[cnt].set_xscale('log')
        cnt += 1
    axfour[0].set_title('tau estimator')
    # plt.show()

    # store results
    name_file = 'experiment_two.pkl'
    fl = os.path.join(DATA_DIR, name_file)
    f = open(fl, 'wb')
    pickle.dump([avgls, avgm, avgmes, avgtau], f)
    f.close()

    fig = plt.figure()
    plt.plot(outliers, avgls, 'r--', label='ls')
    plt.plot(outliers, avgm, 'bs--', label='m estimator')
    plt.plot(outliers, avgmes, 'g^-', label='m est. scale')
    plt.plot(outliers, avgtau, 'kd-', label='tau')
    plt.legend(loc=2)
    plt.xlabel('% outliers')
    plt.ylabel('error')

    name_file = 'experiment_two.eps'
    fl = os.path.join(FIGURES_DIR, name_file)
    fig.savefig(fl, format='eps')
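
The preliminary scale used by the m-with-estimated-scale branch above is
the normalized median absolute deviation (MAD) of the residuals of a
first, non-robust fit; a standalone sketch with assumed variable names:

# MAD scale estimate: median absolute residual divided by 0.6745,
# which makes it consistent for Gaussian noise.
res = y - np.dot(a, xhat_ls)  # xhat_ls: any preliminary estimate (assumed name)
scale_mad = np.median(np.abs(res)) / 0.6745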
Example no. 5
import os
import pickle

import matplotlib.pyplot as plt
import numpy as np

# util, inv and ln are project-local modules, as in the other examples.


def experimentone(nrealizations, outliers, measurementsize, sourcesize,
                  source):

    sourcetype = 'random'  # kind of source we want
    matrixtype = 'random'  # type of sensing matrix
    noisetype = 'outliers'  # additive noise
    clippinghuber = 1.345  # clipping parameter for the Huber function
    clippingopt = (0.4, 1.09)  # clipping parameters for the optimal loss in the tau estimator
    ninitialsolutions = 10  # number of initial solutions for the tau estimator
    realscale = 1
    var = 1
    x = source
    # x = util.getsource(sourcetype, sourcesize)  # get the ground truth
    a = util.getmatrix(sourcesize, matrixtype,
                       measurementsize)  # get the sensing matrix
    noutliers = outliers.size
    averrorls = np.zeros((noutliers, 1))  # accumulated error for ls
    averrorm = np.zeros((noutliers, 1))  # accumulated error for m
    averrormes = np.zeros((noutliers, 1))  # accumulated error for m with estimated scale
    averrortau = np.zeros((noutliers, 1))  # accumulated error for tau
    k = 0
    while k < noutliers:
        r = 0
        while r < nrealizations:
            y = util.getmeasurements(a, x, noisetype, var,
                                     outliers[k])  # get the measurements
            # -------- ls solution
            xhat = inv.leastsquares(y, a)  # solve the problem with ls
            error = np.linalg.norm(x - xhat)
            averrorls[k] += error
            # -------- m solution with estimated scale
            xpreliminary = xhat  # use the ls fit to estimate a preliminary scale
            respreliminary = y - np.dot(a, xpreliminary)
            estimatedscale = np.median(
                np.abs(respreliminary)) / 0.6745  # robust MAD estimate of the scale
            xhat = inv.mestimator(y, a, 'huber', clippinghuber,
                                  estimatedscale)  # solve the problem with m
            error = np.linalg.norm(x - xhat)
            averrormes[k] += error
            # -------- m solution with the real scale
            xhat = inv.mestimator(y, a, 'huber', clippinghuber,
                                  realscale)  # solve the problem with m
            error = np.linalg.norm(x - xhat)
            averrorm[k] += error
            # -------- tau solution
            # xhat, scale = inv.fasttau(y, a, 'optimal', clippingopt, ninitialsolutions)
            xhat, obj = ln.basictau(a, y, 'optimal', clippingopt,
                                    ninitialx=ninitialsolutions, maxiter=100,
                                    nbest=1, lamb=0)
            error = np.linalg.norm(x - xhat)
            averrortau[k] += error
            r += 1  # next realization
        k += 1  # next outlier proportion

    averrorls = averrorls / nrealizations  # compute average
    averrorm = averrorm / nrealizations
    averrormes = averrormes / nrealizations
    averrortau = averrortau / nrealizations

    # store results
    name_file = 'experiment_one.pkl'
    fl = os.path.join(DATA_DIR, name_file)
    f = open(fl, 'wb')
    pickle.dump([averrorls, averrorm, averrormes, averrortau], f)
    f.close()

    fig = plt.figure()
    plt.plot(outliers, averrorls, 'r--', label='ls')
    plt.plot(outliers, averrorm, 'bs--', label='m estimator')
    plt.plot(outliers, averrormes, 'g^-', label='m est. scale')
    plt.plot(outliers, averrortau, 'kd-', label='tau')
    plt.legend(loc=2)
    plt.xlabel('% outliers')
    plt.ylabel('error')

    name_file = 'experiment_one.eps'
    fl = os.path.join(FIGURES_DIR, name_file)
    fig.savefig(fl, format='eps')
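
A short reader-side sketch (assumed): loading the list that experimentone
pickles into DATA_DIR above:

# Load the averaged errors stored by experimentone.
with open(os.path.join(DATA_DIR, 'experiment_one.pkl'), 'rb') as f:
    averrorls, averrorm, averrormes, averrortau = pickle.load(f)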
Example no. 6
import os
import pickle
import sys

import matplotlib.pyplot as plt
import numpy as np

# util, inv and ln are project-local modules, as in the other examples.


def asv(estimator, regularization, lrange, lstep, nrealizations):

    lmbds = np.arange(0, lrange, lstep)
    nlmbd = np.size(lmbds)
    estimates = np.zeros((nlmbd, nrealizations))
    sourcesize = 1  # dimension of the source
    matrixtype = 'random'  # type of sensing matrix
    noisetype = 'gaussian'  # additive noise
    measurementsize = 1000  # number of measurements
    x = 1.5
    for idx, lbd in enumerate(lmbds):
        print('Current lambda =', lbd)
        k = 0  # counter
        while k < nrealizations:
            a = util.getmatrix(sourcesize, matrixtype,
                               measurementsize)  # get the sensing matrix
            y = util.getmeasurements(a, x, noisetype)  # get the measurements
            if estimator == 'tau' and regularization == 'l2':
                # xhat, obj = inv.basictauridge(y, a, 'optimal', (0.4, 1), 10, lmbds[i])
                xhat, obj = ln.basictau(a, y, 'optimal', [0.4, 1],
                                        ninitialx=10, maxiter=100, nbest=1,
                                        regularization=ln.tikhonov_regularization,
                                        lamb=lbd)
            elif estimator == 'tau' and regularization == 'l1':
                # xhat, obj = inv.fasttaulasso(y, a, 'optimal', (0.4, 1), 10, lmbds[i])
                xhat, obj = ln.basictau(a, y, 'optimal', [0.4, 1],
                                        ninitialx=10, maxiter=100, nbest=1,
                                        regularization=ln.lasso_regularization,
                                        lamb=lbd)
            elif estimator == 'ls' and regularization == 'l2':
                xhat = inv.ridge(y, a, lbd)
            elif estimator == 'ls' and regularization == 'l1':
                xhat = inv.lasso(y, a, lbd)
            elif estimator == 'huber' and regularization == 'l2':
                xhat = inv.mridge(y, a, 'huber', 1.345, 1, lbd)
            else:
                sys.exit('I do not know the estimator %s' % estimator)  # die gracefully
            estimates[idx, k] = xhat
            k += 1
    vr = np.var(estimates, 1)  # empirical variance across realizations
    av = measurementsize * vr  # asymptotic variance: n times the empirical variance

    avg = np.mean(estimates, 1)

    # store results
    name_file = 'asv_' + regularization + '.pkl'
    fl = os.path.join(DATA_DIR, name_file)
    f = open(fl, 'wb')
    pickle.dump([av, avg, lmbds], f)
    f.close()

    fig = plt.figure()
    plt.plot(lmbds, av)
    plt.xlabel('lambda')
    plt.ylabel('asv')

    name_file = 'asv_' + regularization + '.eps'
    fl = os.path.join(FIGURES_DIR, name_file)
    fig.savefig(fl, format='eps')
    # plt.show()

    return av, lmbds
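
A hypothetical call (all values assumed): asymptotic variance of the
l2-regularized tau estimator over lambda in [0, 2) with step 0.1:

# Hypothetical call; av[i] is measurementsize * var(estimate) at lmbds[i].
av, lmbds = asv('tau', 'l2', lrange=2, lstep=0.1, nrealizations=50)
print('lambda with the smallest asv:', lmbds[np.argmin(av)])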