Example No. 1
# Imports needed by this test; myfloat is assumed to be np.float64, the
# dtype used by the spams test suite.
import time

import numpy as np
from PIL import Image
import spams

myfloat = np.float64
def test_archetypalAnalysis():
    img_file = 'lena.png'
    try:
        img = Image.open(img_file)
    except Exception as e:
        print("Cannot load image %s (%s) : skipping test" %(img_file,e))
        return None
    I = np.array(img) / 255.
    if I.ndim == 3:
        A = np.asfortranarray(I.reshape((I.shape[0],I.shape[1] * I.shape[2])),dtype = myfloat)
        rgb = True
    else:
        A = np.asfortranarray(I,dtype = myfloat)
        rgb = False

    m = 8; n = 8  # patch size for im2col_sliding
    X = spams.im2col_sliding(A,m,n,rgb)

    X = X - np.tile(np.mean(X, 0), (X.shape[0], 1))  # center each patch
    X = np.asfortranarray(X / np.tile(np.sqrt((X * X).sum(axis=0)),
                                      (X.shape[0], 1)), dtype=myfloat)  # l2-normalize columns
    K = 64  # learn a dictionary with 64 archetypes
    robust = False  # whether to use robust archetypal analysis (default: True)
    epsilon = 1e-3  # width of the Huber loss (default: 1e-3)
    computeXtX = True  # whether to precompute and store X^T X (default: True)
    stepsFISTA = 0  # number of FISTA alternations (default: 3)
    # FISTA uses a fixed inner loop of 50 iterations; descent at each FISTA
    # step is not guaranteed if 50 is too small
    stepsAS = 10  # number of active-set alternations (default: 50)
    randominit = True  # random initialization (default: True); unused below

    ############# FIRST EXPERIMENT  ##################
    tic = time.time()
    # learn archetypes using activeSet method for each convex sub-problem
    (Z, A, B) = spams.archetypalAnalysis(
        np.asfortranarray(X[:, :10000]), returnAB=True, p=K, robust=robust,
        epsilon=epsilon, computeXtX=computeXtX, stepsFISTA=stepsFISTA,
        stepsAS=stepsAS, numThreads=-1)
    tac = time.time()
    t = tac - tic
    print('time of computation for Archetypal Dictionary Learning: %f' %t)

    print('Evaluating cost function...')
    alpha = spams.decompSimplex(np.asfortranarray(X[:, :10000]),Z = Z, computeXtX = True, numThreads = -1)
    xd = X[:, :10000] - Z * alpha  # alpha is sparse, so * is a matrix product
    R = np.sum(xd * xd)  # squared Frobenius norm of the residual
    print("objective function: %f" %R)

    ############# SECOND EXPERIMENT  ##################
    tic = time.time()
    # continue learning from the previous archetypes (warm start via Z0)
    Z2 = spams.archetypalAnalysis(
        np.asfortranarray(X[:, :10000]), Z0=Z, robust=robust, epsilon=epsilon,
        computeXtX=computeXtX, stepsFISTA=stepsFISTA, stepsAS=stepsAS,
        numThreads=-1)
    tac = time.time()
    t = tac - tic
    print('time of computation for Archetypal Dictionary Learning (Continue): %f' %t)

    print('Evaluating cost function...')
    alpha = spams.decompSimplex(np.asfortranarray(X[:, :10000]),Z = np.asfortranarray(Z2), computeXtX = True, numThreads = -1)
    xd = X[:,:10000] - Z2 * alpha
    R = np.sum(xd*xd)
    print("objective function: %f" %R)

    ############# THIRD EXPERIMENT  ##################
    tic = time.time()
    # learn archetypes with the robust (Huber-loss) variant
    (Z3, A3, B3) = spams.archetypalAnalysis(
        np.asfortranarray(X[:, :10000]), returnAB=True, p=K, robust=True,
        epsilon=epsilon, computeXtX=computeXtX, stepsFISTA=stepsFISTA,
        stepsAS=stepsAS, numThreads=-1)
    tac = time.time()
    t = tac - tic
    print('time of computation for Robust Archetypal Dictionary Learning: %f' % t)
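
For quick experimentation without the image pipeline, here is a minimal, self-contained sketch along the same lines (assuming only that spams and NumPy are installed): it learns archetypes on random data and evaluates the same residual objective with decompSimplex.

import numpy as np
import spams

np.random.seed(0)
# 64-dimensional signals, one per column, in Fortran order as spams requires
X = np.asfortranarray(np.random.normal(size=(64, 1000)))
Z = spams.archetypalAnalysis(X, p=16, robust=False, epsilon=1e-3,
                             computeXtX=True, stepsFISTA=3, stepsAS=50,
                             numThreads=-1)
alpha = spams.decompSimplex(X, Z=np.asfortranarray(Z), computeXtX=True,
                            numThreads=-1)
xd = X - Z * alpha  # alpha is sparse, so * is a matrix product
print('objective function: %f' % np.sum(xd * xd))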
Example No. 2
def run_AA(data,
           n_archetypes,
           true_archetypal_coords=None,
           true_archetypes=None,
           method='PCHA',
           n_subsample=None,
           n_batches=40000,
           latent_noise=0.05,
           arch=[1024, 512, 256, 128],
           seed=42):
    """Runs Chen at al. 2014 on input data and calculates errors on the
    data in the archetypal space and the error between the learned vs true
    archetypes.

    Parameters
    ----------
    data : [samples, features]
        Data in the feature space
    true_archetypal_coords : [samples, archetypes]
        Ground truth archetypal coordinates. Rows must sum to 1.
    true_archetypes : [archetypes, features]
        Ground truth archetypes in the feature space
    n_archetypes : int
        Number of archetypes to be learned
    method : ['PCHA', 'kernelPCHA', 'Chen', 'Javadi', 'NMF', 'PCHA_on_AE', 'AAnet']
        The method to use for archetypal analysis
    n_subsample : int
        Number of data points to subsample
    n_batches : int
        Number of batches used to train AAnet or the autoencoder
    latent_noise : float
        Std. of the noise added in the latent space while training AAnet
    arch : list of int
        Layer widths of the encoder/decoder networks
    seed : int
        Random seed
    Returns
    -------
    mse_archetypes: float
        Mean squared error between the learned archetypes and the ground
        truth archetypes as calculated in the feature space
    mse_encoding: float
        Mean squared error between the true coordinates of the data in the
        archetypal space and the coordinates in the learned space
    new_archetypal_coords: [samples, archetypes]
        Learned encoding of the samples in the archetypal space
    new_archetypes: [archetypes, features]
        Learned archetypes in the feature space
    toc: float
        Wall-clock runtime in seconds
    """
    tic = time.time()
    # Select a subsample of the data
    np.random.seed(seed)
    if n_subsample is not None:
        r_idx = np.random.choice(data.shape[0], n_subsample, replace=False)
        data = data[r_idx, :]  # otherwise really slow
        if true_archetypal_coords is not None:
            true_archetypal_coords = true_archetypal_coords[r_idx, :]

    if method == 'Chen':
        '''AA as implemented in Chen et al. 2014 https://arxiv.org/abs/1405.6472'''
        new_archetypes, new_archetypal_coords, _ = sp.archetypalAnalysis(
            np.asfortranarray(data.T),
            p=n_archetypes,
            returnAB=True,
            numThreads=-1)

        # Fix transposition
        new_archetypal_coords = new_archetypal_coords.toarray().T
        new_archetypes = new_archetypes.T
    elif method == 'Javadi':
        '''AA as implemented in Javadi et al. 2017 https://arxiv.org/abs/1705.02994'''

        new_archetypal_coords, new_archetypes, _, _ = javadi.acc_palm_nmf(
            data, r=n_archetypes, maxiter=25, plotloss=False, ploterror=False)
    elif method == 'PCHA':
        '''Principal convex hull analysis as implemented by Morup and Hansen 2012.
        https://www.sciencedirect.com/science/article/pii/S0925231211006060 '''
        new_archetypes, new_archetypal_coords, _, _, _ = PCHA(data.T,
                                                              noc=n_archetypes)
        new_archetypes = np.array(new_archetypes.T)
        new_archetypal_coords = np.array(new_archetypal_coords.T)

    elif method == 'kernelPCHA':
        '''PCHA in a kernel space as described by Morup and Hansen 2012.
        https://www.sciencedirect.com/science/article/pii/S0925231211006060 '''
        # Linear kernel; a Gaussian kernel is left as a commented-out alternative
        # D = scipy.spatial.distance.squareform(scipy.spatial.distance.pdist(data))
        # sigma = np.std(D)
        # K = np.exp(-((D**2) / sigma))
        K = data @ data.T
        _, new_archetypal_coords, C, _, _ = PCHA(K, noc=n_archetypes)
        new_archetypes = np.array(data.T @ C).T
        new_archetypal_coords = np.array(new_archetypal_coords.T)

    elif method == 'NMF':
        '''Factor analysis using non-negative matrix factorization (NMF)'''
        nnmf = NMF(n_components=n_archetypes,
                   init='nndsvda',
                   tol=1e-4,
                   max_iter=1000)
        new_archetypal_coords = nnmf.fit_transform(data - np.min(data))
        new_archetypes = nnmf.components_

    elif method == 'PCHA_on_AE':
        ##############
        # MODEL PARAMS
        ##############
        noise_z_std = 0
        z_dim = arch
        act_out = tf.nn.tanh
        input_dim = data.shape[1]

        enc_AE = network.Encoder(num_at=n_archetypes, z_dim=z_dim)
        dec_AE = network.Decoder(x_dim=input_dim,
                                 noise_z_std=noise_z_std,
                                 z_dim=z_dim,
                                 act_out=act_out)

        # By setting both gammas to zero, we arrive at the standard autoencoder
        AE = AAnet.AAnet(enc_AE, dec_AE, gamma_convex=0, gamma_nn=0)
        ##########
        # TRAINING
        ##########
        # AE
        AE.train(data, batch_size=256, num_batches=n_batches)
        latent_encoding = AE.data2z(data)

        # PCHA learns an encoding into a simplex
        new_archetypes, new_archetypal_coords, _, _, _ = PCHA(
            latent_encoding.T, noc=n_archetypes)
        new_archetypes = np.array(new_archetypes.T)
        new_archetypal_coords = np.array(new_archetypal_coords.T)

        # Decode ATs
        new_archetypes = AE.z2data(new_archetypes)

    elif method == 'AAnet':
        ##############
        # MODEL PARAMS
        ##############

        noise_z_std = latent_noise
        z_dim = arch
        act_out = tf.nn.tanh
        input_dim = data.shape[1]

        enc_net = network.Encoder(num_at=n_archetypes, z_dim=z_dim)
        dec_net = network.Decoder(x_dim=input_dim,
                                  noise_z_std=noise_z_std,
                                  z_dim=z_dim,
                                  act_out=act_out)
        model = AAnet.AAnet(enc_net, dec_net)

        ##########
        # TRAINING
        ##########

        model.train(data, batch_size=256, num_batches=n_batches)

        ###################
        # GETTING OUTPUT
        ###################

        new_archetypal_coords = model.data2at(data)
        new_archetypes = model.get_ats_x()
    else:
        raise ValueError('{} is not a valid method'.format(method))
    toc = time.time() - tic

    # Calculate MSE if given ground truth
    if true_archetypes is not None:
        mse_archetypes, _, _ = calc_MSE(new_archetypes, true_archetypes)
    else:
        mse_archetypes = None
    if true_archetypal_coords is not None:
        mse_encoding, _, _ = calc_MSE(new_archetypal_coords.T,
                                      true_archetypal_coords.T)
    else:
        mse_encoding = None

    return mse_archetypes, mse_encoding, new_archetypal_coords, new_archetypes, toc
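
A hypothetical usage sketch follows; the simplex ground truth below is fabricated for illustration, and only run_AA and its module-level imports are assumed. Points are drawn from a 3-archetype simplex in a 10-dimensional feature space, so both error measures have a known target of roughly zero.

import numpy as np

n_samples, n_archetypes, n_features = 2000, 3, 10
rng = np.random.RandomState(42)
true_archetypes = rng.normal(size=(n_archetypes, n_features))
coords = rng.dirichlet(np.ones(n_archetypes), size=n_samples)  # rows sum to 1
data = coords @ true_archetypes

mse_at, mse_enc, learned_coords, learned_ats, runtime = run_AA(
    data,
    n_archetypes=n_archetypes,
    true_archetypal_coords=coords,
    true_archetypes=true_archetypes,
    method='PCHA')
print('archetype MSE: %s, encoding MSE: %s, runtime: %.1fs'
      % (mse_at, mse_enc, runtime))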
Example No. 3
# Imports needed by this standalone script; the logger configuration is
# assumed to live elsewhere in the project.
import logging

import numpy as np
import spams
import torch

log = logging.getLogger(__name__)
L = 1  # layer index
log.info('ArchetypalAnalysis for layer %d', L)

X = torch.load('tensors/tensor' + str(L) + '.pt')
X = torch.t(X).numpy()  # transpose and convert to NumPy (spams expects one signal per column)
K = 32  # learn a dictionary with 32 archetypes
robust = True  # whether to use robust archetypal analysis (default: True)
epsilon = 1e-3  # width of the Huber loss (default: 1e-3)
computeXtX = True  # whether to precompute and store X^T X (default: True)
stepsFISTA = 0  # number of FISTA alternations (default: 3)
# FISTA uses a fixed inner loop of 50 iterations; descent at each FISTA
# step is not guaranteed if 50 is too small
stepsAS = 25  # number of active-set alternations (default: 50)
randominit = True  # random initialization (default: True); unused below

(Z, A, B) = spams.archetypalAnalysis(np.asfortranarray(X),
                                     returnAB=True,
                                     p=K,
                                     robust=robust,
                                     epsilon=epsilon,
                                     computeXtX=computeXtX,
                                     stepsFISTA=stepsFISTA,
                                     stepsAS=stepsAS,
                                     numThreads=-1)

print('Evaluating cost function...')
alpha = spams.decompSimplex(np.asfortranarray(X),
                            Z=Z,
                            computeXtX=True,
                            numThreads=-1)
xd = X - Z * alpha
R = np.sum(xd * xd)
print("objective function: %f" % R)
print(Z.shape)
print(A.shape)
print(B.shape)
torch.save(Z, 'tensors/Z' + str(L) + '.pt')
torch.save(A, 'tensors/A' + str(L) + '.pt')
torch.save(B, 'tensors/B' + str(L) + '.pt')
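
Since torch.save simply pickles its argument, the saved factors (the NumPy archetype matrix Z and the sparse coefficient matrices A and B) can be read back with torch.load in a later session; a short sketch, assuming the same tensors/ layout:

import torch

# On recent PyTorch versions, weights_only=False is required to unpickle
# non-tensor objects such as NumPy arrays and SciPy sparse matrices.
Z = torch.load('tensors/Z1.pt', weights_only=False)
A = torch.load('tensors/A1.pt', weights_only=False)
B = torch.load('tensors/B1.pt', weights_only=False)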