Example #1
def learn(Phi=None, IMAGES=None, scales=3, patch_dim=9*9, 
    overcomplete=1, iterations=2000, batch=100, alpha=10, beta=0.998, lambdav=0.075, plot=False, save=False):
    # Alpha is powered based on scale. alpha_s = alpha**(scales-s-1)
    # 32x32 image size
    # 9x9 neurons. 1 pixel stride
    
    if IMAGES is None:
        num_images = 10000
        base_image_dim = 32*32
        # IMAGES = preprocess.extract_images(images='vanhateran', num_images=num_images, image_dim=image_dim)
    else:
        (base_image_dim, num_images) = IMAGES.shape

    base_image_side = int(sqrt(base_image_dim))
    patch_side = int(sqrt(patch_dim))
    pad = (patch_side-1)//2  # integer pad width, as np.pad requires

    base_neurons = (base_image_side+(patch_side-1))**2

    if Phi is None:
        Phi = initialize(base_image_dim, patch_dim, scales)

    G = laplacian_pyramid.generative(base_image_dim, scales)

    for t in range(iterations+1):
      # note: chosen_images is currently unused; a fresh batch is extracted below
      chosen_images = np.random.permutation(arange(0, num_images))[:batch]
      I = preprocess.extract_images(images='vanhateran', num_images=batch, image_dim=base_image_dim)

      A = sparsify(I.T, G, Phi, lambdav)
      R = reconstruct(G, Phi, A, base_image_dim)

      error = laplacian_pyramid.build(R - I, scales)
      dPhi = [sps.csc_matrix((base_image_dim // (4**(scales-s-1)), base_image_dim // (4**(scales-s-1)))) for s in range(scales)]

      for s in range(scales):
        image_dim = base_image_dim // (4**(scales-s-1))
        image_side = int(sqrt(image_dim))

        error[s] = error[s].reshape((image_side, image_side, batch))
        
        patches = patchify(np.pad(error[s], ((pad, pad), (pad, pad), (0, 0)), mode='constant'), (patch_side, patch_side), mode='2d')
        neurons = image_dim

        for n in range(neurons):
          update = np.dot(patches[n], A[s][n].T)
          # place the patch-sized update at neuron n's (row, col) location
          dPhi[s][:, n] = localize(update, image_side, patch_side, divmod(n, image_side))

      for s in range(scales):
        Phi[s] = Phi[s] + alpha * dPhi[s]
        normalize(Phi[s], norm='l1', axis=0, copy=False)

      if t % 10 == 0:
        print(t)
        display()
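
The `laplacian_pyramid` module used above is not shown. For reference, a minimal self-contained sketch of the standard Laplacian-pyramid decomposition (blur, downsample by 2, keep the detail that was lost) is below; the real module's interface, filter, and level ordering may differ, and the power-of-two image side is an assumption:

import numpy as np
from scipy.ndimage import gaussian_filter

def build_pyramid(image, scales):
    # Each level stores the detail removed by blurring and 2x downsampling;
    # the final level is the remaining low-pass residual.
    # Assumes a square image with a power-of-two side.
    levels = []
    current = image
    for _ in range(scales - 1):
        blurred = gaussian_filter(current, sigma=1.0)
        down = blurred[::2, ::2]
        # nearest-neighbor upsampling; a real implementation would interpolate
        up = np.repeat(np.repeat(down, 2, axis=0), 2, axis=1)
        levels.append(current - up)
        current = down
    levels.append(current)
    return levels  # fine-to-coarse; the loop above indexes coarse-to-fine

Summing the levels (after upsampling the coarser ones back to full resolution) recovers the original image, which is what makes the per-scale error decomposition in the training loop consistent.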
Example #2
def learn_scales(Phi=None,
                 scales=2,
                 image_dim=32 * 32,
                 patch_dim=9 * 9,
                 normalize=True,
                 bandpass=False,
                 overcomplete=1,
                 iterations=4000,
                 batch=100,
                 alpha=400,
                 beta=0.9995,
                 gamma=0.95,
                 lambdav=0.05,
                 plot_every=50,
                 label=None):
    # Main learning loop

    patch_side = int(sqrt(patch_dim))
    image_side = int(sqrt(image_dim))
    pad = (patch_side - 1) // 2
    indices = all_indices(image_side, patch_side, overcomplete)

    if Phi is None:
        Phi = initialize(image_side, patch_side, overcomplete)

    neurons = overcomplete * image_dim
    old_dPhi = np.zeros((neurons, patch_dim))

    for t in range(iterations + 1):
        I = preprocess.extract_images(images='vanhateran',
                                      num_images=batch,
                                      image_dim=image_dim,
                                      normalize=normalize,
                                      bandpass=bandpass)
        I = I.T
        I = np.pad(I.reshape(image_side, image_side, batch),
                   ((pad, pad), (pad, pad), (0, 0)),
                   mode='constant')
        I = I.reshape((image_side + 2 * pad) * (image_side + 2 * pad), batch)

        # A = sparsify(I, Phi, lambdav)
        A = fista.fista(I,
                        Phi,
                        lambdav,
                        max_iterations=10 * overcomplete,
                        display=True)
        R = reconstruct(Phi, A)

        error = I - R
        error = error.reshape(image_side + 2 * pad, image_side + 2 * pad,
                              batch)
        # TO DO: set error on paddings to 0
        error = patchify(error, (patch_side, patch_side))
        error = error.reshape(batch, neurons // overcomplete, patch_dim)
        error = np.tile(error, (1, overcomplete, 1))  # Repeat for OC

        dPhi = error.transpose(1, 2, 0) * A[:, None, :]
        dPhi = dPhi.sum(axis=2)
        dPhi = (1 - gamma) * dPhi + gamma * old_dPhi
        old_dPhi = dPhi

        print "Old Objective: " + str(
            np.sum((I - R)**2) + lambdav * np.sum(np.abs(A)))
        Phi = Phi.tolil()
        Phi[indices[0],
            indices[1]] = Phi[indices[0],
                              indices[1]] + (alpha / float(batch)) * dPhi.T
        Phi = Phi.tocsc()
        skp.normalize(Phi, norm='l2', axis=0, copy=False)

        A = fista.fista(I,
                        Phi,
                        lambdav,
                        max_iterations=10 * overcomplete,
                        display=False)
        R = reconstruct(Phi, A)
        print "New Objective: " + str(
            np.sum((I - R)**2) + lambdav * np.sum(np.abs(A)))

        # Armijo-style step-size decay
        alpha = alpha * beta

        if t % plot_every == 0:
            display(t,
                    Phi,
                    save=True,
                    patch_dim=patch_dim,
                    overcomplete=overcomplete,
                    label=label)
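
The inference call `fista.fista` is external to this listing. A minimal dense-numpy ISTA sketch of the same l1 sparse-coding problem is below; FISTA adds a Nesterov momentum sequence on top of this iteration, and the function name and signature here are illustrative, not the module's:

import numpy as np

def soft_threshold(x, t):
    # proximal operator of the l1 norm
    return np.sign(x) * np.maximum(np.abs(x) - t, 0.0)

def ista(I, Phi, lambdav, max_iterations=100):
    # minimizes 0.5*||I - Phi A||_F^2 + lambdav*||A||_1
    # step size 1/L, where L = ||Phi||_2^2 bounds the gradient's Lipschitz constant
    L = np.linalg.norm(Phi, 2) ** 2
    A = np.zeros((Phi.shape[1], I.shape[1]))
    for _ in range(max_iterations):
        grad = np.dot(Phi.T, np.dot(Phi, A) - I)
        A = soft_threshold(A - grad / L, lambdav / L)
    return A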
Example #3
def learn_conv(Phi=None,
               scales=3,
               image_dim=32 * 32,
               patch_dim=9 * 9,
               whiten=True,
               overcomplete=1,
               iterations=2000,
               batch=100,
               alpha=400,
               beta=0.9995,
               gamma=0.95,
               lambdav=0.05,
               plot=False,
               save=False):
    # Main learning loop

    label = 'conv'

    patch_side = int(sqrt(patch_dim))
    image_side = int(sqrt(image_dim))
    pad = (patch_side - 1) // 2
    indices = all_indices(image_side, patch_side, overcomplete)

    if Phi is None:
        Phi = initialize(image_side,
                         patch_side,
                         overcomplete,
                         convolutional=True)

    neurons = overcomplete * image_dim
    #old_dPhi = np.zeros((neurons, patch_dim))

    W = None  # no whitening transform unless computed below
    if whiten:
        label = label + '-whitened'
        print("Whitening")
        I = preprocess.extract_images(images='vanhateran',
                                      num_images=50000,
                                      image_dim=image_dim)
        (_, W) = preprocess.whitening_matrix(I)

    for t in range(iterations + 1):
        I = preprocess.extract_images(images='vanhateran',
                                      num_images=batch,
                                      image_dim=image_dim,
                                      whiten=W)
        I = I.T
        I = np.pad(I.reshape(image_side, image_side, batch),
                   ((pad, pad), (pad, pad), (0, 0)),
                   mode='constant')
        I = I.reshape((image_side + 2 * pad) * (image_side + 2 * pad), batch)

        # A = sparsify(I, Phi, lambdav)
        A = fista.fista(I, Phi, lambdav, max_iterations=50)
        R = reconstruct(Phi, A)

        error = I - R
        error = error.reshape(image_side + 2 * pad, image_side + 2 * pad,
                              batch)
        # TO DO: set error on paddings to 0
        error = patchify(error, (patch_side, patch_side))
        error = error.reshape(batch, neurons // overcomplete, patch_dim)
        error = np.tile(error, (1, overcomplete, 1))  # Repeat for OC

        dPhi = error.transpose(1, 2, 0) * A[:, None, :]
        dPhi = dPhi.sum(axis=2)  # Sum over batch
        dPhi = sum_chunk(dPhi, neurons // overcomplete, axis=0)
        dPhi = dPhi / float(neurons // overcomplete)  # average across positions (weight sharing)
        dPhi = dPhi.repeat(neurons // overcomplete, axis=0)

        #dPhi = (1-gamma)*dPhi + gamma*old_dPhi
        #old_dPhi = dPhi

        # print "Old Objective: " + str(np.sum((I-R)**2) + lambdav*np.sum(np.abs(A)))
        Phi = Phi.tolil()
        Phi[indices[0],
            indices[1]] = Phi[indices[0],
                              indices[1]] + (alpha / float(batch)) * dPhi.T
        Phi = Phi.tocsc()
        skp.normalize(Phi, norm='l2', axis=0, copy=False)

        #A = sparsify(I, Phi, lambdav)
        # A = fista.fista(I, Phi, lambdav, max_iterations=50)
        # R = reconstruct(Phi, A)
        # print "New Objective: " + str(np.sum((I-R)**2) + lambdav*np.sum(np.abs(A)))

        # Armijo-style step-size decay
        alpha = alpha * beta

        if t % 50 == 0:
            display(t,
                    Phi,
                    save=True,
                    patch_dim=patch_dim,
                    overcomplete=overcomplete,
                    label=label)
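
`preprocess.whitening_matrix` is also not shown; the `(_, W)` unpacking suggests it returns the whitened data plus the transform. A common ZCA-whitening construction, sketched under the assumption that images are stored one per column, is:

import numpy as np

def whitening_matrix(I, eps=1e-5):
    # I: (image_dim, num_images), one image per column
    I = I - I.mean(axis=1, keepdims=True)
    C = np.dot(I, I.T) / I.shape[1]        # pixel covariance
    d, E = np.linalg.eigh(C)               # eigendecomposition of C
    D = np.diag(1.0 / np.sqrt(d + eps))    # inverse square-root eigenvalues
    W = np.dot(np.dot(E, D), E.T)          # symmetric (ZCA) whitening transform
    return np.dot(W, I), W                 # matches the (_, W) unpacking above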
Example #4
def learn(G=None,
          Phi=None,
          dataset='vanhateran',
          base_image_dim=32 * 32,
          patch_dim=9 * 9,
          scales=2,
          overcomplete=1,
          iterations=4000,
          inf_iterations=150,
          batch=100,
          alpha=[200, 400],
          beta=0.95,
          gamma=0.95,
          decrease_every=200,
          lambdav=0.05,
          plot=False,
          plot_every=50,
          save=False,
          label=''):

    patch_side = int(sqrt(patch_dim))
    base_image_side = int(sqrt(base_image_dim))
    pad = (patch_side - 1) // 2

    indices = [
        all_indices(base_image_side // 2**(scales - s - 1),
                    patch_side,
                    overcomplete=overcomplete) for s in range(scales)
    ]

    if G is None:
        G = laplacian_pyramid.generative(base_image_side,
                                         patch_side,
                                         scales,
                                         base_mask_radius=(patch_side - 1) // 2)

    if Phi is None:
        Phi = initialize_scales(G, base_image_side, patch_side, scales=scales)

    base_neurons = base_image_dim
    momentum = [
        np.zeros((patch_dim, base_neurons // 4**(scales - s - 1)))
        for s in range(scales)
    ]
    M = sps.hstack([G[s].dot(Phi[s]) for s in range(scales)]).tocsr()
    max_eig = scipy.sparse.linalg.svds(M,
                                       1,
                                       which='LM',
                                       return_singular_vectors=False)

    for t in range(iterations + 1):
        I = preprocess.extract_images(dataset=dataset,
                                      num_images=batch,
                                      image_dim=base_image_dim,
                                      center=True,
                                      rescale=True,
                                      lowpass=True,
                                      remove_scale=scales,
                                      pad=pad).T

        A = inference(I,
                      G,
                      Phi,
                      base_image_dim,
                      lambdav,
                      algorithm='fista-gpu',
                      max_iterations=inf_iterations,
                      max_eig=max_eig)
        R = reconstruct(G, Phi, A)

        error = I - R

        old_obj = np.sum(error**2) + lambdav * sum(np.sum(np.abs(a)) for a in A)
        print("Old Objective: " + str(old_obj))

        for s in range(scales):
            neurons = base_neurons // 4**(scales - s - 1)
            image_side = base_image_side // 2**(scales - s - 1)
            error_s = G[s].T.dot(error)
            #print "s", s
            #print "(scales-s-1)", (scales-s-1)
            #error_s = error_s/float(scales-s-1)

            error_s = error_s.reshape(image_side + 2 * pad,
                                      image_side + 2 * pad, batch)
            error_s = patchify(error_s, (patch_side, patch_side))
            error_s = error_s.reshape(batch, neurons, patch_dim)

            dPhi = (error_s.transpose(1, 2, 0) *
                    A[s][:, None, :]).sum(axis=2).T

            gamma = 1 - 3 / float(t + 5)  # momentum schedule; overrides the gamma argument
            momentum[s] = gamma * momentum[s] + alpha[s] / float(batch) * dPhi

            Phi[s] = Phi[s].tolil()
            Phi[s][indices[s][0], indices[s][1]] += momentum[s]
            Phi[s] = Phi[s].tocsc()

        Phi = normalize(G, Phi)
        #for s in range(scales):
        #  skp.normalize(Phi[s], norm='l2', axis=0, copy=False)

        R = reconstruct(G, Phi, A)

        error = I - R
        new_obj = np.sum(error**2) + lambdav * sum(np.sum(np.abs(a)) for a in A)
        print("New Objective: " + str(new_obj))

        # Armijo-style step-size decay
        if new_obj > old_obj or t % decrease_every == 0:
            alpha = [a * beta for a in alpha]
            print("Learning rate: " + str(alpha))

        if t % plot_every == 0:
            display_scales(t,
                           G,
                           Phi,
                           save=save,
                           patch_side=patch_side,
                           label=label)
            # Eigenvalue doesn't change that often
            M = sps.hstack([G[s].dot(Phi[s]) for s in range(scales)]).tocsr()
            max_eig = scipy.sparse.linalg.svds(M,
                                               1,
                                               which='LM',
                                               return_singular_vectors=False)
            print "Max eigenvalue" + str(max_eig)
Example #5
def mse_vs_sparsity(batch=100, image_dim=32 * 32, patch_dim=9 * 9):
    patch_side = int(np.sqrt(patch_dim))
    image_side = int(np.sqrt(image_dim))
    pad = (patch_side - 1) // 2

    I = preprocess.extract_images(images='vanhateran',
                                  num_images=batch,
                                  image_dim=image_dim)
    I = I.T
    I = np.pad(I.reshape(image_side, image_side, batch),
               ((pad, pad), (pad, pad), (0, 0)),
               mode='constant')
    I = I.reshape((image_side + 2 * pad) * (image_side + 2 * pad), batch)
    """
  Phi_conv_oc2_l005 = np.load('/Users/zayd/Dropbox/Code/multiscale/laplacian/dictionaries/conv-oc-2-i40-p9-t1900-2014-02-04 09:39:29.570434.npy').item()
  Phi_conv_oc4_l005 = np.load('/Users/zayd/Dropbox/Code/multiscale/laplacian/dictionaries/conv-oc-4-i40-p9-t400-2014-02-04 15:47:39.352604.npy').item()

  Phi_oc_l005 = np.load('/Users/zayd/Dropbox/Code/multiscale/laplacian/dictionaries/oc1-i40-p9-t1000-2014-02-03 22:54:32.030729.npy').item()
  Phi_oc2_l005 = np.load('/Users/zayd/Dropbox/Code/multiscale/laplacian/dictionaries/oc2-i40-p9-t1000-2014-02-04 00:51:06.116929.npy').item()
  Phi_oc4_l005 = np.load('/Users/zayd/Dropbox/Code/multiscale/laplacian/dictionaries/oc4-i40-p9-t1800-2014-02-04 16:00:06.398794.npy').item()

  A_conv_oc2_l005 = fista.fista(I, Phi_conv_oc2_l005, lambdav=0.05, max_iterations=100)
  A_conv_oc4_l005 = fista.fista(I, Phi_conv_oc4_l005, lambdav=0.05, max_iterations=100)

  A_oc2_l005 = fista.fista(I, Phi_oc2_l005, lambdav=0.05, max_iterations=100)
  A_oc4_l005 = fista.fista(I, Phi_oc4_l005, lambdav=0.05, max_iterations=100)
 
  R_conv_oc2_l005 = single.reconstruct(Phi_conv_oc2_l005, A_conv_oc2_l005)
  R_conv_oc4_l005 = single.reconstruct(Phi_conv_oc4_l005, A_conv_oc4_l005)

  R_oc2_l005 = single.reconstruct(Phi_oc2_l005, A_oc2_l005)
  R_oc4_l005 = single.reconstruct(Phi_oc4_l005, A_oc4_l005)

  mse_conv_oc2_l005 = np.sum((I-R_conv_oc2_l005)**2)/float(batch)
  mse_conv_oc4_l005 = np.sum((I-R_conv_oc4_l005)**2)/float(batch)
  mse_oc2_l005 = np.sum((I-R_oc2_l005)**2)/float(batch)
  mse_oc4_l005 = np.sum((I-R_oc4_l005)**2)/float(batch)

  l1_conv_oc2_l005 = np.sum(np.abs(A_conv_oc2_l005))/float(batch)
  l1_conv_oc4_l005 = np.sum(np.abs(A_conv_oc4_l005))/float(batch)

  l1_oc2_l005 = np.sum(np.abs(A_oc2_l005))/float(batch)
  l1_oc4_l005 = np.sum(np.abs(A_oc4_l005))/float(batch)

  x = [l1_conv_oc2_l005, l1_conv_oc4_l005, l1_oc2_l005, l1_oc4_l005]
  y = [mse_conv_oc2_l005, mse_conv_oc4_l005, mse_oc2_l005, mse_oc4_l005]

  y = [s/float(image_dim) for s in y]
  """

    plt.scatter(x, y)
    plt.xlabel('Sparsity (l1 Norm)')
    plt.ylabel('MSE (per pixel)')
    plt.show()

    return (x, y)
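
The commented-out block repeats the same bookkeeping for every dictionary. Each model reduces to one (sparsity, MSE) point, which a small helper can compute; this sketch assumes a dense `Phi` and codes `A` (the listing's `single.reconstruct` handles the sparse case):

import numpy as np

def mse_and_sparsity(I, Phi, A, batch, image_dim):
    # one point on the sparsity/MSE trade-off scatter plot
    R = np.dot(Phi, A)
    mse_per_pixel = np.sum((I - R) ** 2) / float(batch * image_dim)
    l1_per_image = np.sum(np.abs(A)) / float(batch)
    return l1_per_image, mse_per_pixel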