Example #1
 def indexed_loss_fun(w, L2_vect, i_iter):
     rs = RandomState((seed, i_hyper, i_iter))  # Deterministic seed needed for backwards pass.
     idxs = rs.randint(N_train, size=batch_size)
     return loss_fun(w, train_data['X'][idxs], train_data['T'][idxs], L2_vect)
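These loss functions re-derive their minibatch from a RandomState seeded on (seed, i_hyper, i_iter), so the reverse pass of the hyperparameter optimization can replay exactly the minibatch the forward pass saw. A minimal sketch of the idea, assuming RandomState is a thin wrapper that hashes an arbitrary tuple into a NumPy seed (stock numpy.random.RandomState only accepts ints or arrays of ints):

import numpy as np

def RandomState(args):
    # Assumed wrapper: collapse an arbitrary tuple into a 32-bit seed.
    # hash() is stable within a single run, which is all the replay needs.
    return np.random.RandomState(abs(hash(args)) % (2 ** 32))

seed, i_hyper, i_iter = 0, 3, 7
N_train, batch_size = 1000, 64
fwd = RandomState((seed, i_hyper, i_iter)).randint(N_train, size=batch_size)
bwd = RandomState((seed, i_hyper, i_iter)).randint(N_train, size=batch_size)
assert (fwd == bwd).all()  # the reverse pass rebuilds the same minibatch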
Example #2
 def primal_loss(z_vect, transform, i_primal, record_results=False):
     RS = RandomState((seed, i_primal, "primal"))
     idxs = RS.randint(N_data, size=batch_size)
     minibatch = dictslice(data, idxs)
     w_vect = transform_weights(z_vect, transform)
     loss = loss_fun(w_vect, **minibatch)
     reg = regularization(z_vect)
     return loss + reg
Example #4
 def primal_loss(w_vect, reg, i_primal, record_results=False):
     RS = RandomState((seed, i_primal, "primal"))
     idxs = RS.randint(N_data, size=batch_size)
     minibatch = dictslice(data, idxs)
     loss = loss_fun(w_vect, **minibatch)
     reg = regularization(w_vect, reg)  # reg now holds the penalty value, shadowing the hyperparameter
     if record_results and i_primal % N_thin == 0:
         print "Iter {0}: train: {1}".format(i_primal, getval(loss))
     return loss + reg
Example #5
 def primal_loss(z_vect, transform, i_primal, record_results=False):
     RS = RandomState((seed, i_primal, "primal"))
     idxs = RS.randint(N_data, size=batch_size)
     minibatch = dictslice(data, idxs)
     w_vect = transform_weights(z_vect, transform) #TODO: this is a scale transformation, not regularization!
     loss = loss_fun(w_vect, **minibatch) #use new scale for prediction
     reg = regularization(z_vect) #regularize original scale
     #TODO: should be equivalent: w = z*e^transform, so z = e^-transform * w, and
     # f(z*e^transform) + e^\lambda||z||^2 = f(w) + e^\lambda||z||^2 = f(w) + e^\lambda||e^-transform * w||^2
     # see process_transform
     
     #if record_results and i_primal % N_thin == 0:
     #    print("Iter {0}: train: {1}".format(i_primal, getval(loss)))
     return loss + reg
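The TODO above turns on what transform_weights actually does. A sketch consistent with the comments (an assumed definition, not the project's real one): each weight is stored as z and rescaled elementwise, w = z * e^transform, so penalizing z is the same as penalizing a rescaled w:

import numpy as np

def transform_weights(z_vect, transform):
    # Assumed, per the comment above: elementwise rescaling w = z * e^transform.
    return z_vect * np.exp(transform)

z = np.random.randn(5)
t = np.random.randn(5)
w = transform_weights(z, t)
# ||z||^2 == ||e^-transform * w||^2, the identity the TODO appeals to.
assert np.allclose(np.sum(z ** 2), np.sum((np.exp(-t) * w) ** 2))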
Example #6
 def primal_stochastic_loss(z_vect, transform_vect, i_primal):
     RS = RandomState((seed, i_hyper, i_primal))
     loss = 0.0
     for _ in range(N_scripts_per_iter):
         i_script = RS.randint(N_scripts)
         N_train = train_data[i_script]['X'].shape[0]
         idxs = RS.permutation(N_train)[:batch_size]
         minibatch = dictslice(train_data[i_script], idxs)
         loss += loss_from_latents(z_vect, transform_vect, i_script, minibatch)
     reg  = regularization(z_vect)
     if i_primal % 20 == 0:
         print "Iter {0}, loss {1}, reg {2}".format(i_primal, getval(loss), getval(reg))
         print "Full losses: train: {0}, valid: {1}".format(
             total_loss(train_data, getval(z_vect)),
             total_loss(valid_data, getval(z_vect)))
     return loss + reg
Example #7
 def primal_stochastic_loss(z_vect, transform_vect, i_primal):
     RS = RandomState((seed, i_hyper, i_primal))
     loss = 0.0
     for _ in range(N_scripts_per_iter):
         i_script = RS.randint(N_scripts)
         N_train = train_data[i_script]['X'].shape[0]
         idxs = RS.permutation(N_train)[:batch_size]
         minibatch = dictslice(train_data[i_script], idxs)
         loss += loss_from_latents(z_vect, transform_vect, i_script, minibatch)
     reg  = regularization(z_vect)
     if i_primal % 1 == 0:  # logs every iteration
         print("Iter {0}, loss {1}, reg {2}".format(i_primal, getval(loss), getval(reg)))
         print("Full losses: train: {0}, valid: {1}".format(
             total_loss(train_data, getval(z_vect)),
             total_loss(valid_data, getval(z_vect))))
     return loss + reg
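Both stochastic losses lean on dictslice to pull the same rows out of every array in a data dict, so loss_fun(w_vect, **minibatch) sees aligned 'X' and 'T' slices. A plausible one-liner for that helper (an assumption about its definition, not its actual source):

def dictslice(d, idxs):
    # Apply the same fancy index to every array in the dict,
    # e.g. {'X': X, 'T': T} -> {'X': X[idxs], 'T': T[idxs]}.
    return {key: value[idxs] for key, value in d.items()}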
Example #9
def show_alphabets(alphabets, ax=None, n_cols=20):
    import matplotlib as mpl
    import matplotlib.pyplot as plt
    from nn_utils import plot_images
    seed = 1
    n_rows = len(alphabets)
    full_image = np.zeros((0, n_cols * 28))
    for alphabet in alphabets:
        RS = RandomState(seed)  # re-seeded per alphabet: every row samples the same column slots
        char_idxs = RS.randint(alphabet['X'].shape[0], size=n_cols)
        char_ids = np.argmax(alphabet['T'][char_idxs], axis=1)
        image = alphabet['X'][char_idxs].reshape((n_cols, 28, 28))
        image = np.transpose(image, axes=[1, 0, 2]).reshape((28, n_cols * 28))
        full_image = np.concatenate((full_image, image))
        
    if ax is None:
        fig = plt.figure()
        fig.set_size_inches((8, 8 * n_rows/n_cols))
        ax = fig.add_subplot(111)
    ax.imshow(full_image, cmap=mpl.cm.binary)
    ax.set_xticks(np.array([]))
    ax.set_yticks(np.array([]))
    plt.tight_layout()
    plt.savefig("all_alphabets.png")
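show_alphabets expects a list of alphabet dicts with flattened 28x28 images under 'X' and one-hot labels under 'T' (the shapes implied by the reshape above). A synthetic smoke test, assuming the function and its np/RandomState globals are in scope:

import numpy as np
from numpy.random import RandomState

rng = np.random.RandomState(0)
alphabets = [{'X': rng.rand(50, 784),                     # 50 flat 28x28 images
              'T': np.eye(26)[rng.randint(26, size=50)]}  # one-hot labels
             for _ in range(3)]
show_alphabets(alphabets)  # tiles 3 rows of 20 glyphs, writes all_alphabets.png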