Code example #1
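# Assemble the parameters of one large Gaussian RBM (3072 visible, 300*26 = 7800
# hidden units) by stitching together 26 small patch RBMs: 16 trained on the
# non-overlapping 8x8 patches, 9 on 8x8 patches shifted by 4 pixels, and 1 on an
# 8x8 pooled view of the whole 32x32x3 image. Relies on numpy (np) and the
# project's helpers RNG, im_flatten and im_unflatten being in scope.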
def make_large_weights(small_rbms):
    W = np.zeros((300 * 26, 32, 32, 3), dtype=np.float32)
    W[...] = RNG(seed=1234).rand(*W.shape) * 5e-6
    vb = np.zeros((32, 32, 3))
    hb = np.zeros(300 * 26)

    for i in range(4):
        for j in xrange(4):
            rbm_id = 4 * i + j
            weights = small_rbms[rbm_id].get_tf_params(scope='weights')
            W_small = weights['W']
            W_small = W_small.T  # (300, 192)
            W_small = im_unflatten(W_small)  # (300, 8, 8, 3)
            W[300 * rbm_id:300 * (rbm_id + 1), 8 * i:8 * (i + 1),
              8 * j:8 * (j + 1), :] = W_small
            vb[8 * i:8 * (i + 1),
               8 * j:8 * (j + 1), :] += im_unflatten(weights['vb'])
            hb[300 * rbm_id:300 * (rbm_id + 1)] = weights['hb']

    for i in range(3):
        for j in xrange(3):
            rbm_id = 16 + 3 * i + j
            weights = small_rbms[rbm_id].get_tf_params(scope='weights')
            W_small = weights['W']
            W_small = W_small.T
            W_small = im_unflatten(W_small)
            W[300 * rbm_id:300 * (rbm_id + 1), 4 + 8 * i:4 + 8 * (i + 1),
              4 + 8 * j:4 + 8 * (j + 1), :] = W_small
            vb[4 + 8 * i:4 + 8 * (i + 1),
               4 + 8 * j:4 + 8 * (j + 1), :] += im_unflatten(weights['vb'])
            hb[300 * rbm_id:300 * (rbm_id + 1)] = weights['hb']

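    # The last RBM was trained on an 8x8 pooled summary of the whole image:
    # spread each of its 8x8 weight "pixels" over the corresponding 4x4 block
    # of the 32x32 canvas, scaled by 1/16.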
    weights = small_rbms[25].get_tf_params(scope='weights')
    W_small = weights['W']
    W_small = W_small.T
    W_small = im_unflatten(W_small)
    vb_small = im_unflatten(weights['vb'])
    for i in range(8):
        for j in xrange(8):
            U = W_small[:, i, j, :]
            U = np.expand_dims(U, -1)
            U = np.expand_dims(U, -1)
            U = U.transpose(0, 2, 3, 1)
            W[-300:, 4 * i:4 * (i + 1), 4 * j:4 * (j + 1), :] = U / 16.
            vb[4 * i:4 * (i + 1),
               4 * j:4 * (j + 1), :] += vb_small[i, j, :].reshape(
                   (1, 1, 3)) / 16.
            hb[-300:] = weights['hb']

    W = im_flatten(W)
    W = W.T
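    # Average the accumulated visible biases over the number of contributing
    # RBMs: every pixel got a bias term from the 16-patch grid and from the
    # pooled global RBM, and the central 24x24 pixels got a third one from the
    # 9 shifted patches.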
    vb /= 2.
    vb[4:-4, 4:-4, :] /= 1.5
    vb = im_flatten(vb)

    return W, vb, hb
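
The final /2 and /1.5 steps average the visible bias over however many small RBMs cover each pixel. A standalone numpy sketch (illustrative only, not part of the project code) that reproduces those coverage counts for the 26-patch tiling:

import numpy as np

coverage = np.zeros((32, 32), dtype=int)
for i in range(4):                     # 16 non-overlapping 8x8 patches
    for j in range(4):
        coverage[8 * i:8 * (i + 1), 8 * j:8 * (j + 1)] += 1
for i in range(3):                     # 9 patches shifted by 4 pixels
    for j in range(3):
        coverage[4 + 8 * i:4 + 8 * (i + 1), 4 + 8 * j:4 + 8 * (j + 1)] += 1
coverage += 1                          # the pooled global RBM touches every pixel

print(coverage[0, 0], coverage[16, 16])  # -> 2 3
# border pixels are covered twice, central pixels three times, which is exactly
# what `vb /= 2.` followed by `vb[4:-4, 4:-4, :] /= 1.5` divides by
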
Code example #2
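# Build a 10x augmented training set: the original images, four copies shifted
# by one pixel in each direction, and horizontal mirrors of all five, cached on
# disk as uint8 to save space. Relies on numpy (np), os and the project's
# helpers RNG, Stopwatch, im_flatten, im_unflatten, shift and horizontal_mirror.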
def make_augmentation(X_train, y_train, n_train, args):
    X_aug = None
    X_aug_path = os.path.join(args.data_path, 'X_aug.npy')
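    # repeat the labels once per augmented copy and shuffle them with the same
    # seeded RNG that shuffles X_aug below, so image/label pairs stay aligned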
    y_train = y_train.tolist() * 10
    RNG(seed=1337).shuffle(y_train)

    augment = True
    if os.path.isfile(X_aug_path):
        print "\nLoading augmented data ..."
        X_aug = np.load(X_aug_path)
        print "Checking augmented data ..."
        if len(X_aug) == 10 * n_train:
            augment = False

    if augment:
        print "\nAugmenting data ..."
        s = Stopwatch(verbose=True).start()

        X_aug = np.zeros((10 * n_train, 32, 32, 3), dtype=np.float32)
        X_train = im_unflatten(X_train)
        X_aug[:n_train] = X_train
        for i in range(n_train):
            for k, offset in enumerate(((1, 0), (-1, 0), (0, 1), (0, -1))):
                img = X_train[i].copy()
                X_aug[(k + 1) * n_train + i] = shift(img, offset=offset)
        for i in range(5 * n_train):
            X_aug[5 * n_train + i] = horizontal_mirror(X_aug[i].copy())

        # shuffle once again
        RNG(seed=1337).shuffle(X_aug)

        # convert to 'uint8' type to save disk space
        X_aug *= 255.
        X_aug = X_aug.astype('uint8')

        # flatten to (10 * `n_train`, 3072) shape
        X_aug = im_flatten(X_aug)

        # save to disk
        np.save(X_aug_path, X_aug)

        s.elapsed()
        print "\n"

    return X_aug, y_train
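
shift and horizontal_mirror come from the surrounding project; minimal numpy stand-ins with the same intent (an assumption about their behaviour, not the project's actual implementation) could look like this:

import numpy as np

def shift(img, offset):
    """Translate an HxWxC image by (dy, dx) pixels, zero-filling the border."""
    dy, dx = offset
    out = np.zeros_like(img)
    h, w = img.shape[:2]
    out[max(dy, 0):h + min(dy, 0), max(dx, 0):w + min(dx, 0)] = \
        img[max(-dy, 0):h + min(-dy, 0), max(-dx, 0):w + min(-dx, 0)]
    return out

def horizontal_mirror(img):
    """Flip an HxWxC image left-to-right."""
    return img[:, ::-1, :]

For example, shift(img, offset=(1, 0)) moves the image content one pixel along the first axis and leaves a zero-filled row behind.
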
Code example #3
    # first 16 ...
    for i in range(4):
        for j in range(4):
            rbm_id = 4 * i + j
            rbm_dirpath = args.small_dirpath_prefix + str(rbm_id) + '/'

            if os.path.isdir(rbm_dirpath):
                print "\nLoading small RBM #{0} ...\n\n".format(rbm_id)
                rbm = GaussianRBM.load_model(rbm_dirpath)
            else:
                print "\nTraining small RBM #{0} ...\n\n".format(rbm_id)
                X_patches = X_train[:, 8 * i:8 * (i + 1), 8 * j:8 * (j + 1), :]
                X_patches_val = X_val[:, 8 * i:8 * (i + 1),
                                      8 * j:8 * (j + 1), :]
                X_patches = im_flatten(X_patches)
                X_patches_val = im_flatten(X_patches_val)

                rbm = GaussianRBM(random_seed=9000 + rbm_id,
                                  model_path=rbm_dirpath,
                                  **small_rbm_config)
                rbm.fit(X_patches, X_patches_val)
            small_rbms.append(rbm)

    # next 9 ...
    for i in range(3):
        for j in range(3):
            rbm_id = 16 + 3 * i + j
            rbm_dirpath = args.small_dirpath_prefix + str(rbm_id) + '/'

            if os.path.isdir(rbm_dirpath):
Code example #4
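# Train (or load from disk) the 26 small Gaussian RBMs whose parameters are
# later stitched together by make_large_weights. Relies on numpy (np), os and
# the project's GaussianRBM, im_flatten and im_unflatten being in scope.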
def make_small_rbms(train_val, args):
    (X_train, X_val) = train_val
    X_train = im_unflatten(X_train)
    X_val = im_unflatten(X_val)

    small_rbm_config = dict(
        n_visible=8 * 8 * 3,
        n_hidden=300,
        sigma=1.,
        W_init=0.001,
        vb_init=0.,
        hb_init=0.,
        n_gibbs_steps=1,
        learning_rate=args.small_lr,
        momentum=np.geomspace(0.5, 0.9, 8),
        max_epoch=args.small_epochs,
        batch_size=args.small_batch_size,
        l2=args.small_l2,
        sample_v_states=True,
        sample_h_states=True,
        sparsity_target=args.small_sparsity_target,
        sparsity_cost=args.small_sparsity_cost,
        dbm_first=True,  # pretrain as the bottom (first) layer of a DBM
        metrics_config=dict(
            msre=True,
            feg=True,
            train_metrics_every_iter=2000,
            val_metrics_every_epoch=2,
            feg_every_epoch=2,
            n_batches_for_feg=100,
        ),
        verbose=True,
        display_filters=12,
        display_hidden_activations=36,
        v_shape=(8, 8, 3),
        dtype='float32',
        tf_saver_params=dict(max_to_keep=1))
    small_rbms = []

    # first 16 ...
    for i in range(4):
        for j in range(4):
            rbm_id = 4 * i + j
            rbm_dirpath = args.small_dirpath_prefix + str(rbm_id) + '/'

            if os.path.isdir(rbm_dirpath):
                print("\nLoading small RBM #{0} ...\n\n".format(rbm_id))
                rbm = GaussianRBM.load_model(rbm_dirpath)
            else:
                print("\nTraining small RBM #{0} ...\n\n".format(rbm_id))
                X_patches = X_train[:, 8 * i:8 * (i + 1), 8 * j:8 * (j + 1), :]
                X_patches_val = X_val[:, 8 * i:8 * (i + 1),
                                      8 * j:8 * (j + 1), :]
                X_patches = im_flatten(X_patches)
                X_patches_val = im_flatten(X_patches_val)

                rbm = GaussianRBM(random_seed=9000 + rbm_id,
                                  model_path=rbm_dirpath,
                                  **small_rbm_config)
                rbm.fit(X_patches, X_patches_val)
            small_rbms.append(rbm)

    # next 9 ...
    for i in range(3):
        for j in range(3):
            rbm_id = 16 + 3 * i + j
            rbm_dirpath = args.small_dirpath_prefix + str(rbm_id) + '/'

            if os.path.isdir(rbm_dirpath):
                print("\nLoading small RBM #{0} ...\n\n".format(rbm_id))
                rbm = GaussianRBM.load_model(rbm_dirpath)
            else:
                print("\nTraining small RBM #{0} ...\n\n".format(rbm_id))
                X_patches = X_train[:, 4 + 8 * i:4 + 8 * (i + 1),
                                    4 + 8 * j:4 + 8 * (j + 1), :]
                X_patches_val = X_val[:, 4 + 8 * i:4 + 8 * (i + 1),
                                      4 + 8 * j:4 + 8 * (j + 1), :]
                X_patches = im_flatten(X_patches)
                X_patches_val = im_flatten(X_patches_val)

                rbm = GaussianRBM(random_seed=args.small_random_seed + rbm_id,
                                  model_path=rbm_dirpath,
                                  **small_rbm_config)
                rbm.fit(X_patches, X_patches_val)
            small_rbms.append(rbm)

    # ... and the last one
    rbm_id = 25
    rbm_dirpath = args.small_dirpath_prefix + str(rbm_id) + '/'

    if os.path.isdir(rbm_dirpath):
        print("\nLoading small RBM #{0} ...\n\n".format(rbm_id))
        rbm = GaussianRBM.load_model(rbm_dirpath)
    else:
        print("\nTraining small RBM #{0} ...\n\n".format(rbm_id))
        X_patches = X_train.copy()  # (N, 32, 32, 3)
        X_patches = X_patches.transpose(0, 3, 1, 2)  # (N, 3, 32, 32)
        X_patches = X_patches.reshape(
            (-1, 3, 4, 8, 4, 8)).mean(axis=4).mean(axis=2)  # (N, 3, 8, 8)
        X_patches = X_patches.transpose(0, 2, 3, 1)  # (N, 8, 8, 3)
        X_patches = im_flatten(X_patches)  # (N, 8*8*3)

        X_patches_val = X_val.copy()
        X_patches_val = X_patches_val.transpose(0, 3, 1, 2)
        X_patches_val = X_patches_val.reshape(
            (-1, 3, 4, 8, 4, 8)).mean(axis=4).mean(axis=2)
        X_patches_val = X_patches_val.transpose(0, 2, 3, 1)
        X_patches_val = im_flatten(X_patches_val)

        rbm = GaussianRBM(random_seed=9000 + rbm_id,
                          model_path=rbm_dirpath,
                          **small_rbm_config)
        rbm.fit(X_patches, X_patches_val)
    small_rbms.append(rbm)
    return small_rbms
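
Taken together with the functions above, the small RBMs feed make_large_weights, whose stitched parameters can then seed a single full-image Gaussian RBM. A rough sketch of that glue, assuming (this is not confirmed by the snippets above) that GaussianRBM also accepts array-valued W_init / vb_init / hb_init and that a large_rbm_config dict analogous to small_rbm_config exists:

small_rbms = make_small_rbms((X_train, X_val), args)
W, vb, hb = make_large_weights(small_rbms)
# W: (3072, 7800), vb: (3072,), hb: (7800,) -- visible = 32*32*3, hidden = 300*26

grbm = GaussianRBM(n_visible=32 * 32 * 3,
                   n_hidden=300 * 26,
                   W_init=W, vb_init=vb, hb_init=hb,  # assumed to accept arrays
                   v_shape=(32, 32, 3),
                   **large_rbm_config)                # hypothetical config dict
grbm.fit(X_train, X_val)  # flattened (N, 3072) inputs, as for the small RBMs
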