Code example #1
File: pretrain.py  Project: aragornkishore/ml
def sgd(f, initial_params, batches, momentum, hook=None):
    output_dir = utils.make_output_directory(OUTPUT_PATH)
    save_params = parameters.save_hook(output_dir)
    def post_epoch(*args):
        save_params(*args)
        if hook is not None:
            hook(*args)
    return optimize.sgd(f, initial_params, batches,
                        epochs, learning_rate, momentum,
                        post_epoch=post_epoch)
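Every snippet on this page delegates the epoch/minibatch loop to optimize.sgd and only customises the post_epoch callback. optimize.sgd itself is not part of these excerpts; the sketch below is a minimal assumed version that only pins down the calling convention visible in the snippets: f(params, inputs) returns a gradient-shaped update, post_epoch(params, epoch) is called once per epoch (the callbacks read args[0].W and use args[1] as the epoch index), and a classical momentum update is applied. Parameters are a plain dict of arrays here, whereas the project evidently passes a parameters object with a .W attribute.

import numpy as np

def sgd(f, initial_params, batches, epochs, learning_rate, momentum,
        post_epoch=None):
    # Hypothetical sketch -- not the project's optimize.sgd.
    params = dict((k, v.copy()) for k, v in initial_params.items())
    velocity = dict((k, np.zeros_like(v)) for k, v in params.items())
    for epoch in range(epochs):
        for inputs in batches:
            grad = f(params, inputs)
            for k in params:
                # Classical momentum update (sign convention assumed).
                velocity[k] = momentum * velocity[k] + learning_rate * grad[k]
                params[k] += velocity[k]
        if post_epoch is not None:
            post_epoch(params, epoch)
    return params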
Code example #2
File: sparsity.py  Project: aragornkishore/ml
def ex(inputs):
    inputs = utils.remove_dc(inputs)
    inputs, zca = utils.zca_white(inputs, 0.1)
    batches = data.BatchIterator(inputs, 100)
    num_vis = inputs.shape[1]
    num_hid = 400
    epochs = 100
    momentum = 0

    initial_params = grbm.initial_params(num_hid, num_vis, 0.001, 1.0)

    neg_free_energy_grad = functools.partial(grbm.neg_free_energy_grad,
                                             learn_sigma=False)

    def f(params, inputs):
        return rbm.cd(params, inputs,
                      grbm.sample_h_noisy_relu, grbm.sample_v,
                      neg_free_energy_grad)
    
    learning_rate = 0.005

    output_dir = utils.make_output_directory(OUTPUT_PATH)
    save_params = parameters.save_hook(output_dir)
    error_history = []
    sparsity_history = []

    def post_epoch(*args):
        W_norm = utils.rescale(args[0].W)
        utils.save_image(utils.tile(W_norm),
                         os.path.join(output_dir, ('w%i.png' % args[1])))

        # Estimate sparsity from subset of data.
        h_mean = grbm.sample_h_noisy_relu(args[0], inputs[0:5000], True)[1]
        mean_activation = np.mean(h_mean > 0)
        print 'approx mean activation: %f' % mean_activation
        
        # The callback from optimize.sgd needs modifying so that it
        # passes the reconstruction error as an argument to make this
        # work. (This was used when I did the original experiments.)
        # error_history.append(args[2])
        sparsity_history.append(mean_activation)
        
        save_params(args[0], args[1])

    params = optimize.sgd(f, initial_params, batches,
                          epochs, learning_rate,
                          momentum,
                          post_epoch=post_epoch)

    with open(os.path.join(output_dir, 'history.pickle'), 'wb') as f:
        pickle.dump(error_history, f, -1)
        pickle.dump(sparsity_history, f, -1)

    return params, error_history, sparsity_history
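Example #2 pickles two objects back-to-back into history.pickle, so reading them back requires two pickle.load calls in the same order. A small usage sketch (output_dir is whatever directory utils.make_output_directory returned for that run):

import os
import pickle

def load_history(output_dir):
    # The loads must mirror the dump order in ex():
    # error_history first, then sparsity_history.
    with open(os.path.join(output_dir, 'history.pickle'), 'rb') as f:
        error_history = pickle.load(f)
        sparsity_history = pickle.load(f)
    return error_history, sparsity_history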
Code example #3
def sgd(f, initial_params, batches, momentum, hook=None):
    output_dir = utils.make_output_directory(OUTPUT_PATH)
    save_params = parameters.save_hook(output_dir)

    def post_epoch(*args):
        save_params(*args)
        if hook is not None:
            hook(*args)

    return optimize.sgd(f,
                        initial_params,
                        batches,
                        epochs,
                        learning_rate,
                        momentum,
                        post_epoch=post_epoch)
Code example #4
def ex(inputs):
    inputs = zero_mean(inputs)
    inputs, zca = utils.zca_white(inputs, 0.1)
    batches = data.BatchIterator(inputs, 100)
    num_vis = inputs.shape[1]
    num_hid = 400
    epochs = 100
    momentum = 0

    initial_params = grbm.initial_params(num_hid, num_vis, 0.001, 0.4)

    neg_free_energy_grad = functools.partial(grbm.neg_free_energy_grad,
                                             learn_sigma=False)

    def f(params, inputs):
        return rbm.cd(params, inputs, grbm.sample_h_noisy_relu, grbm.sample_v,
                      neg_free_energy_grad)

    learning_rate = 0.005

    output_dir = utils.make_output_directory(OUTPUT_PATH)
    save_params = parameters.save_hook(output_dir)

    def post_epoch(*args):
        save_params(*args)
        # Save visualization weights.
        W_norm = utils.rescale(args[0].W)
        img = Image.fromarray(
            np.uint8(utils.tile(W_norm, channel_count=3) * 255))
        img.save(os.path.join(output_dir, ('w%i.png' % args[1])))
        # Estimate sparsity from subset of data.
        h_mean = grbm.sample_h_noisy_relu(args[0], inputs[0:5000], True)[1]
        mean_activation = np.mean(h_mean > 0)
        print 'approx mean activation: %f' % mean_activation

    return optimize.sgd(f,
                        initial_params,
                        batches,
                        epochs,
                        learning_rate,
                        momentum,
                        post_epoch=post_epoch)
Code example #5
File: color_patches.py  Project: aragornkishore/ml
def ex(inputs):
    inputs = zero_mean(inputs)
    inputs, zca = utils.zca_white(inputs, 0.1)
    batches = data.BatchIterator(inputs, 100)
    num_vis = inputs.shape[1]
    num_hid = 400
    epochs = 100
    momentum = 0

    initial_params = grbm.initial_params(num_hid, num_vis, 0.001, 0.4)

    neg_free_energy_grad = functools.partial(grbm.neg_free_energy_grad,
                                             learn_sigma=False)

    def f(params, inputs):
        return rbm.cd(params, inputs,
                      grbm.sample_h_noisy_relu, grbm.sample_v,
                      neg_free_energy_grad)
    
    learning_rate = 0.005

    output_dir = utils.make_output_directory(OUTPUT_PATH)
    save_params = parameters.save_hook(output_dir)

    def post_epoch(*args):
        save_params(*args)
        # Save visualization weights.
        W_norm = utils.rescale(args[0].W)
        img = Image.fromarray(np.uint8(utils.tile(W_norm, channel_count=3) * 255))
        img.save(os.path.join(output_dir, ('w%i.png' % args[1])))
        # Estimate sparsity from subset of data.
        h_mean = grbm.sample_h_noisy_relu(args[0], inputs[0:5000], True)[1]
        mean_activation = np.mean(h_mean > 0)
        print 'approx mean activation: %f' % mean_activation

    return optimize.sgd(f, initial_params, batches,
                        epochs, learning_rate,
                        momentum,
                        post_epoch=post_epoch)
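Examples #4 and #5 call zero_mean, which does not appear in these excerpts. Example #2 calls utils.remove_dc at the same point in the pipeline, so a plausible reading is that zero_mean subtracts each patch's mean (its DC component) before ZCA whitening. The helper below is a sketch under that assumption, not the project's actual implementation:

import numpy as np

def zero_mean(inputs):
    # Assumed behaviour: subtract each row's (patch's) mean so every example
    # has zero DC component before whitening. Rows are examples, columns are
    # pixels, matching inputs.shape[1] == num_vis in the examples above.
    return inputs - inputs.mean(axis=1, keepdims=True)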
Code example #6
File: mnist.py  Project: aragornkishore/ml
    # Take every `step`-th item from gc starting at `start`, keep `count` of
    # them, extract each item's second element, tile it into an image, and
    # save the images to a fresh temporary directory under OUTPUT_PATH.
    # (gc, start, step and count are defined earlier in the file, not shown.)
    g = itertools.islice(gc, start, None, step)
    g = itertools.islice(g, count)
    g = itertools.imap(operator.itemgetter(1), g)
    g = itertools.imap(utils.tile, g)
    utils.save_images(g, tempfile.mkdtemp(dir=OUTPUT_PATH))

# def f(params, inputs):
#     return rbm.cd(params, inputs,
#                   rbm.sample_h,
#                   rbm.sample_v,
#                   rbm.neg_free_energy_grad,
#                   weight_decay=weight_decay,
#                   k=k)


output_dir = utils.make_output_directory(OUTPUT_PATH)
save_params = parameters.save_hook(output_dir)

f = rbm.pcd(rbm.sample_h, rbm.sample_v,
            rbm.neg_free_energy_grad, weight_decay)

def post_epoch(*args):
    #save_params(*args)
    #print 'Mean hidden activation prob. is %f.' % f.q
    pass
   
params = optimize.sgd(f, initial_params, batches, epochs,
                      learning_rate,
                      momentum,
                      weight_constraint=weight_constraint,
                      post_epoch=post_epoch)
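Example #6 (and its near-duplicate below) swaps the commented-out rbm.cd objective for rbm.pcd, which evidently returns a stateful callable: the commented-out print reads a running statistic off f.q. rbm.pcd itself is not included in these excerpts; the class below is only a generic illustration of persistent contrastive divergence, under assumed sample_h/sample_v/grad_fn signatures: the negative-phase Gibbs chain is kept between minibatches instead of being restarted from the data, which is why the objective is an object rather than a plain function.

import numpy as np

class PersistentCD(object):
    # Generic illustration, not the project's rbm.pcd.
    def __init__(self, sample_h, sample_v, grad_fn):
        self.sample_h = sample_h   # v -> sampled hidden units (assumed)
        self.sample_v = sample_v   # h -> sampled visible units (assumed)
        self.grad_fn = grad_fn     # (params, v_data, v_model) -> gradient (assumed)
        self.fantasy_v = None      # persistent chain state, kept across calls
        self.q = 0.0               # last mean hidden activation

    def __call__(self, params, inputs):
        if self.fantasy_v is None:
            self.fantasy_v = inputs.copy()
        # Advance the persistent chain by one Gibbs step.
        h = self.sample_h(params, self.fantasy_v)
        self.q = float(np.mean(h))
        self.fantasy_v = self.sample_v(params, h)
        # Positive phase from the data, negative phase from the chain.
        return self.grad_fn(params, inputs, self.fantasy_v)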
Code example #7
    g = itertools.islice(gc, start, None, step)
    g = itertools.islice(g, count)
    g = itertools.imap(operator.itemgetter(1), g)
    g = itertools.imap(utils.tile, g)
    utils.save_images(g, tempfile.mkdtemp(dir=OUTPUT_PATH))


# def f(params, inputs):
#     return rbm.cd(params, inputs,
#                   rbm.sample_h,
#                   rbm.sample_v,
#                   rbm.neg_free_energy_grad,
#                   weight_decay=weight_decay,
#                   k=k)

output_dir = utils.make_output_directory(OUTPUT_PATH)
save_params = parameters.save_hook(output_dir)

f = rbm.pcd(rbm.sample_h, rbm.sample_v, rbm.neg_free_energy_grad, weight_decay)


def post_epoch(*args):
    #save_params(*args)
    #print 'Mean hidden activation prob. is %f.' % f.q
    pass


params = optimize.sgd(f,
                      initial_params,
                      batches,
                      epochs,