Example #1
import numpy as np

# sigm, get_rng, logsumexp, and logdiffexp are assumed to be helpers available in
# the surrounding codebase; they are not defined in this snippet.


def estimate_log_z(w, b_h, b_v, annealing_ratios, n_runs=10, rng=None):
    """
    Use Annealed Importance Sampling (AIS)
    http://www.iro.umontreal.ca/~lisa/pointeurs/breuleux+bengio_nc2011.pdf
    to estimate the log partition function of the RBM, which is needed to evaluate
    the probability of test data given the RBM parameters.

    This code is a Pythonified version of Russ Salakhutdinov's Matlab code:
    http://www.utstat.toronto.edu/~rsalakhu/code_AIS/RBM_AIS.m

    NOTE: This code does not seem to produce good results (its estimates do not match
    exactly-computed values, and it is not clear why).
    A better option is the rbm_ais method from pylearn2 (from pylearn2.rbm_tools import rbm_ais).

    :param w: Weights (n_visible, n_hidden)
    :param b_h: Hidden biases (n_hidden)
    :param b_v: Visible biases (n_visible)
    :param annealing_ratios: A monotonically increasing vector from 0 to 1
    :param n_runs: Number of annealing chains to use.
    :param rng: A random number generator or seed
    :return: A tuple (log_z_est, (log_z_up, log_z_down)): the AIS estimate of log(Z),
        along with rough upper/lower bounds (3 standard deviations of the weights).
    """
    assert annealing_ratios[0]==0 and annealing_ratios[-1]==1 and np.all(np.diff(annealing_ratios)>0)
    rng = get_rng(rng)
    n_visible, n_hidden = w.shape
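    # Base distribution: an RBM with zero weights and zero visible biases, whose
    # partition function is known in closed form (see log_z_base below).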
    visbiases_base = np.zeros_like(b_v)
    neg_data = rng.rand(n_runs, n_visible) < sigm(visbiases_base)  # Sample initial visible states from the base-rate model
    logww = - neg_data.dot(visbiases_base) - n_hidden*np.log(2)
    w_h = neg_data.dot(w)+b_h
    bv_base = neg_data.dot(visbiases_base)
    bee_vee = bv_base
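    # Anneal from the base-rate model (r=0) toward the target RBM (r=1),
    # accumulating log importance weights along the way.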
    for r in annealing_ratios:
        exp_wh = np.exp(r*w_h)
        logww += (1-r)*bv_base + r*bee_vee + np.sum(np.log(1+exp_wh), axis=1)
        wake_hid_probs = exp_wh/(1+exp_wh)
        wake_hid_states = wake_hid_probs > rng.rand(*wake_hid_probs.shape)
        neg_vis_probs = sigm((1-r)*visbiases_base + r*(wake_hid_states.dot(w.T)+b_v))
        neg_vis_states = neg_vis_probs > rng.rand(*neg_vis_probs.shape)

        w_h = neg_vis_states.dot(w)+b_h
        bv_base = neg_vis_states.dot(visbiases_base)
        bee_vee = neg_vis_states.dot(b_v)

        exp_wh = np.exp(r*w_h)
        logww -= (1-r)*bv_base + r*bee_vee + np.sum(np.log(1+exp_wh), axis=1)

    # Add the final (r=1) target-distribution term, using the last sampled visible states.
    exp_wh = np.exp(w_h)
    logww += neg_vis_states.dot(b_v) + np.sum(np.log(1+exp_wh), axis=1)

    r_ais = logsumexp(logww) - np.log(n_runs)
    log_z_base = np.sum(np.log(1+np.exp(visbiases_base))) + n_hidden*np.log(2)
    log_z_est = r_ais + log_z_base
    # Rough error bars: +/- 3 standard deviations of the importance weights in the log domain.
    aa = np.mean(logww)
    logstd_AIS = np.log(np.std(np.exp(logww-aa))) + aa - np.log(n_runs)/2
    logZZ_est_up = logsumexp([np.log(3)+logstd_AIS, r_ais], axis=0) + log_z_base
    logZZ_est_down = logdiffexp([np.log(3)+logstd_AIS, r_ais], axis=0) + log_z_base
    return log_z_est, (logZZ_est_up, logZZ_est_down)
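
A minimal usage sketch (the shapes, the uniform annealing schedule, and the run count below are illustrative assumptions, not values from the source):

import numpy as np

rng = np.random.RandomState(0)
w = 0.01 * rng.randn(20, 10)       # (n_visible, n_hidden)
b_h = np.zeros(10)
b_v = np.zeros(20)
betas = np.linspace(0, 1, 1000)    # monotonically increasing from 0 to 1
log_z, (log_z_up, log_z_down) = estimate_log_z(w, b_h, b_v, betas, n_runs=100, rng=rng)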
Example #2
from sklearn.svm import LinearSVC

# sigm and percent_correct are assumed helpers from the surrounding codebase.


def get_svm_score(w, b_h, dataset):
    """
    Given a trained RBM, get the classification score of a linear SVM trained on the hidden representation.
    :param w: Weights (n_visible, n_hidden)
    :param b_h: Hidden biases (n_hidden)
    :param dataset: A Dataset object whose training_set and test_set expose .input and .target
    :return: A scalar score: the percent of test labels predicted correctly
    """
    proj_training_data = sigm(dataset.training_set.input.dot(w)+b_h)
    classifier = LinearSVC()
    classifier.fit(proj_training_data, dataset.training_set.target)
    proj_test_data = sigm(dataset.test_set.input.dot(w)+b_h)
    predicted_labels = classifier.predict(proj_test_data)
    score = percent_correct(dataset.test_set.target, predicted_labels)
    return score
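
A hypothetical baseline sketch, assuming the synthetic dataset helper from Example #5 below and a random projection standing in for trained RBM weights:

import numpy as np

dataset = get_synthethic_linear_dataset(n_input_dims=20, n_output_dims=4, nonlinearity='argmax')
rng = np.random.RandomState(0)
w = rng.randn(20, 100)    # random (n_visible, n_hidden) projection
b_h = np.zeros(100)
print(get_svm_score(w, b_h, dataset))    # percent of test labels predicted correctly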
Example #3
def test_assess_prediction_functions(print_results=True):

    x_tr, y_tr, x_ts, y_ts = get_synthethic_linear_dataset(n_input_dims=80, n_output_dims=4).xyxy

    w = np.random.RandomState(1234).randn(80, 4)

    results = assess_prediction_functions(
        test_pairs=[('train', (x_tr, y_tr)), ('test', (x_ts, y_ts))],
        functions=[('prediction', lambda x: sigm(x.dot(w)))],
        costs=[mean_squared_error, percent_argmax_correct],
        print_results=print_results
        )

    assert results['train', 'prediction', 'percent_argmax_correct'] == 23.1
Example #4
def test_assess_prediction_functions(print_results=True):

    x_tr, y_tr, x_ts, y_ts = get_synthethic_linear_dataset(
        n_input_dims=80, n_output_dims=4).xyxy

    w = np.random.RandomState(1234).randn(80, 4)

    results = assess_prediction_functions(
        test_pairs=[('train', (x_tr, y_tr)), ('test', (x_ts, y_ts))],
        functions=[('prediction', lambda x: sigm(x.dot(w)))],
        costs=[mean_squared_error, percent_argmax_correct],
        print_results=print_results)

    assert results['train', 'prediction', 'percent_argmax_correct'] == 23.1
Example #5
import numpy as np

# softmax, sigm, DataSet, and DataCollection are assumed helpers from the surrounding codebase.


def get_synthethic_linear_dataset(noise_level=0.1, n_input_dims=20, n_output_dims=4, n_training_samples=1000,
        n_test_samples=200, nonlinearity=None, offset_mag=0, seed=8158):
    """
    A Synthethic dataset that can be used for testing generalized linear models.

    :param noise_level:
    :param n_input_dims:
    :param n_output_dims:
    :param n_training_samples:
    :param n_test_samples:
    :param nonlinearity:
    :param seed:
    :return:
    """

    input_singleton = n_input_dims == 0
    if input_singleton:
        n_input_dims = 1

    output_singleton = n_output_dims == 0
    if output_singleton:  # Unfortunately we have to deal with the inconsistencies in numpy's handling of singleton dimensions.
        n_output_dims = 1

    rng = np.random.RandomState(seed)
    w = rng.randn(n_input_dims, n_output_dims) * 1/np.sqrt(n_input_dims)
    input_data = rng.randn(n_training_samples+n_test_samples, n_input_dims)
    target_data = np.dot(input_data, w) + offset_mag * rng.randn(n_output_dims) + noise_level*rng.randn(n_training_samples+n_test_samples, n_output_dims)
    if nonlinearity=='softmax':
        target_data = softmax(target_data, axis=1)
    elif nonlinearity=='sigmoid':
        target_data = sigm(target_data)
    elif nonlinearity=='argmax':
        target_data = np.argmax(target_data, axis=1)
    elif nonlinearity is None:
        pass
    else:
        assert callable(nonlinearity), 'Unknown nonlinearity: {}'.format(nonlinearity)
        target_data = nonlinearity(target_data)

    if input_singleton:
        input_data = input_data[:, 0]

    if output_singleton:
        target_data = target_data[:, 0]

    return DataSet(
        training_set=DataCollection(input_data[:n_training_samples], target_data[:n_training_samples]),
        test_set=DataCollection(input_data[n_training_samples:], target_data[n_training_samples:]),
        )
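
A minimal usage sketch (shape comments assume the default sample counts; the .input/.target accessors are the ones used in Example #2, and Example #3 shows the equivalent .xyxy shortcut):

dataset = get_synthethic_linear_dataset(noise_level=0.1, n_input_dims=20, n_output_dims=4)
x_tr, y_tr = dataset.training_set.input, dataset.training_set.target    # (1000, 20), (1000, 4)
x_ts, y_ts = dataset.test_set.input, dataset.test_set.target            # (200, 20), (200, 4)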
Example #6
def test_exp_sig_of_norm():

    mean = 1
    std = 0.8
    n_points = 1000
    seed = 1234

    inputs = np.random.RandomState(seed).normal(mean, std, size=n_points)
    vals = sigm(inputs)
    sample_mean = np.mean(vals)

    for method in ('maclauren-2', 'maclauren-3', 'probit'):
        approx_true_mean = expected_sigm_of_norm(mean, std, method=method)
        approx_sample_mean = expected_sigm_of_norm(np.mean(inputs), np.std(inputs), method=method)
        true_error = np.abs(approx_true_mean-sample_mean)/sample_mean
        sample_error = np.abs(approx_sample_mean-sample_mean)/sample_mean
        print('Error for %s: %.4f True, %.4f Sample.' % (method, true_error, sample_error))
        assert true_error < 0.02, 'Method %s did pretty badly' % (method, )
Example #7
def test_exp_sig_of_norm():

    mean = 1
    std = 0.8
    n_points = 1000
    seed = 1234

    inputs = np.random.RandomState(seed).normal(mean, std, size=n_points)
    vals = sigm(inputs)
    sample_mean = np.mean(vals)

    for method in ('maclauren-2', 'maclauren-3', 'probit'):
        approx_true_mean = expected_sigm_of_norm(mean, std, method=method)
        approx_sample_mean = expected_sigm_of_norm(np.mean(inputs), np.std(inputs), method=method)
        true_error = np.abs(approx_true_mean-sample_mean)/sample_mean
        sample_error = np.abs(approx_sample_mean-sample_mean)/sample_mean
        print('Error for %s: %.4f True, %.4f Sample.' % (method, true_error, sample_error))
        assert true_error < 0.02, 'Method %s did pretty badly' % (method, )
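
For reference, a self-contained sketch of the 'probit' method this test exercises. The closed form below is the standard Gaussian-sigmoid moment approximation E[sigm(X)] ~= sigm(mean / sqrt(1 + pi*std^2/8)); treating it as what expected_sigm_of_norm computes is an assumption, not something stated in the source:

import numpy as np

def probit_expected_sigm(mean, std):
    # Approximate the logistic sigmoid with a probit (Gaussian CDF), integrate the
    # probit against N(mean, std**2) exactly, then map back to a sigmoid.
    return 1.0 / (1.0 + np.exp(-mean / np.sqrt(1.0 + np.pi * std**2 / 8.0)))

print(probit_expected_sigm(1.0, 0.8))    # should be close to np.mean(sigm(samples))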
Example #8
def get_synthethic_linear_dataset(noise_level=0.1,
                                  n_input_dims=20,
                                  n_output_dims=4,
                                  n_training_samples=1000,
                                  n_test_samples=200,
                                  nonlinearity=None,
                                  offset_mag=0,
                                  seed=8158):
    """
    A Synthethic dataset that can be used for testing generalized linear models.

    :param noise_level:
    :param n_input_dims:
    :param n_output_dims:
    :param n_training_samples:
    :param n_test_samples:
    :param nonlinearity:
    :param seed:
    :return:
    """

    input_singleton = n_input_dims == 0
    if input_singleton:
        n_input_dims = 1

    output_singleton = n_output_dims == 0
    if output_singleton:  # Unfortunately we have to deal with the inconsistencies in numpy's handling of singleton dimensions.
        n_output_dims = 1

    rng = np.random.RandomState(seed)
    w = rng.randn(n_input_dims, n_output_dims) * 1 / np.sqrt(n_input_dims)
    input_data = rng.randn(n_training_samples + n_test_samples, n_input_dims)
    target_data = (np.dot(input_data, w)
                   + offset_mag * rng.randn(n_output_dims)
                   + noise_level * rng.randn(n_training_samples + n_test_samples, n_output_dims))
    if nonlinearity == 'softmax':
        target_data = softmax(target_data, axis=1)
    elif nonlinearity == 'sigmoid':
        target_data = sigm(target_data)
    elif nonlinearity == 'argmax':
        target_data = np.argmax(target_data, axis=1)
    elif nonlinearity is None:
        pass
    else:
        assert callable(nonlinearity), 'Unknown nonlinearity: {}'.format(nonlinearity)
        target_data = nonlinearity(target_data)

    if input_singleton:
        input_data = input_data[:, 0]

    if output_singleton:
        target_data = target_data[:, 0]

    return DataSet(
        training_set=DataCollection(input_data[:n_training_samples],
                                    target_data[:n_training_samples]),
        test_set=DataCollection(input_data[n_training_samples:],
                                target_data[n_training_samples:]),
    )