Code example #1
File: hw5.py Project: Elistrael/caltech.ml
def run_log_regression():

    nb_in_sample = 100
    nb_out_of_sample = 100000
    nb_runs = 100
    nb_epochs = 0
    nb_Eout = 0
    lr = 0.01
    eps = 0.01

    for i in range(nb_runs):
        # generate random function
        l = randomline()
        f = target_random_function(l)
        # generate in sample data and out of sample data
        data_in_sample = data(nb_in_sample)
        data_out_of_sample = data(nb_out_of_sample)
        # create training set structure [[w0,w1,...],target_value]
        t_set_in = build_training_set_fmultipleparams(data_in_sample,f)
        t_set_out = build_training_set_fmultipleparams(data_out_of_sample,f)
        # run logistic regression in sample
        epochs,w = log_regression_sgd(t_set_in,eps,lr)
        # compute the out-of-sample error given the previously computed weights.
        e_out = log_regression_compute_Eout(t_set_out,w)

        print "Run: %s - epochs: %s"%(i, epochs)
        print "Eout: %s"%(e_out)

        nb_Eout += e_out
        nb_epochs += epochs

    print "Number of runs: %s" % (nb_runs)
    # use float division so the averages are not truncated (Python 2)
    print "Avg epochs: %s" % (nb_epochs / float(nb_runs))
    print "Avg Eout: %s" % (nb_Eout / float(nb_runs))
Code example #2
File: hw5.py Project: yprog/caltech.ml
def run_log_regression():

    nb_in_sample = 100
    nb_out_of_sample = 100000
    nb_runs = 100
    nb_epochs = 0
    nb_Eout = 0
    lr = 0.01
    eps = 0.01

    for i in range(nb_runs):
        # generate random function
        l = randomline()
        f = target_random_function(l)
        # generate in sample data and out of sample data
        data_in_sample = data(nb_in_sample)
        data_out_of_sample = data(nb_out_of_sample)
        # create training set structure [[w0,w1,...],target_value]
        t_set_in = build_training_set_fmultipleparams(data_in_sample, f)
        t_set_out = build_training_set_fmultipleparams(data_out_of_sample, f)
        # run logistic regression in sample
        epochs, w = log_regression_sgd(t_set_in, eps, lr)
        # compute the out-of-sample error given the previously computed weights.
        e_out = log_regression_compute_Eout(t_set_out, w)

        print "Run: %s - epochs: %s" % (i, epochs)
        print "Eout: %s" % (e_out)

        nb_Eout += e_out
        nb_epochs += epochs

    print "Number of runs: %s" % (nb_runs)
    # use float division so the averages are not truncated (Python 2)
    print "Avg epochs: %s" % (nb_epochs / float(nb_runs))
    print "Avg Eout: %s" % (nb_Eout / float(nb_runs))
Code example #3
def generate_t_set(N, f=None):
    '''
    Generate a training set of N = 1000 points on X = [-1, 1] x [-1, 1] with
    uniform probability of picking each x in X. Generate simulated noise by
    flipping the sign of the target on a random 10% subset of the training set.
    '''
    d = data(N)
    if f is None:
        f = lambda x: sign(x[0]**2 + x[1]**2 - 0.6)

    t_set = build_training_set_fmultipleparams(d, f)
    t_set = t_set_errorNoise(t_set, N // 10)

    return t_set, f
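
data and t_set_errorNoise are likewise project helpers that do not appear in this listing. Based on the docstring above, a minimal sketch of the uniform sampling and the 10% label-flip noise could look like the following; the _sketch names and the [x, y] training-set layout are assumptions rather than the project's actual code.

import random

def data_sketch(n):
    # n points drawn uniformly from the square [-1, 1] x [-1, 1]
    return [(random.uniform(-1.0, 1.0), random.uniform(-1.0, 1.0)) for _ in range(n)]

def t_set_errorNoise_sketch(t_set, n_noisy):
    # flip the target sign on n_noisy randomly chosen training points
    noisy = [list(p) for p in t_set]
    for i in random.sample(range(len(noisy)), n_noisy):
        noisy[i][1] = -noisy[i][1]
    return noisy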
Code example #4
File: hw2.py Project: Elistrael/caltech.ml
def generate_t_set(N, f=None):
    '''
    Generate a training set of N = 1000 points on X = [-1, 1] x [-1, 1] with
    uniform probability of picking each x in X. Generate simulated noise by
    flipping the sign of the target on a random 10% subset of the training set.
    '''
    d = data(N)
    if f is None:
        f = lambda x: sign(x[0]**2 + x[1]**2 - 0.6)

    t_set = build_training_set_fmultipleparams(d, f)
    t_set = t_set_errorNoise(t_set, N // 10)

    return t_set, f