# Keep at most `points_to_plot` evenly spaced samples of the recorded
# histories; the stride collapses to 1 when there are fewer iterations
# than plot points.
_sdlbfs_stride = int(sdlbfs_its / min(sdlbfs_its, points_to_plot))
sdlbfs_runtimes = sdlbfs_runtimes[::_sdlbfs_stride]
sdlbfs_grads = sdlbfs_grads[::_sdlbfs_stride]

#%%

import algorithms as alg


def obj_f(x, order):
    """Tanh objective on the shared `data`, minibatch size 10, L2
    regularizer 1e-4 — wrapper giving the solver an (x, order) signature.
    (PEP 8 E731: a `def` instead of a lambda assigned to a name.)"""
    return tanh_obj(x, order=order, data=data, minibatch_size=10,
                    regularizer=1e-4)


# Alternative objective, kept for reference:
# from hw4_functions import cross_entropy_error as cross_obj
# obj_f = lambda x, order: cross_obj(x, data=data, order=order, minibatch_size=10)
'SGD'

sgd_x, sgd_values, sgd_runtimes, sgd_xs, sgd_grads = \
    alg.subgradient_descent(obj_f, initial_x, max_iterations, 0.1)

print('Solution found by stochastic subgradient descent', sgd_x)
print('Objective function', obj_f(sgd_x, 0))
# Pick the iterate with the lowest recorded objective value and score it.
sgd_minind = sgd_values.index(min(sgd_values))
sgd_corr = correct_rate(sgd_xs[sgd_minind], features, labels)
# Sign of <w, feature> as the predicted label for each sample.
sgd_pred = [
    np.sign(np.dot(sgd_xs[sgd_minind].T, feature.T)).item(0)
    for feature in features
]
print(sgd_corr)
sgd_its = len(sgd_runtimes)
# Re-evaluate the objective on at most `points_to_plot` evenly spaced
# iterates; the `min` keeps the range step >= 1.
sgd_values = [
    obj_f(sgd_xs[i], 0)
    for i in range(0, sgd_its, int(sgd_its / min(sgd_its, points_to_plot)))
]
# ===== Example #2 =====
w = np.zeros((d, 1))

minibatch_size = 10
max_iterations_sd = 20
# Give SGD the same number of data passes as 20 full-gradient steps:
# one stochastic step per minibatch.
max_iterations_sgd = int(max_iterations_sd * features.shape[0] /
                         minibatch_size)
points_to_plot = 20


def func_stochastic(x, order):
    """Stochastic SVM objective on a minibatch of `minibatch_size` samples
    (PEP 8 E731: `def` instead of a lambda assigned to a name)."""
    return hw_func.svm_objective_function_stochastic(x, features, labels,
                                                     order, minibatch_size)


def func(x, order):
    """Full-batch SVM objective, used to evaluate progress."""
    return hw_func.svm_objective_function(x, features, labels, order)


initial_x = np.zeros((d, 1))

sgd_x, sgd_values, sgd_runtimes, sgd_xs = alg.subgradient_descent(
    func_stochastic, initial_x, max_iterations_sgd, 1)
print('Solution found by stochastic subgradient descent', sgd_x)
print('Objective function', func(sgd_x, 0))  # 0.8144200035391359
# Evaluate on at most `points_to_plot` evenly spaced iterates. The `min`
# guard keeps the range step >= 1 (the bare int division would yield a
# step of 0 — a range() ValueError — whenever
# max_iterations_sgd < points_to_plot).
sgd_values = [
    func(sgd_xs[i], 0)
    for i in range(0, max_iterations_sgd,
                   int(max_iterations_sgd /
                       min(max_iterations_sgd, points_to_plot)))
]

ada_x, ada_values, ada_runtimes, ada_xs = alg.adagrad(func_stochastic,
                                                      initial_x,
                                                      max_iterations_sgd, 1)
print('Solution found by stochastic adagrad', ada_x)
print('Objective function', func(ada_x, 0))  # 0.8210954033966148
ada_values = [
    func(ada_xs[i], 0) for i in range(0, max_iterations_sgd,
                                      int(max_iterations_sgd / points_to_plot))
# ===== Example #3 =====
    'Objective function',
    cross_enp_func(sdlbfs_x[-1],
                   order=0,
                   data=data,
                   minibatch_size=len(labels)))

#%%
'SGD'
from hw4_functions import cross_entropy_error as cross_enp_func
import algorithms as alg


def obj_f(x, order):
    """Cross-entropy objective on `data` with minibatch size 50 — wrapper
    giving the solver an (x, order) signature.
    (PEP 8 E731: a `def` instead of a lambda assigned to a name.)"""
    return cross_enp_func(x, order=order, data=data, minibatch_size=50)


initial_x = np.zeros((d, 1))

sgd_x, sgd_values, sgd_runtimes, sgd_xs = \
    alg.subgradient_descent(obj_f, initial_x, max_iterations, 0.1)

print('Solution found by stochastic subgradient descent', sgd_x)
print('Objective function', obj_f(sgd_x, 0))
sgd_its = len(sgd_runtimes)
# Re-evaluate on at most `points_to_plot` evenly spaced iterates;
# the `min` keeps the range step >= 1.
sgd_values = [
    obj_f(sgd_xs[i], 0)
    for i in range(0, sgd_its, int(sgd_its / min(sgd_its, points_to_plot)))
]
#%%
'Ada'

# Run AdaGrad from the same starting point with the same step size and
# iteration budget, then report its solution and objective value.
ada_x, ada_values, ada_runtimes, ada_xs = alg.adagrad(
    obj_f, initial_x, max_iterations, 0.1)
print('Solution found by stochastic adagrad', ada_x)
print('Objective function', obj_f(ada_x, 0))