Example #1
0
def regularized_logistic_regression_bounded(log_func, learning_rate, bound,
                                            stop_threshold, pts, l):
    """Fit a regularized logistic model by gradient descent until convergence.

    Descends until the per-iteration cost decrease stays below ``bound`` for
    more than ``stop_threshold`` consecutive iterations (converged), or the
    cost increases more than ``stop_threshold`` times in total (diverging).

    Args:
        log_func: logistic model, as produced by ``logistic_function``.
        learning_rate: gradient-descent step size.
        bound: cost-decrease threshold below which a step counts as "small".
        stop_threshold: consecutive small decreases required to stop; also
            the number of tolerated cost increases before aborting.
        pts: training points.
        l: regularization strength (lambda).

    Returns:
        The fitted logistic model.

    Raises:
        OverflowError: if the cost increases more than ``stop_threshold``
            times — the learning rate is likely too large.
    """
    curr_cost = regularized_cost(log_func, pts,
                                 squared_coefficient_regularization, l)
    print("Initial cost is: {}".format(curr_cost))
    increasing_count = 0
    consecutive_small_count = 0
    itcount = 0
    while True:
        itcount += 1
        gradvals = regularized_gradient_descent_value(
            log_func, pts, squared_coefficient_regularization_gradient, l)
        nextparams = next_params(log_func, learning_rate, gradvals)
        # tuple() for consistency with the other examples in this file.
        log_func = logistic_function(tuple(nextparams))
        last_cost, curr_cost = curr_cost, regularized_cost(
            log_func, pts, squared_coefficient_regularization, l)

        diff = last_cost - curr_cost
        if 0 < diff < bound:
            consecutive_small_count += 1
        else:
            # Bug fix: the streak counts *consecutive* small decreases, so
            # any non-small (or non-decreasing) step must break it. The
            # original never reset the counter, so scattered small steps
            # over a long run could falsely signal convergence.
            consecutive_small_count = 0
            if diff < 0:
                increasing_count += 1

        if consecutive_small_count > stop_threshold:
            break
        elif increasing_count > stop_threshold:
            raise OverflowError(
                "cost increased more than {} times; "
                "learning rate may be too large".format(stop_threshold))
    return log_func
Example #2
0
def ols_regression_bounded(lin_func, learning_rate, bound, stop_threshold, pts):
    """Fit a linear model by ordinary-least-squares gradient descent.

    Descends until the per-iteration cost decrease stays below ``bound`` for
    more than ``stop_threshold`` consecutive iterations (converged), or the
    cost increases more than ``stop_threshold`` times in total (diverging).

    Args:
        lin_func: linear model, as produced by ``linear_function``.
        learning_rate: gradient-descent step size.
        bound: cost-decrease threshold below which a step counts as "small".
        stop_threshold: consecutive small decreases required to stop; also
            the number of tolerated cost increases before aborting.
        pts: training points.

    Returns:
        The fitted linear model.

    Raises:
        OverflowError: if the cost increases more than ``stop_threshold``
            times — the learning rate is likely too large.
    """
    curr_cost = non_regularized_cost(lin_func, pts)
    print("Initial cost is: {}".format(curr_cost))
    increasing_count = 0
    consecutive_small_count = 0
    itcount = 0
    while True:
        itcount += 1
        gradvals = non_regularized_gradient_descent_value(lin_func, pts)
        nextparams = next_params(lin_func, learning_rate, gradvals)
        # tuple() for consistency with the other examples in this file.
        lin_func = linear_function(tuple(nextparams))
        last_cost, curr_cost = curr_cost, non_regularized_cost(lin_func, pts)

        diff = last_cost - curr_cost
        if 0 < diff < bound:
            consecutive_small_count += 1
        else:
            # Bug fix: the streak counts *consecutive* small decreases, so
            # any non-small (or non-decreasing) step must break it. The
            # original never reset the counter, so scattered small steps
            # over a long run could falsely signal convergence.
            consecutive_small_count = 0
            if diff < 0:
                increasing_count += 1

        if consecutive_small_count > stop_threshold:
            break
        elif increasing_count > stop_threshold:
            raise OverflowError(
                "cost increased more than {} times; "
                "learning rate may be too large".format(stop_threshold))
    return lin_func
Example #3
0
def logistic_regression(log_func, iterations, learning_rate, pts):
    """Train a logistic model for a fixed number of gradient-descent steps.

    Each step computes the (non-regularized) gradient at the current model,
    takes one learning-rate-scaled step, rebuilds the model from the updated
    parameters, and prints the resulting cost.

    Returns the model after the final iteration.
    """
    for step in range(iterations):
        gradient = non_regularized_gradient_descent_value(log_func, pts)
        updated = tuple(next_params(log_func, learning_rate, gradient))
        log_func = logistic_function(updated)
        cost = non_regularized_cost(log_func, pts)
        print("Cost after iteration {} is : {}".format(step, cost))
    return log_func
Example #4
0
def ridge_regression(lin_func, iterations, learning_rate, pts, l):
    """Train an L2-regularized (ridge) linear model for ``iterations`` steps.

    Each step evaluates the regularized gradient (lambda ``l``), moves the
    parameters by one learning-rate-scaled step, rebuilds the model, and
    recomputes the regularized cost. Returns the final model.
    """
    for step in range(iterations):
        grad = regularized_gradient_descent_value(
            lin_func, pts, squared_coefficient_regularization_gradient, l)
        params = tuple(next_params(lin_func, learning_rate, grad))
        lin_func = linear_function(params)
        curr_cost = regularized_cost(
            lin_func, pts, squared_coefficient_regularization, l)
        # Debug hook: uncomment to trace convergence per iteration.
        # print("Cost after iteration {} is : {}".format(step, curr_cost))
    return lin_func