Example #1
def MACD(stock_data, fast_period=12, slow_period=26, signal_period=9):
    # talib expects chronological order; the frame itself appears to be
    # stored newest-first, hence the [::-1] when writing the columns back.
    closes = stock_data.sort_index().close.values
    macd = talib.MACD(closes, fast_period, slow_period, signal_period)
    for i in range(len(macd[0])):
        # Histogram: 2 * (DIF - DEA), both passed through utils.f first.
        macd[2][i] = (utils.f(macd[0][i]) - utils.f(macd[1][i])) * 2
    stock_data['dif'] = macd[0][::-1]
    stock_data['dea'] = macd[1][::-1]
    stock_data['macd'] = macd[2][::-1]
    return stock_data
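For reference, DIF here is the difference between the fast and slow EMAs of the close, DEA is the signal-period EMA of DIF, and the histogram column is 2 * (DIF - DEA). A dependency-free pandas sketch of the same quantities (a rough equivalent only: it assumes chronological input and skips the utils.f rounding step):

import pandas as pd

def macd_pandas(close, fast=12, slow=26, signal=9):
    # Exponential moving averages of a chronological close series.
    dif = (close.ewm(span=fast, adjust=False).mean()
           - close.ewm(span=slow, adjust=False).mean())
    dea = dif.ewm(span=signal, adjust=False).mean()
    return dif, dea, (dif - dea) * 2  # same 2x histogram scaling as above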
Example #3
def ranked(**kwargs):
    population = kwargs.get('population')

    min_f = min(f(x) for x in population)
    max_f = max(f(x) for x in population)
    sorted_population = sorted(population, key=f, reverse=True)
    scaling_func = lambda x: min_f + (max_f - min_f) * sorted_population.index(x) / (len(population) - 1)

    return roulette(scaling_func=scaling_func, **kwargs)
Example #5
def iteration(u, n, t, prev):
    s = 0.5
    g = n / 2
    result = np.zeros((n + 1, n + 1))

    delta_u = delta(u, n)
    # Tridiagonal coefficients: identity rows at the boundaries, a constant
    # (1 + 2*t*t*s*n*n) stencil in the interior.
    a = [0] + [-t * t * s * n * n] * (n - 1) + [0]
    b = [1] + [1 + 2 * t * t * s * n * n] * (n - 1) + [1]
    c = [0] + [-t * t * s * n * n] * (n - 1) + [0]
    temp = make_matrix(n)
    # First sweep: solve a tridiagonal system along each row.
    for k in range(1, n):
        d = -(u[k] - prev[k]) * t * g + t * t * (
            delta_u[k] - f(k / n, np.linspace(0, 1, n + 1)) / n / n) * n * n
        d[0] = 0
        d[n] = 0
        temp[k] = tdma(n + 1, a, b, c, d)

    # Second sweep: transpose and solve along the other direction.
    temp = np.transpose(temp)
    for k in range(1, n):
        d = temp[k]
        d[0] = 0
        d[n] = 0
        result[k] = tdma(n + 1, a, b, c, d)

    result = np.transpose(result)
    return result + 2 * u - prev
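tdma, delta, make_matrix and f above are helpers from the same repo. For orientation, a minimal Thomas-algorithm sketch matching the call shape tdma(n, a, b, c, d), assuming a, b and c are the sub-, main and super-diagonals, each of length n:

import numpy as np

def tdma_sketch(n, a, b, c, d):
    # Forward sweep: eliminate the sub-diagonal.
    cp = np.zeros(n)
    dp = np.zeros(n)
    cp[0] = c[0] / b[0]
    dp[0] = d[0] / b[0]
    for i in range(1, n):
        m = b[i] - a[i] * cp[i - 1]
        cp[i] = c[i] / m
        dp[i] = (d[i] - a[i] * dp[i - 1]) / m
    # Back substitution.
    x = np.zeros(n)
    x[-1] = dp[-1]
    for i in range(n - 2, -1, -1):
        x[i] = dp[i] - cp[i] * x[i + 1]
    return x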
Example #6
def centroids(hyperplanes, param, distrib, dataset=None):
    """
        Gives the centroid of every non-void region. The returned object is an
        array containing every non-void regions with the coordinates of its
        centroid.
        param is the number of realisations of f on the whole space, that are
        used for computing the centroids. Augmenting it will augment the
        precision but also computation time.
    """
    output = []
    rpr = []  # realisations per region
    for i in range(param):
        x = utils.f(len(hyperplanes[0]) - 1, distrib, dataset)
        r = utils.findRegion(x, hyperplanes)
        ir = -1  # index of r in the array output
        for j in range(len(output)):  # check if r is already registered
            if np.all(output[j][0] == r):
                ir = j
                break
        if ir == -1:
            output.append([r, x])
            rpr.append(1.)
        else:
            output[ir][1] += x
            rpr[ir] += 1.
    # divide the coordinates for each region by the rpr for that region
    for k in range(len(output)):
        output[k][1] /= rpr[k]
    return np.array(output)
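The same running-mean estimate in miniature, with a plain label function standing in for utils.findRegion and a sample callable standing in for utils.f (both hypothetical):

import numpy as np

def mc_centroids(sample, label, n_regions, n_points, dim):
    # sample() draws a random point, label(x) returns its region index.
    sums = np.zeros((n_regions, dim))
    counts = np.zeros(n_regions)
    for _ in range(n_points):
        x = sample()
        r = label(x)
        sums[r] += x
        counts[r] += 1
    mask = counts > 0  # keep only the non-void regions
    return sums[mask] / counts[mask][:, None]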
Example #7
def MSEforDirection(directions, pCentroids, pMeasure, distrib, dataset):
    """
        A more efficient way to compute the MSE for several directions at
        once (for a specific update function).
    """
    directionsMSE = np.zeros(len(directions))
    numberOfPointsUsed = np.zeros(len(directions))
    # define regions and centroids
    regionsWithCentroids = []
    for direction in directions:
        regionsWithCentroids.append(core.centroids(direction, pCentroids, distrib))
    # calculate error
    for k in range(pMeasure):
        x = utils.f(len(directions[0][0]) - 1, distrib, dataset)  # pick a random point x
        for i in range(len(directions)):  # for each direction i
            r = utils.findRegion(x, directions[i])
            regionRegistered = False
            for j in range(len(regionsWithCentroids[i])):
                if np.all(regionsWithCentroids[i][j, 0] == r):
                    c = regionsWithCentroids[i][j, 1]
                    regionRegistered = True
                    break
            if regionRegistered:
                directionsMSE[i] += utils.squareDistance(x, c)
                numberOfPointsUsed[i] += 1.
    directionsMSE /= float(len(x))  # normalise by the dimensionality
    directionsMSE /= numberOfPointsUsed
    return directionsMSE
Example #8
def MSE(hyperplanes, pCentroids, pMeasure, distrib, dataset):
    """
        Returns MSE given the hyperplanes separating regions.
        Parameter pCentroids is the number of realisations of f used for
        determining the centroids of each region.
        Parameter pMeasure is the number of realisations used for computing the MSE.
    """
    error = 0.
    numberOfPointsUsed = 0
    regionsWithCentroids = core.centroids(hyperplanes, pCentroids, distrib)
    for i in range(pMeasure):
        x = utils.f(len(hyperplanes[0]) - 1, distrib, dataset)
        r = utils.findRegion(x, hyperplanes)
        regionRegistered = False
        for j in range(len(regionsWithCentroids)):
            if np.all(regionsWithCentroids[j, 0] == r):
                c = regionsWithCentroids[j, 1]
                regionRegistered = True
                break
        if regionRegistered:
            error += utils.squareDistance(x, c)
            numberOfPointsUsed += 1
    error /= float(len(x))  # normalise by the dimensionality
    error /= float(numberOfPointsUsed)
    return error
Example #9
def negEntropy(hyperplanes, pCentroids, pMeasure, distrib, dataset):
    """
        Returns the opposite (negative) of the overall entropy of the
        hyperplane configuration induced by the parameter hyperplanes.
        The method used to calculate the entropy is similar to that of MSE:
        generate a large number of random points following the distribution
        and use them to estimate the region probabilities.
    """
    regionsWithCentroids = core.centroids(hyperplanes, pCentroids, distrib)
    entropies = np.zeros(len(regionsWithCentroids))
    numberOfPointsUsed = 0

    for i in range(pMeasure):
        # generate a point and find its region
        x = utils.f(len(hyperplanes[0]) - 1, distrib, dataset)
        r = utils.findRegion(x, hyperplanes)

        # match the region with an already known one
        for j in range(len(regionsWithCentroids)):
            if np.all(regionsWithCentroids[j, 0] == r):
                # increase the counter for that region
                entropies[j] += 1
                numberOfPointsUsed += 1
                break

    # scale the counts, then convert each value p to p * log2(p)
    entropies /= float(len(x))
    entropies /= float(numberOfPointsUsed)
    logEntropies = np.log2(entropies)
    entropies *= logEntropies

    return sum(entropies)
Example #10
def post_user_repos(username, data):
    password = getpass.getpass()
    res = requests.post(f("{API_URL}/{USER}/{REPOS}"),
                        json=data,
                        auth=(username, password))
    json_res = res.json()
    return json_res
Example #11
def delete_user_repo(username, name):
    password = getpass.getpass()
    res = requests.delete(f("{API_URL}/{REPOS}/%s/%s" % (username, name)),
                          auth=(username, password))
    print(res)
    print(res.text)
    return True
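Examples #10, #11 and #29 all wrap their URL templates in a call to f, which evidently fills the {NAMES} in from variables visible at the call site, emulating an f-string. A minimal stand-in under that assumption (the repo's actual utils.f may differ):

import inspect

def f(template):
    # Look up the caller's globals and locals and format the template with
    # them, so f("{API_URL}/{REPOS}") behaves like an f-string.
    frame = inspect.currentframe().f_back
    try:
        return template.format(**{**frame.f_globals, **frame.f_locals})
    finally:
        del frame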
Example #12
    def animate(i):
        x = xList

        v_xt_list = v_xt_array[i, :]
        minus_f_x_list = (-ut.f(xList, 1))[:, 0]
        u_xt_list_simple_sum = v_xt_list + minus_f_x_list
        y = u_xt_list_simple_sum

        line.set_data(x, y)
        return line,
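This animate is written as a callback for matplotlib's FuncAnimation, redrawing one time slice per frame. A self-contained version of the same pattern, with a toy travelling wave standing in for the solver output (xList, frames and line are stand-ins):

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation

xList = np.linspace(0, 1, 101)
# Toy stand-in for v_xt_array: one precomputed row per frame.
frames = np.sin(2 * np.pi * (xList[None, :] - np.linspace(0, 1, 60)[:, None]))

fig, ax = plt.subplots()
ax.set_xlim(0, 1)
ax.set_ylim(-1.2, 1.2)
line, = ax.plot([], [])

def animate(i):
    line.set_data(xList, frames[i, :])
    return line,

ani = FuncAnimation(fig, animate, frames=len(frames), interval=30, blit=True)
plt.show()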
Example #13
def sigma_scaled(**kwargs):
    sigma = kwargs.get('sigma')
    average_fitness = kwargs.get('average_fitness')
    population = kwargs.get('population')

    expected_value_func = lambda x: 1 if sigma == 0 else 1 + ((f(x) - average_fitness) / (2 * sigma))
    sigma_sum = sum(expected_value_func(x) for x in population)
    scaling_func = lambda x: expected_value_func(x) / sigma_sum

    return roulette(scaling_func=scaling_func, **kwargs)
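Sigma scaling damps selection pressure when fitness variance is high. A quick check of the expected values on a toy population, assuming f is the identity:

from statistics import mean, pstdev

fitnesses = [1.0, 2.0, 3.0, 10.0]
avg, sigma = mean(fitnesses), pstdev(fitnesses)
expected = [1 + (v - avg) / (2 * sigma) for v in fitnesses]
print(expected)
# [0.58, 0.72, 0.86, 1.85] (rounded): the outlier keeps an edge of roughly
# 3x over the weakest, instead of 10x under raw fitness-proportionate.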
Example #14
def solve_layer(u, n, k):
    if k == 0:
        for t in range(k, n + 1 - k):
            u[t][k] = 0.0
            u[t][n - k] = 0.0
            u[k][t] = 0.5 * sin(np.pi * t / n)
            u[n - k][t] = 0.0
    elif k == 1:
        for t in range(k, n + 1 - k):
            a = u[t][k - 1]
            b = u[t - 1][k - 1]
            c = u[t + 1][k - 1]
            u[t][k] = f(t / n, (k - 1) / n) / n / n + 3 * a - b - c

            a = u[t][n - k + 1]
            b = u[t - 1][n - k + 1]
            c = u[t + 1][n - k + 1]
            u[t][n - k] = f(t / n, (n - k + 1) / n) / n / n + 3 * a - b - c

            a = u[k - 1][t]
            b = u[k - 1][t - 1]
            c = u[k - 1][t + 1]
            u[k][t] = f((k - 1) / n, t / n) / n / n + 3 * a - b - c

            a = u[n - k + 1][t]
            b = u[n - k + 1][t - 1]
            c = u[n - k + 1][t + 1]
            u[n - k][t] = f((n - k + 1) / n, t / n) / n / n + 3 * a - b - c
    else:
        for t in range(k, n + 1 - k):
            a = u[t][k - 1]
            b = u[t - 1][k - 1]
            c = u[t + 1][k - 1]
            d = u[t][k - 2]
            u[t][k] = f(t / n, (k - 1) / n) / n / n + 4 * a - b - c - d

            a = u[t][n - k + 1]
            b = u[t - 1][n - k + 1]
            c = u[t + 1][n - k + 1]
            d = u[t][n - k + 2]
            u[t][n - k] = f(t / n, (n - k + 1) / n) / n / n + 4 * a - b - c - d

            a = u[k - 1][t]
            b = u[k - 1][t - 1]
            c = u[k - 1][t + 1]
            d = u[k - 2][t]
            u[k][t] = f((k - 1) / n, t / n) / n / n + 4 * a - b - c - d

            a = u[n - k + 1][t]
            b = u[n - k + 1][t - 1]
            c = u[n - k + 1][t + 1]
            d = u[n - k + 2][t]
            u[n - k][t] = f((n - k + 1) / n, t / n) / n / n + 4 * a - b - c - d
Example #16
def plot_time_evolve_step(N, step, f, T, dt):
    x = get_x(N)
    v0 = f(x)
    v = np.copy(v0)
    print("Making step")
    # V is presumably a module-level potential; the local f only supplies
    # the initial condition.
    A = step(N, V, dt)
    n = int(T / dt)
    print("walking {} steps".format(n))
    fig, ax = plt.subplots()
    ax.plot(x, v0)
    for _ in range(3):
        for _ in range(int(n / 3)):
            v = A @ v
        ax.plot(x, v)
    plt.show()
Example #17
def MSE(regions, germs, pMeasure, distrib, dataset):
    '''
        Returns the mean squared error based on an approximation made with 
        random points generated according to the random distribution being 
        studied.
    '''
    nDimensions = len(regions[0, 0]) - 1
    error = 0.
    for k in range(pMeasure):
        x = utils.f(nDimensions, distrib, dataset)
        r = findRegion(x, regions)
        error += utils.squareDistance(x, germs[r])
    error /= float(nDimensions)
    error /= float(pMeasure)
    return error
Example #18
    def update(frame):  # frame = t_index basically
        t = tList[frame]

        u_xt_list = u_xt_array[frame, :]  # u(x,t) at the specific time t.
        #u_xt_list = u_xt_array[frame+1, :]

        v_xt_list = v_xt_array[frame, :]
        minus_f_x_list = (-ut.f(xList, 1))[:, 0]
        u_xt_list_simple_sum = v_xt_list + minus_f_x_list

        xdata.append(t)
        #ydata.append(u_xt_list_simple_sum[frame])
        ydata.append(minus_f_x_list[frame])
        line.set_data(xdata, ydata)
        return line,
Example #19
def doublePoint(nHyperplanes, nDimensions, distrib, dataset=None):
    """
        Returns a set of hyperplanes with random orientations. nHyperplanes
        is the number of hyperplanes to return, and nDimensions the number
        of dimensions of the space.
        For each hyperplane, nDimensions random points are generated
        following the distribution distrib, and the unique hyperplane
        passing through all of them is kept.
    """
    # dataset: optional source of sample points for utils.f (the snippet
    # otherwise relies on a global of that name).
    hyperplanes = []
    for k in range(nHyperplanes):
        points = np.array([
            utils.f(nDimensions, distrib, dataset) for n in range(nDimensions)
        ])
        hyperplanes.append(utils.hyperplaneFromPoints(points))
    return np.array(hyperplanes)
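utils.hyperplaneFromPoints is the repo's own; one standard construction of the hyperplane a.x = b through n points in n-dimensional space is a null-space computation via SVD (a sketch; the [a_1, ..., a_n, b] coefficient layout is an assumption):

import numpy as np

def hyperplane_from_points(points):
    # Each point p gives one homogeneous equation a . p - b = 0, i.e. a row
    # [p | -1]; the right-singular vector with the smallest singular value
    # spans the null space and holds the coefficients, up to scale.
    A = np.hstack([points, -np.ones((len(points), 1))])
    _, _, vt = np.linalg.svd(A)
    return vt[-1]  # [a_1, ..., a_n, b]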
Example #20
def plot_roots():
    l = roots(f, 0.1, V0)
    ls = np.linspace(0, V0, 1000)

    fig, ax = plt.subplots(figsize=(8, 4))
    ax.plot(ls, f(ls, V0), label="$f(x)$")
    ax.set_title("${}$ roots".format(len(l)))
    ax.plot(ls, np.zeros_like(ls), "--k", lw=1)
    ax.set_xlabel(r"$x / [2mL/\hbar^2]$")
    ax.set_ylabel("$f(x)$")
    for a in l:
        leg = ax.plot(a, 0, "xk", ms=12)
    plt.legend(leg, ("roots",))
    ax.legend()
    plt.tight_layout()
    plt.savefig(FIG_PATH + "roots.pdf")
Example #21
def polar_decode_sc1(in_llr, F, u):
    n = len(in_llr) // 2
    if n == 1:
        L_1 = f(in_llr[0], in_llr[1])
        u_1 = check1(L_1, F[0])
        L_2 = g(in_llr[0], in_llr[1], u_1)
        u_2 = check1(L_2, F[1])
        u.append(u_1)
        u.append(u_2)
        return u, in_llr, np.array([u_1 ^ u_2, u_2])
    else:
        u, out_llr1, x1 = polar_decode_sc1(f_N1(in_llr), F[:n], u)
        u, out_llr2, x2 = polar_decode_sc1(g_N1(in_llr, x1), F[n:], u)
        x = np.concatenate((np.bitwise_xor(x1, x2), x2))
        out_llr = np.concatenate((out_llr1, out_llr2))
    return u, out_llr, x
Example #22
def converge_solution(u, n, *args, **kwargs):
    result = make_matrix(n)
    diff = 0
    for t in range(0, n + 1):
        result[t][0] = 0.0
        result[t][n] = 0.0
        result[0][t] = 0.5 * sin(np.pi * t / n)
        result[n][t] = 0.0
    for k in range(1, n):
        for m in range(1, n):
            a = u[k - 1][m]
            b = u[k + 1][m]
            c = u[k][m - 1]
            d = u[k][m + 1]
            result[k][m] = 0.25 * (a + b + c + d - f(k / n, m / n) / n / n)
            diff += (result[k][m] - u[k][m]) ** 2
    return result, diff
Example #23
def iteration(u, n, t):
    result = make_matrix(n)
    for k in range(0, n + 1):
        result[0][k] = u[0][k]
        result[n][k] = u[n][k]

    a = [0] + [-t * n * n] * (n - 1) + [0]
    b = [1] + [1 + 2 * t * n * n] * (n - 1) + [1]
    c = [0] + [-t * n * n] * (n - 1) + [0]
    for k in range(1, n):
        d = u[k] + t * (u[k - 1] + u[k + 1] - 2 * u[k]) * n * n - t * f(
            k / n, np.linspace(0, 1, n + 1))
        d[0] = u[k][0]
        d[n] = u[k][n]
        sol = tdma(n + 1, a, b, c, d)
        result[k] = sol
    return result
Example #24
def polar_decode_sc(in_llr, F, u):
    N = len(in_llr)
    if N == 2:
        L_1 = f(in_llr[0], in_llr[1])
        u_1 = check(L_1, F[0])
        L_2 = g(in_llr[0], in_llr[1], u_1)
        u_2 = check(L_2, F[1])
        u.append(u_1)
        u.append(u_2)
        return in_llr, u, np.array([u_1 ^ u_2, u_2])
    else:
        out_llr1, u1, x1 = polar_decode_sc(f_N(in_llr), F[:int(N / 2)], u)
        out_llr2, u, x2 = polar_decode_sc(g_N(in_llr, x1), F[int(N / 2):], u1)
        x = np.concatenate((x1 ^ x2, x2))
        out_llr = np.concatenate((out_llr1, out_llr2))
    return out_llr, u, x
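In Examples #21 and #24, f and g are the standard polar successive-cancellation LLR updates at the check node and the bit node. In the widely used min-sum form (an assumption; the repo may implement the exact tanh rule instead):

import numpy as np

def f(l1, l2):
    # Check-node update: product of signs, minimum of magnitudes.
    return np.sign(l1) * np.sign(l2) * min(abs(l1), abs(l2))

def g(l1, l2, u):
    # Bit-node update, given the already-decided bit u in {0, 1}.
    return l2 + (1 - 2 * u) * l1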
Example #25
def converge_solution(u, n, *args, **kwargs):
    result = make_matrix(n)
    for t in range(0, n + 1):
        result[t][0] = 0.0
        result[t][n] = 0.0
        result[0][t] = sqrt(sin(np.pi * t / n))
        result[n][t] = 0.0
    for k in range(1, n):
        for m in range(1, n):
            a = u[k - 1][m]
            b = u[k + 1][m]
            c = u[k][m - 1]
            d = u[k][m + 1]
            result[k][m] = sqrt(a * a + b * b + c * c + d * d -
                                2 * f(k / n, m / n) / n / n) / 2

    diff = np.linalg.norm(result - u, ord='fro')
    return result, diff
Example #26
def converge_solution(u, n, *args, **kwargs):
    t = 1 / 100000
    result = make_matrix(n)
    for k in range(0, n + 1):
        result[k][0] = u[k][0]
        result[k][n] = u[k][n]
        result[0][k] = u[0][k]
        result[n][k] = u[n][k]
    for k in range(1, n):
        for m in range(1, n):
            a = u[k - 1][m]
            b = u[k + 1][m]
            c = u[k][m - 1]
            d = u[k][m + 1]
            e = u[k][m]
            delta = (a * a + b * b + c * c + d * d -
                     4 * e * e) * n * n / 2 - f(k / n, m / n)
            result[k][m] = e + delta * t
    diff = np.linalg.norm(result - u, ord='fro')
    return result, diff
Example #27
def centroids(regions, pCentroids, distrib, dataset):
    '''
        Computes the positions of the centroids of the regions, based on
        an estimation from pCentroids sample points.
    '''
    # number of regions (or germs) and of dimensions
    nRegions, nDimensions = len(regions), len(regions[0, 0]) - 1
    germs = np.zeros((nRegions, nDimensions))
    # realisations per region; starting from one rather than zero avoids
    # dividing by zero for empty regions
    rpr = np.ones(nRegions)
    for _ in range(pCentroids):
        x = utils.f(nDimensions, distrib, dataset)
        r = findRegion(x, regions)
        germs[r] += x
        rpr[r] += 1
    if 0. in rpr:
        print('warning: a 0 in rpr:', rpr)
    germs = [germ / x for germ, x in zip(germs, rpr)]
    return np.array(germs)
Example #28
def converge_solution(u, n, prev=None, *args, **kwargs):
    t = 1 / 1000
    g = n / 2
    result = make_matrix(n)
    for k in range(0, n + 1):
        result[k][0] = u[k][0]
        result[k][n] = u[k][n]
        result[0][k] = u[0][k]
        result[n][k] = u[n][k]
    for k in range(1, n):
        for m in range(1, n):
            a = u[k - 1][m]
            b = u[k + 1][m]
            c = u[k][m - 1]
            d = u[k][m + 1]
            e = u[k][m]
            result[k][m] = 2 * e - prev[k][m] \
                - g * t * (e - prev[k][m]) + t * t * \
                ((a * a + b * b + c * c + d * d - 4 * e * e) * n * n / 2
                - f(k / n, m / n))

            # print(f'{k},{m}\t\t', result[k][m], e)
    diff = np.linalg.norm(result - u, ord='fro')
    return result, diff, u
Example #29
def get_users_repos(username):
    res = requests.get(f("{API_URL}/{USERS}/%s/{REPOS}" % username))
    json_res = res.json()
    return json_res
Example #30
import utils
from utils import f
from utils import *

utils.f()           # qualified call
f()                 # via the explicit import
do_get_request()    # pulled in by the star import

Example #31
def run_simulation(simulation, log=LOG):
    problem = simulation.get('problem')
    problem_parameters = simulation.get('problem_parameters', {})
    population_size = problem_parameters.get('population_size')
    adult_selection = simulation.get('adult_selection_method')
    mate_selection = simulation.get('mate_selection_method')
    mate_selection_args = simulation.get('mate_selection_args', {})
    crossover_rate = simulation.get('crossover_rate')
    crossover_method = simulation.get('crossover_method')
    n_children = simulation.get('n_children', 2)
    mutation_method = simulation.get('mutation_method')
    mutation_chance = simulation.get('mutation_rate')
    stop = simulation.get('stop', {})
    stop_fitness = stop.get('fitness')
    stop_generation = stop.get('generation')

    # STEP 0: Initialize child genotype population
    population = []
    children = [{'genotype': genotype} for genotype in problem.generate_initial_population(**problem_parameters)]

    average_fitnesses = []
    sigmas = []
    best_fitnesses = []

    if log:
        print("Start simulation")
        pprint(simulation, indent=2)

    generation_number = 0
    while True:
        generation_number += 1

        # STEP 1: Development: Generate Phenotypes from Genotypes
        for individual in children:
            individual['phenotype'] = problem.geno_to_pheno(individual['genotype'], **problem_parameters)

        # STEP 2: Test Fitness of Phenotypes
        for individual in children:
            individual['fitness'] = problem.fitness_evaluation(individual['phenotype'], **problem_parameters)

        # STEP 3: Adult Selection
        population = adult_selection(
            old_population=population,
            children=children,
            m=population_size
        )
        children = []

        total_fitness = sum(f(x) for x in population)
        average_fitness = total_fitness / population_size
        deviations = ((f(x) - average_fitness) ** 2 for x in population)
        sigma = sqrt(sum(deviations) / population_size)

        best_individual = max(population, key=f)

        # STEP 4: Parent Selection
        pairs = mate_selection(
            population=population,
            sigma=sigma,
            average_fitness=average_fitness,
            **mate_selection_args
        )

        # STEP 5: Reproduction
        mutation_func = lambda x: mutation_method(x, p=mutation_chance, **problem_parameters)
        for pair in pairs:
            new_genotypes = crossover(
                *pair,
                crossover_rate=crossover_rate,
                method=crossover_method,
                n_children=n_children
            )

            # STEP 5.5: Mutation
            mutated_genotypes = map(mutation_func, new_genotypes)

            children.extend({'genotype': g} for g in mutated_genotypes)

        if log:
            print(
                '''
GENERATION {n}
AVG:\t{avg_fitness}
STD:\t{sigma}
BEST:\t{best_fitness}
{best_phenotype}
                '''
                .format(
                    n=generation_number,
                    avg_fitness=average_fitness,
                    sigma=sigma,
                    best_phenotype=problem.phenotype_representation(
                        best_individual['phenotype'],
                        **problem_parameters
                    ),
                    best_fitness=best_individual['fitness']
                )
            )

        average_fitnesses.append(average_fitness)
        sigmas.append(sigma)
        best_fitnesses.append(best_individual['fitness'])

        if stop_fitness and f(best_individual) >= stop_fitness:
            if log:
                print('FITNESS STOP')
            break
        elif stop_generation and generation_number >= stop_generation:
            if log:
                print('GENERATION STOP')
            break

        # Begin the next generation.

    return {
        'simulation': simulation,
        'generation_number': generation_number,
        'average_fitnesses': average_fitnesses,
        'sigmas': sigmas,
        'best_fitnesses': best_fitnesses,
        'final_population': population,
        'final_best_individual': best_individual
    }
Example #32
def fitness_proportionate(**kwargs):
    total_fitness = sum(f(x) for x in kwargs.get('population'))
    scaling_func = lambda x: f(x) / total_fitness

    return roulette(scaling_func=scaling_func, **kwargs)
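roulette itself is defined elsewhere in the repo; the classic wheel it presumably implements draws parents with probability proportional to scaling_func. A minimal sketch under that assumption (the n_pairs parameter is hypothetical):

import random

def roulette(population, scaling_func, n_pairs=10, **kwargs):
    # One weighted draw per parent; the weights need not be normalised.
    weights = [scaling_func(x) for x in population]
    return [tuple(random.choices(population, weights=weights, k=2))
            for _ in range(n_pairs)]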
Example #33
train_acc = []
train_loss = []
valid_acc = []
valid_loss = []
step = 1
for epoch in range(epoches):
    X_train, Y_train = utils.shuffle(X_train, Y_train)

    for i in range(int(np.floor((train_size / batch_size)))):
        X = X_train[i * batch_size:(i + 1) * batch_size]
        Y = Y_train[i * batch_size:(i + 1) * batch_size]

        #w,b = utils.gradient_descent(X,Y,w,b,lr)
        w_grad, b_grad = utils.gradient_descent(X, Y, w, b)
        w -= lr / np.sqrt(step) * w_grad
        b -= lr / np.sqrt(step) * b_grad
        step += 1
    y_train_pred = utils.f(X_train, w, b)
    Y_train_pred = np.round(y_train_pred)
    train_acc.append(utils.accruacy(Y_train_pred, Y_train))
    train_loss.append(
        utils.cross_entropy_loss(y_train_pred, Y_train) / train_size)

    y_valid_pred = utils.f(X_valid, w, b)
    Y_valid_pred = np.round(y_valid_pred)
    valid_acc.append(utils.accruacy(Y_valid_pred, Y_valid))
    valid_loss.append(
        utils.cross_entropy_loss(y_valid_pred, Y_valid) / valid_size)

print('Training loss: {}'.format(train_loss[-1]))
print('Validation loss: {}'.format(valid_loss[-1]))
print('Training accuracy: {}'.format(train_acc[-1]))
print('Validation accuracy: {}'.format(valid_acc[-1]))
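Here utils.f is the model's forward pass over a weight vector w and bias b. For a logistic-regression reading of the loop, a plausible stand-in (an assumption, not the repo's exact helper):

import numpy as np

def f(X, w, b):
    # Sigmoid of the affine score, clipped away from 0 and 1 so the
    # cross-entropy loss stays finite.
    z = X @ w + b
    return np.clip(1.0 / (1.0 + np.exp(-z)), 1e-8, 1 - 1e-8)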