Code Example #1
def LinearLearner(dataset, learning_rate=0.01, epochs=100):
    """Define with learner = LinearLearner(data); infer with learner(x)."""
    idx_i = dataset.inputs
    idx_t = dataset.target  # As of now, dataset.target gives only one index.
    examples = dataset.examples
    num_examples = len(examples)

    # X transpose
    X_col = [[example[i] for example in examples] for i in idx_i]  # vertical columns of X

    # Add dummy
    ones = [1 for _ in range(len(examples))]
    X_col = [ones] + X_col

    # Initialize random weights
    num_weights = len(idx_i) + 1
    w = random_weights(min_value=-0.5, max_value=0.5, num_weights=num_weights)

    for epoch in range(epochs):
        err = []
        # Pass over all examples
        for example in examples:
            x = [1] + example  # dotproduct zips with w, so trailing non-input entries are ignored
            y = dotproduct(w, x)
            t = example[idx_t]
            err.append(t - y)

        # update weights
        for i in range(len(w)):
            w[i] = w[i] + learning_rate * (dotproduct(err, X_col[i]) / num_examples)

    def predict(example):
        x = [1] + example
        return dotproduct(w, x)
    return predict
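
A hypothetical usage sketch (not part of the snippet above): it assumes aima-python's DataSet class, and the example values and column layout are invented for illustration.

# Hypothetical usage; column 2 is the target, columns 0-1 the inputs.
from learning import DataSet, LinearLearner

data = DataSet(examples=[[1.0, 2.0, 5.0],
                         [2.0, 1.0, 4.0],
                         [3.0, 3.0, 9.0]],
               target=2)

learner = LinearLearner(data, learning_rate=0.01, epochs=100)
print(learner([1.5, 2.5]))  # predicted target value for a new input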
Code Example #2
File: learning.py  Project: rajul/aima-python
def Linearlearner(dataset, learning_rate=0.01, epochs=100):
    """Define with learner = Linearlearner(data); infer with learner(x)."""
    idx_i = dataset.inputs
    idx_t = dataset.target     # As of now, dataset.target gives only one index.
    examples = dataset.examples

    # X transpose
    X_col = [[example[i] for example in examples] for i in idx_i]  # vertical columns of X

    # Add dummy
    ones = [1 for _ in range(len(examples))]
    X_col = [ones] + X_col

    # Initialize random weights
    w = [random.uniform(-0.5, 0.5) for _ in range(len(idx_i) + 1)]

    for epoch in range(epochs):
        err = []
        # Pass over all examples
        for example in examples:
            x = [example[i] for i in idx_i]
            x = [1] + x
            y = dotproduct(w, x)
            t = example[idx_t]
            err.append(t - y)

        # update weights
        for i in range(len(w)):
            w[i] = w[i] + learning_rate * dotproduct(err, X_col[i])

    def predict(example):
        x = [1] + example
        return dotproduct(w, x)
    return predict
Code Example #3
def remove_component(X):
    """Remove from X the components of already-obtained eigenvectors.

    m, eivec_m and eivec_n come from the enclosing scope: the index where
    X is split, and the (unit-norm) eigenvectors found so far for each half."""
    X_m = X[:m]
    X_n = X[m:]
    for eivec in eivec_m:
        coeff = dotproduct(X_m, eivec)
        X_m = [x1 - coeff * x2 for x1, x2 in zip(X_m, eivec)]
    for eivec in eivec_n:
        coeff = dotproduct(X_n, eivec)
        X_n = [x1 - coeff * x2 for x1, x2 in zip(X_n, eivec)]
    return X_m + X_n
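
The snippet above is Gram-Schmidt-style deflation: each half of X loses its projection onto every eigenvector found so far, presumably so that later iterations of the enclosing eigenvector routine converge to new directions. A self-contained sketch of the same step, assuming unit-norm eigenvectors and the usual inner product (names here are illustrative):

def dotproduct(X, Y):
    return sum(x * y for x, y in zip(X, Y))

def remove_components(X, unit_eigenvectors):
    """Subtract from X its projection onto each unit-norm eigenvector."""
    for eivec in unit_eigenvectors:
        coeff = dotproduct(X, eivec)
        X = [x - coeff * e for x, e in zip(X, eivec)]
    return X

# Removing the x-axis component of (3, 4) leaves (0, 4):
print(remove_components([3.0, 4.0], [[1.0, 0.0]]))  # [0.0, 4.0]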
Code Example #4
File: learning.py  Project: davidtekeshe/aima-python
    def predict(example):
        o_nodes = learned_net[1]  # layer 0 is the input layer, layer 1 the output layer

        # Forward pass
        for node in o_nodes:
            in_val = dotproduct(example, node.weights)
            node.value = node.activation(in_val)

        # Hypothesis
        return find_max_node(o_nodes)
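
find_max_node itself is not shown; a minimal stand-in consistent with its use above (return the index of the output node with the highest activation) might look like:

def find_max_node(nodes):
    # assumes each node exposes its activation as node.value
    return max(range(len(nodes)), key=lambda i: nodes[i].value)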
Code Example #5
def LogisticLinearLeaner(dataset, learning_rate=0.01, epochs=100):
    """
    [Section 18.6.4]
    Linear classifier with logistic regression.
    """
    idx_i = dataset.inputs
    idx_t = dataset.target
    examples = dataset.examples
    num_examples = len(examples)

    # X transpose
    X_col = [[example[i] for example in examples] for i in idx_i]  # vertical columns of X

    # add dummy
    ones = [1 for _ in range(len(examples))]
    X_col = [ones] + X_col

    # initialize random weights
    num_weights = len(idx_i) + 1
    w = random_weights(min_value=-0.5, max_value=0.5, num_weights=num_weights)

    for epoch in range(epochs):
        err = []
        h = []
        # pass over all examples
        for example in examples:
            x = [1] + example
            y = sigmoid(dotproduct(w, x))
            h.append(sigmoid_derivative(y))
            t = example[idx_t]
            err.append(t - y)

        # update weights
        for i in range(len(w)):
            buffer = [x * y for x, y in zip(err, h)]
            w[i] = w[i] + learning_rate * (dotproduct(buffer, X_col[i]) /
                                           num_examples)

    def predict(example):
        x = [1] + example
        return sigmoid(dotproduct(w, x))

    return predict
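
This learner leans on two helpers from utils; minimal definitions consistent with how they are called above (note that sigmoid_derivative receives the already-activated output, not the raw input):

import math

def sigmoid(x):
    """Logistic function, squashing x into (0, 1)."""
    return 1 / (1 + math.exp(-x))

def sigmoid_derivative(value):
    """Derivative of the logistic function, given value = sigmoid(x)."""
    return value * (1 - value)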
Code Example #6
def LinearLearner(dataset, learning_rate=0.01, epochs=100):
    """
    [Section 18.6.3]
    Linear classifier with hard threshold.
    """
    idx_i = dataset.inputs
    idx_t = dataset.target
    examples = dataset.examples
    num_examples = len(examples)

    # X transpose
    X_col = [[example[i] for example in examples] for i in idx_i]  # vertical columns of X

    # add dummy
    ones = [1 for _ in range(len(examples))]
    X_col = [ones] + X_col

    # initialize random weights
    num_weights = len(idx_i) + 1
    w = random_weights(min_value=-0.5, max_value=0.5, num_weights=num_weights)

    for epoch in range(epochs):
        err = []
        # pass over all examples
        for example in examples:
            x = [1] + example
            y = dotproduct(w, x)
            t = example[idx_t]
            err.append(t - y)

        # update weights
        for i in range(len(w)):
            w[i] = w[i] + learning_rate * (dotproduct(err, X_col[i]) /
                                           num_examples)

    def predict(example):
        x = [1] + example
        return dotproduct(w, x)

    return predict
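
Despite the "hard threshold" label, predict returns the raw linear combination and leaves thresholding to the caller. A sketch of that last step (the 0.5 cutoff is an assumption that depends on how the targets are encoded):

def classify(example):
    # assumes 0/1 targets, so 0.5 is the natural cutoff
    return 1 if learner(example) >= 0.5 else 0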
Code Example #7
File: learning.py  Project: rajul/aima-python
    def predict(example):
        # Input nodes
        i_nodes = learned_net[0]

        # Activate input layer
        for v, n in zip(example, i_nodes):
            n.value = v

        # Forward pass
        for layer in learned_net[1:]:
            for node in layer:
                inc = [n.value for n in node.inputs]
                in_val = dotproduct(inc, node.weights)
                node.value = node.activation(in_val)

        # Hypothesis
        o_nodes = learned_net[-1]
        pred = [o_nodes[i].value for i in range(o_units)]  # o_units is defined in the enclosing learner
        return 1 if pred[0] >= 0.5 else 0
Code Example #8
File: learning.py  Project: lucasmoura/aima-python
    def predict(example):
        # Input nodes
        i_nodes = learned_net[0]

        # Activate input layer
        for v, n in zip(example, i_nodes):
            n.value = v

        # Forward pass
        for layer in learned_net[1:]:
            for node in layer:
                inc = [n.value for n in node.inputs]
                in_val = dotproduct(inc, node.weights)
                node.value = node.activation(in_val)

        # Hypothesis
        o_nodes = learned_net[-1]
        prediction = find_max_node(o_nodes)
        return prediction
Code Example #9
def BackPropagationLearner(dataset, net, learning_rate, epochs):
    """[Figure 18.23] The back-propagation algorithm for multilayer network"""
    # Initialise weights
    for layer in net:
        for node in layer:
            node.weights = random_weights(min_value=-0.5,
                                          max_value=0.5,
                                          num_weights=len(node.weights))

    examples = dataset.examples
    '''
    As of now dataset.target gives an int instead of list,
    Changing dataset class will have effect on all the learners.
    Will be taken care of later
    '''
    o_nodes = net[-1]
    i_nodes = net[0]
    o_units = len(o_nodes)
    idx_t = dataset.target
    idx_i = dataset.inputs
    n_layers = len(net)

    inputs, targets = init_examples(examples, idx_i, idx_t, o_units)

    for epoch in range(epochs):
        # Iterate over each example
        for e in range(len(examples)):
            i_val = inputs[e]
            t_val = targets[e]

            # Activate input layer
            for v, n in zip(i_val, i_nodes):
                n.value = v

            # Forward pass
            for layer in net[1:]:
                for node in layer:
                    inc = [n.value for n in node.inputs]
                    in_val = dotproduct(inc, node.weights)
                    node.value = node.activation(in_val)

            # Initialize delta
            delta = [[] for i in range(n_layers)]

            # Compute outer layer delta

            # Error for the MSE cost function
            err = [t_val[i] - o_nodes[i].value for i in range(o_units)]
            # The activation function used is the sigmoid function
            delta[-1] = [
                sigmoid_derivative(o_nodes[i].value) * err[i]
                for i in range(o_units)
            ]

            # Backward pass
            h_layers = n_layers - 2
            for i in range(h_layers, 0, -1):
                layer = net[i]
                h_units = len(layer)
                nx_layer = net[i + 1]
                # weights from each ith layer node to each i + 1th layer node
                w = [[node.weights[k] for node in nx_layer]
                     for k in range(h_units)]

                delta[i] = [
                    sigmoid_derivative(layer[j].value) *
                    dotproduct(w[j], delta[i + 1]) for j in range(h_units)
                ]

            #  Update weights
            for i in range(1, n_layers):
                layer = net[i]
                inc = [node.value for node in net[i - 1]]
                units = len(layer)
                for j in range(units):
                    layer[j].weights = vector_add(
                        layer[j].weights,
                        scalar_vector_product(learning_rate * delta[i][j],
                                              inc))

    return net
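
The update step relies on three small vector helpers from utils; minimal versions consistent with their use above:

import operator

def dotproduct(X, Y):
    """Inner product of two equal-length sequences."""
    return sum(x * y for x, y in zip(X, Y))

def scalar_vector_product(s, X):
    """Multiply every component of X by the scalar s."""
    return [s * x for x in X]

def vector_add(X, Y):
    """Component-wise sum of two equal-length sequences."""
    return list(map(operator.add, X, Y))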
Code Example #10
File: learning.py  Project: rajul/aima-python
def predict(example):
    x = [1] + example
    return dotproduct(w, x)
Code Example #11
File: learning.py  Project: abbas5253/aima-python
def BackPropagationLearner(dataset, net, learning_rate, epochs, activation=sigmoid, momentum=False, beta=0.903):
    """[Figure 18.23] The back-propagation algorithm for multilayer networks"""
    # Initialise weights
    for layer in net:
        for node in layer:
            node.weights = random_weights(min_value=-0.5, max_value=0.5,
                                          num_weights=len(node.weights))

    examples = dataset.examples
    '''
    As of now dataset.target gives an int instead of list,
    Changing dataset class will have effect on all the learners.
    Will be taken care of later.
    '''
    o_nodes = net[-1]
    i_nodes = net[0]
    o_units = len(o_nodes)
    idx_t = dataset.target
    idx_i = dataset.inputs
    n_layers = len(net)

    inputs, targets = init_examples(examples, idx_i, idx_t, o_units)

    # Initialize the momentum velocity once, one slot per node (same shape as delta);
    # re-creating it per example would discard the accumulated momentum
    if momentum:
        v_dw = [[0.0 for _ in layer] for layer in net]

    for epoch in range(epochs):
        # Iterate over each example
        for e in range(len(examples)):
            i_val = inputs[e]
            t_val = targets[e]

            # Activate input layer
            for v, n in zip(i_val, i_nodes):
                n.value = v

            # Find node values through forward propagation
            for layer in net[1:]:
                for node in layer:
                    inc = [n.value for n in node.inputs]
                    in_val = dotproduct(inc, node.weights)
                    node.value = node.activation(in_val)

            # Initialize delta, which stores the gradient for each node
            delta = [[] for _ in range(n_layers)]

            # Compute outer layer delta

            # Error for the MSE cost function
            err = [t_val[i] - o_nodes[i].value for i in range(o_units)]

            # Use the derivative matching the chosen activation function
            if activation == sigmoid:
                delta[-1] = [sigmoid_derivative(o_nodes[i].value) * err[i] for i in range(o_units)]
            elif activation == relu:
                delta[-1] = [relu_derivative(o_nodes[i].value) * err[i] for i in range(o_units)]
            elif activation == tanh:
                delta[-1] = [tanh_derivative(o_nodes[i].value) * err[i] for i in range(o_units)]
            elif activation == elu:
                delta[-1] = [elu_derivative(o_nodes[i].value) * err[i] for i in range(o_units)]
            else:
                delta[-1] = [leaky_relu_derivative(o_nodes[i].value) * err[i] for i in range(o_units)]

            # Propagating backward: gradients of nodes for each hidden layer
            h_layers = n_layers - 2
            for i in range(h_layers, 0, -1):
                layer = net[i]
                h_units = len(layer)
                nx_layer = net[i+1]

                # weights from each ith layer node to each i + 1th layer node
                w = [[node.weights[k] for node in nx_layer] for k in range(h_units)]

                if activation == sigmoid:
                    delta[i] = [sigmoid_derivative(layer[j].value) * dotproduct(w[j], delta[i+1])
                            for j in range(h_units)]
                elif activation == relu:
                    delta[i] = [relu_derivative(layer[j].value) * dotproduct(w[j], delta[i+1])
                            for j in range(h_units)]
                elif activation == tanh:
                    delta[i] = [tanh_derivative(layer[j].value) * dotproduct(w[j], delta[i+1])
                            for j in range(h_units)]
                elif activation == elu:
                    delta[i] = [elu_derivative(layer[j].value) * dotproduct(w[j], delta[i+1])
                            for j in range(h_units)]
                else:
                    delta[i] = [leaky_relu_derivative(layer[j].value) * dotproduct(w[j], delta[i+1])
                            for j in range(h_units)]

            # Momentum: exponentially weighted moving average of the gradients
            # (bias correction is applied where the velocity is used, below)
            if momentum:
                t_ = epoch + 1
                for i in range(len(delta)):
                    for j in range(len(delta[i])):
                        v_dw[i][j] = beta * v_dw[i][j] + (1 - beta) * delta[i][j]

            # Update weights with plain gradient descent
            if not momentum:
                for i in range(1, n_layers):
                    layer = net[i]
                    inc = [node.value for node in net[i - 1]]
                    units = len(layer)
                    for j in range(units):
                        layer[j].weights = vector_add(
                            layer[j].weights,
                            scalar_vector_product(learning_rate * delta[i][j], inc))
            # Update weights with the bias-corrected velocity (momentum)
            else:
                for i in range(1, n_layers):
                    layer = net[i]
                    inc = [node.value for node in net[i - 1]]
                    units = len(layer)
                    for j in range(units):
                        v_corr = v_dw[i][j] / (1 - beta ** t_)  # bias correction
                        layer[j].weights = vector_add(
                            layer[j].weights,
                            scalar_vector_product(learning_rate * v_corr, inc))

    return net
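
In isolation, the velocity recurrence above is an exponentially weighted moving average of the gradient with bias correction, the same form as Adam's first-moment estimate. A scalar sketch:

def corrected_velocities(gradients, beta=0.903):
    """Yield the bias-corrected moving average of a gradient sequence."""
    v = 0.0
    for t, g in enumerate(gradients, start=1):
        v = beta * v + (1 - beta) * g  # raw velocity
        yield v / (1 - beta ** t)      # bias-corrected estimate

# With a constant gradient the corrected estimate recovers it exactly:
print(list(corrected_velocities([1.0, 1.0, 1.0])))  # ~[1.0, 1.0, 1.0]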
Code Example #12
File: learning.py  Project: rajul/aima-python
def BackPropagationLearner(dataset, net, learning_rate, epochs):
    """[Figure 18.23] The back-propagation algorithm for multilayer networks"""
    # Initialise weights
    for layer in net:
        for node in layer:
            node.weights = [random.uniform(-0.5, 0.5)
                            for i in range(len(node.weights))]

    examples = dataset.examples
    '''
    As of now dataset.target gives an int instead of list,
    Changing dataset class will have effect on all the learners.
    Will be taken care of later
    '''
    idx_t = [dataset.target]
    idx_i = dataset.inputs
    n_layers = len(net)
    o_nodes = net[-1]
    i_nodes = net[0]

    for epoch in range(epochs):
        # Iterate over each example
        for e in examples:
            i_val = [e[i] for i in idx_i]
            t_val = [e[i] for i in idx_t]
            # Activate input layer
            for v, n in zip(i_val, i_nodes):
                n.value = v

            # Forward pass
            for layer in net[1:]:
                for node in layer:
                    inc = [n.value for n in node.inputs]
                    in_val = dotproduct(inc, node.weights)
                    node.value = node.activation(in_val)

            # Initialize delta
            delta = [[] for i in range(n_layers)]

            # Compute outer layer delta
            o_units = len(o_nodes)
            err = [t_val[i] - o_nodes[i].value
                   for i in range(o_units)]
            # value * (1 - value) is the sigmoid derivative at each output node
            delta[-1] = [(o_nodes[i].value)*(1 - o_nodes[i].value) *
                         (err[i]) for i in range(o_units)]

            # Backward pass
            h_layers = n_layers - 2
            for i in range(h_layers, 0, -1):
                layer = net[i]
                h_units = len(layer)
                nx_layer = net[i+1]
                # weights from each ith layer node to each i + 1th layer node
                w = [[node.weights[k] for node in nx_layer]
                     for k in range(h_units)]

                delta[i] = [(layer[j].value) * (1 - layer[j].value) *
                            dotproduct(w[j], delta[i+1])
                            for j in range(h_units)]

            #  Update weights
            for i in range(1, n_layers):
                layer = net[i]
                inc = [node.value for node in net[i-1]]
                units = len(layer)
                for j in range(units):
                    layer[j].weights = vector_add(layer[j].weights,
                                                  scalar_vector_product(
                                                  learning_rate * delta[i][j], inc))

    return net
Code Example #13
def predict(self, data):
    """Classify each x by the sign of its projection onto the weight vector."""
    return [sign(dotproduct(x, self.__w)) for x in data]
Code Example #14
File: learning.py  Project: victorcavero14/AI
def predict(example):
    x = [1] + example
    return sigmoid(dotproduct(w, x))
Code Example #15
def __misclassification(self, x, label):
    """True when (x, label) violates the unit margin: label * (w · x) < 1."""
    return dotproduct(x, self.__w) * label < 1
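
The "< 1" test is the hinge-loss margin condition used by margin-based trainers such as SVMs: an example counts as a violation not only when it is misclassified outright, but whenever it falls inside the unit margin. A self-contained sketch with illustrative names:

def dotproduct(X, Y):
    return sum(x * y for x, y in zip(X, Y))

def misclassification(w, x, label):
    # label is +1 or -1; a functional margin below 1 counts as a violation
    return dotproduct(x, w) * label < 1

w = [2.0, 0.0]
print(misclassification(w, [1.0, 0.0], +1))   # False: margin 2 >= 1
print(misclassification(w, [0.25, 0.0], +1))  # True: right side, but inside the margin
print(misclassification(w, [1.0, 0.0], -1))   # True: wrong side entirely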