Example #1
    def train(self,inputs_list,targets_list):
        #transpose the input and target lists into column vectors
        inputs = inputs_list.transpose()
        targets = targets_list.transpose()

        #calculate signals into hidden layer
        hidden_inputs = multiply(self.weights_ih,inputs)
        hidden_outputs = self.activation_function(hidden_inputs)

        #calculate signals entering final output layer
        final_inputs = multiply(self.weights_ho,hidden_outputs)
        #calculate signals exiting final output layer
        final_outputs = self.activation_function(final_inputs)

        #output layer error is the target - actual
        output_errors = targets - final_outputs
        #hidden layer error is the output_errors, split by weights,
        #recombined at hidden nodes
        hidden_errors = multiply(transpose(self.weights_ho),output_errors)

        #update the weights for the links between the hidden and output layers
        self.weights_ho += self.lr * multiply((output_errors * final_outputs *
                                               (1.0 - final_outputs)),
                                              transpose(hidden_outputs))

        #update the weights for the links between the input and hidden layers
        self.weights_ih += self.lr * multiply((hidden_errors * hidden_outputs *
                                               (1.0 - hidden_outputs)),
                                              transpose(inputs))
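These `train` snippets call free functions `multiply` and `transpose` whose definitions are not shown. A minimal sketch of what the calls above appear to assume (hypothetical helpers, written with numpy so the element-wise `*` and `-` in the update lines also work):

import numpy as np

def transpose(m):
    return np.asarray(m).T

def multiply(a, b):
    # matrix product; the snippet writes element-wise products with `*`
    return np.dot(a, b)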
Example #2
def compute_metrics(spectra, parents):
    transaction_names = transpose(spectra)[0][1:]
    spectra = transpose(spectra)[1:]
    spectra_dict = _spectra_to_dict(spectra)
    ddus = _parent_dicts(parents)
    for p, cs in parents.items():
        constructor = p + '#' + p.split('.')[-1]
        pattern = re.compile(r"%s\(.*\)" % re.escape(constructor))  # raw string; escape '.' and '#' in the qualified name
        components = list(filter(lambda c: pattern.match(c) is None, cs))
        components_activity = list(map(lambda c: spectra_dict[c], components))
        transactions = transpose(components_activity)
        transactions = zip(transaction_names, transactions)
        transactions = reduce(_remove_no_hit, transactions, [])
        print('\nParent:', p)
        print('Components:', components)
        if not transactions:
            continue
        if len(components) >= 8:
            write_transactions(transactions, p)
        ddus[p]['unit_tests'], ddus[p]['integration_tests'] = \
            _unit_and_integration(p, transactions)
        tests, transactions = zip(*transactions)
        components_activity = transpose(transactions)
        ddus[p]['number_of_components'] = len(components)
        ddus[p]['number_of_tests'] = len(transactions)
        ddus[p]['density'] = _density(components_activity)
        ddus[p]['normalized_density'] = normalized_density(components_activity)
        ddus[p]['diversity'] = diversity(components_activity)
        ddus[p]['uniqueness'] = uniqueness(components_activity)
        # res = ddus[p]['normalized_density'] + ddus[p]['diversity'] + ddus[p]['uniqueness']
        # ddus[p]['ddu'] = res / 3.0 if res else 0
        ddus[p]['ddu'] = (ddus[p]['normalized_density'] *
                          ddus[p]['diversity'] * ddus[p]['uniqueness'])
    return ddus
Example #3
def addBezier(m, x1, y1, x2, y2, x3, y3, x4, y4, step):
    bezMatrix = [[-1, 3, -3, 1], [3, -6, 3, 0], [-3, 3, 0, 0], [1, 0, 0, 0]]
    xcoef = matrix.multiply(bezMatrix, matrix.transpose([[x1, x2, x3, x4]]))
    ycoef = matrix.multiply(bezMatrix, matrix.transpose([[y1, y2, y3, y4]]))
    x = polyParametrize(matrix.transpose(xcoef)[0])
    y = polyParametrize(matrix.transpose(ycoef)[0])
    z = lambda t: 0
    addEdgesFromParam(m, x, y, z, step)
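`polyParametrize` is not shown; from its use here it presumably turns a coefficient list [a, b, c, d] into a callable cubic in t. A hypothetical sketch:

def polyParametrize(coefs):
    a, b, c, d = coefs
    # Horner evaluation of a*t**3 + b*t**2 + c*t + d
    return lambda t: ((a * t + b) * t + c) * t + d

The same helper serves the Hermite curve in the next example; only the 4x4 basis matrix changes.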
Example #4
def addHermite(m, p0x, p0y, p1x, p1y, m0x, m0y, m1x, m1y, step):
    hermMatrix = [[2, -2, 1, 1], [-3, 3, -2, -1], [0, 0, 1, 0], [1, 0, 0, 0]]
    xcoef = matrix.multiply(hermMatrix, matrix.transpose([[p0x, p1x, m0x,
                                                           m1x]]))
    ycoef = matrix.multiply(hermMatrix, matrix.transpose([[p0y, p1y, m0y,
                                                           m1y]]))
    x = polyParametrize(matrix.transpose(xcoef)[0])
    y = polyParametrize(matrix.transpose(ycoef)[0])
    z = lambda t: 0
    addEdgesFromParam(m, x, y, z, step)
Example #5
def _flatten_spectra(spectra):
    components = spectra[0]
    transactions = spectra[1:]
    unique_components = set(components[1:])
    for component in unique_components:
        positions = _indexes(component, components)
        for j, transaction in enumerate(transactions):
            if sum([transaction[i] for i in positions]) > 0:
                for p in positions:
                    transactions[j][p] = 1
    data = [components] + transactions
    component_columns = transpose(data)
    return transpose(unique(component_columns))
Example #6
    def cam_observation_update(self, cam_obs):
        '''Single bearing-color observation'''
        zt = Matrix([
            cam_obs.bearing, cam_obs.color.r, cam_obs.color.g, cam_obs.color.b
        ])

        self.motion_update(self.last_twist)

        for particle in self.robot_particles:
            j = particle.get_feature_id(zt)
            if j < 0:  # not seen before
                # note, this will automagically add a new feature if possible
                particle.weight = self.add_hypothesis(particle.state, zt)
            else:  # j seen before
                feature = particle.get_feature_by_id(j)
                # pylint: disable=line-too-long
                # naming explains functionality
                z_hat = particle.measurement_prediction(
                    feature.mean, particle.state)
                H = self.jacobian_of_motion_model(particle.state, feature.mean)
                Q = mm(mm(H, feature.covar), transpose(H)) + self.Qt
                Q_inverse = inverse(Q)
                K = mm(mm(feature.covar, transpose(H)), Q_inverse)

                new_mean = feature.mean + mm(K, zt - z_hat)
                new_covar = mm(identity(5) - mm(K, H), feature.covar)

                particle.replace_feature_ekf(j, new_mean, new_covar)
                particle.weight = pow(
                    2 * math.pi * magnitude(Q), -1 / 2) * math.exp(
                        -0.5 * (transpose(zt - z_hat) * Q_inverse *
                                (zt - z_hat)))
            # endif
            # for all other features...do nothing
        # end for

        # resample: roulette-wheel selection, one draw per new particle
        temp_particle_list = []
        sum_ = 0
        for particle in self.robot_particles:
            sum_ = sum_ + particle.weight

        for _ in range(0, len(self.robot_particles)):
            chosen = random() * sum_
            for particle in self.robot_particles:
                chosen = chosen - particle.weight
                if chosen < 0:
                    # choose this particle and move on to the next draw
                    temp_particle_list.append(particle.deep_copy())
                    break

        self.robot_particles = temp_particle_list
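For reference, the else branch above is the standard per-feature EKF measurement update written in the code's own names: Q = H·covar·H^T + Qt is the innovation covariance, K = covar·H^T·Q^-1 is the Kalman gain, and the feature is refreshed with mean' = mean + K·(zt - z_hat) and covar' = (I - K·H)·covar.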
Example #7
def resize_image(image, factor):
    image = zero_seperate(image)
    new_image = []
    for row in image:
        vec = [1] * factor + [0] * (len(row) - factor)
        new_row = ifft(hadamard(fft(row), fft(vec)))
        new_image.append(new_row)
    image = transpose(new_image)
    new_image = []
    for col in image:
        vec = [1] * factor + [0] * (len(col) - factor)
        new_col = ifft(hadamard(fft(col), fft(vec)))
        new_image.append(new_col)
    image = transpose(new_image)
    return convert_int(image)
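`hadamard` is not shown; from its use it is presumably the element-wise product of two equal-length FFT bin lists (multiplying by the FFT of the box vector `vec` is a circular convolution in the spatial domain). A hypothetical one-liner:

def hadamard(xs, ys):
    # element-wise (Hadamard) product of two equal-length sequences
    return [x * y for x, y in zip(xs, ys)]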
Example #8
    def train(self, input_array, target_array):
        # Generating the hidden outputs
        inputs = matrix.fromArray(input_array)
        hidden = matrix.multiply(self.weights_ih, inputs)
        hidden.add(self.bias_h)
        # activation function
        hidden.map(sigmoid)
        # Generating the output layer's output
        outputs = matrix.multiply(self.weights_ho, hidden)
        outputs.map(sigmoid)

        targets = matrix.fromArray(target_array)

        output_errors = matrix.subtract(targets, outputs)

        # gradient = outputs * (1 - outputs)
        # Calculate gradient
        gradients = matrix.map(outputs, dsigmoid)
        # get hadamard product
        gradients.multiply(output_errors)
        # perform scalar multiplication
        gradients.multiply(self.learning_rate)

        # Calculate deltas
        hidden_t = matrix.transpose(hidden)
        weight_ho_deltas = matrix.multiply(gradients, hidden_t)

        # Change weights by the calculated deltas
        self.weights_ho.add(weight_ho_deltas)
        # Adjust bias by the gradient
        self.bias_o.add(gradients)

        # backpropagate the output errors through the weights to get the hidden-layer errors
        weights_ho_t = matrix.transpose(self.weights_ho)
        hidden_errors = matrix.multiply(weights_ho_t, output_errors)

        # Calculate hidden gradient
        hidden_gradient = matrix.map(hidden, dsigmoid)
        # hadamard product
        hidden_gradient.multiply(hidden_errors)
        hidden_gradient.multiply(self.learning_rate)

        # Calculate input->hidden deltas
        inputs_t = matrix.transpose(inputs)
        weight_ih_deltas = matrix.multiply(hidden_gradient, inputs_t)

        self.weights_ih.add(weight_ih_deltas)
        self.bias_h.add(hidden_gradient)
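A hypothetical usage sketch (the constructor signature is an assumption, not shown above): training the matrix-backed network on XOR by calling train() repeatedly.

nn = NeuralNetwork(2, 4, 1)  # assumed: input, hidden, output sizes
data = [([0, 0], [0]), ([0, 1], [1]), ([1, 0], [1]), ([1, 1], [0])]
for _ in range(10000):
    for input_array, target_array in data:
        nn.train(input_array, target_array)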
Example #9
    def run(self):
        features = self.CreateFeatures()
        M = self.CreateMatrix(features)
        b = matrix.multiply(matrix.transpose(features), self.y)
        A = matrix.inverse(M)
        weight = matrix.multiply(A, b)
        error = self.CalculateError(weight)
        self.PrintResult(weight, error)
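`run` solves the normal equations M·w = A^T·y with an explicit inverse. A numpy sketch of the same computation (an illustration, not the project's `matrix` module), using `solve` instead of forming the inverse and omitting the ridge/diagonal term that CreateMatrix adds in Example #31:

import numpy as np

A = np.array([[1.0, 1.0], [1.0, 2.0], [1.0, 3.0]])  # toy design matrix
y = np.array([[1.0], [2.0], [2.0]])
w = np.linalg.solve(A.T @ A, A.T @ y)  # least-squares weights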
Example #10
def genb(features, y):
    """
    input: features (A), y (b)
    output: (A^T)(b)
    """
    y = np.array(y).reshape(-1, 1)
    b = mat.mul(mat.transpose(features), y)
    return b
Example #11
def sample(mean, cov , dimensions):
    r = matrix.transpose( matrix.Cholesky(cov) )

    randoms = matrix.zero(dimensions, 1)
    for i in range(len(randoms)):
        randoms[i][0] = random.gauss(0,0.05)

    return matrix.plus( mean , matrix.mult(r, randoms))
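Whether the transpose is needed depends on `matrix.Cholesky`'s convention: if it returns an upper factor U with U^T·U = cov, the transpose is the lower factor L, and mean + L·z with z ~ N(0, sigma^2·I) has covariance sigma^2·cov. A numpy check of the idea for this sampler and the identical one in the next example (numpy's cholesky already returns the lower factor, so no transpose is needed):

import numpy as np

cov = np.array([[1.0, 0.5],
                [0.5, 2.0]])
L = np.linalg.cholesky(cov)              # lower-triangular: L @ L.T == cov
z = np.random.normal(0.0, 0.05, (2, 1))  # matches the gauss(0, 0.05) draws
sample = L @ z                           # zero-mean, covariance (0.05**2) * cov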
Example #12
def sample(mean, cov , dimensions):
    r = matrix.transpose( matrix.Cholesky(cov) )

    randoms = matrix.zero(dimensions, 1)
    for i in range(len(randoms)):
        randoms[i][0] = random.gauss(0,0.025)

    return matrix.plus( mean , matrix.mult(r, randoms))
Example #14
def getHessionInv(features):
    """
    This function calculates the inverse of the Hessian of LSE (H = 2(A^T)A)
    """
    # Hessian = 2(A^T)A
    features_T = mat.transpose(features)
    hession = 2 * mat.mul(features_T, features)
    return mat.inv(hession)
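Because the LSE objective ||Ax - b||^2 is quadratic, a single Newton step with this inverse Hessian lands exactly on the optimum: starting from x = 0, the step is H^-1·(2·A^T·b). A numpy sketch (illustration only):

import numpy as np

A = np.array([[1.0, 1.0], [1.0, 2.0], [1.0, 3.0]])
b = np.array([[1.0], [2.0], [2.0]])
H_inv = np.linalg.inv(2 * A.T @ A)  # inverse Hessian, as above
x = H_inv @ (2 * A.T @ b)           # one Newton step from 0: the LSE solution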
Example #15
def get_transforms(transform: Matrix):
    position = Point3D(*transform[3])
    scale = Scale3D(*map(mod_vector, transform[0:3]))
    rotation_matrix = unscale(transform[0:3], scale.as_vector())
    rotation = Rotation3D(
        *map(degrees, get_angles(transpose(rotation_matrix))))

    return position, rotation, scale
Example #16
def triIter(m):
    # walk the point matrix three columns (one triangle) at a time
    for i in range(0, len(m[0]), 3):
        t = matrix.transpose([m[j][i:i+3] for j in xrange(3)])
        x = 0
        for j in t:
            for k in range(len(j)):
                # perspective divide by the homogeneous w row, scaled to the viewport
                j[k] *= 250. / m[3][i + x]
            x += 1
        yield t
Example #17
def genmatrix(features, rate):
    """
    input: A, rate
    output: (A^T)(A) - (rate)(I)
    """
    matrix = mat.mul(mat.transpose(features), features)
    matrix -= rate * np.eye(matrix.shape[0])
    return matrix
Example #18
def getGradient(features, weight, target):
    """
    This function calculates the gradient of LSE: 2(A^T)Ax - 2(A^T)b
    """
    # gradient = 2(A^T)Ax - 2(A^T)b
    features_T = mat.transpose(features)
    gradient = 2 * mat.mul(mat.mul(features_T, features), weight)
    gradient -= 2 * mat.mul(features_T, target)
    return gradient
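A hypothetical usage sketch (numpy standing in for the `mat` helpers): plain gradient descent on the same LSE objective, driven by this gradient.

import numpy as np

A = np.array([[1.0, 1.0], [1.0, 2.0], [1.0, 3.0]])
b = np.array([[1.0], [2.0], [2.0]])
x = np.zeros((2, 1))
for _ in range(2000):
    gradient = 2 * (A.T @ A @ x) - 2 * (A.T @ b)
    x -= 0.01 * gradient  # small fixed step; converges to the LSE solution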
Example #19
def normal_to_world(shape, normal):
    normal = transpose(inverse(shape.transform)) * normal
    normal.w = 0
    normal = normalize(normal)

    if shape.parent is not None:
        normal = normal_to_world(shape.parent, normal)

    return normal
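The inverse-transpose is what keeps normals perpendicular to surfaces under non-uniform scaling. A small numpy check of the principle (an illustration, not the ray tracer's own types):

import numpy as np

M = np.diag([2.0, 1.0, 1.0])         # non-uniform scale
tangent = np.array([1.0, 1.0, 0.0])
normal = np.array([1.0, -1.0, 0.0])  # perpendicular to the tangent
print(np.dot(M @ tangent, M @ normal))                   # 3.0: transforming the normal by M breaks perpendicularity
print(np.dot(M @ tangent, np.linalg.inv(M).T @ normal))  # 0.0: the inverse-transpose preserves it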
Example #20
    def CalculateError(self, weight):
        error = 0.0
        weight = matrix.transpose(weight)
        for (xi, yi) in zip(self.x, self.y):
            predict = 0.0
            for d in range(self.degree):
                predict += (weight[0][d] * (xi**d))
            error += (predict - yi[0])**2
        return error
Example #21
def diversity(activity):
    transactions = transpose(activity)
    unique_transactions = unique(transactions)
    buckets = list(map(lambda t: transactions.count(t), unique_transactions))
    numerator = reduce(lambda s, n: s + n * (n - 1), buckets, 0)
    num_of_transactions = len(transactions)
    denominator = num_of_transactions * (num_of_transactions - 1)
    try:
        return 1 - numerator / denominator
    except ZeroDivisionError:
        return 0
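A worked check: for activity [[1, 1, 0], [0, 0, 1]], the transactions (columns) are [1, 0], [1, 0], [0, 1]. The repeated transaction's bucket contributes n·(n - 1) = 2·1 = 2 to the numerator and the denominator is 3·2 = 6, so diversity = 1 - 2/6 = 2/3.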
Example #22
def min_max_scaler(features):
    """对每个特征按最大最小值进行线性缩放归一化至[0, 1]
    x = (x - min) / (max - min)

    Args:
        features: 特征

    Returns: 归一化后的特征

    Examples:
        >>> min_max_scaler([[-1, 2], [-0.5, 6], [0, 10], [1, 18]])
        [[0.0, 0.0], [0.25, 0.25], [0.5, 0.5], [1.0, 1.0]]

    """
    transposed = transpose(features)
    for i in range(len(transposed)):
        min_value, max_value = min(transposed[i]), max(transposed[i])
        transposed[i] = each(
            transposed[i], lambda x: (x - min_value) / (max_value - min_value))
    return transpose(transposed)
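`each` is not shown; from its use it presumably maps a function over a list (note the argument order). A hypothetical equivalent; because it is applied inside the loop, the lambda always sees the current column's min_value and max_value:

def each(xs, f):
    return [f(x) for x in xs]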
Example #23
    def move_is_possible(self, direction):
        def row_is_left_movable(row):
            def change(i):
                if row[i] == 0 and row[i + 1] != 0:
                    # able to move
                    return True
                if row[i] != 0 and row[i + 1] == row[i]:
                    # able to merge
                    return True
                return False

            return any(change(i) for i in range(len(row) - 1))

        check = {}
        check['LEFT'] = lambda field: any(
            row_is_left_movable(row) for row in field)
        check['RIGHT'] = lambda field: check['LEFT'](invert(field))
        check['UP'] = lambda field: check['LEFT'](transpose(field))
        check['DOWN'] = lambda field: check['RIGHT'](transpose(field))
        if direction in check:
            return check[direction](self.field)
        return False
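`invert` and `transpose` reduce all four directions to the LEFT case: `invert` mirrors each row (for RIGHT) and `transpose` swaps rows and columns (for UP/DOWN), both here and in the `move` method of the next example. A hypothetical sketch of both:

def invert(field):
    return [row[::-1] for row in field]

def transpose(field):
    return [list(col) for col in zip(*field)]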
Example #24
    def move(self, direction):
        def move_row_left(row):
            def tighten(row):
                new_row = [i for i in row if i != 0]
                new_row += [0 for i in range(len(row) - len(new_row))]
                return new_row

            def merge(row):
                pair = False
                new_row = []
                for i in range(len(row)):
                    if pair:
                        new_row.append(2 * row[i])
                        self.score += 2 * row[i]
                        pair = False
                    else:
                        if i + 1 < len(row) and row[i] == row[i + 1]:
                            pair = True
                            new_row.append(0)
                        else:
                            new_row.append(row[i])
                assert len(new_row) == len(row)
                return new_row

            return tighten(merge(tighten(row)))

        moves = {}
        moves['LEFT'] = lambda field: [move_row_left(row) for row in field]
        moves['RIGHT'] = lambda field: invert(moves['LEFT'](invert(field)))
        moves['UP'] = lambda field: transpose(moves['LEFT'](transpose(field)))
        moves['DOWN'] = lambda field: transpose(moves['RIGHT']
                                                (transpose(field)))
        if direction in moves:
            if self.move_is_possible(direction):
                self.field = moves[direction](self.field)
                self.spawn()
                return True
            else:
                return False
        return False
Example #25
def get_transforms(transform: Matrix):
    x, z, y = map(lambda t: t * 100, transform[3])
    position = RelativeLocation(-x, y, z)

    scale_temp = [*map(mod_vector, transform[0:3])]
    rotation_matrix = unscale(transform[0:3], scale_temp)
    scale = RelativeScale3D(scale_temp[0], scale_temp[2], scale_temp[1])
    temp_rotation = [
        *map(degrees, get_angles_XZY_new(transpose(rotation_matrix)))
    ]
    rotation = RelativeRotation(temp_rotation[0], temp_rotation[2],
                                temp_rotation[1])

    return position, rotation, scale
Example #26
def main(lines):

    M, m, n = parse(lines)

    if n > m:

        M = transpose(M, m, n)
        reverse(M)
        m, n = n, m

    print("partial information")
    print(show(M), end="")

    compare = comparator(M)

    print(compute(compare, m, n))
Example #28
def _remove_tests(spectra):
    components = transpose(spectra)
    no_tests = reduce(_is_test, components, [])
    return transpose(no_tests)
Example #29
def predict(matrix, model):
    matrix = numpy.array(matrix, dtype='float')
    matrix = matrix.transpose()
    return numpy.array(model.predict(matrix), dtype='float')
Example #30
def main(x, hidden, b, epochs, test, w, g, n_d, m, p_m):
    random.seed(1)
    training_data = x[:]
    noise_data = n_d[:]

    # Adding bias to training data
    training_data.append([])
    noise_data.append([])

    for _ in x[0]:
        training_data[len(training_data)-1].append(b)
        noise_data[len(noise_data)-1].append(b)

    # Random weights for synapses
    synapses0 = []
    synapses1 = []

    for f in range(hidden):
        synapses0.append([])
        for _ in range(len(training_data)):
            synapses0[f].append(random.uniform(w, -w))  # second rand for bias synapses
    for j in range(hidden + 1):  # +1 for bias
        synapses1.append([random.uniform(w, -w)])

    sig_layer2 = []
    error_log = []
    error_log2 = []
    gamma_log = []
    global loading_message
    global loading_progress

    # learning loop (learning = iterations)
    for i in xrange(epochs):
        loading_progress = round((float(i) / float(epochs)) * 100, 1)

        # # # Forward pass
        # # Input Layer
        layer1 = matrix.multiply(synapses0, training_data)

        # Activation level
        sig_layer1 = matrix.sig(layer1)

        # # Hidden Layer
        # Adding bias to layer1
        b_sig_layer1 = sig_layer1[:]

        b_sig_layer1.append([])

        for _ in b_sig_layer1[0]:
            b_sig_layer1[len(b_sig_layer1) - 1].append(b)

        layer2 = matrix.multiply(matrix.transpose(synapses1), b_sig_layer1)

        sig_layer2 = matrix.sig(layer2)

        # # # ----------------
        # # Calculate net error
        error = [matrix.subtract(test, matrix.transpose(sig_layer2))]
        # error = [matrix.error(test, matrix.transpose(sig_layer2))]
        # if i % 5000 == 0:
        #     print(error)

        temp = 0
        for j in range(len(error[0])):
            temp += error[0][j]

        error_log.append(temp / len(error[0]))

        # Test with test data
        sig_noise = []
        l1 = matrix.multiply(synapses0, noise_data)
        sig_l1 = matrix.sig(l1)
        b_sig_l1 = sig_l1[:]
        b_sig_l1.append([])

        for _ in b_sig_l1[0]:
            b_sig_l1[len(b_sig_l1) - 1].append(b)

        l2 = matrix.multiply(matrix.transpose(synapses1), b_sig_l1)
        sig_noise = matrix.sig(l2)

        error2 = [matrix.subtract(test, matrix.transpose(sig_noise))]

        temp2 = 0
        for j in range(len(error2[0])):
            temp2 += error2[0][j]

        error_log2.append(temp2 / len(error2[0]))

        # # # ----------------
        # # Calculating weight updates
        # Delta for neuron in output layer (1 for each training data)
        deriv_sig_layer2 = matrix.derivative(sig_layer2)
        delta_layer2 = [[]]

        # temp_g = (g/(i+1))
        # gamma_log.append(temp_g)

        for j in range(len(error[0])):
            delta_layer2[0].append(deriv_sig_layer2[0][j] * error[0][j] * g)

        # Delta for neurons in hidden layer
        deriv_sig_layer1 = matrix.derivative(sig_layer1)
        delta_layer1 = []
        delta_weight_sum = []

        for k in range(len(synapses1)):
            delta_weight_sum.append([])
            for j in range(len(delta_layer2[0])):
                delta_weight_sum[k].append(synapses1[k][0] * delta_layer2[0][j])

        for k in range(len(deriv_sig_layer1)):
            delta_layer1.append([])
            for j in range(len(deriv_sig_layer1[0])):
                delta_layer1[k].append(deriv_sig_layer1[k][j] * delta_weight_sum[k][j] * g)

        delta_w_oh = matrix.multiply(delta_layer2, matrix.transpose(b_sig_layer1))
        delta_w_hi = matrix.multiply(delta_layer1, matrix.transpose(training_data))

        # # # Backwards pass
        # # Update weights
        synapses1 = matrix.add(synapses1, matrix.transpose(delta_w_oh))

        synapses0 = matrix.add(synapses0, delta_w_hi)

        if i > epochs * 0.5:
            if i > epochs * 0.95:
                loading_message = "I'm nearly done, good training."
            else:
                loading_message = "Well, I'm halfway through."

        # # # End of learning

    # Testing net with noised/test data
    sig_noise = []
    l1 = matrix.multiply(synapses0, noise_data)
    sig_l1 = matrix.sig(l1)
    b_sig_l1 = sig_l1[:]
    b_sig_l1.append([])

    for _ in b_sig_l1[0]:
        b_sig_l1[len(b_sig_l1) - 1].append(b)

    l2 = matrix.multiply(matrix.transpose(synapses1), b_sig_l1)
    sig_noise = matrix.sig(l2)

    # formatting net output for plot
    result1 = []  # training data
    result2 = []  # noised data
    for i in range(len(sig_layer2[0])):
        result1.append(sig_layer2[0][i] * 2 - 1)
        result2.append(sig_noise[0][i] * 2 - 1)

    if m == "sin":
        # Plot
        # Some code lines from: https://matplotlib.org/users/legend_guide.html
        neuron_patch = mpatches.Patch(label='Neurons: ' + str(hidden))
        bias_patch = mpatches.Patch(label='Bias: ' + str(b))
        iteration_patch = mpatches.Patch(label='Iterations: ' + str(epochs))
        epsilon_patch = mpatches.Patch(label='Gamma: ' + str(g))
        weight_patch = mpatches.Patch(label='Weight range: +/- ' + str(w))
        time_patch = mpatches.Patch(label=str(round((time.time() - start_time) / 60, 2)) + " min")
        first_legend = plt.legend(
            handles=[bias_patch, time_patch, epsilon_patch, neuron_patch, iteration_patch, weight_patch],
            bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
            ncol=3, mode="expand", borderaxespad=0.)

        line4, = plt.plot(error_axis[0], error_log, label="Error", linewidth=0.5)
        line6, = plt.plot(error_axis[0], error_log2, label="Error2", linewidth=0.5)
        line1, = plt.plot(inputData[0], result1, label="Training Data", linewidth=0.75)
        line2, = plt.plot(inputData[0], result2, label="Test Data", linestyle=':', linewidth=0.75)
        line3, = plt.plot(x_data, y_data, label="sin(x)", linestyle='--', linewidth=0.75)
        line5, = plt.plot(x_axis, y_axis, label="Axis", linewidth=0.5)
        ax = plt.gca().add_artist(first_legend)
        plt.legend(handles=[line4, line1, line2, line3, line5, line6])

        if p_m:
            plt.savefig('./plots/' + str(time.time())[2:10] + '.png')
        else:
            plt.show()

        plt.clf()
        plt.cla()
        plt.close()

    elif m == "xor":
        print("-----")
        for i in range(len(sig_noise[0])):
            print "Input: " + str(round(noise_data[0][i], 0)) + " & " \
                  + str(round(noise_data[1][i], 0)) + " = " + str(round(sig_noise[0][i], 0)) + " (" \
                  + str(round(sig_noise[0][i] * 100, 4)) + "% for True)"
Example #31
    def CreateMatrix(self, features):
        AtA = matrix.multiply(matrix.transpose(features), features)
        return matrix.addition(AtA, matrix.diagonal(self.degree, self.lm))
Example #32
        data.append(one[1:])
        i += 1

    # check that the data is complete
    if len(data[0]) != len(data[1]):
        print("The data may be incomplete! Please check it.")

    return data


if __name__ == "__main__":
    n = len(sys.argv)
    if n == 1:
        print("Input error!")
        print("chart [-w | datafile] distfile ")
        sys.exit(1)

    distfile = 'OffsetChart'

    blocks = ReadData(sys.argv[1])
    wData = HandleData(blocks)
    transData = matrix.transpose(wData)
    oData = FormatOutput(transData)
    chart = ChartData(oData)
    generateJS(chart, distfile + '.js')

    #produce a file that contain offset data used to check
    f = open(distfile + '.dat', 'w')
    f.writelines(repr(transData))
    f.close()
Example #33
def camtest():
    import shape
    fov = 100
    cam = Camera(0.5,0.5,0.8,0,0,0,0,0,1 / math.tan(fov / 2.))
    camargs = [100,100,-50,-300]
    camT = transform.C3(*camargs)*transform.T(-250, -250,-175)
    print camT
    ncamT = transform.C3invT(*camargs)
    print ncamT
    v = [250,250,1000]
    lights = [Light(500,0,500,(20,20,20),(200,200,200),(255,255,255)),
              Light(500,500,200,(20,20,20),(200,200,200),(255,255,255)),
              Light(0,250,500,(20,20,20),(200,200,200),(255,255,255))
    ]
    camlights = []
    for l in lights:
        x = dot4xyz(camT[0], l.x, l.y, l.z)
        y = dot4xyz(camT[1], l.x, l.y, l.z)
        z = dot4xyz(camT[2], l.x, l.y, l.z)
        w = dot4xyz(camT[3], l.x, l.y, l.z)*1.
        print x/w*250,y/w*250,z/w*250
        camlights.append(Light(x/w*250, y/w*250, z/w*250,l.Ia,l.Id,l.Is))
    tris, norms = shape.box(200,200,-100,100,100,200)
    print norms
    print ncamT * norms
    print list(triIter(tris))
    trot = transform.R('y',5)
    nrot = transform.TransMatrix()
    nrot.lst = matrix.transpose(transform.R('y',-5))
    tmat = transform.T(250,250,0)*trot*transform.T(-250,-250,0)
    tris = tmat*tmat*tmat*tris
    norms= trot*trot*trot*norms
    print norms
    print ncamT*norms
    amb = Texture(False, [255,0,0])
    diff = Texture(False, [255,0,0])
    spec = Texture(False, [255,150,150])
    mat = Material(amb, diff, spec, 10)
    for i in range(72):
        #print tris
        tricam = camT * tris
        #print tricam
        #tricam[3] = [1.] * len(tricam[3])
        #tricam = transform.T(*v) * tricam
        print 'trans done'
        a = time()
        zbuf = [[None]*500 for _ in range(500)]
        img = Image(500,500)
        trit = list(triIter(tricam))
        #print trit,tricam
        trit.sort(key=lambda tri: -tri[0][2] - tri[1][2] - tri[2][2])
        normcam = ncamT*norms
        normt = []
        for j in range(len(normcam[0])):
            sgn = (normcam[3][j] > 0) * 2 - 1
            normt.append(normalize(normcam[0][j]*sgn, normcam[1][j]*sgn, normcam[2][j]*sgn))
        print normt
        #print len(trit), len(normt)
        for j in range(len(trit)):
            # (p1, p2, p3, mat, vx, vy, vz, lights, texcache, zbuf):
            t = trit[j]
            ps = []
            for pt in t:
                pt[0]+=250
                pt[1]+=250
                pt[2]+=0
                print pt
                ps.append(Point(*pt + normt[j] + [0,0]))
            img.setPixels(renderTriangle(*ps + [mat] + v + [camlights, {}, zbuf]))
        for t in trit:
            l = line(*t[0][:2]+t[1][:2])
            l += line(*t[0][:2]+t[2][:2])
            l += line(*t[2][:2]+t[1][:2])
            img.setPixels([p + ((0,0,0),) for p in l])

        for j in range(len(trit)):
            t = trit[j]
            ps = []
            for pt in t:
                nls = line(pt[0] - 4, pt[1], pt[0] + 4, pt[1])
                nls += line(pt[0], pt[1] - 4, pt[0], pt[1] + 4)
                nls += line(pt[0] - 4, pt[1] - 4, pt[0] + 4, pt[1] + 4)
                nls += line(pt[0] - 4, pt[1] + 4, pt[0] + 4, pt[1] - 4)
                nls += line(pt[0], pt[1], pt[0] + normt[j][0]*20, pt[1] + normt[j][1]*20)
                print normt[j][0], normt[j][1]
                img.setPixels([p + ((0,255,0),) for p in nls])
        
        img.savePpm('cube/%d.ppm'%(i))
        tris = tmat * tris
        norms = nrot * norms
        print norms
        print i, (time() - a) * 1000, 'ms'
Example #34
def main(x, hidden, b, learning, test, w, g, n_d):
    random.seed(1)
    training_data = x[:]
    noise_data = n_d[:]

    # Adding bias to training data
    training_data.append([])
    noise_data.append([])

    for _ in x[0]:
        training_data[1].append(b)
        noise_data[1].append(b)

    # Random weights for synapses
    synapses0 = []
    synapses1 = []

    for _ in range(hidden):
        synapses0.append([random.uniform(w, -w), random.uniform(w, -w)])  # second rand for bias synapses
    for j in range(hidden + 1):  # +1 for bias
        synapses1.append([random.uniform(w, -w)])

    sig_layer2 = []

    global loading_message
    global loading_progress
    # learning loop (learning = iterations)
    for i in xrange(learning):
        temp = i+1
        loading_progress = round((float(temp) / float(learning)) * 100, 1)

        # loading_progress = (temp / learning) * 100
        # # # Forward pass
        # # Input Layer

        layer1 = matrix.multiply(synapses0, training_data)

        # Activation level
        sig_layer1 = matrix.sig(layer1)

        # # Hidden Layer
        # Adding bias to layer1

        b_sig_layer1 = sig_layer1[:]

        b_sig_layer1.append([])

        for _ in b_sig_layer1[0]:
            b_sig_layer1[len(b_sig_layer1) - 1].append(b)

        layer2 = matrix.multiply(matrix.transpose(synapses1), b_sig_layer1)

        sig_layer2 = matrix.sig(layer2)

        # Calculate net error
        error = [matrix.subtract(test, matrix.transpose(sig_layer2))]

        # if i % 25000 == 0:
        #     temp = 0
        #     for j in range(len(error)):
        #         temp += temp + error[0][j]
        #     print i, temp

        # Delta for neuron in output layer (1 for each training data)
        deriv_sig_layer2 = matrix.derivative(sig_layer2)
        delta_layer2 = [[]]

        for j in range(len(error[0])):
            delta_layer2[0].append(deriv_sig_layer2[0][j] * error[0][j] * g)

        # Delta for neurons in hidden layer
        deriv_sig_layer1 = matrix.derivative(sig_layer1)
        delta_layer1 = []
        delta_weight_sum = []

        for k in range(len(synapses1)):
            delta_weight_sum.append([])
            for j in range(len(delta_layer2[0])):
                delta_weight_sum[k].append(synapses1[k][0] * delta_layer2[0][j])

        for k in range(len(deriv_sig_layer1)):
            delta_layer1.append([])
            for j in range(len(deriv_sig_layer1[0])):
                delta_layer1[k].append(deriv_sig_layer1[k][j] * delta_weight_sum[k][j] * g)

        delta_w_oh = matrix.multiply(delta_layer2, matrix.transpose(b_sig_layer1))
        delta_w_hi = matrix.multiply(delta_layer1, matrix.transpose(training_data))

        # # Update weights
        synapses1 = matrix.add(synapses1, matrix.transpose(delta_w_oh))

        synapses0 = matrix.add(synapses0, delta_w_hi)

        if i > learning * 0.5:
            if i > learning * 0.95:
                loading_message = "I'm nearly done, good training."
            else:
                loading_message = "Well, I'm halfway through."

    # Testing net with noised data
    sig_noise = []
    if len(n_d) > 0:
        # print "testing with noise data"

        l1 = matrix.multiply(synapses0, noise_data)

        sig_l1 = matrix.sig(l1)

        b_sig_l1 = sig_l1[:]

        b_sig_l1.append([])

        for _ in b_sig_l1[0]:
            b_sig_l1[len(b_sig_l1) - 1].append(b)

        l2 = matrix.multiply(matrix.transpose(synapses1), b_sig_l1)

        sig_noise = matrix.sig(l2)

    # formatting net output for plot
    result1 = []  # training data
    result2 = []  # noised data
    for i in range(len(sig_layer2[0])):
        result1.append(sig_layer2[0][i] * 2 - 1)
        result2.append(sig_noise[0][i] * 2 - 1)

    # Plot
    # Some code lines from: https://matplotlib.org/users/legend_guide.html
    neuron_patch = mpatches.Patch(label='Neurons: ' + str(hidden))
    bias_patch = mpatches.Patch(label='Bias: ' + str(b))
    iteration_patch = mpatches.Patch(label='Iterations: ' + str(learning))
    epsilon_patch = mpatches.Patch(label='Epsilon: ' + str(g))
    weight_patch = mpatches.Patch(label='Weight range (0 +/-): ' + str(w))
    time_patch = mpatches.Patch(label=str(round((time.time() - start_time) / 60, 2)) + " min")
    first_legend = plt.legend(
        handles=[bias_patch, time_patch, epsilon_patch, neuron_patch, iteration_patch, weight_patch],
        bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
        ncol=3, mode="expand", borderaxespad=0.)

    line1, = plt.plot(inputData[0], result1, label="Training Data", linewidth=0.75)
    line2, = plt.plot(inputData[0], result2, label="Test Data", linestyle=':', linewidth=0.75)
    line3, = plt.plot(x_data, y_data, label="sin(x)", linestyle='--', linewidth=0.75)
    ax = plt.gca().add_artist(first_legend)
    plt.legend(handles=[line1, line2, line3])
    plt.savefig('./plots/plot' + str(time.time())[2:10] + '.png')

    plt.clf()
    plt.cla()
    plt.close()
Example #36
error_axis = [tools.linspace(0, 6.4, iterations)]

loading_message = "I'm learning right now."
loading_progress = 0.0

for _ in range(100):
    for _ in range(1):
        mode = raw_input("What do you want to learn? (1 = SIN(), 2 = XOR) ")
        if mode == "1":
            mode = "sin"
            plot_mode = raw_input("Save Plot? (1 = save plot, 2 = show plot) ")
            if plot_mode == "1":
                plot = True
        elif mode == "2":
            mode = "xor"
            inputData = matrix.transpose(inputData_xor)
            testdata = testdata_xor
            noise_d = inputData

        # gamma += 0.001

        done = False

        t = threading.Thread(target=animate)
        t.start()

        start_time = time.time()

        main(inputData, hiddenNeurons, bias, iterations, testdata, weight, gamma, noise_d, mode, plot)

        time.sleep(0.5)