Example #1
def forward(self, train_data):
    # First fully connected layer followed by ReLU.
    self.nurons["z2"] = fc_forward(train_data, self.weights["W1"], self.weights["b1"])
    self.nurons["z2_relu"] = relu_forward(self.nurons["z2"])
    # Second fully connected layer followed by ReLU.
    self.nurons["z3"] = fc_forward(self.nurons["z2_relu"], self.weights["W2"], self.weights["b2"])
    self.nurons["z3_relu"] = relu_forward(self.nurons["z3"])
    # Output layer: raw scores, no activation applied here.
    self.nurons["y"] = fc_forward(self.nurons["z3_relu"], self.weights["W3"], self.weights["b3"])
    return self.nurons["y"]
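
The helpers fc_forward and relu_forward are not part of the snippet. A minimal sketch of what they could look like, assuming NumPy arrays with one sample per row (the shapes are an assumption, not taken from the example):

import numpy as np

def fc_forward(x, W, b):
    # Affine layer: x (batch, in_features) @ W (in_features, out_features) + b.
    return x @ W + b

def relu_forward(z):
    # Element-wise ReLU non-linearity.
    return np.maximum(z, 0)
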
Example #2
def forward(self, train_data):
    # Layer 1: fully connected + ReLU.
    self.nodes["A1"] = layer.fc_forward(self.Parameters["W1"], train_data,
                                        self.Parameters["B1"])
    self.nodes["Z1"] = activations.relu_forward(self.nodes["A1"])
    # Layer 2: fully connected + ReLU.
    self.nodes["A2"] = layer.fc_forward(self.Parameters["W2"],
                                        self.nodes["Z1"],
                                        self.Parameters["B2"])
    self.nodes["Z2"] = activations.relu_forward(self.nodes["A2"])
    # Layer 3: fully connected + ReLU.
    self.nodes["A3"] = layer.fc_forward(self.Parameters["W3"],
                                        self.nodes["Z2"],
                                        self.Parameters["B3"])
    self.nodes["Z3"] = activations.relu_forward(self.nodes["A3"])
    # Prediction: Z3 is computed but the class index is taken from the
    # pre-activation scores A3, one prediction per column.
    self.nodes["y"] = np.argmax(self.nodes["A3"], axis=0)
    return self.nodes["y"]
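
Because the prediction is taken with np.argmax(..., axis=0), the activations here are evidently stored one sample per column, so the argmax over rows yields one class index per sample. A small, self-contained illustration of that convention (the sizes are invented for illustration only):

import numpy as np

A3 = np.random.randn(10, 4)     # 10 class scores (rows) for 4 samples (columns)
y = np.argmax(A3, axis=0)       # shape (4,): predicted class index per sample
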
Example #3
def forward(self, train_data):
    # X1 = train_data[0]
    # Convolution block 1: conv + 2x2 max pooling with stride 2.
    self.nodes["Conv1"] = \
        layer.conv_forward(train_data, self.Parameters["K1"], self.Parameters["Kb1"])
    self.nodes["Maxpool1"] = layer.max_pooling_forward(self.nodes["Conv1"], (2, 2), (2, 2))
    # Convolution block 2: conv + 2x2 max pooling (stride argument omitted here).
    self.nodes["Conv2"] = \
        layer.conv_forward(self.nodes["Maxpool1"], self.Parameters["K2"], self.Parameters["Kb2"])
    self.nodes["MaxPool2"] = layer.max_pooling_forward(self.nodes["Conv2"], (2, 2))

    # Flatten the pooled feature maps to shape (128, -1) and transpose so each
    # sample occupies one column; the leading 128 (presumably the batch size)
    # is hard-coded.
    self.nodes["X2"] = self.nodes["MaxPool2"].reshape((128, -1)).T

    # Fully connected head: three FC layers, each followed by ReLU.
    self.nodes["A1"] = layer.fc_forward(self.Parameters["W1"], self.nodes["X2"], self.Parameters["B1"])
    self.nodes["Z1"] = activations.relu_forward(self.nodes["A1"])
    self.nodes["A2"] = layer.fc_forward(self.Parameters["W2"], self.nodes["Z1"], self.Parameters["B2"])
    self.nodes["Z2"] = activations.relu_forward(self.nodes["A2"])
    self.nodes["A3"] = layer.fc_forward(self.Parameters["W3"], self.nodes["Z2"], self.Parameters["B3"])
    self.nodes["Z3"] = activations.relu_forward(self.nodes["A3"])
    # Prediction: class index with the highest score in A3 for each sample (column).
    self.nodes["y"] = np.argmax(self.nodes["A3"], axis=0)
    return self.nodes["y"]
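
The snippet calls layer.max_pooling_forward both with and without an explicit stride. A minimal sketch of such a helper, assuming a (batch, channels, height, width) layout and a stride that defaults to the pool size (both are assumptions, not taken from the example):

import numpy as np

def max_pooling_forward(x, pool_size, stride=None):
    # x is assumed to have shape (batch, channels, height, width).
    ph, pw = pool_size
    sh, sw = stride if stride is not None else pool_size
    n, c, h, w = x.shape
    out_h = (h - ph) // sh + 1
    out_w = (w - pw) // sw + 1
    out = np.empty((n, c, out_h, out_w), dtype=x.dtype)
    for i in range(out_h):
        for j in range(out_w):
            window = x[:, :, i * sh:i * sh + ph, j * sw:j * sw + pw]
            out[:, :, i, j] = window.max(axis=(2, 3))
    return out
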
Example #4
import copy

import numpy as np

# fully_connected, activations, loss_func and autoEnc_loss are assumed to be
# project-local modules/functions providing the layer implementations used below.


def network(params,
            layers,
            data,
            labels,
            reconstruction=False,
            addnoise=False):

    l = len(layers)
    batch_size = layers[1]['batch_size']

    param_grad = {}
    cp = {}
    output = {}

    # Keep an unmodified copy of the input; the autoencoder loss reconstructs it.
    data_orig = copy.deepcopy(data)
    if addnoise:
        # Denoising variant: randomly zero out roughly a quarter of the inputs.
        noise = np.random.binomial(1, 0.75, size=data.shape)
        data = data * noise

    # Layer 1 is the input layer; it just wraps the (possibly noised) batch.
    output[1] = {
        'data': data,
        'height': layers[1]['height'],
        'channel': layers[1]['channel'],
        'batch_size': layers[1]['batch_size'],
        'diff': 0
    }

    # Forward pass through layers 2 .. l.
    for i in range(2, l + 1):
        if layers[i]['type'] == 'IP':
            output[i] = fully_connected.inner_product_forward(
                output[i - 1], layers[i], params[i - 1])
        elif layers[i]['type'] == 'RELU':
            output[i] = activations.relu_forward(output[i - 1], layers[i])
        elif layers[i]['type'] == 'Sigmoid':
            output[i] = activations.sigmoid_forward(output[i - 1], layers[i])
        elif layers[i]['type'] == 'Tanh':
            output[i] = activations.tanh_forward(output[i - 1], layers[i])
        elif layers[i]['type'] == 'LOSS':
            # Classification loss: also returns the gradients of the last IP
            # layer's parameters and the derivative w.r.t. its input.
            [obj, grad_w, grad_b, input_back_deriv,
             success_rate] = loss_func(params[i - 1]['w'], params[i - 1]['b'],
                                       output[i - 1]['data'], labels,
                                       layers[i]['num'], 1)
            param_grad[i - 1] = {
                'w': grad_w / batch_size,
                'b': grad_b / batch_size
            }
        elif layers[i]['type'] == 'autoEnc':
            # Autoencoder reconstruction loss against the clean input.
            [obj, input_back_deriv,
             success_rate] = autoEnc_loss(output[i - 1]['data'], data_orig)
            param_grad[i - 1] = {'w': 0.0, 'b': 0.0}

    # In reconstruction mode, return the activations that fed the loss layer
    # (after the loop above, i == l).
    if reconstruction:
        return output[i - 1]['data']

    # Backward pass through layers l-1 .. 2, propagating input_back_deriv.
    for i in range(l - 1, 1, -1):
        param_grad[i - 1] = {}
        param_grad[i - 1]['w'] = np.array([])
        param_grad[i - 1]['b'] = np.array([])
        if layers[i]['type'] == 'IP':
            output[i]['diff'] = input_back_deriv
            param_grad[i - 1], input_back_deriv = \
                fully_connected.inner_product_backward(
                    output[i], output[i - 1], layers[i], params[i - 1])
        elif layers[i]['type'] == 'RELU':
            output[i]['diff'] = input_back_deriv
            input_back_deriv = activations.relu_backward(
                output[i], output[i - 1], layers[i])
            # Activation layers have no parameters, so their gradients stay empty.
            param_grad[i - 1]['w'] = np.array([])
            param_grad[i - 1]['b'] = np.array([])
        elif layers[i]['type'] == 'Sigmoid':
            output[i]['diff'] = input_back_deriv
            input_back_deriv = activations.sigmoid_backward(
                output[i], output[i - 1], layers[i])
            param_grad[i - 1]['w'] = np.array([])
            param_grad[i - 1]['b'] = np.array([])
        elif layers[i]['type'] == 'Tanh':
            output[i]['diff'] = input_back_deriv
            input_back_deriv = activations.tanh_backward(
                output[i], output[i - 1], layers[i])
            param_grad[i - 1]['w'] = np.array([])
            param_grad[i - 1]['b'] = np.array([])

    # Return the per-sample average loss, the success rate reported by the
    # loss function, and the parameter gradients.
    return (obj / batch_size), success_rate, param_grad
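
autoEnc_loss is not shown in the snippet. A minimal sketch of a squared-error reconstruction loss with the same return signature, assuming the reconstruction and the clean input share a shape (the zero "success rate" is just a placeholder, since a reconstruction loss has no natural accuracy):

import numpy as np

def autoEnc_loss(recon, target):
    # Sum-of-squared-errors objective and its gradient w.r.t. the reconstruction.
    diff = recon - target
    obj = 0.5 * np.sum(diff ** 2)
    input_back_deriv = diff
    success_rate = 0.0   # placeholder: nothing to classify here
    return [obj, input_back_deriv, success_rate]
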
Example #5
import numpy as np

# fully_connected, activations and loss_func are assumed to be project-local
# modules/functions providing the layer implementations used below.


def network(params, layers, data, labels):

    l = len(layers)
    batch_size = layers[1]['batch_size']

    param_grad = {}
    cp = {}
    output = {}
    # Layer 1 is the input layer; it just wraps the raw batch.
    output[1] = {
        'data': data,
        'height': layers[1]['height'],
        'channel': layers[1]['channel'],
        'batch_size': layers[1]['batch_size'],
        'diff': 0
    }

    # Forward pass through the hidden layers 2 .. l-1.
    for i in range(2, l):
        if layers[i]['type'] == 'IP':
            output[i] = fully_connected.inner_product_forward(
                output[i - 1], layers[i], params[i - 1])
        elif layers[i]['type'] == 'RELU':
            output[i] = activations.relu_forward(output[i - 1], layers[i])
        elif layers[i]['type'] == 'Sigmoid':
            output[i] = activations.sigmoid_forward(output[i - 1], layers[i])
        elif layers[i]['type'] == 'Tanh':
            output[i] = activations.tanh_forward(output[i - 1], layers[i])
        elif layers[i]['type'] == 'batch_norm':
            output[i] = activations.batch_normalization_forward(
                output[i - 1], layers[i], params[i - 1])

    # The last layer (i == l) is the loss layer: it returns the objective,
    # the gradients of its own weights and biases, the derivative w.r.t. its
    # input, and the success rate on this batch.
    i = l
    [obj, grad_w, grad_b, input_back_deriv,
     success_rate] = loss_func(params[i - 1]['w'], params[i - 1]['b'],
                               output[i - 1]['data'], labels, layers[i]['num'],
                               1)

    param_grad[i - 1] = {'w': grad_w / batch_size, 'b': grad_b / batch_size}

    # Backward pass through layers l-1 .. 2, propagating input_back_deriv.
    for i in range(l - 1, 1, -1):
        param_grad[i - 1] = {}
        param_grad[i - 1]['w'] = np.array([])
        param_grad[i - 1]['b'] = np.array([])
        if layers[i]['type'] == 'IP':
            output[i]['diff'] = input_back_deriv
            param_grad[i - 1], input_back_deriv = \
                fully_connected.inner_product_backward(
                    output[i], output[i - 1], layers[i], params[i - 1])
        elif layers[i]['type'] == 'RELU':
            output[i]['diff'] = input_back_deriv
            input_back_deriv = activations.relu_backward(
                output[i], output[i - 1], layers[i])
            # Activation layers have no parameters, so their gradients stay empty.
            param_grad[i - 1]['w'] = np.array([])
            param_grad[i - 1]['b'] = np.array([])
        elif layers[i]['type'] == 'Sigmoid':
            output[i]['diff'] = input_back_deriv
            input_back_deriv = activations.sigmoid_backward(
                output[i], output[i - 1], layers[i])
            param_grad[i - 1]['w'] = np.array([])
            param_grad[i - 1]['b'] = np.array([])
        elif layers[i]['type'] == 'Tanh':
            output[i]['diff'] = input_back_deriv
            input_back_deriv = activations.tanh_backward(
                output[i], output[i - 1], layers[i])
            param_grad[i - 1]['w'] = np.array([])
            param_grad[i - 1]['b'] = np.array([])
        elif layers[i]['type'] == 'batch_norm':
            output[i]['diff'] = input_back_deriv
            param_grad[i - 1], input_back_deriv = \
                activations.batch_normalization_backward(
                    output[i], output[i - 1], layers[i], params[i - 1])
        # Average the gradients over the batch (empty arrays are unaffected).
        param_grad[i - 1]['w'] = param_grad[i - 1]['w'] / batch_size
        param_grad[i - 1]['b'] = param_grad[i - 1]['b'] / batch_size

    # Return the per-sample average loss, the batch success rate, and the
    # parameter gradients.
    return (obj / batch_size), success_rate, param_grad
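
A caller would typically fold the returned gradients into a parameter update. A minimal stochastic-gradient-descent sketch, assuming params and param_grad are both dictionaries indexed by layer and that empty gradient arrays mark parameter-free layers (the learning rate and the helper name are invented for illustration):

import numpy as np

def sgd_step(params, param_grad, lr=0.01):
    # In-place SGD update; skip layers whose gradients are empty arrays.
    for idx, grad in param_grad.items():
        if idx not in params or np.size(grad['w']) == 0:
            continue
        params[idx]['w'] -= lr * grad['w']
        params[idx]['b'] -= lr * grad['b']
    return params
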