Example #1
def build_model():
    # 4 inputs -> 10 hidden units (ReLU) -> 3 output classes (softmax),
    # e.g. a classifier for the Iris dataset
    model = NeuralNetwork(loss=CrossEntropy(),
                          n_iterations=800,
                          learning_rate=0.13)
    model.add(Layer(4, 10, activation=Relu()))
    model.add(Layer(10, 3, activation=Softmax()))
    return model
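For reference, the forward pass such a model computes can be written in a few lines of plain NumPy. This is a minimal sketch, not the library's API; the function and parameter names here are illustrative only:

import numpy as np

def forward(x, W1, b1, W2, b2):
    # hidden layer: 4 -> 10, ReLU
    h = np.maximum(0, x @ W1 + b1)
    # output layer: 10 -> 3, softmax over the class axis
    z = h @ W2 + b2
    z = z - z.max(axis=1, keepdims=True)  # stabilize the exponentials
    e = np.exp(z)
    return e / e.sum(axis=1, keepdims=True)

rng = np.random.default_rng(0)
probs = forward(rng.normal(size=(5, 4)),
                rng.normal(size=(4, 10)), np.zeros(10),
                rng.normal(size=(10, 3)), np.zeros(3))
print(probs.sum(axis=1))  # each row sums to 1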
Example #2
    def test_softmax_calculate_gradient(self):
        # Given
        pre_activation = np.array([[1, 2, 3, 6], [2, 4, 5, 6], [3, 8, 7, 6]])
        target = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]])
        softmax = Softmax()

        # When
        activation = softmax.apply_activation(pre_activation)
        grad = softmax.calculate_gradient(activation, target)
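The snippet cuts off before its `# Then` assertions. For softmax combined with a cross-entropy loss, the gradient with respect to the pre-activation reduces to `activation - target`, which is what a method like `calculate_gradient(activation, target)` typically returns. A sketch verifying that identity numerically, assuming the standard row-wise convention (the tested library's axis convention may differ):

import numpy as np

def softmax(z):
    e = np.exp(z - z.max(axis=1, keepdims=True))
    return e / e.sum(axis=1, keepdims=True)

z = np.array([[1., 2., 3., 6.], [2., 4., 5., 6.], [3., 8., 7., 6.]])
y = np.array([[1., 0., 0., 0.], [0., 1., 0., 0.], [0., 0., 1., 0.]])
p = softmax(z)
grad = p - y  # analytic gradient of cross-entropy w.r.t. the pre-activation

def loss(z):
    return -(y * np.log(softmax(z))).sum()

# finite-difference check of the identity
eps, num = 1e-6, np.zeros_like(z)
for i in range(z.shape[0]):
    for j in range(z.shape[1]):
        dz = np.zeros_like(z)
        dz[i, j] = eps
        num[i, j] = (loss(z + dz) - loss(z)) / eps
print(np.allclose(grad, num, atol=1e-4))  # True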
Example #3
    def __init__(self):
        # 784 -> 100 -> 50 -> 10 MLP, e.g. for MNIST digit classification
        self.d1_layer = Dense(784, 100)
        self.a1_layer = ReLu()
        self.drop1_layer = Dropout(0.5)   # drop half the units while training

        self.d2_layer = Dense(100, 50)
        self.a2_layer = ReLu()
        self.drop2_layer = Dropout(0.25)  # lighter dropout deeper in the net

        self.d3_layer = Dense(50, 10)
        self.a3_layer = Softmax()         # class probabilities over 10 digits
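The `Dropout(0.5)` and `Dropout(0.25)` layers above act only during training. A minimal sketch of inverted dropout, the common formulation (this class is illustrative, not the one used above):

import numpy as np

class Dropout:
    """Inverted dropout: zero a fraction p of units and rescale the rest
    by 1/(1-p) during training, so inference needs no correction."""
    def __init__(self, p):
        self.p = p
        self.mask = None

    def forward(self, x, training=True):
        if not training or self.p == 0.0:
            return x
        self.mask = (np.random.rand(*x.shape) >= self.p) / (1.0 - self.p)
        return x * self.mask

    def backward(self, grad):
        # gradient flows only through the units that were kept
        return grad * self.mask

print(Dropout(0.5).forward(np.ones((2, 4))))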
Example #4
    def test_softmax_apply_activation(self):
        # Given
        pre_activation = np.array([[1, 2, 3, 6], [2, 4, 5, 6], [3, 8, 7, 6]])
        softmax = Softmax()

        # When
        activation = softmax.apply_activation(pre_activation)

        # Then: this implementation normalizes each of the 4 columns to
        # sum to 1, so the grand total over the 3x4 matrix is 4.0
        sum_of_columns = 4.0
        self.assertTrue(
            np.isclose(sum_of_columns, np.sum(activation), rtol=1e-3))
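A numerically stable column-wise softmax that satisfies the same test. This is a standalone sketch of the technique, not the tested class; subtracting the per-column maximum before exponentiating prevents overflow without changing the result:

import numpy as np

def softmax_columns(z):
    # subtract the per-column max so the exponentials cannot overflow
    e = np.exp(z - z.max(axis=0, keepdims=True))
    return e / e.sum(axis=0, keepdims=True)

z = np.array([[1, 2, 3, 6], [2, 4, 5, 6], [3, 8, 7, 6]], dtype=float)
a = softmax_columns(z)
print(np.sum(a))                         # 4.0: one per column
print(np.allclose(a.sum(axis=0), 1.0))   # True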
Example #5
    def sequential(self,
                   network=[1, 1, 1],
                   activation=[ReLU(), Softmax()],
                   loss=Cross_entropy(),
                   regu=Default(),
                   weight_type='default'):
        # network: layer sizes; activation: [hidden, output] activations.
        # Note: the mutable list defaults and pre-instantiated objects are
        # evaluated once and shared across calls, so callers normally pass
        # every argument explicitly (see Example #6).
        self.net = network
        self.activation1 = activation[0]
        self.activation2 = activation[1]
        self.loss = loss
        self.regu = regu
        self.init_weight(weight_type)
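Example #6 below calls this method with weight_type='glorot_normal'. A sketch of what an init_weight routine like this might do, assuming standard Glorot-normal initialization (std = sqrt(2 / (fan_in + fan_out))); the function name and layout here are assumptions, not the library's code:

import numpy as np

def init_weights(sizes, weight_type='default', seed=0):
    """Return one weight matrix per consecutive pair of layer sizes."""
    rng = np.random.default_rng(seed)
    weights = []
    for fan_in, fan_out in zip(sizes[:-1], sizes[1:]):
        if weight_type == 'glorot_normal':
            std = np.sqrt(2.0 / (fan_in + fan_out))
        else:  # 'default': unit-variance Gaussian
            std = 1.0
        weights.append(rng.normal(0.0, std, size=(fan_out, fan_in)))
    return weights

W = init_weights([784, 128, 10], 'glorot_normal')
print([w.shape for w in W])  # [(128, 784), (10, 128)]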
Example #6
import pandas as pd  # the snippet starts mid-script; df and train are defined in earlier, truncated lines

# hold out the rows not used for training as a validation set
val = df.drop(train.index)

yr = train.iloc[:, 0].to_numpy()
# scale pixels to [0, 1]; labels become one-hot vectors
X_train, y_train = train.iloc[:, 1:].to_numpy() / 255.0, onehotcode(yr)

# reshape each sample and label into a column vector
X_train = X_train.reshape((X_train.shape[0], X_train.shape[1], 1))
y_train = y_train.reshape((y_train.shape[0], y_train.shape[1], 1))
print(X_train.shape, y_train.shape)

X_val, y_val = val.iloc[:, 1:].to_numpy() / 255.0, val.iloc[:, 0].to_numpy()

X_val = X_val.reshape((X_val.shape[0], X_val.shape[1], 1))
print(X_val.shape, y_val.shape)

# 784 -> 128 -> 10 network: tanh hidden layer, softmax output,
# L2 (ridge) regularization, Glorot-normal weight initialization
nn = NN()
nn.sequential(network=[784, 128, 10],
              activation=[Tanh(), Softmax()],
              loss=Cross_entropy(),
              regu=Ridge(n=X_train.shape[0], lmda=5),
              weight_type='glorot_normal')
nn.load_model('tanL2128')  # load previously trained weights
#nn.fit(X_train,y_train,X_val,y_val,32,2)
#nn.save_model('tanL2128')
nn.weight_heatmap()

df2 = pd.read_csv('mnist_test.csv')
X_test, y_test = df2.iloc[:, 1:].to_numpy() / 255.0, df2.iloc[:, 0].to_numpy()
X_test = X_test.reshape((X_test.shape[0], 28, 28))
print(X_test.shape, y_test.shape)
nn.annote_test(X_test[:100], 10, 10)  # annotate a 10x10 grid of test digits
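The script relies on an onehotcode helper that is not shown. A plausible stand-in for 10 MNIST classes (an assumption; the original may differ):

import numpy as np

def onehotcode(labels, n_classes=10):
    # hypothetical stand-in for the helper used above:
    # one row per label, a 1 in the label's column, 0 elsewhere
    out = np.zeros((labels.shape[0], n_classes))
    out[np.arange(labels.shape[0]), labels] = 1.0
    return out

print(onehotcode(np.array([3, 0, 9]), 10))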
Example #7
    def __init__(self):
        # layers are presumably populated by init(); softmax is the
        # network-level output activation
        self.layers = []
        self.activation = Softmax()
        self.init()
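Only the constructor is shown; a sketch of how such a class might chain its layer list and apply the softmax at the end (the forward method and the per-layer forward() interface are assumptions, not the original code):

import numpy as np

def softmax(z):
    e = np.exp(z - z.max(axis=1, keepdims=True))
    return e / e.sum(axis=1, keepdims=True)

def forward(layers, x):
    # hypothetical forward pass: run each layer in order, then
    # turn the final scores into class probabilities
    for layer in layers:
        x = layer.forward(x)
    return softmax(x)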