Example #1
def test_6_2_1_1(self):
    # Train two otherwise identical networks on the same data,
    # one with a quadratic (MSE) cost and one with cross-entropy.
    nn1 = Network(cost=QuadraticCost(), utmode=True)
    nn1.fit(self.training_data[0], self.training_data[1],
            self.test_data[0], self.test_data[1])
    nn2 = Network(cost=CrossEntropy(), utmode=True)
    nn2.fit(self.training_data[0], self.training_data[1],
            self.test_data[0], self.test_data[1])
Example #2
def test_6_2_2_1(self):
    # A 784-30-10 network of linear units, trained with a cross-entropy cost.
    nn = Network(layer=[
        Layer(LinearUnit(), 784),
        Layer(LinearUnit(), 30),
        Layer(LinearUnit(), 10),
    ], cost=CrossEntropy(), utmode=True)
    # Note the very small learning rate; unbounded linear units
    # diverge easily at larger step sizes.
    nn.fit(self.training_data[0], self.training_data[1],
           self.test_data[0], self.test_data[1],
           learning_rate=7e-7)
Example #3
import numpy as np
import pandas as pd


def train_regression(net: Network,
                     dataset: pd.DataFrame,
                     max_epochs: int,
                     learning_rate: float,
                     batch_size: int = 1):
    """
    Train net for a regression task.
    The dataset consists of features and the regression target
    (in the last column).
    """
    # Hold out 20% of the data for validation.
    train_df, valid_df = split_dataset(dataset, 0.2)

    train_losses = []
    validation_losses = []
    for epoch in range(max_epochs):
        train_loss = 0
        validation_loss = 0
        # The +1 keeps the final mini-batch when the dataset size is an
        # exact multiple of batch_size.
        for i in range(0, len(train_df) - batch_size + 1, batch_size):
            x = np.array(train_df.iloc[i:i + batch_size, :-1])
            y = np.reshape(np.array(train_df.iloc[i:i + batch_size, -1]),
                           (batch_size, 1))
            loss = net.fit(x, y, learning_rate, batch_size)
            train_loss += loss

        for i in range(0, len(valid_df) - batch_size + 1, batch_size):
            x = np.array(valid_df.iloc[i:i + batch_size, :-1])
            y = np.reshape(np.array(valid_df.iloc[i:i + batch_size, -1]),
                           (batch_size, 1))
            loss = net.validate(x, y, learning_rate, batch_size)
            validation_loss += loss

        # Per-sample average losses for this epoch.
        train_losses.append(train_loss / len(train_df))
        validation_losses.append(validation_loss / len(valid_df))
    return train_losses, validation_losses
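
This example (and Example #6 below) relies on a split_dataset helper that is not shown. A minimal sketch of what it plausibly does, assuming a shuffle followed by a fractional hold-out; the shuffle and fixed seed are assumptions, not the original implementation:

import pandas as pd


def split_dataset(dataset: pd.DataFrame, valid_fraction: float):
    """Hypothetical helper: shuffle, then hold out valid_fraction for validation."""
    shuffled = dataset.sample(frac=1.0, random_state=0).reset_index(drop=True)
    cut = int(len(shuffled) * (1.0 - valid_fraction))
    return shuffled.iloc[:cut], shuffled.iloc[cut:]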
Example #4
import numpy as np

import mnist_loader


def main():
    training_data, validation_data, test_data = mnist_loader.load_data_wrapper()

    # Three layers of sigmoid neurons: 784 inputs -> 30 hidden -> 10 outputs.
    architecture = [
        {'size': 784, 'activation': 'sigmoid'},
        {'size': 30, 'activation': 'sigmoid'},
        {'size': 10, 'activation': 'sigmoid'},
    ]

    net = Network(architecture, 'mse', seed=1)

    # Training labels are one-hot vectors (50000 x 10); test labels are raw digits.
    train_X = np.array([pair[0] for pair in training_data]).reshape(50000, 784)
    train_Y = np.array([pair[1] for pair in training_data]).reshape(50000, 10)
    test_X = np.array([pair[0] for pair in test_data]).reshape(10000, 784)
    test_y = np.array([pair[1] for pair in test_data]).reshape(10000)

    # The positional arguments appear to be epochs, mini-batch size and learning rate.
    net.fit(train_X, train_Y, 30, 30, 80.0, test_X, test_y)
Example #5
import numpy as np


def main():
    batch_size: int = 10
    input_size: int = 20
    output_size: int = 3
    alpha: float = 0.01  # learning rate
    seed: int = 5

    # Each layer is (number of neurons, activation function, use bias).
    layers = [(50, SigmoidActivation(), True),
              (10, SigmoidActivation(), True),
              (3, LinearActivation(), True)]

    error = MeanAbsoluteError()
    network = Network(input_size, layers, error, seed)

    # Repeatedly fit a fixed random batch as a smoke test of the training loop.
    x = np.random.rand(batch_size, input_size)
    y = np.random.rand(batch_size, output_size)
    for i in range(1000):
        loss = network.fit(x, y, alpha)
        print(f"iteration {i}, loss = {loss}")
Example #6
import numpy as np
import pandas as pd


def train_classification(net: Network,
                         dataset: pd.DataFrame,
                         max_epochs: int,
                         learning_rate: float,
                         batch_size: int = 1,
                         multiclass: bool = False):
    """
    Train net for a classification task.
    The dataset consists of features and a class label
    (in the last column, named 'cls').

    If multiclass is True, the labels are one-hot encoded.
    """
    # Hold out 20% of the data for validation.
    train_df, valid_df = split_dataset(dataset, 0.2)
    # One-hot encode multiclass labels; for the binary case, shift the
    # labels (assumed to start at 1) down to {0, 1}.
    train_y_df = pd.get_dummies(
        train_df['cls'], dtype=float) if multiclass else train_df['cls'] - 1.0
    valid_y_df = pd.get_dummies(
        valid_df['cls'], dtype=float) if multiclass else valid_df['cls'] - 1.0
    y_dim = train_y_df.shape[1] if multiclass else 1

    train_losses = []
    validation_losses = []
    for epoch in range(max_epochs):
        train_loss = 0
        validation_loss = 0
        # The +1 keeps the final mini-batch when the dataset size is an
        # exact multiple of batch_size.
        for i in range(0, len(train_df) - batch_size + 1, batch_size):
            x = np.array(train_df.iloc[i:i + batch_size, :-1])
            y = np.reshape(np.array(train_y_df.iloc[i:i + batch_size]),
                           (batch_size, y_dim))
            loss = net.fit(x, y, learning_rate, batch_size)
            train_loss += loss

        for i in range(0, len(valid_df) - batch_size + 1, batch_size):
            x = np.array(valid_df.iloc[i:i + batch_size, :-1])
            y = np.reshape(np.array(valid_y_df.iloc[i:i + batch_size]),
                           (batch_size, y_dim))
            loss = net.validate(x, y, learning_rate, batch_size)
            validation_loss += loss

        # Per-sample average losses for this epoch.
        train_losses.append(train_loss / len(train_df))
        validation_losses.append(validation_loss / len(valid_df))
    return train_losses, validation_losses
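
A hypothetical invocation of train_classification, assuming a net built elsewhere with a matching output size and a DataFrame whose last column is the 'cls' label the code above expects:

df = pd.read_csv('data.csv')  # hypothetical file; last column must be 'cls'
train_losses, validation_losses = train_classification(
    net, df, max_epochs=50, learning_rate=0.01, batch_size=8, multiclass=True)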