Example 1
import numpy as np
import pandas as pd

# Network and split_dataset are project-local helpers and are assumed to be importable here.
def train_regression(net: Network,
                     dataset: pd.DataFrame,
                     max_epochs: int,
                     learning_rate: float,
                     batch_size: int = 1):
    """
    Train net for a regression task.
    The dataset consists of features and regression value (in the last column).
    """
    # Hold out a portion of the data for validation (0.2 is assumed to be the validation fraction).
    train_df, valid_df = split_dataset(dataset, 0.2)

    train_losses = []
    validation_losses = []
    for epoch in range(max_epochs):
        train_loss = 0
        validation_loss = 0
        # Iterate over complete mini-batches of the training set (a trailing partial batch is skipped).
        for i in range(0, len(train_df) - batch_size + 1, batch_size):
            x = np.array(train_df.iloc[i:i + batch_size, :-1])
            y = np.reshape(np.array(train_df.iloc[i:i + batch_size, -1]),
                           (batch_size, 1))
            loss = net.fit(x, y, learning_rate, batch_size)
            train_loss += loss

        # Accumulate the loss over the validation set.
        for i in range(0, len(valid_df) - batch_size + 1, batch_size):
            x = np.array(valid_df.iloc[i:i + batch_size, :-1])
            y = np.reshape(np.array(valid_df.iloc[i:i + batch_size, -1]),
                           (batch_size, 1))
            loss = net.validate(x, y, learning_rate, batch_size)
            validation_loss += loss

        train_losses.append(train_loss / len(train_df))
        validation_losses.append(validation_loss / len(valid_df))
    return train_losses, validation_losses
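
A minimal usage sketch for train_regression, assuming the project-local Network class and split_dataset helper from the listing above are importable; the Network constructor, layer sizes, and hyperparameters below are illustrative assumptions, not part of the original code. The sketch mainly shows the expected DataFrame layout: feature columns first, regression target in the last column.

import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
features = rng.normal(size=(500, 3))
target = features @ np.array([1.5, -2.0, 0.5]) + rng.normal(scale=0.1, size=500)
# Feature columns first, regression target as the last column.
df = pd.DataFrame(np.column_stack([features, target]),
                  columns=['x1', 'x2', 'x3', 'y'])

net = Network([3, 16, 1])  # assumed constructor: 3 inputs, one hidden layer, 1 output
train_losses, validation_losses = train_regression(
    net, df, max_epochs=50, learning_rate=0.01, batch_size=16)
print(train_losses[-1], validation_losses[-1])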
Example 2
def train_classification(net: Network,
                         dataset: pd.DataFrame,
                         max_epochs: int,
                         learning_rate: float,
                         batch_size: int = 1,
                         multiclass: bool = False):
    """
    Train net for a classification task.
    The dataset consists of features and class label (in the last column).

    If the multiclass is True, uses one-hot encoding.
    """

    # Hold out a portion of the data for validation (0.2 is assumed to be the validation fraction).
    train_df, valid_df = split_dataset(dataset, 0.2)
    # Multiclass: one-hot encode the 'cls' column; binary: shift the labels down by one (e.g. 1/2 -> 0/1).
    train_y_df = pd.get_dummies(
        train_df['cls'], dtype=float) if multiclass else train_df['cls'] - 1.0
    valid_y_df = pd.get_dummies(
        valid_df['cls'], dtype=float) if multiclass else valid_df['cls'] - 1.0
    # Output dimension: one column per class when one-hot encoded, otherwise a single value.
    y_dim = train_y_df.shape[1] if multiclass else 1

    train_losses = []
    validation_losses = []
    for epoch in range(max_epochs):
        train_loss = 0
        validation_loss = 0
        # Iterate over complete mini-batches of the training set (a trailing partial batch is skipped).
        for i in range(0, len(train_df) - batch_size + 1, batch_size):
            x = np.array(train_df.iloc[i:i + batch_size, :-1])
            y = np.reshape(np.array(train_y_df.iloc[i:i + batch_size]),
                           (batch_size, y_dim))
            loss = net.fit(x, y, learning_rate, batch_size)
            train_loss += loss

        # Accumulate the loss over the validation set.
        for i in range(0, len(valid_df) - batch_size + 1, batch_size):
            x = np.array(valid_df.iloc[i:i + batch_size, :-1])
            y = np.reshape(np.array(valid_y_df.iloc[i:i + batch_size]),
                           (batch_size, y_dim))
            loss = net.validate(x, y, learning_rate, batch_size)
            validation_loss += loss

        train_losses.append(train_loss / len(train_df))
        validation_losses.append(validation_loss / len(valid_df))
    return train_losses, validation_losses
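
A matching usage sketch for train_classification, under the same assumptions as above (project-local Network and split_dataset, an assumed Network constructor). It illustrates that the label column must be named 'cls', must be the last column, and should hold integer labels, which are one-hot encoded when multiclass=True.

import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
features = rng.normal(size=(300, 4))
df = pd.DataFrame(features, columns=['f1', 'f2', 'f3', 'f4'])
df['cls'] = rng.integers(1, 4, size=300)  # three classes labelled 1, 2, 3; 'cls' is the last column

net = Network([4, 16, 3])  # assumed constructor: 4 inputs, 3 output units for the 3 one-hot classes
train_losses, validation_losses = train_classification(
    net, df, max_epochs=30, learning_rate=0.01, batch_size=8, multiclass=True)
print(train_losses[-1], validation_losses[-1])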