Code example #1
 def test_init(self, char_level, num_tokens, separator, token_to_index,
               expected_token_to_index):
     tokenizer = data.Tokenizer(char_level=char_level,
                                num_tokens=num_tokens,
                                token_to_index=token_to_index)
     assert tokenizer.separator == separator
     assert tokenizer.token_to_index == expected_token_to_index
Code example #2
File: test_data.py Project: aditya-chaturvedi/mlops
 def test_save_and_load(self):
     with tempfile.TemporaryDirectory() as dp:
         tokenizer = data.Tokenizer(
             char_level=False, token_to_index={"<PAD>": 0, "<UNK>": 1, "hello": 2, "world": 3}
         )
         fp = Path(dp, "label_encoder.json")
         tokenizer.save(fp=fp)
         tokenizer = data.Tokenizer.load(fp=fp)
         assert len(tokenizer) == 4
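
The same round trip works outside a test: save a fitted tokenizer next to the other artifacts and restore it before inference. A short usage sketch, where the import path and file location are assumptions rather than part of the listing:

from pathlib import Path

from tagifai import data  # assumed import; test_data.py imports the project's data module

# Persist a tokenizer with an explicit vocabulary, then restore it and
# confirm the vocabulary survived serialization.
tokenizer = data.Tokenizer(
    char_level=False, token_to_index={"<PAD>": 0, "<UNK>": 1, "hello": 2, "world": 3}
)
fp = Path("stores", "tokenizer.json")  # hypothetical location
fp.parent.mkdir(parents=True, exist_ok=True)
tokenizer.save(fp=fp)
restored = data.Tokenizer.load(fp=fp)
assert len(restored) == 4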
Code example #3
File: test_data.py Project: aditya-chaturvedi/mlops
 def test_fit_on_texts(self, char_level, num_tokens, texts, vocab_size):
     tokenizer = data.Tokenizer(char_level=char_level, num_tokens=num_tokens)
     tokenizer.fit_on_texts(texts=texts)
     assert len(tokenizer) == vocab_size
Code example #4
File: test_data.py Project: aditya-chaturvedi/mlops
 def setup_method(self):
     """Called before every method."""
     self.tokenizer = data.Tokenizer(char_level=True, num_tokens=None)
Code example #5
File: test_data.py Project: aditya-chaturvedi/mlops
class TestTokenizer:
    def setup_method(self):
        """Called before every method."""
        self.tokenizer = data.Tokenizer(char_level=True, num_tokens=None)

    def teardown_method(self):
        """Called after every method."""
        del self.tokenizer

    @pytest.mark.parametrize(
        "char_level, num_tokens, separator, token_to_index, expected_token_to_index",
        [
            (True, None, "", None, {"<PAD>": 0, "<UNK>": 1}),
            (False, None, " ", None, {"<PAD>": 0, "<UNK>": 1}),
            (
                False,
                None,
                " ",
                {"<PAD>": 0, "<UNK>": 1, "hello": 2},
                {"<PAD>": 0, "<UNK>": 1, "hello": 2},
            ),
        ],
    )
    def test_init(self, char_level, num_tokens, separator, token_to_index, expected_token_to_index):
        tokenizer = data.Tokenizer(
            char_level=char_level, num_tokens=num_tokens, token_to_index=token_to_index
        )
        assert tokenizer.separator == separator
        assert tokenizer.token_to_index == expected_token_to_index

    def test_len(self):
        assert len(self.tokenizer) == 2

    def test_str(self):
        assert str(self.tokenizer) == f"<Tokenizer(num_tokens={len(self.tokenizer)})>"

    @pytest.mark.parametrize(
        "char_level, num_tokens, texts, vocab_size",
        [(False, None, ["hello world", "goodbye"], 5), (False, 4, ["hello world", "goodbye"], 4)],
    )
    def test_fit_on_texts(self, char_level, num_tokens, texts, vocab_size):
        tokenizer = data.Tokenizer(char_level=char_level, num_tokens=num_tokens)
        tokenizer.fit_on_texts(texts=texts)
        assert len(tokenizer) == vocab_size

    @pytest.mark.parametrize(
        "tokenizer, texts, sequences, decoded",
        [
            (
                data.Tokenizer(
                    char_level=False,
                    token_to_index={"<PAD>": 0, "<UNK>": 1, "hello": 2, "world": 3},
                ),
                ["hello world", "hi world", "apple"],
                [[2, 3], [1, 3], [1]],
                ["hello world", "<UNK> world", "<UNK>"],
            ),
            (
                data.Tokenizer(
                    char_level=True, token_to_index={"<PAD>": 0, "<UNK>": 1, " ": 2, "a": 3, "b": 4}
                ),
                ["ab", "b", "a x ab"],
                [[3, 4], [4], [3, 2, 1, 2, 3, 4]],
                ["ab", "b", "a <UNK> ab"],
            ),
        ],
    )
    def test_encode_decode(self, tokenizer, texts, sequences, decoded):
        assert tokenizer.texts_to_sequences(texts=texts) == sequences
        assert tokenizer.sequences_to_texts(sequences=sequences) == decoded

    def test_save_and_load(self):
        with tempfile.TemporaryDirectory() as dp:
            tokenizer = data.Tokenizer(
                char_level=False, token_to_index={"<PAD>": 0, "<UNK>": 1, "hello": 2, "world": 3}
            )
            fp = Path(dp, "label_encoder.json")
            tokenizer.save(fp=fp)
            tokenizer = data.Tokenizer.load(fp=fp)
            assert len(tokenizer) == 4
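
Taken together, the assertions in this class pin down most of the Tokenizer behaviour: default <PAD>/<UNK> entries, a separator chosen by char_level, a num_tokens cap on fit_on_texts, unknown tokens mapped to <UNK> during encoding, and a JSON save/load round trip. Below is a minimal sketch reconstructed only from those assertions; the project's actual implementation may differ in details such as token ordering or error handling.

import json
from collections import Counter


class Tokenizer:
    """Minimal sketch consistent with the tests above; not the project's actual code."""

    def __init__(self, char_level, num_tokens=None, token_to_index=None):
        self.char_level = char_level
        self.separator = "" if char_level else " "
        self.num_tokens = num_tokens
        self.token_to_index = token_to_index or {"<PAD>": 0, "<UNK>": 1}
        self.index_to_token = {index: token for token, index in self.token_to_index.items()}

    def __len__(self):
        return len(self.token_to_index)

    def __str__(self):
        return f"<Tokenizer(num_tokens={len(self)})>"

    def _tokenize(self, text):
        return list(text) if self.char_level else text.split(" ")

    def fit_on_texts(self, texts):
        # Add the most frequent tokens, keeping the total vocabulary (including
        # the reserved <PAD>/<UNK> entries) within num_tokens when it is set.
        counts = Counter(token for text in texts for token in self._tokenize(text))
        limit = None if self.num_tokens is None else self.num_tokens - len(self.token_to_index)
        for token, _ in counts.most_common(limit):
            if token not in self.token_to_index:
                self.token_to_index[token] = len(self.token_to_index)
        self.index_to_token = {index: token for token, index in self.token_to_index.items()}
        return self

    def texts_to_sequences(self, texts):
        # Tokens missing from the vocabulary map to the <UNK> index.
        unk_index = self.token_to_index["<UNK>"]
        return [
            [self.token_to_index.get(token, unk_index) for token in self._tokenize(text)]
            for text in texts
        ]

    def sequences_to_texts(self, sequences):
        return [
            self.separator.join(self.index_to_token[index] for index in sequence)
            for sequence in sequences
        ]

    def save(self, fp):
        with open(fp, "w") as f:
            json.dump({"char_level": self.char_level, "token_to_index": self.token_to_index}, f)

    @classmethod
    def load(cls, fp):
        with open(fp) as f:
            kwargs = json.load(f)
        return cls(**kwargs)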
Code example #6
    X_val, X_test, y_val, y_test = data.iterative_train_test_split(
        X=X_, y=y_, train_size=0.5)

    # View slices
    test_df = pd.DataFrame({
        "text": X_test,
        "tags": label_encoder.decode(y_test)
    })
    cv_transformers_df = slice_dataframe(test_df, cv_transformers)
    print(f"{len(cv_transformers_df)} projects")
    print(cv_transformers_df[["text", "tags"]].head())
    short_text_df = slice_dataframe(test_df, short_text)
    print(f"{len(short_text_df)} projects")
    print(short_text_df[["text", "tags"]].head())

    # 8. Tokenize inputs
    tokenizer = data.Tokenizer(char_level=args.char_level)
    tokenizer.fit_on_texts(texts=X_train)
    X_train = np.array(tokenizer.texts_to_sequences(X_train), dtype=object)
    X_val = np.array(tokenizer.texts_to_sequences(X_val), dtype=object)
    X_test = np.array(tokenizer.texts_to_sequences(X_test), dtype=object)

    # 9. Create dataloaders
    train_dataset = data.CNNTextDataset(X=X_train,
                                        y=y_train,
                                        max_filter_size=args.max_filter_size)
    val_dataset = data.CNNTextDataset(X=X_val,
                                      y=y_val,
                                      max_filter_size=args.max_filter_size)
    test_dataset = data.CNNTextDataset(X=X_test,
                                       y=y_test,
                                       max_filter_size=args.max_filter_size)
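
Note that texts_to_sequences returns ragged lists, which is why the arrays above are created with dtype=object; the CNNTextDataset presumably pads each batch so that the widest convolution filter (max_filter_size) always fits. A small sketch of that kind of batch padding, under the assumption that this roughly matches what the dataset's collate step does:

import numpy as np


def pad_sequences(sequences, max_filter_size):
    """Hypothetical helper: right-pad ragged token-id lists with 0 (<PAD>).

    Every row is padded to the batch maximum, but never shorter than the
    largest filter width so the convolution always has enough positions.
    """
    max_len = max(max_filter_size, max(len(seq) for seq in sequences))
    padded = np.zeros((len(sequences), max_len), dtype=np.int64)
    for i, seq in enumerate(sequences):
        padded[i, : len(seq)] = seq
    return padded


# Example: three ragged sequences padded for filters up to width 4.
batch = [[2, 3], [4, 5, 6, 7, 8], [9]]
print(pad_sequences(batch, max_filter_size=4))
# [[2 3 0 0 0]
#  [4 5 6 7 8]
#  [9 0 0 0 0]]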
Code example #7
File: main.py Project: gzmkobe/MLOps
def run(params: Namespace, trial: optuna.trial._trial.Trial = None) -> Dict:
    """Operations for training.

    Args:
        params (Namespace): Input parameters for operations.
        trial (optuna.trial._trial.Trial, optional): Optuna optimization trial. Defaults to None.

    Returns:
        Artifacts to save and load for later.
    """
    # 1. Set seed
    utils.set_seed(seed=params.seed)

    # 2. Set device
    device = utils.set_device(cuda=params.cuda)

    # 3. Load data
    projects_fp = Path(config.DATA_DIR, "projects.json")
    tags_fp = Path(config.DATA_DIR, "tags.json")
    projects = utils.load_dict(filepath=projects_fp)
    tags_dict = utils.list_to_dict(utils.load_dict(filepath=tags_fp),
                                   key="tag")
    df = pd.DataFrame(projects)
    if params.shuffle:
        df = df.sample(frac=1).reset_index(drop=True)
    df = df[:params.subset]  # None = all samples

    # 4. Prepare data (feature engineering, filter, clean)
    df, tags_above_freq, tags_below_freq = data.prepare(
        df=df,
        include=list(tags_dict.keys()),
        exclude=config.EXCLUDED_TAGS,
        min_tag_freq=params.min_tag_freq,
    )
    params.num_samples = len(df)

    # 5. Preprocess data
    df.text = df.text.apply(data.preprocess,
                            lower=params.lower,
                            stem=params.stem)

    # 6. Encode labels
    labels = df.tags
    label_encoder = data.MultiLabelLabelEncoder()
    label_encoder.fit(labels)
    y = label_encoder.encode(labels)

    # Class weights
    all_tags = list(itertools.chain.from_iterable(labels.values))
    counts = np.bincount(
        [label_encoder.class_to_index[class_] for class_ in all_tags])
    class_weights = {i: 1.0 / count for i, count in enumerate(counts)}

    # 7. Split data
    utils.set_seed(seed=params.seed)  # needed for skmultilearn
    X = df.text.to_numpy()
    X_train, X_, y_train, y_ = data.iterative_train_test_split(
        X=X, y=y, train_size=params.train_size)
    X_val, X_test, y_val, y_test = data.iterative_train_test_split(
        X=X_, y=y_, train_size=0.5)
    test_df = pd.DataFrame({
        "text": X_test,
        "tags": label_encoder.decode(y_test)
    })

    # 8. Tokenize inputs
    tokenizer = data.Tokenizer(char_level=params.char_level)
    tokenizer.fit_on_texts(texts=X_train)
    X_train = np.array(tokenizer.texts_to_sequences(X_train), dtype=object)
    X_val = np.array(tokenizer.texts_to_sequences(X_val), dtype=object)
    X_test = np.array(tokenizer.texts_to_sequences(X_test), dtype=object)

    # 9. Create dataloaders
    train_dataset = data.CNNTextDataset(X=X_train,
                                        y=y_train,
                                        max_filter_size=params.max_filter_size)
    val_dataset = data.CNNTextDataset(X=X_val,
                                      y=y_val,
                                      max_filter_size=params.max_filter_size)
    train_dataloader = train_dataset.create_dataloader(
        batch_size=params.batch_size)
    val_dataloader = val_dataset.create_dataloader(
        batch_size=params.batch_size)

    # 10. Initialize model
    model = models.initialize_model(
        params=params,
        vocab_size=len(tokenizer),
        num_classes=len(label_encoder),
        device=device,
    )

    # 11. Train model
    logger.info(
        f"Parameters: {json.dumps(params.__dict__, indent=2, cls=NumpyEncoder)}"
    )
    params, model, loss = train.train(
        params=params,
        train_dataloader=train_dataloader,
        val_dataloader=val_dataloader,
        model=model,
        device=device,
        class_weights=class_weights,
        trial=trial,
    )

    # 12. Evaluate model
    artifacts = {
        "params": params,
        "label_encoder": label_encoder,
        "tokenizer": tokenizer,
        "model": model,
        "loss": loss,
    }
    device = torch.device("cpu")
    y_true, y_pred, performance = eval.evaluate(df=test_df,
                                                artifacts=artifacts)
    artifacts["performance"] = performance

    return artifacts
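
Because run() accepts an optional Optuna trial, it can serve directly as the body of a hyperparameter-search objective. One plausible wiring is sketched below; the parameter values and search ranges are illustrative assumptions, not the project's actual tuning script:

from argparse import Namespace

import optuna


def objective(trial: optuna.Trial) -> float:
    """Hypothetical Optuna objective built around run()."""
    params = Namespace(
        seed=1234, cuda=True, shuffle=True, subset=None, min_tag_freq=30,
        lower=True, stem=False, train_size=0.7, char_level=True,
        max_filter_size=10, num_epochs=50, patience=10, threshold=0.5,
        embedding_dim=128, num_filters=128, hidden_dim=128,
        batch_size=trial.suggest_categorical("batch_size", [32, 64, 128]),
        lr=trial.suggest_float("lr", 1e-4, 1e-2, log=True),
        dropout_p=trial.suggest_float("dropout_p", 0.3, 0.8),
    )
    artifacts = run(params=params, trial=trial)
    return artifacts["loss"]


study = optuna.create_study(study_name="optimization", direction="minimize")
study.optimize(objective, n_trials=20)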
Code example #8
File: train.py Project: GokuMohandas/MLOps
def train(params: Namespace, trial: optuna.trial._trial.Trial = None) -> Dict:
    """Operations for training.

    Args:
        params (Namespace): Input parameters for operations.
        trial (optuna.trial._trial.Trial, optional): Optuna optimization trial. Defaults to None.

    Returns:
        Artifacts to save and load for later.
    """
    # Set up
    utils.set_seed(seed=params.seed)
    device = utils.set_device(cuda=params.cuda)

    # Load features
    features_fp = Path(config.DATA_DIR, "features.json")
    tags_fp = Path(config.DATA_DIR, "tags.json")
    features = utils.load_dict(filepath=features_fp)
    tags_dict = utils.list_to_dict(utils.load_dict(filepath=tags_fp),
                                   key="tag")
    df = pd.DataFrame(features)
    if params.shuffle:
        df = df.sample(frac=1).reset_index(drop=True)
    df = df[:params.subset]  # None = all samples

    # Prepare data (filter, clean, etc.)
    df, tags_above_freq, tags_below_freq = data.prepare(
        df=df,
        include=list(tags_dict.keys()),
        exclude=config.EXCLUDED_TAGS,
        min_tag_freq=params.min_tag_freq,
    )
    params.num_samples = len(df)

    # Preprocess data
    df.text = df.text.apply(data.preprocess,
                            lower=params.lower,
                            stem=params.stem)

    # Encode labels
    labels = df.tags
    label_encoder = data.MultiLabelLabelEncoder()
    label_encoder.fit(labels)
    y = label_encoder.encode(labels)

    # Class weights
    all_tags = list(itertools.chain.from_iterable(labels.values))
    counts = np.bincount(
        [label_encoder.class_to_index[class_] for class_ in all_tags])
    class_weights = {i: 1.0 / count for i, count in enumerate(counts)}

    # Split data
    utils.set_seed(seed=params.seed)  # needed for skmultilearn
    X = df.text.to_numpy()
    X_train, X_, y_train, y_ = data.iterative_train_test_split(
        X=X, y=y, train_size=params.train_size)
    X_val, X_test, y_val, y_test = data.iterative_train_test_split(
        X=X_, y=y_, train_size=0.5)
    test_df = pd.DataFrame({
        "text": X_test,
        "tags": label_encoder.decode(y_test)
    })

    # Tokenize inputs
    tokenizer = data.Tokenizer(char_level=params.char_level)
    tokenizer.fit_on_texts(texts=X_train)
    X_train = np.array(tokenizer.texts_to_sequences(X_train), dtype=object)
    X_val = np.array(tokenizer.texts_to_sequences(X_val), dtype=object)
    X_test = np.array(tokenizer.texts_to_sequences(X_test), dtype=object)

    # Create dataloaders
    train_dataset = data.CNNTextDataset(X=X_train,
                                        y=y_train,
                                        max_filter_size=params.max_filter_size)
    val_dataset = data.CNNTextDataset(X=X_val,
                                      y=y_val,
                                      max_filter_size=params.max_filter_size)
    train_dataloader = train_dataset.create_dataloader(
        batch_size=params.batch_size)
    val_dataloader = val_dataset.create_dataloader(
        batch_size=params.batch_size)

    # Initialize model
    model = models.initialize_model(
        params=params,
        vocab_size=len(tokenizer),
        num_classes=len(label_encoder),
        device=device,
    )

    # Train model
    logger.info(
        f"Parameters: {json.dumps(params.__dict__, indent=2, cls=NumpyEncoder)}"
    )
    class_weights_tensor = torch.Tensor(np.array(list(class_weights.values())))
    loss_fn = nn.BCEWithLogitsLoss(weight=class_weights_tensor)
    optimizer = torch.optim.Adam(model.parameters(), lr=params.lr)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                           mode="min",
                                                           factor=0.05,
                                                           patience=5)

    # Trainer module
    trainer = Trainer(
        model=model,
        device=device,
        loss_fn=loss_fn,
        optimizer=optimizer,
        scheduler=scheduler,
        trial=trial,
    )

    # Train
    best_val_loss, best_model = trainer.train(params.num_epochs,
                                              params.patience,
                                              train_dataloader, val_dataloader)

    # Find best threshold
    _, y_true, y_prob = trainer.eval_step(dataloader=train_dataloader)
    params.threshold = find_best_threshold(y_true=y_true, y_prob=y_prob)

    # Evaluate model
    artifacts = {
        "params": params,
        "label_encoder": label_encoder,
        "tokenizer": tokenizer,
        "model": best_model,
        "loss": best_val_loss,
    }
    device = torch.device("cpu")
    y_true, y_pred, performance = eval.evaluate(df=test_df,
                                                artifacts=artifacts)
    artifacts["performance"] = performance

    return artifacts
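
find_best_threshold is not shown in this listing; one plausible implementation (an assumption, not the project's code) sweeps candidate cutoffs over the predicted probabilities and keeps the one that maximises micro-averaged F1:

import numpy as np
from sklearn.metrics import f1_score


def find_best_threshold(y_true: np.ndarray, y_prob: np.ndarray) -> float:
    """Hypothetical sketch: pick the probability cutoff with the best micro F1."""
    best_threshold, best_f1 = 0.5, 0.0
    for threshold in np.arange(0.05, 0.95, 0.05):
        y_pred = (y_prob >= threshold).astype(int)
        f1 = f1_score(y_true, y_pred, average="micro", zero_division=0)
        if f1 > best_f1:
            best_threshold, best_f1 = threshold, f1
    return float(best_threshold)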