# Example 1
            crop = x[start:start + self.dim[0], :]

            X[i, ] = crop
        return X


########################################################################

########################################################################
# main 00_train.py
########################################################################
if __name__ == "__main__":
    # check mode
    # "development": mode == True
    # "evaluation": mode == False
    mode, target = com.command_line_chk()
    if mode is None:
        sys.exit(-1)  # abort: no valid mode was supplied on the command line

    # make output directory
    os.makedirs(param["model_directory"], exist_ok=True)

    # initialize the visualizer
    # NOTE(review): this rebinds the class name `visualizer` to the instance,
    # so a second instantiation in this script would fail — confirm intent.
    visualizer = visualizer()

    # load base_directory list
    dirs = com.select_dirs(param=param, mode=mode, target=target)

    # loop of the base directory (machine types)
    for idx, target_dir in enumerate(dirs):
        print("\n===========================")
# Example 2
            com.logger.exception("no_wav_file!!")
        com.logger.info("=========================================")

    return files, labels


########################################################################

########################################################################
# main 01_test.py
########################################################################
if __name__ == "__main__":
    # check mode
    # "development": mode == True
    # "evaluation": mode == False
    # NOTE(review): here command_line_chk() is assigned to a single value,
    # while the training script unpacks `mode, target` — confirm the two
    # scripts use matching versions of the common module.
    mode = com.command_line_chk()
    if mode is None:
        sys.exit(-1)  # abort: no valid mode was supplied on the command line

    # select GPU 0 when CUDA is available, otherwise fall back to CPU
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # make output result directory
    os.makedirs(PARAM["result_directory"], exist_ok=True)

    # load base directory
    dirs = com.select_dirs(param=PARAM, mode=mode)

    # initialize lines in csv for AUC and pAUC
    csv_lines = []

    # loop of the base directory
# Example 3
def main():
    """
    Perform model training and validation.

    For each machine-type directory returned by ``com.select_dirs``:
    build a ``DcaseDataset``, split it into train/validation subsets
    (deterministic head/tail split controlled by
    ``PARAM["training"]["validation_split"]``), train a VAE for the
    configured number of epochs, and save the trained weights under
    ``PARAM["model_directory"]``.
    """
    # check mode
    # "development": mode == True
    # "evaluation": mode == False
    mode = com.command_line_chk()  # constant: True or False
    if mode is None:
        sys.exit(-1)  # abort: no valid mode was supplied on the command line

    # make output directory
    os.makedirs(PARAM["model_directory"], exist_ok=True)

    # load base_directory list
    # BUGFIX: removed a leftover hard-coded override of dir_list
    # ('/work/tamamori/.../dev_data/ToyCar') that defeated select_dirs()
    # and restricted training to a single machine type.
    dir_list = com.select_dirs(param=PARAM, mode=mode)

    # loop of the base directory (for each machine)
    for idx, target_dir in enumerate(dir_list):
        com.logger.info("===========================")
        com.logger.info("[%d/%d] %s", idx + 1, len(dir_list), target_dir)

        com.logger.info("============== DATASET_GENERATOR ==============")
        dcase_dataset = DcaseDataset(target_dir)
        n_samples = len(dcase_dataset)  # total number of frames
        # deterministic split: first train_size frames train, the rest validate
        train_size = int(n_samples *
                         (1.0 - PARAM["training"]["validation_split"]))
        dataset = {
            "train": Subset(dcase_dataset, list(range(0, train_size))),
            "val": Subset(dcase_dataset, list(range(train_size, n_samples))),
        }

        com.logger.info("============== DATALOADER_GENERATOR ==============")
        data_loader = {
            "train": torch.utils.data.DataLoader(
                dataset["train"],
                batch_size=PARAM["training"]["batch_size"],
                shuffle=PARAM["training"]["shuffle"],
                drop_last=True),
            # validation: fixed order, keep the final partial batch
            "val": torch.utils.data.DataLoader(
                dataset["val"],
                batch_size=PARAM["training"]["batch_size"],
                shuffle=False,
                drop_last=False),
        }

        com.logger.info("============== MODEL TRAINING ==============")
        model = VAE(x_dim=PARAM["feature"]["n_mels"] *
                    PARAM["feature"]["frames"],
                    h_dim=PARAM["model"]["hidden_dim"],
                    z_dim=PARAM["model"]["latent_dim"],
                    n_hidden=PARAM["model"]["n_hidden"]).to(DEVICE)

        optimizer = optim.Adam(model.parameters(),
                               weight_decay=PARAM["training"]["weight_decay"])
        criterion = nn.MSELoss(reduction='mean')

        summary(model,
                input_size=(PARAM["feature"]["n_mels"] *
                            PARAM["feature"]["frames"], ))

        loss = {"train": 0.0, "val": 0.0}
        for epoch in range(1, PARAM["training"]["epochs"] + 1):
            loss["train"] = training(model, data_loader["train"], optimizer,
                                     criterion)

            # BUGFIX: validate on the validation loader; the original passed
            # data_loader["train"] here, so the logged "validation" loss was
            # actually a second pass over the training data.
            loss["val"] = validation(model, data_loader["val"], criterion)

            com.logger.info(
                "Epoch %2d: Average train_loss: %.6f, "
                "Average validation_loss: %.6f", epoch, loss["train"],
                loss["val"])

        com.logger.info("============== SAVE MODEL ==============")
        # build the output path once; reused for saving and for the log line
        model_path = "%s/model_ae_%s.pt" % (
            PARAM["model_directory"], os.path.split(target_dir)[1])
        torch.save(model.state_dict(), model_path)
        com.logger.info("save_model -> %s", model_path)
        com.logger.info("============== END TRAINING ==============")