Code example #1
def test_model(state,
               reference_tsv_path,
               reduced_number_of_data=None,
               store_predictions_fname=None):
    dataset = DatasetDcase2019Task4(os.path.join(cfg.workspace),
                                    base_feature_dir=os.path.join(
                                        cfg.workspace, "dataset", "features"),
                                    save_log_feature=False)

    crnn_kwargs = state["model"]["kwargs"]
    crnn = CRNN(**crnn_kwargs)
    crnn.load(parameters=state["model"]["state_dict"])
    LOG.info("Model loaded at epoch: {}".format(state["epoch"]))
    pooling_time_ratio = state["pooling_time_ratio"]

    scaler = Scaler()
    scaler.load_state_dict(state["scaler"])
    classes = cfg.classes
    many_hot_encoder = ManyHotEncoder.load_state_dict(
        state["many_hot_encoder"])

    crnn = crnn.eval()
    [crnn] = to_cuda_if_available([crnn])
    transforms_valid = get_transforms(cfg.max_frames, scaler=scaler)

    LOG.info(reference_tsv_path)
    df = dataset.initialize_and_get_df(reference_tsv_path,
                                       reduced_number_of_data)
    strong_dataload = DataLoadDf(df,
                                 dataset.get_feature_file,
                                 many_hot_encoder.encode_strong_df,
                                 transform=transforms_valid)

    predictions = get_predictions(crnn,
                                  strong_dataload,
                                  many_hot_encoder.decode_strong,
                                  pooling_time_ratio,
                                  save_predictions=store_predictions_fname)
    compute_strong_metrics(predictions, df)

    weak_dataload = DataLoadDf(df,
                               dataset.get_feature_file,
                               many_hot_encoder.encode_weak,
                               transform=transforms_valid)
    weak_metric = get_f_measure_by_class(
        crnn, len(classes), DataLoader(weak_dataload,
                                       batch_size=cfg.batch_size))
    LOG.info("Weak F1-score per class: \n {}".format(
        pd.DataFrame(weak_metric * 100, many_hot_encoder.labels)))
    LOG.info("Weak F1-score macro averaged: {}".format(np.mean(weak_metric)))
Code example #2
    def set_df_list(self, train):
        dataset = DatasetDcase2019Task4(cfg.workspace,
                                        base_feature_dir=os.path.join(cfg.workspace, "dataset", "features"),
                                        save_log_feature=False)

        transforms = get_transforms(cfg.max_frames)

        weak_df = dataset.initialize_and_get_df(cfg.weak)
        load_weak = DataLoadDf(weak_df, dataset.get_feature_file, None, transform=transforms)
        if train:
            self.list_dataset = [load_weak]

        else:
            synthetic_df = dataset.initialize_and_get_df(cfg.synthetic, download=False)
            synthetic_df.onset = synthetic_df.onset * cfg.sample_rate // cfg.hop_length
            synthetic_df.offset = synthetic_df.offset * cfg.sample_rate // cfg.hop_length

            validation_df = dataset.initialize_and_get_df(cfg.validation)
            validation_df.onset = validation_df.onset * cfg.sample_rate // cfg.hop_length
            validation_df.offset = validation_df.offset * cfg.sample_rate // cfg.hop_length

            eval_desed_df = dataset.initialize_and_get_df(cfg.eval_desed)
            eval_desed_df.onset = eval_desed_df.onset * cfg.sample_rate // cfg.hop_length
            eval_desed_df.offset = eval_desed_df.offset * cfg.sample_rate // cfg.hop_length

            # many_hot_encoder = ManyHotEncoder(classes, n_frames=cfg.max_frames // pooling_time_ratio)

            load_synthetic = DataLoadDf(synthetic_df, dataset.get_feature_file, None, transform=transforms)
            load_validation = DataLoadDf(validation_df, dataset.get_feature_file, None, transform=transforms)
            load_eval_desed = DataLoadDf(eval_desed_df, dataset.get_feature_file, None, transform=transforms)


            self.list_dataset = [load_weak, load_synthetic, load_validation, load_eval_desed]

        scaler = Scaler()
        scaler.calculate_scaler(ConcatDataset(self.list_dataset))

        transforms = get_transforms(cfg.max_frames, scaler)
        for i in range(len(self.list_dataset)):
            self.list_dataset[i].set_transform(transforms)
        print(self.list_dataset)
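
The same seconds-to-frames conversion is repeated for every strongly annotated DataFrame above; a small helper along these lines (hypothetical name, same cfg fields) would remove the duplication:

def seconds_to_frames(df):
    # Convert onset/offset columns from seconds to feature-frame indices.
    df = df.copy()
    df.onset = df.onset * cfg.sample_rate // cfg.hop_length
    df.offset = df.offset * cfg.sample_rate // cfg.hop_length
    return df

synthetic_df = seconds_to_frames(dataset.initialize_and_get_df(cfg.synthetic, download=False))
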
Code example #3
File: main_triplet.py  Project: turpaultn/walle
                  fixed_segment=f_args.fixed_segment
                  )

    if resume_training is None:
        classes = dataset.classes
        many_hot_encoder = ManyHotEncoder(classes)
    else:
        many_hot_encoder = ManyHotEncoder.load_state_dict(state["many_hot_encoder"])
        classes = many_hot_encoder.labels
    encode_function_label = many_hot_encoder.encode_weak

    # Datasets
    trans_fr = [ApplyLog(), ToTensor(), Unsqueeze(0)]

    train_weak_df_fr = dfs["train"]
    train_weak_dl_fr = DataLoadDf(train_weak_df_fr, encode_function_label, transform=Compose(trans_fr))

    if type_positive != "label" or type_negative != "label":
        unlabel_df_fr = dataset.get_df_feat_dir(cfg.unlabel, subpart_data=subpart_data, frames_in_sec=frames_in_sec)
        unlabel_dl_fr = DataLoadDf(unlabel_df_fr, encode_function_label, transform=Compose(trans_fr))
        datasets_mean = [train_weak_dl_fr, unlabel_dl_fr]
    else:
        datasets_mean = [train_weak_dl_fr]
    # Normalize
    if resume_training is None:
        scaler = ScalerSum()
        scaler.calculate_scaler(ConcatDataset(datasets_mean))
    else:
        scaler = ScalerSum.load_state_dict(state["scaler"])
    LOG.debug(scaler.mean_)
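
For the resume branch to work, the training loop presumably stores these objects in a single checkpoint dictionary; a hedged sketch, assuming the encoder and scaler expose a state_dict() method symmetric to the load_state_dict calls above:

state = {
    "many_hot_encoder": many_hot_encoder.state_dict(),
    "scaler": scaler.state_dict(),
    # ... plus model weights, optimizer state, current epoch, etc.
}
torch.save(state, checkpoint_path)  # checkpoint_path is hypothetical
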
Code example #4
    dfs = get_dfs(dataset,
                  reduced_number_of_data,
                  separated_sources=use_separated_sources)

    # Meta path for psds
    durations_synth = get_durations_df(cfg.synthetic)
    many_hot_encoder = ManyHotEncoder(cfg.classes,
                                      n_frames=cfg.max_frames //
                                      pooling_time_ratio)
    encod_func = many_hot_encoder.encode_strong_df

    # Normalisation per audio or on the full dataset
    if cfg.scaler_type == "dataset":
        transforms = get_transforms(cfg.max_frames,
                                    add_axis_conv=add_axis_conv)
        weak_data = DataLoadDf(dfs["weak"], encod_func, transforms)
        unlabel_data = DataLoadDf(dfs["unlabel"], encod_func, transforms)
        train_synth_data = DataLoadDf(dfs["train_synthetic"], encod_func,
                                      transforms)
        scaler_args = []
        scaler = Scaler()
        # # Only on real data since that's our final goal and test data are real
        scaler.calculate_scaler(
            ConcatDataset([weak_data, unlabel_data, train_synth_data]))
        logger.debug(f"scaler mean: {scaler.mean_}")
    else:
        scaler_args = ["global", "min-max"]
        scaler = ScalerPerAudio(*scaler_args)

    transforms = get_transforms(cfg.max_frames,
                                scaler,
Code example #5
File: TestModel.py  Project: aagnone3/dc19t2
def test_model(state, reduced_number_of_data, store_predictions_fname=None):
    crnn_kwargs = state["model"]["kwargs"]
    crnn = CRNN(**crnn_kwargs)
    crnn.load(parameters=state["model"]["state_dict"])
    LOG.info("Model loaded at epoch: {}".format(state["epoch"]))
    pooling_time_ratio = state["pooling_time_ratio"]

    scaler = Scaler()
    scaler.load_state_dict(state["scaler"])
    classes = cfg.classes
    many_hot_encoder = ManyHotEncoder.load_state_dict(
        state["many_hot_encoder"])

    # ##############
    # Validation
    # ##############
    crnn = crnn.eval()
    [crnn] = to_cuda_if_available([crnn])
    transforms_valid = get_transforms(cfg.max_frames, scaler=scaler)

    # # 2018
    # LOG.info("Eval 2018")
    # eval_2018_df = dataset.initialize_and_get_df(cfg.eval2018, reduced_number_of_data)
    # # Strong
    # eval_2018_strong = DataLoadDf(eval_2018_df, dataset.get_feature_file, many_hot_encoder.encode_strong_df,
    #                               transform=transforms_valid)
    # predictions = get_predictions(crnn, eval_2018_strong, many_hot_encoder.decode_strong)
    # compute_strong_metrics(predictions, eval_2018_df, pooling_time_ratio)
    # # Weak
    # eval_2018_weak = DataLoadDf(eval_2018_df, dataset.get_feature_file, many_hot_encoder.encode_weak,
    #                             transform=transforms_valid)
    # weak_metric = get_f_measure_by_class(crnn, len(classes), DataLoader(eval_2018_weak, batch_size=cfg.batch_size))
    # LOG.info("Weak F1-score per class: \n {}".format(pd.DataFrame(weak_metric * 100, many_hot_encoder.labels)))
    # LOG.info("Weak F1-score macro averaged: {}".format(np.mean(weak_metric)))

    # Validation 2019
    # LOG.info("Validation 2019 (original code)")
    # b_dataset = B_DatasetDcase2019Task4(cfg.workspace,
    #                                   base_feature_dir=os.path.join(cfg.workspace, 'dataset', 'features'),
    #                                   save_log_feature=False)
    # b_validation_df = b_dataset.initialize_and_get_df(cfg.validation, reduced_number_of_data)
    # b_validation_df.to_csv('old.csv')
    # b_validation_strong = B_DataLoadDf(b_validation_df,
    #                                  b_dataset.get_feature_file, many_hot_encoder.encode_strong_df,
    #                                  transform=transforms_valid)

    # predictions2 = get_predictions(crnn, b_validation_strong, many_hot_encoder.decode_strong,
    #                               save_predictions=strore_predicitions_fname)
    # compute_strong_metrics(predictions2, b_validation_df, pooling_time_ratio)

    # b_validation_weak = B_DataLoadDf(b_validation_df, b_dataset.get_feature_file, many_hot_encoder.encode_weak,
    #                              transform=transforms_valid)
    # weak_metric = get_f_measure_by_class(crnn, len(classes), DataLoader(b_validation_weak, batch_size=cfg.batch_size))
    # LOG.info("Weak F1-score per class: \n {}".format(pd.DataFrame(weak_metric * 100, many_hot_encoder.labels)))
    # LOG.info("Weak F1-score macro averaged: {}".format(np.mean(weak_metric)))

    # ============================================================================================
    # ============================================================================================
    # ============================================================================================

    dataset = DatasetDcase2019Task4(feature_dir=cfg.feature_dir,
                                    local_path=cfg.workspace,
                                    exp_tag=cfg.exp_tag,
                                    save_log_feature=False)
    # Validation 2019
    LOG.info("Validation 2019")
    validation_df = dataset.initialize_and_get_df(cfg.validation,
                                                  reduced_number_of_data)
    validation_strong = DataLoadDf(validation_df,
                                   dataset.get_feature_file,
                                   many_hot_encoder.encode_strong_df,
                                   transform=transforms_valid)

    predictions = get_predictions(crnn,
                                  validation_strong,
                                  many_hot_encoder.decode_strong,
                                  save_predictions=store_predictions_fname)
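    # Map .npy feature filenames back to .wav so they line up with the reference annotations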
    vdf = validation_df.copy()
    vdf.filename = vdf.filename.str.replace('.npy', '.wav')
    pdf = predictions.copy()
    pdf.filename = pdf.filename.str.replace('.npy', '.wav')
    compute_strong_metrics(pdf, vdf, pooling_time_ratio)

    validation_weak = DataLoadDf(validation_df,
                                 dataset.get_feature_file,
                                 many_hot_encoder.encode_weak,
                                 transform=transforms_valid)
    weak_metric = get_f_measure_by_class(
        crnn, len(classes),
        DataLoader(validation_weak, batch_size=cfg.batch_size))
    LOG.info("Weak F1-score per class: \n {}".format(
        pd.DataFrame(weak_metric * 100, many_hot_encoder.labels)))
    LOG.info("Weak F1-score macro averaged: {}".format(np.mean(weak_metric)))
Code example #6
File: Embedding.py  Project: turpaultn/walle
    if torch.cuda.is_available():
        emb_model = emb_model.cuda()
    emb_model.eval()

    many_hot_encoder = ManyHotEncoder.load_state_dict(state['many_hot_encoder'])
    encode_function_label = many_hot_encoder.encode_weak
    scaler = ScalerSum.load_state_dict(state['scaler'])

    frames_in_sec = cfg.frames_in_sec

    transf = Compose([ApplyLog(), PadOrTrunc(nb_frames=cfg.frames), ToTensor(), Unsqueeze(0),
                      Normalize(scaler), Unsqueeze(1)])
    test_fr = dataset.get_df_feat_dir(cfg.test2018, frames_in_sec=frames_in_sec, subpart_data=subpart_data)
    print(len(test_fr))

    test_dataset = DataLoadDf(test_fr, many_hot_encoder.encode_weak, transform=transf)

    embed_set = "embedding"
    embed_dir = "stored_data/embeddings"
    embed_dir = os.path.join(embed_dir, embed_name, "embeddings")
    create_folder(embed_dir)
    fig_dir = os.path.join(embed_dir, "figures")
    create_folder(fig_dir)

    df_emb, embeddings = calculate_embedding(test_dataset, emb_model,
                                             savedir=os.path.join(embed_dir, embed_set), concatenate="append")
    print(embeddings.mean())
    print(embeddings.var())
    embeddings = sklearn.preprocessing.StandardScaler().fit_transform(embeddings.reshape(embeddings.shape[0], -1))
    print("normalized")
    print(embeddings.mean())
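
The excerpt creates fig_dir but the plotting step is not shown; a hedged sketch of one way the standardized embeddings could be projected to 2-D and saved there (PCA is an illustrative choice, not necessarily what the project uses):

import matplotlib
matplotlib.use("Agg")  # only saving figures, no display needed
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA

points_2d = PCA(n_components=2).fit_transform(embeddings)
plt.figure()
plt.scatter(points_2d[:, 0], points_2d[:, 1], s=3)
plt.title("Test embeddings (PCA)")
plt.savefig(os.path.join(fig_dir, "test_embeddings_pca.png"))
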
Code example #7
        [weak_df, validation_df, synthetic_df])

    # Be careful, frames is max_frames // pooling_time_ratio because max_pooling is applied on time axis in the model
    many_hot_encoder = ManyHotEncoder(classes,
                                      n_frames=cfg.max_frames //
                                      pooling_time_ratio)

    transforms = get_transforms(cfg.max_frames)

    # Divide weak in train and valid
    train_weak_df = weak_df.sample(frac=0.8, random_state=26)
    valid_weak_df = weak_df.drop(train_weak_df.index).reset_index(drop=True)
    train_weak_df = train_weak_df.reset_index(drop=True)
    LOG.debug(valid_weak_df.event_labels.value_counts())
    train_weak_data = DataLoadDf(train_weak_df,
                                 dataset.get_feature_file,
                                 many_hot_encoder.encode_strong_df,
                                 transform=transforms)

    # Divide synthetic in train and valid
    filenames_train = synthetic_df.filename.drop_duplicates().sample(
        frac=0.8, random_state=26)
    train_synth_df = synthetic_df[synthetic_df.filename.isin(filenames_train)]
    valid_synth_df = synthetic_df.drop(
        train_synth_df.index).reset_index(drop=True)

    # Put train_synth in frames so many_hot_encoder can work.
    #  Not doing it for valid, because not using labels (when prediction) and event based metric expect sec.
    train_synth_df_frames = train_synth_df.copy()
    train_synth_df_frames.onset = train_synth_df_frames.onset * cfg.sample_rate // cfg.hop_length // pooling_time_ratio
    train_synth_df_frames.offset = train_synth_df_frames.offset * cfg.sample_rate // cfg.hop_length // pooling_time_ratio
    LOG.debug(valid_synth_df.event_label.value_counts())
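
The synthetic split above samples whole filenames so every event of a clip stays on the same side of the split; a generic helper capturing that pattern (hypothetical name, same frac and random_state) could be reused for other strongly labeled sets:

def split_by_filename(df, frac=0.8, random_state=26):
    # Sample files rather than rows so a clip's events never leak across the split.
    train_files = df.filename.drop_duplicates().sample(frac=frac, random_state=random_state)
    in_train = df.filename.isin(train_files)
    return (df[in_train].reset_index(drop=True),
            df[~in_train].reset_index(drop=True))

train_synth_df, valid_synth_df = split_by_filename(synthetic_df)
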
Code example #8
    train_weak_df = dfs["train"]
    classes = dataset.classes
    many_hot_encoder = ManyHotEncoder(classes)

    # ##############
    # Triplet dataset
    # #############
    batch_size = cfg.batch_size
    num_workers = cfg.num_workers

    list_trans_fr = [ApplyLog(), ToTensor(), Unsqueeze(0)]
    if args.segment:
        list_trans_fr.append(Unsqueeze(0))

    train_set = DataLoadDf(train_weak_df,
                           many_hot_encoder.encode_weak,
                           Compose(list_trans_fr),
                           return_indexes=False)
    LOG.debug("len train : {}".format(len(train_set)))
    # train_load = DataLoader(train_set, batch_size=batch_size, num_workers=num_workers, shuffle=True,
    #                         drop_last=True, collate_fn=default_collate)

    # scaler = Scaler()
    scaler = ScalerSum()
    scaler.calculate_scaler(train_set)
    LOG.debug(scaler.mean_)

    list_trans_fr.append(Normalize(scaler))
    train_set.set_transform(Compose(list_trans_fr))
    # Validation data
    valid_weak_df = dfs["valid"]
    if valid_weak_df is not None:
Code example #9
File: main_classifier.py  Project: turpaultn/walle
    LOG.info("train_classes repartition: \n {}".format(
        train_weak_df.event_labels.value_counts()))
    classes = dataset.classes
    many_hot_encoder = ManyHotEncoder(classes)

    # Model
    batch_size = cfg.batch_size
    num_workers = cfg.num_workers

    list_trans_fr = [ApplyLog(), ToTensor(), Unsqueeze(0)]

    if args.segment:
        list_trans_fr.append(Unsqueeze(0))

    train_set = DataLoadDf(train_weak_df,
                           many_hot_encoder.encode_weak,
                           Compose(list_trans_fr),
                           return_indexes=False)
    if args.balance:
        train_sampler = CategoriesSampler(train_set.df.event_labels, classes,
                                          round(cfg.batch_size / len(classes)))
        train_load = DataLoader(train_set,
                                num_workers=num_workers,
                                batch_sampler=train_sampler)
    else:
        train_load = DataLoader(train_set,
                                num_workers=num_workers,
                                batch_size=batch_size,
                                shuffle=True)
        train_sampler = train_load.batch_sampler
    LOG.debug("len train : {}".format(len(train_set)))
    scaler = ScalerSum()
Code example #10
                                             reduced_number_of_data,
                                             training=True)
    validation_df = dataset.initialize_and_get_df(cfg.validation,
                                                  reduced_number_of_data,
                                                  training=True)
    test_df = dataset.initialize_and_get_df(cfg.test,
                                            reduced_number_of_data,
                                            training=True)

    many_hot_encoder = ManyHotEncoder(cfg.classes,
                                      n_frames=cfg.max_frames //
                                      pooling_time_ratio)
    transforms = get_transforms(cfg.max_frames)

    train_data = DataLoadDf(train_df,
                            dataset.get_feature_file,
                            many_hot_encoder.encode_weak,
                            transform=transforms)
    validation_data = DataLoadDf(validation_df,
                                 dataset.get_feature_file,
                                 many_hot_encoder.encode_weak,
                                 transform=transforms)
    test_data = DataLoadDf(test_df,
                           dataset.get_feature_file,
                           many_hot_encoder.encode_weak,
                           transform=transforms)

    list_dataset = [train_data]
    batch_sizes = [cfg.batch_size]
    # batch_sizes = [cfg.batch_size // len(list_dataset)] * len(list_dataset)
    weak_mask = slice(cfg.batch_size)
    strong_mask = None
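
Here weak_mask = slice(cfg.batch_size) marks the whole batch as weakly labeled and strong_mask stays None; a hedged, self-contained sketch of how such a mask is typically applied when forming the weak-label loss (shapes and the loss are placeholders):

import torch
import torch.nn as nn

batch_size = 24                           # stands in for cfg.batch_size
weak_mask = slice(batch_size)             # first batch_size items carry weak labels
predictions = torch.rand(batch_size, 10)  # clip-level probabilities for 10 classes
targets = torch.randint(0, 2, (batch_size, 10)).float()

weak_loss = nn.BCELoss()(predictions[weak_mask], targets[weak_mask])
# With strong_mask = None, no frame-level (strong) loss term would be added.
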
Code example #11
File: common.py  Project: turpaultn/walle
def datasets_classif(model,
                     train_weak_embed,
                     valid_weak_dl_fr,
                     test_dl_fr,
                     args,
                     many_hot_encoder,
                     classes,
                     save_name="",
                     eval_dl=None):
    encode_function_label = many_hot_encoder.encode_weak
    num_workers = cfg.num_workers
    model.eval()
    embed_dir = "stored_data/embeddings"
    embed_dir = os.path.join(embed_dir, save_name)
    create_folder(embed_dir)
    fig_dir = os.path.join(embed_dir, "figures")
    create_folder(fig_dir)

    if args.agg_time is not None:
        trans_embedding = [ToTensor(), View(-1)]
    else:
        trans_embedding = [ToTensor()]

    model = to_cuda_if_available(model)
    embed_set = "final"
    train_embed_dir = os.path.join(embed_dir, embed_set)
    df_weak, embed_weak = calculate_embedding(train_weak_embed,
                                              model,
                                              savedir=train_embed_dir,
                                              concatenate="append")
    weak_embed = DataLoadDf(df_weak,
                            encode_function_label,
                            transform=Compose(trans_embedding))
    LOG.info(f"len weak embed: {len(weak_embed)}")
    weak_embed.set_transform(Compose(trans_embedding))

    batch_size_classif = cfg.batch_size_classif
    df_valid, embed_valid = calculate_embedding(valid_weak_dl_fr,
                                                model,
                                                savedir=train_embed_dir,
                                                concatenate="append")

    valid_embed = DataLoadDf(df_valid,
                             encode_function_label,
                             transform=Compose(trans_embedding))
    embed_set = "final_test"
    test_embed_dir = os.path.join(embed_dir, embed_set)
    df_test_embed, emb_test = calculate_embedding(test_dl_fr,
                                                  model,
                                                  savedir=test_embed_dir,
                                                  concatenate="append")

    test_embed = DataLoadDf(df_test_embed,
                            encode_function_label,
                            transform=Compose(trans_embedding))

    if args.balance:
        n_per_class = max(round(batch_size_classif / len(classes)), 1)
        weak_sampler = CategoriesSampler(weak_embed.df.event_labels, classes,
                                         n_per_class)
        weak_embed_loader = DataLoader(weak_embed,
                                       batch_sampler=weak_sampler,
                                       num_workers=num_workers)
        valid_sampler = CategoriesSampler(valid_embed.df.event_labels, classes,
                                          n_per_class)
        valid_embed_loader = DataLoader(valid_embed,
                                        batch_sampler=valid_sampler,
                                        num_workers=num_workers)
        test_sampler = CategoriesSampler(test_embed.df.event_labels, classes,
                                         n_per_class)
        test_embed_loader = DataLoader(test_embed,
                                       batch_sampler=test_sampler,
                                       num_workers=num_workers)
    else:
        weak_embed_loader = DataLoader(weak_embed,
                                       batch_size=batch_size_classif,
                                       num_workers=num_workers,
                                       shuffle=True,
                                       drop_last=True)
        valid_embed_loader = DataLoader(valid_embed,
                                        batch_size=batch_size_classif,
                                        shuffle=False,
                                        num_workers=num_workers,
                                        drop_last=False)
        test_embed_loader = DataLoader(test_embed,
                                       batch_size=batch_size_classif,
                                       shuffle=False,
                                       num_workers=num_workers,
                                       drop_last=False)

    if eval_dl is not None:
        model = to_cuda_if_available(model)
        embed_set = "final_eval"
        eval_embed_dir = os.path.join(embed_dir, embed_set)
        df_eval_embed, embed_eval = calculate_embedding(eval_dl,
                                                        model,
                                                        savedir=eval_embed_dir,
                                                        concatenate="append")

        eval_embed = DataLoadDf(df_eval_embed,
                                encode_function_label,
                                transform=Compose(trans_embedding))
        if args.balance:
            eval_sampler = CategoriesSampler(eval_embed.df.event_labels,
                                             classes, n_per_class)
            eval_embed_loader = DataLoader(eval_embed,
                                           batch_sampler=eval_sampler,
                                           num_workers=num_workers)
        else:
            eval_embed_loader = DataLoader(eval_embed,
                                           batch_size=batch_size_classif,
                                           shuffle=False,
                                           num_workers=num_workers,
                                           drop_last=False)
    else:
        eval_embed_loader = None

    model = to_cpu(model)
    return {
        "train": weak_embed_loader,
        "valid": valid_embed_loader,
        "test": test_embed_loader,
        "eval": eval_embed_loader
    }
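
A hedged sketch of how the returned dictionary of loaders might be consumed downstream (the loop body is a placeholder, not code from the repository):

loaders = datasets_classif(model, train_weak_embed, valid_weak_dl_fr, test_dl_fr,
                           args, many_hot_encoder, classes, save_name="experiment_1")

for batch in loaders["train"]:
    # each batch holds embedding tensors and their weak-label targets
    pass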