Code example #1
    def __init__(self,
                 scp_fpath,
                 wavdir,
                 fs,
                 cw_len,
                 cw_shift,
                 fact_amp,
                 lab_dict_fpath,
                 lab2_dict_fpath=None,
                 doNormalize=False):

        # class1toclass2_dict_fpath=None,

        # wav list
        self.wav_lst = ReadList(scp_fpath)
        self.data_folder = wavdir

        self.fs = fs
        # Converting context and shift in samples
        self.wlen = int(fs * cw_len / 1000.00)
        self.wshift = int(fs * cw_shift / 1000.00)
        self.fact_amp = fact_amp
        self.doNormalize = doNormalize

        self.lab_dict = np.load(lab_dict_fpath, allow_pickle=True).item()

        if lab2_dict_fpath is not None:
            self.lab2_dict = np.load(lab2_dict_fpath, allow_pickle=True).item()
        else:
            self.lab2_dict = None
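The label dictionaries loaded above are plain Python dicts that were pickled into .npy files with np.save; since NumPy 1.16.3, np.load refuses pickled objects unless allow_pickle=True is passed (code example #4 below does this explicitly). A minimal sketch of the round trip, with a made-up file name and keys:

import numpy as np

# save a {utterance_id: label} mapping as a pickled .npy file (illustrative data)
lab_dict = {'spk01_utt001.wav': 0, 'spk02_utt001.wav': 1}
np.save('lab_dict.npy', lab_dict)

# np.load returns a 0-d object array; .item() unwraps the original dict
loaded = np.load('lab_dict.npy', allow_pickle=True).item()
assert loaded == lab_dict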
Code example #2
#[optimization]
lr=float(options.lr)
batch_size=int(options.batch_size)
N_epochs=int(options.N_epochs)
N_batches=int(options.N_batches)
N_eval_epoch=int(options.N_eval_epoch)
seed=int(options.seed)


# training list
#wav_lst_tr=ReadList(tr_lst)
#snt_tr=len(wav_lst_tr)

# test list
wav_lst_te=ReadList(te_lst)
snt_te=len(wav_lst_te)


# Folder creation
#try:
#    os.stat(output_folder)
#except:
#    os.mkdir(output_folder) 
    
    
# setting seed
torch.manual_seed(seed)
np.random.seed(seed)

# loss function
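ReadList is a small helper from SincNet's data_io module; the snippets here only show its call sites. A minimal sketch of an equivalent implementation, assuming the list file holds one entry (e.g. a relative wav path) per line:

def ReadList(list_file):
    # return the lines of the list file with trailing newlines stripped
    with open(list_file, 'r') as f:
        return [line.rstrip() for line in f]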
Code example #3
class_use_batchnorm = list(
    map(str_to_bool, options.class_use_batchnorm.split(',')))
class_use_laynorm = list(map(str_to_bool,
                             options.class_use_laynorm.split(',')))
class_act = list(map(str, options.class_act.split(',')))

#[optimization]
lr = float(options.lr)
batch_size = int(options.batch_size)
begin_epochs = int(options.begin_epochs)
N_epochs = int(options.N_epochs)
N_batches = int(options.N_batches)
N_eval_epoch = int(options.N_eval_epoch)
seed = int(options.seed)

# training list
wav_lst_tr = ReadList(tr_lst)
snt_tr = len(wav_lst_tr)

# test list
wav_lst_te = ReadList(te_lst)
snt_te = len(wav_lst_te)

# Folder creation
try:
    os.stat(output_folder)
except OSError:
    os.mkdir(output_folder)

# setting seed
torch.manual_seed(seed)
np.random.seed(seed)
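str_to_bool is another SincNet helper used throughout these snippets to turn the literal 'True'/'False' strings coming from the cfg file into Python booleans. A minimal sketch of an equivalent function:

def str_to_bool(s):
    # cfg options arrive as strings, not booleans
    if s == 'True':
        return True
    if s == 'False':
        return False
    raise ValueError('Expected "True" or "False", got: %s' % s)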
Code example #4
class_use_batchnorm = list(
    map(str_to_bool, options.class_use_batchnorm.split(',')))
class_use_laynorm = list(map(str_to_bool,
                             options.class_use_laynorm.split(',')))
class_act = list(map(str, options.class_act.split(',')))

#[optimization]
lr = float(options.lr)
batch_size = int(options.batch_size)
N_epochs = int(options.N_epochs)
N_batches = int(options.N_batches)
N_eval_epoch = int(options.N_eval_epoch)
seed = int(options.seed)

# training list
wav_lst_tr = ReadList(tr_lst)  # list of audio files for training
snt_tr = len(wav_lst_tr)

# test list
wav_lst_te = ReadList(te_lst)
snt_te = len(wav_lst_te)

# Converting context and shift in samples
wlen = int(fs * cw_len / 1000.00)
wshift = int(fs * cw_shift / 1000.00)

# Batch_dev
Batch_dev = 128

# Loading label dictionary
lab_dict = np.load(class_dict_file, allow_pickle=True).item()
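The windowing parameters come from the cfg file in milliseconds and are converted to samples here. As a worked example, assuming typical values of fs=16000 Hz, cw_len=200 ms and cw_shift=10 ms:

fs = 16000                              # sampling rate (Hz)
cw_len = 200                            # chunk length (ms)
cw_shift = 10                           # shift between chunks (ms)

wlen = int(fs * cw_len / 1000.00)       # 16000 * 0.200 = 3200 samples
wshift = int(fs * cw_shift / 1000.00)   # 16000 * 0.010 = 160 samples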
Code example #5
def get_cfg():
    # Reading cfg file
    options=read_conf()
    #[data]
    args = Namespace()
    args.tr_lst=options.tr_lst
    args.te_lst=options.te_lst
    args.pt_file=options.pt_file if options.pt_file_reset=='' else options.pt_file_reset
    args.class_dict_file=options.lab_dict
    args.data_folder=(options.data_folder if options.data_folder_reset=='' else options.data_folder_reset)+'/'
    args.output_folder=options.output_folder
    #[windowing]
    args.fs=int(options.fs)
    args.cw_len=int(options.cw_len)
    args.cw_shift=int(options.cw_shift)
    #[cnn]
    args.cnn_N_filt=list(map(int, options.cnn_N_filt.split(',')))
    args.cnn_len_filt=list(map(int, options.cnn_len_filt.split(',')))
    args.cnn_max_pool_len=list(map(int, options.cnn_max_pool_len.split(',')))
    args.cnn_use_laynorm_inp=str_to_bool(options.cnn_use_laynorm_inp)
    args.cnn_use_batchnorm_inp=str_to_bool(options.cnn_use_batchnorm_inp)
    args.cnn_use_laynorm=list(map(str_to_bool, options.cnn_use_laynorm.split(',')))
    args.cnn_use_batchnorm=list(map(str_to_bool, options.cnn_use_batchnorm.split(',')))
    args.cnn_act=list(map(str, options.cnn_act.split(',')))
    args.cnn_drop=list(map(float, options.cnn_drop.split(',')))
    #[dnn]
    args.fc_lay=list(map(int, options.fc_lay.split(',')))
    args.fc_drop=list(map(float, options.fc_drop.split(',')))
    args.fc_use_laynorm_inp=str_to_bool(options.fc_use_laynorm_inp)
    args.fc_use_batchnorm_inp=str_to_bool(options.fc_use_batchnorm_inp)
    args.fc_use_batchnorm=list(map(str_to_bool, options.fc_use_batchnorm.split(',')))
    args.fc_use_laynorm=list(map(str_to_bool, options.fc_use_laynorm.split(',')))
    args.fc_act=list(map(str, options.fc_act.split(',')))
    #[class]
    args.class_lay=list(map(int, options.class_lay.split(',')))
    args.class_drop=list(map(float, options.class_drop.split(',')))
    args.class_use_laynorm_inp=str_to_bool(options.class_use_laynorm_inp)
    args.class_use_batchnorm_inp=str_to_bool(options.class_use_batchnorm_inp)
    args.class_use_batchnorm=list(map(str_to_bool, options.class_use_batchnorm.split(',')))
    args.class_use_laynorm=list(map(str_to_bool, options.class_use_laynorm.split(',')))
    args.class_act=list(map(str, options.class_act.split(',')))
    #[optimization]
    args.lr=float(options.lr)
    args.batch_size=int(options.batch_size)
    args.N_epochs=int(options.N_epochs)
    args.N_batches=int(options.N_batches)
    args.N_eval_epoch=int(options.N_eval_epoch)
    args.seed=int(options.seed)
    # training list
    args.wav_lst_tr=ReadList(args.tr_lst)
    args.snt_tr=len(args.wav_lst_tr)
    # test list
    args.wav_lst_te=ReadList(args.te_lst)
    args.snt_te=len(args.wav_lst_te)
    args.eval = options.eval
    # Folder creation
    try:
        os.stat(args.output_folder)
    except OSError:
        os.mkdir(args.output_folder)
    return args
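A typical way to use get_cfg is to call it once at start-up and read everything from the returned Namespace. The sketch below only prints a few of the fields set above (read_conf parses the command line, so the script has to be launched with a valid cfg file):

if __name__ == '__main__':
    args = get_cfg()
    print('sampling rate:', args.fs)
    print('training sentences:', args.snt_tr)
    print('test sentences:', args.snt_te)
    print('window length:', args.cw_len, 'ms =', int(args.fs * args.cw_len / 1000.00), 'samples')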
Code example #6
File: predict_cpu.py  Project: mwang-lifesize/SincNet
    def __init__(self, weights_file=None):
        self.device = 'cpu'  # torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        # Reading cfg file
        options = read_conf()

        #[data]
        tr_lst = options.tr_lst
        te_lst = options.te_lst
        pt_file = options.pt_file
        class_dict_file = options.lab_dict
        data_folder = options.data_folder + '/'
        output_folder = options.output_folder

        #[windowing]
        fs = int(options.fs)
        cw_len = int(options.cw_len)
        cw_shift = int(options.cw_shift)

        #[cnn]
        cnn_N_filt = list(map(int, options.cnn_N_filt.split(',')))
        cnn_len_filt = list(map(int, options.cnn_len_filt.split(',')))
        cnn_max_pool_len = list(map(int, options.cnn_max_pool_len.split(',')))
        cnn_use_laynorm_inp = str_to_bool(options.cnn_use_laynorm_inp)
        cnn_use_batchnorm_inp = str_to_bool(options.cnn_use_batchnorm_inp)
        cnn_use_laynorm = list(
            map(str_to_bool, options.cnn_use_laynorm.split(',')))
        cnn_use_batchnorm = list(
            map(str_to_bool, options.cnn_use_batchnorm.split(',')))
        cnn_act = list(map(str, options.cnn_act.split(',')))
        cnn_drop = list(map(float, options.cnn_drop.split(',')))

        #[dnn]
        fc_lay = list(map(int, options.fc_lay.split(',')))
        fc_drop = list(map(float, options.fc_drop.split(',')))
        fc_use_laynorm_inp = str_to_bool(options.fc_use_laynorm_inp)
        fc_use_batchnorm_inp = str_to_bool(options.fc_use_batchnorm_inp)
        fc_use_batchnorm = list(
            map(str_to_bool, options.fc_use_batchnorm.split(',')))
        fc_use_laynorm = list(
            map(str_to_bool, options.fc_use_laynorm.split(',')))
        fc_act = list(map(str, options.fc_act.split(',')))

        #[class]
        self.class_lay = list(map(int, options.class_lay.split(',')))
        class_drop = list(map(float, options.class_drop.split(',')))
        class_use_laynorm_inp = str_to_bool(options.class_use_laynorm_inp)
        class_use_batchnorm_inp = str_to_bool(options.class_use_batchnorm_inp)
        class_use_batchnorm = list(
            map(str_to_bool, options.class_use_batchnorm.split(',')))
        class_use_laynorm = list(
            map(str_to_bool, options.class_use_laynorm.split(',')))
        class_act = list(map(str, options.class_act.split(',')))

        #[optimization]
        lr = float(options.lr)
        batch_size = int(options.batch_size)
        N_epochs = int(options.N_epochs)
        N_batches = int(options.N_batches)
        N_eval_epoch = int(options.N_eval_epoch)
        seed = int(options.seed)

        # training list
        wav_lst_tr = ReadList(tr_lst)
        snt_tr = len(wav_lst_tr)

        # test list
        wav_lst_te = ReadList(te_lst)
        snt_te = len(wav_lst_te)

        # setting seed
        torch.manual_seed(seed)
        np.random.seed(seed)

        # Converting context and shift in samples
        self.wlen = int(fs * cw_len / 1000.00)
        self.wshift = int(fs * cw_shift / 1000.00)

        # Batch_dev
        self.Batch_dev = 128

        # Feature extractor CNN
        CNN_arch = {
            'input_dim': self.wlen,
            'fs': fs,
            'cnn_N_filt': cnn_N_filt,
            'cnn_len_filt': cnn_len_filt,
            'cnn_max_pool_len': cnn_max_pool_len,
            'cnn_use_laynorm_inp': cnn_use_laynorm_inp,
            'cnn_use_batchnorm_inp': cnn_use_batchnorm_inp,
            'cnn_use_laynorm': cnn_use_laynorm,
            'cnn_use_batchnorm': cnn_use_batchnorm,
            'cnn_act': cnn_act,
            'cnn_drop': cnn_drop,
        }

        self.CNN_net = CNN(CNN_arch)
        self.CNN_net.to(self.device)

        # Loading label dictionary
        lab_dict = np.load(class_dict_file, allow_pickle=True).item()

        DNN1_arch = {
            'input_dim': self.CNN_net.out_dim,
            'fc_lay': fc_lay,
            'fc_drop': fc_drop,
            'fc_use_batchnorm': fc_use_batchnorm,
            'fc_use_laynorm': fc_use_laynorm,
            'fc_use_laynorm_inp': fc_use_laynorm_inp,
            'fc_use_batchnorm_inp': fc_use_batchnorm_inp,
            'fc_act': fc_act,
        }

        self.DNN1_net = MLP(DNN1_arch)
        #self.DNN1_net.cuda()
        self.DNN1_net.to(self.device)

        DNN2_arch = {
            'input_dim': fc_lay[-1],
            'fc_lay': self.class_lay,
            'fc_drop': class_drop,
            'fc_use_batchnorm': class_use_batchnorm,
            'fc_use_laynorm': class_use_laynorm,
            'fc_use_laynorm_inp': class_use_laynorm_inp,
            'fc_use_batchnorm_inp': class_use_batchnorm_inp,
            'fc_act': class_act,
        }

        self.DNN2_net = MLP(DNN2_arch)
        #self.DNN2_net.cuda()
        self.DNN2_net.to(self.device)

        pre_train_file = None
        if weights_file is not None and weights_file != 'none':
            # prefer the explicitly supplied weights file, otherwise fall back to pt_file
            pre_train_file = weights_file
        elif pt_file != 'none':
            pre_train_file = pt_file

        if pre_train_file is not None:
            print("loading pre trained file", pre_train_file)
            checkpoint_load = torch.load(pre_train_file, map_location=self.device)
            self.CNN_net.load_state_dict(checkpoint_load['CNN_model_par'])
            self.DNN1_net.load_state_dict(checkpoint_load['DNN1_model_par'])
            self.DNN2_net.load_state_dict(checkpoint_load['DNN2_model_par'])

        self.CNN_net.eval()
        self.DNN1_net.eval()
        self.DNN2_net.eval()
        test_flag = 1
        loss_sum = 0
        err_sum = 0
        err_sum_snt = 0
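The __init__ above belongs to a predictor class in predict_cpu.py whose name is not visible in this excerpt. A hypothetical usage sketch, assuming the class is called SincNetPredictor; the class name, the weights path and the single-chunk forward pass are illustrative assumptions, while CNN_net, DNN1_net, DNN2_net and wlen all come from the code above:

import torch

predictor = SincNetPredictor(weights_file='path/to/model_raw.pkl')  # hypothetical path

with torch.no_grad():
    chunk = torch.randn(1, predictor.wlen)   # one window of raw waveform samples
    pout = predictor.DNN2_net(predictor.DNN1_net(predictor.CNN_net(chunk)))
    pred = torch.argmax(pout, dim=1)         # predicted class index for this chunk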