import os

import mir_eval
import numpy as np
import pandas as pd

# project-specific modules used by the examples below
import models
import utils_train


def main(args):

    pth_model = args.pth_model
    save_path = args.save_path
    list_of_files = args.list_of_files

    # first row: directory holding the audio; remaining rows: wav filenames
    file_list = np.array(pd.read_csv(list_of_files, header=None))
    audio_path = file_list[0][0]
    fname_list = file_list[1:]

    save_key = 'exp5multif0'
    model_path = os.path.join(pth_model, "{}.pkl".format(save_key))

    model = models.build_model3()
    model.load_weights(model_path)

    model.compile(loss=utils_train.bkld,
                  metrics=['mse', utils_train.soft_binary_accuracy],
                  optimizer='adam')

    print("Model compiled")

    for fname in fname_list:

        fname = fname[0]

        if not fname.endswith('.wav'): continue

        # drop the 'rev_' prefix
        if 'rev_' in fname:
            fname = fname[4:]

        # predict using trained model
        predicted_output, _, _ = utils_train.get_single_test_prediction(
            model, npy_file=None, audio_file=os.path.join(audio_path, fname))

        #predicted_output = medfilt2d(predicted_output, kernel_size=(1, 11))

        np.save(
            os.path.join(save_path,
                         "{}_prediction.npy".format(fname.split('.')[0])),
            predicted_output.astype(np.float32))

        # get multif0 output from prediction
        for thresh in [0.2, 0.3, 0.4, 0.5]:

            est_times, est_freqs = utils_train.pitch_activations_to_mf0(
                predicted_output, thresh)

            for i, (tms, fqs) in enumerate(zip(est_times, est_freqs)):
                if any(fqs <= 0):
                    est_freqs[i] = np.array([f for f in fqs if f > 0])

            output_mf0 = os.path.join(
                save_path, "{}_{}.csv".format(fname.split('.')[0], thresh))
            utils_train.save_multif0_output(est_times, est_freqs, output_mf0)

            print("     Multiple F0 prediction exported for {}".format(fname))
Example #2
def run_model3(store_data_file, store_weather_file, test_data_file, model_param=1, validate_only=False):
    print "start here"
    test_result_file ='test_result.csv'

    # write header to test result
    with open(test_result_file, 'w') as f:
        f.write('id,units\n')

    # load data
    store_data, store_weather, test = load_data2(
        store_data_file, store_weather_file, test_data_file)

    # compute max item sales for each store as denominator
    store_data_max = store_data.groupby(level=1).max()

    # develop training and validation set
    train, valid = develop_valid_set2(store_data, store_weather, valid_size=100)

    # categorize testing data with a relevant but much smaller training set
    target_set = build_target_set3(train, valid, test, store_weather, store_data_max)

    # run prediction on testing data of each category
    for col, trn, vld, tst in target_set:
        print "%s, train(%d), valid(%d), test(%d)" % (col, len(trn), len(vld), len(tst))

        # normalize training, validation and testing data sets
        nm_trn = normalize_store_data(trn, store_data_max)
        nm_vld = normalize_store_data(vld, store_data_max)
        nm_tst = normalize_store_data(tst, store_data_max)

        Y_hat = build_model3(nm_trn, nm_vld, nm_tst, store_weather,
                             column=col, alpha_train=model_param)

        # denormalize the sale
        Y_hat2 = denormalize_store_data(trn, vld, tst, Y_hat, store_data_max, column=col)

        # evaluate error in training and validation set
        e1, e2 = eval_model(trn, vld, Y_hat2, column=col)
        print "error is: train(%f), valid(%f)" % (e1, e2)

        # write results to test result
        write_submission(trn, vld, tst, Y_hat2, test_result_file, 'valid_result', column=col)

    # write out zero estimation
    if not validate_only:
        write_submission_zero(test, store_data_max, test_result_file)
Example #3
def main(args):

    batch_size = 32
    active_str = 100
    muxrate = 32

    save_key = args.save_key
    data_splits_file = args.data_splits_file

    if args.model_name == 'model1':
        model = models.build_model1()
    elif args.model_name == 'model2':
        model = models.build_model2()
    elif args.model_name == 'model3':
        model = models.build_model3()
    else:
        print("Specified model does not exist. "
              "Please choose a valid model: model1, model2 or model3.")
        return

    experiment(save_key, model, data_splits_file, batch_size, active_str,
               muxrate)
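The if/elif dispatch above could equally be written as a dictionary lookup; a sketch, assuming the same models module (the helper name is illustrative):

def build_named_model(model_name):
    # map the CLI name to its builder; return None for unknown names
    builders = {
        'model1': models.build_model1,
        'model2': models.build_model2,
        'model3': models.build_model3,
    }
    builder = builders.get(model_name)
    return builder() if builder is not None else None

main() would then call build_named_model(args.model_name) and print the error message and return when it gets None back.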
Example #5
def main(args):

    model_name = args.model_name
    audiofile = args.audiofile
    audio_folder = args.audio_folder

    # load model weights
    if model_name == 'model1':

        save_key = 'exp1multif0'
        model_path = "./models/{}.pkl".format(save_key)
        model = models.build_model1()
        model.load_weights(model_path)
        thresh = 0.4

    elif model_name == 'model2':

        save_key = 'exp2multif0'
        model_path = "./models/{}.pkl".format(save_key)
        model = models.build_model2()
        model.load_weights(model_path)
        thresh = 0.5

    elif model_name == 'model3':

        save_key = 'exp3multif0'
        model_path = "./models/{}.pkl".format(save_key)
        model = models.build_model3()
        model.load_weights(model_path)
        thresh = 0.5

    elif model_name == 'model4':

        save_key = 'exp4multif0'
        model_path = "./models/{}.pkl".format(save_key)
        model = models.build_model3()
        model.load_weights(model_path)
        thresh = 0.4

    elif model_name == 'model7':

        save_key = 'exp7multif0'
        model_path = "./models/{}.pkl".format(save_key)
        model = models.build_model3_mag()
        model.load_weights(model_path)
        thresh = 0.4

    else:
        raise ValueError("Specified model must be model1, model2 or model3.")

    # compile model

    model.compile(loss=utils_train.bkld,
                  metrics=['mse', utils_train.soft_binary_accuracy],
                  optimizer='adam')
    print("Model compiled")

    # select operation mode and compute prediction
    if audiofile != "0":

        if model_name == 'model7':
            # predict using trained model
            predicted_output, _ = get_single_test_prediction_phase_free(
                model, audio_file=os.path.join(audio_folder, audiofile))
        else:
            # predict using trained model
            predicted_output, _, _ = get_single_test_prediction(
                model, audio_file=audiofile)

        predicted_output = predicted_output.astype(np.float32)

        est_times, est_freqs = utils_train.pitch_activations_to_mf0(
            predicted_output, thresh)

        # rearrange output
        for i, (tms, fqs) in enumerate(zip(est_times, est_freqs)):
            if any(fqs <= 0):
                est_freqs[i] = np.array([f for f in fqs if f > 0])

        output_path = audiofile.replace('wav', 'csv')
        utils_train.save_multif0_output(est_times, est_freqs, output_path)

        print(" > > > Multiple F0 prediction for {} exported as {}.".format(
            audiofile, audiofile.replace('wav', 'csv')))

    elif audio_folder != "0":

        for audiofile in os.listdir(audio_folder):

            if not audiofile.endswith('wav'): continue

            if model_name == 'model7':
                # predict using trained model
                predicted_output, _ = get_single_test_prediction_phase_free(
                    model, audio_file=os.path.join(audio_folder, audiofile))

            else:

                # predict using trained model
                predicted_output, _, _ = get_single_test_prediction(
                    model, audio_file=os.path.join(audio_folder, audiofile))

            predicted_output = predicted_output.astype(np.float32)

            est_times, est_freqs = utils_train.pitch_activations_to_mf0(
                predicted_output, thresh)

            # rearrange output
            for i, (tms, fqs) in enumerate(zip(est_times, est_freqs)):
                if any(fqs <= 0):
                    est_freqs[i] = np.array([f for f in fqs if f > 0])

            output_path = os.path.join(audio_folder,
                                       audiofile.replace('wav', 'csv'))
            utils_train.save_multif0_output(est_times, est_freqs, output_path)

            print(" > > > Multiple F0 prediction for {} exported as {}.".format(
                audiofile, output_path))
    else:
        raise ValueError(
            "One of audiofile and audio_folder must be specified.")
Example #6
def main(args):

    pth_model = args.pth_model
    save_path = args.save_path
    list_of_files = args.list_of_files

    gt_path = '/scratch/hc2945/data/test_data'

    # first row: directory holding the audio; remaining rows: wav filenames
    file_list = np.array(pd.read_csv(list_of_files, header=None))
    audio_path = file_list[0][0]
    fname_list = file_list[1:]

    save_key = 'exp4multif0'
    model_path = os.path.join(pth_model, "{}.pkl".format(save_key))

    model = models.build_model3()
    model.load_weights(model_path)

    model.compile(loss=utils_train.bkld,
                  metrics=['mse', utils_train.soft_binary_accuracy],
                  optimizer='adam')

    print("Model compiled")

    all_scores_100 = []
    all_scores_20 = []

    for fname in fname_list:

        fname = fname[0]

        if not fname.endswith('.wav'): continue

        # predict using trained model
        predicted_output, _, _ = utils_train.get_single_test_prediction(
            model, npy_file=None, audio_file=os.path.join(audio_path, fname))
        # np.save(os.path.join(
        #     save_path, "{}_prediction.npy".format(fname.split('.')[0])),
        #     predicted_output.astype(np.float32))

        # load ground truth
        ref_times, ref_freqs = mir_eval.io.load_ragged_time_series(
            os.path.join(gt_path, fname.replace('wav', 'csv')))
        for i, (tms, fqs) in enumerate(zip(ref_times, ref_freqs)):
            if any(fqs <= 0):
                ref_freqs[i] = np.array([f for f in fqs if f > 0])

        trsh = 0.5

        est_times, est_freqs = utils_train.pitch_activations_to_mf0(
            predicted_output, trsh)
        for i, (tms, fqs) in enumerate(zip(est_times, est_freqs)):
            if any(fqs <= 0):
                est_freqs[i] = np.array([f for f in fqs if f > 0])
        # save the multi-f0 output
        output_mf0 = os.path.join(
            save_path, "{}_{}.csv".format(fname.split('.')[0], trsh))
        utils_train.save_multif0_output(est_times, est_freqs, output_mf0)

        scores_100 = mir_eval.multipitch.evaluate(ref_times,
                                                  ref_freqs,
                                                  est_times,
                                                  est_freqs,
                                                  window=1)
        scores_100['track'] = fname.replace('.wav', '')
        all_scores_100.append(scores_100)

        scores_20 = mir_eval.multipitch.evaluate(ref_times,
                                                 ref_freqs,
                                                 est_times,
                                                 est_freqs,
                                                 window=0.2)
        scores_20['track'] = fname.replace('.wav', '')
        all_scores_20.append(scores_20)

        print(
            "     Multiple F0 prediction exported and evaluated on 100 and 20 cents for {}"
            .format(fname))

    # export results with 100 cents
    scores_path = os.path.join(
        save_path, '{}_all_scores_100_cents.csv'.format('test_set'))
    score_summary_path = os.path.join(
        save_path, "{}_score_100_cents_summary.csv".format('test_set'))
    df = pd.DataFrame(all_scores_100)
    df.to_csv(scores_path)
    df.describe().to_csv(score_summary_path)

    # export results with 20 cents
    scores_path = os.path.join(save_path,
                               '{}_all_scores_20_cents.csv'.format('test_set'))
    score_summary_path = os.path.join(
        save_path, "{}_score_20_cents_summary.csv".format('test_set'))
    df = pd.DataFrame(all_scores_20)
    df.to_csv(scores_path)
    df.describe().to_csv(score_summary_path)
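As a quick sanity check on the exported tables, the two CSVs written above can be read back and compared; a sketch that assumes save_path points at the same folder and that 'Accuracy' is among the metrics mir_eval.multipitch.evaluate reports:

import os
import pandas as pd

def compare_windows(save_path):
    # read the per-track score tables written by the evaluation loop above
    scores_100 = pd.read_csv(
        os.path.join(save_path, 'test_set_all_scores_100_cents.csv'), index_col=0)
    scores_20 = pd.read_csv(
        os.path.join(save_path, 'test_set_all_scores_20_cents.csv'), index_col=0)
    print("mean accuracy: {:.3f} (100 cents) vs {:.3f} (20 cents)".format(
        scores_100['Accuracy'].mean(), scores_20['Accuracy'].mean()))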