import logging
import time

# The names used below but not defined here (TransformList, TDR, VR, F,
# TrialScores, SNorm, combine_diar_scores) are assumed to be imported from
# the surrounding speaker-recognition toolkit (e.g. hyperion); their exact
# import paths are not shown in this snippet.


def eval_plda(iv_file, ndx_file, enroll_file, test_subseg2orig_file,
              preproc_file, model_file, score_file, plda_type,
              **kwargs):
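    """Computes PLDA log-likelihood ratios between enrollment and test
    i-vectors, combines the scores of the diarization sub-segments into
    per-segment scores, and saves them to a trial-score file."""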
    
    logging.info('loading data')
    if preproc_file is not None:
        preproc = TransformList.load(preproc_file)
    else:
        preproc = None

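    # TDR loads the enroll/test i-vectors, the trial ndx, and the mapping
    # from test sub-segments to their original segments.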
    tdr = TDR(iv_file, ndx_file, enroll_file, None, test_subseg2orig_file, preproc)
    x_e, x_t, enroll, ndx, orig_seg = tdr.read()

    logging.info('loading plda model: %s' % (model_file))
    model = F.load_plda(plda_type, model_file)
    
    t1 = time.time()
    
    logging.info('computing llr')
    scores = model.llr_1vs1(x_e, x_t)
    
    dt = time.time() - t1
    num_trials = len(enroll) * x_t.shape[0]
    logging.info(
        'scoring elapsed time: %.2f s. elapsed time per trial: %.2f ms.'
        % (dt, dt / num_trials * 1000))

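    # Combine the per-sub-segment scores into a single score per original
    # test segment (orig_seg holds the sub-segment to segment mapping).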
    logging.info('combine cluster scores') 
    scores = combine_diar_scores(ndx, orig_seg, scores)

    logging.info('saving scores to %s' % (score_file))
    s = TrialScores(enroll, ndx.seg_set, scores)
    s = s.align_with_ndx(ndx)
    s.save_txt(score_file)


# Example 2: the same evaluation, adding cohort-based S-norm score
# normalization before the scores are combined and saved.
def eval_plda(iv_file, ndx_file, enroll_file, test_subseg2orig_file,
              preproc_file, coh_iv_file, coh_list, coh_nbest,
              coh_nbest_discard, model_file, score_file, plda_type, **kwargs):
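    """Same as the function above, but additionally applies S-norm score
    normalization with a cohort of i-vectors before combining the
    diarization sub-segment scores and saving the result."""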

    logging.info('loading data')
    if preproc_file is not None:
        preproc = TransformList.load(preproc_file)
    else:
        preproc = None

    tdr = TDR(iv_file, ndx_file, enroll_file, None, test_subseg2orig_file,
              preproc)
    x_e, x_t, enroll, ndx, orig_seg = tdr.read()

    logging.info('loading plda model: %s' % (model_file))
    model = F.load_plda(plda_type, model_file)

    t1 = time.time()
    logging.info('computing llr')
    scores = model.llr_1vs1(x_e, x_t)

    dt = time.time() - t1
    num_trials = len(enroll) * x_t.shape[0]
    logging.info(
        'scoring elapsed time: %.2f s. elapsed time per trial: %.2f ms.' %
        (dt, dt / num_trials * 1000))

    logging.info('loading cohort data')
    vr = VR(coh_iv_file, coh_list, preproc)
    x_coh = vr.read()

    t2 = time.time()
    logging.info('score cohort vs test')
    scores_coh_test = model.llr_1vs1(x_coh, x_t)
    logging.info('score enroll vs cohort')
    scores_enr_coh = model.llr_1vs1(x_e, x_coh)

    dt = time.time() - t2
    logging.info('cohort-scoring elapsed time: %.2f s.' % (dt))

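    # S-norm: normalize each trial score using the enroll-vs-cohort and
    # cohort-vs-test score distributions; presumably only the coh_nbest best
    # cohort scores per trial are kept, after dropping the top
    # coh_nbest_discard of them.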
    t2 = time.time()
    logging.info('apply s-norm')
    snorm = SNorm(nbest=coh_nbest, nbest_discard=coh_nbest_discard)
    scores = snorm.predict(scores, scores_coh_test, scores_enr_coh)
    dt = time.time() - t2
    logging.info('s-norm elapsed time: %.2f s.' % (dt))

    dt = time.time() - t1
    logging.info(
        ('total-scoring elapsed time: %.2f s. '
         'elapsed time per trial: %.2f ms.') % (dt, dt / num_trials * 1000))

    logging.info('combine cluster scores')
    scores = combine_diar_scores(ndx, orig_seg, scores)

    logging.info('saving scores to %s' % (score_file))
    s = TrialScores(enroll, ndx.seg_set, scores)
    s = s.align_with_ndx(ndx)
    s.save_txt(score_file)