Code example #1
def extract_embed(seq_file, model_file, preproc_file, output_path,
                  max_seq_length, pooling_output, write_format, **kwargs):
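    """Extract p1/p2 embeddings for every sequence in seq_file and write
    them to output_path in the format selected by write_format."""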

    set_float_cpu('float32')
    
    sr_args = SDRF.filter_args(**kwargs)
    
    if preproc_file is not None:
        preproc = TransformList.load(preproc_file)
    else:
        preproc = None

    sr = SDRF.create(seq_file, transform=preproc, **sr_args)
    
    t1 = time.time()

    model = SeqQEmbed.load(model_file)
    model.build(max_seq_length)
    model.build_embed(pooling_output)
    y_dim = model.embed_dim

    _, seq_lengths = sr.read_num_rows()
    sr.reset()
    num_seqs = len(seq_lengths)

    p1_y = np.zeros((num_seqs, y_dim), dtype=float_keras())
    p2_y = np.zeros((num_seqs, y_dim), dtype=float_keras())
    keys = []

    for i in range(num_seqs):
        ti1 = time.time()
        key, data = sr.read(1)
        
        ti2 = time.time()
        logging.info('Extracting embeddings %d/%d for %s, num_frames: %d' %
              (i, num_seqs, key[0], data[0].shape[0]))
        keys.append(key[0])
        p1_y[i], p2_y[i] = model.predict_embed(data[0])
                
        ti4 = time.time()
        logging.info('Elapsed time embeddings %d/%d for %s, total: %.2f read: %.2f, vae: %.2f' %
                     (i, num_seqs, key[0], ti4-ti1, ti2-ti1, ti4-ti2))
            
    logging.info('Extract elapsed time: %.2f' % (time.time() - t1))

    if write_format == 'p1':
        y = p1_y
    elif write_format == 'p1+p2':
        y = np.hstack((p1_y, p2_y))
    else:
        y = p2_y
    
    hw = DWF.create(output_path)
    hw.write(keys, y)
Code example #2
def extract_ivector(seq_file, file_list, gmm_file, model_file, preproc_file, output_path,
                    qy_only, **kwargs):
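    """Extract i-vectors with a tied VAE, using GMM posteriors as the r
    input, and write them to output_path."""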

    set_float_cpu('float32')
    
    sr_args = SR.filter_eval_args(**kwargs)
    
    if preproc_file is not None:
        preproc = TransformList.load(preproc_file)
    else:
        preproc = None

    gmm = DiagGMM.load_from_kaldi(gmm_file)
        
    sr = SR(seq_file, file_list, batch_size=1,
            shuffle_seqs=False,
            preproc=preproc, **sr_args)
    
    t1 = time.time()

    # The qy-only variant (TVAEY) is disabled here; always load the full
    # y+z model.
    model = TVAEYZ.load(model_file)
        
    model.build(max_seq_length=sr.max_batch_seq_length)
            
    y = np.zeros((sr.num_seqs, model.y_dim), dtype=float_keras())
    xx = np.zeros((1, sr.max_batch_seq_length, model.x_dim), dtype=float_keras())
    rr = np.zeros((1, sr.max_batch_seq_length, model.r_dim), dtype=float_keras())
    keys = []
    for i in range(sr.num_seqs):
        ti1 = time.time()
        x, key = sr.read_next_seq()
        ti2 = time.time()
        r = gmm.compute_z(x)
        ti3 = time.time()
        logging.info('Extracting i-vector %d/%d for %s, num_frames: %d' % (i, sr.num_seqs, key, x.shape[0]))
        keys.append(key)
        xx[:,:,:] = 0
        rr[:,:,:] = 0
        xx[0,:x.shape[0]] = x
        rr[0,:x.shape[0]] = r
        y[i] = model.compute_qy_x([xx, rr], batch_size=1)[0]
        ti4 = time.time()
        logging.info('Elapsed time i-vector %d/%d for %s, total: %.2f read: %.2f, gmm: %.2f, vae: %.2f' %
                     (i, sr.num_seqs, key, ti4-ti1, ti2-ti1, ti3-ti2, ti4-ti3))
            
    logging.info('Extract elapsed time: %.2f' % (time.time() - t1))
    
    hw = HypDataWriter(output_path)
    hw.write(keys, '', y)
Code example #3
def extract_ivector(seq_file, file_list, model_file, preproc_file, output_path,
                    qy_only, **kwargs):
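    """Extract i-vectors with a tied VAE (q(y|x) only or the full y+z
    model) and write them to output_path."""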

    set_float_cpu('float32')

    sr_args = SR.filter_eval_args(**kwargs)

    if preproc_file is not None:
        preproc = TransformList.load(preproc_file)
    else:
        preproc = None

    sr = SR(seq_file,
            file_list,
            batch_size=1,
            shuffle_seqs=False,
            preproc=preproc,
            **sr_args)

    t1 = time.time()

    if qy_only:
        model = TVAEY.load(model_file)
    else:
        model = TVAEYZ.load(model_file)

    model.build(max_seq_length=sr.max_batch_seq_length)

    logging.info('Model build elapsed time: %.2f' % (time.time() - t1))
    logging.info('y_dim: %d' % model.y_dim)
    y = np.zeros((sr.num_seqs, model.y_dim), dtype=float_keras())
    xx = np.zeros((1, sr.max_batch_seq_length, model.x_dim),
                  dtype=float_keras())
    keys = []
    for i in range(sr.num_seqs):
        x, key = sr.read_next_seq()
        logging.info('Extracting i-vector %d/%d for %s' %
                     (i, sr.num_seqs, key))
        keys.append(key)
        xx[:, :, :] = 0
        xx[0, :x.shape[0]] = x
        y[i] = model.compute_qy_x(xx, batch_size=1)[0]

    logging.info('Extract elapsed time: %.2f' % (time.time() - t1))

    hw = HypDataWriter(output_path)
    hw.write(keys, '', y)
Code example #4
def eval_pdda(iv_file, ndx_file, enroll_file, test_file,
              preproc_file,
              model_file, score_file,
              pool_method, eval_method,
              num_samples_y, num_samples_z, num_samples_elbo, qy_only,
              **kwargs):
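    """Score enrollment vs. test i-vectors with a PDDA model and save the
    trial scores to score_file."""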

    set_float_cpu('float32')
    
    if preproc_file is not None:
        preproc = TransformList.load(preproc_file)
    else:
        preproc = None

    tdr_args = TDR.filter_args(**kwargs)
    tdr = TDR(iv_file, ndx_file, enroll_file, test_file, preproc, **tdr_args)
    x_e, x_t, enroll, ndx = tdr.read()
    enroll, ids_e = np.unique(enroll, return_inverse=True)

    if qy_only:
        model = TVAEY.load(model_file)
        model.build(max_seq_length=2, num_samples=num_samples_y)
    else:
        model = TVAEYZ.load(model_file)
        model.build(max_seq_length=2,
                    num_samples_y=num_samples_y, num_samples_z=num_samples_z)

    t1 = time.time()
    scores = model.eval_llr_Nvs1(x_e, ids_e, x_t,
                                 pool_method=pool_method,
                                 eval_method=eval_method,
                                 num_samples=num_samples_elbo)
    dt = time.time() - t1
    num_trials = len(enroll) * x_t.shape[0]
    logging.info('Elapsed time: %.2f s. Elapsed time per trial: %.2f ms.' %
                 (dt, dt/num_trials*1000))

    s = TrialScores(enroll, ndx.seg_set, scores)
    s.save(score_file)
Code example #5
def extract_ivector(seq_file, file_list, gmm_file, model_file, preproc_file,
                    output_path, qy_only, **kwargs):
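    """Extract i-vectors by wiring the model's q(y|x,r) network into a
    standalone Keras model and predicting sequence by sequence."""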

    set_float_cpu('float32')

    sr_args = SR.filter_eval_args(**kwargs)

    if preproc_file is not None:
        preproc = TransformList.load(preproc_file)
    else:
        preproc = None

    gmm = DiagGMM.load_from_kaldi(gmm_file)

    sr = SR(seq_file,
            file_list,
            batch_size=1,
            shuffle_seqs=False,
            preproc=preproc,
            **sr_args)

    t1 = time.time()

    # The qy-only variant (TVAEY) is disabled here; always load the full
    # y+z model.
    model = TVAEYZ.load(model_file)

    # model.build() is skipped; the input/output dims are set by hand and
    # the q(y|x,r) network is wired into a standalone Keras model below.
    model.x_dim = 60
    model.r_dim = 2048
    model.y_dim = 400

    y = np.zeros((sr.num_seqs, model.y_dim), dtype=float_keras())
    xx = np.zeros((1, sr.max_batch_seq_length, model.x_dim),
                  dtype=float_keras())
    rr = np.zeros((1, sr.max_batch_seq_length, model.r_dim),
                  dtype=float_keras())
    keys = []

    xp = Input(shape=(
        sr.max_batch_seq_length,
        model.x_dim,
    ))
    rp = Input(shape=(
        sr.max_batch_seq_length,
        model.r_dim,
    ))
    qy_param = model.qy_net([xp, rp])
    qy_net = Model([xp, rp], qy_param)
    for i in range(sr.num_seqs):
        ti1 = time.time()
        x, key = sr.read_next_seq()
        ti2 = time.time()
        r = gmm.compute_z(x)
        ti3 = time.time()
        logging.info('Extracting i-vector %d/%d for %s, num_frames: %d' %
                     (i, sr.num_seqs, key, x.shape[0]))
        keys.append(key)
        ti5 = time.time()
        xx[:, :, :] = 0
        rr[:, :, :] = 0
        xx[0, :x.shape[0]] = x
        rr[0, :x.shape[0]] = r
        y[i] = qy_net.predict([xx, rr], batch_size=1)[0]
        ti4 = time.time()
        logging.info(
            'Elapsed time i-vector %d/%d for %s, total: %.2f read: %.2f, gmm: %.2f, vae: %.2f qy: %.2f'
            % (i, sr.num_seqs, key, ti4 - ti1, ti2 - ti1, ti3 - ti2, ti4 - ti5,
               ti5 - ti3))

    logging.info('Extract elapsed time: %.2f' % (time.time() - t1))

    hw = HypDataWriter(output_path)
    hw.write(keys, '', y)
Code example #6
def extract_ivector(seq_file, file_list, post_file, model_file, preproc_file,
                    output_path, qy_only, max_length, layer_name, **kwargs):
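    """Extract embeddings from an intermediate layer of the pt network,
    averaging over fixed-length chunks for long sequences."""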

    set_float_cpu('float32')

    sr_args = SR.filter_eval_args(**kwargs)

    if preproc_file is not None:
        preproc = TransformList.load(preproc_file)
    else:
        preproc = None

    sr = SR(seq_file,
            file_list,
            post_file,
            batch_size=1,
            shuffle_seqs=False,
            preproc=preproc,
            **sr_args)

    t1 = time.time()

    # The qy-only variant (TVAEY) is disabled here; always load the full
    # y+z model.
    model = TVAEYZ.load(model_file)

    pt_input = model.pt_net.input
    pt_output = model.pt_net.get_layer(layer_name).output
    pt_dim = model.pt_net.get_layer(layer_name).output_shape[-1]
    # Build with a dummy sequence length; inference below feeds fixed-size
    # chunks of max_length frames through the embedding network instead.
    model.build(max_seq_length=1)

    max_length = np.minimum(sr.max_batch_seq_length, max_length)

    y = np.zeros((sr.num_seqs, pt_dim), dtype=float_keras())
    xx = np.zeros((1, max_length, model.x_dim), dtype=float_keras())
    rr = np.zeros((1, max_length, model.r_dim), dtype=float_keras())
    keys = []

    xp = Input(shape=(
        max_length,
        model.x_dim,
    ))
    rp = Input(shape=(
        max_length,
        model.r_dim,
    ))
    qy_param = model.qy_net([xp, rp])

    pt_net = Model(pt_input, pt_output)
    emb = pt_net(qy_param[0])
    emb_net = Model([xp, rp], emb)
    model.pt_net.summary()
    pt_net.summary()
    emb_net.summary()
    logging.info(layer_name)

    for i in range(sr.num_seqs):
        ti1 = time.time()
        x, r, key = sr.read_next_seq()
        ti2 = time.time()
        logging.info('Extracting i-vector %d/%d for %s, num_frames: %d' %
                     (i, sr.num_seqs, key, x.shape[0]))
        keys.append(key)
        xx[:, :, :] = 0
        rr[:, :, :] = 0

        if x.shape[0] <= max_length:
            xx[0, :x.shape[0]] = x
            rr[0, :x.shape[0]] = r
            y[i] = emb_net.predict([xx, rr], batch_size=1)
        else:
            num_batches = int(np.ceil(float(x.shape[0]) / max_length))
            for j in range(num_batches - 1):
                start = j * max_length
                xx[0] = x[start:start + max_length]
                rr[0] = r[start:start + max_length]
                y[i] += emb_net.predict([xx, rr], batch_size=1).ravel()
            xx[0] = x[-max_length:]
            rr[0] = r[-max_length:]
            y[i] += emb_net.predict([xx, rr], batch_size=1).ravel()
            y[i] /= num_batches

        ti4 = time.time()
        logging.info(
            'Elapsed time i-vector %d/%d for %s, total: %.2f read: %.2f, vae: %.2f'
            % (i, sr.num_seqs, key, ti4 - ti1, ti2 - ti1, ti4 - ti2))

    logging.info('Extract elapsed time: %.2f' % (time.time() - t1))

    hw = HypDataWriter(output_path)
    hw.write(keys, '', y)
Code example #7
def train_embed(data_path, train_list, val_list, px_net_path, pt_net_path,
                qy_net_path, qz_net_path, init_path, epochs, preproc_file,
                output_path, freeze_embed, **kwargs):
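    """Train a VAE embedding model with training/validation generators,
    resuming from a checkpoint or an initial model when available."""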

    g = reserve_gpu()
    set_float_cpu(float_keras())

    if preproc_file is not None:
        preproc = TransformList.load(preproc_file)
    else:
        preproc = None

    sg_args = G.filter_args(**kwargs)
    sg = G(data_path,
           train_list,
           shuffle_seqs=True,
           reset_rng=False,
           transform=preproc,
           **sg_args)
    max_length = sg.max_seq_length
    gen_val = None
    if val_list is not None:
        sg_val = G(data_path,
                   val_list,
                   transform=preproc,
                   shuffle_seqs=False,
                   reset_rng=True,
                   **sg_args)
        max_length = max(max_length, sg_val.max_seq_length)
        gen_val = data_generator(sg_val, max_length)

    gen_train = data_generator(sg, max_length)

    if init_path is None:
        model, init_epoch = KML.load_checkpoint(output_path, epochs)
        if model is None:
            embed_args = VAE.filter_args(**kwargs)
            logging.debug(embed_args)
            px_net = load_model_arch(px_net_path)
            qy_net = load_model_arch(qy_net_path)
            qz_net = load_model_arch(qz_net_path)
            pt_net = load_model_arch(pt_net_path)

            model = VAE(px_net, qy_net, qz_net, pt_net, **embed_args)
        else:
            sg.cur_epoch = init_epoch
            sg.reset()
    else:
        logging.info('loading init model: %s' % init_path)
        model = KML.load(init_path)

    model.px_weight = kwargs['px_weight']
    model.pt_weight = kwargs['pt_weight']
    model.kl_qy_weight = kwargs['kl_qy_weight']
    model.kl_qz_weight = kwargs['kl_qz_weight']

    opt_args = KOF.filter_args(**kwargs)
    cb_args = KCF.filter_args(**kwargs)
    logging.debug(sg_args)
    logging.debug(opt_args)
    logging.debug(cb_args)

    logging.info('max length: %d' % max_length)

    t1 = time.time()

    if freeze_embed:
        model.prepool_net.trainable = False

    model.build(max_length)
    logging.info('Model build elapsed time: %.2f' % (time.time() - t1))

    cb = KCF.create_callbacks(model, output_path, **cb_args)
    opt = KOF.create_optimizer(**opt_args)
    model.compile(optimizer=opt)

    h = model.fit_generator(gen_train,
                            validation_data=gen_val,
                            steps_per_epoch=sg.steps_per_epoch,
                            validation_steps=(sg_val.steps_per_epoch
                                              if val_list is not None else None),
                            initial_epoch=sg.cur_epoch,
                            epochs=epochs,
                            callbacks=cb,
                            max_queue_size=10)

    logging.info('Train elapsed time: %.2f' % (time.time() - t1))

    model.save(output_path + '/model')
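
Note: several of the training examples call a data_generator helper that none of the snippets define. The following is a minimal sketch of what it might look like, assuming the sequence generator exposes a read() method that returns a (features, targets) batch with variable-length 2-D feature arrays; the reader API and padding behavior here are assumptions, not the toolkit's actual implementation.

def data_generator(sg, max_length):
    # Infinite generator, as required by Keras fit_generator.
    while True:
        x_batch, y_batch = sg.read()
        # Zero-pad (or truncate) every sequence to max_length frames.
        xx = np.zeros((len(x_batch), max_length, x_batch[0].shape[-1]),
                      dtype=float_keras())
        for k, x in enumerate(x_batch):
            n = min(x.shape[0], max_length)
            xx[k, :n] = x[:n]
        yield xx, y_batch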
Code example #8
def extract_embed(seq_file, file_list, model_file, preproc_file, output_path,
                  max_length, layer_names, **kwargs):
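    """Extract sequence embeddings from the given layers, averaging over
    chunks when a sequence is longer than max_length."""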

    set_float_cpu('float32')

    sr_args = SR.filter_eval_args(**kwargs)

    if preproc_file is not None:
        preproc = TransformList.load(preproc_file)
    else:
        preproc = None

    sr = SR(seq_file,
            file_list,
            batch_size=1,
            shuffle_seqs=False,
            preproc=preproc,
            **sr_args)

    t1 = time.time()

    model = SeqEmbed.load(model_file)
    model.build()
    logging.info(layer_names)
    model.build_embed(layer_names)
    y_dim = model.embed_dim

    max_length = np.minimum(sr.max_batch_seq_length, max_length)

    y = np.zeros((sr.num_seqs, y_dim), dtype=float_keras())
    xx = np.zeros((1, max_length, model.x_dim), dtype=float_keras())
    keys = []

    for i in range(sr.num_seqs):
        ti1 = time.time()
        x, key = sr.read_next_seq()
        ti2 = time.time()
        logging.info('Extracting embeddings %d/%d for %s, num_frames: %d' %
                     (i, sr.num_seqs, key, x.shape[0]))
        keys.append(key)
        xx[:, :, :] = 0

        if x.shape[0] <= max_length:
            xx[0, :x.shape[0]] = x
            y[i] = model.predict_embed(xx, batch_size=1)
        else:
            num_chunks = int(np.ceil(float(x.shape[0]) / max_length))
            chunk_size = int(np.ceil(float(x.shape[0]) / num_chunks))
            for j in range(num_chunks - 1):
                start = j * chunk_size
                xx[0, :chunk_size] = x[start:start + chunk_size]
                y[i] += model.predict_embed(xx, batch_size=1).ravel()
            xx[0, :chunk_size] = x[-chunk_size:]
            y[i] += model.predict_embed(xx, batch_size=1).ravel()
            y[i] /= num_chunks

        ti4 = time.time()
        logging.info(
            'Elapsed time embeddings %d/%d for %s, total: %.2f read: %.2f, vae: %.2f'
            % (i, sr.num_seqs, key, ti4 - ti1, ti2 - ti1, ti4 - ti2))

    logging.info('Extract elapsed time: %.2f' % (time.time() - t1))

    hw = HypDataWriter(output_path)
    hw.write(keys, '', y)
Code example #9
def train_embed(seq_file, train_list, val_list, class_list, embed_file,
                init_path, epochs, batch_size, preproc_file, output_path,
                post_pdf, pooling_input, pooling_output, min_var, **kwargs):
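    """Train a sequence meta-embedding model, either from scratch or from
    an initial model."""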

    set_float_cpu(float_keras())

    sr_args = SR.filter_args(**kwargs)
    sr_val_args = SR.filter_val_args(**kwargs)
    opt_args = KOF.filter_args(**kwargs)
    cb_args = KCF.filter_args(**kwargs)

    if preproc_file is not None:
        preproc = TransformList.load(preproc_file)
    else:
        preproc = None

    sr = SR(seq_file,
            train_list,
            class_list,
            batch_size=batch_size,
            preproc=preproc,
            **sr_args)
    max_length = sr.max_batch_seq_length
    gen_val = None
    if val_list is not None:
        sr_val = SR(seq_file,
                    val_list,
                    class_list,
                    batch_size=batch_size,
                    preproc=preproc,
                    shuffle_seqs=False,
                    seq_split_mode='sequential',
                    seq_split_overlap=0,
                    reset_rng=True,
                    **sr_val_args)
        max_length = max(max_length, sr_val.max_batch_seq_length)
        gen_val = data_generator(sr_val, max_length)

    gen_train = data_generator(sr, max_length)

    t1 = time.time()
    if init_path is None:
        embed_net = load_model_arch(embed_file)

        model = SeqMetaEmbed(embed_net,
                             num_classes=sr.num_classes,
                             post_pdf=post_pdf,
                             pooling_input=pooling_input,
                             pooling_output=pooling_output,
                             min_var=min_var)
    else:
        logging.info('loading init model: %s' % init_path)
        model = SeqMetaEmbed.load(init_path)

    logging.info('max length: %d' % max_length)
    model.build(max_length)
    logging.info('Model build elapsed time: %.2f' % (time.time() - t1))

    cb = KCF.create_callbacks(model, output_path, **cb_args)
    opt = KOF.create_optimizer(**opt_args)
    model.compile(optimizer=opt)

    h = model.fit_generator(gen_train,
                            validation_data=gen_val,
                            steps_per_epoch=sr.num_batches,
                            validation_steps=(sr_val.num_batches
                                              if val_list is not None else None),
                            epochs=epochs,
                            callbacks=cb,
                            max_queue_size=10)

    logging.info('Train elapsed time: %.2f' % (time.time() - t1))

    model.save(output_path + '/model')
Code example #10
def train_embed(data_path, train_list, val_list,
                train_list_adapt, val_list_adapt,
                prepool_net_path, postpool_net_path,
                init_path,
                epochs, 
                preproc_file, output_path,
                freeze_prepool, freeze_postpool_layers,
                **kwargs):
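    """Train a sequence embedding model with optional adaptation lists and
    optional freezing of the pre-/post-pooling networks."""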

    set_float_cpu(float_keras())
        
    if init_path is None:
        model, init_epoch = KML.load_checkpoint(output_path, epochs)
        if model is None:
            emb_args = SeqEmbed.filter_args(**kwargs)
            prepool_net = load_model_arch(prepool_net_path)
            postpool_net = load_model_arch(postpool_net_path)

            model = SeqEmbed(prepool_net, postpool_net,
                             loss='categorical_crossentropy',
                             **emb_args)
        else:
            kwargs['init_epoch'] = init_epoch
    else:
        logging.info('loading init model: %s' % init_path)
        model = KML.load(init_path)

    
    sg_args = G.filter_args(**kwargs)
    opt_args = KOF.filter_args(**kwargs)
    cb_args = KCF.filter_args(**kwargs)
    logging.debug(sg_args)
    logging.debug(opt_args)
    logging.debug(cb_args)
    
    if preproc_file is not None:
        preproc = TransformList.load(preproc_file)
    else:
        preproc = None
        
    sg = G(data_path, train_list, train_list_adapt,
           shuffle_seqs=True, reset_rng=False,
           transform=preproc, **sg_args)
    max_length = sg.max_seq_length
    gen_val = None
    if val_list is not None:
        sg_val = G(data_path, val_list, val_list_adapt,
                    transform=preproc,
                    shuffle_seqs=False, reset_rng=True,
                    **sg_args)
        max_length = max(max_length, sg_val.max_seq_length)
        gen_val = data_generator(sg_val, max_length)

    gen_train = data_generator(sg, max_length)
    
    logging.info('max length: %d' % max_length)
    
    t1 = time.time()
    if freeze_prepool:
        model.freeze_prepool_net()

    if freeze_postpool_layers is not None:
        model.freeze_postpool_net_layers(freeze_postpool_layers)
    
    model.build(max_length)
    
    cb = KCF.create_callbacks(model, output_path, **cb_args)
    opt = KOF.create_optimizer(**opt_args)
    model.compile(optimizer=opt)
    
    h = model.fit_generator(gen_train, validation_data=gen_val,
                            steps_per_epoch=sg.steps_per_epoch,
                            validation_steps=(sg_val.steps_per_epoch
                                              if val_list is not None else None),
                            initial_epoch=sg.cur_epoch,
                            epochs=epochs, callbacks=cb, max_queue_size=10)
                          
    logging.info('Train elapsed time: %.2f' % (time.time() - t1))
    
    model.save(output_path + '/model')
Code example #11
def extract_ivector(seq_file, file_list, gmm_file, model_file, preproc_file,
                    output_path, qy_only, max_length, **kwargs):
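    """Extract i-vectors with a tied VAE and GMM posteriors, averaging
    q(y|x,r) over fixed-length chunks for long sequences."""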

    set_float_cpu('float32')

    sr_args = SR.filter_eval_args(**kwargs)

    if preproc_file is not None:
        preproc = TransformList.load(preproc_file)
    else:
        preproc = None

    gmm = DiagGMM.load_from_kaldi(gmm_file)

    sr = SR(seq_file,
            file_list,
            batch_size=1,
            shuffle_seqs=False,
            preproc=preproc,
            **sr_args)

    t1 = time.time()

    # The qy-only variant (TVAEY) is disabled here; always load the full
    # y+z model.
    model = TVAEYZ.load(model_file)

    # Build with a dummy sequence length; long sequences are processed in
    # fixed-size chunks through qy_net below.
    model.build(max_seq_length=1)

    max_length = np.minimum(sr.max_batch_seq_length, max_length)

    y = np.zeros((sr.num_seqs, model.y_dim), dtype=float_keras())
    xx = np.zeros((1, max_length, model.x_dim), dtype=float_keras())
    rr = np.zeros((1, max_length, model.r_dim), dtype=float_keras())
    keys = []

    xp = Input(shape=(
        max_length,
        model.x_dim,
    ))
    rp = Input(shape=(
        max_length,
        model.r_dim,
    ))
    qy_param = model.qy_net([xp, rp])
    qy_net = Model([xp, rp], qy_param)

    for i in range(sr.num_seqs):
        ti1 = time.time()
        x, key = sr.read_next_seq()
        ti2 = time.time()
        r = gmm.compute_z(x)
        ti3 = time.time()
        logging.info('Extracting i-vector %d/%d for %s, num_frames: %d' %
                     (i, sr.num_seqs, key, x.shape[0]))
        keys.append(key)
        xx[:, :, :] = 0
        rr[:, :, :] = 0

        if x.shape[0] <= max_length:
            xx[0, :x.shape[0]] = x
            rr[0, :x.shape[0]] = r
            y[i] = qy_net.predict([xx, rr], batch_size=1)[0]
        else:
            num_batches = int(np.ceil(float(x.shape[0]) / max_length))
            for j in range(num_batches - 1):
                start = j * max_length
                xx[0] = x[start:start + max_length]
                rr[0] = r[start:start + max_length]
                y[i] += qy_net.predict([xx, rr], batch_size=1)[0].ravel()
            xx[0] = x[-max_length:]
            rr[0] = r[-max_length:]
            y[i] += qy_net.predict([xx, rr], batch_size=1)[0].ravel()
            y[i] /= num_batches

        ti4 = time.time()
        logging.info(
            'Elapsed time i-vector %d/%d for %s, total: %.2f read: %.2f, gmm: %.2f, vae: %.2f'
            %
            (i, sr.num_seqs, key, ti4 - ti1, ti2 - ti1, ti3 - ti2, ti4 - ti3))

    logging.info('Extract elapsed time: %.2f' % (time.time() - t1))

    hw = HypDataWriter(output_path)
    hw.write(keys, '', y)
Code example #12
def train_tvae(seq_file, train_list, val_list,
               decoder_file, qy_file, qz_file,
               epochs, batch_size,
               preproc_file, output_path,
               num_samples_y, num_samples_z,
               px_form, qy_form, qz_form,
               min_kl, **kwargs):
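    """Train a tied VAE (y-only or y+z) on sequence data with generators."""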

    set_float_cpu(float_keras())
    
    sr_args = SR.filter_args(**kwargs)
    sr_val_args = SR.filter_val_args(**kwargs)
    opt_args = KOF.filter_args(**kwargs)
    cb_args = KCF.filter_args(**kwargs)
    
    if preproc_file is not None:
        preproc = TransformList.load(preproc_file)
    else:
        preproc = None

    sr = SR(seq_file, train_list, batch_size=batch_size,
            preproc=preproc, **sr_args)
    max_length = sr.max_batch_seq_length
    gen_val = None
    if val_list is not None:
        sr_val = SR(seq_file, val_list, batch_size=batch_size,
                    preproc=preproc,
                    shuffle_seqs=False,
                    seq_split_mode='sequential', seq_split_overlap=0,
                    reset_rng=True,
                    **sr_val_args)
        max_length = max(max_length, sr_val.max_batch_seq_length)
        gen_val = data_generator(sr_val, max_length)

    gen_train = data_generator(sr, max_length)
    
            
    t1 = time.time()
    decoder = load_model_arch(decoder_file)
    qy = load_model_arch(qy_file)


    if qz_file is None:
        vae = TVAEY(qy, decoder, px_cond_form=px_form,
                    qy_form=qy_form, min_kl=min_kl)
        vae.build(num_samples=num_samples_y,
                  max_seq_length=max_length)
    else:
        qz = load_model_arch(qz_file)
        vae = TVAEYZ(qy, qz, decoder, px_cond_form=px_form,
                     qy_form=qy_form, qz_form=qz_form, min_kl=min_kl)
        vae.build(num_samples_y=num_samples_y, num_samples_z=num_samples_z,
                  max_seq_length=max_length)
    logging.info('Model build elapsed time: %.2f' % (time.time() - t1))
    
    cb = KCF.create_callbacks(vae, output_path, **cb_args)
    opt = KOF.create_optimizer(**opt_args)

    h = vae.fit_generator(gen_train, x_val=gen_val,
                          steps_per_epoch=sr.num_batches,
                          validation_steps=(sr_val.num_batches
                                            if val_list is not None else None),
                          optimizer=opt, epochs=epochs,
                          callbacks=cb, max_queue_size=10)

    logging.info('Train elapsed time: %.2f' % (time.time() - t1))
    
    vae.save(output_path + '/model')
Code example #13
def extract_embed(seq_file, model_file, preproc_file, output_path, max_length,
                  layer_names, **kwargs):
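    """Extract embeddings from the given layers, averaging over chunks when
    a sequence is longer than max_length."""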

    set_float_cpu('float32')

    sr_args = SDRF.filter_args(**kwargs)

    if preproc_file is not None:
        preproc = TransformList.load(preproc_file)
    else:
        preproc = None

    sr = SDRF.create(seq_file, transform=preproc, **sr_args)

    t1 = time.time()

    model = SeqEmbed.load(model_file)
    model.build()
    model.build_embed(layer_names)
    y_dim = model.embed_dim

    _, seq_lengths = sr.read_num_rows()
    sr.reset()
    num_seqs = len(seq_lengths)
    max_length = np.minimum(np.max(seq_lengths), max_length)

    y = np.zeros((num_seqs, y_dim), dtype=float_keras())
    xx = np.zeros((1, max_length, model.x_dim), dtype=float_keras())
    keys = []

    for i in range(num_seqs):
        ti1 = time.time()
        data = sr.read(1)
        key = data[0][0]
        x = data[1][0]

        ti2 = time.time()
        logging.info('Extracting embeddings %d/%d for %s, num_frames: %d' %
                     (i, num_seqs, key, x.shape[0]))
        keys.append(key)
        xx[:, :, :] = 0

        if x.shape[0] <= max_length:
            xx[0, :x.shape[0]] = x
            y[i] = model.predict_embed(xx, batch_size=1)
        else:
            num_chunks = int(np.ceil(float(x.shape[0]) / max_length))
            chunk_size = int(np.ceil(float(x.shape[0]) / num_chunks))
            for j in range(num_chunks - 1):
                start = j * chunk_size
                xx[0, :chunk_size] = x[start:start + chunk_size]
                y[i] += model.predict_embed(xx, batch_size=1).ravel()
            xx[0, :chunk_size] = x[-chunk_size:]
            y[i] += model.predict_embed(xx, batch_size=1).ravel()
            y[i] /= num_chunks

        ti4 = time.time()
        logging.info(
            'Elapsed time embeddings %d/%d for %s, total: %.2f read: %.2f, vae: %.2f'
            % (i, num_seqs, key, ti4 - ti1, ti2 - ti1, ti4 - ti2))

    logging.info('Extract elapsed time: %.2f' % (time.time() - t1))

    hw = DWF.create(output_path)
    hw.write(keys, y)
Code example #14
def train_tvae(seq_file, train_list, val_list, class_list, post_file,
               decoder_file, pt_file, qy_file, qz_file, init_path, epochs,
               batch_size, preproc_file, output_path, num_samples_y,
               num_samples_z, px_form, qy_form, qz_form, min_kl, loss_weights,
               **kwargs):
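    """Train a tied VAE with a pt classifier branch, then log eigenvalue
    diagnostics of the y posterior on the validation set."""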

    set_float_cpu(float_keras())

    sr_args = SR.filter_args(**kwargs)
    sr_val_args = SR.filter_val_args(**kwargs)
    opt_args = KOF.filter_args(**kwargs)
    cb_args = KCF.filter_args(**kwargs)

    if preproc_file is not None:
        preproc = TransformList.load(preproc_file)
    else:
        preproc = None

    sr = SR(seq_file,
            train_list,
            post_file,
            class_list,
            batch_size=batch_size,
            preproc=preproc,
            **sr_args)
    max_length = sr.max_batch_seq_length
    gen_val = None
    if val_list is not None:
        sr_val = SR(seq_file,
                    val_list,
                    post_file,
                    class_list,
                    batch_size=batch_size,
                    preproc=preproc,
                    shuffle_seqs=False,
                    seq_split_mode='sequential',
                    seq_split_overlap=0,
                    reset_rng=True,
                    **sr_val_args)
        max_length = max(max_length, sr_val.max_batch_seq_length)
        gen_val = data_generator(sr_val, max_length)

    gen_train = data_generator(sr, max_length)

    t1 = time.time()
    if init_path is None:
        decoder = load_model_arch(decoder_file)
        qy = load_model_arch(qy_file)
        qz = load_model_arch(qz_file)
        pt = load_model_arch(pt_file)
        # The y-only variant (TVAEY) is disabled here; the full y+z model
        # is always built.
        vae = TVAEYZ(qy,
                     qz,
                     decoder,
                     pt,
                     px_cond_form=px_form,
                     qy_form=qy_form,
                     qz_form=qz_form,
                     min_kl=min_kl,
                     loss_weights=loss_weights)
    else:
        logging.info('loading init model: %s' % init_path)
        vae = TVAEYZ.load(init_path)

    logging.info('max length: %d' % max_length)
    vae.build(num_samples_y=num_samples_y,
              num_samples_z=num_samples_z,
              max_seq_length=max_length)
    logging.info('Model build elapsed time: %.2f' % (time.time() - t1))

    cb = KCF.create_callbacks(vae, output_path, **cb_args)
    opt = KOF.create_optimizer(**opt_args)

    h = vae.fit_generator(gen_train,
                          x_val=gen_val,
                          steps_per_epoch=sr.num_batches,
                          validation_steps=(sr_val.num_batches
                                            if val_list is not None else None),
                          optimizer=opt,
                          epochs=epochs,
                          callbacks=cb,
                          max_queue_size=10)

    logging.info('Train elapsed time: %.2f' % (time.time() - t1))

    vae.save(output_path + '/model')
    sr_val.reset()
    y_val, sy_val, z_val, srz_val = vae.encoder_net.predict_generator(
        gen_val, steps=400)

    from scipy import linalg as la
    yy = y_val - np.mean(y_val, axis=0)
    cy = np.dot(yy.T, yy) / yy.shape[0]
    l, v = la.eigh(cy)
    np.savetxt(output_path + '/l1.txt', l)

    sr_val.reset()
    y_val2, sy_val2 = vae.qy_net.predict_generator(gen_val, steps=400)
    yy = y_val2 - np.mean(y_val, axis=0)
    cy = np.dot(yy.T, yy) / yy.shape[0]
    l, v = la.eigh(cy)
    np.savetxt(output_path + '/l2.txt', l)

    logging.info(y_val - y_val2)
Code example #15
def train_pdda(iv_file, train_list, val_list,
               decoder_file, qy_file, qz_file,
               epochs, batch_size,
               preproc_file, output_path,
               num_samples_y, num_samples_z,
               px_form, qy_form, qz_form,
               min_kl, **kwargs):
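    """Train PDDA (a tied VAE on i-vectors), then report ELBO, posterior
    moments, and 1-vs-1 verification score statistics."""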

    set_float_cpu('float32')
    
    vcr_args = VCR.filter_args(**kwargs)
    opt_args = KOF.filter_args(**kwargs)
    cb_args = KCF.filter_args(**kwargs)

    if preproc_file is not None:
        preproc = TransformList.load(preproc_file)
    else:
        preproc = None

        
    vcr_train = VCR(iv_file, train_list, preproc, **vcr_args)
    max_length = vcr_train.max_samples_per_class
    
    x_val = None
    sw_val = None
    if val_list is not None:
        vcr_val = VCR(iv_file, val_list, preproc, **vcr_args)
        max_length = max(max_length, vcr_val.max_samples_per_class)
        x_val, sw_val = vcr_val.read(return_3d=True, max_length=max_length)
        
    x, sw = vcr_train.read(return_3d=True, max_length=max_length)
        
    t1 = time.time()
    decoder = load_model_arch(decoder_file)
    qy = load_model_arch(qy_file)

    if qz_file is None:
        vae = TVAEY(qy, decoder, px_cond_form=px_form,
                    qy_form=qy_form, min_kl=min_kl)
        vae.build(num_samples=num_samples_y,
                  max_seq_length=x.shape[1])
    else:
        qz = load_model_arch(qz_file)
        vae = TVAEYZ(qy, qz, decoder, px_cond_form=px_form,
                     qy_form=qy_form, qz_form=qz_form, min_kl=min_kl)
        vae.build(num_samples_y=num_samples_y, num_samples_z=num_samples_z,
                  max_seq_length=x.shape[1])
    logging.info('Model build elapsed time: %.2f' % (time.time() - t1))
    # Earlier experiments (vae.fit / vae.fit_mdy with inline posterior-moment
    # diagnostics) were removed; the moment diagnostics are computed after
    # training below.
    
    cb = KCF.create_callbacks(vae, output_path, **cb_args)
    opt = KOF.create_optimizer(**opt_args)

    h = vae.fit(x, x_val=x_val,
                sample_weight_train=sw, sample_weight_val=sw_val,
                optimizer=opt, shuffle=True, epochs=epochs,
                batch_size=batch_size, callbacks=cb)

    if vae.x_chol is not None:
        x_chol = np.array(K.eval(vae.x_chol))
        logging.info(x_chol[:4,:4])
        
    
    logging.info('Train elapsed time: %.2f' % (time.time() - t1))
    
    vae.save(output_path + '/model')

    t1 = time.time()
    elbo = np.mean(vae.elbo(x, num_samples=1, batch_size=batch_size))
    logging.info('elbo: %.2f' % elbo)

    logging.info('Elbo elapsed time: %.2f' % (time.time() - t1))

    t1 = time.time()
    vae.build(num_samples_y=1, num_samples_z=1, max_seq_length=x.shape[1])
    vae.compile()


    qyz = vae.compute_qyz_x(x, batch_size=batch_size)
    if vae.qy_form == 'diag_normal':
        y_mean, y_logvar = qyz[:2]
        qz = qyz[2:]
    else:
        y_mean, y_logvar, y_chol = qyz[:3]
        qz = qyz[3:]
    if vae.qz_form == 'diag_normal':
        z_mean, z_logvar = qz[:2]
    else:
        z_mean, z_logvar, z_chol = qz[:3]

    sw = np.expand_dims(sw, axis=-1)
    m_y = np.mean(np.mean(y_mean, axis=0))
    s2_y = np.sum(np.sum(np.exp(y_logvar)+y_mean**2, axis=0)/
                  y_logvar.shape[0]-m_y**2)
    m_z = np.mean(np.sum(np.sum(z_mean*sw, axis=1), axis=0)
                  /np.sum(sw))
    s2_z = np.sum(np.sum(np.sum((np.exp(z_logvar)+z_mean**2)*sw, axis=1), axis=0)
                  /np.sum(sw)-m_z**2)
    logging.info('m_y: %.2f, trace_y: %.2f, m_z: %.2f, trace_z: %.2f' %
          (m_y, s2_y, m_z, s2_z))

    logging.info('Trace elapsed time: %.2f' % (time.time() - t1))

    t1 = time.time()
    vae.build(num_samples_y=1, num_samples_z=1, max_seq_length=2)
    vae.compile()
    
    x1 = x[:,0,:]
    x2 = x[:,1,:]
    # Alternative scorers (eval_llr_1vs1_elbo, eval_llr_1vs1_cand) were
    # tried here; only the q-scoring variant is kept.

    scores = vae.eval_llr_1vs1_qscr(x1, x2)
    tar = scores[np.eye(scores.shape[0], dtype=bool)]
    non = scores[np.logical_not(np.eye(scores.shape[0], dtype=bool))]
    logging.info('m_tar: %.2f s_tar: %.2f' % (np.mean(tar), np.std(tar)))
    logging.info('m_non: %.2f s_non: %.2f' % (np.mean(non), np.std(non)))
    
    logging.info('Eval elapsed time: %.2f' % (time.time() - t1))