Example #1
def main(params):
  
  # main training and validation loop goes here
  # This code should be independent of which model we use
  batch_size = params['batch_size']
  max_epochs = params['max_epochs']
  
  
  # fetch the data provider object
  dp = DataProvider(params)
  params['feat_size'] = dp.feat_size
  params['phone_vocab_size'] = dp.phone_vocab
  # Get the solver object; optional, not needed for Keras
  # solver = Solver(params['solver'])
  ## Add the model initialization code here
  
  modelObj = getModelObj(params)

  # Build the model Architecture
  f_train = modelObj.build_model(params)
  
  if params['saved_model'] is not None:
    cv = json.load(open(params['saved_model'], 'r'))
    modelObj.model.load_weights(cv['weights_file'])
    print 'Continuing training from model %s' % (params['saved_model'])
  
  train_x, train_y, val_x, val_y = dp.get_data_array(params['model_type'], ['train', 'devel'], cntxt=params['context'])
  fname, best_val_loss = modelObj.train_model(train_x, train_y, val_x, val_y, params)

  checkpoint = {}
    
  checkpoint['params'] = params
  checkpoint['weights_file'] = fname.format(val_loss=best_val_loss)
  filename = 'model_%s_%s_%s_%.2f.json' % (params['dataset'], params['model_type'], params['out_file_append'], best_val_loss)
  filename = os.path.join(params['out_dir'],filename)
  print 'Saving to File %s' % (filename)
  json.dump(checkpoint, open(filename,'w'))

  ## Now let's build a gradient computation graph and rmsprop update mechanism
  ##grads = tensor.grad(cost, wrt=model.values())
  ##lr = tensor.scalar(name='lr',dtype=config.floatX)
  ##f_grad_shared, f_update, zg, rg, ud = solver.build_solver_model(lr, model, grads,
  ##                                   inp_list, cost, params)

  #num_frames_total = dp.getSplitSize('train')
  #num_iters_one_epoch = num_frames_total/ batch_size
  #max_iters = max_epochs * num_iters_one_epoch
  ##
  #for it in xrange(max_iters):
  #  batch = dp.getBatch(batch_size)
  #  cost = f_train(*batch)
    
    #cost = f_grad_shared(inp_list)
    #f_update(params['learning_rate'])

    #Save model periodically
  return modelObj
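
A minimal sketch of how this training entry point might be driven. The keys below are the ones the function above actually reads; the values are placeholder assumptions rather than defaults taken from the project, and the __main__ wiring is hypothetical.

if __name__ == '__main__':
    params = {
        'batch_size': 256,          # assumed minibatch size
        'max_epochs': 20,           # assumed epoch budget
        'solver': 'rmsprop',        # only needed if the commented-out Solver path is enabled
        'saved_model': None,        # checkpoint .json to resume from, or None for a fresh run
        'model_type': 'RNN',        # hypothetical identifier understood by getModelObj
        'context': 5,               # number of context frames requested from the DataProvider
        'dataset': 'dataset_name',  # hypothetical dataset identifier
        'out_file_append': 'run1',  # tag mixed into the checkpoint filename
        'out_dir': 'models',        # directory where the checkpoint .json is written
    }
    main(params)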
Example #2
def main(params):

    # main training and validation loop goes here
    # This code should be independent of which model we use
    batch_size = params['batch_size']
    max_epochs = params['max_epochs']

    # fetch the data provider object
    dp = DataProvider(params)
    params['feat_size'] = dp.feat_size
    params['phone_vocab_size'] = dp.phone_vocab
    # Get the solver object; optional, not needed for Keras
    # solver = Solver(params['solver'])
    ## Add the model initialization code here

    modelObj = getModelObj(params)

    # Build the model Architecture
    f_train = modelObj.build_model(params)

    if params['saved_model'] is not None:
        cv = json.load(open(params['saved_model'], 'r'))
        modelObj.model.load_weights(cv['weights_file'])
        print 'Continuing training from model %s' % (params['saved_model'])

    train_x, train_y, val_x, val_y = dp.get_data_array(params['model_type'],
                                                       ['train', 'devel'],
                                                       cntxt=params['context'])
    fname, best_val_loss = modelObj.train_model(train_x, train_y, val_x, val_y,
                                                params)

    checkpoint = {}

    checkpoint['params'] = params
    checkpoint['weights_file'] = fname.format(val_loss=best_val_loss)
    filename = 'model_%s_%s_%s_%.2f.json' % (
        params['dataset'], params['model_type'], params['out_file_append'],
        best_val_loss)
    filename = os.path.join(params['out_dir'], filename)
    print 'Saving to File %s' % (filename)
    json.dump(checkpoint, open(filename, 'w'))

    ## Now let's build a gradient computation graph and rmsprop update mechanism
    ##grads = tensor.grad(cost, wrt=model.values())
    ##lr = tensor.scalar(name='lr',dtype=config.floatX)
    ##f_grad_shared, f_update, zg, rg, ud = solver.build_solver_model(lr, model, grads,
    ##                                   inp_list, cost, params)

    #num_frames_total = dp.getSplitSize('train')
    #num_iters_one_epoch = num_frames_total/ batch_size
    #max_iters = max_epochs * num_iters_one_epoch
    ##
    #for it in xrange(max_iters):
    #  batch = dp.getBatch(batch_size)
    #  cost = f_train(*batch)

    #cost = f_grad_shared(inp_list)
    #f_update(params['learning_rate'])

    #Save model periodically
    return modelObj
Example #3
def main(params):
  # check whether a model_list file was provided; otherwise evaluate a single saved model
  if params['model_list'] is not None:
    with open(params['model_list']) as f:
      model_file_list = f.readlines()
  else:
    model_file_list = [(params['saved_model'])]
  
  # load the DataProvider only once, even when evaluating several models
  dp_loaded = False
  for m in model_file_list:
    m = re.sub("\n", "", m)
    cv = json.load(open(m, 'r'))
    cv_params = cv['params']
    
    if params['dataset'] is not None:
        cv_params['dataset'] = params['dataset']
        cv_params['dataset_desc'] = params['dataset_desc']
    if not dp_loaded:
      dp_loaded = True
      dp = DataProvider(cv_params)
    cv_params['feat_size'] = dp.feat_size
    cv_params['phone_vocab_size'] = dp.phone_vocab
    
    
    # Get the model object and build the model Architecture
    if cv_params['model_type'] != 'DBN':
        modelObj = getModelObj(cv_params)
        f_train = modelObj.build_model(cv_params)
        modelObj.model.load_weights(cv['weights_file'])
    else:
        modelObj = cPickle.load(open(cv['weights_file']))
        
    inpt_x, inpt_y = dp.get_data_array(cv_params['model_type'], [params['split']], cntxt=cv_params['context'])

    predOut = modelObj.model.predict_classes(inpt_x, batch_size=100)
    accuracy = 100.0 * np.sum(predOut == inpt_y.nonzero()[1]) / predOut.shape[0]
    print('Accuracy of %s on the %s set is %0.2f' % (m, params['split'], accuracy))


    # Get the phone order
    ph2bin = dp.dataDesc['ph2bin']
    phoneList = ['']*len(ph2bin)
    for ph in ph2bin:
        phoneList[ph2bin[ph].split().index('1')] = ph

    # plotting confusion matrix
    if params['plot_confmat'] != 0:
        cm = confusion_matrix(inpt_y.nonzero()[1], predOut) 
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        plt.figure()
        plot_confusion_matrix(cm, phoneList)
        plt.show()

    if params['dump_lna_dir'] is not None:
        spt = params['split']
        phones_targ = [l.strip() for l in codecs.open(params['lna_ph_order'], encoding='utf-8')]
        assert(set(phones_targ) == set(phoneList))
        shuffle_order = np.zeros(len(phones_targ),dtype=np.int32)
        for i,ph in enumerate(phones_targ):
            shuffle_order[i] = phoneList.index(ph)
        ## Now, for every utterance, predict frame probabilities and dump .lna files
        for i, inp_file in enumerate(dp.dataDesc[spt + '_x']):
            lna_file = os.path.join(params['dump_lna_dir'], os.path.basename(inp_file).split('.')[0] + '.lna')
            inpt_x, inp_y = dp.get_data_array(cv_params['model_type'], [params['split']], cntxt=cv_params['context'], shufdata=0, idx=i)
            probs = modelObj.model.predict(inpt_x, batch_size=100)
            #dump_lna(inp_y[:,shuffle_order].flatten(), lna_file, probs.shape[1])
            dump_lna(probs[:,shuffle_order].flatten(), lna_file, probs.shape[1])
            print lna_file
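
For orientation, a sketch of the evaluation parameters this function expects. Only the key names come from the code above; the values are placeholder assumptions, not the project's defaults.

eval_params = {
    'model_list': None,                # text file with one checkpoint .json path per line, or None
    'saved_model': 'model_run1.json',  # single checkpoint .json, used when model_list is None
    'split': 'devel',                  # data split to score
    'dataset': None,                   # optionally override the dataset stored in the checkpoint
    'dataset_desc': None,              # paired description file for the dataset override
    'plot_confmat': 0,                 # nonzero to plot the confusion matrix
    'dump_lna_dir': None,              # directory for .lna probability dumps, or None to skip
    'lna_ph_order': 'phones.list',     # phone ordering expected by the .lna consumer
}
main(eval_params)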
Example #4
def main(params):
    # check whether a model_list file was provided; otherwise evaluate a single saved model
    if params['model_list'] is not None:
        with open(params['model_list']) as f:
            model_file_list = f.readlines()
    else:
        model_file_list = [(params['saved_model'])]

    # load the DataProvider only once, even when evaluating several models
    dp_loaded = False
    for m in model_file_list:
        m = re.sub("\n", "", m)
        cv = json.load(open(m, 'r'))
        cv_params = cv['params']

        if params['dataset'] is not None:
            cv_params['dataset'] = params['dataset']
            cv_params['dataset_desc'] = params['dataset_desc']
        if not dp_loaded:
            dp_loaded = True
            dp = DataProvider(cv_params)
        cv_params['feat_size'] = dp.feat_size
        cv_params['phone_vocab_size'] = dp.phone_vocab

        # Get the model object and build the model Architecture
        if cv_params['model_type'] != 'DBN':
            modelObj = getModelObj(cv_params)
            f_train = modelObj.build_model(cv_params)
            modelObj.model.load_weights(cv['weights_file'])
        else:
            modelObj = cPickle.load(open(cv['weights_file']))

        inpt_x, inpt_y = dp.get_data_array(cv_params['model_type'],
                                           [params['split']],
                                           cntxt=cv_params['context'])

        predOut = modelObj.model.predict_classes(inpt_x, batch_size=100)
        accuracy = 100.0 * np.sum(
            predOut == inpt_y.nonzero()[1]) / predOut.shape[0]
        print('Accuracy of %s on the %s set is %0.2f' %
              (m, params['split'], accuracy))

        # Get the phone order
        ph2bin = dp.dataDesc['ph2bin']
        phoneList = [''] * len(ph2bin)
        for ph in ph2bin:
            phoneList[ph2bin[ph].split().index('1')] = ph

        # plotting confusion matrix
        if params['plot_confmat'] != 0:
            cm = confusion_matrix(inpt_y.nonzero()[1], predOut)
            cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
            plt.figure()
            plot_confusion_matrix(cm, phoneList)
            plt.show()

        if params['dump_lna_dir'] is not None:
            spt = params['split']
            phones_targ = [
                l.strip()
                for l in codecs.open(params['lna_ph_order'], encoding='utf-8')
            ]
            assert (set(phones_targ) == set(phoneList))
            shuffle_order = np.zeros(len(phones_targ), dtype=np.int32)
            for i, ph in enumerate(phones_targ):
                shuffle_order[i] = phoneList.index(ph)
            ## Now, for every utterance, predict frame probabilities and dump .lna files
            for i, inp_file in enumerate(dp.dataDesc[spt + '_x']):
                lna_file = os.path.join(
                    params['dump_lna_dir'],
                    os.path.basename(inp_file).split('.')[0] + '.lna')
                inpt_x, inp_y = dp.get_data_array(cv_params['model_type'],
                                                  [params['split']],
                                                  cntxt=cv_params['context'],
                                                  shufdata=0,
                                                  idx=i)
                probs = modelObj.model.predict(inpt_x, batch_size=100)
                #dump_lna(inp_y[:,shuffle_order].flatten(), lna_file, probs.shape[1])
                dump_lna(probs[:, shuffle_order].flatten(), lna_file,
                         probs.shape[1])
                print lna_file