def test_all_pipeline_1_15(myFileList):
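    # Leave-one-file-out scheme: each subject file is used once as the training
    # set while the concatenation of all remaining files serves as the test set.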
    for indice_fichier in range(len(myFileList)):
        fichier_train = [myFileList[indice_fichier]]
        fichier_test = (myFileList[:indice_fichier] +
                        myFileList[indice_fichier + 1:])

        mesDonnees_train = dataLoader.DataLoader(fichier_train,
                                                 concatenate=False)
        mesDonnees_test = dataLoader.DataLoader(fichier_test, concatenate=True)
        mesDonnees_train.filtre_Matrice()
        mesDonnees_test.filtre_Matrice()

        clf = LogisticRegression(penalty="l1", max_iter=10000, C=0.05)
        m = Model_all_pipeline(clf, mesDonnees_train)
        new_labels_train = m.concat_labels(mesDonnees_train.labels)
        new_labels_test = m.concat_labels(mesDonnees_test.labels)
        m.fit(mesDonnees_train.data[0], new_labels_train)
        pred = m.predict(mesDonnees_test.data)
        score = classification_report(new_labels_test, pred, output_dict=True)
        with open(str(indice_fichier) + ".txt", "w") as result_file:
            result_file.write(str(score))
        print(score)
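
# A hypothetical call (file names assumed for illustration, not from the
# original script):
# test_all_pipeline_1_15(["data/train/train_subject01.mat",
#                         "data/train/train_subject02.mat"])
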
def test_all_pipeline_16_16(myFileList):
    # At least 580 trials per subject, so we cut into 5 blocks of 116 trials
    # to keep things simple.

    for mult in range(5):
        indice = int(mult * 116)
        mesDonnees_train = dataLoader.DataLoader(myFileList, concatenate=False)
        mesDonnees_test = dataLoader.DataLoader(myFileList, concatenate=False)

        for k in range(len(mesDonnees_train.data)):
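            # For subject k, hold out the 116-trial block [indice, indice + 116)
            # for testing and train on the rest of the first 580 trials.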
            mesDonnees_train.data[k] = np.concatenate(
                (mesDonnees_train.data[k][:indice],
                 mesDonnees_train.data[k][indice + 116:580]))
            mesDonnees_train.labels[k] = np.concatenate(
                (mesDonnees_train.labels[k][:indice],
                 mesDonnees_train.labels[k][indice + 116:580]))
            mesDonnees_test.data[k] = mesDonnees_test.data[k][indice:indice +
                                                              116]
            mesDonnees_test.labels[k] = mesDonnees_test.labels[k][
                indice:indice + 116]

        mesDonnees_train.filtre_Matrice()
        mesDonnees_test.filtre_Matrice()

        clf = LogisticRegression(penalty="l1", max_iter=10000, C=0.05)
        m = Model_all_pipeline(clf, mesDonnees_train)
        new_labels_train = m.concat_labels(mesDonnees_train.labels)
        new_labels_test = m.concat_labels(mesDonnees_test.labels)
        m.fit(mesDonnees_train.data, new_labels_train)
        pred = m.predict(mesDonnees_test.data)
        score = classification_report(new_labels_test, pred, output_dict=True)
        with open(str(mult) + ".txt", "w") as result_file:
            result_file.write(str(score))
        print(score)
    def train(self):
        if (not self.graph_build):
            raise Exception("Graph has to be build before Training")
        input_tensor = self.graph.get_tensor_by_name("input:0")
        label_tensor = self.graph.get_tensor_by_name("label:0")
        loss_tensor = self.graph.get_tensor_by_name("loss:0")
        optimizer_operation = self.graph.get_operation_by_name("optimizer")
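        # Input, label and loss tensors plus the optimizer op are looked up by
        # name in the previously built graph and fed batch by batch below.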
        self.data_loader = dataLoader.DataLoader()
        log.train_log("Training of model " + self.model_name + " started")

        for epoch in range(self.epochs):
            epoch_loss = 0
            n = int(self.data_loader.train_size / self.batch_size)
            for i in range(n):
                input_batch, label_batch = self.data_loader.get_batch(
                    self.batch_size)
                _, loss = self.sess.run([optimizer_operation, loss_tensor],
                                        feed_dict={
                                            input_tensor: input_batch,
                                            label_tensor: label_batch
                                        })
                epoch_loss += loss
            log.train_log("Epoch " + str(epoch) + ": " +
                          str(epoch_loss / (n * self.batch_size)))
            self.saver.save(self.sess,
                            self.model_save_path + "/checkpoint",
                            global_step=epoch)

        log.train_log("Training of model " + self.model_name + " finished")
Example #4
def gradcheck(epsilon=1e-4):
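    # One-sided finite-difference check: perturb a single randomly chosen
    # weight by epsilon, recompute the cost, and compare
    # (cost2 - cost) / epsilon against the analytic gradient from costAndGrad.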

    import dataLoader as dl
    import random 

    loader = dl.DataLoader('/scail/group/deeplearning/speech/awni/kaldi-stanford/kaldi-trunk/egs/timit/s5/exp/nn_train/',41*23,41*23)
    nn = NNet(41*23,41*23,[1024])
    nn.initParams()

    data_dict,alis,keys,sizes = loader.loadDataFileDict(1)

    k = random.sample(keys,1)[0]

    data = gp.garray(data_dict[k])
    labels = np.array(alis[k],dtype=np.int32)

    cost,grad,_ = nn.costAndGrad(data,labels)
    print data.shape
    print labels.shape

    while True:
        m,n = nn.stack[1][0].shape
        msample,nsample = random.randint(0,m-1),random.randint(0,n-1)
        nn.stack[1][0][msample,nsample] += epsilon

        cost2,grad,_ = nn.costAndGrad(data,labels)
    
        nn.stack[1][0][msample,nsample] -= epsilon

        finite_diff = (cost2 - cost) / epsilon
        print "Analytic %.6f -- Finite %.6f"%(grad[1][0][msample,nsample],finite_diff)
            
        # Clear gp mem
        gp.free_reuse_cache()
Example #5
def runSeq(opts):
    #fid = open(opts.out_file, 'w')
    # phone_map = get_char_map(opts.dataDir)

    # initialize loader to not read actual data
    loader = dl.DataLoader(opts.ali_dir,
                           -1,
                           -1,
                           load_ali=True,
                           load_data=False)
    #likelihoodsDir = pjoin(SCAIL_DATA_DIR, 'ctc_loglikes_%s' % DATASET)

    hyps = list()
    refs = list()
    hypscores = list()
    refscores = list()
    numphones = list()

    for i in range(opts.start_file, opts.start_file + opts.num_files):
        data_dict, alis, keys, sizes = loader.loadDataFileDict(i)
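        # loadDataFileDict(i) yields per-utterance features (data_dict),
        # alignments (alis), utterance keys and sizes for data file i.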

        ll_file = pjoin(opts.lik_dir, 'loglikelihoods_%d.pk' % i)
        with open(ll_file, 'rb') as ll_fid:
            probs_dict = pickle.load(ll_fid)

        # Parallelize decoding over utterances

        print 'Decoding utterances in parallel, n_jobs=%d' % NUM_CPUS
        decoded_utts = Parallel(n_jobs=NUM_CPUS)(delayed(decode_utterance_clm)(
            k, probs_dict[k], alis[k], opts.charmap_file, opts.lm_file)
                                                 for k in keys)

        for k, (hyp, ref, hypscore, refscore) in zip(keys, decoded_utts):
            if refscore is None:
                refscore = 0.0
            if hypscore is None:
                hypscore = 0.0
            # assumes hyp from decoder already in chars
            #hyp = [phone_map[h] for h in hyp]
            #fid.write(k + ' ' + ' '.join(hyp) + '\n')
            print k + ' ' + ' '.join(hyp)
            hyps.append(hyp)
            refs.append(ref)
            hypscores.append(hypscore)
            refscores.append(refscore)
            numphones.append(len(alis[k]))

    #fid.close()

    # Pickle some values for computeStats.py
    with open(opts.out_file.replace('.txt', '.pk'), 'wb') as pkid:
        pickle.dump(hyps, pkid)
        pickle.dump(refs, pkid)
        pickle.dump(hypscores, pkid)
        pickle.dump(refscores, pkid)
        pickle.dump(numphones, pkid)
Example #6
def test(opts):
    import editDistance as ed

    print "Testing model %s" % opts.inFile

    phone_map = get_phone_map_swbd()

    with open(opts.inFile, 'r') as fid:
        old_opts = pickle.load(fid)
        _ = pickle.load(fid)
        _ = pickle.load(fid)
        loader = dl.DataLoader(opts.dataDir, old_opts.rawDim,
                               old_opts.inputDim)
        if 'layers' not in dir(old_opts):
            old_opts.layers = [old_opts.layerSize] * old_opts.numLayers
        nn = nnet.NNet(old_opts.inputDim,
                       old_opts.outputDim,
                       old_opts.layers,
                       train=False)
        nn.initParams()
        nn.fromFile(fid)

    totdist = numphones = 0
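    # Accumulate total edit distance and reference phone counts across all
    # utterances to report the phone error rate (PER) at the end.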

    fid = open('hyp.txt', 'w')
    for i in range(1, opts.numFiles + 1):
        data_dict, alis, keys, sizes = loader.loadDataFileDict(i)
        for k in keys:
            gp.free_reuse_cache()
            hyp = nn.costAndGrad(data_dict[k])
            hyp = [phone_map[h] for h in hyp]
            ref = [phone_map[int(r)] for r in alis[k]]
            dist, ins, dels, subs, corr = ed.edit_distance(ref, hyp)
            print "Distance %d/%d" % (dist, len(ref))
            fid.write(k + ' ' + ' '.join(hyp) + '\n')
            totdist += dist
            numphones += len(alis[k])

    fid.close()
    print "PER : %f" % (100 * totdist / float(numphones))
Example #7
def test(opts):
    old_opts = CfgStruct(**load_config(opts.cfg_file))

    logging.basicConfig(filename=pjoin(opts.output_dir, 'test.log'),
                        level=logging.DEBUG)
    logger = logging.getLogger()
    logger.addHandler(logging.StreamHandler())
    logger.info('Running on %s' % get_hostname())

    with open(old_opts.in_file, 'r') as fid:
        pickle.load(fid)  # SGD data, not needed
        print 'rawDim:', old_opts.rawDim, 'inputDim:', old_opts.inputDim,\
            'layerSize:', old_opts.layerSize, 'numLayers:', old_opts.numLayers,\
            'maxUttLen:', old_opts.maxUttLen
        print 'temporalLayer:', old_opts.temporalLayer, 'outputDim:', old_opts.outputDim

        alisDir = opts.alisDir if opts.alisDir else opts.dataDir
        loader = dl.DataLoader(opts.dataDir, old_opts.rawDim,
                               old_opts.inputDim, alisDir)
        nn = rnnet.NNet(old_opts.inputDim,
                        old_opts.outputDim,
                        old_opts.layerSize,
                        old_opts.numLayers,
                        old_opts.maxUttLen,
                        temporalLayer=old_opts.temporalLayer,
                        train=False)
        nn.initParams()
        nn.fromFile(fid)

    # FIXME Different output directory specific to test set
    out_dir = pjoin(SCAIL_DATA_DIR,
                    'ctc_loglikes_%s_%s' % (DATASET, DATA_SUBSET))
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    for i in range(opts.startFile, opts.numFiles + 1):
        writeLogLikes(loader, nn, i, out_dir, writePickle=True)
Example #8
def saveResult(name, result):
    # Write [time, F1 score] rows to a CSV file; the with-block ensures the
    # file is flushed and closed.
    with open(name, 'w') as myfile:
        wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
        wr.writerow(["Time(second)", "F1 Score"])
        for r in result:
            wr.writerow(r)


nomTest = "test_all_other_model_1train_1test"

myFileList = ["data/train/train_subject1.mat"]
#myFileList = getListOfFiles("data/train/")[:4]

mesDonnees = dict()
mesDonnees[1.5] = dataLoader.DataLoader(myFileList, concatenate=True)
#print("j'ai enlevé le filtre passe bande car les matrices netaits plus définit positives apres")
#mesDonnees[1.5].filtre_Matrice()
print(len(mesDonnees[1.5].data))
print(len(mesDonnees[1.5].labels))

#saveResult("resultats/"+nomTest+"/resultat_riemann_MDM_xDawn.csv",test_all_pipeline(mesDonnees))
#print("Attention nouveau pre processing uniquement pour pyriemann")
#saveResult("resultats/"+nomTest+"/resultat_riemann_MDM_xDawn.csv",testRiemannMDMPlusXdawn(mesDonnees))
os.mkdir("resultats/" + nomTest)

saveResult("resultats/" + nomTest + "/resultat_riemann_MDM.csv",
           testRiemannMDM(mesDonnees))

#saveResult("resultats/"+nomTest+"/resultat_riemann_KNN.csv",testRiemannKNN(mesDonnees))
Example #9
def run(args=None):
    usage = "usage : %prog [options]"
    parser = optparse.OptionParser(usage=usage)

    parser.add_option('--cfg_file',
                      dest='cfg_file',
                      default=None,
                      help='File with settings from previously trained net')

    parser.add_option("--test",
                      action="store_true",
                      dest="test",
                      default=False)

    # Architecture
    parser.add_option("--layerSize",
                      dest="layerSize",
                      type="int",
                      default=1824)
    parser.add_option("--numLayers", dest="numLayers", type="int", default=5)
    parser.add_option("--temporalLayer",
                      dest="temporalLayer",
                      type="int",
                      default=3)

    # Optimization
    parser.add_option("--momentum",
                      dest="momentum",
                      type="float",
                      default=0.95)
    parser.add_option("--epochs", dest="epochs", type="int", default=20)
    parser.add_option("--step", dest="step", type="float", default=1e-5)
    parser.add_option(
        "--anneal",
        dest="anneal",
        type="float",
        default=1.3,
        help="Sets (learning rate := learning rate / anneal) after each epoch."
    )
    parser.add_option(
        '--reg',
        dest='reg',
        type='float',
        default=0.0,
        help='lambda for L2 regularization of the weight matrices')

    # Data
    parser.add_option("--dataDir",
                      dest="dataDir",
                      type="string",
                      default=TRAIN_DATA_DIR['fbank'])
    parser.add_option('--alisDir',
                      dest='alisDir',
                      type='string',
                      default=TRAIN_ALIS_DIR)
    parser.add_option('--startFile',
                      dest='startFile',
                      type='int',
                      default=1,
                      help='Start file for running testing')
    parser.add_option("--numFiles", dest="numFiles", type="int", default=384)
    parser.add_option("--inputDim",
                      dest="inputDim",
                      type="int",
                      default=41 * 15)
    parser.add_option("--rawDim", dest="rawDim", type="int", default=41 * 15)
    parser.add_option("--outputDim", dest="outputDim", type="int", default=35)
    parser.add_option("--maxUttLen",
                      dest="maxUttLen",
                      type="int",
                      default=MAX_UTT_LEN)

    # Save/Load
    parser.add_option(
        '--save_every',
        dest='save_every',
        type='int',
        default=10,
        help='During training, save parameters every x number of files')

    parser.add_option('--run_desc',
                      dest='run_desc',
                      type='string',
                      default='',
                      help='Description of experiment run')

    (opts, args) = parser.parse_args(args)

    if opts.cfg_file:
        cfg = load_config(opts.cfg_file)
    else:
        cfg = vars(opts)

    # These config values should be updated every time
    cfg['host'] = get_hostname()
    cfg['git_rev'] = get_git_revision()
    cfg['pid'] = os.getpid()

    # Create experiment output directory

    if not opts.cfg_file:
        time_string = str(TimeString())
        output_dir = pjoin(RUN_DIR, time_string)
        cfg['output_dir'] = output_dir
        if not os.path.exists(output_dir):
            print 'Creating %s' % output_dir
            os.makedirs(output_dir)
        opts.cfg_file = pjoin(output_dir, 'cfg.json')
    else:
        output_dir = cfg['output_dir']

    cfg['output_dir'] = output_dir
    cfg['in_file'] = pjoin(output_dir, 'params.pk')
    cfg['out_file'] = pjoin(output_dir, 'params.pk')
    cfg['test'] = opts.test
    if opts.test:
        cfg['dataDir'] = opts.dataDir
        cfg['numFiles'] = opts.numFiles
        cfg['startFile'] = opts.startFile
    if 'reg' not in cfg:
        cfg['reg'] = 0.0

    # Logging

    logging.basicConfig(filename=pjoin(output_dir, 'train.log'),
                        level=logging.DEBUG)
    logger = logging.getLogger()
    logger.addHandler(logging.StreamHandler())
    logger.info('Running on %s' % cfg['host'])

    # seed for debugging, turn off when stable
    np.random.seed(33)
    import random
    random.seed(33)

    if 'CUDA_DEVICE' in os.environ:
        cm.cuda_set_device(int(os.environ['CUDA_DEVICE']))
    else:
        cm.cuda_set_device(0)  # Default

    opts = CfgStruct(**cfg)

    # Testing
    if opts.test:
        test(opts)
        return

    alisDir = opts.alisDir if opts.alisDir else opts.dataDir
    loader = dl.DataLoader(opts.dataDir, opts.rawDim, opts.inputDim, alisDir)

    nn = rnnet.NNet(opts.inputDim,
                    opts.outputDim,
                    opts.layerSize,
                    opts.numLayers,
                    opts.maxUttLen,
                    temporalLayer=opts.temporalLayer,
                    reg=opts.reg)
    nn.initParams()

    SGD = sgd.SGD(nn, opts.maxUttLen, alpha=opts.step, momentum=opts.momentum)

    # Dump config
    cfg['param_count'] = nn.paramCount()
    dump_config(cfg, opts.cfg_file)

    # Training
    epoch_file = pjoin(output_dir, 'epoch')
    if os.path.exists(epoch_file):
        start_epoch = int(open(epoch_file, 'r').read()) + 1
    else:
        start_epoch = 0

    # Load model if specified
    if os.path.exists(opts.in_file):
        with open(opts.in_file, 'r') as fid:
            SGD.fromFile(fid)
            SGD.alpha = SGD.alpha / (opts.anneal**start_epoch)
            nn.fromFile(fid)

    num_files_file = pjoin(output_dir, 'num_files')

    for k in range(start_epoch, opts.epochs):
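        # Shuffle the training files each epoch and prefetch the first file
        # asynchronously; training can resume from a saved file index.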
        perm = np.random.permutation(opts.numFiles) + 1
        loader.loadDataFileAsynch(perm[0])

        file_start = 0
        if k == start_epoch:
            if os.path.exists(num_files_file):
                file_start = int(open(num_files_file, 'r').read().strip())
                logger.info('Starting from file %d, epoch %d' %
                            (file_start, start_epoch))
        else:
            open(num_files_file, 'w').write(str(file_start))

        for i in xrange(file_start, perm.shape[0]):
            start = time.time()
            data_dict, alis, keys, sizes = loader.getDataAsynch()
            # Prefetch
            if i + 1 < perm.shape[0]:
                loader.loadDataFileAsynch(perm[i + 1])
            SGD.run(data_dict, alis, keys, sizes)
            end = time.time()
            logger.info('File time %f' % (end - start))

            # Save parameters and cost
            if (i + 1) % opts.save_every == 0:
                logger.info('Saving parameters')
                with open(opts.out_file, 'wb') as fid:
                    SGD.toFile(fid)
                    nn.toFile(fid)
                    open(num_files_file, 'w').write('%d' % (i + 1))
                logger.info('Done saving parameters')
                with open(pjoin(output_dir, 'last_cost'), 'w') as fid:
                    if opts.reg > 0.0:
                        fid.write(str(SGD.expcost[-1] - SGD.regcost[-1]))
                    else:
                        fid.write(str(SGD.expcost[-1]))

        # Save epoch completed
        open(pjoin(output_dir, 'epoch'), 'w').write(str(k))

        # Save parameters for the epoch
        with open(opts.out_file + '.epoch{0:02}'.format(k), 'wb') as fid:
            SGD.toFile(fid)
            nn.toFile(fid)

        SGD.alpha = SGD.alpha / opts.anneal

    # Run now complete, touch sentinel file
    touch_file(pjoin(output_dir, 'sentinel'))
Example #10
import dataLoader
import numpy

myFileList = ["data/train/train_subject01.mat"]
mesDonnees = dataLoader.DataLoader(myFileList)
mesDonnees.pre_processing()
print(mesDonnees.data.shape)
print(mesDonnees.labels.shape)
    return new_fileList

def saveResult(name, result):
    with open(name, 'w') as myfile:
        wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
        wr.writerow(["Time(second)", "F1 Score"])
        for r in result:
            wr.writerow(r)

nomTest = "test_all_other_model_1train_1test"

#myFileList=["data/train/train_subject1.mat"]
#myFileList = getListOfFiles("data/train/")[:4]

mesDonnees = dict()
mesDonnees[1.5] = dataLoader.DataLoader(["data/train/train_subject01.mat"],
                                        concatenate=True)

mesDonnees_test = dict()
mesDonnees_test[1.5] = dataLoader.DataLoader(["data/train/train_subject02.mat"],
                                             concatenate=True)
#print("Removed the band-pass filter because the matrices were no longer positive definite afterwards")
#mesDonnees[1.5].filtre_Matrice()
print(len(mesDonnees[1.5].data))
print(len(mesDonnees[1.5].labels))

#saveResult("resultats/"+nomTest+"/resultat_riemann_MDM_xDawn.csv",test_all_pipeline(mesDonnees))
#print("Attention nouveau pre processing uniquement pour pyriemann")
#saveResult("resultats/"+nomTest+"/resultat_riemann_MDM_xDawn.csv",testRiemannMDMPlusXdawn(mesDonnees))
os.mkdir("resultats/"+nomTest)

saveResult("resultats/"+nomTest+"/resultat_riemann_MDM.csv",testRiemannMDM(mesDonnees,mesDonnees_test))
Example #12
def runSeq(opts):
    fid = open(opts.out_file, 'w')
    phone_map = get_char_map(opts.dataDir)
    print phone_map
    print len(phone_map)

    alisDir = opts.alisDir if opts.alisDir else opts.dataDir
    loader = dl.DataLoader(opts.dataDir, opts.rawDim, opts.inputDim, alisDir)

    hyps = list()
    refs = list()
    hypscores = list()
    refscores = list()
    numphones = list()
    subsets = list()
    alignments = list()

    if MODEL_TYPE != 'ngram':
        cfg_file = '/deep/u/zxie/rnnlm/13/cfg.json'
        params_file = '/deep/u/zxie/rnnlm/13/params.pk'
        #cfg_file = '/deep/u/zxie/dnn/11/cfg.json'
        #params_file = '/deep/u/zxie/dnn/11/params.pk'

        cfg = load_config(cfg_file)
        model_class, model_hps = get_model_class_and_params(MODEL_TYPE)
        opt_hps = OptimizerHyperparams()
        model_hps.set_from_dict(cfg)
        opt_hps.set_from_dict(cfg)

        clm = model_class(None, model_hps, opt_hps, train=False, opt='nag')
        with open(params_file, 'rb') as fin:
            clm.from_file(fin)
    else:
        from srilm import LM
        from decoder_config import LM_ARPA_FILE
        print 'Loading %s...' % LM_ARPA_FILE
        clm = LM(LM_ARPA_FILE)
        print 'Done.'
    #clm = None

    for i in range(opts.start_file, opts.start_file + opts.numFiles):
        data_dict, alis, keys, _ = loader.loadDataFileDict(i)
        # For later alignments
        keys = sorted(keys)

        # For Switchboard filter
        if DATA_SUBSET == 'eval2000':
            if SWBD_SUBSET == 'swbd':
                keys = [k for k in keys if k.startswith('sw')]
            elif SWBD_SUBSET == 'callhome':
                keys = [k for k in keys if k.startswith('en')]

        ll_file = pjoin(LIKELIHOODS_DIR, 'loglikelihoods_%d.pk' % i)
        ll_fid = open(ll_file, 'rb')
        probs_dict = pickle.load(ll_fid)

        # Parallelize decoding over utterances
        print 'Decoding utterances in parallel, n_jobs=%d, file=%d' % (
            NUM_CPUS, i)
        decoded_utts = Parallel(n_jobs=NUM_CPUS)(delayed(decode_utterance)(
            k, probs_dict[k], alis[k], phone_map, lm=clm) for k in keys)

        for k, (hyp, ref, hypscore, refscore,
                align) in zip(keys, decoded_utts):
            if refscore is None:
                refscore = 0.0
            if hypscore is None:
                hypscore = 0.0
            hyp = replace_contractions(hyp)
            fid.write(k + ' ' + ' '.join(hyp) + '\n')

            hyps.append(hyp)
            refs.append(ref)
            hypscores.append(hypscore)
            refscores.append(refscore)
            numphones.append(len(alis[k]))
            subsets.append('callhm' if k.startswith('en') else 'swbd')
            alignments.append(align)

    fid.close()

    # Pickle some values for computeStats.py
    pkid = open(opts.out_file.replace('.txt', '.pk'), 'wb')
    pickle.dump(hyps, pkid)
    pickle.dump(refs, pkid)
    pickle.dump(hypscores, pkid)
    pickle.dump(refscores, pkid)
    pickle.dump(numphones, pkid)
    pickle.dump(subsets, pkid)
    pickle.dump(alignments, pkid)
    pkid.close()
def load_data(fnum=1):
    loader = dl.DataLoader(DATA_DIR, RAW_DIM, INPUT_DIM)
    data_dict, alis, keys, _ = loader.loadDataFileDict(fnum)
    return data_dict, alis, keys
Example #14
    clf = LogisticRegression(penalty="l1", max_iter=10000, C=0.05)
    m = Model_all_pipeline(clf, mesDonnees_train)
    m.fit_representation()
    new_data = m.predict_representation(mesDonnees_train.data)
    new_labels = m.concat_labels(mesDonnees_train.labels)
    new_list_id = m.concat_labels(mesDonnees_test.list_id)
    #s = m.f1Score(new_data,new_labels)
    #print("f1score : ",s)
    m.fit(new_data, new_labels)
    new_data_test = m.predict_representation(mesDonnees_test.data)
    pred = m.predict(new_data_test)
    write_to_file("prediction_test.csv", pred, new_list_id)


#myFileList_train=["data/train/train_subject01.mat","data/train/train_subject02.mat"]
myFileList_train = getListOfFiles("data/train/")
myFileList_test = getListOfFiles("data/test/")

mesDonnees = dict()
mesDonnees_train = dataLoader.DataLoader(myFileList_train, concatenate=False)
mesDonnees_test = dataLoader.DataLoader(myFileList_test,
                                        concatenate=False,
                                        test_data=True)
print("Removed the band-pass filter because the matrices were no longer "
      "positive definite afterwards")
mesDonnees_train.filtre_Matrice()
mesDonnees_test.filtre_Matrice()
print("Attention nouveau pre processing uniquement pour pyriemann")
test_all_pipeline(mesDonnees_train, mesDonnees_test)
Example #15
    jusqua = int(len(l) / taille)
    for k in range(jusqua):
        new.append(np.mean(l[k * taille:(k + 1) * taille]))
    return new


def moyennationWindow(l, taille, decallage):
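    # Sliding-window mean: average blocks of `taille` samples, starting a new
    # block every `decallage` samples.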
    new = []
    jusqua = int(len(l) / decallage)
    for k in range(jusqua):
        new.append(np.mean(l[k * decallage:k * decallage + taille]))
    return new
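
# A minimal usage sketch with assumed values (not part of the original script):
# moyennationWindow(list(range(10)), taille=4, decallage=2)
# -> [1.5, 3.5, 5.5, 7.5, 8.5]  (means of l[0:4], l[2:6], l[4:8], l[6:10], l[8:10])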


reactionTime = 1
dataL = dataLoader.DataLoader(["data/subject1/Session1/1.gdf"], reactionTime)

# Visualize the data
# Two label types: 1 and 2
data = dataL.data
labels = dataL.labels
print(len(labels))
collect = collections.Counter(labels)
print("répartition des différentes classes : ")
print(collect)
groupeFlash = labels.reshape((-1, 12))
"""
On predit la classe majoritaire
classe 1 :
Précision de 0.8333
Rappel de 1
Example #16

def saveResult(name, result):
    with open(name, 'w') as myfile:
        wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
        wr.writerow(["Time(second)", "F1 Score"])
        for r in result:
            wr.writerow(r)


myFileList = ["data/subject1/Session1/1.gdf"]
reactTimeToTest = [1, 0.1, 0.04]

mesDonnees = dict()
for r in reactTimeToTest:
    mesDonnees[r] = dataLoader.DataLoader(myFileList, r)

os.mkdir("resultats/TFKNN_valeurs_k")
for r, donnees in mesDonnees.items():
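    # For each reaction-time window, sweep the number of neighbors k
    # (1, 3, ..., 19) and record the resulting F1 score.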
    print(r)
    result = []
    for k in range(1, 20, 2):
        neigh = KNeighborsClassifier(n_neighbors=k)
        m = GenericModele(neigh, donnees)
        m.dataToTf()
        m.vectorize(tf=True)
        m.dataToMoy(nbPoint, slider)
        s = m.f1Score()
        result.append([k, s])
    plt.clf()
    f = plt.figure()
Example #17
def run(args=None):
    usage = "usage : %prog [options]"
    parser = optparse.OptionParser(usage=usage)

    parser.add_option("--test",
                      action="store_true",
                      dest="test",
                      default=False)

    # Architecture
    parser.add_option("--layers",
                      dest="layers",
                      type="string",
                      default="100,100",
                      help="layer1size,layer2size,...,layernsize")
    parser.add_option("--temporal_layer",
                      dest="temporalLayer",
                      type="int",
                      default=-1)

    # Optimization
    parser.add_option("--optimizer",
                      dest="optimizer",
                      type="string",
                      default="momentum")
    parser.add_option("--momentum", dest="momentum", type="float", default=0.9)
    parser.add_option("--epochs", dest="epochs", type="int", default=1)
    parser.add_option("--step", dest="step", type="float", default=1e-4)
    parser.add_option(
        "--anneal",
        dest="anneal",
        type="float",
        default=1,
        help="Sets (learning rate := learning rate / anneal) after each epoch."
    )

    # Data
    parser.add_option(
        "--dataDir",
        dest="dataDir",
        type="string",
        default=
        "/scail/group/deeplearning/speech/awni/kaldi-stanford/kaldi-trunk/egs/swbd/s5b/exp/train_ctc/"
    )
    parser.add_option("--numFiles", dest="numFiles", type="int", default=384)
    parser.add_option("--inputDim",
                      dest="inputDim",
                      type="int",
                      default=41 * 15)
    parser.add_option("--rawDim", dest="rawDim", type="int", default=41 * 15)
    parser.add_option("--outputDim", dest="outputDim", type="int", default=34)

    parser.add_option("--outFile",
                      dest="outFile",
                      type="string",
                      default="models/test.bin")
    parser.add_option("--inFile", dest="inFile", type="string", default=None)

    (opts, args) = parser.parse_args(args)
    opts.layers = [int(l) for l in opts.layers.split(',')]

    # Testing
    if opts.test:
        test(opts)
        return

    loader = dl.DataLoader(opts.dataDir, opts.rawDim, opts.inputDim)
    #NOTE at some point we need to unify the nnet and rnnet modules
    nn = None
    if opts.temporalLayer > 0:
        nn = rnnet.RNNet(opts.inputDim, opts.outputDim, opts.layers,
                         opts.temporalLayer)
    else:
        nn = nnet.NNet(opts.inputDim, opts.outputDim, opts.layers)

    nn.initParams()

    # Load model if exists
    if opts.inFile is not None:
        with open(opts.inFile, 'r') as fid:
            _ = pickle.load(fid)
            _ = pickle.load(fid)
            _ = pickle.load(fid)
            nn.fromFile(fid)

    SGD = sgd.SGD(nn,
                  alpha=opts.step,
                  optimizer=opts.optimizer,
                  momentum=opts.momentum)

    # Setup some random keys for tracing
    with open('randKeys.bin', 'r') as fid:
        traceK = pickle.load(fid)
    for k in traceK:
        nn.hist[k] = []

    # write initial model to disk
    with open(opts.outFile, 'w') as fid:
        pickle.dump(opts, fid)
        pickle.dump(SGD.costt, fid)
        pickle.dump(nn.hist, fid)
        nn.toFile(fid)

    # Training
    import time
    for _ in range(opts.epochs):
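        # Visit the training data files in a fresh random order each epoch.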
        for i in np.random.permutation(opts.numFiles) + 1:
            start = time.time()
            data_dict, alis, keys, sizes = loader.loadDataFileDict(i)
            SGD.run_seq(data_dict, alis, keys, sizes)
            end = time.time()
            print "File time %f" % (end - start)

    # Save anneal after epoch
        SGD.alpha = SGD.alpha / opts.anneal
        with open(opts.outFile, 'w') as fid:
            pickle.dump(opts, fid)
            pickle.dump(SGD.costt, fid)
            pickle.dump(nn.hist, fid)
            nn.toFile(fid)
Example #18
            for i in xrange(w.shape[0]):
                for j in xrange(w.shape[1]):
                    w.numpy_array[i,j] += epsilon
                    w.copy_to_device()
                    costP,_,_ = self.costAndGrad(data,labels)
                    numGrad = (costP - cost) / epsilon
                    w.numpy_array[i,j] -= epsilon
                    print "Analytic %f, Numeric %f"%(dw.numpy_array[i,j],numGrad)

if __name__=='__main__':
    import dataLoader as dl
    np.random.seed(33)
    layerSize = 200
    numLayers = 3

    dataDir = "/scail/group/deeplearning/speech/awni/kaldi-stanford/kaldi-trunk/egs/swbd/s5b/exp/train_ctc/"
    inputDim = 41*15
    rawDim = 41*15
    outputDim = 35
    maxUttLen = 1500

    loader = dl.DataLoader(dataDir,rawDim,inputDim)
    data_dict,alis,keys,_ = loader.loadDataFileDict(1)
    data,labels = data_dict[keys[3]],np.array(alis[keys[3]],dtype=np.int32)
    nn = NNet(inputDim,outputDim,layerSize,numLayers,maxUttLen)
    nn.initParams()
    cost,grad,_ = nn.costAndGrad(data,labels)

    nn.check_grad(data,labels)

Example #19
import sgd
import rnnet as rnnet
import dataLoader as dl
import pdb

inFile = "models/swbd_layers_5_2048_temporal_3_step_1e-5_mom_.95_anneal_1.3.bin"

np.random.seed(33)
import random
random.seed(33)

# Load model if specified
with open(inFile, 'r') as fid:
    opts = pickle.load(fid)
    loader = dl.DataLoader(opts.dataDir, opts.rawDim, opts.inputDim)

    nn = rnnet.NNet(opts.inputDim,
                    opts.outputDim,
                    opts.layerSize,
                    opts.numLayers,
                    opts.maxUttLen,
                    temporalLayer=opts.temporalLayer)
    nn.initParams()
    SGD = sgd.SGD(nn, opts.maxUttLen, alpha=opts.step, momentum=opts.momentum)
    SGD.expcost = pickle.load(fid)
    SGD.it = 100
    nn.fromFile(fid)
    velocity = pickle.load(fid)
    for (w, b), (wv, bv) in zip(velocity, SGD.velocity):
        wv.copy_to_host()