Beispiel #1
0
    def display_wrong_preds(self, splitClf, devX, devy):
        """Retrain splitClf's MLP and print a random window of 10 test
        samples (sentence, head/tail spans, prediction vs. gold label) for
        manual error analysis, followed by overall accuracy on (devX, devy).

        Args:
            splitClf: classifier wrapper carrying the MLP config, the
                train/valid splits and cuda flags.
            devX: evaluation features (converted to a cuda FloatTensor).
            devy: evaluation labels (converted to a cuda LongTensor).

        Raises:
            ValueError: if splitClf.usepytorch is False, or fewer than 10
                evaluation samples are provided.
        """
        # Human-readable names for the three grammar-role classes.
        add_dict = {
            0: '0, No additional grammar role',
            1: '1, Appositional modifier',
            2: '2, Noun compound modifier',
        }
        if not splitClf.usepytorch:
            # The original code fell through to `clf.model.eval()` with `clf`
            # never assigned (NameError) — fail fast with a clear message.
            raise ValueError('display_wrong_preds requires a pytorch '
                             'classifier (splitClf.usepytorch=True)')
        clf = MLP(splitClf.classifier_config,
                  inputdim=splitClf.featdim,
                  nclasses=splitClf.nclasses,
                  l2reg=0.0001,
                  seed=splitClf.seed,
                  cudaEfficient=splitClf.cudaEfficient)
        clf.optim = splitClf.config['optim']
        clf.epoch_size = splitClf.config['epoch_size']
        print(
            f"MLP has epoch size {clf.epoch_size}, optimizer {clf.optim}.")
        clf.fit(splitClf.X['train'],
                splitClf.y['train'],
                validation_data=(splitClf.X['valid'], splitClf.y['valid']))
        clf.model.eval()
        correct = 0
        if not isinstance(devX, torch.cuda.FloatTensor) or clf.cudaEfficient:
            devX = torch.FloatTensor(devX).cuda()
            devy = torch.LongTensor(devy).cuda()

        with torch.no_grad():
            Xbatch = devX
            ybatch = devy
            if clf.cudaEfficient:
                Xbatch = Xbatch.cuda()
                ybatch = ybatch.cuda()
            output = clf.model(Xbatch)
            # Predicted class = argmax over the class dimension.
            pred = output.data.max(1)[1]
            correct += pred.long().eq(ybatch.data.long()).sum().item()

            # Randomly display 10 samples, for error analysis
            if len(devX) < 10:
                # np.random.randint would raise an opaque "low >= high" error.
                raise ValueError('need at least 10 samples for error display')
            ran_i = np.random.randint(0, len(devX) - 9)
            for i in range(ran_i, ran_i + 10):
                print(f"Sentence id: {self.task_data['test']['id'][i]}")
                tokens = self.task_data['test']['X'][i]
                sentence = ' '.join(tokens)
                for j in range(len(sentence) // 150 +
                               1):  # Wrap the text manually
                    print(sentence[150 * j:150 * (j + 1)])
                head_index = self.task_data['test']['head'][i]
                tail_index = self.task_data['test']['tail'][i]
                head = ' '.join(tokens[head_index[0]:head_index[1] + 1])
                tail = ' '.join(tokens[tail_index[0]:tail_index[1] + 1])
                print(
                    f"Head: {head} at {head_index}, tail: {tail} at {tail_index}."
                )
                print(
                    f"Prediction: {pred[i]}, Actual y: {add_dict[ybatch[i].item()]}. \n"
                )
            accuracy = 1.0 * correct / len(devX)
            print(f"Test Accuracy: {accuracy}.")
Beispiel #2
0
    def run(self):
        """Nested cross-validation: for each outer fold, select the best l2
        regularization on the inner folds, retrain on the outer training data
        and score on the outer test fold.

        Returns:
            (devaccuracy, testaccuracy, testf1): mean inner-CV accuracy,
            mean outer test accuracy, and mean F1 (binary tasks only,
            otherwise None), all rounded to 2 decimals.
        """
        logging.info(
            'Training {0} with (inner) {1}-fold cross-validation'.format(
                self.modelname, self.k))
        if self.usepytorch:
            regs = [10**t for t in range(-5, -1)]
        else:
            regs = [2**t for t in range(-2, 4, 1)]
        outer_cv = StratifiedKFold(n_splits=self.k, shuffle=True,
                                   random_state=1111)
        inner_cv = StratifiedKFold(n_splits=self.k, shuffle=True,
                                   random_state=1111)
        count = 0
        for outer_train, outer_test in outer_cv.split(self.X, self.y):
            count += 1
            X_tr, X_te = self.X[outer_train], self.X[outer_test]
            y_tr, y_te = self.y[outer_train], self.y[outer_test]
            scores = []
            for reg in regs:
                fold_scores = []
                for fit_idx, val_idx in inner_cv.split(X_tr, y_tr):
                    X_fit, y_fit = X_tr[fit_idx], y_tr[fit_idx]
                    X_val, y_val = X_tr[val_idx], y_tr[val_idx]
                    if self.usepytorch:
                        model = MLP(self.classifier_config,
                                    inputdim=self.featdim,
                                    nclasses=self.nclasses,
                                    l2reg=reg,
                                    seed=self.seed)
                        model.fit(X_fit, y_fit,
                                  validation_data=(X_val, y_val))
                    else:
                        model = LogisticRegression(C=reg,
                                                   random_state=self.seed)
                        model.fit(X_fit, y_fit)
                    fold_scores.append(model.score(X_val, y_val))
                scores.append(round(100 * np.mean(fold_scores), 2))
            optreg = regs[np.argmax(scores)]
            logging.info('Best param found at split {0}: l2reg = {1} \
                with score {2}'.format(count, optreg, np.max(scores)))
            self.devresults.append(np.max(scores))
            # Retrain on the full outer training fold with the winning reg.
            if self.usepytorch:
                model = MLP(self.classifier_config,
                            inputdim=self.featdim,
                            nclasses=self.nclasses,
                            l2reg=optreg,
                            seed=self.seed)
                model.fit(X_tr, y_tr, validation_split=0.05)
            else:
                model = LogisticRegression(C=optreg, random_state=self.seed)
                model.fit(X_tr, y_tr)

            yhat = model.predict(X_te)
            if self.nclasses == 2:
                # Args are (pred, true); for binary F1 this swap trades
                # precision and recall, which the harmonic mean is
                # symmetric under, so the value is unchanged.
                self.f1results.append(round(100 * f1_score(yhat, y_te), 2))

            self.testresults.append(round(100 * model.score(X_te, y_te), 2))

        devaccuracy = round(np.mean(self.devresults), 2)
        testaccuracy = round(np.mean(self.testresults), 2)
        testf1 = round(np.mean(self.f1results), 2) if self.f1results else None
        return devaccuracy, testaccuracy, testf1
Beispiel #3
0
 def run(self):
     """Tune l2 regularization on the validation split, retrain with the
     best value and evaluate on the test split.

     Returns:
         (devaccuracy, testaccuracy, pred, probs): accuracies plus the
         per-sample test predictions and class probabilities (as lists).
     """
     print(self.classifier_config)
     logging.info('Training {0} with standard validation..'.format(
         self.modelname))
     regs = [10**t for t in range(-5, -1)] if self.usepytorch else \
            [2**t for t in range(-2, 4, 1)]
     if self.noreg:
         # Effectively disable regularization: tiny l2 for the MLP, huge C
         # for LogisticRegression (C is the inverse regularization strength).
         regs = [1e-9 if self.usepytorch else 1e9]
     scores = []
     for reg in regs:
         if self.usepytorch:
             clf = MLP(self.classifier_config,
                       inputdim=self.featdim,
                       nclasses=self.nclasses,
                       l2reg=reg,
                       seed=self.seed,
                       cudaEfficient=self.cudaEfficient)
             # TODO: Find a hack for reducing nb epoches in SNLI
             clf.fit(self.X['train'],
                     self.y['train'],
                     validation_data=(self.X['valid'], self.y['valid']))
         else:
             clf = LogisticRegression(C=reg, random_state=self.seed)
             clf.fit(self.X['train'], self.y['train'])
         scores.append(
             round(100 * clf.score(self.X['valid'], self.y['valid']), 2))
     logging.info([('reg:' + str(regs[idx]), scores[idx])
                   for idx in range(len(scores))])
     optreg = regs[np.argmax(scores)]
     devaccuracy = np.max(scores)
     logging.info('Validation : best param found is reg = {0} with score \
         {1}'.format(optreg, devaccuracy))
     logging.info('Evaluating...')
     # (A stray `clf = LogisticRegression(...)` that used to sit here was
     # always overwritten by both branches below and has been removed.)
     if self.usepytorch:
         clf = MLP(self.classifier_config,
                   inputdim=self.featdim,
                   nclasses=self.nclasses,
                   l2reg=optreg,
                   seed=self.seed,
                   cudaEfficient=self.cudaEfficient)
         # TODO: Find a hack for reducing nb epoches in SNLI
         clf.fit(self.X['train'],
                 self.y['train'],
                 validation_data=(self.X['valid'], self.y['valid']))
     else:
         clf = LogisticRegression(C=optreg, random_state=self.seed)
         clf.fit(self.X['train'], self.y['train'])
     # NOTE(review): the tuple-unpack below assumes clf.predict returns a
     # (labels, probabilities) pair, which the project MLP appears to do;
     # sklearn's LogisticRegression.predict returns labels only, so the
     # non-pytorch path would crash here — confirm it is ever used.
     yhat, yprobs = clf.predict(self.X['test'])
     # Materialize predictions/probabilities as plain lists for the caller.
     probs = [list(p) for p in yprobs]
     print(probs)
     pred = [list(p) for p in yhat]
     print(pred)
     testaccuracy = clf.score(self.X['test'], self.y['test'])
     testaccuracy = round(100 * testaccuracy, 2)
     return devaccuracy, testaccuracy, pred, probs
Beispiel #4
0
 def run(self):
     """Select the best l2 regularization via k-fold cross-validation on
     the training set, retrain on all training data, and score on the test
     set.

     Returns:
         (devaccuracy, testaccuracy, yhat): best mean CV accuracy, test
         accuracy (both percentages), and the test-set predictions.
     """
     # cross-validation
     logging.info('Training {0} with {1}-fold cross-validation'.format(
         self.modelname, self.k))
     if self.usepytorch:
         regs = [10**t for t in range(-5, -1)]
     else:
         regs = [2**t for t in range(-1, 6, 1)]
     folds = StratifiedKFold(n_splits=self.k,
                             shuffle=True,
                             random_state=self.seed)
     scores = []
     for reg in regs:
         fold_scores = []
         for fit_idx, val_idx in folds.split(self.train['X'],
                                             self.train['y']):
             # Split data
             X_fit = self.train['X'][fit_idx]
             y_fit = self.train['y'][fit_idx]
             X_val = self.train['X'][val_idx]
             y_val = self.train['y'][val_idx]
             # Train classifier
             if self.usepytorch:
                 model = MLP(self.classifier_config,
                             inputdim=self.featdim,
                             nclasses=self.nclasses,
                             l2reg=reg,
                             seed=self.seed)
                 model.fit(X_fit, y_fit, validation_data=(X_val, y_val))
             else:
                 model = LogisticRegression(C=reg, random_state=self.seed)
                 model.fit(X_fit, y_fit)
             fold_scores.append(model.score(X_val, y_val))
         # Append mean score
         scores.append(round(100 * np.mean(fold_scores), 2))
     # evaluation
     logging.info([('reg:' + str(r), s) for r, s in zip(regs, scores)])
     optreg = regs[np.argmax(scores)]
     devaccuracy = np.max(scores)
     logging.info('Cross-validation : best param found is reg = {0} \
         with score {1}'.format(optreg, devaccuracy))
     logging.info('Evaluating...')
     if self.usepytorch:
         clf = MLP(self.classifier_config,
                   inputdim=self.featdim,
                   nclasses=self.nclasses,
                   l2reg=optreg,
                   seed=self.seed)
         clf.fit(self.train['X'], self.train['y'], validation_split=0.05)
     else:
         clf = LogisticRegression(C=optreg, random_state=self.seed)
         clf.fit(self.train['X'], self.train['y'])
     yhat = clf.predict(self.test['X'])
     testaccuracy = round(100 * clf.score(self.test['X'], self.test['y']), 2)
     return devaccuracy, testaccuracy, yhat
Beispiel #5
0
    def run(self):
        """Single-split, three-task variant of nested cross-validation.

        Uses only the first StratifiedKFold split per task (outer and
        inner), optionally warm-starts from a previously trained classifier
        stored in self.clf, and reports the mean accuracy over the three
        tasks.

        Returns:
            (devaccuracy, testaccuracy, self.clf): rounded mean accuracies
            and the trained classifier (also cached on self.clf).
        """
        logging.info('Training {0} with (inner) {1}-fold cross-validation'
                     .format(self.modelname, self.k))

        regs = [10**t for t in range(-5, -1)] if self.usepytorch else \
               [2**t for t in range(-2, 4, 1)]
        skf = StratifiedKFold(n_splits=self.k, shuffle=True, random_state=1111)
        innerskf = StratifiedKFold(n_splits=self.k, shuffle=True,
                                   random_state=1111)
        count = 0
        # NOTE(review): only the FIRST outer fold of each task is used, so
        # despite the k-fold setup this is one train/test split per task.
        train_idx0, test_idx0=list(skf.split(self.X0, self.y0))[0]
        train_idx1, test_idx1=list(skf.split(self.X1, self.y1))[0]
        train_idx2, test_idx2=list(skf.split(self.X2, self.y2))[0]
        count += 1
        X_train0, X_test0 = self.X0[train_idx0],self.X0[test_idx0]
        y_train0, y_test0 = self.y0[train_idx0], self.y0[test_idx0]
        X_train1, X_test1 = self.X1[train_idx1],self.X1[test_idx1]
        y_train1, y_test1 = self.y1[train_idx1],self.y1[test_idx1]
        X_train2,X_test2 = self.X2[train_idx2],self.X2[test_idx2]
        y_train2, y_test2 = self.y2[train_idx2],self.y2[test_idx2]
        scores = []
        for reg in regs:
            print(len(regs))
            regscores = []
            # Likewise, only the first inner fold per task is used below.
            isk0=list(innerskf.split(X_train0, y_train0))
            isk1=list(innerskf.split(X_train1, y_train1))
            isk2=list(innerskf.split(X_train2, y_train2))
            # NOTE(review): n is never used — apparently left over from the
            # commented-out inner-fold loop below.
            n=len(isk0)
                #for inner_train_idx, inner_test_idx in innerskf.split(X_train, y_train):
            inner_train_idx0, inner_test_idx0=isk0[0]
            inner_train_idx1, inner_test_idx1=isk1[0]
            inner_train_idx2, inner_test_idx2=isk2[0]
            X0_in_train,y0_in_train=X_train0[inner_train_idx0],y_train0[inner_train_idx0]
            X1_in_train,y1_in_train=X_train1[inner_train_idx1],y_train1[inner_train_idx1]
            X2_in_train,y2_in_train=X_train2[inner_train_idx2],y_train2[inner_train_idx2]
            X0_in_test=X_train0[inner_test_idx0]
            y0_in_test=y_train0[inner_test_idx0]
            X1_in_test,y1_in_test=X_train1[inner_test_idx1],y_train1[inner_test_idx1]
            X2_in_test,y2_in_test=X_train2[inner_test_idx2],y_train2[inner_test_idx2]
            # self.clf == 0 presumably means "no previously trained
            # classifier"; otherwise warm-start from self.clf — TODO confirm.
            if self.clf==0:#True:#
                if self.usepytorch:
                    clf = MLP(self.classifier_config, appr=0, inputdim=self.featdim,
                        nclasses=self.nclasses, precedent_splits=self.precedent_splits, 
                        devothertask=self.devothertask, l2reg=reg, seed=self.seed)
                    clf.fit(X0_in_train, y0_in_train,X1_in_train, y1_in_train,X2_in_train, y2_in_train,
                    validation_data=[(X0_in_test, y0_in_test),(X1_in_test, y1_in_test),(X2_in_test, y2_in_test)])
                else:
                    clf = LogisticRegression(C=reg, random_state=self.seed)
                    clf.fit(X0_in_train, y0_in_train)
            else :
                # Warm start: copy weights from the previous classifier.
                clf = MLP(self.classifier_config, appr=0, inputdim=self.featdim,
                        nclasses=self.nclasses, precedent_splits=self.precedent_splits, 
                        devothertask=self.clf.devothertask, l2reg=reg, seed=self.seed)
                clf.model.load_state_dict(self.clf.model.state_dict())
                clf.fit(X0_in_train, y0_in_train,X1_in_train, y1_in_train,X2_in_train, y2_in_train,
                    validation_data=[(X0_in_test, y0_in_test),(X1_in_test, y1_in_test),(X2_in_test, y2_in_test)])

            # Mean validation score over the three tasks for this reg value.
            regscores.append((clf.score(X0_in_test, y0_in_test)+clf.score(X1_in_test, y1_in_test)+clf.score(X2_in_test, y2_in_test))/3)
            scores.append(round(100*np.mean(regscores), 2))
        # HACK(review): a constant 0.77 is appended to scores and optreg
        # below is pinned to 1e-5, so the grid search above is effectively
        # ignored — TODO confirm this is intentional debug behaviour.
        scores.append(0.77)
        optreg = 10**(-5)#regs[np.argmax(scores)]
        logging.info('Best param found at split {0}: l2reg = {1} \
            with score {2}'.format(count, optreg, np.max(scores)))
        self.devresults.append(np.max(scores))
        if self.clf==0:#True:#
            if self.usepytorch:
                clf = MLP(self.classifier_config, appr=1, inputdim=self.featdim,
                        nclasses=self.nclasses, precedent_splits=self.precedent_splits,
                        devothertask=self.devothertask, l2reg=optreg, seed=self.seed)

                clf.fit(X_train0, y_train0,X_train1, y_train1,X_train2, y_train2, validation_split=0.05)
            else:
                clf = LogisticRegression(C=optreg, random_state=self.seed)
                clf.fit(X_train0, y_train0)
                print('fail')
        else :
            # Warm start for the final model as well.
            clf = MLP(self.classifier_config, appr=1, inputdim=self.featdim,
                nclasses=self.nclasses, precedent_splits=self.precedent_splits, 
                devothertask=self.clf.devothertask, l2reg=optreg, seed=self.seed)
            clf.model.load_state_dict(self.clf.model.state_dict())
            clf.fit(X_train0, y_train0,X_train1, y_train1,X_train2, y_train2, validation_split=0.05)
        # Cache the trained classifier for warm-starting subsequent calls.
        self.clf=clf

        self.testresults.append(round(100*(self.clf.score(X_test0, y_test0)+self.clf.score(X_test1, y_test1)+self.clf.score(X_test2, y_test2))/3, 2))
        print('Test tache 1: ',round(100*(self.clf.score(X_test0, y_test0))))
        print('Test tache 2: ',round(100*(self.clf.score(X_test1, y_test1))))
        print('Test tache 3: ',round(100*(self.clf.score(X_test2, y_test2))))
        devaccuracy = round(np.mean(self.devresults), 2)
        testaccuracy = round(np.mean(self.testresults), 2)
        return devaccuracy, testaccuracy, self.clf
Beispiel #6
0
    def _build_torch_clf(self, l2reg):
        """Build the pytorch classifier named by self.classifier ('LogReg'
        or 'MLP') with the given l2 regularization, applying the
        SNLI-specific epoch-count overrides when configured.

        Raises:
            ValueError: for an unknown self.classifier value (the original
                code silently fell back to an unrelated sklearn model that
                crashed later in fit()).
        """
        if self.classifier == 'LogReg':
            clf = LogReg(inputdim=self.featdim,
                         nclasses=self.nclasses,
                         l2reg=l2reg,
                         seed=self.seed,
                         cudaEfficient=self.cudaEfficient)
        elif self.classifier == 'MLP':
            clf = MLP(inputdim=self.featdim,
                      hiddendim=self.nhid,
                      nclasses=self.nclasses,
                      l2reg=l2reg,
                      seed=self.seed,
                      cudaEfficient=self.cudaEfficient)
        else:
            raise ValueError(
                'unknown classifier: {0}'.format(self.classifier))
        # small hack : MultiNLI/SNLI specific
        if self.nepoches:
            clf.nepoches = self.nepoches
        if self.maxepoch:
            clf.maxepoch = self.maxepoch
        return clf

    def run(self):
        """Tune l2 regularization on the validation split, retrain with the
        best value and evaluate on the test split.

        Returns:
            (devaccuracy, testaccuracy): best validation accuracy and the
            corresponding test accuracy in percent, rounded to 2 decimals.
        """
        logging.info('Training {0} with standard validation..'.format(
            self.modelname))
        regs = [10**t for t in range(-5, -1)] if self.usepytorch else \
               [2**t for t in range(-2, 4, 1)]
        if self.noreg:
            regs = [0.]
        scores = []
        for reg in regs:
            if self.usepytorch:
                clf = self._build_torch_clf(reg)
                clf.fit(self.X['train'],
                        self.y['train'],
                        validation_data=(self.X['valid'], self.y['valid']))
            else:
                clf = LogisticRegression(C=reg, random_state=self.seed)
                clf.fit(self.X['train'], self.y['train'])
            scores.append(
                round(100 * clf.score(self.X['valid'], self.y['valid']), 2))
        logging.info([('reg:' + str(regs[idx]), scores[idx])
                      for idx in range(len(scores))])
        optreg = regs[np.argmax(scores)]
        devaccuracy = np.max(scores)
        logging.info('Validation : best param found is reg = {0} with score \
            {1}'.format(optreg, devaccuracy))
        logging.info('Evaluating...')
        # (A stray `clf = LogisticRegression(...)` that used to sit here was
        # a broken fallback — always overwritten or crashing later — and has
        # been removed in favour of the explicit ValueError in the helper.)
        if self.usepytorch:
            clf = self._build_torch_clf(optreg)
            clf.fit(self.X['train'],
                    self.y['train'],
                    validation_data=(self.X['valid'], self.y['valid']))
        else:
            clf = LogisticRegression(C=optreg, random_state=self.seed)
            clf.fit(self.X['train'], self.y['train'])

        testaccuracy = clf.score(self.X['test'], self.y['test'])
        testaccuracy = round(100 * testaccuracy, 2)
        return devaccuracy, testaccuracy
Beispiel #7
0
    def run(self):
        """Tune l2 regularization on the validation split, retrain with the
        best value and evaluate on the test split.

        Returns:
            (devaccuracy, testaccuracy): accuracies in percent, rounded to
            2 decimals.
        """
        logging.info("Training {0} with standard validation..".format(
            self.modelname))
        regs = ([10**t for t in range(-5, -1)]
                if self.usepytorch else [2**t for t in range(-2, 4, 1)])
        if self.noreg:
            regs = [0.0]
        scores = []
        for reg in regs:
            if self.usepytorch:
                clf = MLP(
                    self.classifier_config,
                    inputdim=self.featdim,
                    nclasses=self.nclasses,
                    l2reg=reg,
                    seed=self.seed,
                    cudaEfficient=self.cudaEfficient,
                )

                # TODO: Find a hack for reducing nb epoches in SNLI
                clf.fit(
                    self.X["train"],
                    self.y["train"],
                    validation_data=(self.X["valid"], self.y["valid"]),
                )
            else:
                clf = LogisticRegression(C=reg, random_state=self.seed)
                clf.fit(self.X["train"], self.y["train"])
            score = 100 * clf.score(self.X["valid"], self.y["valid"])
            # float() accepts a 0-dim tensor or numpy scalar as well as a
            # plain float; the previous score.item() would break whenever
            # score() returns a plain Python float (no .item attribute).
            scores.append(round(float(score), 2))
        logging.info([("reg:" + str(regs[idx]), scores[idx])
                      for idx in range(len(scores))])
        optreg = regs[np.argmax(scores)]
        devaccuracy = np.max(scores)
        logging.info("Validation : best param found is reg = {0} with score \
            {1}".format(optreg, devaccuracy))
        logging.info("Evaluating...")
        # (A stray, always-overwritten `clf = LogisticRegression(...)` that
        # used to sit here has been removed.)
        if self.usepytorch:
            clf = MLP(
                self.classifier_config,
                inputdim=self.featdim,
                nclasses=self.nclasses,
                l2reg=optreg,
                seed=self.seed,
                cudaEfficient=self.cudaEfficient,
            )

            # TODO: Find a hack for reducing nb epoches in SNLI
            clf.fit(
                self.X["train"],
                self.y["train"],
                validation_data=(self.X["valid"], self.y["valid"]),
            )
        else:
            clf = LogisticRegression(C=optreg, random_state=self.seed)
            clf.fit(self.X["train"], self.y["train"])

        testaccuracy = clf.score(self.X["test"], self.y["test"])
        # Same tensor/float-safe conversion as in the tuning loop.
        testaccuracy = round(100 * float(testaccuracy), 2)
        return devaccuracy, testaccuracy
Beispiel #8
0
    def run(self):
        """Tune l2 regularization on the validation split (skipped entirely
        when self.noreg), retrain and evaluate on the test split.

        Returns:
            (devaccuracy, testaccuracy, devprobs, testprobs): accuracies in
            percent plus predicted class probabilities on the validation and
            test splits.
        """
        logging.info('Training {0} with standard validation..'.format(
            self.modelname))
        regs = [10**t for t in range(-5, 6)] if self.usepytorch else \
               [2**t for t in range(-2, 4, 1)]
        if self.noreg:
            # Skip the grid search entirely and train once without l2.
            # NOTE(review): with usepytorch=False this later constructs
            # LogisticRegression(C=0.), which sklearn rejects (C must be
            # positive) — presumably the noreg path is pytorch-only; confirm.
            regs = []  #[0.]
            optreg = 0.
        else:
            logging.debug("\tStarting to tune regularization")
        scores = []
        for reg in regs:
            if self.usepytorch:
                if self.classifier == 'LogReg':
                    clf = LogReg(inputdim=self.featdim,
                                 nclasses=self.nclasses,
                                 l2reg=reg,
                                 seed=self.seed,
                                 cudaEfficient=self.cudaEfficient,
                                 train_rank=self.train_rank)
                elif self.classifier == 'MLP':
                    clf = MLP(inputdim=self.featdim,
                              hiddendim=self.nhid,
                              nclasses=self.nclasses,
                              l2reg=reg,
                              seed=self.seed,
                              cudaEfficient=self.cudaEfficient,
                              train_rank=self.train_rank)
                # small hack : SNLI specific
                if self.nepoches:
                    clf.nepoches = self.nepoches
                if self.maxepoch:
                    clf.maxepoch = self.maxepoch
                clf.fit(self.X['train'],
                        self.y['train'],
                        validation_data=(self.X['valid'], self.y['valid']))
            else:
                clf = LogisticRegression(C=reg, random_state=self.seed)
                clf.fit(self.X['train'], self.y['train'])
            scores.append(
                round(100 * clf.score(self.X['valid'], self.y['valid']), 2))
        logging.info([('reg:' + str(regs[idx]), scores[idx])
                      for idx in range(len(scores))])
        if not self.noreg:
            optreg = regs[np.argmax(scores)]
            devaccuracy = np.max(scores)
            logging.info(
                'Validation : best param found is reg = {0} with score \
                {1}'.format(optreg, devaccuracy))
        #clf = LogisticRegression(C=optreg, random_state=self.seed) #??
        logging.info('Evaluating...')
        if self.usepytorch:
            if self.classifier == 'LogReg':
                logging.debug("\tCreating log reg classifier")
                clf = LogReg(inputdim=self.featdim,
                             nclasses=self.nclasses,
                             l2reg=optreg,
                             seed=self.seed,
                             cudaEfficient=self.cudaEfficient,
                             train_rank=self.train_rank)
            elif self.classifier == 'MLP':
                # BUGFIX: the message previously began with "\C" (an invalid
                # escape that printed a literal backslash) instead of the
                # intended tab used by the sibling branch above.
                logging.debug("\tCreating MLP classifier")
                clf = MLP(inputdim=self.featdim,
                          hiddendim=self.nhid,
                          nclasses=self.nclasses,
                          l2reg=optreg,
                          seed=self.seed,
                          cudaEfficient=self.cudaEfficient,
                          train_rank=self.train_rank)
            # small hack : MultiNLI/SNLI specific
            if self.nepoches:
                clf.nepoches = self.nepoches
            if self.maxepoch:
                clf.maxepoch = self.maxepoch
            clf.fit(self.X['train'],
                    self.y['train'],
                    validation_data=(self.X['valid'], self.y['valid']))
        else:
            clf = LogisticRegression(C=optreg, random_state=self.seed)
            clf.fit(self.X['train'], self.y['train'])
        logging.debug("\tFinished training!")

        if self.noreg:
            # No tuning ran, so report plain validation accuracy instead.
            devaccuracy = round(
                100 * clf.score(self.X['valid'], self.y['valid']), 2)

        testaccuracy = clf.score(self.X['test'], self.y['test'])
        testaccuracy = round(100 * testaccuracy, 2)
        devprobs = clf.predict_proba(self.X['valid'])
        testprobs = clf.predict_proba(self.X['test'])
        return devaccuracy, testaccuracy, devprobs, testprobs