Example #1
    if DEVorTEST == 'DEV':
        ls, ps, f1_score = eval(eval_iter, model, tags2idx, device, MULTI_TASK)
        if f1_score > best_score:
            best_score = f1_score
            best_epoch = epoch + 1
            config["best_score"] = best_score
            config["best_epoch"] = best_epoch

            torch.save(model.state_dict(), to_save_path + '/tagger.torch')
            np.save(to_save_path + "/Idx2Tags.npy", idx2tags)
            with open(to_save_path + "/config_saved.json", 'w') as f:
                json.dump(config, f)


model = model.eval()  # switch the model to evaluation (inference) mode

if DEVorTEST == 'TEST':
    torch.save(model.state_dict(), to_save_path + '/tagger.torch')
    np.save(to_save_path + "/Idx2Tags.npy", idx2tags)
    with open(to_save_path + "/config_saved.json", 'w') as f:
        json.dump(config, f)


prediction_file_name = to_save_path + '/bertTaggerResults_' + DEVorTEST
if DEVorTEST == 'TEST':
    labels, probs = eval_blind(test_iter, model, tags2idx, device, MULTI_TASK)
    labels2Parsemetsv(labels, data_path + 'test.blind.cupt', prediction_file_name + '_system.cupt')
else:
    labels, probs, _ = eval(eval_iter, model, tags2idx, device, MULTI_TASK)
    labels2Parsemetsv(labels, data_path + 'dev.cupt', prediction_file_name + '_system.cupt')

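A minimal sketch of restoring the artifacts saved above; the BertTagger constructor is an assumption standing in for the actual model class, and Idx2Tags.npy is assumed to hold a pickled dict:

import json
import numpy as np
import torch

with open(to_save_path + "/config_saved.json") as f:
    config = json.load(f)
# np.save stored the dict as a 0-d object array; .item() unwraps it
idx2tags = np.load(to_save_path + "/Idx2Tags.npy", allow_pickle=True).item()

model = BertTagger(config)  # hypothetical constructor
model.load_state_dict(torch.load(to_save_path + '/tagger.torch',
                                 map_location=device))
model.eval()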
Example #2
    def train_predict_test(self):
        self.model = getattr(self, self.model_name)()

        res_dir = "./{}".format(self.lang) + "_" + self.model_name + "_results"
        if not os.path.exists(res_dir):
            os.makedirs(res_dir)
        filepath = res_dir + "/weights-improvement-{epoch:02d}-{acc:.2f}.hdf5"
        checkpoint = ModelCheckpoint(filepath,
                                     monitor='acc',
                                     verbose=1,
                                     save_best_only=True,
                                     mode='max',
                                     period=10)
        callbacks_list = [checkpoint]

        # since we are not using early stopping, we set validation split to 0
        #model.fit(X_train_enc, y_train_enc, validation_split=0, batch_size=100, epochs=50, callbacks=callbacks_list)
        if self.pos:
            self.model.fit([self.X_train_enc, self.pos_train_enc],
                           self.y_train_enc,
                           validation_split=0,
                           batch_size=self.batch_size,
                           epochs=self.epoch,
                           callbacks=callbacks_list)
        else:
            self.model.fit(self.X_train_enc,
                           self.y_train_enc,
                           validation_split=0,
                           batch_size=self.batch_size,
                           epochs=self.epoch,
                           callbacks=callbacks_list)
        if self.pos:
            preds = self.model.predict([self.X_test_enc, self.pos_test_enc],
                                       batch_size=16,
                                       verbose=1)
        else:
            preds = self.model.predict(self.X_test_enc,
                                       batch_size=16,
                                       verbose=1)
        final_preds = []
        for i in range(len(self.X_test_enc)):
            pred = np.argmax(preds[i], -1)
            pred = [self.idx2l[p] for p in pred]
            final_preds.append(pred)
        predictionFileName = res_dir + '/predicted_{}'.format(
            self.lang) + '_' + self.model_name
        # save the predicted labels to a pickle file
        with open(predictionFileName + '.pkl', 'wb') as f:
            pickle.dump(final_preds, f)
        with open(predictionFileName + '.pkl', 'rb') as f:
            labels1 = pickle.load(f)
        labels2Parsemetsv(labels1,
                          '../DATA/{}/test.blind.cupt'.format(self.lang),
                          predictionFileName + '_system.cupt')

        with open(
                res_dir + '/eval_{}_'.format(self.lang) + self.model_name +
                '.txt', 'w') as f:
            f.write(
                subprocess.check_output([
                    "../bin/evaluate.py", "--gold",
                    "../DATA/{}/test.cupt".format(self.lang), "--pred",
                    predictionFileName + "_system.cupt"
                ]).decode())
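The snippet shells out to the shared-task scorer with subprocess.check_output; an equivalent sketch using subprocess.run with explicit error handling (the paths mirror the snippet above and are assumptions about the repository layout):

import subprocess

result = subprocess.run(
    ["../bin/evaluate.py",
     "--gold", "../DATA/{}/test.cupt".format(self.lang),
     "--pred", predictionFileName + "_system.cupt"],
    capture_output=True, text=True, check=True)  # raises on non-zero exit
with open(res_dir + "/eval_{}_".format(self.lang) + self.model_name + ".txt",
          "w") as f:
    f.write(result.stdout)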
Example #3
    def cross_validation(self, epoch, batch_size, data_path):
        if self.data.testORdev == "CROSS_VAL":
            self.res_dir = "./results/CROSSVAL_{}".format(
                self.data.lang) + "_" + self.tagger_name + "_results"
        if not os.path.exists(self.res_dir):
            os.makedirs(self.res_dir)

        kf = KFold(n_splits=5)
        i = 0
        final_preds = [0] * len(self.data.X_train_enc)
        for train_index, test_index in kf.split(self.data.X_train_enc):
            print("Running Fold", i + 1, "/", "5")
            X_train, X_test = self.data.X_train_enc[
                train_index], self.data.X_train_enc[test_index]
            pos_train, pos_test = self.data.pos_train_enc[
                train_index], self.data.pos_train_enc[test_index]
            y_train, y_test = self.data.y_train_enc[
                train_index], self.data.y_train_enc[test_index]
            inputs = []
            if "elmo" in self.tagger_name.lower():
                X_train, X_test = self.data.train_weights[
                    train_index], self.data.train_weights[test_index]
                inputs += [X_train]
            if self.pos:
                inputs += [pos_train]
            X_train_adj, X_test_adj = [], []
            if self.data.depAdjacency_gcn:
                for j in range(len(self.data.train_adjacency_matrices)):
                    X_train_adj.append(
                        self.data.train_adjacency_matrices[j][train_index])
                    X_test_adj += [
                        self.data.train_adjacency_matrices[j][test_index]
                    ]
                inputs += X_train_adj
            print(X_train.shape)

            model = None  # Clearing the NN.
            model = Tagger(self.data, self.data.max_length,
                           self.data.input_dim, self.data.n_poses,
                           self.data.n_classes, "")
            model = getattr(model, self.tagger_name)()
            #if "elmo" in self.tagger_name.lower():
            #	model.fit(train_text, y_train, validation_split=0, batch_size=10, epochs=1)

            if len(inputs) == 1:
                model.fit(X_train,
                          y_train,
                          validation_split=0,
                          batch_size=batch_size,
                          epochs=epoch)
            else:
                model.fit(inputs,
                          y_train,
                          validation_split=0,
                          batch_size=batch_size,
                          epochs=epoch)
            i += 1

            for t in test_index:
                inputs = [np.array([self.data.train_weights[t]])]
                if self.pos:
                    inputs += [np.array([self.data.pos_train_enc[t]])]
                if self.data.depAdjacency_gcn:
                    inputs += [
                        np.array([self.data.train_adjacency_matrices[j][t]])
                        for j in range(len(self.data.train_adjacency_matrices))
                    ]

                if len(inputs) == 1:
                    pred = model.predict(
                        np.array([self.data.train_weights[t]]))
                else:
                    pred = model.predict(inputs)
                pred = np.argmax(pred, -1)[0]
                pred = [self.data.idx2l[p] for p in pred]
                final_preds[t] = pred

        prediction_file_name = self.res_dir + '/predicted_{}'.format(
            self.data.lang) + '_' + self.tagger_name
        with open(prediction_file_name + '.pkl', 'wb') as f:
            pickle.dump(final_preds, f)
        with open(prediction_file_name + '.pkl', 'rb') as f:
            labels1 = pickle.load(f)
        print("len(labels1)", len(labels1))
        labels2Parsemetsv(labels1,
                          data_path + '{}/train.cupt'.format(self.data.lang),
                          prediction_file_name + '_system.cupt')

        with open(
                self.res_dir + '/eval_{}_'.format(self.data.lang) +
                self.tagger_name + '.txt', 'w') as f:
            f.write(
                subprocess.check_output([
                    data_path + "bin/evaluate_v1.py", "--gold",
                    data_path + "{}/train.cupt".format(self.data.lang),
                    "--pred", prediction_file_name + "_system.cupt"
                ]).decode())
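The fold bookkeeping above relies on scikit-learn's KFold; a self-contained illustration of the index splits it produces (toy data, same n_splits=5, no shuffling):

import numpy as np
from sklearn.model_selection import KFold

X = np.arange(10)
kf = KFold(n_splits=5)  # deterministic contiguous folds, as above
for fold, (train_index, test_index) in enumerate(kf.split(X), start=1):
    print("Fold", fold, "train:", train_index, "test:", test_index)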
Example #4
    def test(self, data_path):
        inputs = []
        if "elmo" in self.tagger_name.lower():
            inputs = [self.data.test_weights]
        if self.w2v:
            inputs += [self.data.X_test_enc]
        if self.pos:
            inputs += [self.data.pos_test_enc]
        if self.data.depAdjacency_gcn:
            inputs += self.data.test_adjacency_matrices

        if len(inputs) == 1:
            preds = self.tagger.predict(inputs[0], batch_size=16, verbose=1)
        else:
            preds = self.tagger.predict(inputs, batch_size=16, verbose=1)

        final_preds = []
        for i in range(len(self.data.X_test_enc)):
            pred = np.argmax(preds[i], -1)
            pred = [self.data.idx2l[p] for p in pred]
            final_preds.append(pred)
        # build the prediction file name from the model name, settings, and language
        prediction_file_name = self.res_dir + '/predicted_{}'.format(
            self.data.lang) + '_' + self.tagger_name
        # save the predicted labels as a pickled list
        with open(prediction_file_name + '.pkl', 'wb') as f:
            pickle.dump(final_preds, f)
        with open(prediction_file_name + '.pkl', 'rb') as f:
            labels1 = pickle.load(f)
        if self.data.testORdev == "TEST":  # we have DEV as part of training and are evaluating the test
            labels2Parsemetsv(
                labels1, data_path + '{}/test.cupt'.format(self.data.lang),
                prediction_file_name + '_system.cupt')

            with open(
                    self.res_dir + '/eval_{}_'.format(self.data.lang) +
                    self.tagger_name + '.txt', 'w') as f:
                f.write(
                    subprocess.check_output([
                        data_path + "bin/evaluate_v1.py", "--train",
                        data_path + "{}/train.cupt".format(self.data.lang),
                        "--gold",
                        data_path + "{}/test.cupt".format(self.data.lang),
                        "--pred", prediction_file_name + "_system.cupt"
                    ]).decode())
        else:
            labels2Parsemetsv(
                labels1, data_path + '{}/dev.cupt'.format(self.data.lang),
                prediction_file_name + '_system.cupt')

            with open(
                    self.res_dir + '/eval_{}_'.format(self.data.lang) +
                    self.tagger_name + '.txt', 'w') as f:
                f.write(
                    subprocess.check_output([
                        data_path + "bin/evaluate_v1.py", "--train",
                        data_path + "{}/train.cupt".format(self.data.lang),
                        "--gold",
                        data_path + "{}/dev.cupt".format(self.data.lang),
                        "--pred", prediction_file_name + "_system.cupt"
                    ]).decode())
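The argmax-then-lookup decoding used in test() above, in isolation; the toy idx2l map and probabilities are assumptions for illustration:

import numpy as np

idx2l = {0: "O", 1: "B", 2: "I"}  # toy index-to-label map
preds = np.array([[[0.7, 0.2, 0.1],   # one sentence, three tokens
                   [0.1, 0.8, 0.1],
                   [0.2, 0.1, 0.7]]])
labels = [[idx2l[p] for p in np.argmax(sent, -1)] for sent in preds]
print(labels)  # [['O', 'B', 'I']]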
Example #5

    def test(self, data_path):
        ts_inputs = []
        if "elmo" in self.tagger_name.lower():
            ts_inputs = [self.data.test_weights]
        else:
            ts_inputs = [self.data.X_test_enc]
        if self.w2v:
            ts_inputs += [self.data.X_test_enc]
        if self.pos:
            ts_inputs += [self.data.pos_test_enc]
        if self.data.depAdjacency_gcn:
            ts_inputs += self.data.test_adjacency_matrices

        if len(ts_inputs) == 1:
            preds, preds2, preds3 = self.tagger.predict(ts_inputs[0],
                                                        batch_size=16,
                                                        verbose=1)
        else:
            preds, preds2, preds3 = self.tagger.predict(ts_inputs,
                                                        batch_size=16,
                                                        verbose=1)

        self.ts_inputs = ts_inputs
        self.ts_targets1 = self.data.y_test_enc  # np.concatenate((self.data.y_train_enc, self.data.y_dev_enc), axis=0)
        # multi-tasking with dep adjacency significantly deteriorated the results for FA
        self.ts_targets2 = self.data.test_adjacency_matrices[0]
        self.ts_targets3 = self.data.dep_test_enc

        final_preds = []
        for i in range(len(self.data.X_test_enc)):
            pred = np.argmax(preds[i], -1)
            pred = [self.data.idx2l[p] for p in pred]
            final_preds.append(pred)
        prediction_file_name = self.res_dir + '/predicted_{}'.format(
            self.data.lang_ts) + '_' + self.tagger_name
        # save the predicted labels to a pickle file
        with open(prediction_file_name + '.pkl', 'wb') as f:
            pickle.dump(final_preds, f)

        with open(prediction_file_name + '.pkl', 'rb') as f:
            labels1 = pickle.load(f)
        if self.devORTest == "TEST":  # we have DEV as part of training and are evaluating the test
            labels2Parsemetsv(
                labels1,
                data_path + '{}/test.blind.cupt'.format(self.data.lang_ts),
                prediction_file_name + '_system.cupt')

            with open(
                    self.res_dir + '/eval_{}_'.format(self.data.lang_ts) +
                    self.tagger_name + '.txt', 'w') as f:
                f.write(
                    subprocess.check_output([
                        data_path + "bin/evaluate_v1.py", "--train",
                        data_path + "{}/train.cupt".format(self.data.lang_ts),
                        "--gold",
                        data_path + "{}/test.cupt".format(self.data.lang_ts),
                        "--pred", prediction_file_name + "_system.cupt"
                    ]).decode())
        else:
            labels2Parsemetsv(
                labels1, data_path + '{}/dev.cupt'.format(self.data.lang_dev),
                prediction_file_name + '_system.cupt')

            with open(
                    self.res_dir + '/eval_{}_'.format(self.data.lang_dev) +
                    self.tagger_name + '.txt', 'w') as f:
                f.write(
                    subprocess.check_output([
                        data_path + "bin/evaluate_v1.py", "--train",
                        data_path + "{}/train.cupt".format(self.data.lang_dev),
                        "--gold",
                        data_path + "{}/dev.cupt".format(self.data.lang_dev),
                        "--pred", prediction_file_name + "_system.cupt"
                    ]).decode())
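Why the second test() unpacks predict() into three arrays: a Keras model built with three output heads returns one prediction array per head. A toy functional-API sketch (the layer shapes are assumptions, not the tagger's actual architecture):

import numpy as np
from tensorflow.keras import layers, Model

inp = layers.Input(shape=(5,))
h = layers.Dense(8, activation="relu")(inp)
tags = layers.Dense(3, activation="softmax", name="tags")(h)
adj = layers.Dense(5, activation="sigmoid", name="adjacency")(h)
deps = layers.Dense(4, activation="softmax", name="deps")(h)
model = Model(inp, [tags, adj, deps])

p1, p2, p3 = model.predict(np.zeros((2, 5)), verbose=0)
print(p1.shape, p2.shape, p3.shape)  # (2, 3) (2, 5) (2, 4)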