Example #1
File: cmd.py Project: shtechair/ACE
    def evaluate(self, loader):
        self.model.eval()

        loss, metric = 0, Metric()

        for vals in loader:
            words = vals[0]
            feats = vals[1:-2]
            arcs, rels = vals[-2:]
            mask = words.ne(self.args.pad_index)
            # ignore the first token of each sentence
            mask[:, 0] = 0
            arc_scores, rel_scores = self.model(words, feats)
            loss += self.get_loss(arc_scores,
                                  rel_scores,
                                  arcs,
                                  rels,
                                  mask,
                                  words=words)
            arc_preds, rel_preds = self.decode(arc_scores, rel_scores, mask)
            # ignore all punctuation if not specified
            if not self.args.punct:
                mask &= words.unsqueeze(-1).ne(self.puncts).all(-1)
            metric(arc_preds, rel_preds, arcs, rels, mask)
        loss /= len(loader)

        return loss, metric
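A minimal usage sketch for this evaluate method. Here `parser` and `dev` are hypothetical stand-ins (an instance of the class above and a batchified dataset as built in the later examples), and wrapping the call in torch.no_grad() is an assumption on top of model.eval(), which only toggles dropout and batch-norm behavior:

    import torch

    with torch.no_grad():  # gradients are not needed at evaluation time
        dev_loss, dev_metric = parser.evaluate(dev.loader)
    print(f"dev: Loss: {dev_loss:.4f} {dev_metric}")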
Example #2
    def get_preds(self, loader):
        self.model.eval()
        loss = 0
        arcs_preds = []
            
        for words, bert, feats, arcs, rels in loader:
            mask = words.ne(self.args.pad_index)
            # ignore the first token of each sentence
            mask[:, 0] = 0
            arc_scores = self.model(words, bert, feats)
            crf_weight = arc_scores
            arc_scores = self.model.decoder(arc_scores, feats)  # joint_weights
            if self.args.crf:
                cur_loss = self.model.crf(crf_weight, arc_scores, arcs, words, feats)  # crf_weights, joint_weights, heads, words, pos
                if self.args.unsupervised:
                    arc_preds = self.model.decode_paskin(arc_scores)
                else:
                    arc_preds = self.model.decode_crf(arc_scores, mask)
                loss += cur_loss
            else:
                loss += self.get_loss(arc_scores, arcs, mask)
                arc_preds = self.model.decode(arc_scores, mask)
            # collect predictions from either branch
            arcs_preds.append(arc_preds)

        return arcs_preds
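A sketch of how these predictions feed a self-training round, following the call sequence in example #4 below (not part of this method):

    # predict arcs with the loaded source model, rebuild a fresh model,
    # then pass the predictions to train() as pseudo-gold heads
    train_arcs_preds = self.get_preds(train.loader)
    del self.model
    self.model = Model(args).load_pretrained(self.WORD.embed).to(args.device)
    self.train(train.loader, train_arcs_preds)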
Example #3
    def evaluate(self, loader, self_train=None):
        self.model.eval()

        loss, metric = 0, Metric()
        
        cnt = 0
        for words, bert, feats, arcs, rels in loader:
            if self_train is not None:
                arcs = self_train[cnt]

            mask = words.ne(self.args.pad_index)
            # ignore the first token of each sentence
            mask[:, 0] = 0
            arc_scores = self.model(words, bert, feats)
            crf_weight = arc_scores
            arc_scores = self.model.decoder(arc_scores, feats)  # joint_weights
            if self.args.crf:
                cur_loss = self.model.crf(crf_weight, arc_scores, arcs, words, feats)  # crf_weights, joint_weights, heads, words, pos
                if self.args.unsupervised:
                    arc_preds = self.model.decode_paskin(arc_scores)
                else:
                    arc_preds = self.model.decode_crf(arc_scores, mask)
                loss += cur_loss
            else:
                loss += self.get_loss(arc_scores, arcs, mask)
                arc_preds = self.model.decode(arc_scores, mask)

            # ignore all punctuation if not specified
            if not self.args.punct:
                mask &= words.unsqueeze(-1).ne(self.puncts).all(-1)
            metric(arc_preds, arcs, mask)
            cnt += 1

        loss /= len(loader)

        return loss, metric
Example #4
    def __call__(self, args):
        super(Train, self).__call__(args)

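        # grab most of the free GPU memory up front: query nvidia-smi for the
        # card's total/used MiB, then allocate (and free) one large tensor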
        rrr = os.popen(
            '"/usr/bin/nvidia-smi" --query-gpu=memory.total,memory.used --format=csv,nounits,noheader'
        )
        devices_info = rrr.read().strip().split("\n")
        total, used = devices_info[int(
            os.environ["CUDA_VISIBLE_DEVICES"])].split(',')
        total = int(total)
        used = int(used)
        max_mem = int(total * random.uniform(0.95, 0.97))
        block_mem = max_mem - used
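        # a (256, 1024) float32 slice is exactly 1 MiB, so this tensor
        # occupies about block_mem MiB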
        x = torch.cuda.FloatTensor(256, 1024, block_mem)
        del x
        rrr.close()

        logging.basicConfig(filename=args.output,
                            filemode='w',
                            format='%(asctime)s %(levelname)-8s %(message)s',
                            level=logging.INFO,
                            datefmt='%Y-%m-%d %H:%M:%S')
        train_corpus = Corpus.load(args.ftrain, self.fields, args.max_len)
        dev_corpus = Corpus.load(args.fdev, self.fields)
        dev40_corpus = Corpus.load(args.fdev, self.fields, args.max_len)
        test_corpus = Corpus.load(args.ftest, self.fields)
        test40_corpus = Corpus.load(args.ftest, self.fields, args.max_len)

        train = TextDataset(train_corpus,
                            self.fields,
                            args.buckets,
                            crf=args.crf)
        dev = TextDataset(dev_corpus, self.fields, args.buckets, crf=args.crf)
        dev40 = TextDataset(dev40_corpus,
                            self.fields,
                            args.buckets,
                            crf=args.crf)
        test = TextDataset(test_corpus,
                           self.fields,
                           args.buckets,
                           crf=args.crf)
        test40 = TextDataset(test40_corpus,
                             self.fields,
                             args.buckets,
                             crf=args.crf)
        # set the data loaders
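        # (the third batchify argument appears to toggle shuffling: it is left
        # off for self-training so batches stay aligned with get_preds output)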
        if args.self_train:
            train.loader = batchify(train, args.batch_size)
        else:
            train.loader = batchify(train, args.batch_size, True)
        dev.loader = batchify(dev, args.batch_size)
        dev40.loader = batchify(dev40, args.batch_size)
        test.loader = batchify(test, args.batch_size)
        test40.loader = batchify(test40, args.batch_size)
        logging.info(f"{'train:':6} {len(train):5} sentences, "
                     f"{len(train.loader):3} batches, "
                     f"{len(train.buckets)} buckets")
        logging.info(f"{'dev:':6} {len(dev):5} sentences, "
                     f"{len(dev.loader):3} batches, "
                     f"{len(dev.buckets)} buckets")
        logging.info(f"{'dev40:':6} {len(dev40):5} sentences, "
                     f"{len(dev40.loader):3} batches, "
                     f"{len(dev40.buckets)} buckets")
        logging.info(f"{'test:':6} {len(test):5} sentences, "
                     f"{len(test.loader):3} batches, "
                     f"{len(test.buckets)} buckets")
        logging.info(f"{'test40:':6} {len(test40):5} sentences, "
                     f"{len(test40.loader):3} batches, "
                     f"{len(test40.buckets)} buckets")

        logging.info("Create the model")
        self.model = Model(args)
        self.model = self.model.to(args.device)

        if args.E_Reg or args.T_Reg:
            source_model = Model(args)
            source_model = source_model.to(args.device)

        # load model
        if args.load != '':
            logging.info("Load source model")
            device = 'cuda' if torch.cuda.is_available() else 'cpu'
            state = torch.load(args.load, map_location=device)['state_dict']
            state_dict = self.model.state_dict()
            for k, v in state.items():
                if k in ['word_embed.weight']:
                    continue
                state_dict.update({k: v})
            self.model.load_state_dict(state_dict)
            init_params = {}
            for name, param in self.model.named_parameters():
                init_params[name] = param.clone()
            self.model.init_params = init_params

            if args.E_Reg or args.T_Reg:
                state_dict = source_model.state_dict()
                for k, v in state.items():
                    if k in ['word_embed.weight']:
                        continue
                    state_dict.update({k: v})
                source_model.load_state_dict(state_dict)
                init_params = {}
                for name, param in source_model.named_parameters():
                    init_params[name] = param.clone()
                source_model.init_params = init_params

        self.model = self.model.load_pretrained(self.WORD.embed)
        self.model = self.model.to(args.device)

        if args.self_train:
            train_arcs_preds = self.get_preds(train.loader)
            del self.model
            self.model = Model(args)
            self.model = self.model.load_pretrained(self.WORD.embed)
            self.model = self.model.to(args.device)

        if args.E_Reg or args.T_Reg:
            source_model = source_model.load_pretrained(self.WORD.embed)
            source_model = source_model.to(args.device)
            args.source_model = source_model

        self.optimizer = Adam(self.model.parameters(), args.lr,
                              (args.mu, args.nu), args.epsilon)
        self.scheduler = ExponentialLR(self.optimizer,
                                       args.decay**(1 / args.decay_steps))

        # test before train
        if args.load != '':
            logging.info('\n')

            dev_loss, dev_metric = self.evaluate(dev40.loader)
            test_loss, test_metric = self.evaluate(test40.loader)
            logging.info(f"{'dev40:':4} Loss: {dev_loss:.4f} {dev_metric}")
            logging.info(f"{'test40:':4} Loss: {test_loss:.4f} {test_metric}")

            dev_loss, dev_metric = self.evaluate(dev.loader)
            test_loss, test_metric = self.evaluate(test.loader)
            logging.info(f"{'dev:':4} Loss: {dev_loss:.4f} {dev_metric}")
            logging.info(f"{'test:':4} Loss: {test_loss:.4f} {test_metric}")

        total_time = timedelta()
        best_e, best_metric = 1, Metric()
        logging.info("Begin training")
        if args.unsupervised:
            max_uas = 0.
            cnt = 0
            for epoch in range(1, args.epochs + 1):
                start = datetime.now()

                self.train(train.loader)

                logging.info(f"Epoch {epoch} / {args.epochs}:")

                dev_loss, dev_metric = self.evaluate(dev40.loader)
                test_loss, test_metric = self.evaluate(test40.loader)
                logging.info(f"{'dev40:':4} Loss: {dev_loss:.4f} {dev_metric}")
                logging.info(
                    f"{'test40:':4} Loss: {test_loss:.4f} {test_metric}")

                dev_loss, dev_metric = self.evaluate(dev.loader)
                test_loss, test_metric = self.evaluate(test.loader)
                logging.info(f"{'dev:':4} Loss: {dev_loss:.4f} {dev_metric}")
                logging.info(
                    f"{'test:':4} Loss: {test_loss:.4f} {test_metric}")

                t = datetime.now() - start
                logging.info(f"{t}s elapsed\n")
        else:
            for epoch in range(1, args.epochs + 1):
                start = datetime.now()

                if args.self_train:
                    self.train(train.loader, train_arcs_preds)
                else:
                    self.train(train.loader)

                logging.info(f"Epoch {epoch} / {args.epochs}:")
                if not args.self_train:
                    dev_loss, dev_metric = self.evaluate(dev.loader)
                    logging.info(
                        f"{'dev:':4} Loss: {dev_loss:.4f} {dev_metric}")

                t = datetime.now() - start

                # save the model if it is the best so far
                if args.self_train:
                    loss, test_metric = self.evaluate(test.loader)
                    logging.info(f"{'test:':6} Loss: {loss:.4f} {test_metric}")
                else:
                    if dev_metric > best_metric and epoch > args.patience:
                        loss, test_metric = self.evaluate(test.loader)
                        logging.info(
                            f"{'test:':6} Loss: {loss:.4f} {test_metric}")

                        best_e, best_metric = epoch, dev_metric
                        if hasattr(self.model, 'module'):
                            self.model.module.save(args.model)
                        else:
                            self.model.save(args.model)
                        logging.info(
                            f"{t}s elapsed, best epoch {best_e} {best_metric} (saved)\n"
                        )
                    else:
                        logging.info(
                            f"{t}s elapsed, best epoch {best_e} {best_metric}\n"
                        )
                    total_time += t

                    if epoch - best_e >= args.patience:
                        break

            if not args.self_train:
                self.model = Model.load(args.model)
                logging.info(
                    f"max score of dev is {best_metric.score:.2%} at epoch {best_e}"
                )
                loss, metric = self.evaluate(test.loader)
                logging.info(
                    f"the score of test at epoch {best_e} is {metric.score:.2%}"
                )
                logging.info(
                    f"average time of each epoch is {total_time / epoch}s, {total_time}s elapsed"
                )
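A note on the scheduler arithmetic shared by these examples: args.decay ** (1 / args.decay_steps) is the per-step gamma, chosen so that the learning rate shrinks by a factor of args.decay every args.decay_steps steps. A self-contained sketch with hypothetical values:

    import torch
    from torch.optim import Adam
    from torch.optim.lr_scheduler import ExponentialLR

    decay, decay_steps = 0.75, 5000  # hypothetical values
    param = torch.zeros(1, requires_grad=True)
    optimizer = Adam([param], lr=2e-3)
    scheduler = ExponentialLR(optimizer, gamma=decay ** (1 / decay_steps))
    for _ in range(decay_steps):
        optimizer.step()
        scheduler.step()
    print(optimizer.param_groups[0]['lr'])  # ~2e-3 * 0.75, up to float error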
Example #5
    def __call__(self, args):
        # override config from CLI parameters
        args = Config(args.conf).update(vars(args))
        args.n_attentions = args.use_attentions  # backward compatibility

        # loads train corpus into self.trainset
        super().__call__(args)

        logger.info(f"Configuration parameters:\n{args}")

        #train = Corpus.load(args.ftrain, self.fields, args.max_sent_length)
        train = self.trainset
        dev = Corpus.load(args.fdev, self.fields, args.max_sent_length)
        if args.ftest:
            test = Corpus.load(args.ftest, self.fields, args.max_sent_length)

        train = TextDataset(train, self.fields, args.buckets)
        dev = TextDataset(dev, self.fields, args.buckets)
        if args.ftest:
            test = TextDataset(test, self.fields, args.buckets)
        # set the data loaders
        train.loader = batchify(train, args.batch_size, True)
        dev.loader = batchify(dev, args.batch_size)
        if args.ftest:
            test.loader = batchify(test, args.batch_size)
        logger.info(f"{'train:':6} {len(train):5} sentences, "
                    f"{len(train.loader):3} batches, "
                    f"{len(train.buckets)} buckets")
        logger.info(f"{'dev:':6} {len(dev):5} sentences, "
                    f"{len(dev.loader):3} batches, "
                    f"{len(train.buckets)} buckets")
        if args.ftest:
            logger.info(f"{'test:':6} {len(test):5} sentences, "
                        f"{len(test.loader):3} batches, "
                        f"{len(train.buckets)} buckets")

        logger.info("Create the model")
        self.model = Model(args, mask_token_id=self.FEAT.mask_token_id)
        if self.WORD:
            self.model.load_pretrained(self.WORD.embed)
        self.model = self.model.to(args.device)
        if torch.cuda.device_count() > 1:
            self.model = TransparentDataParallel(self.model)
        logger.info(f"{self.model}\n")
        if args.optimizer == 'adamw':
            self.optimizer = AdamW(self.model.parameters(), args.lr,
                                   (args.mu, args.nu), args.epsilon,
                                   args.decay)
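            # one optimizer update per accumulation_steps batches, over all epochs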
            training_steps = len(train.loader) // self.args.accumulation_steps \
                             * self.args.epochs
            warmup_steps = math.ceil(training_steps *
                                     self.args.warmup_steps_ratio)
            self.scheduler = get_linear_schedule_with_warmup(
                self.optimizer,
                num_warmup_steps=warmup_steps,
                num_training_steps=training_steps)
        else:
            self.optimizer = Adam(self.model.parameters(), args.lr,
                                  (args.mu, args.nu), args.epsilon)
            self.scheduler = ExponentialLR(self.optimizer,
                                           args.decay**(1 / args.decay_steps))

        total_time = timedelta()
        best_e, best_metric = 1, Metric()

        for epoch in range(1, args.epochs + 1):
            start = datetime.now()

            logger.info(f"Epoch {epoch} / {args.epochs}:")
            loss, train_metric = self.train(train.loader)
            logger.info(f"{'train:':6} Loss: {loss:.4f} {train_metric}")
            loss, dev_metric = self.evaluate(dev.loader)
            logger.info(f"{'dev:':6} Loss: {loss:.4f} {dev_metric}")
            if args.ftest:
                loss, test_metric = self.evaluate(test.loader)
                logger.info(f"{'test:':6} Loss: {loss:.4f} {test_metric}")

            t = datetime.now() - start
            # save the model if it is the best so far
            if dev_metric > best_metric and epoch > args.patience // 10:
                best_e, best_metric = epoch, dev_metric
                if hasattr(self.model, 'module'):
                    self.model.module.save(args.model)
                else:
                    self.model.save(args.model)
                logger.info(f"{t}s elapsed (saved)\n")
            else:
                logger.info(f"{t}s elapsed\n")
            total_time += t
            if epoch - best_e >= args.patience:
                break
        self.model = Model.load(args.model)
        if args.ftest:
            loss, metric = self.evaluate(test.loader)

        logger.info(
            f"max score of dev is {best_metric.score:.2%} at epoch {best_e}")
        if args.ftest:
            logger.info(
                f"the score of test at epoch {best_e} is {metric.score:.2%}")
        logger.info(f"average time of each epoch is {total_time / epoch}s")
        logger.info(f"{total_time}s elapsed")
Example #6
    def __call__(self, args):
        super(Train, self).__call__(args)

        train = Corpus.load(args.ftrain, self.fields)
        dev = Corpus.load(args.fdev, self.fields)
        test = Corpus.load(args.ftest, self.fields)

        train = TextDataset(train, self.fields, args.buckets)
        dev = TextDataset(dev, self.fields, args.buckets)
        test = TextDataset(test, self.fields, args.buckets)
        # set the data loaders
        train.loader = batchify(train, args.batch_size, True)
        dev.loader = batchify(dev, args.batch_size)
        test.loader = batchify(test, args.batch_size)
        print(f"{'train:':6} {len(train):5} sentences, "
              f"{len(train.loader):3} batches, "
              f"{len(train.buckets)} buckets")
        print(f"{'dev:':6} {len(dev):5} sentences, "
              f"{len(dev.loader):3} batches, "
              f"{len(train.buckets)} buckets")
        print(f"{'test:':6} {len(test):5} sentences, "
              f"{len(test.loader):3} batches, "
              f"{len(train.buckets)} buckets")

        print("Create the model")
        self.model = Model(args).load_pretrained(self.WORD.embed)
        print(f"{self.model}\n")
        self.model = self.model.to(args.device)
        if torch.cuda.device_count() > 1:
            self.model = nn.DataParallel(self.model)
        self.optimizer = Adam(self.model.parameters(), args.lr,
                              (args.mu, args.nu), args.epsilon)
        self.scheduler = ExponentialLR(self.optimizer,
                                       args.decay**(1 / args.decay_steps))

        total_time = timedelta()
        best_e, best_metric = 1, Metric()

        for epoch in range(1, args.epochs + 1):
            start = datetime.now()
            # train one epoch and update the parameters
            self.train(train.loader)

            print(f"Epoch {epoch} / {args.epochs}:")
            loss, train_metric = self.evaluate(train.loader)
            print(f"{'train:':6} Loss: {loss:.4f} {train_metric}")
            loss, dev_metric = self.evaluate(dev.loader)
            print(f"{'dev:':6} Loss: {loss:.4f} {dev_metric}")
            loss, test_metric = self.evaluate(test.loader)
            print(f"{'test:':6} Loss: {loss:.4f} {test_metric}")

            t = datetime.now() - start
            # save the model if it is the best so far
            if dev_metric > best_metric and epoch > args.patience:
                best_e, best_metric = epoch, dev_metric
                if hasattr(self.model, 'module'):
                    self.model.module.save(args.model)
                else:
                    self.model.save(args.model)
                print(f"{t}s elapsed (saved)\n")
            else:
                print(f"{t}s elapsed\n")
            total_time += t
            if epoch - best_e >= args.patience:
                break

        if hasattr(self.model, 'module'):
            self.model.module.save(args.model)
        else:
            self.model.save(args.model)
        print(f"{t}s elapsed (saved)\n")

        self.model = Model.load(args.model)
        loss, metric = self.evaluate(test.loader)

        print(f"max score of dev is {best_metric.score:.2%} at epoch {best_e}")
        print(f"the score of test at epoch {best_e} is {metric.score:.2%}")
        print(f"average time of each epoch is {total_time / epoch}s")
        print(f"{total_time}s elapsed")