Example #1
def test(config_args, ckpt_file):
    args = get_args()
    # Add the args from the config file
    args.__dict__.update(config_args)
    args.split = "dev"
    args.infer = True
    args.trained_model = os.path.join(cwd, args.EXPT_DIR, ckpt_file)
    args.data_dir = os.path.join(syspath, "hedwig-data", "datasets")

    _, _, predictions, labels = __main__.main(args)

    return {"predictions": predictions, "labels": labels}
Example #2
def train(config_args, labeled, resume_from: int = 0, ckpt_file: str = ""):
    args = get_args()
    # Add the args from the config file
    args.__dict__.update(config_args)
    args.labeled = labeled
    args.snapshot_path = os.path.join(cwd, args.EXPT_DIR, ckpt_file)
    args.data_dir = os.path.join(syspath, "hedwig-data", "datasets")
    args.infer = False

    if not os.path.isdir(os.path.join(cwd, args.EXPT_DIR)):
        os.mkdir(os.path.join(cwd, args.EXPT_DIR))

    __main__.main(args)
    return
Example #3
def infer(config_args, unlabeled, ckpt_file):
    args = get_args()
    # Add the args from the config file
    args.__dict__.update(config_args)
    args.split = "train"
    args.infer = True
    args.trained_model = os.path.join(cwd, args.EXPT_DIR, ckpt_file)
    args.data_dir = os.path.join(syspath, "hedwig-data", "datasets")

    scores = __main__.main(args)

    d = dict(zip(range(args.trainsize), scores))
    outputs = {}
    for l in unlabeled:
        outputs[l] = {"softmax": d[l]}

    return {"outputs": outputs}
Example #4
File: __main__.py  Project: vrmpx/hedwig
def evaluate_split(model, processor, args, split='dev'):
    evaluator = BertEvaluator(model, processor, args, split)
    start_time = time.time()
    accuracy, precision, recall, f1, avg_loss = evaluator.get_scores(
        silent=True)[0]
    print("Inference time", time.time() - start_time)
    print('\n' + LOG_HEADER)
    print(
        LOG_TEMPLATE.format(split.upper(), accuracy, precision, recall, f1,
                            avg_loss))


if __name__ == '__main__':
    # Set default configuration in args.py
    args = get_args()

    if args.local_rank == -1 or not args.cuda:
        device = torch.device(
            "cuda" if torch.cuda.is_available() and args.cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')

    print('Device:', str(device).upper())
    print('Number of GPUs:', n_gpu)
    print('Distributed training:', bool(args.local_rank != -1))
Example #5
def do_main():
    # Set default configuration in args.py
    args = get_args()

    if args.local_rank == -1 or not args.cuda:
        device = torch.device(
            "cuda" if torch.cuda.is_available() and args.cuda else "cpu")
        n_gpu = torch.cuda.device_count()
        torch.cuda.set_device(args.gpu)
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')

    print('Device:', str(device).upper())
    print('Number of GPUs:', n_gpu)
    print('Distributed training:', bool(args.local_rank != -1))
    print('FP16:', args.fp16)

    # Set random seed for reproducibility
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    dataset_map = {'News_art': News_artProcessor, 'News': News_Processor}

    if args.gradient_accumulation_steps < 1:
        raise ValueError(
            "Invalid gradient_accumulation_steps parameter: {}, should be >= 1"
            .format(args.gradient_accumulation_steps))

    if args.dataset not in dataset_map:
        raise ValueError('Unrecognized dataset')

    args.batch_size = args.batch_size // args.gradient_accumulation_steps
    args.device = device
    args.n_gpu = n_gpu
    args.num_labels = dataset_map[args.dataset].NUM_CLASSES
    args.is_multilabel = dataset_map[args.dataset].IS_MULTILABEL

    if not args.trained_model:
        save_path = os.path.join(args.save_path,
                                 dataset_map[args.dataset].NAME)
        os.makedirs(save_path, exist_ok=True)

    processor = dataset_map[args.dataset]()
    args.is_lowercase = 'uncased' in args.model
    args.is_hierarchical = False
    tokenizer = BertTokenizer.from_pretrained(args.model,
                                              is_lowercase=args.is_lowercase)

    num_train_optimization_steps = None
    if args.trained_model:
        train_examples = processor.get_train_examples(args.data_dir,
                                                      args.train_name)
        num_train_optimization_steps = int(
            math.ceil(len(train_examples) / args.batch_size) /
            args.gradient_accumulation_steps) * args.epochs
        if args.local_rank != -1:
            num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size(
            )

    model_bert = BertForSequenceClassification.from_pretrained(args.model,
                                                               num_labels=4)

    model_bert.to(device)

    if args.trained_model:
        model_ = torch.load(args.trained_model,
                            map_location=lambda storage, loc: storage
                            )  # load personality model
        state = {}
        for key in model_.state_dict().keys():
            new_key = key.replace("module.", "")
            state[new_key] = model_.state_dict()[key]

        del state['classifier.weight']  # remove the personality classifier head
        del state['classifier.bias']
        model_bert.load_state_dict(state, strict=False)
        model_bert = model_bert.to(device)
    args.freez_bert = False
    evaluate(model_bert,
             processor,
             args,
             last_bert_layers=-1,
             ngram_range=(1, 1))
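
This example, like #6 through #8 below, strips the "module." prefix that torch.nn.DataParallel adds to parameter names before reloading a checkpoint. A small standalone sketch of just that step (the helper name is ours, not the project's):

def strip_module_prefix(state_dict):
    """Remove the 'module.' prefix that DataParallel prepends to parameter names,
    mirroring the inline loop used in the examples above."""
    return {key.replace("module.", ""): value for key, value in state_dict.items()}


# Hypothetical usage with a checkpoint saved from a DataParallel-wrapped model:
#   model_ = torch.load(args.trained_model, map_location="cpu")
#   model_bert.load_state_dict(strip_module_prefix(model_.state_dict()), strict=False)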
Example #6
def do_main():
    # Set default configuration in args.py
    args = get_args()

    if args.local_rank == -1 or not args.cuda:
        device = torch.device(
            "cuda" if torch.cuda.is_available() and args.cuda else "cpu")
        n_gpu = torch.cuda.device_count()
        torch.cuda.set_device(args.gpu)
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')

    print('Device:', str(device).upper())
    print('Number of GPUs:', n_gpu)
    print('Distributed training:', bool(args.local_rank != -1))
    print('FP16:', args.fp16)

    # Set random seed for reproducibility
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    dataset_map = {
        'SST-2': SST2Processor,
        'Reuters': ReutersProcessor,
        'IMDB': IMDBProcessor,
        'AAPD': AAPDProcessor,
        'AGNews': AGNewsProcessor,
        'Yelp2014': Yelp2014Processor,
        'Sogou': SogouProcessor,
        'Personality': PersonalityProcessor,
        'News_art': News_artProcessor,
        'News': News_Processor,
        'UCI_yelp': UCI_yelpProcessor,
        'Procon': ProconProcessor,
        'Style': StyleProcessor,
        'ProconDual': ProconDualProcessor,
        'Pan15': Pan15_Processor,
        'Pan14E': Pan14E_Processor,
        'Pan14N': Pan14N_Processor,
        'Perspectrum': PerspectrumProcessor
    }

    if args.gradient_accumulation_steps < 1:
        raise ValueError(
            "Invalid gradient_accumulation_steps parameter: {}, should be >= 1"
            .format(args.gradient_accumulation_steps))

    if args.dataset not in dataset_map:
        raise ValueError('Unrecognized dataset')

    args.batch_size = args.batch_size // args.gradient_accumulation_steps
    args.device = device
    args.n_gpu = n_gpu
    args.num_labels = dataset_map[args.dataset].NUM_CLASSES
    args.is_multilabel = dataset_map[args.dataset].IS_MULTILABEL

    if not args.trained_model:
        save_path = os.path.join(args.save_path,
                                 dataset_map[args.dataset].NAME)
        os.makedirs(save_path, exist_ok=True)

    processor = dataset_map[args.dataset]()
    args.is_lowercase = 'uncased' in args.model
    args.is_hierarchical = False
    tokenizer = BertTokenizer.from_pretrained(args.model,
                                              is_lowercase=args.is_lowercase)

    train_examples = None
    num_train_optimization_steps = None
    if not args.trained_model:
        train_examples = processor.get_train_examples(args.data_dir,
                                                      args.train_name)
        num_train_optimization_steps = int(
            len(train_examples) / args.batch_size /
            args.gradient_accumulation_steps) * args.epochs
        if args.local_rank != -1:
            num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size(
            )

    cache_dir = args.cache_dir if args.cache_dir else os.path.join(
        str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed_{}'.format(
            args.local_rank))
    model = BertForSequenceClassification.from_pretrained(
        args.model, cache_dir=cache_dir, num_labels=args.num_labels)

    if args.fp16:
        model.half()
    model.to(device)

    if args.local_rank != -1:
        try:
            from apex.parallel import DistributedDataParallel as DDP
        except ImportError:
            raise ImportError(
                "Install NVIDIA Apex to use distributed and FP16 training.")
        model = DDP(model)
    # elif n_gpu > 1:  (disabled by marjan)
    #     model = torch.nn.DataParallel(model)

    # Prepare optimizer
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [{
        'params':
        [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
        'weight_decay':
        0.01
    }, {
        'params':
        [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
        'weight_decay':
        0.0
    }]

    if args.fp16:
        try:
            from apex.optimizers import FP16_Optimizer
            from apex.optimizers import FusedAdam
        except ImportError:
            raise ImportError(
                "Please install NVIDIA Apex for distributed and FP16 training")

        optimizer = FusedAdam(optimizer_grouped_parameters,
                              lr=args.lr,
                              bias_correction=False,
                              max_grad_norm=1.0)
        if args.loss_scale == 0:
            optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
        else:
            optimizer = FP16_Optimizer(optimizer,
                                       static_loss_scale=args.loss_scale)

    else:
        optimizer = BertAdam(optimizer_grouped_parameters,
                             lr=args.lr,
                             warmup=args.warmup_proportion,
                             t_total=num_train_optimization_steps)

    trainer = BertTrainer(model, optimizer, processor, args)

    if not args.trained_model:
        trainer.train()
        model = torch.load(trainer.snapshot_path)
    else:
        model = BertForSequenceClassification.from_pretrained(
            args.model, num_labels=args.num_labels)
        model_ = torch.load(args.trained_model,
                            map_location=lambda storage, loc: storage)
        state = {}
        for key in model_.state_dict().keys():
            new_key = key.replace("module.", "")
            state[new_key] = model_.state_dict()[key]
        model.load_state_dict(state)
        model = model.to(device)

    evaluate_split(model, processor, args, split=args.dev_name)
    evaluate_split(model, processor, args, split=args.test_name)
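
The optimizer setup above excludes biases and LayerNorm parameters from weight decay before handing the groups to BertAdam. A minimal self-contained sketch of that grouping, using a toy module and torch's AdamW instead of BertAdam purely to keep the sketch runnable:

import torch


def grouped_parameters(model, weight_decay=0.01,
                       no_decay=("bias", "LayerNorm.bias", "LayerNorm.weight")):
    """Split parameters into decay / no-decay groups, as in the example above."""
    decay, skip = [], []
    for name, param in model.named_parameters():
        (skip if any(nd in name for nd in no_decay) else decay).append(param)
    return [
        {"params": decay, "weight_decay": weight_decay},
        {"params": skip, "weight_decay": 0.0},
    ]


# Toy usage: a Linear layer stands in for BERT here.
toy = torch.nn.Linear(4, 2)
optimizer = torch.optim.AdamW(grouped_parameters(toy), lr=2e-5)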
Example #7
def do_main():
    # Set default configuration in args.py
    args = get_args()

    if args.local_rank == -1 or not args.cuda:
        device = torch.device(
            "cuda" if torch.cuda.is_available() and args.cuda else "cpu")
        n_gpu = torch.cuda.device_count()
        torch.cuda.set_device(args.gpu)
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')

    print('Device:', str(device).upper())
    print('Number of GPUs:', n_gpu)
    print('Distributed training:', bool(args.local_rank != -1))
    print('FP16:', args.fp16)

    # Set random seed for reproducibility
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    dataset_map = {'News_art': News_artProcessor, 'News': News_Processor}

    if args.gradient_accumulation_steps < 1:
        raise ValueError(
            "Invalid gradient_accumulation_steps parameter: {}, should be >= 1"
            .format(args.gradient_accumulation_steps))

    if args.dataset not in dataset_map:
        raise ValueError('Unrecognized dataset')

    args.batch_size = args.batch_size // args.gradient_accumulation_steps
    args.device = device
    args.n_gpu = n_gpu
    args.num_labels = dataset_map[args.dataset].NUM_CLASSES
    args.is_multilabel = dataset_map[args.dataset].IS_MULTILABEL

    if not args.trained_model:
        save_path = os.path.join(args.save_path,
                                 dataset_map[args.dataset].NAME)
        os.makedirs(save_path, exist_ok=True)

    processor = dataset_map[args.dataset]()
    args.is_lowercase = 'uncased' in args.model
    args.is_hierarchical = False
    tokenizer = BertTokenizer.from_pretrained(args.model,
                                              is_lowercase=args.is_lowercase)

    train_examples = None
    num_train_optimization_steps = None
    if args.trained_model:
        train_examples = processor.get_train_examples(args.data_dir,
                                                      args.train_name)
        num_train_optimization_steps = int(
            math.ceil(len(train_examples) / args.batch_size) /
            args.gradient_accumulation_steps) * args.epochs
        if args.local_rank != -1:
            num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size(
            )

    cache_dir = args.cache_dir if args.cache_dir else os.path.join(
        str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed_{}'.format(
            args.local_rank))
    model = BertForSequenceClassification.from_pretrained(
        args.model, num_labels=2)  # creating news model!
    #model = BertForSequenceClassification.from_pretrained(args.model, cache_dir=cache_dir, num_labels=args.num_labels)

    if args.fp16:
        model.half()
    model.to(device)

    #model = BertForSequenceClassification.from_pretrained(args.model, num_labels=args.num_labels)
    model_ = torch.load(
        args.trained_model,
        map_location=lambda storage, loc: storage)  # load personality model
    state = {}
    for key in model_.state_dict().keys():
        new_key = key.replace("module.", "")
        state[new_key] = model_.state_dict()[key]

    del state['classifier.weight']  # remove the personality classifier head
    del state['classifier.bias']
    model.load_state_dict(state, strict=False)
    model = model.to(device)

    # Prepare optimizer
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [{
        'params':
        [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
        'weight_decay':
        0.01
    }, {
        'params':
        [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
        'weight_decay':
        0.0
    }]

    print('t_total :', num_train_optimization_steps)
    optimizer = BertAdam(optimizer_grouped_parameters,
                         lr=args.lr,
                         warmup=args.warmup_proportion,
                         t_total=num_train_optimization_steps)
    args.freez_bert = False
    trainer = BertTrainer(model, optimizer, processor, args)

    trainer.train()
    model = torch.load(trainer.snapshot_path)

    evaluate_split(model, processor, args, split=args.dev_name)
    evaluate_split(model, processor, args, split=args.test_name)
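
Examples #5 and #7 also delete classifier.weight and classifier.bias from the loaded state dict and pass strict=False, so the personality-trained backbone is reused while the news classifier head stays randomly initialized. A hedged sketch of that transfer step in isolation (the helper name is ours; the key names are taken from these examples and may differ for other models):

def transfer_backbone(target_model, source_state_dict,
                      head_keys=("classifier.weight", "classifier.bias")):
    """Load a pretrained backbone while discarding the old classification head.

    strict=False lets target_model keep its freshly initialized head for the
    removed keys; the returned lists report which keys were skipped."""
    state = {k.replace("module.", ""): v for k, v in source_state_dict.items()}
    for key in head_keys:
        state.pop(key, None)
    missing, unexpected = target_model.load_state_dict(state, strict=False)
    return missing, unexpected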
Example #8
def do_main():
    # Set default configuration in args.py
    args = get_args()

    if args.local_rank == -1 or not args.cuda:
        device = torch.device(
            "cuda" if torch.cuda.is_available() and args.cuda else "cpu")
        n_gpu = torch.cuda.device_count()
        torch.cuda.set_device(args.gpu)
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')

    print('Device:', str(device).upper())
    print('Number of GPUs:', n_gpu)
    print('Distributed training:', bool(args.local_rank != -1))
    print('FP16:', args.fp16)

    # Set random seed for reproducibility
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    dataset_map = {'Personality': PersonalityProcessor}

    if args.gradient_accumulation_steps < 1:
        raise ValueError(
            "Invalid gradient_accumulation_steps parameter: {}, should be >= 1"
            .format(args.gradient_accumulation_steps))

    if args.dataset not in dataset_map:
        raise ValueError('Unrecognized dataset')

    args.batch_size = args.batch_size // args.gradient_accumulation_steps
    args.device = device
    args.n_gpu = n_gpu
    args.num_labels = dataset_map[args.dataset].NUM_CLASSES
    args.is_multilabel = dataset_map[args.dataset].IS_MULTILABEL

    if not args.trained_model:
        raise Exception('This method only works with pre-trained models!')

    processor = dataset_map[args.dataset]()
    args.is_lowercase = 'uncased' in args.model
    args.is_hierarchical = False

    if args.trained_model:
        model = BertForSequenceClassification.from_pretrained(
            args.model, num_labels=args.num_labels)
        model_ = torch.load(args.trained_model,
                            map_location=lambda storage, loc: storage)
        state = {}
        for key in model_.state_dict().keys():
            new_key = key.replace("module.", "")
            state[new_key] = model_.state_dict()[key]
        model.load_state_dict(state)
        model = model.to(device)

        #evaluate_split(model, processor, args, split='dev')
        #evaluate_split(model, processor, args, split='test')
        evaluate_split(model, processor, args, split=args.analyze_split)