Example 1
def main(args):
    # Set up logging and devices
    args.save_dir = util.get_save_dir(args.save_dir, args.name, training=True)
    log = util.get_logger(args.save_dir, args.name)
    tbx = SummaryWriter(args.save_dir)
    device, args.gpu_ids = util.get_available_devices()
    log.info(f'Args: {dumps(vars(args), indent=4, sort_keys=True)}')
    args.batch_size *= max(1, len(args.gpu_ids))

    # Set random seed
    log.info(f'Using random seed {args.seed}...')
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    # Get embeddings
    log.info('Loading embeddings...')
    word_vectors = util.torch_from_json(args.word_emb_file)

    # Get model
    log.info('Building model...')
    model = BiDAF_3(word_vectors=word_vectors,
                    hidden_size=args.hidden_size,
                    drop_prob=args.drop_prob)
    model = nn.DataParallel(model, args.gpu_ids)
    if args.load_path:
        log.info(f'Loading checkpoint from {args.load_path}...')
        model, step = util.load_model(model, args.load_path, args.gpu_ids)
    else:
        step = 0
    model = model.to(device)
    model.train()
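    # Maintain an exponential moving average (EMA) of the weights; the averaged
    # copy is swapped in for evaluation below.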
    ema = util.EMA(model, args.ema_decay)

    # Get saver
    saver = util.CheckpointSaver(args.save_dir,
                                 max_checkpoints=args.max_checkpoints,
                                 metric_name=args.metric_name,
                                 maximize_metric=args.maximize_metric,
                                 log=log)

    # Get optimizer and scheduler
    optimizer = optim.Adadelta(model.parameters(),
                               args.lr,
                               weight_decay=args.l2_wd)
    scheduler = sched.LambdaLR(optimizer,
                               lambda epoch: 0.8 ** (epoch // 5))  # Decay LR by 0.8 every 5 epochs
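    # As a function of epoch, the lambda above reproduces
    # sched.StepLR(optimizer, step_size=5, gamma=0.8).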

    # Get data loader
    log.info('Building dataset...')
    train_dataset = SQuAD(args.train_record_file, args.use_squad_v2)
    train_loader = data.DataLoader(train_dataset,
                                   batch_size=args.batch_size,
                                   shuffle=True,
                                   num_workers=args.num_workers,
                                   collate_fn=collate_fn)
    dev_dataset = SQuAD(args.dev_record_file, args.use_squad_v2)
    dev_loader = data.DataLoader(dev_dataset,
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 num_workers=args.num_workers,
                                 collate_fn=collate_fn)

    # Train
    log.info('Training...')
    steps_till_eval = args.eval_steps
    epoch = step // len(train_dataset)
    while epoch != args.num_epochs:
        epoch += 1
        log.info(f'Starting epoch {epoch}...')
        with torch.enable_grad(), \
                tqdm(total=len(train_loader.dataset)) as progress_bar:
            for cw_idxs, cc_idxs, qw_idxs, qc_idxs, y1, y2, ids in train_loader:
                # Setup for forward
                cw_idxs = cw_idxs.to(device)
                qw_idxs = qw_idxs.to(device)
                batch_size = cw_idxs.size(0)
                optimizer.zero_grad()

                # Forward
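                # The model also returns a penalty term Q, weighted by the
                # module-level constant _QUANTIZATION_FACTOR (assumed to be
                # defined elsewhere in this file).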
                (log_p1, log_p2), Q = model(cw_idxs, qw_idxs)
                y1, y2 = y1.to(device), y2.to(device)
                loss = (F.nll_loss(log_p1, y1) + F.nll_loss(log_p2, y2)
                        + Q * _QUANTIZATION_FACTOR)

                loss_val = loss.item()

                # Backward
                loss.backward()
                nn.utils.clip_grad_norm_(model.parameters(),
                                         args.max_grad_norm)
                optimizer.step()
                scheduler.step(epoch)
                ema(model, step // batch_size)

                # Log info
                step += batch_size
                progress_bar.update(batch_size)
                progress_bar.set_postfix(epoch=epoch, NLL=loss_val)
                tbx.add_scalar('train/NLL', loss_val, step)
                tbx.add_scalar('train/LR', optimizer.param_groups[0]['lr'],
                               step)

                steps_till_eval -= batch_size
                if steps_till_eval <= 0:
                    steps_till_eval = args.eval_steps

                    # Evaluate and save checkpoint
                    log.info(f'Evaluating at step {step}...')
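                    # assign() swaps the EMA weights into the model for eval;
                    # resume() restores the raw training weights afterwards.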
                    ema.assign(model)
                    results, pred_dict = evaluate(model, dev_loader, device,
                                                  args.dev_eval_file,
                                                  args.max_ans_len,
                                                  args.use_squad_v2)
                    saver.save(step, model, results[args.metric_name], device)
                    ema.resume(model)

                    # Log to console
                    results_str = ', '.join(f'{k}: {v:05.2f}'
                                            for k, v in results.items())
                    log.info(f'Dev {results_str}')

                    # Log to TensorBoard
                    log.info('Visualizing in TensorBoard...')
                    for k, v in results.items():
                        tbx.add_scalar(f'dev/{k}', v, step)
                    util.visualize(tbx,
                                   pred_dict=pred_dict,
                                   eval_path=args.dev_eval_file,
                                   step=step,
                                   split='dev',
                                   num_visuals=args.num_visuals)
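
A minimal sketch of the util.EMA helper these scripts rely on; this is an
assumption about its interface (__call__ to update, assign/resume to swap
weights around evaluation), not the actual implementation:

class EMA:
    """Exponential moving average of trainable parameters (sketch)."""

    def __init__(self, model, decay):
        self.decay = decay
        self.shadow = {name: p.data.clone()
                       for name, p in model.named_parameters()
                       if p.requires_grad}
        self.original = {}

    def __call__(self, model, num_updates):
        # Bias-corrected decay keeps early averages from being dominated
        # by the initial weights.
        decay = min(self.decay, (1.0 + num_updates) / (10.0 + num_updates))
        for name, p in model.named_parameters():
            if p.requires_grad:
                self.shadow[name] = \
                    (1.0 - decay) * p.data + decay * self.shadow[name]

    def assign(self, model):
        # Swap the averaged weights in (called just before evaluation).
        for name, p in model.named_parameters():
            if p.requires_grad:
                self.original[name] = p.data.clone()
                p.data = self.shadow[name]

    def resume(self, model):
        # Restore the raw training weights (called right after evaluation).
        for name, p in model.named_parameters():
            if p.requires_grad:
                p.data = self.original[name]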
Example 2
def main(args):
    # Set up logging and devices
    args.save_dir = util.get_save_dir(args.save_dir, args.name, training=True)
    log = util.get_logger(args.save_dir, args.name)
    tbx = SummaryWriter(args.save_dir)
    device, args.gpu_ids = util.get_available_devices()
    log.info(f'Args: {dumps(vars(args), indent=4, sort_keys=True)}')
    args.batch_size *= max(1, len(args.gpu_ids))

    # Set random seed
    log.info(f'Using random seed {args.seed}...')
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    # Get embeddings
    log.info('Loading embeddings...')
    word_vectors = util.torch_from_json(args.word_emb_file)
    char_vectors = util.torch_from_json(args.char_emb_file)

    # Get model
    log.info('Building model...')

    model, step = get_model(word_vectors, char_vectors, log, args)
    model = model.to(device)
    model.train()
    ema = util.EMA(model, args.ema_decay)

    # Get saver
    saver = util.CheckpointSaver(args.save_dir,
                                 max_checkpoints=args.max_checkpoints,
                                 metric_name=args.metric_name,
                                 maximize_metric=args.maximize_metric,
                                 log=log)

    # Get optimizer and scheduler
    # Earlier experiments (kept for reference):
    # optimizer = get_opt(model, args)
    # optimizer = optim.Adam(model.parameters(), lr=1, betas=[0.8, 0.999],
    #                        eps=1e-7, weight_decay=args.l2_wd)
    # warmup = args.warmup
    # cooldown = args.hidden_size * 100
    # scheduler = sched.LambdaLR(
    #     optimizer,
    #     lambda step: min(0.001, 0.001 * (step / warmup) ** 0.5)
    #     if step <= cooldown
    #     else max(1e-4, 1e-3 * (1 - min(1, (step - cooldown) / cooldown)) ** 0.5))
    # scheduler = sched.LambdaLR(
    #     optimizer, lambda step: min(0.001, 0.001 * (step / warmup) ** 0.5))
    # cr = 1.0 / math.log(warmup)
    # scheduler = sched.LambdaLR(
    #     optimizer,
    #     lr_lambda=lambda ee: cr * math.log(ee + 1) if ee < warmup else 1)
    optimizer = optim.Adadelta(model.parameters(),
                               args.lr,
                               weight_decay=args.l2_wd)
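    # Constant LR: the lambda always returns 1, so training runs at args.lr throughout.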
    scheduler = sched.LambdaLR(optimizer, lambda step: 1)

    # Get data loader
    log.info('Building dataset...')
    if args.model_name == "KnowGQA":
        train_dataset = SQUADwithGraph(args.train_record_file_graph,
                                       args.use_squad_v2)
        dev_dataset = SQUADwithGraph(args.dev_record_file_graph,
                                     args.use_squad_v2)
        train_loader = data.DataLoader(train_dataset,
                                       batch_size=args.batch_size,
                                       shuffle=True,
                                       num_workers=args.num_workers,
                                       collate_fn=collate_fn_graph)

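        # Note: the dev batch size is hard-coded to 4 here, presumably to bound
        # memory for the per-example adjacency matrices.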
        dev_loader = data.DataLoader(dev_dataset,
                                     batch_size=4,
                                     shuffle=False,
                                     num_workers=args.num_workers,
                                     collate_fn=collate_fn_graph)
    else:
        train_dataset = SQuAD(args.train_record_file, args.use_squad_v2)
        dev_dataset = SQuAD(args.dev_record_file, args.use_squad_v2)
        train_loader = data.DataLoader(train_dataset,
                                       batch_size=args.batch_size,
                                       shuffle=True,
                                       num_workers=args.num_workers,
                                       collate_fn=collate_fn)

        dev_loader = data.DataLoader(dev_dataset,
                                     batch_size=args.batch_size,
                                     shuffle=False,
                                     num_workers=args.num_workers,
                                     collate_fn=collate_fn)

    # Train
    log.info('Training...')
    steps_till_eval = args.eval_steps
    epoch = step // len(train_dataset)
    while epoch != args.num_epochs:
        epoch += 1
        log.info(f'Starting epoch {epoch}...')
        with torch.enable_grad(), \
                tqdm(total=len(train_loader.dataset)) as progress_bar:
            if args.model_name == "KnowGQA":
                for cw_idxs, cc_idxs, qw_idxs, qc_idxs, co_idxs, adjs, y1, y2, ids in train_loader:
                    # Setup for forward
                    cw_idxs = cw_idxs.to(device)
                    cc_idxs = cc_idxs.to(device)
                    qw_idxs = qw_idxs.to(device)
                    qc_idxs = qc_idxs.to(device)
                    co_idxs = co_idxs.to(device)
                    adjs = adjs.to(device)
                    batch_size = cw_idxs.size(0)
                    optimizer.zero_grad()

                    # Forward
                    log_p1, log_p2 = model(cw_idxs, cc_idxs, qw_idxs, qc_idxs,
                                           co_idxs, adjs)

                    y1, y2 = y1.to(device), y2.to(device)
                    loss = F.nll_loss(log_p1, y1) + F.nll_loss(log_p2, y2)
                    loss_val = loss.item()

                    # Backward
                    loss.backward()
                    nn.utils.clip_grad_norm_(model.parameters(),
                                             args.max_grad_norm)
                    optimizer.step()
                    scheduler.step()
                    ema(model, step // batch_size)

                    # Log info
                    step += batch_size
                    progress_bar.update(batch_size)
                    progress_bar.set_postfix(epoch=epoch, NLL=loss_val)
                    tbx.add_scalar('train/NLL', loss_val, step)
                    tbx.add_scalar('train/LR', optimizer.param_groups[0]['lr'],
                                   step)

                    steps_till_eval -= batch_size
                    if steps_till_eval <= 0:
                        steps_till_eval = args.eval_steps

                        # Evaluate and save checkpoint
                        log.info(f'Evaluating at step {step}...')
                        ema.assign(model)
                        results, pred_dict = evaluate(model, "KnowGQA",
                                                      dev_loader, device,
                                                      args.dev_eval_file,
                                                      args.max_ans_len,
                                                      args.use_squad_v2)
                        saver.save(step, model, results[args.metric_name],
                                   device)
                        ema.resume(model)

                        # Log to console
                        results_str = ', '.join(f'{k}: {v:05.2f}'
                                                for k, v in results.items())
                        log.info(f'Dev {results_str}')

                        # Log to TensorBoard
                        log.info('Visualizing in TensorBoard...')
                        for k, v in results.items():
                            tbx.add_scalar(f'dev/{k}', v, step)
                        util.visualize(tbx,
                                       pred_dict=pred_dict,
                                       eval_path=args.dev_eval_file,
                                       step=step,
                                       split='dev',
                                       num_visuals=args.num_visuals)
            elif args.model_name == "BiDAF_nochar":
                for cw_idxs, cc_idxs, qw_idxs, qc_idxs, y1, y2, ids in train_loader:
                    # Setup for forward
                    cw_idxs = cw_idxs.to(device)
                    qw_idxs = qw_idxs.to(device)
                    batch_size = cw_idxs.size(0)
                    optimizer.zero_grad()

                    # Forward
                    log_p1, log_p2 = model(cw_idxs, qw_idxs)

                    y1, y2 = y1.to(device), y2.to(device)
                    loss = F.nll_loss(log_p1, y1) + F.nll_loss(log_p2, y2)
                    loss_val = loss.item()

                    # Backward
                    loss.backward()
                    nn.utils.clip_grad_norm_(model.parameters(),
                                             args.max_grad_norm)
                    optimizer.step()
                    scheduler.step()
                    ema(model, step // batch_size)

                    # Log info
                    step += batch_size
                    progress_bar.update(batch_size)
                    progress_bar.set_postfix(epoch=epoch, NLL=loss_val)
                    tbx.add_scalar('train/NLL', loss_val, step)
                    tbx.add_scalar('train/LR', optimizer.param_groups[0]['lr'],
                                   step)

                    steps_till_eval -= batch_size
                    if steps_till_eval <= 0:
                        steps_till_eval = args.eval_steps

                        # Evaluate and save checkpoint
                        log.info(f'Evaluating at step {step}...')
                        ema.assign(model)
                        results, pred_dict = evaluate(model, "BiDAF_nochar",
                                                      dev_loader, device,
                                                      args.dev_eval_file,
                                                      args.max_ans_len,
                                                      args.use_squad_v2)
                        saver.save(step, model, results[args.metric_name],
                                   device)
                        ema.resume(model)

                        # Log to console
                        results_str = ', '.join(f'{k}: {v:05.2f}'
                                                for k, v in results.items())
                        log.info(f'Dev {results_str}')

                        # Log to TensorBoard
                        log.info('Visualizing in TensorBoard...')
                        for k, v in results.items():
                            tbx.add_scalar(f'dev/{k}', v, step)
                        util.visualize(tbx,
                                       pred_dict=pred_dict,
                                       eval_path=args.dev_eval_file,
                                       step=step,
                                       split='dev',
                                       num_visuals=args.num_visuals)

            else:
                for cw_idxs, cc_idxs, qw_idxs, qc_idxs, y1, y2, ids in train_loader:
                    # Setup for forward
                    cw_idxs = cw_idxs.to(device)
                    cc_idxs = cc_idxs.to(device)
                    qw_idxs = qw_idxs.to(device)
                    qc_idxs = qc_idxs.to(device)
                    batch_size = cw_idxs.size(0)
                    optimizer.zero_grad()

                    # Forward
                    log_p1, log_p2 = model(cw_idxs, cc_idxs, qw_idxs, qc_idxs)

                    y1, y2 = y1.to(device), y2.to(device)
                    loss = F.nll_loss(log_p1, y1) + F.nll_loss(log_p2, y2)
                    loss_val = loss.item()

                    # Backward
                    loss.backward()
                    nn.utils.clip_grad_norm_(model.parameters(),
                                             args.max_grad_norm)
                    optimizer.step()
                    scheduler.step()
                    ema(model, step // batch_size)

                    # Log info
                    step += batch_size
                    progress_bar.update(batch_size)
                    progress_bar.set_postfix(epoch=epoch, NLL=loss_val)
                    tbx.add_scalar('train/NLL', loss_val, step)
                    tbx.add_scalar('train/LR', optimizer.param_groups[0]['lr'],
                                   step)

                    steps_till_eval -= batch_size
                    if steps_till_eval <= 0:
                        steps_till_eval = args.eval_steps

                        # Evaluate and save checkpoint
                        log.info(f'Evaluating at step {step}...')
                        ema.assign(model)
                        results, pred_dict = evaluate(model, "BiDAF",
                                                      dev_loader, device,
                                                      args.dev_eval_file,
                                                      args.max_ans_len,
                                                      args.use_squad_v2)
                        saver.save(step, model, results[args.metric_name],
                                   device)
                        ema.resume(model)

                        # Log to console
                        results_str = ', '.join(f'{k}: {v:05.2f}'
                                                for k, v in results.items())
                        log.info(f'Dev {results_str}')

                        # Log to TensorBoard
                        log.info('Visualizing in TensorBoard...')
                        for k, v in results.items():
                            tbx.add_scalar(f'dev/{k}', v, step)
                        util.visualize(tbx,
                                       pred_dict=pred_dict,
                                       eval_path=args.dev_eval_file,
                                       step=step,
                                       split='dev',
                                       num_visuals=args.num_visuals)
Example 3
def main(args):
    # ===========================================================
    torch.cuda.empty_cache()
    # ===========================================================

    # Set up logging and devices
    args.save_dir = util.get_save_dir(args.save_dir, args.name, training=True)
    log = util.get_logger(args.save_dir, args.name)
    tbx = SummaryWriter(args.save_dir)
    device, args.gpu_ids = util.get_available_devices()
    log.info(f'Args: {dumps(vars(args), indent=4, sort_keys=True)}')
    args.batch_size *= max(1, len(args.gpu_ids))

    # Set random seed
    log.info(f'Using random seed {args.seed}...')
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    # Get embeddings
    log.info('Loading embeddings...')
    word_vectors = util.torch_from_json(args.word_emb_file)

    # Get model
    log.info('Building model...')

    # ==============================================================================
    # char_vectors = util.torch_from_json(args.char_emb_file)
    # char_vocab_size, _ = tuple(char_vectors.size())
    char_vocab_size = 1376  # hard-coded to save memory (avoids loading char_vectors just for its size)

    model = QANet(
        word_vectors=word_vectors,
        char_vocab_size=char_vocab_size,
        char_dim=args.char_dim,
        d_model=args.d_model,
        drop_prob=args.drop_prob,
        num_heads=args.num_heads,
        num_mod_blocks=args.num_mod_blocks,  # reduced to save memory
        maximum_context_length=args.maximum_context_length)
    # ==============================================================================

    model = nn.DataParallel(model, args.gpu_ids)
    if args.load_path:
        log.info(f'Loading checkpoint from {args.load_path}...')
        model, step = util.load_model(model, args.load_path, args.gpu_ids)
    else:
        step = 0
    model = model.to(device)
    model.train()
    ema = util.EMA(model, args.ema_decay)

    # Get saver
    saver = util.CheckpointSaver(args.save_dir,
                                 max_checkpoints=args.max_checkpoints,
                                 metric_name=args.metric_name,
                                 maximize_metric=args.maximize_metric,
                                 log=log)

    # Get optimizer and scheduler
    # optimizer = optim.Adadelta(model.parameters(),
    #                            args.lr,
    #                            eps=1e-6,
    #                            weight_decay=args.l2_wd)
    # scheduler = sched.LambdaLR(optimizer, lambda s: 1.)  # Constant LR

    # QANet-02 through QANet-05 (identical settings) =========================================================================================
    # optimizer = optim.Adam(lr=1,
    #                        betas=(args.beta1, args.beta2),
    #                        eps=args.adam_eps,
    #                        weight_decay=args.l2_wd,
    #                        params=model.parameters())
    # cr = args.lr / math.log2(args.warm_up)
    # scheduler = optim.lr_scheduler.LambdaLR(
    #     optimizer,
    #     lr_lambda=lambda ee: cr * math.log2(ee + 1)
    #     if ee < args.warm_up else args.lr)
    # ========================================================================================================================================

    # QANet-06 ===============================================================================================================================
    # Get optimizer and scheduler
    optimizer = optim.Adadelta(model.parameters(),
                               args.lr,
                               eps=1e-6,
                               weight_decay=args.l2_wd)
    scheduler = sched.LambdaLR(optimizer, lambda s: 1.)  # Constant LR
    # ========================================================================================================================================

    # Get data loader
    log.info('Building dataset...')
    train_dataset = SQuAD(args.train_record_file, args.use_squad_v2)
    train_loader = data.DataLoader(train_dataset,
                                   batch_size=args.batch_size,
                                   shuffle=True,
                                   num_workers=args.num_workers,
                                   collate_fn=collate_fn)
    dev_dataset = SQuAD(args.dev_record_file, args.use_squad_v2)
    dev_loader = data.DataLoader(dev_dataset,
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 num_workers=args.num_workers,
                                 collate_fn=collate_fn)

    # Train
    log.info('Training...')
    steps_till_eval = args.eval_steps
    epoch = step // len(train_dataset)
    while epoch != args.num_epochs:
        epoch += 1
        log.info(f'Starting epoch {epoch}...')
        with torch.enable_grad(), \
                tqdm(total=len(train_loader.dataset)) as progress_bar:
            for cw_idxs, cc_idxs, qw_idxs, qc_idxs, y1, y2, ids in train_loader:  # 08/09: the starter code has thoughtfully pre-exposed cc_idxs for us...
                # Setup for forward
                cw_idxs = cw_idxs.to(device)
                cc_idxs = cc_idxs.to(device)
                qw_idxs = qw_idxs.to(device)
                qc_idxs = qc_idxs.to(device)
                batch_size = cw_idxs.size(0)
                optimizer.zero_grad()

                # Forward
                # 8/12: when seq_len > max_context_length (= 400), the model raises ValueError and we skip the batch
                try:
                    log_p1, log_p2 = model(
                        cw_idxs, cc_idxs, qw_idxs,
                        qc_idxs)  # 08/09: added cc_idxs and qc_idxs
                except ValueError:
                    continue

                y1, y2 = y1.to(device), y2.to(device)
                loss = F.nll_loss(log_p1, y1) + F.nll_loss(log_p2, y2)
                loss_val = loss.item()

                # Backward
                loss.backward()
                nn.utils.clip_grad_norm_(model.parameters(),
                                         args.max_grad_norm)
                optimizer.step()
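                # Passing a count to scheduler.step() uses the legacy
                # epoch-style API; harmless here since the lambda is constant.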
                scheduler.step(step // batch_size)
                ema(model, step // batch_size)

                # Log info
                step += batch_size
                progress_bar.update(batch_size)
                progress_bar.set_postfix(epoch=epoch, NLL=loss_val)
                tbx.add_scalar('train/NLL', loss_val, step)
                tbx.add_scalar('train/LR', optimizer.param_groups[0]['lr'],
                               step)

                steps_till_eval -= batch_size
                if steps_till_eval <= 0:
                    steps_till_eval = args.eval_steps

                    # Evaluate and save checkpoint
                    log.info(f'Evaluating at step {step}...')
                    ema.assign(model)
                    results, pred_dict = evaluate(model, dev_loader, device,
                                                  args.dev_eval_file,
                                                  args.max_ans_len,
                                                  args.use_squad_v2)
                    saver.save(step, model, results[args.metric_name], device)
                    ema.resume(model)

                    # Log to console
                    results_str = ', '.join(f'{k}: {v:05.2f}'
                                            for k, v in results.items())
                    log.info(f'Dev {results_str}')

                    # Log to TensorBoard
                    log.info('Visualizing in TensorBoard...')
                    for k, v in results.items():
                        tbx.add_scalar(f'dev/{k}', v, step)
                    util.visualize(tbx,
                                   pred_dict=pred_dict,
                                   eval_path=args.dev_eval_file,
                                   step=step,
                                   split='dev',
                                   num_visuals=args.num_visuals)
Example 4
def main(args):
    # Set up logging
    args.save_dir = util.get_save_dir(args.save_dir, args.name, training=False)
    log = util.get_logger(args.save_dir, args.name)
    log.info('Args: {}'.format(dumps(vars(args), indent=4, sort_keys=True)))
    device, gpu_ids = util.get_available_devices()
    args.batch_size *= max(1, len(gpu_ids))

    # Get embeddings
    log.info('Loading embeddings...')
    word_vectors = util.torch_from_json(args.word_emb_file)

    ### start our code:
    # Get char-embeddings
    log.info('Loading char-embeddings...')
    char_vectors = util.torch_from_json(args.char_emb_file)
    ### end our code

    # Get model
    log.info('Building model...')
    model = BiDAF(word_vectors=word_vectors,
                  hidden_size=args.hidden_size,
                  char_vectors=char_vectors)
    model = nn.DataParallel(model, gpu_ids)
    log.info('Loading checkpoint from {}...'.format(args.load_path))
    model = util.load_model(model, args.load_path, gpu_ids, return_step=False)
    model = model.to(device)
    model.eval()

    # Get data loader
    log.info('Building dataset...')
    record_file = vars(args)['{}_record_file'.format(args.split)]
    dataset = SQuAD(record_file, args.use_squad_v2)
    data_loader = data.DataLoader(dataset,
                                  batch_size=args.batch_size,
                                  shuffle=False,
                                  num_workers=args.num_workers,
                                  collate_fn=collate_fn)

    # Evaluate
    log.info('Evaluating on {} split...'.format(args.split))
    nll_meter = util.AverageMeter()
    pred_dict = {}  # Predictions for TensorBoard
    sub_dict = {}  # Predictions for submission
    eval_file = vars(args)['{}_eval_file'.format(args.split)]
    with open(eval_file, 'r') as fh:
        gold_dict = json_load(fh)
    with torch.no_grad(), \
            tqdm(total=len(dataset)) as progress_bar:
        for cw_idxs, cc_idxs, qw_idxs, qc_idxs, y1, y2, ids in data_loader:
            # Setup for forward
            cw_idxs = cw_idxs.to(device)
            qw_idxs = qw_idxs.to(device)
            batch_size = cw_idxs.size(0)

            ### start our code:
            cc_idxs = cc_idxs.to(device)
            qc_idxs = qc_idxs.to(device)

            ### end our code

            # Forward
            # log_p1, log_p2 = model(cw_idxs, qw_idxs)

            ### start our code:
            log_p1, log_p2 = model(cw_idxs, qw_idxs, cc_idxs, qc_idxs)

            ### end our code

            y1, y2 = y1.to(device), y2.to(device)
            loss = F.nll_loss(log_p1, y1) + F.nll_loss(log_p2, y2)
            nll_meter.update(loss.item(), batch_size)

            # Get F1 and EM scores
            p1, p2 = log_p1.exp(), log_p2.exp()
            starts, ends = util.discretize(p1, p2, args.max_ans_len,
                                           args.use_squad_v2)

            # Log info
            progress_bar.update(batch_size)
            if args.split != 'test':
                # No labels for the test set, so NLL would be invalid
                progress_bar.set_postfix(NLL=nll_meter.avg)

            idx2pred, uuid2pred = util.convert_tokens(gold_dict, ids.tolist(),
                                                      starts.tolist(),
                                                      ends.tolist(),
                                                      args.use_squad_v2)
            pred_dict.update(idx2pred)
            sub_dict.update(uuid2pred)

    # Log results (except for test set, since it does not come with labels)
    if args.split != 'test':
        results = util.eval_dicts(gold_dict, pred_dict, args.use_squad_v2)
        results_list = [('NLL', nll_meter.avg), ('F1', results['F1']),
                        ('EM', results['EM'])]
        if args.use_squad_v2:
            results_list.append(('AvNA', results['AvNA']))
        results = OrderedDict(results_list)

        # Log to console
        results_str = ', '.join('{}: {:05.2f}'.format(k, v)
                                for k, v in results.items())
        log.info('{} {}'.format(args.split.title(), results_str))

        # Log to TensorBoard
        tbx = SummaryWriter(args.save_dir)
        util.visualize(tbx,
                       pred_dict=pred_dict,
                       eval_path=eval_file,
                       step=0,
                       split=args.split,
                       num_visuals=args.num_visuals)

    # Write submission file
    sub_path = join(args.save_dir, args.split + '_' + args.sub_file)
    log.info('Writing submission file to {}...'.format(sub_path))
    with open(sub_path, 'w') as csv_fh:
        csv_writer = csv.writer(csv_fh, delimiter=',')
        csv_writer.writerow(['Id', 'Predicted'])
        for uuid in sorted(sub_dict):
            csv_writer.writerow([uuid, sub_dict[uuid]])
Example 5
def main(args):

    # Set up logging
    args.save_dir = util.get_save_dir(args.save_dir, args.name, training=False)
    log = util.get_logger(args.save_dir, args.name)
    log.info(f'Args: {dumps(vars(args), indent=4, sort_keys=True)}')
    device, gpu_ids = util.get_available_devices()
    args.batch_size *= max(1, len(gpu_ids))

    # Get embeddings
    log.info('Loading embeddings...')
    word_vectors = util.torch_from_json(args.word_emb_file)

    #Load Word2Idx
    log.info('Loading word2Idx...')
    with open(args.word2idx_file) as fh:
        word2Idx = json.load(fh)
    idx2Word = {v: k for (k, v) in word2Idx.items()}

    vocab_size = word_vectors.size(0)
    print(f"Vocab size: {vocab_size}")

    def getWords(idxList):
        words = []
        for i in idxList:
            words.append(idx2Word[i])
        return words

    def create_new_model():
        if args.model_type == "seq2seq":
            return Seq2Seq(word_vectors=word_vectors,
                           hidden_size=args.hidden_size,
                           output_size=vocab_size,
                           device=device,
                           num_layers=args.num_layers)
        elif args.model_type == "seq2seq_attn":
            return Seq2SeqAttn(word_vectors=word_vectors,
                               hidden_size=args.hidden_size,
                               output_size=vocab_size,
                               device=device,
                               num_layers=args.num_layers)

    # Get model
    log.info('Building model...')
    model = create_new_model()
    model = nn.DataParallel(model, gpu_ids)

    log.info(f'Loading checkpoint from {args.load_path}...')
    model = util.load_model(model, args.load_path, gpu_ids, return_step=False)
    model = model.to(device)
    model.eval()

    # Get data loader
    log.info('Building dataset...')
    record_file = vars(args)[f'{args.split}_record_file']
    dataset = SQuAD(record_file, args.use_squad_v2)
    data_loader = data.DataLoader(dataset,
                                  batch_size=args.batch_size,
                                  shuffle=False,
                                  num_workers=args.num_workers,
                                  collate_fn=collate_fn)

    # Evaluate
    log.info(f'Evaluating on {args.split} split...')
    nll_meter = util.AverageMeter()
    pred_dict = {}  # Predictions by Beam Search
    eval_file = vars(args)[f'{args.split}_eval_file']
    candidates_corpus = []
    references_corpus = []
    context_file = os.path.join(args.save_dir, args.context_file)
    question_file = os.path.join(args.save_dir, args.question_file)
    prediction_file = os.path.join(args.save_dir, args.prediction_file)

    with open(eval_file, 'r') as fh:
        gold_dict = json_load(fh)
    with torch.no_grad(), \
            tqdm(total=len(dataset)) as progress_bar:

        for cw_idxs, re_cw_idxs, cc_idxs, qw_idxs, qc_idxs, y1, y2, ids in data_loader:
            # Setup for forward
            cw_idxs = cw_idxs.to(device)
            re_cw_idxs = re_cw_idxs.to(device)
            qw_idxs = qw_idxs.to(device)
            batch_size = cw_idxs.size(0)

            cw_list = []
            qw_list = []

            # Forward
            for re_cw_idx, qw_idx in zip(
                    torch.split(re_cw_idxs, split_size_or_sections=1, dim=0),
                    torch.split(qw_idxs, split_size_or_sections=1, dim=0)):

                cw_list.append(re_cw_idx)
                qw_list.append(qw_idx)

                #y = F.one_hot(qw_idx, num_classes=len(word_vectors))
                #print(getWords(cw_idx.squeeze().tolist()))
                #print(getWords(qw_idx.squeeze().tolist()))
                #util.TeacherForce(model, word2Idx, idx2Word, cw_idx, qw_idx, device)
                #util.evaluateRandomly(model, word2Idx, idx2Word, cw_idx, device)
                '''

                hypotheses = util.beamSearch(model, word2Idx, idx2Word, cw_idx, device)
                loss = 0.
                pred_dict[cw_idx] = []

                for hyp in hypotheses:
                    loss = loss + hyp.score
                    pred_dict[cw_idx].append(hyp.value)
                nll_meter.update(loss, batch_size)
                #wait = input("Sab chill hai.. press to continue")

                cw_list.append(cw_idx)
                qw_list.append(qw_idx)            
                '''
            candidates, references = util.find_candidates_and_references_for_bleu(
                model, word2Idx, idx2Word, cw_list, qw_list, device,
                context_file, question_file, prediction_file)
            candidates_corpus += candidates
            references_corpus += references
            progress_bar.update(batch_size)

            # Log info
            #if args.split != 'test':
            # No labels for the test set, so NLL would be invalid
            #    progress_bar.set_postfix(NLL=nll_meter.avg)
        print("Calculating corpus BLEU...")
        util.compute_corpus_level_bleu_score(model, args.split,
                                             candidates_corpus,
                                             references_corpus, device)
Example 6
def main(args):
    # Set up logging and devices
    args.save_dir = util.get_save_dir(args.save_dir, args.name, training=True)
    log = util.get_logger(args.save_dir, args.name)
    tbx = SummaryWriter(args.save_dir)
    device, args.gpu_ids = util.get_available_devices()
    log.info('Args: {}'.format(dumps(vars(args), indent=4, sort_keys=True)))
    args.batch_size *= max(1, len(args.gpu_ids))

    # Set random seed
    log.info('Using random seed {}...'.format(args.seed))
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    # Get embeddings
    log.info('Loading embeddings...')
    word_vectors = util.torch_from_json(args.word_emb_file)

    # setup_args = get_setup_args()
    with open(args.char2idx_file, "r") as f:
        char2idx = json_load(f)
    # Get model
    log.info('Building model...')
    model = QANet(word_vectors=word_vectors, char2idx=char2idx)
    model = nn.DataParallel(model, args.gpu_ids)
    if args.load_path:
        log.info('Loading checkpoint from {}...'.format(args.load_path))
        model, step = util.load_model(model, args.load_path, args.gpu_ids)
    else:
        step = 0
    model = model.to(device)
    model.train()
    ema = util.EMA(model, args.ema_decay)

    # Get saver
    saver = util.CheckpointSaver(args.save_dir,
                                 max_checkpoints=args.max_checkpoints,
                                 metric_name=args.metric_name,
                                 maximize_metric=args.maximize_metric,
                                 log=log)

    # Get optimizer and scheduler
    optimizer = optim.Adadelta(model.parameters(),
                               args.lr,
                               weight_decay=args.l2_wd)
    # optimizer = optim.Adam(model.parameters(), lr=0.001, betas=(0.8, 0.999))
    scheduler = sched.LambdaLR(optimizer, lambda s: 1.)  # Constant LR

    # Get data loader
    log.info('Building dataset...')

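    # Build a 75k-example "light" training set on local disk (a Colab path),
    # presumably trimmed to keep memory and epoch time manageable.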
    dataset1 = np.load(args.train_record_file)
    outfile = '/content/dataset/train_light.npz'
    n_row = 75000
    np.savez(outfile,
             context_idxs=dataset1['context_idxs'][:n_row],
             context_char_idxs=dataset1['context_char_idxs'][:n_row],
             ques_idxs=dataset1['ques_idxs'][:n_row],
             ques_char_idxs=dataset1['ques_char_idxs'][:n_row],
             y1s=dataset1['y1s'][:n_row],
             y2s=dataset1['y2s'][:n_row],
             ids=dataset1['ids'][:n_row])

    # train_dataset = SQuAD(args.train_record_file, args.use_squad_v2)
    train_dataset = SQuAD(outfile, args.use_squad_v2)
    train_loader = data.DataLoader(train_dataset,
                                   batch_size=args.batch_size,
                                   shuffle=True,
                                   num_workers=args.num_workers,
                                   collate_fn=collate_fn)
    dev_dataset = SQuAD(args.dev_record_file, args.use_squad_v2)
    dev_loader = data.DataLoader(dev_dataset,
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 num_workers=args.num_workers,
                                 collate_fn=collate_fn)

    # Train
    log.info('Training...')
    steps_till_eval = args.eval_steps
    epoch = step // len(train_dataset)
    while epoch != args.num_epochs:
        epoch += 1
        log.info('Starting epoch {}...'.format(epoch))
        with torch.enable_grad(), \
                tqdm(total=len(train_loader.dataset)) as progress_bar:
            for cw_idxs, cc_idxs, qw_idxs, qc_idxs, y1, y2, ids in train_loader:
                # Setup for forward
                cw_idxs = cw_idxs.to(device)
                cc_idxs = cc_idxs.to(device)  # char indices are consumed by the forward pass below
                qw_idxs = qw_idxs.to(device)
                qc_idxs = qc_idxs.to(device)
                batch_size = cw_idxs.size(0)
                optimizer.zero_grad()

                # Forward
                log_p1, log_p2 = model(cw_idxs, cc_idxs, qw_idxs, qc_idxs)
                y1, y2 = y1.to(device), y2.to(device)
                loss = F.nll_loss(log_p1, y1) + F.nll_loss(log_p2, y2)
                loss_val = loss.item()
                if step % 10000 == 0:
                    print('loss val: {}'.format(loss_val))

                # Backward
                loss.backward()
                nn.utils.clip_grad_norm_(model.parameters(),
                                         args.max_grad_norm)
                optimizer.step()
                scheduler.step(step // batch_size)
                ema(model, step // batch_size)

                # Log info
                step += batch_size
                progress_bar.update(batch_size)
                progress_bar.set_postfix(epoch=epoch, NLL=loss_val)
                tbx.add_scalar('train/NLL', loss_val, step)
                tbx.add_scalar('train/LR', optimizer.param_groups[0]['lr'],
                               step)

                steps_till_eval -= batch_size
                if steps_till_eval <= 0:
                    steps_till_eval = args.eval_steps

                    # Evaluate and save checkpoint
                    log.info('Evaluating at step {}...'.format(step))
                    ema.assign(model)
                    results, pred_dict = evaluate(model, dev_loader, device,
                                                  args.dev_eval_file,
                                                  args.max_ans_len,
                                                  args.use_squad_v2)
                    saver.save(step, model, results[args.metric_name], device)
                    ema.resume(model)

                    # Log to console
                    results_str = ', '.join('{}: {:05.2f}'.format(k, v)
                                            for k, v in results.items())
                    log.info('Dev {}'.format(results_str))

                    # Log to TensorBoard
                    log.info('Visualizing in TensorBoard...')
                    for k, v in results.items():
                        tbx.add_scalar('dev/{}'.format(k), v, step)
                    util.visualize(tbx,
                                   pred_dict=pred_dict,
                                   eval_path=args.dev_eval_file,
                                   step=step,
                                   split='dev',
                                   num_visuals=args.num_visuals)
Example 7
from util import collate_fn, SQuAD
from pickle import load

split = 'test'

args = get_test_args()
args.split = split

eval_file = vars(args)[f'{args.split}_eval_file']
with open(eval_file, 'r') as fh:
    gold_dict = json_load(fh)

#We forgot to record the question ids during the original run; collecting them
#afterwards is safe because the test split is not shuffled, there or here.
record_file = vars(args)[f'{args.split}_record_file']
dataset = SQuAD(record_file, args.use_squad_v2)
data_loader = data.DataLoader(dataset,
                              batch_size=args.batch_size,
                              shuffle=False,
                              collate_fn=collate_fn)

id_quest = []
with torch.no_grad(), \
    tqdm(total=len(dataset)) as progress_bar:
    for cw_idxs, cc_idxs, qw_idxs, qc_idxs, y1, y2, ids in data_loader:
        id_quest += [ids]

p_start = []
p_end = []

save_dir_bidaf = "../save/test/" + split + "/bidaf_original_char_embed-01/"
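
# Hypothetical continuation (not in the original snippet): the otherwise unused
# pickle `load` import suggests each model's start/end probabilities were
# pickled under save_dir_bidaf earlier; the file names below are assumptions.
with open(save_dir_bidaf + 'p_start.pickle', 'rb') as fh:
    p_start.append(load(fh))
with open(save_dir_bidaf + 'p_end.pickle', 'rb') as fh:
    p_end.append(load(fh))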
Example 8
def main(args):
    # Set up logging and devices
    startime = datetime.now()
    args.save_dir = util.get_save_dir(args.save_dir, args.name, training=True)
    log = util.get_logger(args.save_dir, args.name)

    time_log = args.log_time
    if time_log > 0:
        log.info(f'Start training at: {startime.strftime("%H:%M:%S")}')

    tbx = SummaryWriter(args.save_dir)
    device, args.gpu_ids = util.get_available_devices()
    log.info(f'Args: {dumps(vars(args), indent=4, sort_keys=True)}')
    args.batch_size *= max(1, len(args.gpu_ids))
    model_type = args.model

    # Set random seed
    log.info(f'Using random seed {args.seed}...')
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    # check this
    #useCharEmbeddings = args.model == 'BiDAFplus'

    # Get embeddings
    log.info('Loading embeddings...')
    print(f'{args.word_emb_file}')
    word_vectors = util.torch_from_json(args.word_emb_file)
    char_vectors = util.torch_from_json(args.char_emb_file)
    if time_log > 0:
        log.info(f'Loaded embeddings: {(datetime.now()-startime).seconds}')
    # load_char_vectors
    # Get model
    log.info('Building model...')
    if model_type == 'BiDAFplus':
        model = BiDAFplus(word_vectors=word_vectors,
                          char_vectors=char_vectors,
                          hidden_size=args.hidden_size,
                          params=get_params(model_type, args.params))

    elif model_type == 'BiDAFbase':
        model = BiDAFbase(word_vectors=word_vectors,
                          hidden_size=args.hidden_size,
                          drop_prob=args.drop_prob)

    elif model_type == "Transformer":
        model = TransformerModel(word_vectors=word_vectors,
                                 char_vectors=char_vectors,
                                 params=get_params(model_type, args.params))

    elif model_type == 'BiDAF':
        model = BiDAF(word_vectors=word_vectors,
                      char_vectors=char_vectors,
                      hidden_size=args.hidden_size,
                      params=get_params(model_type, args.params))
    else:
        raise ValueError(f'Unknown model type: {model_type}')

    model = nn.DataParallel(model, args.gpu_ids)
    if time_log > 0:
        log.info(f'Built model: {(datetime.now()-startime).seconds}')
    if args.load_path:
        log.info(f'Loading checkpoint from {args.load_path}...')
        model, step = util.load_model(model, args.load_path, args.gpu_ids)
    else:
        step = 0
    model = model.to(device)
    model.train()
    ema = util.EMA(model, args.ema_decay)

    # Get saver
    saver = util.CheckpointSaver(args.save_dir,
                                 max_checkpoints=args.max_checkpoints,
                                 metric_name=args.metric_name,
                                 maximize_metric=args.maximize_metric,
                                 log=log)

    # Get optimizer and scheduler
    optimizer = optim.Adadelta(model.parameters(),
                               args.lr,
                               weight_decay=args.l2_wd)
    scheduler = sched.LambdaLR(optimizer, lambda s: 1.)  # Constant LR

    # Get data loader
    log.info('Building dataset...')
    if args.mode != 'quick_eval':
        train_dataset = SQuAD(args.train_record_file, args.use_squad_v2)
        train_loader = data.DataLoader(train_dataset,
                                       batch_size=args.batch_size,
                                       shuffle=True,
                                       num_workers=args.num_workers,
                                       collate_fn=collate_fn)

        dev_dataset = SQuAD(args.dev_record_file, args.use_squad_v2)
        dev_loader = data.DataLoader(dev_dataset,
                                     batch_size=args.batch_size,
                                     shuffle=False,
                                     num_workers=args.num_workers,
                                     collate_fn=collate_fn)

    else:
        loaded_data = quick_eval_data_loader()
        train_loader = [loaded_data for _ in range(5)]
        dev_loader = [quick_eval_data_loader(dev=True)]
        train_dataset = train_loader
        dev_dataset = dev_loader

    log.info('Built dataset: {}:{}'.format(*divmod((datetime.now() -
                                                    startime).seconds, 60)))

    # Train
    log.info('Training...')
    steps_till_eval = args.eval_steps
    epoch = step // len(train_dataset)
    if time_log > 0:
        traintime = datetime.now()
    total_iterations = 0
    while epoch != args.num_epochs:
        epoch += 1
        log.info(f'Starting epoch {epoch}...')
        if time_log > 0:
            epochtime = datetime.now()
        if args.mode != 'quick_eval':
            progress_len = len(train_loader.dataset)
        else:
            progress_len = len(train_loader)
        with torch.enable_grad(), \
                tqdm(total=progress_len) as progress_bar:

            for cw_idxs, cc_idxs, qw_idxs, qc_idxs, y1, y2, ids in train_loader:

                #quick_eval_data_saver(cw_idxs, cc_idxs, qw_idxs, qc_idxs, y1, y2, ids)

                #########
                if time_log > 0:
                    itertime = datetime.now()
                # Setup for forward
                cw_idxs = cw_idxs.to(device)
                qw_idxs = qw_idxs.to(device)
                batch_size = cw_idxs.size(0)
                optimizer.zero_grad()

                if model_type in ('BiDAF', 'BiDAFplus', 'Transformer'):
                    # BiDAFplus is assumed to take the same char-level inputs
                    # as BiDAF; it previously matched no branch, which left
                    # log_p1/log_p2 undefined below.
                    cc_idxs = cc_idxs.to(device)
                    qc_idxs = qc_idxs.to(device)

                    log_p1, log_p2 = model(cc_idxs, qc_idxs, cw_idxs, qw_idxs)

                # Forward
                elif model_type == 'BiDAFbase':
                    log_p1, log_p2 = model(cw_idxs, qw_idxs)

                y1, y2 = y1.to(device), y2.to(device)
                loss = F.nll_loss(log_p1, y1) + F.nll_loss(log_p2, y2)
                loss_val = loss.item()

                if time_log > 2:
                    forwardtime = datetime.now()
                    log.info('Forward time {}:{}'.format(
                        *divmod((forwardtime - itertime).seconds, 60)))
                # Backward
                loss.backward()
                nn.utils.clip_grad_norm_(model.parameters(),
                                         args.max_grad_norm)
                optimizer.step()
                scheduler.step(step // batch_size)
                ema(model, step // batch_size)

                if time_log > 2:
                    backwardtime = datetime.now()
                    log.info('Backward time {}:{}'.format(
                        *divmod((backwardtime - forwardtime).seconds, 60)))
                # Log info
                step += batch_size
                progress_bar.update(batch_size)
                progress_bar.set_postfix(epoch=epoch, NLL=loss_val)
                tbx.add_scalar('train/NLL', loss_val, step)
                tbx.add_scalar('train/LR', optimizer.param_groups[0]['lr'],
                               step)

                if time_log > 0:
                    enditertime = datetime.now()
                    #log.info('Iteration {} {}:{}'.format(total_iterations,
                    #    *divmod((enditertime-itertime).seconds, 60)))

                steps_till_eval -= batch_size
                if steps_till_eval <= 0 or args.mode == 'quick_eval':
                    steps_till_eval = args.eval_steps

                    # Evaluate and save checkpoint
                    log.info(f'Evaluating at step {step}...')
                    ema.assign(model)
                    results, pred_dict = evaluate(
                        model,
                        dev_loader,
                        device,
                        args.dev_eval_file,
                        args.max_ans_len,
                        args.use_squad_v2,
                        model_type,
                        quick_eval=args.mode == 'quick_eval')
                    saver.save(step, model, results[args.metric_name], device)
                    ema.resume(model)

                    # Log to console
                    if time_log > 1:
                        log.info('Eval time {}:{}'.format(
                            *divmod((datetime.now() -
                                     enditertime).seconds, 60)))

                    results_str = ', '.join(f'{k}: {v:05.2f}'
                                            for k, v in results.items())
                    log.info(f'Dev {results_str}')

                    # Log to TensorBoard
                    log.info('Visualizing in TensorBoard...')
                    for k, v in results.items():
                        tbx.add_scalar(f'dev/{k}', v, step)
                    util.visualize(tbx,
                                   pred_dict=pred_dict,
                                   eval_path=args.dev_eval_file,
                                   step=step,
                                   split='dev',
                                   num_visuals=args.num_visuals)
                total_iterations += 1
                if ((time_log == 2) and (total_iterations % 10 == 0)) or (
                    (time_log == 1) and (total_iterations % 100 == 0)):
                    log.info('Mean iteration time {}:{}'.format(
                        *divmod((enditertime - traintime).seconds /
                                total_iterations, 60)))

        if time_log > 0:
            endepochtime = datetime.now()
            log.info('Epoch time {}:{}'.format(
                *divmod((endepochtime - epochtime).seconds, 60)))
Example 9
def main(args):

    # Set up logging
    args.save_dir = util.get_save_dir(args.save_dir, args.name, training=False)
    log = util.get_logger(args.save_dir, args.name)
    log.info(f'Args: {dumps(vars(args), indent=4, sort_keys=True)}')
    device, gpu_ids = util.get_available_devices()
    args.batch_size *= max(1, len(gpu_ids))

    # Get embeddings
    log.info('Loading embeddings...')
    word_vectors = util.torch_from_json(args.word_emb_file)

    #Load Word2Idx
    log.info('Loading word2Idx...')
    with open(args.word2idx_file) as fh:
        word2Idx = json.load(fh)
    Idx2Word = {v: k for (k, v) in word2Idx.items()}
    vocab_size = len(word2Idx)

    def getWords(idxList):
        words = []
        for i in idxList:
            words.append(Idx2Word[i])
        return words

    def create_new_model():
        if args.model_type == "seq2seq":
            return Seq2Seq(word_vectors=word_vectors,
                           hidden_size=args.hidden_size,
                           output_size=vocab_size,
                           device=device)
        elif args.model_type == "seq2seq_attn":
            return Seq2SeqAttn(word_vectors=word_vectors,
                               hidden_size=args.hidden_size,
                               output_size=vocab_size,
                               device=device)
        elif args.model_type == "transformer":
            return TransformerModel(vocab_size,
                                    device,
                                    num_encoder_layers=2,
                                    num_decoder_layers=2,
                                    dropout=0.0)

    # Get model
    log.info('Building model...')
    model = create_new_model()
    model = nn.DataParallel(model, gpu_ids)

    log.info(f'Loading checkpoint from {args.load_path}...')
    model = util.load_model(model, args.load_path, gpu_ids, return_step=False)
    model = model.to(device)
    model.eval()

    # Get data loader
    log.info('Building dataset...')
    record_file = vars(args)[f'{args.split}_record_file']
    dataset = SQuAD(record_file, args.use_squad_v2)
    data_loader = data.DataLoader(dataset,
                                  batch_size=args.batch_size,
                                  shuffle=False,
                                  num_workers=args.num_workers,
                                  collate_fn=collate_fn)

    # Evaluate
    log.info(f'Evaluating on {args.split} split...')
    nll_meter = util.AverageMeter()
    pred_dict = {}  # Predictions by Beam Search
    eval_file = vars(args)[f'{args.split}_eval_file']
    cw_list = []
    qw_list = []

    with open(eval_file, 'r') as fh:
        gold_dict = json_load(fh)
    with torch.no_grad(), \
            tqdm(total=len(dataset)) as progress_bar:
        for cw_idxs, cc_idxs, qw_idxs, qc_idxs, y1, y2, ids in data_loader:
            cw_idxs = cw_idxs.to(device)
            qw_idxs = qw_idxs.to(device)
            batch_size = cw_idxs.size(0)

            # Setup for forward
            src_idxs = cw_idxs
            src_idxs = torch.cat(
                (torch.zeros((batch_size, 1), device=device,
                             dtype=torch.long), src_idxs,
                 torch.zeros(
                     (batch_size, 1), device=device, dtype=torch.long)),
                dim=-1)
            src_idxs[:, 0] = SOS
            src_idxs[:, -1] = EOS
            tgt_idxs = qw_idxs[:, :-1]
            tgt_idxs_y = qw_idxs[:, 1:]

            # Forward
            for src_idx, tgt_idx in zip(
                    torch.split(src_idxs, split_size_or_sections=1, dim=0),
                    torch.split(tgt_idxs, split_size_or_sections=1, dim=0)):
                src_mask = src_idx == PAD
                print("Context Words:")
                print(getWords(src_idx[0].tolist()))
                #print(getWords(qw_idxs[batch_size-1].squeeze().tolist()))
                #util.evaluateRandomly(model, word2Idx, Idx2Word, re_cw_idxs[batch_size-1].unsqueeze(0), device)

                print("Question Words:")
                print(getWords(tgt_idx[0].tolist()))

                print("Predicted Words:")
                model.eval()
                predicted_words = util.greedy_decode(model,
                                                     src_idx,
                                                     src_mask,
                                                     max_len=30,
                                                     start_symbol=2)
                print(predicted_words)
                print(getWords(predicted_words.squeeze().tolist()))
                '''
                #y = F.one_hot(qw_idx, num_classes=len(word_vectors))
                #print(getWords(cw_idx.squeeze().tolist()))
                #print(getWords(qw_idx.squeeze().tolist()))
                #util.TeacherForce(model, word2Idx, Idx2Word, cw_idx, qw_idx, device)
                #util.evaluateRandomly(model, word2Idx, Idx2Word, cw_idx, device)

                hypotheses = util.beamSearch(model, word2Idx, Idx2Word, cw_idx, device)
                loss = 0.
                pred_dict[cw_idx] = []

                for hyp in hypotheses:
                    loss = loss + hyp.score
                    pred_dict[cw_idx].append(hyp.value)
                nll_meter.update(loss, batch_size)
                #wait = input("Sab chill hai.. press to continue")

                cw_list.append(cw_idx)
                qw_list.append(qw_idx)
                '''

            # Log info
            progress_bar.update(batch_size)
            if args.split != 'test':
                # No labels for the test set, so NLL would be invalid
                progress_bar.set_postfix(NLL=nll_meter.avg)
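util.greedy_decode above is project-specific; for reference, a minimal greedy decoder in the same spirit. The model interface (model(src, tgt, src_mask) returning per-position log-probabilities) and the SOS/EOS ids are assumptions, not the original implementation:

import torch

def greedy_decode_sketch(model, src_idx, src_mask, max_len=30, start_symbol=2, eos_symbol=3):
    # Hypothetical greedy decoder: feed the argmax token back in, step by step.
    device = src_idx.device
    ys = torch.full((src_idx.size(0), 1), start_symbol, dtype=torch.long, device=device)
    for _ in range(max_len - 1):
        log_probs = model(src_idx, ys, src_mask)       # assumed shape: (batch, len, vocab)
        next_token = log_probs[:, -1].argmax(dim=-1, keepdim=True)
        ys = torch.cat([ys, next_token], dim=1)
        if (next_token == eos_symbol).all():           # assumed EOS id
            break
    return ys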
Example n. 10
def train_QaNet(args):
    device, args.gpu_ids = util.get_available_devices()
    word_mat = util.torch_from_json(args.word_emb_file)
    char_mat = util.torch_from_json(args.char_emb_file)

    with open(args.dev_eval_file, 'r') as fh:
        dev_eval_file = json_load(fh)

    print("Building model...")

    train_dataset = SQuAD(args.train_record_file, args.use_squad_v2)
    train_loader = data.DataLoader(train_dataset,
                                   batch_size=args.batch_size,
                                   shuffle=True,
                                   num_workers=args.num_workers,
                                   collate_fn=collate_fn)
    dev_dataset = SQuAD(args.dev_record_file, args.use_squad_v2)
    dev_loader = data.DataLoader(dev_dataset,
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 num_workers=args.num_workers,
                                 collate_fn=collate_fn)

    lr = args.lr
    base_lr = 1
    lr_warm_up_num = args.lr_warm_up_num

    model = QaNet(word_mat, char_mat, args.connector_dim, args.glove_dim,
                  args.char_dim, args.drop_prob, args.dropout_char,
                  args.num_heads, args.c_len, args.q_len).to(device)
    ema = util.EMA(model, args.ema_decay)

    parameters = filter(lambda param: param.requires_grad, model.parameters())
    optimizer = optim.Adam(lr=base_lr,
                           betas=(0.9, 0.999),
                           eps=1e-7,
                           weight_decay=5e-8,
                           params=parameters)
    cr = lr / math.log2(lr_warm_up_num)
    scheduler = optim.lr_scheduler.LambdaLR(
        optimizer,
        lr_lambda=lambda ee: cr * math.log2(ee + 1)
        if ee < lr_warm_up_num else lr)

    best_f1 = 0
    best_em = 0
    patience = 0
    for epoch in range(args.num_epochs):

        train(model, optimizer, scheduler, train_loader, dev_loader,
              dev_eval_file, epoch, ema, device)

        ema.assign(model)
        metrics = test(model, dev_loader, dev_eval_file,
                       (epoch + 1) * len(train_loader))
        dev_f1 = metrics["f1"]
        dev_em = metrics["exact_match"]
        if dev_f1 < best_f1 and dev_em < best_em:
            patience += 1
            if patience > args.early_stop:
                break
        else:
            patience = 0
            best_f1 = max(best_f1, dev_f1)
            best_em = max(best_em, dev_em)

        fn = os.path.join(args.save_dir, "model.pt")
        torch.save(model, fn)
        ema.resume(model)
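The scheduler above warms the learning rate up as cr * log2(step + 1) against a base LR of 1, then holds it at lr once lr_warm_up_num steps have passed. A standalone sketch of the same idea in plain PyTorch (the toy model and constants are placeholders):

import math
import torch
from torch import optim

model = torch.nn.Linear(4, 2)                        # stand-in model
lr, lr_warm_up_num = 1e-3, 1000
optimizer = optim.Adam(model.parameters(), lr=1.0)   # base LR 1: the lambda returns the actual LR
cr = lr / math.log2(lr_warm_up_num)
scheduler = optim.lr_scheduler.LambdaLR(
    optimizer,
    lr_lambda=lambda step: cr * math.log2(step + 1) if step < lr_warm_up_num else lr)
for step in range(5):
    optimizer.step()
    scheduler.step()                                 # advances the warm-up curve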
Example n. 11
def main(args, actions=None):
    """"
    actions is a tuple (action, number of actions to be taken)

    action can be either: "substitute", "delete" or "add".
    number of actions to be taken: the number of words to apply the "substitute", "delete" or "add" action.

    """

    # check that the actions parameter was received

    # Set up logging
    args.save_dir = util.get_save_dir(args.save_dir, args.name, training=False)
    log = util.get_logger(args.save_dir, args.name)
    log.info(f'Args: {dumps(vars(args), indent=4, sort_keys=True)}')
    device, gpu_ids = util.get_available_devices()
    args.batch_size *= max(1, len(gpu_ids))

    # Get embeddings
    log.info('Loading embeddings...')
    word_vectors = util.torch_from_json(args.word_emb_file)

    # Get model
    log.info('Building model...')
    if args.model == 'bidaf':
        model = BiDAF(word_vectors=word_vectors, hidden_size=args.hidden_size)
    elif args.model == 'bidafextra':
        model = BiDAFExtra(word_vectors=word_vectors, args=args)
    elif args.model == 'fusionnet':
        model = FusionNet(word_vectors=word_vectors, args=args)
    else:
        raise NameError('No model named ' + args.model)

    model = nn.DataParallel(model, gpu_ids)
    log.info(f'Loading checkpoint from {args.load_path}...')
    model = util.load_model(model, args.load_path, gpu_ids, return_step=False)
    model = model.to(device)
    model.eval()

    # Get data loader
    log.info('Building dataset...')
    record_file = vars(args)[f'{args.split}_record_file']
    dataset = SQuAD(record_file, args)
    data_loader = data.DataLoader(dataset,
                                  batch_size=args.batch_size,
                                  shuffle=False,
                                  num_workers=args.num_workers,
                                  collate_fn=collate_fn)
    # print("*"*80)
    #print(len(dataset.question_idxs))

    #for question_idx in dataset.question_idxs:
    #    print(question_idx)
    #    print("*" * 80)

    #print(self.question_idxs[question_idx])
    #self.question_idxs[idx]
    # print("data_loader: ",data_loader)
    # Evaluate
    log.info(f'Evaluating on {args.split} split...')
    nll_meter = util.AverageMeter()
    pred_dict = {}  # Predictions for TensorBoard
    sub_dict = {}  # Predictions for submission
    eval_file = vars(args)[f'{args.split}_eval_file']
    with open(eval_file, 'r') as fh:
        gold_dict = json_load(fh)

    count_questions_type = defaultdict(lambda: 0)

    audit_trail_from_question_type = defaultdict(lambda: [])
    list_of_interrogative_pronouns = [
        "what", "whose", "why", "which", "where", "when", "how", "who", "whom"
    ]

    for index in range(1, len(gold_dict) + 1):
        # Lower-case the question to simplify the analysis, at the cost of losing the
        # capitalization cues that can mark the interrogative pronoun's position.
        question_lower_case = gold_dict[str(index)]['question'].lower()

        list_question_lower_case_with_punctuation = question_lower_case.translate(
            {ord(i): " "
             for i in "'"}).split()

        # strip punctuation from each token
        question_lower_case = []
        for item in list_question_lower_case_with_punctuation:
            question_lower_case.append(
                item.translate({ord(i): ""
                                for i in ",.<>!@£$%^&*()_-+=?"}))

        # defining a variable for the first word
        first_word_question_lower_case = question_lower_case[0]

        # defining variable for the second word
        second_word_question_lower_case = question_lower_case[1]

        # defining variable for the first and second word
        combined_first_and_second_words = first_word_question_lower_case + " " + second_word_question_lower_case

        # Analyzing the sentence
        if first_word_question_lower_case in list_of_interrogative_pronouns:
            count_questions_type[first_word_question_lower_case] += 1
            audit_trail_from_question_type[
                first_word_question_lower_case].append(str(index))
        # composed question starting by in
        elif first_word_question_lower_case == "in":
            if second_word_question_lower_case in list_of_interrogative_pronouns and second_word_question_lower_case != "whose":
                count_questions_type[combined_first_and_second_words] += 1
                audit_trail_from_question_type[
                    combined_first_and_second_words].append(str(index))
            else:
                pronoun = find_first_interrogative_pronoun(
                    list_of_interrogative_pronouns, question_lower_case)
                count_questions_type[pronoun] += 1
                audit_trail_from_question_type[pronoun].append(str(index))

        # composed question starting by by
        elif first_word_question_lower_case == "by":
            if second_word_question_lower_case in list_of_interrogative_pronouns \
                    and second_word_question_lower_case != "whom" \
                    and second_word_question_lower_case != "which" \
                    and second_word_question_lower_case != "when" \
                    and second_word_question_lower_case != "how":
                count_questions_type[combined_first_and_second_words] += 1
                audit_trail_from_question_type[
                    combined_first_and_second_words].append(str(index))
            else:
                pronoun = find_first_interrogative_pronoun(
                    list_of_interrogative_pronouns, question_lower_case)
                count_questions_type[pronoun] += 1
                audit_trail_from_question_type[pronoun].append(str(index))

        else:
            pronoun = find_first_interrogative_pronoun(
                list_of_interrogative_pronouns, question_lower_case)
            count_questions_type[pronoun] += 1
            audit_trail_from_question_type[pronoun].append(str(index))

    reverse_dict_by_value = OrderedDict(
        sorted(count_questions_type.items(), key=lambda x: x[1]))
    total_questions = sum(count_questions_type.values())

    with torch.no_grad(), \
         tqdm(total=len(dataset)) as progress_bar:
        for cw_idxs, cc_idxs, qw_idxs, qc_idxs, cw_pos, cw_ner, cw_freq, cqw_extra, y1, y2, ids in data_loader:
            # Setup for forward

            # **********************************************************
            #
            # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
            #
            # Where we make the modif:

            if actions[0] == "substitute":
                # substitute to random token in each question of the batch (substitution is made within the same sentence:
                batch_size = cw_idxs.size()[0]
                number_of_actions = actions[1]
                for _ in range(number_of_actions):
                    length_index_batch = cw_idxs.size()[1]
                    for i in range(batch_size):
                        tensor_with_zero_value = ((
                            cw_idxs[i] == 0).nonzero()).squeeze()

                        try:
                            first_zero_value = torch.min(
                                tensor_with_zero_value)
                        except RuntimeError:  # torch.min on an empty tensor: no pad found
                            first_zero_value = length_index_batch

                        if first_zero_value > 2:
                            select_item_idx_1 = random.randint(
                                0, first_zero_value - 1)
                            select_item_idx_2 = random.randint(
                                0, first_zero_value - 1)
                            save_value_1 = copy.deepcopy(
                                cw_idxs[i, select_item_idx_1])
                            cw_idxs[i, select_item_idx_1] = cw_idxs[
                                i, select_item_idx_2]
                            cw_idxs[i, select_item_idx_2] = save_value_1

            elif actions[0] == "delete":
                # substitute to random token in each question of the batch (substitution is made within the same sentence:
                batch_size = cw_idxs.size()[0]
                number_of_actions = actions[1]
                for _ in range(number_of_actions):
                    length_index_batch = cw_idxs.size()[1]
                    for i in range(batch_size):
                        tensor_with_zero_value = ((
                            cw_idxs[i] == 0).nonzero()).squeeze()

                        try:
                            first_zero_value = torch.min(
                                tensor_with_zero_value)
                        except RuntimeError:  # torch.min on an empty tensor: no pad found
                            first_zero_value = length_index_batch

                        if first_zero_value > 2:
                            select_item_idx_1 = random.randint(
                                0, first_zero_value - 1)
                            cw_idxs[i, :] = torch.cat(
                                (cw_idxs[i, 0:select_item_idx_1],
                                 cw_idxs[i, select_item_idx_1 + 1:],
                                 torch.tensor([0])), -1)

            elif actions[0] == "add":
                batch_size = cw_idxs.size()[0]
                number_of_actions = actions[1]
                for _ in range(number_of_actions):
                    length_index_batch = cw_idxs.size()[1]
                    for i in range(batch_size):
                        tensor_with_zero_value = ((
                            cw_idxs[i] == 0).nonzero()).squeeze()

                        try:
                            first_zero_value = torch.min(
                                tensor_with_zero_value)
                        except RuntimeError:  # torch.min on an empty tensor: no pad found
                            first_zero_value = length_index_batch

                        if first_zero_value > 2:
                            select_item_idx_1 = random.randint(
                                0, first_zero_value - 1)
                            cw_idxs[i, select_item_idx_1] = random.randint(
                                1, 50000)

            elif actions[0] == "add2":
                # substitute to random token in each question of the batch (substitution is made within the same sentence:
                batch_size = cw_idxs.size()[0]
                number_of_actions = actions[1]
                for _ in range(number_of_actions):
                    length_index_batch = cw_idxs.size()[1]
                    for i in range(batch_size):
                        tensor_with_zero_value = ((
                            cw_idxs[i] == 0).nonzero()).squeeze()

                        try:
                            first_zero_value = torch.min(
                                tensor_with_zero_value)
                        except RuntimeError:  # torch.min on an empty tensor: no pad found
                            first_zero_value = length_index_batch

                        if first_zero_value > 2:
                            select_item_idx_1 = random.randint(
                                0, first_zero_value - 1)
                            select_item_idx_2 = random.randint(
                                0, first_zero_value - 1)
                            cw_idxs[i, select_item_idx_1] = random.randint(
                                1, 50000)
                            cw_idxs[i, select_item_idx_2] = random.randint(
                                1, 50000)

            else:
                print("Incorrect command: exiting")
                exit()
            cw_idxs = cw_idxs.to(device)
            qw_idxs = qw_idxs.to(device)
            batch_size = cw_idxs.size(0)

            # Forward
            if args.model == 'bidaf':
                log_p1, log_p2 = model(cw_idxs, qw_idxs)
            else:
                log_p1, log_p2 = model(cw_idxs, qw_idxs, cw_pos, cw_ner,
                                       cw_freq, cqw_extra)

            y1, y2 = y1.to(device), y2.to(device)
            loss = F.nll_loss(log_p1, y1) + F.nll_loss(log_p2, y2)
            nll_meter.update(loss.item(), batch_size)

            # Get F1 and EM scores
            p1, p2 = log_p1.exp(), log_p2.exp()
            starts, ends = util.discretize(p1, p2, args.max_ans_len,
                                           args.use_squad_v2)

            # Log info
            progress_bar.update(batch_size)
            if args.split != 'test':
                # No labels for the test set, so NLL would be invalid
                progress_bar.set_postfix(NLL=nll_meter.avg)

            idx2pred, uuid2pred = util.convert_tokens(gold_dict, ids.tolist(),
                                                      starts.tolist(),
                                                      ends.tolist(),
                                                      args.use_squad_v2)
            pred_dict.update(idx2pred)
            sub_dict.update(uuid2pred)

    # Log results (except for test set, since it does not come with labels)
    if args.split != 'test':
        results = util.eval_dicts(gold_dict, pred_dict, args.use_squad_v2)

        # Printing information for questions without interrogative pronouns
        """"
        print("len(gold_dict): ", len(gold_dict))
        print("len(pred_dict): ", len(pred_dict))
        print("Is gold_dict.keys() identical to pred_dict.keys(): ", gold_dict.keys()==pred_dict.keys())
        if gold_dict.keys()!=pred_dict.keys():
            for key in gold_dict.keys():
                if key not in pred_dict.keys():
                    print("key ", key, " missing in pred_dict.keys(")
        """
        results_list = [('NLL', nll_meter.avg), ('F1', results['F1']),
                        ('EM', results['EM'])]
        if args.use_squad_v2:
            results_list.append(('AvNA', results['AvNA']))
        results = OrderedDict(results_list)

        # Report the overall F1 score obtained under the given perturbation actions
        print("for ", actions, ": ", results['F1'])

        # create a list of the types of questions by extracting the keys from the dict audit_trail_from_question_type

        # Log to console
        results_str = ', '.join(f'{k}: {v:05.2f}' for k, v in results.items())
        log.info(f'{args.split.title()} {results_str}')

        # Log to TensorBoard
        tbx = SummaryWriter(args.save_dir)
        util.visualize(tbx,
                       pred_dict=pred_dict,
                       eval_path=eval_file,
                       step=0,
                       split=args.split,
                       num_visuals=args.num_visuals)

    # Write submission file
    sub_path = join(args.save_dir, args.split + '_' + args.sub_file)
    log.info(f'Writing submission file to {sub_path}...')
    with open(sub_path, 'w', newline='', encoding='utf-8') as csv_fh:
        csv_writer = csv.writer(csv_fh, delimiter=',')
        csv_writer.writerow(['Id', 'Predicted'])
        for uuid in sorted(sub_dict):
            csv_writer.writerow([uuid, sub_dict[uuid]])

    return results['F1']
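Per the docstring, this is called as, e.g., main(args, actions=('substitute', 2)). A compact sketch of the in-batch swap that the 'substitute' branch performs, assuming 0 is the pad index as in the code above:

import random
import torch

def swap_two_tokens(idxs, pad_idx=0):
    # Swap two random non-pad positions in each row of a (batch, len) index tensor.
    for row in idxs:
        nonpad = (row != pad_idx).nonzero(as_tuple=True)[0]
        if nonpad.numel() > 2:
            i, j = random.sample(nonpad.tolist(), 2)
            row[i], row[j] = row[j].item(), row[i].item()
    return idxs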
Example n. 12
def main(args):
    ###############################################################################
    # Build the model
    ###############################################################################
    def init_weight(weight):
        nn.init.uniform_(weight, -args.init_range, args.init_range)

    def init_bias(bias):
        nn.init.constant_(bias, 0.0)

    def weights_init(m):
        classname = m.__class__.__name__
        if classname.find('Linear') != -1:
            if hasattr(m, 'weight') and m.weight is not None:
                init_weight(m.weight)
            if hasattr(m, 'bias') and m.bias is not None:
                init_bias(m.bias)
        elif classname.find('AdaptiveEmbedding') != -1:
            if hasattr(m, 'emb_projs'):
                for i in range(len(m.emb_projs)):
                    if m.emb_projs[i] is not None:
                        nn.init.normal_(m.emb_projs[i], 0.0,
                                        args.proj_init_std)
        elif classname.find('Embedding') != -1:
            if hasattr(m, 'weight'):
                init_weight(m.weight)
        elif classname.find('ProjectedAdaptiveLogSoftmax') != -1:
            if hasattr(m, 'cluster_weight') and m.cluster_weight is not None:
                init_weight(m.cluster_weight)
            if hasattr(m, 'cluster_bias') and m.cluster_bias is not None:
                init_bias(m.cluster_bias)
            if hasattr(m, 'out_projs'):
                for i in range(len(m.out_projs)):
                    if m.out_projs[i] is not None:
                        nn.init.normal_(m.out_projs[i], 0.0,
                                        args.proj_init_std)
        elif classname.find('LayerNorm') != -1:
            if hasattr(m, 'weight'):
                nn.init.normal_(m.weight, 1.0, args.init_std)
            if hasattr(m, 'bias') and m.bias is not None:
                init_bias(m.bias)
        elif classname.find('TransformerLM') != -1:
            if hasattr(m, 'r_emb'):
                init_weight(m.r_emb)
            if hasattr(m, 'r_w_bias'):
                init_weight(m.r_w_bias)
            if hasattr(m, 'r_r_bias'):
                init_weight(m.r_r_bias)
            if hasattr(m, 'r_bias'):
                init_bias(m.r_bias)

    # Set up logging and devices
    args.save_dir = util.get_save_dir(args.save_dir, args.name, training=True)
    log = util.get_logger(args.save_dir, args.name)
    tbx = SummaryWriter(args.save_dir)
    device, args.gpu_ids = util.get_available_devices()
    log.info('Args: {}'.format(dumps(vars(args), indent=4, sort_keys=True)))
    args.batch_size *= max(1, len(args.gpu_ids))

    # Set random seed
    log.info('Using random seed {}...'.format(args.seed))
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    # Get embeddings
    # Args:  word_vectors: word vector tensor of dimension [vocab_size * wemb_dim]
    log.info('Loading embeddings...')
    word_vectors = util.torch_from_json(args.word_emb_file)
    char_vectors = util.torch_from_json(args.char_emb_file)

    # Get Model
    log.info('Building Model...')
    model = QANet(
        word_vectors,
        char_vectors,
        args.para_limit,
        args.ques_limit,
        args.f_model,
        args.d_head,
        same_length=False,  #Check back on this
        mem_len=args.mem_len,
        clamp_len=-1,
        num_head=args.num_head,
        train_cemb=(not args.pretrained_char))
    model = nn.DataParallel(model, args.gpu_ids)

    if args.load_path:
        log.info('Loading checkpoint from {}...'.format(args.load_path))
        model, step = util.load_model(model, args.load_path, args.gpu_ids)
    else:
        step = 0
        #model.apply(weights_init)
    model = model.to(device)
    model.train()
    ema = util.EMA(model, args.ema_decay)

    # Get saver
    saver = util.CheckpointSaver(args.save_dir,
                                 max_checkpoints=args.max_checkpoints,
                                 metric_name=args.metric_name,
                                 maximize_metric=args.maximize_metric,
                                 log=log)

    # Get optimizer and scheduler
    parameters = filter(lambda p: p.requires_grad, model.parameters())
    optimizer = optim.Adam(params=parameters,
                           lr=args.lr,
                           betas=(args.beta1, args.beta2),
                           eps=1e-8,
                           weight_decay=3e-7)
    cr = 1.0 / math.log(args.lr_warm_up_num)
    scheduler = optim.lr_scheduler.LambdaLR(
        optimizer,
        lr_lambda=lambda ee: cr * math.log(ee + 1)
        if ee < args.lr_warm_up_num else 1)
    loss_f = torch.nn.CrossEntropyLoss()

    # Get data loader
    log.info('Building dataset...')
    train_dataset = SQuAD(args.train_record_file, args.use_squad_v2)
    train_loader = data.DataLoader(train_dataset,
                                   batch_size=args.batch_size,
                                   shuffle=True,
                                   num_workers=args.num_workers,
                                   collate_fn=collate_fn,
                                   drop_last=True)
    dev_dataset = SQuAD(args.dev_record_file, args.use_squad_v2)
    dev_loader = data.DataLoader(dev_dataset,
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 num_workers=args.num_workers,
                                 collate_fn=collate_fn,
                                 drop_last=True)

    # Train
    log.info('Training...')
    steps_till_eval = args.eval_steps
    epoch = step // len(train_dataset)
    mems = (tuple(), tuple(), tuple())
    while epoch != args.num_epochs:
        epoch += 1
        log.info('Starting epoch {}...'.format(epoch))
        with torch.enable_grad(), \
                tqdm(total=len(train_loader.dataset)) as progress_bar:
            for cw_idxs, cc_idxs, qw_idxs, qc_idxs, y1, y2, ids in train_loader:
                # Setup for forward
                cw_idxs = cw_idxs.to(device)
                qw_idxs = qw_idxs.to(device)

                cc_idxs = cc_idxs.to(device)
                qc_idxs = qc_idxs.to(device)

                batch_size = cw_idxs.size(0)
                optimizer.zero_grad()

                # Forward
                log_p1, log_p2, mems = model(cw_idxs, cc_idxs, qw_idxs,
                                             qc_idxs, *mems)
                y1, y2 = y1.to(device), y2.to(device)
                loss = torch.mean(loss_f(log_p1, y1) + loss_f(log_p2, y2))
                loss_val = loss.item()

                # Backward
                loss.backward()
                nn.utils.clip_grad_norm_(model.parameters(),
                                         args.max_grad_norm)
                optimizer.step()
                scheduler.step(step // batch_size)
                ema(model, step // batch_size)

                # Log info
                step += batch_size
                progress_bar.update(batch_size)
                progress_bar.set_postfix(epoch=epoch, NLL=loss_val)
                tbx.add_scalar('train/NLL', loss_val, step)
                tbx.add_scalar('train/LR', optimizer.param_groups[0]['lr'],
                               step)

                steps_till_eval -= batch_size
                if steps_till_eval <= 0:
                    steps_till_eval = args.eval_steps

                    # Evaluate and save checkpoint
                    log.info('Evaluating at step {}...'.format(step))
                    ema.assign(model)
                    results, pred_dict = evaluate(model, dev_loader, device,
                                                  args.dev_eval_file,
                                                  args.max_ans_len,
                                                  args.use_squad_v2)
                    saver.save(step, model, results[args.metric_name], device)
                    ema.resume(model)

                    # Log to console
                    results_str = ', '.join('{}: {:05.2f}'.format(k, v)
                                            for k, v in results.items())
                    log.info('Dev {}'.format(results_str))

                    # Log to TensorBoard
                    log.info('Visualizing in TensorBoard...')
                    for k, v in results.items():
                        tbx.add_scalar('dev/{}'.format(k), v, step)
                    util.visualize(tbx,
                                   pred_dict=pred_dict,
                                   eval_path=args.dev_eval_file,
                                   step=step,
                                   split='dev',
                                   num_visuals=args.num_visuals)
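The ema.assign(model) / ema.resume(model) pattern above evaluates with the averaged weights and then restores the live ones. A minimal sketch of such an EMA helper (illustrative; the project's util.EMA may differ):

import torch

class SimpleEMA:
    def __init__(self, model, decay=0.999):
        self.decay = decay
        self.shadow = {n: p.detach().clone()
                       for n, p in model.named_parameters() if p.requires_grad}
        self.backup = {}

    def update(self, model):
        # Blend current weights into the running average.
        for n, p in model.named_parameters():
            if p.requires_grad:
                self.shadow[n].mul_(self.decay).add_(p.detach(), alpha=1 - self.decay)

    def assign(self, model):
        # Swap the averaged weights in for evaluation, keeping a backup.
        self.backup = {n: p.detach().clone()
                       for n, p in model.named_parameters() if p.requires_grad}
        with torch.no_grad():
            for n, p in model.named_parameters():
                if p.requires_grad:
                    p.copy_(self.shadow[n])

    def resume(self, model):
        # Restore the live training weights saved by assign().
        with torch.no_grad():
            for n, p in model.named_parameters():
                if p.requires_grad:
                    p.copy_(self.backup[n])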
Example n. 13
def main(args):
    # Set up logging and devices
    args.save_dir = util.get_save_dir(args.save_dir, args.name, training=True)
    log = util.get_logger(args.save_dir, args.name)
    tbx = SummaryWriter(args.save_dir)
    device, args.gpu_ids = util.get_available_devices()
    log.info('Args: {}'.format(dumps(vars(args), indent=4, sort_keys=True)))
    args.batch_size *= max(1, len(args.gpu_ids))

    # Set random seed
    log.info('Using random seed {}...'.format(args.seed))
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    # Get embeddings
    log.info('Loading word embeddings...')
    word_vectors = util.torch_from_json(args.word_emb_file)

    log.info('Loading char embeddings...')
    char_vectors = util.torch_from_json(args.char_emb_file)

    # Get model
    log.info('Building model...')
    # ---
    if args.name == "baseline":
        model = BiDAF(word_vectors=word_vectors,
                      hidden_size=args.hidden_size,
                      drop_prob=args.drop_prob)

    elif args.name == "slqa":
        model = SLQA(word_vectors=word_vectors,
                     char_vectors=char_vectors,
                     hidden_size=args.hidden_size,
                     drop_prob=args.drop_prob)

    # ---
    model = nn.DataParallel(model, args.gpu_ids)
    if args.load_path:
        log.info('Loading checkpoint from {}...'.format(args.load_path))
        model, step = util.load_model(model, args.load_path, args.gpu_ids)
    else:
        step = 0
    model = model.to(device)
    model.train()
    ema = util.EMA(model, args.ema_decay)

    # Get saver
    saver = util.CheckpointSaver(args.save_dir,
                                 max_checkpoints=args.max_checkpoints,
                                 metric_name=args.metric_name,
                                 maximize_metric=args.maximize_metric,
                                 log=log)

    # Get optimizer and scheduler
    if args.opt == 'adam':
        print("args.opt: ", args.opt)
        optimizer = optim.Adam(model.parameters(), lr=args.lr)
    elif args.opt == 'adadelta':
        print("args.opt: ", args.opt)
        optimizer = optim.Adadelta(model.parameters(),
                                   args.lr,
                                   weight_decay=args.l2_wd)

    scheduler = sched.LambdaLR(optimizer, lambda s: 1.)  # Constant LR

    # Get data loader
    log.info('Building dataset...')
    train_dataset = SQuAD(args.train_record_file, args.use_squad_v2)
    train_loader = data.DataLoader(train_dataset,
                                   batch_size=args.batch_size,
                                   shuffle=True,
                                   num_workers=args.num_workers,
                                   collate_fn=collate_fn)
    dev_dataset = SQuAD(args.dev_record_file, args.use_squad_v2)
    dev_loader = data.DataLoader(dev_dataset,
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 num_workers=args.num_workers,
                                 collate_fn=collate_fn)

    # Train
    log.info('Training...')
    steps_till_eval = args.eval_steps
    epoch = step // len(train_dataset)
    while epoch != args.num_epochs:
        epoch += 1
        log.info('Starting epoch {}...'.format(epoch))
        with torch.enable_grad(), \
                tqdm(total=len(train_loader.dataset)) as progress_bar:
            for cw_idxs, cc_idxs, qw_idxs, qc_idxs, y1, y2, ids in train_loader:
                # Setup for forward
                cw_idxs = cw_idxs.to(device)
                qw_idxs = qw_idxs.to(device)
                batch_size = cw_idxs.size(0)
                optimizer.zero_grad()

                # For char_embedding
                cc_idxs = cc_idxs.to(device)
                qc_idxs = qc_idxs.to(device)
                y1, y2 = y1.to(device), y2.to(device)

                # Forward
                # Original model forward, without char embeddings
                if args.name == "baseline":
                    log_p1, log_p2 = model(cw_idxs, qw_idxs)
                    loss_classify = 0
                elif args.name == "slqa":
                    #========================================================================
                    log_p1, log_p2, pred_score = model(cw_idxs, qw_idxs,
                                                       cc_idxs, qc_idxs)
                    y2_mask = torch.zeros_like(y2) != y2
                    y2_mask = y2_mask.to(dtype=torch.float)
                    loss_BCE = nn.BCELoss()
                    loss_classify = loss_BCE(
                        pred_score,
                        y2_mask.to(dtype=torch.float).unsqueeze(1))


#========================================================================

                loss = F.nll_loss(log_p1, y1) + F.nll_loss(log_p2, y2)

                loss = loss + 2 * loss_classify

                loss_val = loss.item()

                # Backward
                loss.backward()
                nn.utils.clip_grad_norm_(model.parameters(),
                                         args.max_grad_norm)
                optimizer.step()
                scheduler.step(step // batch_size)
                ema(model, step // batch_size)

                # Log info
                step += batch_size
                progress_bar.update(batch_size)
                progress_bar.set_postfix(epoch=epoch, NLL=loss_val)
                tbx.add_scalar('train/NLL', loss_val, step)
                tbx.add_scalar('train/LR', optimizer.param_groups[0]['lr'],
                               step)

                steps_till_eval -= batch_size
                if steps_till_eval <= 0:
                    steps_till_eval = args.eval_steps

                    # Evaluate and save checkpoint
                    log.info('Evaluating at step {}...'.format(step))
                    ema.assign(model)
                    results, pred_dict = evaluate(model, dev_loader, device,
                                                  args.dev_eval_file,
                                                  args.max_ans_len,
                                                  args.use_squad_v2, args)
                    saver.save(step, model, results[args.metric_name], device)
                    ema.resume(model)

                    # Log to console
                    results_str = ', '.join('{}: {:05.2f}'.format(k, v)
                                            for k, v in results.items())
                    log.info('Dev {}'.format(results_str))

                    # Log to TensorBoard
                    log.info('Visualizing in TensorBoard...')
                    for k, v in results.items():
                        tbx.add_scalar('dev/{}'.format(k), v, step)
                    util.visualize(tbx,
                                   pred_dict=pred_dict,
                                   eval_path=args.dev_eval_file,
                                   step=step,
                                   split='dev',
                                   num_visuals=args.num_visuals)
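A distilled sketch of the SLQA-style loss above: span NLL plus a BCE answerability term weighted by 2, with "answer-end index is nonzero" standing in for answerability (both conventions taken from the code above):

import torch
import torch.nn.functional as F

def slqa_style_loss(log_p1, log_p2, pred_score, y1, y2, aux_weight=2.0):
    # pred_score: (batch, 1) sigmoid output in [0, 1]
    has_answer = (y2 != 0).float().unsqueeze(1)
    loss_span = F.nll_loss(log_p1, y1) + F.nll_loss(log_p2, y2)
    loss_cls = F.binary_cross_entropy(pred_score, has_answer)
    return loss_span + aux_weight * loss_cls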
Example n. 14
def main(args):
    # Set up logging
    args.save_dir = util.get_save_dir(args.save_dir, args.name, training=False)
    log = util.get_logger(args.save_dir, args.name)
    log.info('Args: {}'.format(dumps(vars(args), indent=4, sort_keys=True)))
    device, gpu_ids = util.get_available_devices()
    args.batch_size *= max(1, len(gpu_ids))

    # Get embeddings
    log.info('Loading embeddings...')
    word_vectors = util.torch_from_json(args.word_emb_file)
    char_vectors = util.torch_from_json(args.char_emb_file)

    # NEW : load the tag embeddings
    pos_vectors = util.torch_from_json(args.pos_emb_file)
    ner_vectors = util.torch_from_json(args.ner_emb_file)

    # Choose model
    log.info('Building model {}...'.format(args.name))

    if 'baseline' in args.name:
        model = BiDAF(word_vectors=word_vectors, hidden_size=args.hidden_size)

    elif args.name == 'BiDAF_char':
        model = BiDAF_char(word_vectors=word_vectors,
                           char_vectors=char_vectors,
                           hidden_size=args.hidden_size)

    # NEW
    elif args.name in ('BiDAF_tag', 'BiDAF_tag_unfrozen', 'BiDAF_tag_loss',
                       'BiDAF_tag_unfrozen_loss'):
        model = BiDAF_tag(word_vectors=word_vectors,
                          char_vectors=char_vectors,
                          pos_vectors=pos_vectors,
                          ner_vectors=ner_vectors,
                          hidden_size=args.hidden_size)

    elif args.name in ('BiDAF_tag_ext', 'BiDAF_tag_ext_unfrozen'):
        model = BiDAF_tag_ext(word_vectors=word_vectors,
                              char_vectors=char_vectors,
                              pos_vectors=pos_vectors,
                              ner_vectors=ner_vectors,
                              hidden_size=args.hidden_size)

    elif args.name == 'coattn':
        model = CoattentionModel(hidden_dim=args.hidden_size,
                                 embedding_matrix=word_vectors,
                                 train_word_embeddings=False,
                                 dropout=0.35,
                                 pooling_size=16,
                                 number_of_iters=4,
                                 number_of_layers=2,
                                 device=device)

    else:
        raise NameError('No model named ' + args.name)

    model = nn.DataParallel(model, gpu_ids)
    log.info('Loading checkpoint from {}...'.format(args.load_path))
    model = util.load_model(model, args.load_path, gpu_ids, return_step=False)
    model = model.to(device)
    model.eval()

    # Get data loader
    log.info('Building dataset...')
    record_file = vars(args)['{}_record_file'.format(args.split)]
    dataset = SQuAD(record_file, args.use_squad_v2)
    data_loader = data.DataLoader(dataset,
                                  batch_size=args.batch_size,
                                  shuffle=False,
                                  num_workers=args.num_workers,
                                  collate_fn=collate_fn)

    # Evaluate
    log.info('Evaluating on {} split...'.format(args.split))
    nll_meter = util.AverageMeter()
    pred_dict = {}  # Predictions for TensorBoard
    sub_dict = {}  # Predictions for submission
    eval_file = vars(args)['{}_eval_file'.format(args.split)]
    with open(eval_file, 'r') as fh:
        gold_dict = json_load(fh)
    with torch.no_grad(), \
            tqdm(total=len(dataset)) as progress_bar:
        for cw_idxs, cc_idxs, cpos_idxs, cner_idxs, cw_ems, cw_tfs, qw_idxs, qc_idxs, qpos_idxs, qner_idxs, qw_ems, qw_tfs, y1, y2, ids in data_loader:  # NEW
            # Setup for forward
            cw_idxs = cw_idxs.to(device)
            qw_idxs = qw_idxs.to(device)
            batch_size = cw_idxs.size(0)

            # Forward
            if 'baseline' in args.name:
                log_p1, log_p2 = model(cw_idxs, qw_idxs)

            elif args.name == 'BiDAF_char':
                # Additional setup for forward
                cc_idxs = cc_idxs.to(device)
                qc_idxs = qc_idxs.to(device)
                log_p1, log_p2 = model(cw_idxs, qw_idxs, cc_idxs, qc_idxs)

            elif args.name == 'coattn':
                max_c_len = cw_idxs.size(1)
                max_q_len = qw_idxs.size(1)

                c_len = []
                q_len = []

                for i in range(cw_idxs.size(0)):
                    if len((cw_idxs[i] == 0).nonzero()) != 0:
                        c_len_i = (cw_idxs[i] == 0).nonzero()[0].item()
                    else:
                        c_len_i = cw_idxs.size(1)

                    if len((qw_idxs[i] == 0).nonzero()) != 0:
                        q_len_i = (qw_idxs[i] == 0).nonzero()[0].item()
                    else:
                        q_len_i = qw_idxs.size(1)

                    c_len.append(int(c_len_i))
                    q_len.append(int(q_len_i))

                c_len = torch.Tensor(c_len).int()
                q_len = torch.Tensor(q_len).int()

                num_examples = int(cw_idxs.size(0) / len(gpu_ids))

                log_p1, log_p2 = model(max_c_len, max_q_len, cw_idxs, qw_idxs,
                                       c_len, q_len, num_examples, True, False)

            # NEW
            elif args.name in ('BiDAF_tag', 'BiDAF_tag_unfrozen',
                               'BiDAF_tag_loss', 'BiDAF_tag_unfrozen_loss'):
                # Additional setup for forward
                cc_idxs = cc_idxs.to(device)
                cpos_idxs = cpos_idxs.to(device)
                cner_idxs = cner_idxs.to(device)
                qc_idxs = qc_idxs.to(device)
                qpos_idxs = qpos_idxs.to(device)
                qner_idxs = qner_idxs.to(device)

                log_p1, log_p2 = model(cw_idxs, qw_idxs, cc_idxs, qc_idxs,
                                       cpos_idxs, qpos_idxs, cner_idxs,
                                       qner_idxs)

            elif args.name in ('BiDAF_tag_ext', 'BiDAF_tag_ext_unfrozen'):
                # Additional setup for forward
                cc_idxs = cc_idxs.to(device)
                cpos_idxs = cpos_idxs.to(device)
                cner_idxs = cner_idxs.to(device)
                cw_ems = cw_ems.to(device)
                cw_tfs = cw_tfs.to(device)
                qc_idxs = qc_idxs.to(device)
                qpos_idxs = qpos_idxs.to(device)
                qner_idxs = qner_idxs.to(device)
                qw_ems = qw_ems.to(device)
                qw_tfs = qw_tfs.to(device)

                log_p1, log_p2 = model(cw_idxs, qw_idxs, cc_idxs, qc_idxs,
                                       cpos_idxs, qpos_idxs, cner_idxs,
                                       qner_idxs, cw_ems, qw_ems, cw_tfs,
                                       qw_tfs)

            else:
                raise NameError('No model named ' + args.name)

            y1, y2 = y1.to(device), y2.to(device)
            loss = F.nll_loss(log_p1, y1) + F.nll_loss(log_p2, y2)
            nll_meter.update(loss.item(), batch_size)

            # Get F1 and EM scores
            p1, p2 = log_p1.exp(), log_p2.exp()
            starts, ends = util.discretize(p1, p2, args.max_ans_len,
                                           args.use_squad_v2)

            # Log info
            progress_bar.update(batch_size)
            if args.split != 'test':
                # No labels for the test set, so NLL would be invalid
                progress_bar.set_postfix(NLL=nll_meter.avg)

            idx2pred, uuid2pred = util.convert_tokens(gold_dict, ids.tolist(),
                                                      starts.tolist(),
                                                      ends.tolist(),
                                                      args.use_squad_v2)
            pred_dict.update(idx2pred)
            sub_dict.update(uuid2pred)

    # Log results (except for test set, since it does not come with labels)
    if args.split != 'test':
        results = util.eval_dicts(gold_dict, pred_dict, args.use_squad_v2)
        results_list = [('NLL', nll_meter.avg), ('F1', results['F1']),
                        ('EM', results['EM'])]
        if args.use_squad_v2:
            results_list.append(('AvNA', results['AvNA']))
        results = OrderedDict(results_list)

        # Log to console
        results_str = ', '.join('{}: {:05.2f}'.format(k, v)
                                for k, v in results.items())
        log.info('{} {}'.format(args.split.title(), results_str))

        # More detailed error analysis
        results = util.analysis_dicts(gold_dict, pred_dict, args.use_squad_v2)
        results_str = ', '.join('{}: {:05.2f}'.format(k, v)
                                for k, v in results.items())
        log.info('{} {}'.format(args.split.title(), results_str))

        # Log to TensorBoard
        tbx = SummaryWriter(args.save_dir)
        util.visualize(tbx,
                       pred_dict=pred_dict,
                       eval_path=eval_file,
                       step=0,
                       split=args.split,
                       num_visuals=args.num_visuals)

    # Write submission file
    sub_path = join(args.save_dir, args.split + '_' + args.sub_file)
    log.info('Writing submission file to {}...'.format(sub_path))
    with open(sub_path, 'w', newline='', encoding='utf-8') as csv_fh:
        csv_writer = csv.writer(csv_fh, delimiter=',')
        csv_writer.writerow(['Id', 'Predicted'])
        for uuid in sorted(sub_dict):
            csv_writer.writerow([uuid, sub_dict[uuid]])
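The per-example c_len/q_len scan in the 'coattn' branch (first zero position, else full width) can also be vectorized; a sketch assuming index 0 is the pad token:

import torch

def padded_lengths(idxs, pad_idx=0):
    # Length of each row up to the first pad token, or the full width if none.
    is_pad = idxs == pad_idx                     # (batch, len) bool
    first_pad = is_pad.int().argmax(dim=1)       # first True position (0 if no pad)
    full = torch.full_like(first_pad, idxs.size(1))
    return torch.where(is_pad.any(dim=1), first_pad, full).int()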
Example n. 15
def main(args):
    # Set up logging and devices
    args.save_dir = util.get_save_dir(args.save_dir, args.name, training=True)
    log = util.get_logger(args.save_dir, args.name)
    tbx = SummaryWriter(args.save_dir)
    device, args.gpu_ids = util.get_available_devices()
    log.info('Args: {}'.format(dumps(vars(args), indent=4, sort_keys=True)))
    args.batch_size *= max(1, len(args.gpu_ids))

    # Set random seed
    log.info('Using random seed {}...'.format(args.seed))
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    # Get word embeddings
    log.info('Loading word embeddings...')
    word_vectors = util.torch_from_json(args.word_emb_file)

    ### start our code:
    # Get char-embeddings
    log.info('Loading char-embeddings...')
    char_vectors = util.torch_from_json(args.char_emb_file)
    ### end our code

    # Get model
    log.info('Building model...')
    # model = BiDAF(word_vectors=word_vectors,
    #               hidden_size=args.hidden_size,
    #               char_vectors = char_vectors,
    #               drop_prob=args.drop_prob)

    ### start our code:   (QANet)
    model = QANet(word_vectors=word_vectors,
                  char_vectors=char_vectors,
                  hidden_size=args.hidden_size,
                  kernel_size=7,
                  filters=128,
                  drop_prob=args.drop_prob)
    ### end our code

    model = nn.DataParallel(model, args.gpu_ids)
    if args.load_path:
        log.info('Loading checkpoint from {}...'.format(args.load_path))
        model, step = util.load_model(model, args.load_path, args.gpu_ids)
    else:
        step = 0
    model = model.to(device)
    model.train()
    ema = util.EMA(model, args.ema_decay)

    # Get saver
    saver = util.CheckpointSaver(args.save_dir,
                                 max_checkpoints=args.max_checkpoints,
                                 metric_name=args.metric_name,
                                 maximize_metric=args.maximize_metric,
                                 log=log)

    # Get optimizer and scheduler
    # https://pytorch.org/docs/stable/optim.html
    # Original:
    # optimizer = optim.Adadelta(model.parameters(), args.lr,
    #                           weight_decay=args.l2_wd)
    #                   # default: lr=0.5, rho=0.9, eps=1e-06, weight_decay=0
    # scheduler = sched.LambdaLR(optimizer, lambda s: 1.)  # Constant LR
    # A function which computes a multiplicative factor given an integer parameter epoch,
    # or a list of such functions, one for each group in optimizer.param_groups

    ### start our code:
    optimizer = optim.Adam(model.parameters(),
                           lr=1e-3,
                           betas=(0.8, 0.999),
                           eps=1e-7,
                           weight_decay=3e-7,
                           amsgrad=False)
    scheduler = sched.LambdaLR(optimizer, lambda s: 1.)  # Constant LR

    # Adabound
    # optimizer = adabound.AdaBound(model.parameters(), lr=1e-3, final_lr=0.1)
    # scheduler = sched.LambdaLR(optimizer, lambda s: 1.)
    ### end our code

    # Get data loader
    log.info('Building dataset...')
    train_dataset = SQuAD(args.train_record_file, args.use_squad_v2)
    train_loader = data.DataLoader(train_dataset,
                                   batch_size=args.batch_size,
                                   shuffle=True,
                                   num_workers=args.num_workers,
                                   collate_fn=collate_fn)
    dev_dataset = SQuAD(args.dev_record_file, args.use_squad_v2)
    dev_loader = data.DataLoader(dev_dataset,
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 num_workers=args.num_workers,
                                 collate_fn=collate_fn)

    # Train
    log.info('Training...')
    steps_till_eval = args.eval_steps
    epoch = step // len(train_dataset)
    while epoch != args.num_epochs:
        epoch += 1
        log.info('Starting epoch {}...'.format(epoch))
        with torch.enable_grad(), \
                tqdm(total=len(train_loader.dataset)) as progress_bar:
            for cw_idxs, cc_idxs, qw_idxs, qc_idxs, y1, y2, ids in train_loader:

                # print("cc_idxs: ", cc_idxs)
                # print("qc_idxs: ", qc_idxs)

                # Setup for forward
                cw_idxs = cw_idxs.to(device)
                qw_idxs = qw_idxs.to(device)

                ### start our code:
                cc_idxs = cc_idxs.to(device)
                qc_idxs = qc_idxs.to(device)

                ### end our code

                batch_size = cw_idxs.size(0)
                optimizer.zero_grad()

                # Forward
                # log_p1, log_p2 = model(cw_idxs, qw_idxs)   # original

                ### start our code:
                log_p1, log_p2 = model(cw_idxs, qw_idxs, cc_idxs, qc_idxs)

                ### end our code

                y1, y2 = y1.to(device), y2.to(device)
                loss = F.nll_loss(log_p1, y1) + F.nll_loss(log_p2, y2)
                loss_val = loss.item()

                # Backward
                loss.backward()
                nn.utils.clip_grad_norm_(model.parameters(),
                                         args.max_grad_norm)

                ### start our code:
                # optimizer = LR_decay(optimizer, epoch, lr = 0.8)   # initial learning rate 0.8
                if step < 1000:
                    optimizer = LR_warmup(optimizer, step, lr=0.)
                ### end our code

                optimizer.step()
                scheduler.step(step // batch_size)
                ema(model, step // batch_size)

                # Log info
                step += batch_size
                progress_bar.update(batch_size)
                progress_bar.set_postfix(epoch=epoch, NLL=loss_val)
                tbx.add_scalar('train/NLL', loss_val, step)
                tbx.add_scalar('train/LR', optimizer.param_groups[0]['lr'],
                               step)

                steps_till_eval -= batch_size
                if steps_till_eval <= 0:
                    steps_till_eval = args.eval_steps

                    # Evaluate and save checkpoint
                    log.info('Evaluating at step {}...'.format(step))
                    ema.assign(model)
                    results, pred_dict = evaluate(model, dev_loader, device,
                                                  args.dev_eval_file,
                                                  args.max_ans_len,
                                                  args.use_squad_v2)
                    saver.save(step, model, results[args.metric_name], device)
                    ema.resume(model)

                    # Log to console
                    results_str = ', '.join('{}: {:05.2f}'.format(k, v)
                                            for k, v in results.items())
                    log.info('Dev {}'.format(results_str))

                    # Log to TensorBoard
                    log.info('Visualizing in TensorBoard...')
                    for k, v in results.items():
                        tbx.add_scalar('dev/{}'.format(k), v, step)
                    util.visualize(tbx,
                                   pred_dict=pred_dict,
                                   eval_path=args.dev_eval_file,
                                   step=step,
                                   split='dev',
                                   num_visuals=args.num_visuals)
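LR_warmup is referenced above but not defined in this snippet; a plausible linear warm-up helper consistent with the call optimizer = LR_warmup(optimizer, step, lr=0.) might look like the following. The target LR (1e-3, matching the Adam setup above) and the 1000-step horizon (matching the step < 1000 guard) are assumptions:

def LR_warmup(optimizer, step, lr=0., target_lr=1e-3, warmup_steps=1000):
    # Hypothetical helper: ramp the LR linearly from `lr` to `target_lr` over warmup_steps.
    warm_lr = lr + (target_lr - lr) * min(step, warmup_steps) / warmup_steps
    for group in optimizer.param_groups:
        group['lr'] = warm_lr
    return optimizer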