Example #1
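These main() drivers are excerpts; the module that surrounds them is not shown on this page. Judging from the names they reference, they assume roughly the imports sketched below. The project-local modules (get_data, models, utils) and the helpers train, validate, test and get_metric_from_confmat come from the surrounding repo, so the layout shown here is an assumption, not something visible in the snippets themselves.

import os
import pickle
import time

import numpy as np
import torch
from torch.optim.lr_scheduler import ReduceLROnPlateau

import get_data   # provides get_data_atomic(args)
import models     # provides Atomic / Atomic_edge_only
import utils      # provides save_checkpoint / load_best_checkpoint / load_last_checkpoint
# train, validate, test and get_metric_from_confmat are defined elsewhere in the repo.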
def main(args):

    args.cuda = args.use_cuda and torch.cuda.is_available()

    train_set, validate_set, test_set, train_loader, validate_loader, test_loader = get_data.get_data_atomic(
        args)

    print('start validate')
    val_epoch_acc, confmat = validate(validate_loader, args)
    print('validate confmat')
    get_metric_from_confmat(confmat, 'atomic')

    test_loader.dataset.round_cnt = {
        'single': 0,
        'mutual': 0,
        'avert': 0,
        'refer': 0,
        'follow': 0,
        'share': 0
    }
    print('start test')
    test_epoch_acc, confmat_test = validate(test_loader, args)
    print('test confmat')
    get_metric_from_confmat(confmat_test, 'atomic')
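Both passes above summarize results with get_metric_from_confmat(confmat, 'atomic'). That helper is not shown on this page; as a rough illustration only, here is a minimal sketch (an assumption, not the repo's actual implementation) of how per-class precision/recall and overall accuracy can be read off a 6x6 confusion matrix whose class order follows the label mapping used in the later examples.

import numpy as np

ATOMIC_CLASSES = ['single', 'mutual', 'avert', 'refer', 'follow', 'share']

def metrics_from_confmat(confmat):
    # confmat[i, j] counts samples of true class i predicted as class j
    confmat = np.asarray(confmat, dtype=np.float64)
    for i, name in enumerate(ATOMIC_CLASSES):
        tp = confmat[i, i]
        precision = tp / max(confmat[:, i].sum(), 1e-12)
        recall = tp / max(confmat[i, :].sum(), 1e-12)
        print('{}: precision {:.4f}  recall {:.4f}'.format(name, precision, recall))
    print('overall accuracy {:.4f}'.format(np.trace(confmat) / max(confmat.sum(), 1e-12)))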
Example #2
def main(args):

    args.cuda = args.use_cuda and torch.cuda.is_available()

    train_set, validate_set, test_set, train_loader, validate_loader, test_loader = get_data.get_data_atomic(
        args)

    #model = models.Atomic(args)
    model = models.Atomic_edge_only(args)

    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    #optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)

    #{'single': 0, 'mutual': 1, 'avert': 2, 'refer': 3, 'follow': 4, 'share': 5}
    criterion = [
        torch.nn.CrossEntropyLoss(
            weight=torch.Tensor([0.05, 0.05, 0.25, 0.25, 0.25, 0.15])),
        torch.nn.MSELoss()
    ]
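    # NOTE: these per-class weights down-weight 'single' and 'mutual' relative to
    # the rarer atomic labels, presumably to counter class imbalance.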

    # {'NA': 0, 'single': 1, 'mutual': 2, 'avert': 3, 'refer': 4, 'follow': 5, 'share': 6}

    scheduler = ReduceLROnPlateau(optimizer,
                                  factor=args.lr_decay,
                                  patience=1,
                                  verbose=True,
                                  mode='max')
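    # mode='max' because the scheduler is stepped on validation accuracy below,
    # where higher is better.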
    # --------------------------------------------------
    # use multi-gpu

    if args.cuda and torch.cuda.device_count() > 1:
        print("Now Using ", len(args.device_ids), " GPUs!")

        model = torch.nn.DataParallel(model,
                                      device_ids=args.device_ids,
                                      output_device=args.device_ids[0]).cuda()
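        # DataParallel replicates the model across device_ids and gathers the
        # outputs on device_ids[0].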
        #model=model.cuda()
        criterion[0] = criterion[0].cuda()
        criterion[1] = criterion[1].cuda()

    elif args.cuda:
        model = model.cuda()
        criterion[0] = criterion[0].cuda()
        criterion[1] = criterion[1].cuda()

    if args.load_best_checkpoint:
        loaded_checkpoint = utils.load_best_checkpoint(args,
                                                       model,
                                                       optimizer,
                                                       path=args.resume)

        if loaded_checkpoint:
            args, best_epoch_acc, avg_epoch_acc, model, optimizer = loaded_checkpoint

    if args.load_last_checkpoint:
        loaded_checkpoint = utils.load_last_checkpoint(
            args,
            model,
            optimizer,
            path=args.resume,
            version=args.model_load_version)

        if loaded_checkpoint:
            args, best_epoch_acc, avg_epoch_acc, model, optimizer = loaded_checkpoint

    # ------------------------------------------------------------------------------
    # Start Training!

    since = time.time()

    train_epoch_acc_all = []
    val_epoch_acc_all = []

    best_acc = 0
    avg_epoch_acc = 0

    for epoch in range(args.start_epoch, args.epochs):

        train_epoch_loss, train_epoch_acc = train(train_loader, model,
                                                  criterion, optimizer, epoch,
                                                  args)
        train_epoch_acc_all.append(train_epoch_acc)

        val_epoch_loss, val_epoch_acc = validate(validate_loader, model,
                                                 criterion, epoch, args)
        val_epoch_acc_all.append(val_epoch_acc)

        print('Epoch {}/{} Training Acc: {:.4f} Validation Acc: {:.4f}'.format(
            epoch, args.epochs - 1, train_epoch_acc, val_epoch_acc))
        print('*' * 15)

        scheduler.step(val_epoch_acc)

        is_best = val_epoch_acc > best_acc

        if is_best:
            best_acc = val_epoch_acc

        avg_epoch_acc = np.mean(val_epoch_acc_all)

        utils.save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_epoch_acc': best_acc,
                'avg_epoch_acc': avg_epoch_acc,
                'optimizer': optimizer.state_dict(),
                'args': args
            },
            is_best=is_best,
            directory=args.resume,
            version='epoch_{}'.format(str(epoch)))

    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best Val Acc: {},  Final Avg Val Acc: {}'.format(
        best_acc, avg_epoch_acc))

    # ----------------------------------------------------------------------------------------------------------
    # test

    loaded_checkpoint = utils.load_best_checkpoint(args,
                                                   model,
                                                   optimizer,
                                                   path=args.resume)

    if loaded_checkpoint:
        args, best_epoch_acc, avg_epoch_acc, model, optimizer = loaded_checkpoint

    test_loader.dataset.round_cnt = {
        'single': 0,
        'mutual': 0,
        'avert': 0,
        'refer': 0,
        'follow': 0,
        'share': 0
    }
    test_loss, test_acc, confmat, top2_acc = test(test_loader, model,
                                                  criterion, args)

    # save test results
    if not os.path.isdir(args.save_test_res):
        os.mkdir(args.save_test_res)

    # pickle needs a binary file handle
    with open(os.path.join(args.save_test_res, 'raw_test_results.pkl'),
              'wb') as f:
        pickle.dump([test_loss, test_acc, confmat, top2_acc], f)

    print("Test Acc {}".format(test_acc))
    print("Top 2 Test Acc {}".format(top2_acc))

    # todo: need to change the mode here!
    get_metric_from_confmat(confmat, 'atomic')
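Example #2 persists training state through utils.save_checkpoint(...) and restores it with utils.load_best_checkpoint(...). Those helpers live elsewhere in the repo; the sketch below is only a plausible reading of the save side, inferred from the call site above (directory, version string, is_best flag). The file naming and the model_best copy are assumptions, except that Example #3 does load a file named checkpoint_epoch_1.pth.

import os
import shutil
import torch

def save_checkpoint(state, is_best, directory, version):
    # Hypothetical layout: one file per epoch plus a copy of the best model so far.
    if not os.path.isdir(directory):
        os.makedirs(directory)
    filename = os.path.join(directory, 'checkpoint_{}.pth'.format(version))
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename, os.path.join(directory, 'model_best.pth'))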
Example #3
def main(args):

    args.cuda = args.use_cuda and torch.cuda.is_available()

    train_set, validate_set, test_set, train_loader, validate_loader, test_loader = get_data.get_data_atomic(
        args)

    #model = models.Atomic(args)
    model = models.Atomic_edge_only(args)

    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    #optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)

    #{'single': 0, 'mutual': 1, 'avert': 2, 'refer': 3, 'follow': 4, 'share': 5}
    criterion = [
        torch.nn.CrossEntropyLoss(
            weight=torch.Tensor([0.05, 0.05, 0.25, 0.25, 0.25, 0.15])),
        torch.nn.MSELoss()
    ]

    # {'NA': 0, 'single': 1, 'mutual': 2, 'avert': 3, 'refer': 4, 'follow': 5, 'share': 6}

    scheduler = ReduceLROnPlateau(optimizer,
                                  factor=args.lr_decay,
                                  patience=1,
                                  verbose=True,
                                  mode='max')
    # --------------------------------------------------
    # use multi-gpu

    if args.cuda and torch.cuda.device_count() > 1:
        print("Now Using ", len(args.device_ids), " GPUs!")

        model = torch.nn.DataParallel(model,
                                      device_ids=args.device_ids,
                                      output_device=args.device_ids[0]).cuda()
        #model=model.cuda()
        criterion[0] = criterion[0].cuda()
        criterion[1] = criterion[1].cuda()

    elif args.cuda:
        model = model.cuda()
        criterion[0] = criterion[0].cuda()
        criterion[1] = criterion[1].cuda()

    # ----------------------------------------------------------------------------------------------------------
    # test

    checkpoint_dir = args.resume
    best_model_file = os.path.join(checkpoint_dir, 'checkpoint_epoch_1.pth')
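    # NOTE: despite the "best model" messages below, this hard-codes the epoch-1
    # checkpoint file rather than a best-model file.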

    if os.path.isfile(best_model_file):
        print("====> loading best model {}".format(best_model_file))

        checkpoint = torch.load(best_model_file)
        args.start_epoch = checkpoint['epoch']
        best_epoch_acc = checkpoint['best_epoch_acc']

        try:
            avg_epoch_acc = checkpoint['avg_epoch_acc']
        except KeyError:
            avg_epoch_acc = 0.0

        # If the checkpoint was saved from a DataParallel-wrapped model, its
        # state_dict keys carry a 'module.' prefix, so the model must be wrapped
        # the same way here before loading.
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])

        if args.cuda:
            model.cuda()

        print("===> loaded best model {} (epoch {})".format(
            best_model_file, checkpoint['epoch']))

    test_loader.dataset.round_cnt = {
        'single': 0,
        'mutual': 0,
        'avert': 0,
        'refer': 0,
        'follow': 0,
        'share': 0
    }
    test_loss, test_acc, confmat, top2_acc = test(test_loader, model,
                                                  criterion, args)

    # save test results
    if not os.path.isdir(args.save_test_res):
        os.mkdir(args.save_test_res)

    # pickle needs a binary file handle
    with open(os.path.join(args.save_test_res, 'raw_test_results.pkl'),
              'wb') as f:
        pickle.dump([test_loss, test_acc, confmat, top2_acc], f)

    print("Test Acc {}".format(test_acc))
    print("Top 2 Test Acc {}".format(top2_acc))

    # todo: need to change the mode here!
    get_metric_from_confmat(confmat, 'atomic')
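All three drivers take a single args namespace. The argument parser itself is not part of these excerpts; the sketch below simply lists the fields the code above reads, wired up with argparse. The flag names mirror the attribute accesses, while every default value is an illustrative assumption.

import argparse

def parse_args():
    parser = argparse.ArgumentParser(description='atomic gaze-event training / testing')
    parser.add_argument('--use-cuda', dest='use_cuda', action='store_true')
    parser.add_argument('--device-ids', dest='device_ids', type=int, nargs='+', default=[0])
    parser.add_argument('--lr', type=float, default=1e-4)
    parser.add_argument('--lr-decay', dest='lr_decay', type=float, default=0.5)
    parser.add_argument('--start-epoch', dest='start_epoch', type=int, default=0)
    parser.add_argument('--epochs', type=int, default=30)
    parser.add_argument('--resume', type=str, default='checkpoints/')
    parser.add_argument('--save-test-res', dest='save_test_res', type=str, default='test_results/')
    parser.add_argument('--load-best-checkpoint', dest='load_best_checkpoint', action='store_true')
    parser.add_argument('--load-last-checkpoint', dest='load_last_checkpoint', action='store_true')
    parser.add_argument('--model-load-version', dest='model_load_version', type=str, default='epoch_0')
    return parser.parse_args()

A driver would then be invoked as main(parse_args()) under the usual if __name__ == '__main__': guard.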