Example No. 1
def check_acc():
    import pickle

    import torch
    import torch.nn.functional as F

    # deep model
    with open("results/deep-model.pickle", "rb") as f:
        model = pickle.load(f)
    params = torch.load("results/deep-params.pth", map_location="cpu")
    model.load_state_dict(params)

    batchsize = 1000
    test_loader = torch.utils.data.DataLoader(
        PoseDataset([root_dir / d for d in test_data_dirs], mode="test"),
        batch_size=batchsize,
        shuffle=False,
    )

    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            output = model(data)
            test_loss += F.nll_loss(
                output, target, reduction="sum").item()  # sum up batch loss
            pred = output.argmax(
                dim=1,
                keepdim=True)  # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()

    test_loss /= len(test_loader.dataset)
    test_acc = 100.0 * correct / len(test_loader.dataset)

    print(test_acc)
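
check_acc() assumes that the two files under results/ already exist. A minimal sketch of the save step that would produce them, assuming model is the trained network from Example No. 7 (the project's EarlyStopping helper may write these files itself):

import pickle
import torch

with open("results/deep-model.pickle", "wb") as f:
    pickle.dump(model, f)                                   # whole model object
torch.save(model.state_dict(), "results/deep-params.pth")   # weights only
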
Example No. 2
def create_dataset(self,
                   csv_fn='data/FLIC-full/test_joints.csv',
                   img_dir='data/FLIC-full/images',
                   symmetric_joints='[[2, 4], [1, 5], [0, 6]]',
                   im_size=220,
                   fliplr=False,
                   rotate=False,
                   rotate_range=10,
                   zoom=False,
                   base_zoom=1.5,
                   zoom_range=0.2,
                   translate=False,
                   translate_range=5,
                   min_dim=0,
                   coord_normalize=False,
                   gcn=False,
                   joint_num=7,
                   fname_index=0,
                   joint_index=1,
                   ignore_label=-1):
    import sys
    sys.path.insert(0, 'scripts')
    from dataset import PoseDataset
    dataset = PoseDataset(
        csv_fn, img_dir, im_size, fliplr, rotate, rotate_range, zoom,
        base_zoom, zoom_range, translate, translate_range, min_dim,
        coord_normalize, gcn, joint_num, fname_index, joint_index,
        symmetric_joints, ignore_label
    )
    return dataset
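
A call site for create_dataset() with augmentation switched on might look roughly like this (a sketch; self is whatever training object carries the method, and the train CSV path is only assumed to sit next to the test one):

train_dataset = self.create_dataset(
    csv_fn='data/FLIC-full/train_joints.csv',
    img_dir='data/FLIC-full/images',
    fliplr=True,
    rotate=True,
    zoom=True,
    translate=True,
    coord_normalize=True,
)
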
Example No. 3
def train_random_forest():
    import pickle

    from sklearn.ensemble import RandomForestClassifier
    from sklearn.metrics import accuracy_score

    train_dataset = PoseDataset([root_dir / d for d in train_data_dirs])
    X, y = aggregate_dataset(train_dataset)
    clf = RandomForestClassifier(n_estimators=100, max_depth=2, random_state=0)
    clf.fit(X, y)

    test_dataset = PoseDataset([root_dir / d for d in test_data_dirs])
    X, y = aggregate_dataset(test_dataset)
    y_hat = clf.predict(X)
    acc = accuracy_score(y, y_hat)
    print("random forest acc:", acc)

    with open("./results/random-forest.pickle", mode="wb") as f:
        pickle.dump(clf, f)
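
Examples No. 3, 4 and 6 all rely on an aggregate_dataset() helper that is not shown on this page. A minimal sketch of what it is assumed to do, i.e. flatten each PoseDataset sample into a feature row and stack everything for scikit-learn style estimators:

import numpy as np

def aggregate_dataset(dataset):
    # Hypothetical helper: stack every (features, label) pair of a PoseDataset
    # into a feature matrix X and a label vector y.
    X, y = [], []
    for features, label in dataset:
        X.append(np.asarray(features, dtype=np.float32).ravel())
        y.append(int(label))
    return np.stack(X), np.asarray(y)
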
Example No. 4
def train_lightgbm():
    import pickle

    import lightgbm as lgb
    import numpy as np
    from sklearn.preprocessing import PolynomialFeatures

    train_dataset = PoseDataset([root_dir / d for d in train_data_dirs])
    X_train, y_train = aggregate_dataset(train_dataset)

    # poly = PolynomialFeatures(2, include_bias=True)
    # poly.fit_transform(X_train)

    test_dataset = PoseDataset([root_dir / d for d in test_data_dirs])
    X_test, y_test = aggregate_dataset(test_dataset)

    # poly.fit_transform(X_test)

    lgb_train = lgb.Dataset(X_train, y_train)
    lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)

    params = {
        "task": "train",
        "boosting_type": "gbdt",
        "objective": "multiclass",
        "metric": {"multi_logloss"},
        "num_class": 7,
        "learning_rate": 0.1,
        "num_leaves": 31,
        "min_data_in_leaf": 1,
        "num_iteration": 200,
        "verbose": 0,
    }

    model = lgb.train(params,
                      lgb_train,
                      valid_sets=lgb_eval,
                      verbose_eval=False)

    y_pred = model.predict(X_test, num_iteration=model.best_iteration)
    y_pred_max = np.argmax(y_pred, axis=1)

    accuracy = sum(y_test == y_pred_max) / len(y_test)
    print("lightgbm acc:", accuracy)

    with open("./results/light-gbm.pickle", mode="wb") as f:
        pickle.dump(model, f)
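
Reading the pickled booster back for inference would be the mirror image of the dump above (a sketch; for a multiclass objective the probabilities come back with one column per class):

import pickle
import numpy as np

with open("./results/light-gbm.pickle", "rb") as f:
    booster = pickle.load(f)

probs = booster.predict(X_test)     # shape (n_samples, 7): class probabilities
labels = np.argmax(probs, axis=1)   # hard class predictions
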
Example No. 5
def train(learning_rate, learning_rate_decay, learning_rate_decay_step_size,
          batch_size, num_of_epochs, img_size, arch):
    import numpy as np
    import torch
    import torch.nn as nn
    from torch.optim import lr_scheduler
    from torch.utils.data import DataLoader
    from torchvision import transforms
    # Classifier, ResClassifier, PoseDataset and training_loop are project modules.

    # check device
    DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'

    # parameters
    RANDOM_SEED = 42
    N_CLASSES = 3

    # Load Data
    dataset = PoseDataset(csv_file='./labels.csv',
                          img_size=img_size,
                          transform=transforms.ToTensor())

    train_set, test_set = torch.utils.data.random_split(
        dataset,
        [int(np.ceil(0.8 * len(dataset))),
         int(np.floor(0.2 * len(dataset)))])

    train_loader = DataLoader(dataset=train_set,
                              batch_size=batch_size,
                              shuffle=True)
    test_loader = DataLoader(dataset=test_set,
                             batch_size=batch_size,
                             shuffle=True)

    # instantiate the model
    torch.manual_seed(RANDOM_SEED)

    if arch == 'simple':
        model = Classifier(N_CLASSES).to(DEVICE)

    elif arch == 'resnet':
        model = ResClassifier(N_CLASSES).to(DEVICE)

    else:
        print("model architecture not supported; choose 'simple' or 'resnet'")
        return

    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

    scheduler = lr_scheduler.StepLR(optimizer,
                                    step_size=learning_rate_decay_step_size,
                                    gamma=learning_rate_decay)

    cross_entropy_loss_criterion = nn.CrossEntropyLoss()

    print('start training...')
    # start training
    model, optimizer, train_losses, valid_losses = training_loop(
        model, cross_entropy_loss_criterion, batch_size, optimizer, scheduler,
        num_of_epochs, train_loader, test_loader, DEVICE)
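
training_loop() is project code that is not included on this page. A compact sketch consistent with how it is called above and with the four values it returns (the real helper may differ, e.g. by adding early stopping or logging):

import torch

def training_loop(model, criterion, batch_size, optimizer, scheduler,
                  epochs, train_loader, valid_loader, device):
    # Hypothetical implementation: plain train/validate loop that records
    # per-epoch losses; batch_size is accepted only to mirror the call above.
    train_losses, valid_losses = [], []
    for _ in range(epochs):
        model.train()
        running = 0.0
        for x, y in train_loader:
            x, y = x.to(device), y.to(device)
            optimizer.zero_grad()
            loss = criterion(model(x), y)
            loss.backward()
            optimizer.step()
            running += loss.item() * x.size(0)
        train_losses.append(running / len(train_loader.dataset))

        model.eval()
        running = 0.0
        with torch.no_grad():
            for x, y in valid_loader:
                x, y = x.to(device), y.to(device)
                running += criterion(model(x), y).item() * x.size(0)
        valid_losses.append(running / len(valid_loader.dataset))

        scheduler.step()

    return model, optimizer, train_losses, valid_losses
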
Example No. 6
def train_xgboost():
    import xgboost as xgb
    from sklearn.metrics import accuracy_score
    from sklearn.preprocessing import PolynomialFeatures

    bst = xgb.XGBClassifier(
        base_score=0.5,
        colsample_bytree=1.0,
        gamma=0,
        learning_rate=0.1,
        max_delta_step=0,
        max_depth=5,
        min_child_weight=1,
        missing=None,
        n_estimators=100,
        nthread=-1,
        objective="multi:softprob",
        seed=0,
        silent=True,
        subsample=0.95,
    )

    train_dataset = PoseDataset([root_dir / d for d in train_data_dirs])
    X_train, y_train = aggregate_dataset(train_dataset)

    poly = PolynomialFeatures(2, include_bias=True)
    X_train = poly.fit_transform(X_train)  # keep the expanded features

    test_dataset = PoseDataset([root_dir / d for d in test_data_dirs])
    X_test, y_test = aggregate_dataset(test_dataset)

    X_test = poly.transform(X_test)  # reuse the transform fitted on the training data

    bst.fit(X_train, y_train)
    y_pred = bst.predict(X_test)
    acc = accuracy_score(y_test, y_pred)
    print("xgboost acc:", acc)
Example No. 7
def train_deep():
    from pathlib import Path

    import torch
    import torch.nn.functional as F
    import torch.optim as optim

    from model import FCN
    # PoseDataset, utils (which provides EarlyStopping), root_dir and the
    # train/test directory lists are expected at module level.

    def train(model, device, train_loader, optimizer):
        model.train()
        train_loss = 0
        for batch_idx, (data, target) in enumerate(train_loader):
            data, target = data.to(device), target.to(device)
            optimizer.zero_grad()
            output = model(data)
            loss = F.nll_loss(output, target)
            train_loss += loss.item()
            loss.backward()
            optimizer.step()

        return train_loss / len(train_loader.dataset)

    def test(model, device, test_loader):
        model.eval()
        test_loss = 0
        correct = 0
        with torch.no_grad():
            for data, target in test_loader:
                data, target = data.to(device), target.to(device)
                output = model(data)
                test_loss += F.nll_loss(
                    output, target,
                    reduction="sum").item()  # sum up batch loss
                pred = output.argmax(
                    dim=1,
                    keepdim=True)  # get the index of the max log-probability
                correct += pred.eq(target.view_as(pred)).sum().item()

        test_loss /= len(test_loader.dataset)
        test_acc = 100.0 * correct / len(test_loader.dataset)

        return test_loss, test_acc

    # training settings
    batch_size = 32
    test_batch_size = 1000
    epochs = 500
    patience = 30  # for early stopping
    use_cuda = torch.cuda.is_available()

    torch.manual_seed(9)

    device = torch.device("cuda" if use_cuda else "cpu")

    kwargs = {"num_workers": 1, "pin_memory": True} if use_cuda else {}
    train_loader = torch.utils.data.DataLoader(
        PoseDataset([root_dir / d for d in train_data_dirs]),
        batch_size=batch_size,
        shuffle=True,
        **kwargs,
    )
    test_loader = torch.utils.data.DataLoader(
        PoseDataset([root_dir / d for d in test_data_dirs], mode="test"),
        batch_size=test_batch_size,
        shuffle=True,
        **kwargs,
    )

    model = FCN().to(device)
    optimizer = optim.Adam(model.parameters(), lr=1e-3, amsgrad=True)

    early_stopping = utils.EarlyStopping(patience, Path("results"))
    for epoch in range(1, epochs + 1):
        train_loss = train(model, device, train_loader, optimizer)
        test_loss, test_acc = test(model, device, test_loader)
        print(f"epoch: {epoch:>3}, train_loss: {train_loss:.4f}, ", end="")
        print(f"test_loss: {test_loss:.4f}, test_acc: {test_acc:.3f}")

        early_stopping(test_loss, test_acc, model)

        if early_stopping.early_stop:
            print("Early stopping activated")
            break

    print(f"deep model acc: {early_stopping.best_acc}")
Example No. 8
from dataset import PoseDataset
from mask_generator import UnityMaskGenerator
import open3d as o3d
import cv2
import numpy as np
import torch

#m = UnityMaskGenerator("dataset_processed")
#m.generate_masks()

# Build the dataset and fetch sample 44; each item unpacks into the observed
# point cloud, the target cloud and the object model cloud (other fields unused here).
p = PoseDataset("train", 1000, False, 'dataset_processed', 0.03, True)
cloud, _, _, target, model, _ = p[44]

o3dcloud = o3d.geometry.PointCloud()
o3dcloud.points = o3d.utility.Vector3dVector(cloud)

#o3d.visualization.draw_geometries([o3dcloud])
o3d.io.write_point_cloud('depth_projected.ply', o3dcloud)

o3dtarget = o3d.geometry.PointCloud()
o3dtarget.points = o3d.utility.Vector3dVector(target)

#o3d.visualization.draw_geometries([o3dtarget])
o3d.io.write_point_cloud('target.ply', o3dtarget)

o3dmodel = o3d.geometry.PointCloud()
o3dmodel.points = o3d.utility.Vector3dVector(model)

#o3d.visualization.draw_geometries([o3dmodel])
o3d.io.write_point_cloud('model.ply', o3dmodel)