Ejemplo n.º 1
0
    def prepare_data(self):
        """Instantiate the train and test FAUST splits.

        The dataset root is ``../../data/<NAME>`` relative to this file;
        both splits share the same pre-transform, while the on-access
        transform differs between train and test.
        """
        root = osp.join(
            osp.dirname(osp.realpath(__file__)), "..", "..", "data",
            self.NAME)
        self.dataset_train = FAUST(
            root, True, self._train_transform, self._pre_transform)
        self.dataset_test = FAUST(
            root, False, self._test_transform, self._pre_transform)
Ejemplo n.º 2
0
from __future__ import division, print_function

import os.path as osp

import torch
from torch import nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch_geometric.datasets import FAUST
from torch_geometric.transform import CartesianAdj
from torch_geometric.utils import DataLoader
from torch_geometric.nn.modules import SplineConv

# Dataset root: <this file's directory>/../data/FAUST.
path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'FAUST')
# Both splits share the same on-access CartesianAdj transform.
train_dataset = FAUST(path, train=True, transform=CartesianAdj())
test_dataset = FAUST(path, train=False, transform=CartesianAdj())
# One mesh per batch; only the training set is shuffled.
train_loader = DataLoader(train_dataset, batch_size=1, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=1)


class Net(nn.Module):
    """SplineCNN over FAUST meshes: six SplineConv layers followed by a
    two-layer per-node head producing 6890 outputs."""

    def __init__(self):
        super(Net, self).__init__()
        # Channel plan: lift 1 -> 32, then widen to 64 and keep it there.
        # All convs use dim=3 and kernel_size=5. setattr registers each
        # layer under the same conv1..conv6 names as a direct assignment.
        plan = [(1, 32), (32, 64), (64, 64), (64, 64), (64, 64), (64, 64)]
        for i, (c_in, c_out) in enumerate(plan, start=1):
            setattr(self, 'conv{}'.format(i),
                    SplineConv(c_in, c_out, dim=3, kernel_size=5))
        # Per-node classifier head.
        self.fc1 = nn.Linear(64, 256)
        self.fc2 = nn.Linear(256, 6890)
Ejemplo n.º 3
0
import os.path as osp

import torch
import torch.nn.functional as F
from torch_geometric.datasets import FAUST
import torch_geometric.transforms as T
from torch_geometric.loader import DataLoader
from torch_geometric.nn import SplineConv

# Dataset root: <this file's directory>/../data/FAUST.
path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'FAUST')
# Pre-processing: convert mesh faces to an edge_index and give every node
# a constant feature of 1 (the raw meshes carry no node features).
pre_transform = T.Compose([T.FaceToEdge(), T.Constant(value=1)])
train_dataset = FAUST(path, True, T.Cartesian(), pre_transform)
test_dataset = FAUST(path, False, T.Cartesian(), pre_transform)
# One mesh per batch; only the training set is shuffled.
train_loader = DataLoader(train_dataset, batch_size=1, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=1)
# First training sample; used below to size the network's output layer.
d = train_dataset[0]


class Net(torch.nn.Module):
    def __init__(self):
        """Six SplineConv layers plus a two-layer head sized to the
        node count of the first training mesh (module-level ``d``)."""
        super().__init__()
        # Channel plan: 1 -> 32, then 64 throughout. Every conv uses
        # dim=3, kernel_size=5 and sum aggregation. setattr registers
        # each layer under the same conv1..conv6 attribute names.
        plan = [(1, 32), (32, 64), (64, 64), (64, 64), (64, 64), (64, 64)]
        for i, (c_in, c_out) in enumerate(plan, start=1):
            setattr(self, 'conv{}'.format(i),
                    SplineConv(c_in, c_out, dim=3, kernel_size=5,
                               aggr='add'))
        self.lin1 = torch.nn.Linear(64, 256)
        # One output logit per template vertex.
        self.lin2 = torch.nn.Linear(256, d.num_nodes)

    def forward(self, data):
Ejemplo n.º 4
0
# deterministic: seed torch and pin cuDNN so runs are reproducible
# (benchmark auto-tuning off, deterministic kernels on).
torch.manual_seed(args.seed)
cudnn.benchmark = False
cudnn.deterministic = True


class Pre_Transform(object):
    """Pre-processing transform: copy vertex positions into the node
    feature tensor, then convert mesh faces to graph edges."""

    def __call__(self, data):
        # Use the vertex coordinates as input features.
        data.x = data.pos
        # FaceToEdge builds data.edge_index from data.face.
        return T.FaceToEdge()(data)


# FAUST splits: Pre_Transform runs once at processing time,
# T.Cartesian() on every access.
train_dataset = FAUST(args.data_fp,
                      True,
                      transform=T.Cartesian(),
                      pre_transform=Pre_Transform())
test_dataset = FAUST(args.data_fp,
                     False,
                     transform=T.Cartesian(),
                     pre_transform=Pre_Transform())
# One mesh per batch; only the training set is shuffled.
train_loader = DataLoader(train_dataset, batch_size=1, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=1)
# First training sample; the target labels node i with class i
# (one class per node index).
d = train_dataset[0]
target = torch.arange(d.num_nodes, dtype=torch.long, device=device)
print(d)


class MoNet(nn.Module):
    # NOTE(review): this example appears truncated — only the start of the
    # constructor is visible; the layer definitions are missing from view.
    def __init__(self, in_channels, num_classes, kernel_size):
        super(MoNet, self).__init__()
Ejemplo n.º 5
0
from torch import nn
import torch.nn.functional as F
from torch.autograd import Variable

sys.path.insert(0, '.')
sys.path.insert(0, '..')

from torch_geometric.datasets import FAUST  # noqa
from torch_geometric.transforms import CartesianAdj  # noqa
from torch_geometric.utils import DataLoader  # noqa
from torch_geometric.nn.modules import SplineConv, Lin  # noqa

# Dataset root: <this file's directory>/../data/FAUST.
path = os.path.dirname(os.path.realpath(__file__))
path = os.path.join(path, '..', 'data', 'FAUST')
# Both splits share one on-access transform instance.
transform = CartesianAdj()
train_dataset = FAUST(path, train=True, transform=transform)
test_dataset = FAUST(path, train=False, transform=transform)

# n = 6890 — presumably the per-mesh node count (matches the 6890-wide
# output layer used by the other examples in this file); verify.
n = 6890
batch_size = 1

train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
# NOTE(review): the test loader is shuffled here, unlike the other
# examples in this file — confirm that is intentional.
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=True)


class Net(nn.Module):
    # NOTE(review): this example appears truncated — only the first three
    # conv layers of the constructor are visible.
    def __init__(self):
        super(Net, self).__init__()
        # Lift 1 -> 32 channels, then widen to 64; dim=3, kernel_size=5.
        self.conv1 = SplineConv(1, 32, dim=3, kernel_size=5)
        self.conv2 = SplineConv(32, 64, dim=3, kernel_size=5)
        self.conv3 = SplineConv(64, 64, dim=3, kernel_size=5)
Ejemplo n.º 6
0
# deterministic: seed torch and pin cuDNN so runs are reproducible
# (benchmark auto-tuning off, deterministic kernels on).
torch.manual_seed(args.seed)
cudnn.benchmark = False
cudnn.deterministic = True


class Pre_Transform(object):
    """Pre-processing transform: move vertex positions into the node
    feature tensor, build edges from faces, and drop the now-redundant
    ``pos`` tensor."""

    def __call__(self, data):
        # Use the vertex coordinates as input features.
        data.x = data.pos
        # FaceToEdge builds data.edge_index from data.face.
        out = T.FaceToEdge()(data)
        # Positions were copied into x above; release the original tensor.
        out.pos = None
        return out


# FAUST splits; Pre_Transform runs once at processing time.
train_dataset = FAUST(args.data_fp, True, pre_transform=Pre_Transform())
test_dataset = FAUST(args.data_fp, False, pre_transform=Pre_Transform())
# One mesh per batch; only the training set is shuffled.
train_loader = DataLoader(train_dataset, batch_size=1, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=1)
# First training sample; the target labels node i with class i.
d = train_dataset[0]
target = torch.arange(d.num_nodes, dtype=torch.long, device=device)
print(d)

# Output dimension equals the node count of the first mesh.
model = FeaStNet(d.num_features, d.num_nodes, args.heads).to(device)
print(model)
optimizer = optim.Adam(model.parameters(),
                       lr=args.lr,
                       weight_decay=args.weight_decay)
# Step decay: multiply the LR by lr_decay every decay_step scheduler steps.
scheduler = optim.lr_scheduler.StepLR(optimizer,
                                      args.decay_step,
                                      gamma=args.lr_decay)
Ejemplo n.º 7
0
# deterministic: seed NumPy and torch and pin cuDNN so runs reproduce.
seed = args.seed
np.random.seed(seed)
torch.manual_seed(seed)
cudnn.benchmark = False
cudnn.deterministic = True

batch_size = args.batch_size

# Preprocessor
# preprocessor = transforms.get_transforms()
# NOTE(review): val_split is read but not used in the visible code.
val_split = args.val_split

# Select the dataset/loader pair from the CLI flag.
if args.datasets == 'faust':
    train_dataset = FAUST('.')
    val_dataset = FAUST('.', train=False)
    train_dataloader = FAUSTDataLoader(train_dataset, batch_size=batch_size)
    val_dataloader = FAUSTDataLoader(val_dataset, batch_size=batch_size)
elif args.datasets == 'full_faust':
    dataset = FullFAUST('.')
    # TODO: Remove hardcoding of test person
    train_dataset, val_dataset = split_faust_by_person(dataset, [1])
    train_dataloader = FAUSTDataLoader(train_dataset, batch_size=batch_size)
    val_dataloader = FAUSTDataLoader(val_dataset, batch_size=batch_size)
else:
    raise NotImplementedError('')

# for DFAUST splitting. 9:1 ratio, with seed from above

print('Template')