Example #1
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

train_batch_logger = Logger(os.path.join('./Results', 'train_batch.log'), ['epoch', 'batch', 'loss', 'acc'])
test_batch_logger = Logger(os.path.join('./Results', 'test_batch.log'), ['batch', 'loss', 'acc'])
acc_logger = Logger(os.path.join('./Results', 'acc.log'), ['epoch', 'acc'])
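
`Logger` here is a project-local helper rather than a library class. A minimal sketch consistent with how it is constructed above, assuming it also exposes a `log(dict)` method used during training (the method name is an assumption, not taken from the source):

import csv
import os

class Logger:
    """Append-style CSV logger with a fixed column header."""

    def __init__(self, path, header):
        os.makedirs(os.path.dirname(path), exist_ok=True)
        self.file = open(path, 'w', newline='')
        self.writer = csv.DictWriter(self.file, fieldnames=header)
        self.writer.writeheader()

    def log(self, values):
        # values: dict keyed by the header columns, e.g. {'epoch': 1, 'acc': 0.9}
        self.writer.writerow(values)
        self.file.flush()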


#shutil.rmtree(osp.join('..',  'data/Traingraph/processed'))
#shutil.rmtree(osp.join('..', 'data/Testgraph/processed'))
for epoch in range(1, 180):
    
    train_path = osp.join('..', 'data/Traingraph')
    test_path = osp.join('..', 'data/Testgraph')

    # Note: the test set gets the same random flip/scale augmentation as the train set.
    train_data_aug = T.Compose([T.Cartesian(cat=False), T.RandomFlip(axis=0, p=0.3), T.RandomScale([0.95, 0.999])])
    test_data_aug = T.Compose([T.Cartesian(cat=False), T.RandomFlip(axis=0, p=0.3), T.RandomScale([0.95, 0.999])])

    train_dataset = MyOwnDataset(train_path, transform=train_data_aug)  #### transform=T.Cartesian()
    test_dataset = MyOwnDataset(test_path, transform=test_data_aug)

    
    train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=64)
    
    train(epoch, train_batch_logger, train_loader)
    test_acc = test(test_batch_logger, test_loader)
    
    print('Epoch: {:02d}, Test: {:.4f}'.format(epoch, test_acc))
    
    torch.save(model, './runs_model/model.pkl')  # saves the whole model object; saving model.state_dict() is the more portable idiom
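
The loop above relies on `train` and `test` helpers defined elsewhere in the script. A minimal sketch of what they might look like, assuming a global `model`, `optimizer` and `device`, that the model returns per-graph log-probabilities, and that `Logger` exposes a `log(dict)` method (all of these are assumptions, not part of the original source):

import torch.nn.functional as F

def train(epoch, logger, loader):
    model.train()
    for i, data in enumerate(loader):
        data = data.to(device)
        optimizer.zero_grad()
        out = model(data)               # assumed: log-probabilities per graph
        loss = F.nll_loss(out, data.y)
        loss.backward()
        optimizer.step()
        acc = out.max(1)[1].eq(data.y).float().mean().item()
        logger.log({'epoch': epoch, 'batch': i, 'loss': loss.item(), 'acc': acc})

def test(logger, loader):
    model.eval()
    correct = total = 0
    with torch.no_grad():
        for i, data in enumerate(loader):
            data = data.to(device)
            pred = model(data).max(1)[1]
            correct += pred.eq(data.y).sum().item()
            total += data.num_graphs
    return correct / total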
Example #2
# ## Data loading
# Let's get the dataset

import torch
from torch_geometric.datasets import ModelNet
import torch_geometric.transforms as T
import time
from tqdm import tqdm_notebook  # deprecated alias; current tqdm provides tqdm.notebook.tqdm

pre_transform = T.NormalizeScale()
transform = T.Compose([
    T.SamplePoints(1024),
    T.RandomRotate(30),
    T.RandomScale((0.5, 2)),
])
name = '40'  # ModelNet40

train_ds = ModelNet(root='./',
                    train=True,
                    name=name,
                    pre_transform=pre_transform,
                    transform=transform)

test_ds = ModelNet(root='./',
                   train=False,  # evaluate on the held-out test split
                   name=name,
                   pre_transform=pre_transform,
                   transform=T.SamplePoints(1024 * 4))
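
To batch these point clouds one would typically wrap the datasets in a PyTorch Geometric DataLoader. A minimal sketch (the batch size is an arbitrary choice; newer releases import DataLoader from torch_geometric.loader instead):

from torch_geometric.data import DataLoader

train_loader = DataLoader(train_ds, batch_size=32, shuffle=True)
test_loader = DataLoader(test_ds, batch_size=32)

batch = next(iter(train_loader))
# batch.pos stacks the sampled points of every cloud in the batch;
# batch.batch maps each point to its cloud, batch.y holds the class labels
print(batch.pos.shape, batch.batch.shape, batch.y.shape)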
Example #3
    args = parser.parse_args()
    torch.multiprocessing.set_sharing_strategy('file_system')
    torch.backends.cudnn.benchmark = False
    logging.basicConfig(filename=Config.graph_train_log_path,
                        level=logging.DEBUG)

    device = torch.device("cuda:" +
                          args.cuda if torch.cuda.is_available() else "cpu")

    model = Net()
    model = model.to(device)
    model.load_state_dict(
        torch.load(os.path.join(Config.model_dir, args.model_name)))
    test_data_aug = T.Compose(
        [T.Cartesian(cat=False),
         T.RandomScale([0.99999, 1])])
    # test_data_aug = T.Compose([T.Cartesian(cat=False)])
    test_dataset = EV_Gait_3DGraph_Dataset(Config.graph_test_dir,
                                           transform=test_data_aug)
    test_loader = DataLoader(test_dataset,
                             batch_size=16,
                             num_workers=2,
                             pin_memory=True)

    # test
    model.eval()
    correct = 0
    total = 0

    for index, data in enumerate(tqdm(test_loader)):
        data = data.to(device)
        # assumed continuation: the model returns per-graph class scores and labels live in data.y
        with torch.no_grad():
            pred = model(data).max(1)[1]
        total += data.num_graphs
        correct += pred.eq(data.y).sum().item()

    print("test accuracy: {:.4f}".format(correct / total))
Example #4
test_batch_logger = Logger(os.path.join('./Results', 'test_batch.log'),
                           ['batch', 'loss', 'acc'])
acc_logger = Logger(os.path.join('./Results', 'acc.log'), ['epoch', 'acc'])

#shutil.rmtree(osp.join('..',  'data/Traingraph/processed'))
#shutil.rmtree(osp.join('..', 'data/Testgraph/processed'))
for epoch in range(1, 180):

    train_path = osp.join('..', 'data/Traingraph')
    test_path = osp.join('..', 'data/Testgraph')

    #train_data_aug = T.Compose([T.Cartesian(cat=False), T.RandomFlip(axis=0)])
    train_data_aug = T.Compose([
        T.Cartesian(cat=False),
        T.RandomFlip(axis=0, p=0.3),
        T.RandomScale([0.96, 0.999])
    ])
    test_data_aug = T.Compose([
        T.Cartesian(cat=False),
        T.RandomFlip(axis=0, p=0.3),
        T.RandomScale([0.96, 0.999])
    ])

    train_dataset = MyOwnDataset(
        train_path, transform=train_data_aug)  #### transform=T.Cartesian()
    test_dataset = MyOwnDataset(test_path, transform=test_data_aug)

    train_loader = DataLoader(train_dataset, batch_size=3, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=3)

    train(epoch, train_batch_logger, train_loader)
Example #5
train_batch_logger = Logger(os.path.join('./Results', 'train_batch.log'),
                            ['epoch', 'batch', 'loss', 'acc'])
test_batch_logger = Logger(os.path.join('./Results', 'test_batch.log'),
                           ['batch', 'loss', 'acc'])

#shutil.rmtree(osp.join('..',  'data/Traingraph/processed'))
#shutil.rmtree(osp.join('..', 'data/Testgraph/processed'))
for epoch in range(1, 150):

    train_path = osp.join('..', 'data/Traingraph')
    test_path = osp.join('..', 'data/Testgraph')

    train_data_aug = T.Compose([
        T.Cartesian(cat=False),
        T.RandomFlip(axis=0, p=0.3),
        T.RandomScale([0.95, 0.999]),
        T.RandomFlip(axis=1, p=0.2)
    ])
    test_data_aug = T.Compose([
        T.Cartesian(cat=False),
        T.RandomFlip(axis=0, p=0.3),
        T.RandomScale([0.95, 0.999]),
        T.RandomFlip(axis=1, p=0.2)
    ])

    train_dataset = MyOwnDataset(
        train_path, transform=train_data_aug)  #### transform=T.Cartesian()
    test_dataset = MyOwnDataset(test_path, transform=test_data_aug)

    train_loader = DataLoader(train_dataset, batch_size=3, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=3)
Example #6
        os.makedirs(model_dir)

    logging.basicConfig(filename=os.path.join(log_dir, 'n_mnist.log'),
                        level=logging.DEBUG)
    model_file = 'n_mnist.pkl'

    device = torch.device("cuda:" +
                          args.cuda if torch.cuda.is_available() else "cpu")

    model = model.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

    pre_transform = T.Compose([T.Cartesian(cat=False)])  # applied once, when the dataset is first processed and cached
    # transform, in contrast, runs on every access, so the augmentation below is resampled each epoch
    # train_data_aug = T.Compose([T.RandomScale([0.95, 1])])
    train_data_aug = T.Compose([
        T.RandomScale([0.95, 1]),
        T.RandomRotate((0, 10), axis=0),
        T.RandomFlip(axis=0, p=0.5)
    ])

    train_dataset = Graph_2D_Memory_Dataset(train_dir,
                                            transform=train_data_aug,
                                            pre_transform=pre_transform)
    train_loader = DataLoader(train_dataset,
                              batch_size=args.batch_size,
                              shuffle=True,
                              num_workers=args.workers)

    # train
    print("train")
    for epoch in range(1, args.epoch):
        ...  # per-epoch training body not included in this excerpt

    # -- second fragment: setup for training the EV_Gait_3DGraph model --
    parser = argparse.ArgumentParser()
    parser.add_argument("--cuda", default="0", help="The GPU ID")
    parser.add_argument("--epoch", default=150, type=int, help="The GPU ID")
    parser.add_argument("--batch_size", default=16, type=int, help="batch size")
    args = parser.parse_args()
    torch.multiprocessing.set_sharing_strategy('file_system')
    torch.backends.cudnn.benchmark = False
    logging.basicConfig(filename=Config.graph_train_log_path, level=logging.DEBUG)

    device = torch.device("cuda:" + args.cuda if torch.cuda.is_available() else "cpu")

    model = Net()
    model = model.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    # train_data_aug = T.Compose([T.Cartesian(cat=False), T.RandomScale([0.96, 0.999]), T.RandomTranslate(0.01)])
    train_data_aug = T.Compose([T.Cartesian(cat=False), T.RandomScale([0.96, 0.999])])

    train_dataset = EV_Gait_3DGraph_Dataset(
        Config.graph_train_dir, transform=train_data_aug
    )
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=2, pin_memory=True)

    test_data_aug = T.Compose([T.Cartesian(cat=False), T.RandomScale([0.999, 1])])
    # test_data_aug = T.Compose([T.Cartesian(cat=False)])
    test_dataset = EV_Gait_3DGraph_Dataset(
        Config.graph_test_dir, transform=test_data_aug
    )
    test_loader = DataLoader(test_dataset, batch_size=args.batch_size, num_workers=2, pin_memory=True)

    # train
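
The excerpt stops at the `# train` marker. A plausible continuation, mirroring the per-epoch loop of the other examples (the loss function and the use of `data.y` are assumptions about this model, not taken from the source):

    import torch.nn.functional as F  # assumed import

    for epoch in range(1, args.epoch):
        model.train()
        for data in train_loader:
            data = data.to(device)
            optimizer.zero_grad()
            loss = F.nll_loss(model(data), data.y)  # assumed: model returns log-probabilities
            loss.backward()
            optimizer.step()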