Example #1
    )

    return parser


if __name__ == "__main__":
    args = default_argument_parser().parse_args()
    np.random.seed(args.seed)
    num_classes = 2
    transforms = []
    if args.max_points > 0:
        transforms.append(T.FixedPoints(args.max_points))
    if args.augment:
        transforms.append(T.RandomRotate((-180, 180),
                                         axis=2))  # Rotate around z axis
        transforms.append(T.RandomFlip(0))  # Flip about x axis
        transforms.append(T.RandomFlip(1))  # Flip about y axis
        transforms.append(T.RandomTranslate(0.0001))  # Random jitter
    if args.norm:
        transforms.append(T.NormalizeScale())
    transform = T.Compose(transforms=transforms) if transforms else None
    train_dataset = EventDataset(args.dataset,
                                 "trainval",
                                 include_proton=True,
                                 task="separation",
                                 cleanliness=args.clean,
                                 pre_transform=None,
                                 transform=transform,
                                 balanced_classes=True,
                                 fraction=0.001)
    test_dataset = EventDataset(args.dataset,
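The excerpt above opens partway through default_argument_parser(), so only the closing lines of the parser survive. A minimal sketch of what such a parser could look like, inferred purely from the attributes the __main__ block reads (seed, max_points, augment, norm, clean, dataset); the flag spellings, types, defaults and help strings are assumptions, not the author's code:

import argparse

def default_argument_parser():
    # Hypothetical reconstruction: only the attribute names come from the
    # excerpt above; everything else is assumed.
    parser = argparse.ArgumentParser(
        description="Train a point-cloud separation model")
    parser.add_argument("--dataset", type=str, required=True,
                        help="Path to the EventDataset root")
    parser.add_argument("--seed", type=int, default=0,
                        help="NumPy random seed")
    parser.add_argument("--max-points", type=int, default=0,
                        help="If > 0, keep a fixed number of points per event")
    parser.add_argument("--augment", action="store_true",
                        help="Enable random rotation/flip/jitter augmentation")
    parser.add_argument("--norm", action="store_true",
                        help="Apply T.NormalizeScale() to each point cloud")
    parser.add_argument("--clean", default=None,
                        help="Cleanliness setting forwarded to EventDataset")
    return parser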
Example #2
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

train_batch_logger = Logger(os.path.join('./Results', 'train_batch.log'), ['epoch', 'batch', 'loss', 'acc'])
test_batch_logger = Logger(os.path.join('./Results', 'test_batch.log'), ['batch', 'loss', 'acc'])
acc_logger = Logger(os.path.join('./Results', 'acc.log'), ['epoch', 'acc'])


#shutil.rmtree(osp.join('..',  'data/Traingraph/processed'))
#shutil.rmtree(osp.join('..', 'data/Testgraph/processed'))
for epoch in range(1, 180):
    
    train_path = osp.join('..',  'data/Traingraph')
    test_path = osp.join('..', 'data/Testgraph')

    train_data_aug = T.Compose([
        T.Cartesian(cat=False),
        T.RandomFlip(axis=0, p=0.3),
        T.RandomScale([0.95, 0.999])
    ])
    test_data_aug = T.Compose([
        T.Cartesian(cat=False),
        T.RandomFlip(axis=0, p=0.3),
        T.RandomScale([0.95, 0.999])
    ])

    train_dataset = MyOwnDataset(train_path, transform=train_data_aug)      #### transform=T.Cartesian()
    test_dataset = MyOwnDataset(test_path, transform=test_data_aug)

    
    train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=64)
    
    train(epoch, train_batch_logger, train_loader)
    test_acc = test(test_batch_logger, test_loader)
    
    print('Epoch: {:02d}, Test: {:.4f}'.format(epoch, test_acc))
    
    torch.save(model, './runs_model/model.pkl')
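The loop above relies on train() and test(), which are not part of the excerpt. A minimal sketch of what they might look like, assuming the model, optimizer and a device variable from the enclosing script, a Logger.log(dict) method, and a model that returns log-probabilities (so F.nll_loss applies); all of these are assumptions rather than the original code:

import torch
import torch.nn.functional as F

def train(epoch, batch_logger, loader):
    # Hypothetical sketch; `model`, `optimizer` and `device` are assumed to
    # be defined in the surrounding script.
    model.train()
    for i, data in enumerate(loader):
        data = data.to(device)
        optimizer.zero_grad()
        out = model(data)
        loss = F.nll_loss(out, data.y)
        loss.backward()
        optimizer.step()
        acc = (out.argmax(dim=1) == data.y).float().mean().item()
        batch_logger.log({'epoch': epoch, 'batch': i,
                          'loss': loss.item(), 'acc': acc})

def test(batch_logger, loader):
    # Returns the overall accuracy, matching how test_acc is used above.
    model.eval()
    correct = total = 0
    with torch.no_grad():
        for i, data in enumerate(loader):
            data = data.to(device)
            out = model(data)
            pred = out.argmax(dim=1)
            correct += (pred == data.y).sum().item()
            total += data.y.size(0)
            batch_logger.log({'batch': i,
                              'loss': F.nll_loss(out, data.y).item(),
                              'acc': (pred == data.y).float().mean().item()})
    return correct / total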
Example #3
                            ['epoch', 'batch', 'loss', 'acc'])
test_batch_logger = Logger(os.path.join('./Results', 'test_batch.log'),
                           ['batch', 'loss', 'acc'])
acc_logger = Logger(os.path.join('./Results', 'acc.log'), ['epoch', 'acc'])

#shutil.rmtree(osp.join('..',  'data/Traingraph/processed'))
#shutil.rmtree(osp.join('..', 'data/Testgraph/processed'))
for epoch in range(1, 180):

    train_path = osp.join('..', 'data/Traingraph')
    test_path = osp.join('..', 'data/Testgraph')

    #train_data_aug = T.Compose([T.Cartesian(cat=False), T.RandomFlip(axis=0)])
    train_data_aug = T.Compose([
        T.Cartesian(cat=False),
        T.RandomFlip(axis=0, p=0.3),
        T.RandomScale([0.96, 0.999])
    ])
    test_data_aug = T.Compose([
        T.Cartesian(cat=False),
        T.RandomFlip(axis=0, p=0.3),
        T.RandomScale([0.96, 0.999])
    ])

    train_dataset = MyOwnDataset(
        train_path, transform=train_data_aug)  #### transform=T.Cartesian()
    test_dataset = MyOwnDataset(test_path, transform=test_data_aug)

    train_loader = DataLoader(train_dataset, batch_size=3, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=3)
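Logger is not defined in any of these excerpts; from the call sites it takes a file path and a list of column names and then records one row of values at a time. A plausible minimal implementation consistent with that usage (the log(dict) method is the same assumption made in the train/test sketch above):

import csv
import os

class Logger:
    # Hypothetical sketch: writes the header once, then appends one
    # tab-separated row per call to log().
    def __init__(self, path, header):
        directory = os.path.dirname(path)
        if directory:
            os.makedirs(directory, exist_ok=True)
        self.header = header
        self.file = open(path, 'w', newline='')
        self.writer = csv.writer(self.file, delimiter='\t')
        self.writer.writerow(header)
        self.file.flush()

    def log(self, values):
        # `values` is a dict keyed by the column names given at construction.
        self.writer.writerow([values[col] for col in self.header])
        self.file.flush()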
Example #4
train_batch_logger = Logger(os.path.join('./Results', 'train_batch.log'),
                            ['epoch', 'batch', 'loss', 'acc'])
test_batch_logger = Logger(os.path.join('./Results', 'test_batch.log'),
                           ['batch', 'loss', 'acc'])

#shutil.rmtree(osp.join('..',  'data/Traingraph/processed'))
#shutil.rmtree(osp.join('..', 'data/Testgraph/processed'))
for epoch in range(1, 150):

    train_path = osp.join('..', 'data/Traingraph')
    test_path = osp.join('..', 'data/Testgraph')

    train_data_aug = T.Compose([
        T.Cartesian(cat=False),
        T.RandomFlip(axis=0, p=0.3),
        T.RandomScale([0.95, 0.999]),
        T.RandomFlip(axis=1, p=0.2)
    ])
    test_data_aug = T.Compose([
        T.Cartesian(cat=False),
        T.RandomFlip(axis=0, p=0.3),
        T.RandomScale([0.95, 0.999]),
        T.RandomFlip(axis=1, p=0.2)
    ])

    train_dataset = MyOwnDataset(
        train_path, transform=train_data_aug)  #### transform=T.Cartesian()
    test_dataset = MyOwnDataset(test_path, transform=test_data_aug)

    train_loader = DataLoader(train_dataset, batch_size=3, shuffle=True)
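All of these examples feed the datasets to the graph DataLoader from PyTorch Geometric, which collates several graphs into one batched Data object. A short usage sketch of what a single batch exposes; the attribute names follow PyTorch Geometric's Data convention, and train_dataset is the object built in the example above:

from torch_geometric.loader import DataLoader  # torch_geometric.data.DataLoader in older releases

loader = DataLoader(train_dataset, batch_size=3, shuffle=True)
batch = next(iter(loader))
# One batch bundles up to 3 graphs:
#   batch.x          node features stacked across all graphs in the batch
#   batch.edge_index COO connectivity with node indices offset per graph
#   batch.edge_attr  edge features, here the offsets produced by T.Cartesian
#   batch.batch      maps every node to the index of its graph within the batch
print(batch.num_graphs, batch.x.shape, batch.edge_attr.shape)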
Example #5
train_batch_logger = Logger(os.path.join('./Results', 'train_batch.log'),
                            ['epoch', 'batch', 'loss', 'acc'])
test_batch_logger = Logger(os.path.join('./Results', 'test_batch.log'),
                           ['batch', 'loss', 'acc'])
acc_logger = Logger(os.path.join('./Results', 'acc.log'), ['epoch', 'acc'])

#shutil.rmtree(osp.join('..',  'data/Traingraph/processed'))
#shutil.rmtree(osp.join('..', 'data/Testgraph/processed'))
for epoch in range(1, 150):

    train_path = osp.join('..', 'data/Traingraph')
    test_path = osp.join('..', 'data/Testgraph')

    train_data_aug = T.Compose([
        T.Cartesian(cat=False),
        T.RandomFlip(axis=0, p=0.5),
        T.RandomScale([0.96, 0.999]),
        T.RandomFlip(axis=1, p=0.5)
    ])
    test_data_aug = T.Compose([
        T.Cartesian(cat=False),
        T.RandomFlip(axis=0, p=0.5),
        T.RandomScale([0.96, 0.999]),
        T.RandomFlip(axis=1, p=0.5)
    ])

    train_dataset = MyOwnDataset(
        train_path, transform=train_data_aug)  #### transform=T.Cartesian()
    test_dataset = MyOwnDataset(test_path, transform=test_data_aug)

    train_loader = DataLoader(train_dataset, batch_size=3, shuffle=True)
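Examples #2 through #5 apply the same random flips and rescaling to the test set as to the training set, so the reported test accuracy fluctuates between runs even for a fixed model. If deterministic evaluation is preferred, the test pipeline can keep only the edge-attribute preprocessing; a small sketch using the names from the example above:

import torch_geometric.transforms as T

# Deterministic test-time pipeline: keep T.Cartesian, drop the random augmentation.
test_data_aug = T.Compose([T.Cartesian(cat=False)])
test_dataset = MyOwnDataset(test_path, transform=test_data_aug)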
Example #6
    logging.basicConfig(filename=os.path.join(log_dir, 'n_mnist.log'),
                        level=logging.DEBUG)
    model_file = 'n_mnist.pkl'

    device = torch.device("cuda:" +
                          args.cuda if torch.cuda.is_available() else "cpu")

    model = model.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

    pre_transform = T.Compose([T.Cartesian(cat=False)])
    # train_data_aug = T.Compose([T.RandomScale([0.95, 1])])
    train_data_aug = T.Compose([
        T.RandomScale([0.95, 1]),
        T.RandomRotate((0, 10), axis=0),
        T.RandomFlip(axis=0, p=0.5)
    ])

    train_dataset = Graph_2D_Memory_Dataset(train_dir,
                                            transform=train_data_aug,
                                            pre_transform=pre_transform)
    train_loader = DataLoader(train_dataset,
                              batch_size=args.batch_size,
                              shuffle=True,
                              num_workers=args.workers)

    # train
    print("train")
    for epoch in range(1, args.epoch):
        model.train()
        correct = 0
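Example #6 breaks off right after correct = 0. A hedged sketch of how such a per-epoch loop commonly continues for a PyTorch Geometric classifier; the loss function, accuracy bookkeeping and checkpointing are assumptions rather than the author's code (it also assumes import torch.nn.functional as F among the truncated imports):

        total = 0
        for data in train_loader:
            data = data.to(device)
            optimizer.zero_grad()
            out = model(data)
            loss = F.nll_loss(out, data.y)
            loss.backward()
            optimizer.step()
            pred = out.argmax(dim=1)
            correct += (pred == data.y).sum().item()
            total += data.y.size(0)
        logging.info('epoch %d, train accuracy %.4f', epoch, correct / total)
        torch.save(model, model_file)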