Example #1
torch.manual_seed(p.random_seed)
np.random.seed(p.random_seed)
learn_rate = p.learn_rate
modelpath = make_model_directory('c_beta_models')
epochs = p.epochs

# ---- Importing and structuring Datasets and Model ----
# Remember: the shape index can only be computed locally. Add other transforms
# after the pre_transform step so they do not contaminate the cached data.
trainset = StructuresDataset(
    root='/work/upcorreia/users/dcoukos/datasets/res_train/'
)  # pre_transforms were performed locally
validset = trainset[:150]
trainset = trainset[150:]
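
# Background, assuming StructuresDataset is a torch_geometric dataset:
# pre_transform runs once when the dataset is first processed and its output
# is cached under processed/, while transform runs on every access and never
# touches the cache. The shape index therefore has to live in the cached
# (locally computed) pre_transform output, and any augmentation is attached
# afterwards via transform.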

model = p.model_type(3, heads=p.heads).to(device)
optimizer = torch.optim.Adam(model.parameters(),
                             lr=learn_rate,
                             weight_decay=p.weight_decay)

writer = SummaryWriter('./c_beta_runs',
                       comment='model:{}_lr:{}_shuffle:{}_seed:{}'.format(
                           p.version, learn_rate, p.shuffle_dataset,
                           p.random_seed))

max_roc_auc = 0

train_loader = DataLoader(trainset,
                          shuffle=p.shuffle_dataset,
                          batch_size=p.batch_size)
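
# A minimal logging sketch for the SummaryWriter created above; the scalar
# tags are illustrative assumptions, not names taken from this repo.
def log_epoch(epoch, train_loss, roc_auc):
    writer.add_scalar('Loss/train', train_loss, epoch)    # training loss curve
    writer.add_scalar('ROC_AUC/valid', roc_auc, epoch)    # validation ROC AUC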
Example #2
if str(device) == 'cuda:0':
    epochs = p.epochs
else:
    epochs = 20

# ---- Importing and structuring Datasets and Model ----
print('Importing structures.')
# Remember: the shape index can only be computed locally. Add other transforms
# after the pre_transform step so they do not contaminate the cached data.
trainset = StructuresDataset(root='./datasets/named_masif_train_ds/')
validset = StructuresDataset(root='./datasets/named_masif_test_ds/')
if p.shuffle_dataset:
    trainset = trainset.shuffle()
n_features = trainset.get(0).x.shape[1]
print('Setting up model...')
model = p.model_type(16, heads=p.heads, masif_descr=True).to(device)
optimizer = torch.optim.Adam(model.parameters(),
                             lr=learn_rate,
                             weight_decay=p.weight_decay)
# scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
#                                                       factor=p.lr_decay,
#                                                       patience=p.patience)
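# If re-enabled, ReduceLROnPlateau multiplies the learning rate by `factor`
# after `patience` epochs without improvement in the monitored metric, and it
# must be stepped with that metric once per epoch, e.g. scheduler.step(val_loss)
# during validation.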

writer = SummaryWriter(comment='model:{}_lr:{}_shuffle:{}_seed:{}'.format(
    p.version, learn_rate, p.shuffle_dataset, p.random_seed))

# axes = [0, 1, 2]
max_roc_auc = 0

# ---- Training ----
print('Training...')
Example #3
    converter = None

print('Importing structures.')
trainset = Structures(root='./datasets/{}_train/'.format(p.dataset),
                      prefix=p.dataset)
samples = len(trainset)
cutoff = int(np.floor(samples*(1-p.validation_split)))
validset = trainset[cutoff:]
trainset = trainset[:cutoff]
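# e.g. with 1000 structures and validation_split = 0.2, cutoff = 800: the
# first 800 structures are used for training, the last 200 for validation.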


if p.shuffle_dataset:
    trainset = trainset.shuffle()
n_features = trainset.get(0).x.shape[1]
print('Setting up model...')
model = p.model_type(6, heads=p.heads)
model = DataParallel(model).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=learn_rate, weight_decay=p.weight_decay)
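
# If DataParallel above is torch_geometric.nn.DataParallel, it expects each
# batch as a Python list of Data objects, so the loader is PyG's
# DataListLoader rather than the standard DataLoader. A sketch under that
# assumption (import path per older torch_geometric releases):
from torch_geometric.data import DataListLoader
train_loader = DataListLoader(trainset,
                              batch_size=p.batch_size,
                              shuffle=p.shuffle_dataset)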
# scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
#                                                       factor=p.lr_decay,
#                                                       patience=p.patience)

writer = SummaryWriter(comment='model:{}_lr:{}_lr_decay:{}_shuffle:{}_seed:{}'.format(
                       p.version,
                       learn_rate,
                       p.lr_decay,
                       p.shuffle_dataset,
                       p.random_seed))


# axes = [0, 1, 2]
Example #4
# Remember: the shape index can only be computed locally. Add other transforms
# after the pre_transform step so they do not contaminate the cached data.
trainset = Structures(root='./datasets/masif_site_train/',
                      pre_transform=Compose((FaceAttributes(), NodeCurvature(),
                                             FaceToEdge(), TwoHop())))
# Define the transform inside the epoch loop so the rotation occurs around a
# different axis every time.
validset = Structures(root='./datasets/masif_site_test/',
                      pre_transform=Compose((FaceAttributes(), NodeCurvature(),
                                             FaceToEdge(), TwoHop())))
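
# A sketch of the per-epoch rotation described above, assuming the intent is
# to cycle the rotation axis each epoch. RandomRotate is a stock
# torch_geometric transform; the cycling scheme itself is an assumption.
from torch_geometric.transforms import RandomRotate

def epoch_rotation(epoch):
    # Cycle the axis (0=x, 1=y, 2=z) so each epoch rotates around a new axis.
    return RandomRotate(degrees=180, axis=epoch % 3)
# then, inside the epoch loop: trainset.transform = epoch_rotation(epoch)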

if p.shuffle_dataset:
    trainset = trainset.shuffle()
n_features = trainset.get(0).x.shape[1]

# ---- Import previous model to allow deep network to train -------------
model = p.model_type(3).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=learn_rate, weight_decay=p.weight_decay)

writer = SummaryWriter(comment='model:{}_lr:{}_shuffle:{}_seed:{}'.format(
                       p.version,
                       learn_rate,
                       p.shuffle_dataset,
                       p.random_seed))


max_roc_auc = 0

# ---- Training ----
print('Training...')
for epoch in range(1, epochs+1):
    train_loader = DataLoader(trainset, shuffle=p.shuffle_dataset, batch_size=p.batch_size)
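    # The loop body is truncated in this excerpt; a continuation consistent
    # with the max_roc_auc counter above would checkpoint on validation ROC
    # AUC. train_epoch and evaluate_roc_auc are hypothetical helpers, and the
    # checkpoint path is illustrative.
    train_loss = train_epoch(model, train_loader, optimizer, device)
    roc_auc = evaluate_roc_auc(model, validset, device)
    writer.add_scalar('ROC_AUC/valid', roc_auc, epoch)
    if roc_auc > max_roc_auc:  # keep only the best-scoring checkpoint
        max_roc_auc = roc_auc
        torch.save(model.state_dict(), 'best_model.pt')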
Example #5
print('Importing structures.')
# Remember: the shape index can only be computed locally. Add other transforms
# after the pre_transform step so they do not contaminate the cached data.
trainset = Structures(root='./datasets/masif_site_train/',
                      transform=AddShapeIndex())
validset = Structures(root='./datasets/masif_site_test/',
                      transform=AddShapeIndex())

# Quick sanity check: each entry of a Structures dataset is a torch_geometric
# Data object (node features x, edge_index, labels, etc.).
print(trainset[0])

if p.shuffle_dataset:
    trainset = trainset.shuffle()
n_features = trainset.get(0).x.shape[1]
print('Setting up model...')
model = p.model_type(4, 4)
optimizer = torch.optim.Adam(model.parameters(),
                             lr=learn_rate,
                             weight_decay=p.weight_decay)
# scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
#                                                       factor=p.lr_decay,
#                                                       patience=p.patience)

writer = SummaryWriter(comment='model:{}_lr:{}_shuffle:{}_seed:{}'.format(
    p.version, learn_rate, p.shuffle_dataset, p.random_seed))

# axes = [0, 1, 2]
max_roc_auc = 0
# ---- Training ----

model.to(device)