Example #1
import numpy as np
import torch
from sklearn.preprocessing import StandardScaler

# ConvNet is assumed to be defined elsewhere in the project.
def test2():
    device = torch.device("cuda")

    # Load the segmentation data and keep the first 40 samples.
    data = np.load('./data/seg_data.npy')
    X_train = data[0][0][:40]
    print(X_train.shape)

    # Standardize the features, then reshape to (batch, channels, length).
    X_train = StandardScaler().fit_transform(X_train)
    X_train = X_train.reshape(-1, 1, 1920)
    X_train = torch.from_numpy(X_train).to(device).float()
    print(X_train.device)

    # Rebuild the model and restore the trained weights.
    model = ConvNet(2).to(device)
    model.load_state_dict(torch.load("./DNNmodel.ckpt", map_location=device))

    # Switch to evaluation mode so dropout/batch-norm layers behave
    # deterministically, and disable gradient tracking for inference.
    model.eval()
    with torch.no_grad():
        outputs = model(X_train)
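If ConvNet emits one score per class (it is built here with 2 output classes), the raw outputs can be turned into predicted labels by taking the argmax over the class dimension. A minimal, purely illustrative sketch, assuming outputs has shape (batch, 2):

    # Illustrative only: map raw class scores to predicted labels.
    preds = torch.argmax(outputs, dim=1)   # shape: (batch,)
    print(preds.cpu().numpy())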
Example #2
import time

import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim

# GCN and the data tensors (features, changed_adj, labels, idx_train,
# idx_val, idx_test) are assumed to be defined/loaded elsewhere in the project.

# Fix the random seeds for reproducibility.
np.random.seed(100)
torch.manual_seed(100)
torch.cuda.manual_seed(100)

# Hyperparameters
hidden = 16
lr = 0.01
weight_decay = 5e-4
dropout = 0.5
epochs = 200

# Model and optimizer
model = GCN(nfeat=features.shape[1], nhid=hidden, out_dim=25, dropout=dropout)
optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)

# Move the model and all tensors to the GPU.
model.cuda()
features = features.cuda()
adj = changed_adj.cuda()
labels = labels.cuda()
idx_train = idx_train.cuda()
idx_val = idx_val.cuda()
idx_test = idx_test.cuda()
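
The train() function below calls an accuracy helper that is not included in this snippet. A minimal sketch of what such a helper commonly looks like (illustrative only, not the original implementation):

def accuracy(output, labels):
    # Illustrative stand-in for the missing helper: fraction of nodes whose
    # highest-scoring class matches the ground-truth label.
    preds = output.max(1)[1].type_as(labels)
    correct = preds.eq(labels).double().sum()
    return correct / len(labels)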


def train(epoch):
    t = time.time()
    model.train()
    optimizer.zero_grad()

    # Forward pass over the full graph; loss and accuracy are computed
    # only on the training nodes. F.nll_loss expects log-probabilities,
    # so the GCN is assumed to end with a log_softmax layer.
    output = model(features, adj)
    loss_train = F.nll_loss(output[idx_train], labels[idx_train])
    acc_train = accuracy(output[idx_train], labels[idx_train])
    loss_train.backward()