Example #1
import os

import torch
import torch.nn as nn

# The model classes (C3D, TCN, AP, RN) and the constants CLASS_NUM, SAMPLE_NUM,
# WINDOW_NUM, CLIP_NUM as well as args are assumed to be defined in earlier cells.

FRAME_NUM = 10  # Number of frames per clip
QUERY_NUM = 5   # Number of query instances per class
INST_NUM = 10   # Number of videos selected from each class (temporary design, to be removed later)
TCN_OUT = 64    # Number of output channels of the TCN

# Define models
c3d = C3D(in_channels=3)                    # 3D ConvNet backbone for RGB clips
c3d = nn.DataParallel(c3d)                  # enable multi-GPU data parallelism
tcn = TCN(245760, [128, 128, 64, TCN_OUT])  # temporal conv net: input feature size, then channel sizes per layer
ap = AP(CLASS_NUM, SAMPLE_NUM, QUERY_NUM, WINDOW_NUM, CLIP_NUM, TCN_OUT)
rn = RN(CLIP_NUM, hidden_size=32)
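
# `device` is not defined in this excerpt; one common choice (an assumption),
# using CUDA when available:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")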

# Move models to GPU
c3d.to(device)
rn.to(device)
tcn.to(device)
ap.to(device)
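
# The my_load helper is not shown in this excerpt. Below is a hypothetical
# minimal version, assuming each .pkl file holds a state_dict saved with
# torch.save and that checkpoints live in a "./models" directory (both are
# assumptions, not taken from the original code).
def my_load(model, filename, model_dir="./models"):
    path = os.path.join(model_dir, filename)
    if os.path.exists(path):
        model.load_state_dict(torch.load(path, map_location=device))
        print("Loaded checkpoint:", path)
    else:
        print("No checkpoint at", path, "- keeping initial weights")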

# Load saved model weights
my_load(c3d, "c3d.pkl")
my_load(tcn, "tcn.pkl")
my_load(ap, "ap.pkl")
my_load(rn, "rn.pkl")

# Testing
with torch.no_grad():
    accuracies = []

    test_ep = 0
    while test_ep < args.test_ep:
Example #2
import time

import torch
import torch.nn as nn

# `device` is assumed to be defined in an earlier cell, e.g.
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

#img = np.transpose(img, (1,2,0))
#plt.imshow(img/10000)
#
#dataset = EEDataset(path, corn_select, transform=(mu, sigma))
#dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False)
#dataiter_norm = iter(dataloader)
#
#batch = next(dataiter_norm)
#x = batch["x"]
#img_norm = x[0,:3,-1,:,:].numpy()
#img_norm = np.transpose(img_norm, (1,2,0))
#plt.imshow(img_norm)

from tcn import TemporalConvNet

# Instantiate the TCN before moving it to the device; num_inputs and
# num_channels (the list of channel sizes per level) are assumed to be
# defined for the dataset at hand.
tcn = TemporalConvNet(num_inputs, num_channels).to(device)
loss_fn = nn.SmoothL1Loss(reduction="mean", beta=0.1)  # quadratic below |error| = 0.1, linear (|error| - 0.05) above
optimizer = torch.optim.Adam(tcn.parameters(), lr=1e-4)
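
# For reference, a quick check of how the beta threshold behaves:
print(loss_fn(torch.tensor([0.05]), torch.zeros(1)))  # 0.5 * 0.05**2 / 0.1 = 0.0125
print(loss_fn(torch.tensor([1.0]), torch.zeros(1)))   # 1.0 - 0.5 * 0.1     = 0.9500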


def train(model, loss_fn, optimizer, loader_train, epochs=1):
    loss_ts = []
    start = time.time()
    for epoch in range(epochs):
        for i, batch in enumerate(loader_train):
            model.train()
            xb, yb = batch.values()  # the batch dict holds the input and target tensors
            xb, yb = xb.float().to(device), yb.float().to(device)
            y_hat = model(xb)
            loss = loss_fn(y_hat, yb)
            optimizer.zero_grad()  # clear gradients accumulated from the previous step
            loss.backward()