Example #1
from torch.utils.data import Dataset  # base class assumed from the usage pattern; the original snippet only shows __init__
from data_generation import DataGeneration as dg


class GroupDataset(Dataset):
    def __init__(self, set_ids):
        super(GroupDataset, self).__init__()
        history = 8  # length of the input frame window
        offset = 2
        # The same set IDs are used for both the train and test splits of the generator.
        self.data_gen = dg(history, offset, set_ids, set_ids)

        self.set_ids = set_ids
        self._refresh_data()  # defined elsewhere in the class (a hypothetical version is sketched below)
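A hypothetical sketch of the pieces GroupDataset would still need to behave as a torch Dataset (the original only shows __init__): a _refresh_data that caches samples from the generator, plus the usual __len__ and __getitem__. The self.samples attribute and the use of generate_sample/num_train_data for refreshing are assumptions, not taken from the original class.

    def _refresh_data(self):
        # Hypothetical: draw one pass of (inputs, outputs) pairs and cache them.
        self.samples = [self.data_gen.generate_sample()
                        for _ in range(self.data_gen.num_train_data)]

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, idx):
        inputs, outputs = self.samples[idx]
        # The default DataLoader collate turns these numpy arrays into tensors;
        # any channel reordering (as in Example #3) could also happen here.
        return inputs, outputs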
Example #2
import numpy as np
import cv2
from data_generation import DataGeneration as dg

# Build a generator with an 8-frame history, offset 2, and set 0 for both splits,
# then dump one (input, output) sample to disk for visual inspection.
a = dg(8, 2, [0], [0])
inp, out = a.generate_sample()
for i in range(8):  # 8 == history
    img = inp[i]
    cv2.imwrite('test/in_' + str(i) + '.jpg', np.uint8(img * 255))
for i in range(8):
    img = out[i]
    cv2.imwrite('test/out_' + str(i) + '.jpg', np.uint8(img * 255))
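One practical note on the writes above: cv2.imwrite does not raise when the target directory is missing, it simply returns False, so the frames can silently vanish if test/ does not exist. A small guard, reusing the names from the snippet:

import os
os.makedirs('test', exist_ok=True)  # make sure the output directory exists
ok = cv2.imwrite('test/in_0.jpg', np.uint8(inp[0] * 255))
assert ok, 'cv2.imwrite failed -- check the output path'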
Example #3
import numpy as np
import torch
import cv2
from data_generation import DataGeneration as dg
from model import ConvAutoencoder as ca

# Load a trained checkpoint and run a single test sample through the autoencoder.
cuda = torch.device('cuda:1')
ckpt = 'checkpoints/model_4_200.pth'
autoencoder = ca()
autoencoder.load_state_dict(torch.load(ckpt))
autoencoder.eval()
autoencoder.to(cuda)

history = 16
offset = 3
data_generator = dg(history, offset)

# Draw one test sample and dump the input frames for visual inspection.
inputs, outputs = data_generator.generate_sample(from_train=False)
for i in range(history):
    img = inputs[i, :, :, :]
    cv2.imwrite('test/' + str(i) + '.jpg', np.uint8(img * 255))

# Reorder to channel-first layouts and add a batch dimension:
# inputs  (history, H, W, C) -> (1, C, history, H, W)
# outputs (H, W, K)          -> (1, K, H, W)
inputs = np.transpose(inputs, (3, 0, 1, 2))
outputs = np.transpose(outputs, (2, 0, 1))
inputs_tensor = np.expand_dims(inputs, 0)
outputs_tensor = np.expand_dims(outputs, 0)
inputs_tensor = torch.tensor(inputs_tensor, dtype=torch.float32, device=cuda)
outputs_tensor = torch.tensor(outputs_tensor, dtype=torch.float32, device=cuda)  # ground-truth targets, kept for comparison

with torch.no_grad():  # inference only, no gradients needed
    outputs_model = autoencoder(inputs_tensor)
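The forward pass above leaves the prediction on the GPU. A short sketch of writing it back out as images, assuming the model output matches the target layout built above, i.e. (1, K, H, W) with values in [0, 1]; the original snippet stops at the forward pass:

# Drop the batch dimension and move the prediction back to the CPU.
pred = outputs_model.detach().cpu().numpy()[0]  # assumed shape: (K, H, W)
for i in range(pred.shape[0]):
    frame = np.uint8(np.clip(pred[i], 0.0, 1.0) * 255)
    cv2.imwrite('test/pred_' + str(i) + '.jpg', frame)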
Example #4
from data_generation import DataGeneration as dg
from model import ConvAutoencoder as ca
import torch
import numpy as np
import cv2
import os

#os.environ["CUDA_VISIBLE_DEVICES"] = '0'

# Generator: 8-frame history, offset 1, set IDs 0-3 for training and 4 for testing.
history = 8
offset = 1
train_set = [0, 1, 2, 3]
test_set = [4]
data_generator = dg(history, offset, train_set, test_set)

cuda = torch.device('cuda:3')
autoencoder = ca()
autoencoder = autoencoder.to(cuda)

batch_size = 1
num_epoch = 200
num_data = data_generator.num_train_data  # number of training samples per epoch
print(num_data)
criterion = torch.nn.BCELoss()  # expects model outputs in [0, 1]
optimizer = torch.optim.Adam(autoencoder.parameters(), lr=0.00001)
for epoch in range(num_epoch + 1):
    train_loss = 0.0
    for i in range(num_data):
        print([i, num_data], end='\r')  # lightweight progress indicator
        inputs_tensor = []
        outputs_tensor = []  # the original example is cut off here; a possible continuation is sketched below
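The example is cut off inside the inner loop. The following is a sketch of how the body might continue, assuming generate_sample(from_train=True) returns (inputs, outputs) arrays in the same layout as Example #3 and that the model output matches the target shape; the empty lists above suggest the original code accumulates a batch, but with batch_size = 1 a single sample per step is enough here.

        inputs_np, outputs_np = data_generator.generate_sample(from_train=True)
        inputs_np = np.transpose(inputs_np, (3, 0, 1, 2))  # (C, history, H, W)
        outputs_np = np.transpose(outputs_np, (2, 0, 1))   # (K, H, W)
        # With batch_size == 1 a single sample forms the whole batch.
        inputs_tensor = torch.tensor(np.expand_dims(inputs_np, 0),
                                     dtype=torch.float32, device=cuda)
        outputs_tensor = torch.tensor(np.expand_dims(outputs_np, 0),
                                      dtype=torch.float32, device=cuda)

        optimizer.zero_grad()
        preds = autoencoder(inputs_tensor)
        loss = criterion(preds, outputs_tensor)
        loss.backward()
        optimizer.step()
        train_loss += loss.item()
    print('epoch {}: loss {:.6f}'.format(epoch, train_loss / num_data))
    # Checkpoint name pattern guessed from the files loaded in Examples #3 and #5.
    if epoch % 50 == 0:
        torch.save(autoencoder.state_dict(),
                   'checkpoints/model_' + str(test_set[0]) + '_' + str(epoch) + '.pth')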
Example #5
import numpy as np
import torch
import cv2
from data_generation import DataGeneration as dg
from model import ConvAutoencoder as ca
# `mode`, `inf_mode_0` and `inf_mode_1` are presumably defined earlier in the original script.

test_num = 2
cuda = torch.device('cuda:2')
# Both branches of the original if/else loaded the same file, so the selection is
# collapsed here; `mode` only changes the inference routine further down.
ckpt = 'checkpoints/model_fpsfix_' + str(test_num) + '_200.pth'
print('Loading: {}'.format(ckpt))
autoencoder = ca()
autoencoder.load_state_dict(torch.load(ckpt, map_location='cpu'))
autoencoder.eval()
autoencoder.to(cuda)

history = 8       # frames fed to the model
pred_length = 8   # frames to roll out during inference
offset = 2
data_generator = dg(history, offset, [], [test_num])  # test split only

threshold = 0.05  # not used in the lines shown below (presumably applied to the predictions later)

######################################################################################
inputs_raw, outputs_raw = data_generator.generate_sample(from_train=False)
for i in range(history):
    img = inputs_raw[i, :, :, :]
    cv2.imwrite('test/' + str(i) + '.jpg', np.uint8(img * 255))

input_sequence = list(inputs_raw)
if mode == 0:
    output_sequence = inf_mode_0(input_sequence, pred_length, autoencoder, cuda)
else:
    output_sequence = inf_mode_1(input_sequence, autoencoder, cuda)
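threshold is set above but never used in the lines shown; presumably the rolled-out frames are binarized afterwards. A minimal sketch of that step, assuming output_sequence is a list of float frames in [0, 1] (consistent with a BCE-trained model):

for i, frame in enumerate(output_sequence):
    mask = (np.asarray(frame) > threshold).astype(np.uint8) * 255  # binary mask as 0/255
    cv2.imwrite('test/pred_mask_' + str(i) + '.jpg', mask)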