Exemplo n.º 1
0
    def val_dataloader(self):
        """Build the validation DataLoader, caching it (and its batch count) on self.

        Returns:
            torch.utils.data.DataLoader over the validation dataset.
        """
        # NOTE(review): shuffle=True on a *validation* loader is unusual —
        # confirm that non-deterministic validation order is intended.
        dataset = VisuomotorDataset(
            self.DATASET_PATH,
            CustomTransformation().get_transformation(),
            (64, 64),
        )
        loader = torch.utils.data.DataLoader(
            dataset, batch_size=self.batch_size, shuffle=True
        )
        # len(DataLoader) is the number of *batches*, not individual images,
        # despite the attribute name.
        self.num_val_imgs = len(loader)
        self.val_loader = loader
        return loader
Exemplo n.º 2
0
# Reproducibility: fix the RNG seed for weight init and data shuffling.
seed = 42
torch.manual_seed(seed)

# ---------------- hyper-parameters ----------------
batch_size = 512
epochs = 50
learning_rate = 1e-4

DATASET_PATH = "/home/anirudh/Desktop/main_dataset/**/*.png"
MODEL_PATH = "/home/anirudh/HBRS/Master-Thesis/NJ-2020-thesis/AutoEncoders/model/" \
                  "gpu_ae_prototype.pth"
MODEL_SAVE_PATH = "/home/anirudh/HBRS/Master-Thesis/NJ-2020-thesis/AutoEncoders/model/" \
                  "ae_10_200.pth"
# --------------------------------------------------------------

transform = CustomTransformation().get_transformation()
train_dataset = VisuomotorDataset(DATASET_PATH, transform, (64, 64))

train_loader = torch.utils.data.DataLoader(train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)

#  use gpu if available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(torch.cuda.is_available())

# create a model from `AE` autoencoder class
# load it to the specified device, either gpu or cpu
model = AutoEncoder(input_shape=64 * 64, output_shape=10).to(device)
# map_location=device lets a checkpoint saved on GPU load on a CPU-only
# host (and vice versa); without it torch.load fails when CUDA is absent.
model.load_state_dict(torch.load(MODEL_PATH, map_location=device))

# select the interactive matplotlib backend.  The `warn` keyword was
# deprecated in matplotlib 3.0 and removed in 3.1, so passing it raises
# TypeError on modern installs — only `force` is passed now.
matplotlib.use('TkAgg', force=True)

from src.autoencoders.spatial_autoencoder import DeepSpatialAutoencoder, DSAE_Loss
from src.dataset_utils.vm_dataset import VisuomotorDataset

# ---------------- configuration ----------------
EPOCHS = 100
INPUT_SIZE = (64, 64)
INPUT_DIM = 64 * 64
batch_size = 35

DATASET_PATH = "/home/anirudh/Desktop/main_dataset/**/*.png"
MODEL_PATH = "/home/anirudh/HBRS/Master-Thesis/NJ-2020-thesis/AutoEncoders/model" \
                  "/cnn_ds_vae_small_1000_gpu.pth"

transform = transforms.Compose([transforms.ToTensor()])
train_dataset = VisuomotorDataset(DATASET_PATH, transform, INPUT_SIZE)

# NOTE(review): shuffle=True while the model is in eval() mode below —
# confirm whether a deterministic order is preferred for inference.
dataloader = torch.utils.data.DataLoader(train_dataset,
                                         batch_size=batch_size,
                                         shuffle=True,
                                         num_workers=2)

print('Number of samples: ', len(train_dataset))

# Rebuild the architecture and restore trained weights.  The checkpoint
# name suggests it was saved from a GPU run; map_location="cpu" lets it
# load on CPU-only hosts (the model is never moved to a device here).
ds_vae = DeepSpatialAutoencoder(image_output_size=INPUT_SIZE)
ds_vae.load_state_dict(torch.load(MODEL_PATH, map_location="cpu"))
ds_vae.eval()  # inference mode: freezes dropout / batch-norm statistics

with torch.no_grad():
    for i, data in enumerate(dataloader, 0):
        # each element is a (images, labels) pair from VisuomotorDataset
        inputs, classes = data
Exemplo n.º 4
0
from src.autoencoders.basic_autoencoder import AutoEncoder
from src.dataset_utils.vm_dataset import VisuomotorDataset

MODEL_SAVE = "/home/anirudh/HBRS/Master-Thesis/NJ-2020-thesis/AutoEncoders/model/" \
             "gpu_ae_prototype.pth"
INPUT_SHAPE = (64, 64)
INPUT_DIM = 64 * 64

# Restore the trained autoencoder.  The checkpoint name suggests it was
# saved from a GPU run; map_location="cpu" makes the load work on
# CPU-only hosts (inference below runs on CPU — no .to(device) call).
model = AutoEncoder(input_shape=INPUT_DIM, output_shape=10)
model.load_state_dict(torch.load(MODEL_SAVE, map_location="cpu"))
model.eval()  # inference mode: freezes dropout / batch-norm statistics

transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor()])

DATASET_PATH = "/home/anirudh/Desktop/main_dataset/door_5/*.png"
test_dataset = VisuomotorDataset(DATASET_PATH, transform, INPUT_SHAPE)

test_loader = torch.utils.data.DataLoader(test_dataset,
                                          batch_size=10,
                                          shuffle=False)

test_examples = None

with torch.no_grad():
    for batch_features in test_loader:
        # each batch is a (images, labels) pair; keep only the images
        batch_features = batch_features[0]
        # flatten images to (N, INPUT_DIM) for the fully-connected AE
        # (assumes single-channel 64x64 inputs — TODO confirm)
        test_examples = batch_features.view(-1, INPUT_DIM)
        reconstruction = model(test_examples)
        break  # only the first batch is needed for the reconstruction demo

with torch.no_grad():