Code Example #1
File: lstm.py  Project: shwinshaker/CS253-PA4
from music_dataloader import createLoaders


def preprocessing(chunk_size=100):

    # load data
    # `extras` is the module-level DataLoader configuration dict built in the
    # CUDA-detection block shown in the examples below
    loaders, encoder = createLoaders(extras=extras, chunk_size=chunk_size)
    dataloaders = dict(zip(['train', 'val', 'test'], loaders))
    print('------- Info ---------')
    for phase in dataloaders:
        print('- %s size: %i' % (phase, len(dataloaders[phase])))
    print('----------------------')
    
    return dataloaders, encoder
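
A minimal usage sketch of preprocessing() (not part of the original file); the (inputs, targets) batch layout is an assumption:

# Hypothetical usage: build the loaders and peek at one batch per split.
dataloaders, encoder = preprocessing(chunk_size=100)
for phase, loader in dataloaders.items():
    inputs, targets = next(iter(loader))  # assumed batch layout
    print(phase, tuple(inputs.shape), tuple(targets.shape))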
Code Example #2
import utils as ut
import torch
from torch.utils.data import DataLoader
import torch.nn as nn
from torch.nn import functional as F
from torch.autograd import Variable
from torch import optim
from music_dataloader import createLoaders
import numpy as np

# Check if your system supports CUDA
use_cuda = torch.cuda.is_available()

# Setup GPU optimization if CUDA is supported
if use_cuda:
    computing_device = torch.device("cuda")
    extras = {"num_workers": 1, "pin_memory": True}
    print("CUDA is supported")
else:  # Otherwise, train on the CPU
    computing_device = torch.device("cpu")
    extras = False
    print("CUDA NOT supported")

# load data
train_loader, val_loader, test_loader = createLoaders(extras=extras)

criterion = nn.CrossEntropyLoss()
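
The original file continues past this excerpt. The following is a minimal sketch of how the pieces above could be wired into a single optimization step; the model, optimizer, and (inputs, targets) batch layout are assumptions, not taken from the project.

def train_one_step(model, optimizer, batch):
    # Sketch of one forward/backward pass, assuming `batch` is an
    # (inputs, targets) pair and `model` maps inputs to (N, C) logits.
    inputs, targets = batch
    inputs, targets = inputs.to(computing_device), targets.to(computing_device)

    optimizer.zero_grad()
    outputs = model(inputs)
    loss = criterion(outputs, targets)  # CrossEntropyLoss over class indices
    loss.backward()
    optimizer.step()
    return loss.item()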
Code Example #3
import torch
import torch.nn as nn
import rnn  # project module providing the RNN model
from music_dataloader import createLoaders

# Check if your system supports CUDA
use_cuda = torch.cuda.is_available()

# Setup GPU optimization if CUDA is supported
if use_cuda:
    computing_device = torch.device("cuda")
    extras = {"num_workers": 1, "pin_memory": True}
    print("CUDA is supported")
else:  # Otherwise, train on the CPU
    computing_device = torch.device("cpu")
    extras = False
    print("CUDA NOT supported")

# load data
train_loader, val_loader, test_loader, one_hot_length = createLoaders(
    extras=extras)

# hidden_size is a hyperparameter defined elsewhere in the original script
RNN = rnn.RNN(hidden_size, one_hot_length, computing_device)
RNN = RNN.to(computing_device)
print("Model on CUDA?", next(RNN.parameters()).is_cuda)
print("Model on CUDA?",
      next(RNN.parameters()).is_cuda,
      file=open("output.txt", "a"))
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(RNN.parameters())

# Track the loss across training
chunk_train_loss = []

# Track the averaged training and validation loss every N chunks over the whole training run
batch_train_loss = []
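
The excerpt ends before the training loop. A hedged sketch of how the two lists might be filled, assuming the train loader yields (inputs, targets) chunks of class indices and that the RNN's forward returns (logits, hidden):

N = 100  # assumed logging interval, not from the original file
running = []
for i, (inputs, targets) in enumerate(train_loader):
    inputs, targets = inputs.to(computing_device), targets.to(computing_device)

    optimizer.zero_grad()
    outputs, _ = RNN(inputs)  # assumed forward signature: returns (logits, hidden)
    loss = criterion(outputs.view(-1, one_hot_length), targets.view(-1))
    loss.backward()
    optimizer.step()

    chunk_train_loss.append(loss.item())
    running.append(loss.item())
    if (i + 1) % N == 0:
        # average the last N per-chunk losses
        batch_train_loss.append(sum(running) / len(running))
        running = []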