Example #1
def main(config_file):
    data_config, _ = get_config_from_json(config_file)
    # Load the dataset and send model to CUDA if available
    training_data = Dataset(data_config, mode='train')
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    print(device)
    gan = GAN(device=device)

    # For model evaluation
    temp = data.DataLoader(training_data, batch_size=8, shuffle=False)
    _, attr = next(iter(temp))
    attr = attr.to(device)
    z = torch.randn((8, 216, 1, 1), dtype=torch.double, device=device)

    trainer = GANTrainer(training_data,
                         batch_size=32,
                         model=gan,
                         device=device)

    abs_it = 0
    for epoch_id in range(10):
        it = trainer.train_epoch(abs_it,
                                 epoch_id=epoch_id,
                                 z_eval=z,
                                 attr_eval=attr)
        abs_it = it

    print('Done training.')
Example #2
def main(config_file):
    data_config, _ = get_config_from_json(config_file)
    # Load the dataset and send model to CUDA if available
    training_data = Dataset(data_config, mode='train', dtype=torch.float)
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    print(device)
    inn = cINN(device=device, adversarial=False)

    trainer = INNTrainer(training_data,
                         batch_size=32,
                         model=inn,
                         device=device)

    # For model evaluation
    temp = data.DataLoader(training_data, batch_size=8, shuffle=True)
    x_eval, attr_eval = next(iter(temp))
    x_eval = x_eval.to(device)
    attr_eval = attr_eval.to(device)

    # _, attr_rand = next(iter(temp))
    # attr_rand = attr_rand.to(device)

    # Flip the selected binary attributes (0 <-> 1) in the evaluation batch
    attr_rand = attr_eval.clone().detach()
    selected_attr = [17, 39]
    for i in selected_attr:
        i_0 = attr_eval[:, i] == 0
        i_1 = attr_eval[:, i] == 1
        attr_rand[i_0, i] = 1
        attr_rand[i_1, i] = 0

    z_eval = torch.randn((8, 3 * 64 * 64), dtype=torch.float, device=device)

    mode = ['generate', 'interpolate']
    params = {
        'z_eval': z_eval,
        'x_eval': x_eval,
        'attr_eval': attr_eval,
    }

    abs_it = 0
    for epoch_id in range(10):
        it = trainer.train_epoch(abs_it=abs_it,
                                 epoch_id=epoch_id,
                                 mode=mode,
                                 params=params,
                                 save_image_interval=10)
        abs_it = it

    print('Done training.')
Example #3
    def __init__(self, config_path, model_state):
        """
        :param config_path: path to the configuration file used to create saved model
        :param model_state: path to .pt file output by Pytorch's save method
        """
        self.configs, _ = get_config_from_json(config_path)
        self.model = BidirectionalLSTM(model_configs=self.configs)

        if is_available():
            self.model.load_state_dict(py_load(model_state))
        else:
            self.model.load_state_dict(
                py_load(model_state,
                        map_location=lambda storage, loc: storage))

        self.model.eval()
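
A minimal usage sketch for the wrapper above, assuming it belongs to a predictor class; the class name EmotionPredictor, the input tensor x, and the file paths below are illustrative assumptions, only the constructor arguments come from the excerpt itself.

import torch

# Hypothetical usage; EmotionPredictor is an assumed class name and the paths
# are placeholders, only config_path/model_state mirror the constructor above.
predictor = EmotionPredictor(config_path='./configs/config_1.json',
                             model_state='./models/good_model.pt')
with torch.no_grad():
    scores = predictor.model(x)  # x: a preprocessed audio tensor, as in Example #7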
Example #4
def main(config_file):
    data_config, _ = get_config_from_json(config_file)
    # Load the dataset and send model to CUDA if available
    training_data = Dataset(data_config, mode='train', dtype=torch.float)
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    print(device)
    inn = cINN(device=device)

    #trainer = INNTrainer(training_data, batch_size=32, model=inn, device=device)

    # For model evaluation
    temp = data.DataLoader(training_data, batch_size=4, shuffle=True)
    x, attr = next(iter(temp))
    attr = attr.to(device)
    x = x.to(device)

    z = torch.randn((4, 3 * 64 * 64), dtype=torch.float, device=device)

    attr = torch.squeeze(attr)
    #x = inn.reverse_sample(z, attr)
    #z_inn,j = inn.forward(x, attr)

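    # Round trip x -> z_inn -> x_inn; with an invertible network the
    # reconstruction error printed below should be near zero.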
    z_inn, j = inn.forward(x, attr)
    x_inn = inn.reverse_sample(z_inn, attr)

    from torchvision.utils import save_image
    save_image(x, "verify_x.png")
    save_image(x_inn, "verify_xinn.png")

    import numpy as np
    x_inn = x_inn.cpu().detach().numpy()
    x = x.cpu().detach().numpy()

    diff = np.abs(x - x_inn)

    print(np.mean(diff), np.std(diff))
    print(np.mean(x), np.std(x))
    print(np.mean(x_inn), np.std(x_inn))
Example #5
from utils.config_utils import get_config_from_json
from random import random
from os import path, listdir, rename, rmdir


def move_file(folder_path, destination_path, file_name):
    """
    Moves an audio file from its directory to either the train or test set directory.

    :param folder_path:         the path to the directory containing the audio file
    :param destination_path:    the path to the test or train set directory
    :param file_name:           the name of the audio file
    :return:                    None
    """
    rename(path.join(folder_path, file_name), path.join(destination_path, file_name))


def process_folders(configs):
    """
    Iterates through a directory containing folders of audio data and segments them into a training and test set.

    :param configs:     a Bunch object of the run's configuration file
    :return:            None
    """
    train_directory, test_directory = create_directories(configs)
    for folder in filter(lambda v: "." not in v, listdir(configs.raw_data_dir)):
        folder_path = path.join(configs.raw_data_dir, folder)
        for file in filter(lambda v: ".wav" in v, listdir(folder_path)):
            destination = train_directory if random() <= configs.p_train else test_directory
            move_file(folder_path, destination, file)
        rmdir(folder_path)

RUN_CONFIG_FILE = "config_1.json"
model_configs, _ = get_config_from_json(path.join('/home/hugolucas/PycharmProjects/sound/configs', RUN_CONFIG_FILE))

process_folders(model_configs)
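
create_directories is called by process_folders above but is not shown in this example. A minimal sketch of what it is assumed to do, namely create the train and test directories and return their paths; the config attribute names train_data_dir and test_data_dir are assumptions, not taken from the original code.

from os import makedirs


def create_directories(configs):
    # Hypothetical stand-in for the helper used above; the attribute names
    # train_data_dir and test_data_dir are assumptions.
    makedirs(configs.train_data_dir, exist_ok=True)
    makedirs(configs.test_data_dir, exist_ok=True)
    return configs.train_data_dir, configs.test_data_dir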
Example #6
from utils.config_utils import get_config_from_json
from torch.utils.data import DataLoader
from trainer import AudioTrainer
from logger import AudioLogger
from torch.optim import Adam
from data import AudioData
"""
This script should be used to coordinate the training of a PyTorch model. 
"""

# This import must be maintained in order for script to work on paperspace
from os import path

# Any parameters that may change from run-to-run
RUN_CONFIG_FILE = "config_1.json"

# Run Configs
model_configs, _ = get_config_from_json(path.join('./configs',
                                                  RUN_CONFIG_FILE))

# Training Data
train_data = AudioData(configs=model_configs)
train_loader = DataLoader(dataset=train_data,
                          batch_size=model_configs.batch_size,
                          shuffle=True,
                          num_workers=4)

# Test Data
test_data = AudioData(configs=model_configs, training_data=False)
test_loader = DataLoader(dataset=test_data,
                         batch_size=model_configs.batch_size,
                         shuffle=True,
                         num_workers=4)
Example #7
from utils.config_utils import get_config_from_json
from torch import from_numpy, FloatTensor, topk
from torch.autograd.variable import Variable
from data import process_audio, pad_array
from torch.nn.functional import softmax
from utils.model_utils import emotions
from librosa import load as lib_load
from model import BidirectionalLSTM
from torch import load as py_load
from os import path

model_configs, _ = get_config_from_json(path.join('./configs',
                                                  "config_1.json"))
model = BidirectionalLSTM(model_configs=model_configs)
model.load_state_dict(
    py_load("/home/hugolucas/PycharmProjects/sound/models/good_model.pt"))
model.eval()

test_file, sr = lib_load(
    "/home/hugolucas/emotion_dataset/test/03-01-08-01-01-02-01.wav",
    duration=5)
test_file = process_audio(test_file, sr)
test_file = pad_array(test_file)

x = Variable(from_numpy(test_file))
x = x.type(FloatTensor)
x = x.unsqueeze(0)

output = softmax(model(x), dim=1)
print(output)
_, output = topk(output, k=1)
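
The emotions mapping imported at the top of this example is never used in the snippet. A minimal continuation, assuming emotions can be indexed by class id to give a label string (its actual structure is not shown here):

# Illustrative continuation; assumes `emotions` maps a class index to a label.
predicted_index = output.item()
print('Predicted emotion:', emotions[predicted_index])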