Example #1
    def set_pretrained_model(self):
        rel_path = "../utils/pretrained_models/vae_model.pth"
        self.model = VAE()

        self.model = loadStateDict(self.model, rel_path)
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # Module.train() returns the module itself (always truthy) and switches
        # it INTO training mode; test the .training flag instead.
        if self.model.training:
            self.model.eval()
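
Every example here relies on a project-level loadStateDict helper that is not shown. A minimal sketch of what such a helper could look like, assuming the checkpoint was saved with torch.save(model.state_dict(), path) (the project's real helper may well differ):

    import torch

    def loadStateDict(model, path):
        # map_location='cpu' lets GPU-trained weights load on CPU-only
        # machines; the caller moves the model to its device afterwards.
        state_dict = torch.load(path, map_location='cpu')
        model.load_state_dict(state_dict)
        return model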
Example #2
 def set_model_path(self):
     # open a file picker starting in the parent directory
     file_path = QFileDialog.getOpenFileName(self, 'Open File', '..')[0]
     self.model = VAE()
     try:
         self.model = loadModel(self.model, file_path, dataParallelModel=True)
     except Exception:  # avoid a bare except; fall back to a plain state dict
         self.model = loadStateDict(self.model, file_path)
     self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
     if self.model.training:
         self.model.eval()
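
The loadModel fallback with dataParallelModel=True suggests that some checkpoints were saved from an nn.DataParallel-wrapped model, whose state-dict keys carry a "module." prefix. A sketch under that assumption (an illustration, not the project's implementation):

    import torch

    def loadModel(model, path, dataParallelModel=False):
        state_dict = torch.load(path, map_location='cpu')
        if dataParallelModel:
            # nn.DataParallel prefixes every parameter key with "module.";
            # strip it so the weights fit an unwrapped model.
            state_dict = {key.replace('module.', '', 1): value
                          for key, value in state_dict.items()}
        model.load_state_dict(state_dict)
        return model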
Example #3
    if not args.model_path:
        print("Please set the model path to autoencoder model using \
            --model_path /path/to/model")
        sys.exit()

    # initialize live input instrument
    live_instrument = LiveParser(port=args.port,
                                 bars=args.bars,
                                 bpm=args.bpm,
                                 ppq=24)
    live_instrument.open_inport(live_instrument.parse_notes)
    live_instrument.open_outport()

    # load the model and its corresponding weights
    model = VAE()
    try:
        model = loadStateDict(model, args.model_path)
    except Exception:  # avoid a bare except; fall back to loadModel
        model = loadModel(model, args.model_path)

    # check for gpu support and send model to gpu if possible
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(device)
    # make sure model is in eval mode
    if model.training:
        model.eval()

    while True:
        vae_main(live_instrument, model, args)
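
During generation it also helps to disable autograd entirely, not just to switch to eval mode. A small sketch continuing from the example above (the input shape is a made-up placeholder; the VAE's actual interface is not shown here):

    model.eval()               # deterministic dropout / batch-norm behaviour
    with torch.no_grad():      # no gradient bookkeeping during generation
        dummy = torch.randn(1, 1, 96, 60, device=device)  # placeholder shape
        output = model(dummy)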
Example #4
 def load_model(self, vae_path):
     vae_model = VAE()
     vae_model = loadStateDict(vae_model, vae_path)
     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
     vae_model = vae_model.to(device)
     return vae_model
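
Note that this helper returns the model without switching it out of training mode; a hypothetical caller would do that itself:

    vae_model = self.load_model('pretrained_models/vae_model.pth')  # hypothetical path
    vae_model.eval()  # make dropout / batch-norm layers deterministic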
Example #5
    ############################################

    writer = SummaryWriter(log_dir=('lstm_many2many_plots/' + args.model_name))
    writer.add_text("dataset", dataset)
    writer.add_text("learning_rate", str(learning_rate))
    writer.add_text("learning_rate_decay", str(lr_decay))
    writer.add_text("learning_rate_decay_step", str(lr_decay_step))
    writer.add_text("lstm_layers", str(lstm_layers))
    writer.add_text("hidden_size", str(hidden_size))
    writer.add_text("batch_size", str(batch_size))
    writer.add_text("autoencoder_model", args.vae_path)

    # load the variational autoencoder
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    autoencoder_model = VAE()
    autoencoder_model = loadStateDict(autoencoder_model, args.vae_path)
    autoencoder_model = autoencoder_model.to(device)

    # load dataset from npz
    data = np.load(dataset)
    train_dataset = data['train']  #[0:10]
    test_dataset = data['test']  #[0:10]
    data.close()
    print("train set: {}".format(train_dataset.shape))
    print("test set: {}".format(test_dataset.shape))
    # print("valid set: {}".format(valid_dataset.shape))

    train_dataset = createDataset(train_dataset, seq_length=seq_length)
    test_dataset = createDataset(test_dataset, seq_length=seq_length)
    # valid_dataset = createDataset(valid_dataset, seq_length=seq_length)
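
createDataset is another project helper that is not shown here; presumably it slices each piano-roll array into fixed-length sequences for the LSTM. A minimal sketch under that assumption (the windowing strategy is a guess, not the project's code):

    import numpy as np
    import torch
    from torch.utils.data import TensorDataset

    def createDataset(data, seq_length=8):
        # Cut each song into non-overlapping windows of seq_length steps
        # and wrap them in a TensorDataset for use with a DataLoader.
        windows = [song[i:i + seq_length]
                   for song in data
                   for i in range(0, len(song) - seq_length + 1, seq_length)]
        sequences = torch.from_numpy(np.stack(windows)).float()
        return TensorDataset(sequences)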
Example #6
            len(valid_dataset)))

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = VAE(embedding_size=embedding_size,
                covariance_param=covariance_param)
    if torch.cuda.device_count() > 1:
        print('Using {} GPUs!'.format(torch.cuda.device_count()))
        model = nn.DataParallel(model)
    model = model.to(device)
    writer.add_text("pytorch model", str(model))
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)

    # Load Checkpoint
    if args.checkpoint:
        print("Trying to load checkpoint...")
        model = loadStateDict(model, args.checkpoint)

    checkpoint_path = 'checkpoints_vae/'
    best_valid_loss = np.inf
    if learning_rate_decay:
        print("Learning rate decay activated!")
        scheduler = optim.lr_scheduler.StepLR(optimizer,
                                              step_size=lr_decay_step,
                                              gamma=learning_rate_decay)
    for epoch in range(1, epochs + 1):
        if learning_rate_decay:
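            # NOTE: since PyTorch 1.1 the scheduler should be stepped after the
            # epoch's optimizer.step() calls; stepping it first, as done here,
            # skips the first value of the learning-rate schedule.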
            scheduler.step()
        # training with plots
        train_loss, cos_sim_train, kld_train, weights, embedding = train(epoch)
        writer.add_scalar('loss/train_loss_epoch', train_loss, epoch)
        writer.add_scalar('loss/train_reconstruction_loss_epoch',