Example #1
def testSingleImage(
    network: torch.nn.Module, image: Image, transforms
) -> typing.Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    Logger.log("Testing single image!", logger="main")
    data: torch.Tensor = transforms(image)

    data = util.putOnDevice(data)

    Logger.log("Resizing image.")
    data.resize_(1, 3, 224, 224)

    if Config.useCuda():
        network = network.cuda()
    else:
        network = network.cpu()

    Logger.log("Triggering dropout layers.")
    network.train()
    outputs: typing.List[torch.Tensor] = []

    # Multiple stochastic forward passes; dropout stays active in train mode.
    for _ in range(Config.getArgs().num_of_tests):
        outputs.append(network(data))

    outs: np.ndarray = np.zeros((len(outputs), 7))
    for i, out in enumerate(outputs):
        outs[i] = out.detach().cpu().numpy()

    Logger.log("Getting uncertainty.")
    certPosMat, certOriMat, _, _ = getUncertainty(
        output=torch.from_numpy(outs), returnMatrix=True)

    Logger.log("Getting actual output.", logger="main")
    network.eval()
    output: np.ndarray = network(data).detach().cpu().numpy()
    return output[0], outs, certPosMat, certOriMat
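Note: a minimal usage sketch for the routine above, assuming a trained
network with a 7-dimensional output head and torchvision preprocessing
(the file name and transform values are illustrative, not from the source):

from PIL import Image
import torchvision.transforms as T

transforms = T.Compose([
    T.Resize((224, 224)),
    T.ToTensor(),
])

image = Image.open("query.png").convert("RGB")
output, samples, certPosMat, certOriMat = testSingleImage(network, image, transforms)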
Example #2
def train(epoch):
    model.train()
    train_loss = 0
    for batch_idx, (data, _) in enumerate(train_loader):
        # Reshape to (N, 1, 64, 96); use the actual batch size so the
        # final, smaller batch does not break resize_.
        data.resize_((data.size(0), 1, 64, 96))
        data = data.to(device)
        optimizer.zero_grad()
        recon_batch, mu, logvar = model(data)
        loss = loss_function(recon_batch, data, mu, logvar)
        loss.backward()
        train_loss += loss.item()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader),
                loss.item() / len(data)))

    print('====> Epoch: {} Average loss: {:.4f}'.format(
          epoch, train_loss / len(train_loader.dataset)))
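Note: loss_function is not shown; it presumably follows the standard VAE
objective (reconstruction term plus KL divergence). A minimal sketch,
assuming a sigmoid decoder output and the flattened 64x96 (6144-element)
input:

import torch
from torch.nn import functional as F

def loss_function(recon_x, x, mu, logvar):
    # Reconstruction term: binary cross-entropy over the flattened image.
    BCE = F.binary_cross_entropy(recon_x.view(-1, 6144), x.view(-1, 6144),
                                 reduction='sum')
    # KL divergence between the approximate posterior and a unit Gaussian.
    KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    return BCE + KLD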
Example #3
    def __getitem__(self, index):
        fpath = os.path.join(self.wav_dir, self.df.fname[index])
        y, sr = librosa.load(fpath, sr=self.sr)
        if sr is None:
            print('WARNING:', fpath)
            sr = 44100  # fall back to the standard 44.1 kHz rate

        # Random crop
        y = random_crop(y, int(self.max_length * sr))

        # Feature extraction
        n_fft = int(self.window_size * sr)
        hop_length = int(self.hop_size * sr)

        if self.feature == 'mfcc':
            feature = librosa.feature.mfcc(y=y,
                                           sr=sr,
                                           n_fft=n_fft,
                                           hop_length=hop_length,
                                           n_mfcc=self.n_feature)
        elif self.feature == 'melgram':
            feature = librosa.feature.melspectrogram(y=y,
                                                     sr=sr,
                                                     n_fft=n_fft,
                                                     hop_length=hop_length,
                                                     n_mels=self.n_feature)
        else:
            raise ValueError('Invalid feature name: %s' % self.feature)

        data = torch.from_numpy(feature).float()
        s = data.size()

        if self.model_type == 'alex2d' or self.model_type == 'resnet':
            # Conv2d expects (channel, features, frames)
            data.resize_(1, s[0], s[1])
        elif self.model_type == 'alex1d' or self.model_type == 'lstm':
            # Conv1d expects (features, frames)
            data.resize_(s[0], s[1])
        else:
            raise ValueError('Invalid model type: %s' % self.model_type)

        # Standardize to zero mean, unit variance (skip the degenerate case)
        mean = data.mean()
        std = data.std()
        if std != 0:
            data.add_(-mean)
            data.div_(std)

        if self.test:
            # In test mode there is no ground-truth label, so return data only
            return data
        else:
            label = self.df.label_idx[index]
            return data, label
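Note: random_crop is not defined in this snippet; a minimal sketch of
what such a helper typically looks like (zero-padding short clips is an
assumption, not from the source):

import numpy as np

def random_crop(y, length):
    # Pad short clips with zeros instead of cropping.
    if len(y) <= length:
        return np.pad(y, (0, length - len(y)), mode='constant')
    # Otherwise take a window of `length` samples at a random offset.
    start = np.random.randint(0, len(y) - length)
    return y[start:start + length]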
Example #4
def test(testloader, epoch, isVal):
    model.eval()
    test_loss = 0
    correct = 0

    all_labels = None
    all_preds = None

    hidden = model.init_hidden(eval_batch_size)

    for i, (data, target) in enumerate(testloader, 0):
        if data.size(0) < args.batchSize * args.bptt:
            smaller_batch_size = data.size(0) // args.bptt
            if smaller_batch_size == 0:
                break
            data = data[:smaller_batch_size * args.bptt]
            hidden = model.init_hidden(smaller_batch_size)
            data.resize_(args.bptt, smaller_batch_size, inputSize)
            target = target[:(smaller_batch_size * args.bptt)]
        else:
            data.resize_(args.bptt, args.batchSize, inputSize)

        # Legacy (pre-0.4) Variable buffers: copy the batch into the
        # preallocated `input` and `label` Variables.
        input.data.resize_(data.size()).copy_(data)
        label.data.resize_(target.size()).copy_(target)

        output, hidden = model(input, hidden)

        test_loss += criterion(output, label)
        # Get the index of the max log-probability.
        pred = output.data.max(1)[1]
        correct += pred.eq(label.data).cpu().sum()
        if all_labels is None:
            all_labels = target
            all_preds = output.data[:, 1]
        else:
            all_labels = torch.cat((all_labels, target), 0)
            all_preds = torch.cat((all_preds, output.data[:, 1]), 0)

    test_loss /= len(testloader)

    auc = metrics.roc_auc_score(all_labels.cpu().numpy(),
                                all_preds.cpu().numpy())
    if isVal:
        print(
            '\n[%d/%d] ||VAL|| Average loss: %.4f, Accuracy: %d / %d (%.1f) AUC : %.6f \n'
            % (epoch, args.epochs, test_loss.data[0], correct,
               len(testloader.dataset),
               100. * correct / len(testloader.dataset), auc))
    else:
        print(
            '\n[%d/%d] ||TEST|| Average loss: %.4f, Accuracy: %d / %d (%.1f) AUC : %.6f \n'
            % (epoch, args.epochs, test_loss.data[0], correct,
               len(testloader.dataset),
               100. * correct / len(testloader.dataset), auc))
    return test_loss.data[0]
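Note: the resize_ calls above assume each loaded batch is a flat run of
batchSize * bptt timesteps. A small self-contained check of that layout
(sizes are illustrative):

import torch

bptt, batch_size, input_size = 4, 2, 3             # illustrative sizes
data = torch.randn(bptt * batch_size, input_size)  # as the loader yields it
data.resize_(bptt, batch_size, input_size)         # viewed as (seq, batch, features)
assert data.shape == (bptt, batch_size, input_size)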
Example #5
def test(epoch):
    model.eval()
    test_loss = 0
    with torch.no_grad():
        for i, (data, _) in enumerate(test_loader):
            data = data.to(device)
            recon_batch, mu, logvar = model(data)
            test_loss += loss_function(recon_batch, data, mu, logvar).item()
            if i == 0:
                n = min(data.size(0), 20)
                # Use the actual batch size so the last, smaller batch works.
                data.resize_((data.size(0), 1, 64, 96))
                comparison = torch.cat(
                    [data[:n], recon_batch.view(-1, 1, 64, 96)[:n]])
                save_image(comparison.cpu(),
                           '../bin/results/reconstruction_' + str(epoch) + '.png',
                           nrow=n)

    test_loss /= len(test_loader.dataset)
    print('====> Test set loss: {:.4f}'.format(test_loss))
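Note: the train/test pair in Examples #2 and #5 would typically be driven
by a loop like this (the epoch count is illustrative):

for epoch in range(1, args.epochs + 1):
    train(epoch)
    test(epoch)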
Example #6
def train(trainloader, epoch):
    model.train()
    hidden = model.init_hidden(args.batchSize)

    for i, (data, target) in enumerate(trainloader, 0):
        if data.size(0) < args.batchSize * args.bptt:
            smaller_batch_size = data.size(0) // args.bptt
            if smaller_batch_size == 0:
                break
            data = data[:smaller_batch_size * args.bptt]
            hidden = model.init_hidden(smaller_batch_size)
            data.resize_(args.bptt, smaller_batch_size, inputSize)
            target = target[:(smaller_batch_size * args.bptt)]
        else:
            data.resize_(args.bptt, args.batchSize, inputSize)

        # Legacy (pre-0.4) Variable buffers, as in Example #4.
        input.data.resize_(data.size()).copy_(data)
        if args.noise > 0:
            # Optionally corrupt the input with Gaussian noise.
            noise = torch.FloatTensor(data.size()).normal_(0, args.noise)
            if args.cuda:
                noise = noise.cuda()
            input.data.add_(noise)
        label.data.resize_(target.size()).copy_(target)

        hidden = repackage_hidden(hidden)
        model.zero_grad()

        output, hidden = model(input, hidden)

        loss = criterion(output, label)
        loss.backward()

        if args.optimizer == 'base':
            # Plain SGD step with the learning rate scaled by the clip factor.
            clipped_lr = lr * clip_gradient(model, args.clip)
            for p in model.parameters():
                p.data.add_(-clipped_lr, p.grad.data)
        elif args.optimizer in ['adam', 'RMSprop']:
            # clip_grad_norm was renamed clip_grad_norm_ in modern PyTorch.
            clip_grad_norm(model.parameters(), args.clip)
            optimizer.step()

        if i % log_interval == 0:
            print('[%d/%d] [%d/%d] Train Loss : %.4f' %
                  (epoch, args.epochs, i, len(trainloader), loss.data[0]))
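Note: repackage_hidden is not shown; the usual definition, from the
PyTorch word-language-model example, detaches the hidden state so
backpropagation is truncated at batch boundaries (modern-tensor sketch;
the original likely wrapped Variables):

def repackage_hidden(h):
    # Detach the hidden state from its history (truncated BPTT).
    if torch.is_tensor(h):
        return h.detach()
    return tuple(repackage_hidden(v) for v in h)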
Example #7
def prepareData(train_loader, seq_length, batch_index):
    index_start = batch_index * seq_length
    index_end = index_start + seq_length
    for i, (data, _) in enumerate(train_loader):
        data = data.view(-1, input_dim)
        data = data.resize_((batch_size, input_dim, 1))
        if i == index_start:
            outData = data
        elif index_start < i < index_end:
            outData = torch.cat((outData, data), 2)
        elif i >= index_end:
            # All seq_length batches collected; stop iterating.
            break
    # (batch, input_dim, seq) -> (seq, batch, input_dim)
    return outData.permute(2, 0, 1)
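Note: a usage sketch, assuming module-level batch_size and input_dim and
an illustrative window length of 10:

seq = prepareData(train_loader, seq_length=10, batch_index=0)
print(seq.shape)  # (10, batch_size, input_dim): ten consecutive batches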