Example #1
def val(args, zoomout, model, val_loader, epoch):
    # modified from https://github.com/wkentaro/pytorch-fcn/blob/master/examples/voc/evaluate.py
    model.eval()
    print("Validating...")
    label_trues, label_preds = [], []
    count = 0

    for batch_idx, (data, target) in enumerate(val_loader):

        im_viz, lbl_viz = data.float(), target.to('cpu')
        data, target = data.float(), target.float()
        score = model(zoomout(data))

        _, pred = torch.max(score, dim=1)  # changed dim from 0 to 1
        lbl_pred = pred.data.numpy().astype(np.int64)
        lbl_true = target.data.numpy().astype(np.int64)

        for lt, lp in zip(lbl_true, lbl_pred):
            label_trues.append(lt)
            label_preds.append(lp)

        if (batch_idx % 10 == 0) and (epoch in [0, 1, 4, 9]):
            count = count + 1
            """
            Visualization of results on val dataset. 
            epoch 0: only with weights learned from FCClassifier
            epoch 1: after 2 epochs of training DenseClassifier
            epoch 4: after 5 epochs of training DenseClassifier
            epoch 9: after 10 epochs of training DenseClassifier
            """
            pred = score[0, :, :, :]
            gt = lbl_viz[0, :, :].data.numpy().squeeze()
            im = im_viz[0, :, :, :].data.numpy().squeeze()
            im = np.swapaxes(im, 0, 2)
            im = np.swapaxes(im, 0, 1)
            _, pred_mx = torch.max(pred, 0)
            pred = pred_mx.data.numpy().squeeze()
            image = Image.fromarray(im.astype(np.uint8), mode='RGB')

            image.save("./imgs/val/im_" + str(count) + "_" + str(epoch) +
                       "_.png")
            visualize(
                "./lbls/val/pred_" + str(count) + "_" + str(epoch) + ".png",
                pred)
            visualize(
                "./lbls/val/gt_" + str(count) + "_" + str(epoch) + ".png", gt)

    n_class = 21
    metrics = label_accuracy_score(label_trues, label_preds, n_class=n_class)
    metrics = np.array(metrics)
    metrics *= 100
    print('''\
    Accuracy: {0}
    Accuracy Class: {1}
    Mean IU: {2}
    FWAV Accuracy: {3}'''.format(*metrics))
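The metrics above come from label_accuracy_score. In the wkentaro/pytorch-fcn evaluate script these snippets credit, it is computed from a confusion histogram, roughly as follows (a sketch based on that reference code):

import numpy as np

def _fast_hist(label_true, label_pred, n_class):
    # confusion histogram over valid pixels only
    mask = (label_true >= 0) & (label_true < n_class)
    hist = np.bincount(
        n_class * label_true[mask].astype(int) + label_pred[mask],
        minlength=n_class ** 2).reshape(n_class, n_class)
    return hist

def label_accuracy_score(label_trues, label_preds, n_class):
    # returns overall acc, per-class acc, mean IU, frequency-weighted acc
    hist = np.zeros((n_class, n_class))
    for lt, lp in zip(label_trues, label_preds):
        hist += _fast_hist(lt.flatten(), lp.flatten(), n_class)
    acc = np.diag(hist).sum() / hist.sum()
    with np.errstate(divide='ignore', invalid='ignore'):
        acc_cls = np.nanmean(np.diag(hist) / hist.sum(axis=1))
        iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
    mean_iu = np.nanmean(iu)
    freq = hist.sum(axis=1) / hist.sum()
    fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
    return acc, acc_cls, mean_iu, fwavacc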
Example #2
def train(model, train_loader, val_loader, epoch, num_epochs, loss_function, optimiser, scheduler, savename, highest_iou):
    model.train()
    losses = list()
    gpu1 = 'cuda:0'
    ious = list()
    max_iou = highest_iou
    count = 0
    savename2 = savename[:-3] + '_opt.pt'
    loop = tqdm(train_loader)
    num_steps = len(loop)
    for data, target in loop:
        model.train()
        model = model.to(gpu1)
        data, target = data.float().to(gpu1), target.float().to(gpu1)
        
        optimiser.zero_grad()
        prediction = model(data)
        prediction = prediction.squeeze(1)
        
        loss = loss_function(prediction, target) + dice_loss(torch.sigmoid(prediction), target)
        losses.append(loss.item())
        
        loss.backward()
        optimiser.step()
        scheduler.step()
        
        loop.set_description('Epoch {}/{}'.format(epoch + 1, num_epochs))
        loop.set_postfix(loss = loss.item())
        count += 1
        if count % (num_steps // 3) == 0:
            model.eval()
            with torch.no_grad():  # no gradients needed during validation
                for data, target in val_loader:
                    data, target = data.float().to(gpu1), target.float().to(gpu1)

                    prediction = model(data)
                    prediction = prediction.squeeze(1)

                    ious.append(iou(target, prediction))
    
            avg_iou = sum(ious) / len(ious)

            if avg_iou > max_iou:
                max_iou = avg_iou
                torch.save(model.state_dict(), savename)
                torch.save(optimiser.state_dict(), savename2)
                print('new max_iou', max_iou)

            print('avg_iou: ', avg_iou)
        
    print('avg_loss: ', sum(losses) / len(losses))
    return max_iou
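train() above relies on dice_loss and iou helpers that are not shown. A minimal sketch of what they plausibly look like for binary segmentation, matching the call sites above (names, smoothing term, and threshold are assumptions):

import torch

def dice_loss(pred, target, eps=1e-6):
    # hypothetical helper: pred holds probabilities in [0, 1], target a binary mask
    intersection = (pred * target).sum()
    return 1 - (2 * intersection + eps) / (pred.sum() + target.sum() + eps)

def iou(target, logits, threshold=0.5, eps=1e-6):
    # hypothetical helper: threshold raw logits, then intersection-over-union
    pred = (torch.sigmoid(logits) > threshold).float()
    intersection = (pred * target).sum()
    union = pred.sum() + target.sum() - intersection
    return ((intersection + eps) / (union + eps)).item()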
Example #3
    def __getitem__(self, index):
        sample = self.imgs[index]
        if self.phase == 'train':
            splits = sample.split(' ')
            img_path = splits[0]
            data = Image.open(img_path).convert(self.img_mode)
            data = self.transforms(data)
            label = np.int32(splits[1])
            return data.float(), label
        else:
            data = Image.open(sample).convert(self.img_mode)
            data = self.transforms(data)
            name = self.imgs_name[index][:-1]
            return data.float(), name
Example #4
def train(model, loader, epoch):
    # NOTE: creating the optimizer inside train() resets Adam's running
    # statistics every epoch; constructing it once outside is the usual pattern.
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    scheduler = StepLR(optimizer, step_size=30, gamma=0.5)
    model.train()
    torch.set_grad_enabled(True)
    correct = 0
    dataset_size = 0
    for batch_idx, (data, target) in enumerate(loader):
        dataset_size += data.shape[0]
        data, target = data.float(), target.long().squeeze()
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        optimizer.zero_grad()
        output, _ = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        pred = output.data.max(1, keepdim=True)[1]
        correct += pred.eq(target.view_as(pred)).long().cpu().sum()
        if batch_idx % args.log_interval == 0:
            percentage = 100. * batch_idx * args.batch_size / len(
                loader.dataset)
            print(
                f'Train Epoch: {epoch} [{batch_idx * args.batch_size}/{len(loader.dataset)} ({percentage:.0f}%)]'
                f'\tLoss: {loss.item():.6f}\t{args.tag}')
            logger.add_scalar('train_loss',
                              loss.cpu().item(),
                              batch_idx + epoch * len(loader))
    scheduler.step()  # step the schedule after the epoch's optimizer updates
    return float(correct) / float(dataset_size)
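Because Adam keeps per-parameter state, a sketch of the more conventional call pattern, with the optimizer and scheduler built once outside (Net and num_epochs are hypothetical, and train() would be refactored to accept the optimizer):

model = Net()  # hypothetical model class
optimizer = optim.Adam(model.parameters(), lr=args.lr)
scheduler = StepLR(optimizer, step_size=30, gamma=0.5)
for epoch in range(num_epochs):
    acc = train(model, train_loader, epoch)  # train() would take optimizer/scheduler too
    scheduler.step()                         # decay the LR once per epoch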
Example #5
def test(model, device, test_loader):
    # Testing
    model.eval()
    test_loss = 0
    accuracy = 0
    balanced_accuracy = 0
    with torch.no_grad():  # no gradients needed during evaluation
        for sample in test_loader:
            data, target = sample['image'], sample['label']
            data, target = data.to(device), target.to(device)

            outputs = model(data.float())  # fixed: was net(...), an undefined name
            #loss = criterion(outputs, torch.max(target, 1)[1])
            loss = criterion(outputs, target.squeeze(1).long())
            test_loss += loss.item() * data.size(0)
            acc, bacc, precision, recall, f1_score, rep = \
                METRIX(target.squeeze(1).long(), outputs)
            accuracy += acc
            balanced_accuracy += bacc
            # ----------------------
            print('Acc:\t{:.3f}%\tBalanced Acc.:\t{:.3f}%\tPrecision:\t{:.3f}%\t'
                  'Recall:\t{:.3f}%\tF1 Score:\t{:.3f}%\t'.format(
                      acc * 100, bacc * 100, precision * 100, recall * 100,
                      f1_score * 100))
            print('Report: \n', rep)
            # ----------------------

    return test_loss, accuracy, balanced_accuracy
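METRIX is not defined in this snippet. One plausible implementation built on scikit-learn, assuming multi-class logits; the name and signature are taken from the call above, everything else is an assumption:

from sklearn import metrics as skm

def METRIX(target, outputs):
    # hypothetical helper: hard predictions from logits, then standard metrics
    pred = outputs.argmax(dim=1).cpu().numpy()
    true = target.cpu().numpy()
    acc = skm.accuracy_score(true, pred)
    bacc = skm.balanced_accuracy_score(true, pred)
    precision = skm.precision_score(true, pred, average='macro', zero_division=0)
    recall = skm.recall_score(true, pred, average='macro', zero_division=0)
    f1 = skm.f1_score(true, pred, average='macro', zero_division=0)
    rep = skm.classification_report(true, pred, zero_division=0)
    return acc, bacc, precision, recall, f1, rep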
Example #6
    def fit(self, path, mask):
        if self.feature_length is None:
            self.feature_length = len(self.completeData[0])
            self.h_length = self.feature_length // 2

        dataloader = torch.utils.data.DataLoader(self.completeData,
                                                 batch_size=self.batch_size,
                                                 shuffle=True,
                                                 num_workers=2)
        encoder = Encoder(self.feature_length, self.h_length)
        decoder = Decoder(self.feature_length, self.h_length)
        ae = AE(encoder, decoder)
        criterion = torch.nn.MSELoss()
        optimizer = torch.optim.Adam(ae.parameters(), lr=self.learning_rate)
        l = None
        bestLoss = np.inf
        for epoch in range(self.epochs):
            for data in dataloader:
                inputs = data.float()
                # reshape instead of the deprecated resize_ (last batch may be
                # smaller than batch_size)
                inputs = inputs.reshape(-1, self.feature_length)
                if bi['core'] == 'cuda':
                    inputs = inputs.cuda()
                optimizer.zero_grad()
                dec = ae(inputs)
                loss = criterion(dec[mask], inputs[mask])
                loss.backward()
                optimizer.step()
                l = loss.item()
            print(epoch, l)
            if np.isnan(l):
                break
            if epoch % 5 == 0:
                torch.save(ae.state_dict(), path)
                logger.info("{}'s loss is {}".format(path, l))
                print("模型保存成功")
Example #7
    def __getitem__(self, index):
        qid = self.val[index + self.len_end + opt.question_train_set_items - 50000]
        data = self.data[index]
        label = self.label_file[qid]
        data = data.float()
        label_tensor = t.zeros(25556).scatter_(0, t.LongTensor(label), 1).long()
        return data, label_tensor
Example #8
def val(args, zoomout, model, val_loader):
    # modified from https://github.com/wkentaro/pytorch-fcn/blob/master/examples/voc/evaluate.py
    USE_GPU = True
    dtype = torch.float32  # we will be using float throughout this tutorial
    if USE_GPU and torch.cuda.is_available():
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')
    model.eval()
    print("Validating...")
    label_trues, label_preds = [], []

    for batch_idx, (data, target) in enumerate(val_loader):

        data, target = data.float(), target.float()
        score = model(zoomout(data))
        score = score.squeeze()
        _, pred = torch.max(score, 0)
        lbl_pred = pred.data.numpy().astype(np.int64)
        lbl_true = target.data.numpy().astype(np.int64)
        lbl_true = lbl_true.squeeze()

        for lt, lp in zip(lbl_true, lbl_pred):
            label_trues.append(lt)
            label_preds.append(lp)

    n_class = 21
    metrics = label_accuracy_score(label_trues, label_preds, n_class=n_class)
    metrics = np.array(metrics)
    metrics *= 100
    print('''\
    Accuracy: {0}
    Accuracy Class: {1}
    Mean IU: {2}
    FWAV Accuracy: {3}'''.format(*metrics))
Example #9
    def train_agent(self, epochs: int):
        if not self.dataset:
            raise Exception(
                "No training data set to train agent. Please set training data using ImmitationLearningAgent.injest_demonstrations"
            )

        self.logger.info("Starting Training of Agent ")
        self.neural_network.train()
        for epoch in range(epochs):
            running_loss = 0.0
            steps = 0
            for batch_idx, (data, target) in enumerate(self.data_loader):
                data, target = data.to(device), target.to(device)  # Variable() wrappers are obsolete since PyTorch 0.4
                self.optimizer.zero_grad()
                network_pred = self.neural_network(data.float()).to(device)
                loss = self.loss_function(network_pred, target.float())
                loss.backward()
                if self.collect_gradients:
                    self.set_gradients(self.neural_network.named_parameters())
                self.optimizer.step()
                running_loss += loss.item()
                steps += 1

            self.logger.info('[%d] loss: %.6f' %
                             (epoch + 1, running_loss / steps))
Example #10
    def __getitem__(self, index):
        qid = self.val[index + self.len_end + 2999967 - 200000]
        data = self.data[index]
        label = self.label_file[qid]
        data = data.float()
        label_tensor = t.zeros(1999).scatter_(0, t.LongTensor(label), 1).long()
        return data, label_tensor
Example #11
def train(epoch, denoise=False):
    model.train()
    train_loss = 0

    for batch_idx, data in enumerate(train_loader):
        data = data.float().to(device)
        optimizer.zero_grad()
        if denoise:
            noise = torch.bernoulli(torch.rand_like(data)).to(device)
            noisy_data = data + noise
            reconBatch, mu = model(noisy_data)
        else:
            reconBatch, mu = model(data)
        loss = loss_function(data, reconBatch)
        loss.backward()
        train_loss += loss.item()
        optimizer.step()
        if(batch_idx % log_interval == 0):
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader),
                loss.item() / len(data)))

    train_loss /= len(train_loader.dataset)
    print('====> Epoch: {} Average loss: {:.4f}'.format(
          epoch, train_loss))
    return train_loss
Example #12
def train(epoch, criterion, optimizer):
    model.train()
    train_loss = 0
    correct = 0
    running_loss = 0.0
    running_corrects = 0
    for data, target in tqdm.tqdm(dataloaders['train'],
                                  total=len(dataloaders['train']),
                                  desc='Batch'):

        data = data.to(device)
        data = torch.squeeze(data)
        data = data.float()
        target = target.to(device)  # fixed: was target.to(target)
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output, target)
        train_loss += loss.item()
        sm = nn.Softmax(dim=1)
        output_sm = sm(output)
        _, preds = torch.max(output_sm, 1)
        running_loss += loss.item() * data.size(0)
        running_corrects += torch.sum(preds == target.data)
        loss.backward()
        optimizer.step()
    epoch_loss = running_loss / dataset_sizes['train']

    epoch_acc = running_corrects.double() / dataset_sizes['train']
    line_to_save_train = 'Train set: Average loss: {:.4f} Accuracy: {}/{} {:.4f}\n'.format(
        epoch_loss, running_corrects, len(train_loader.dataset), epoch_acc)
    with open(args.outf + '/ACC_train.txt', 'a') as f:
        f.write(line_to_save_train)
    print(line_to_save_train)
Example #13
def train(epoch):
    model.train()
    trainLoss = 0
    cos_sims = 0
    klds = 0
    loss_divider = len(
        train_loader.dataset) - (len(train_loader.dataset) % batch_size)
    for batch_idx, data in enumerate(train_loader):
        data = data.float().to(device)
        optimizer.zero_grad()
        reconBatch, mu, logvar = model(data)
        loss, cos_sim, kld = loss_function(reconBatch, data, mu, logvar)
        loss.backward()
        trainLoss += loss.item()
        cos_sims += cos_sim.item()
        klds += kld.item()
        optimizer.step()

        weights = []
        for f in model.parameters():
            weights.append(f.cpu().data.numpy())

        if (batch_idx % log_interval == 0):
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader),
                loss.item() / len(data)))

    print('====> Epoch: {} Average Loss: {:.4f}'.format(
        epoch, trainLoss / loss_divider))

    return trainLoss / loss_divider, cos_sims / loss_divider, \
                            klds / loss_divider, weights, mu
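The loss_function used above returns (loss, cos_sim, kld). A sketch consistent with that signature, pairing a cosine-similarity reconstruction term with the standard VAE KL divergence; the exact weighting in the original is unknown:

import torch
import torch.nn.functional as F

def loss_function(recon_batch, data, mu, logvar):
    # reconstruction term: 1 - mean cosine similarity across the batch
    cos_sim = 1 - F.cosine_similarity(recon_batch, data, dim=1).mean()
    # KL divergence of N(mu, sigma^2) from N(0, 1), summed over latent dims
    kld = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), dim=1).mean()
    return cos_sim + kld, cos_sim, kld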
Example #14
    def one_epoch(self, mode, epoch_num):
        if mode not in ['train', 'test']:
            raise ValueError("Unknown value {} for mode".format(mode))
        print("{}ing... epoch: {}".format(mode, epoch_num))

        if mode == 'train':
            self.model.train()
            dl = self.train_data
            one_iter_function = self.one_train_iteration
        else:
            self.model.eval()
            dl = self.test_data
            one_iter_function = self.one_test_iteration

        acc_avg = RunningAverage()
        loss_avg = RunningAverage()
        with tqdm(total=len(dl)) as t:
            for n, (data, label) in enumerate(dl):
                if self.train_params['use_gpu']:
                    gpu_id = self.train_params['gpu_id']
                    data, label = data.cuda(gpu_id), label.cuda(gpu_id)
                data = data.float()  # Variable() wrappers are obsolete since PyTorch 0.4
                loss, acc = one_iter_function(data, label)
                loss_avg.update(loss)
                acc_avg.update(acc)
                t.set_postfix(
                    run_param="Epoch{} Loss:{:.2f} Acc:{:.2f}".format(
                        epoch_num, loss_avg(), acc_avg()))
                t.update()

        return acc_avg, loss_avg
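RunningAverage is used above as update() plus a plain call. A minimal implementation matching that interface (assumed, not the original):

class RunningAverage:
    def __init__(self):
        self.total, self.steps = 0.0, 0

    def update(self, val):
        self.total += val
        self.steps += 1

    def __call__(self):
        return self.total / self.steps if self.steps else 0.0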
Example #15
def test(epoch):
    model.eval()
    test_loss = 0
    for i, (data, _) in enumerate(test_loader):
        # for i, data in enumerate(test_loader):
        data = data.float()
        # for x in data:
        #     scaler.partial_fit(x[0]) # 0 because there is only one dimension

        # # # # normalizes the data
        # for x in data:
        #     x[0] = torch.from_numpy(scaler.transform(x[0])) # new

        data = (data - torch.mean(data)) / torch.std(data)

        if args.cuda:
            data = data.cuda()
        with torch.no_grad():  # replaces the deprecated volatile=True flag
            output = model(data)
            loss = MSE(output, data)

        test_loss += loss.item()  # loss.data[0] is deprecated
        # if i % 100 == 0:
        #     n = min(data.size(0), 8)
        #     comparison = torch.cat([data[:n],
        #                            output[:n]])
        #     save_image(comparison.data.cpu(),
        #                'snapshots/conv_vae/reconstruction_' + str(epoch) +
        #                '.png', nrow=n)

    test_loss /= len(test_loader.dataset)
    print('====> Test set loss: {:.4f}'.format(test_loss))
Example #16
def train(model, loader, epoch):
    model.train()
    torch.set_grad_enabled(True)
    correct = 0
    dataset_size = 0
    for batch_idx, (data, target) in enumerate(loader):
        dataset_size += data.shape[0]
        data, target = data.float(), target.long().squeeze()
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        pred = output.data.max(1, keepdim=True)[1]
        correct += pred.eq(target.view_as(pred)).long().cpu().sum()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\t{}'.format(
                epoch, batch_idx * len(data), len(loader.dataset),
                100. * batch_idx * len(data) / len(loader.dataset),
                loss.item(), args.tag))
            logger.add_scalar('train_loss',
                              loss.cpu().item(),
                              batch_idx + epoch * len(loader))
    scheduler.step()  # step once per epoch, after the optimizer updates
    return float(correct) / float(dataset_size)
Example #17
    def __getitem__(self, index):
        img_path = self.image_list[index]
        img = Image.open(img_path)
        data = img.convert('RGB')
        data = self.transforms(data)
        label = self.label_list[index]
        return data.float(), label
Example #18
def test(model, loader):
    model.eval()
    torch.set_grad_enabled(False)
    test_loss = 0
    correct = 0
    dataset_size = 0
    res = []
    for batch_idx, (data, target, obj_name) in enumerate(loader):
        dataset_size += data.shape[0]
        data, target = data.float(), target.long().squeeze()
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        output = model(data)  # N*C
        test_loss += F.nll_loss(output, target,
                                reduction='sum').cpu().item()
        pred = output.data.max(1, keepdim=True)[1]
        correct += pred.eq(target.view_as(pred)).long().cpu().sum()
        for i, j, k in zip(obj_name,
                           pred.data.cpu().numpy(),
                           target.data.cpu().numpy()):
            res.append((i, j[0], k))

    test_loss /= len(loader.dataset)
    acc = float(correct) / float(dataset_size)
    return acc, test_loss
Example #19
def class_probabilities_test(model, device, test_loader, class_num):

    model.eval()
    scores = np.empty((0, class_num))
    y_target = np.empty((0, class_num))
    test_correct = 0
    test_total = 0

    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            outputs = model(data.float())
            _, predicted = torch.max(outputs.data, 1)
            test_total += target.size(0)
            test_correct += (predicted == target).sum()
            # print(test_correct)
            out_result = outputs.cpu().numpy()
            out_target = target.cpu()
            prediction_prob = out_result[:, 1]
            prediction_binary = np.where(prediction_prob > 0.5, 1, 0)
            acc_mlp = accuracy_score(out_target, prediction_binary)
            # print(acc_mlp)
            scores = np.append(scores, out_result, axis=0)
            out_target = out_target.tolist()
            # print(out_target)
            labels_onehot = to_onehot(out_target)
            y_target = np.append(y_target, labels_onehot, axis=0)
        test_acc = test_correct.item() / test_total
        # print("Test Accuracy: %.4f " % (test_acc))

    return scores, y_target, test_acc
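to_onehot is undefined here. Given that y_target is pre-allocated with class_num columns, a plausible helper (the default class count is an assumption):

import numpy as np

def to_onehot(labels, n_classes=2):
    # hypothetical helper: index an identity matrix to one-hot encode labels
    return np.eye(n_classes)[np.asarray(labels, dtype=int)]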
Example #20
def eval_accuracy(net_path, name):
    path = 'UCRArchive_2018/' + name + '/' + name + '_TEST.tsv'
    net = torch.load(net_path)
    net.eval()
    test_set, n_class = load_ucr(path)
    # x_test = test_set[:, 1:]
    # x_test = torch.from_numpy(x_test).unsqueeze(dim=-1).float()
    # print(x_test.shape)
    # y_true = test_set[:, 0]
    # y_pred = net(x_test)
    # y_pred = torch.argmax(y_pred, dim=-1)
    # print(y_pred)
    # y_pred = y_pred.detach().numpy()
    dataset = UcrDataset(test_set, channel_last=opt.channel_last)
    dataloader = UCR_dataloader(dataset, batch_size=128)

    y_pred = []
    y_true = []
    with torch.no_grad():
        for i, (data, label) in enumerate(dataloader):
            data = data.float().to(device)
            y_ = net(data).cpu()
            y_ = torch.argmax(y_, dim=1)
            y_pred.append(y_.detach().numpy())
            y_true.append(label.view(-1).long())
    y_pred = np.concatenate(y_pred)
    y_true = np.concatenate(y_true)
    res = calculate_metrics(y_true, y_pred)
    return res
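calculate_metrics is not shown. A plausible scikit-learn version returning the usual classification summary (the dictionary layout is assumed):

from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score

def calculate_metrics(y_true, y_pred):
    return {
        'accuracy': accuracy_score(y_true, y_pred),
        'precision': precision_score(y_true, y_pred, average='macro', zero_division=0),
        'recall': recall_score(y_true, y_pred, average='macro', zero_division=0),
        'f1': f1_score(y_true, y_pred, average='macro', zero_division=0),
    }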
Example #21
def test_model(model, test_load, criterion, testout, batch_size):
    with torch.no_grad():
        # indicate that we are evaluating model
        model.eval()
        
        # initialize errors to 0
        running_loss = 0.0
        total_predictions = 0.0
        correct_predictions = 0.0    
        
        # start iterating
        for batch_idx, (data, target) in enumerate(test_load):   
            data = data.to(device)
            target = target.to(device)

            # run forward pass and then compute loss
            outputs = model(data.float())
            loss = criterion(outputs, target.long()).detach()
            
            # get predictions 
            _, predicted = torch.max(outputs.data, 1)
            
            # calculate correct predictions / loss
            total_predictions += target.size(0)            
            correct_predictions += (predicted == target).sum().item()
            running_loss += loss.item()
            
        # calculate average loss and accuracy
        running_loss /= len(test_load)
        acc = (correct_predictions/total_predictions)*100.0
        return running_loss, acc
Example #22
def train_epoch(model, train_load, criterion, optimizer):
    # indicate that we are training 
    model.train()
    running_loss = 0.0
    
    # start timer and start iterating
    start_train = time.time()
    for batch_idx, (data, target) in enumerate(train_load):
        data = data.to(device)
        target = target.to(device) # all data & model on same device
        
        # forward, then backward, then step
        outputs = model(data.float())
        loss = criterion(outputs, target.long())
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()   # .backward() accumulates gradients

        # accumulate loss
        running_loss += loss.item()
    
    # end timer and take average loss
    end_train = time.time()
    running_loss /= len(train_load)
    
    return end_train, start_train, running_loss
Example #23
def test(epoch, data_loader, test_set=False, valid_set=False):
    model.eval()
    loss = 0
    cos_sim = 0
    kld = 0
    loss_divider = len(
        data_loader.dataset) - (len(data_loader.dataset) % batch_size)
    with torch.no_grad():
        for i, data in enumerate(data_loader):
            data = data.float().to(device)
            reconBatch, mu, logvar = model(data)

            temp_loss, cos_sim_temp, kld_temp = loss_function(
                reconBatch, data, mu, logvar)
            loss += temp_loss.item()
            cos_sim += cos_sim_temp.item()
            kld += kld_temp.item()

    loss /= loss_divider
    if test_set:
        print('====> Test set loss: {:.4f}'.format(loss))
    elif valid_set:
        print('====> Validation set loss: {:.4f}'.format(loss))

    return loss, cos_sim / loss_divider, kld / loss_divider
Example #24
def val(args, zoomout, model, val_loader):
    # modified from https://github.com/wkentaro/pytorch-fcn/blob/master/examples/voc/evaluate.py
    model.eval()
    print("Validating...")
    label_trues, label_preds = [], []

    for batch_idx, (data, target) in enumerate(val_loader):

        data, target = data.float(), target.float()
        score = model(zoomout(data))

        _, pred = torch.max(score, 0)
        lbl_pred = pred.data.numpy().astype(np.int64)
        lbl_true = target.data.numpy().astype(np.int64)

        for lt, lp in zip(lbl_true, lbl_pred):
            label_trues.append(lt)
            label_preds.append(lp)

    n_class = 21
    metrics = label_accuracy_score(label_trues, label_preds, n_class=n_class)
    metrics = np.array(metrics)
    metrics *= 100
    print('''\
    Accuracy: {0}
    Accuracy Class: {1}
    Mean IU: {2}
    FWAV Accuracy: {3}'''.format(*metrics))
Example #25
def test(epoch):
    model.eval()
    test_loss = 0
    correct = 0
    running_loss = 0.0
    running_corrects = 0
    for data, target in tqdm.tqdm(dataloaders['validation'],
                                  total=len(dataloaders['validation']),
                                  desc='Batch'):
        data = data.to(device)
        data = data.float()
        data = torch.squeeze(data)
        target = target.to(device)  # fixed: was target.to(target)
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        output = model(data)
        sm = nn.Softmax(dim=1)
        output_sm = sm(output)
        _, preds = torch.max(output_sm, 1)
        loss = criterion(output, target)
        running_loss += loss.item() * data.size(0)
        running_corrects += torch.sum(preds == target.data)
    epoch_loss = running_loss / dataset_sizes['validation']

    epoch_acc = running_corrects.double() / dataset_sizes['validation']
    line_to_save_test = 'Test set: Average loss: {:.4f} Accuracy: {}/{} {:.4f}\n'.format(
        epoch_loss, running_corrects, dataset_sizes['validation'], epoch_acc)

    with open(args.outf + '/ACC_test.txt', 'a') as f:
        f.write(line_to_save_test)
    print(line_to_save_test)

    return epoch_loss, epoch_acc
Example #26
def train(epoch):
    model.train()
    train_loss = 0
    train_distance = 0
    criterion = nn.MSELoss()
    accuracy_criterion = nn.CosineSimilarity()
    for batch_idx, data in enumerate(train_loader):
        optimizer.zero_grad()
        # convert the byte tensor to float
        data = data.float().to(device)
        data = data.view(-1, 1, 96, 60)
        #embed data with autoencoder
        with torch.no_grad():
            mu, logvar = autoencoder_model.encoder(data)

        #prepare for input lstm
        mu = mu.view(model.batch_size, model.seq_length, 100)
        embedding = mu.double()

        # Normalize to mean 0 and std 1
        # mean_batch = torch.mean(embedding)
        # std_batch = torch.std(embedding)
        # embedding_norm = (embedding - mean_batch) / std_batch

        g_truth = embedding[:, -1, :]
        input_lstm = embedding[:, :-1, :]
        _, output_lstm = model(input_lstm)

        loss = criterion(output_lstm, g_truth)
        loss.backward()

        train_loss += loss.item()
        # torch.nn.utils.clip_grad_value_(model.parameters(), 5)
        optimizer.step()

        # with torch.no_grad():
        #     prediction = autoencoder_model.decoder(output_lstm.float().view(-1, model.input_size))
        #     prediction = prediction.view(-1, half_seq_length, 1, 96, 60)
        #     train_distance += np.linalg.norm(data.view(-1, model.seq_length, 1,
        #         96, 60)[:,half_seq_length:].cpu().numpy() - prediction.cpu().numpy())

        gradients = []
        weights = []
        for f in model.parameters():
            gradients.append(f.grad.cpu().data.numpy())
            weights.append(f.cpu().data.numpy())

        if (batch_idx % log_interval == 0):
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data),
                len(train_loader.dataset) * model.seq_length,
                100. * batch_idx / len(train_loader), loss.item()))

    # average train loss
    train_loss /= (batch_idx + 1)
    train_distance /= (batch_idx + 1)
    print('====> Epoch: {} Average Loss: {:.4f}'.format(epoch, train_loss))

    return train_loss, train_distance, gradients, weights
Example #27
    def get_batch(self, train_data, bptt, evaluation=False):
        data, target = bptt
        data = data.float().cuda()
        target = target.long().cuda()
        if evaluation:
            data.requires_grad_(False)
            target.requires_grad_(False)
        return data, target
Example #28
def transform_ToTensor(data):
    # transform numpy array to float tensor

    data = torch.from_numpy(np.array(data, dtype='uint8'))

    if isinstance(data, torch.ByteTensor):
        return data.float().div(255)
    else:
        return data
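A quick usage sketch; unlike torchvision.transforms.ToTensor, this helper keeps the original axis order (no HWC-to-CHW permute):

import numpy as np

img = np.random.randint(0, 256, size=(28, 28), dtype='uint8')
tensor = transform_ToTensor(img)  # FloatTensor scaled to [0, 1]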
Example #29
    def __getitem__(self, index):
        sample = self.imgs[index]
        splits = sample.split()
        img_path = splits[0]
        data = Image.open(img_path)
        data = data.convert('L')
        data = self.transforms(data)
        label = np.int32(splits[1])
        return data.float(), label
Example #30
    def train(self):
        # load data from file
        with h5py.File(
                'C:/Users/Kai/GitHub/Forever_Machine_Learning/code/data_950.h5',
                'r') as hf:
            data_x = hf['x'][:]
            data_y = hf['y'][:]

        # 80/20 split
        # data_tr, data_te = data_obs.split()
        train_ind = int(len(data_x) * 0.8)
        data_tr = Dataset(data_x[:train_ind], data_y[:train_ind])
        data_te = Dataset(data_x[train_ind:], data_y[train_ind:])

        train_loader = DataLoader(data_tr,
                                  batch_size=self.batch_size,
                                  shuffle=self.shuffle)

        train_losses = []

        for epoch in range(self.num_epochs):
            start_time1 = time.time()

            epoch_loss = torch.tensor(0.0, device=self.device)

            for batch_idx, batch in enumerate(train_loader):
                start_time2 = time.time()

                data, target = batch
                target = target.float().to(self.device)  # move once; was previously moved only for loss_gen

                outputs = self.model(data.float().to(self.device))

                loss_gen = self.criterion(outputs, target)
                loss_L1 = self.criterionL1(outputs, target) * self.lambda_L1
                loss = loss_gen + loss_L1

                # Aggregate loss across mini-batches (per epoch); detach so the
                # autograd graph is not kept alive between batches
                epoch_loss += loss.detach()

                # Backprop and perform Adam optimisation
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
                print(
                    f"Epoch: {epoch}\tBatch: {batch_idx}\tGen Loss: {loss_gen:.5f}\tL1 Loss: {loss_L1:.5f}"
                    f"\tTotal Time: {(time.time() - start_time2)/60:.3f} minutes"
                )

            print(
                f"Epoch: {epoch}\tTrain Loss: {epoch_loss/len(train_loader):.5f}"
                f"\tTotal Time: {(time.time() - start_time1)/60:.3f} minutes")

            train_losses.append((epoch_loss / len(train_loader)).item())

        return train_losses
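The Dataset class wrapped by DataLoader above is not shown. A minimal sketch consistent with the Dataset(x, y) calls (assumed, not the original):

import torch

class Dataset(torch.utils.data.Dataset):
    def __init__(self, x, y):
        self.x, self.y = x, y

    def __len__(self):
        return len(self.x)

    def __getitem__(self, idx):
        return self.x[idx], self.y[idx]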