Example #1
	def train_epoch(self, net: BaseNet, train_loader):
		self.accuracy.zero()
		net.train()
		self.scheduler.step()

		loss_epoch = 0.0
		n_batches = 0
		dist = 0
		epoch_start_time = time.time()
		for data in train_loader:
			inputs, y = data

			# Zero the network parameter gradients
			self.optimizer.zero_grad()

			# Update network parameters via backpropagation: forward + backward + optimize
			outputs = self.predict(inputs, net)
			loss = self.criterion(outputs, y)
			if torch.isnan(loss):
				raise ValueError('loss is nan while training')
			self.accuracy(inputs, outputs, y)
			loss.backward()
			self.optimizer.step()

			loss_epoch += loss.item()
			n_batches += 1

		# log epoch statistics
		epoch_train_time = time.time() - epoch_start_time
		return loss_epoch, self.accuracy.value, epoch_train_time, n_batches
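
Example #1 calls self.accuracy.zero(), self.accuracy(inputs, outputs, y) and self.accuracy.value, but the metric object itself is not part of the listing. A minimal sketch of what such a running-accuracy helper could look like (the class name and its internals are assumptions, not code from the original project):

import torch

class RunningAccuracy:
    """Hypothetical running accuracy metric matching the calls in Example #1."""

    def __init__(self):
        self.correct = 0
        self.total = 0

    def zero(self):
        # Reset the counters at the start of an epoch
        self.correct = 0
        self.total = 0

    def __call__(self, inputs, outputs, y):
        # Count correct argmax predictions for this batch (assumes classification outputs)
        preds = torch.argmax(outputs, dim=1)
        self.correct += (preds == y).sum().item()
        self.total += y.size(0)

    @property
    def value(self):
        return self.correct / max(self.total, 1)
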
Example #2
    def score(self, dataset: BaseDataset, net: BaseNet):
        logger = logging.getLogger()

        # Set device for network

        # Get test data loader
        _, test_loader = dataset.loaders(batch_size=self.batch_size,
                                         num_workers=self.n_jobs_dataloader)

        # Testing
        logger.info('Starting testing...')
        start_time = time.time()

        net.eval()
        loss_total = 0
        with torch.no_grad():
            for data in test_loader:
                inputs, y = data

                outputs = self.predict(inputs, net)
                loss = self.criterion(outputs, y)
                loss_total += loss.item()

        self.test_time = time.time() - start_time
        logger.info('Testing time: %.3f' % self.test_time)

        self.test_scores = loss_total
Example #3
    def test(self, dataset: BaseADDataset, ae_net: BaseNet):
        logger = logging.getLogger()

        # Set device for network
        ae_net = ae_net.to(self.device)

        # Get test data loader
        _, test_loader = dataset.loaders(batch_size=self.batch_size, num_workers=self.n_jobs_dataloader)

        # Testing
        logger.info('Testing autoencoder...')
        loss_epoch = 0.0
        n_batches = 0
        start_time = time.time()
        idx_label_score = []
        ae_net.eval()
        with torch.no_grad():
            for data in test_loader:
                inputs, labels, idx = data
                inputs = inputs.to(self.device)
                outputs = ae_net(inputs)
                #scores = torch.sum((outputs - inputs) ** 2, dim=tuple(range(1, outputs.dim())))
                scores = bidirectional_score(inputs, outputs)
                loss = torch.mean(scores)

                # Save triple of (idx, label, score) in a list
                idx_label_score += list(zip(idx.cpu().data.numpy().tolist(),
                                            labels.cpu().data.numpy().tolist(),
                                            scores.cpu().data.numpy().tolist()))

                loss_epoch += loss.item()
                n_batches += 1
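
Examples #3 and #4 score reconstructions with bidirectional_score(inputs, outputs), whose definition is not included in the listing; the commented-out line above it shows the standard per-sample squared reconstruction error. That standard score as a standalone helper (the function name here is ours, only the formula comes from the listing):

import torch

def reconstruction_score(inputs: torch.Tensor, outputs: torch.Tensor) -> torch.Tensor:
    """Per-sample squared reconstruction error, summed over all non-batch dimensions
    (the commented-out scoring line in Examples #3 and #4)."""
    return torch.sum((outputs - inputs) ** 2, dim=tuple(range(1, outputs.dim())))
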
Example #4
    def train(self, dataset: BaseADDataset, ae_net: BaseNet):
        logger = logging.getLogger()

        # Set device for network
        ae_net = ae_net.to(self.device)

        # Get train data loader
        train_loader, _ = dataset.loaders(batch_size=self.batch_size, num_workers=self.n_jobs_dataloader)

        # Set optimizer (Adam optimizer for now)
        optimizer = optim.Adam(ae_net.parameters(), lr=self.lr, weight_decay=self.weight_decay,
                               amsgrad=self.optimizer_name == 'amsgrad')

        # Set learning rate scheduler
        scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=self.lr_milestones, gamma=0.1)

        # Training
        logger.info('Starting pretraining...')
        start_time = time.time()
        ae_net.train()
        for epoch in range(self.n_epochs):

            scheduler.step()
            if epoch in self.lr_milestones:
                logger.info('  LR scheduler: new learning rate is %g' % float(scheduler.get_lr()[0]))

            loss_epoch = 0.0
            n_batches = 0
            epoch_start_time = time.time()
            for data in train_loader:
                inputs, _, _ = data
                inputs = inputs.to(self.device)

                # Zero the network parameter gradients
                optimizer.zero_grad()

                # Update network parameters via backpropagation: forward + backward + optimize
                outputs = ae_net(inputs)
                #scores = torch.sum((outputs - inputs) ** 2, dim=tuple(range(1, outputs.dim())))
                scores = bidirectional_score(inputs, outputs)
                loss = torch.mean(scores)
                loss.backward()
                optimizer.step()

                loss_epoch += loss.item()
                n_batches += 1

            # log epoch statistics
            epoch_train_time = time.time() - epoch_start_time
            logger.info('  Epoch {}/{}\t Time: {:.3f}\t Loss: {:.8f}'
                        .format(epoch + 1, self.n_epochs, epoch_train_time, loss_epoch / n_batches))

        pretrain_time = time.time() - start_time
        logger.info('Pretraining time: %.3f' % pretrain_time)
        logger.info('Finished pretraining.')

        return ae_net
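
Note that Examples #1, #4, #10 and others call scheduler.step() at the top of each epoch, before any optimizer.step(). Since PyTorch 1.1 the recommended ordering is the reverse (optimizer steps first, the scheduler steps once at the end of the epoch); the original ordering only shifts when the milestones take effect, and newer PyTorch versions warn about it. A small self-contained sketch of the recommended ordering on a toy model:

import torch
from torch import nn, optim

model = nn.Linear(10, 1)
optimizer = optim.Adam(model.parameters(), lr=1e-3)
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[5, 8], gamma=0.1)
batches = [(torch.randn(4, 10), torch.randn(4, 1)) for _ in range(3)]

for epoch in range(10):
    for inputs, targets in batches:
        optimizer.zero_grad()
        loss = nn.functional.mse_loss(model(inputs), targets)
        loss.backward()
        optimizer.step()
    # Step the LR scheduler once per epoch, after the optimizer updates
    scheduler.step()
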
Example #5
    def test(self, dataset: BaseADDataset, net: BaseNet):
        logger = logging.getLogger()

        # Set device for network
        net = net.to(self.device)

        # Get test data loader
        _, test_loader = dataset.loaders(batch_size=self.batch_size,
                                         num_workers=self.n_jobs_dataloader)

        # Testing
        logger.info('Starting testing...')
        start_time = time.time()
        idx_label_score = []
        net.eval()
        with torch.no_grad():
            for data in test_loader:
                # The labels here are all 0s and 1s; about nine parts are 1 for every one part 0, which matches the intended split.
                # Also, the test data is not shuffled: samples appear in index order starting from 0.
                inputs, labels, idx = data
                inputs = inputs.to(self.device)
                # Explanation of the next few lines:
                # outputs has shape [128, 32], where 128 is the batch size and 32 is the network's output dimension
                # self.c has shape [32]
                # torch.sum over dim=1 sums the squared differences across the 32 dimensions, so dist has shape [128] and is used as the anomaly score
                outputs = net(inputs)
                dist = torch.sum((outputs - self.c)**2, dim=1)
                if self.objective == 'soft-boundary':
                    scores = dist - self.R**2
                else:
                    scores = dist

                # Save triples of (idx, label, score) in a list
                idx_label_score += list(
                    zip(idx.cpu().data.numpy().tolist(),
                        labels.cpu().data.numpy().tolist(),
                        scores.cpu().data.numpy().tolist()))

        self.test_time = time.time() - start_time
        logger.info('Testing time: %.3f' % self.test_time)

        # After assembly, idx_label_score is ordered by sample index.
        self.test_scores = idx_label_score

        # Compute AUC
        # labels contains 0s and 1s; scores are the network's output scores
        _, labels, scores = zip(*idx_label_score)
        labels = np.array(labels)
        scores = np.array(scores)

        # roc_auc_score computes the area under the ROC curve from the raw scores; it does not threshold them at 0.5 (see my notes, and the small illustration after this method).
        self.test_auc = roc_auc_score(labels, scores)
        logger.info('Test set AUC: {:.2f}%'.format(100. * self.test_auc))

        logger.info('Finished testing.')
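
The comment above roc_auc_score is worth making concrete: the AUC is the area under the ROC curve computed from the raw anomaly scores, so it depends only on how the scores rank anomalies above normal samples, not on any 0.5 threshold. A tiny standalone illustration with made-up labels and scores (not data from the example):

import numpy as np
from sklearn.metrics import roc_auc_score

labels = np.array([0, 0, 1, 1])           # 0 = normal, 1 = anomalous
scores = np.array([0.2, 1.5, 0.9, 3.0])   # higher score = more anomalous

# 3 of the 4 (anomalous, normal) pairs are ranked correctly -> AUC = 0.75
print(roc_auc_score(labels, scores))
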
    def test(self, dataset: BaseADDataset, ae_net: BaseNet):
        logger = logging.getLogger()

        # Set device for network
        ae_net = ae_net.to(self.device)

        # Get test data loader
        _, test_loader = dataset.loaders(batch_size=self.batch_size,
                                         num_workers=self.n_jobs_dataloader)

        # Testing
        logger.info('Testing autoencoder...')
        loss_epoch = 0.0
        n_batches = 0
        start_time = time.time()
        idx_label_score = []
        ae_net.eval()
        with torch.no_grad():
            for data in test_loader:
                inputs, labels, idx = data
                inputs = inputs.to(self.device)
                outputs = ae_net(inputs)
                scores = torch.sum((outputs - inputs)**2,
                                   dim=tuple(range(1, outputs.dim())))
                loss = torch.mean(scores)

                # Save triple of (idx, label, score) in a list
                idx_label_score += list(
                    zip(idx.cpu().data.numpy().tolist(),
                        labels.cpu().data.numpy().tolist(),
                        scores.cpu().data.numpy().tolist()))

                loss_epoch += loss.item()
                n_batches += 1

        logger.info('Test set Loss: {:.8f}'.format(loss_epoch / n_batches))

        _, labels, scores = zip(*idx_label_score)
        labels = np.array(labels)
        scores = np.array(scores)

        auc = roc_auc_score(labels, scores)
        logger.info('Test set AUC: {:.2f}%'.format(100. * auc))

        test_time = time.time() - start_time
        logger.info('Autoencoder testing time: %.3f' % test_time)
        logger.info('Finished testing autoencoder.')
    def test(self, dataset: BaseADDataset, net: BaseNet):
        logger = logging.getLogger()

        # Set device for network
        net = net.to(self.device)

        # Get test data loader
        _, test_loader = dataset.loaders(batch_size=self.batch_size,
                                         num_workers=self.n_jobs_dataloader)

        # Testing
        logger.info('Starting testing...')
        start_time = time.time()
        idx_label_score = []
        net.eval()
        with torch.no_grad():
            for data in test_loader:
                inputs, labels, idx = data
                inputs = inputs.to(self.device)
                outputs = net(inputs)
                dist = torch.sum((outputs - self.c)**2, dim=1)
                if self.objective == 'soft-boundary':
                    scores = dist - self.R**2
                else:
                    scores = dist

                # Save triples of (idx, label, score) in a list
                idx_label_score += list(
                    zip(idx.cpu().data.numpy().tolist(),
                        labels.cpu().data.numpy().tolist(),
                        scores.cpu().data.numpy().tolist()))

        self.test_time = time.time() - start_time
        logger.info('Testing time: %.3f' % self.test_time)

        self.test_scores = idx_label_score

        # Compute AUC
        _, labels, scores = zip(*idx_label_score)
        labels = np.array(labels)
        scores = np.array(scores)

        self.test_auc = roc_auc_score(labels, scores)
        logger.info('Test set AUC: {:.2f}%'.format(100. * self.test_auc))

        logger.info('Finished testing.')
Example #8
	def fit(self, dataset: BaseDataset, net: BaseNet, validate=0):

		# initialize logger
		self.logger = logging.getLogger()

		# Get train data loader
		train_loader, _ = dataset.loaders(batch_size=self.batch_size, num_workers=self.n_jobs_dataloader)

		# Set optimizer (Adam optimizer for now)
		self.optimizer = optim.Adam(net.parameters(), lr=self.lr, weight_decay=self.weight_decay,
							   amsgrad=self.optimizer_name == 'amsgrad')
		# Set learning rate scheduler
		self.scheduler = optim.lr_scheduler.MultiStepLR(self.optimizer, milestones=self.lr_milestones, gamma=0.1)

		# Early Stopping
		if self.early_stopping:
			early_stopping = EarlyStopping(patience=40)

		self.logger.info('Starting training...')
		start_time = time.time()


		for epoch in range(self.n_epochs):

			# lr scheduler update
			if epoch in self.lr_milestones:
				self.logger.info('  LR scheduler: new learning rate is %g' % float(self.scheduler.get_lr()[0]))

			# train epoch
			loss_epoch, accuracy, epoch_train_time, n_batches = self.train_epoch(net, train_loader)

			# Validate on test set and log
			if validate > 0:
				validation_score, validation_time, validation_accuracy = self.score(dataset, net)
				self.logger.info('  Epoch {}/{}\t Time: {:.3f}\t Loss: {:.8f}\t Accuracy: {:.4f}\t Validation Time: {:.3f}\t Validation Loss: {:.4f}\t Validation Accuracy: {:.4f}'
								 .format(epoch + 1, self.n_epochs, epoch_train_time, loss_epoch,
										 self.accuracy.value, validation_time, validation_score, validation_accuracy))
			else:
				self.logger.info('  Epoch {}/{}\t Time: {:.3f}\t Loss: {:.8f}\t Accuracy: {:.4f}'
								 .format(epoch + 1, self.n_epochs, epoch_train_time, loss_epoch,
										 self.accuracy.value))

			# check early stopping
			if self.early_stopping:
				early_stopping(self.accuracy.value, net)
				if early_stopping.early_stop:
					# load best model so far
					early_stopping.load_best_model(net)
					print('Early Stopping..')
					break

		self.train_time = time.time() - start_time
		self.logger.info('Training time: %.3f' % self.train_time)
		self.logger.info('Finished training.')
		return net
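
Example #8 depends on an EarlyStopping helper that is constructed with patience=40, called with a metric value and the network, and exposes early_stop and load_best_model; its implementation is not in the listing. A minimal sketch of such a helper (the internals are assumptions; it treats a higher metric, e.g. accuracy, as better):

import copy

class EarlyStopping:
    """Hypothetical early-stopping helper matching the calls in Example #8."""

    def __init__(self, patience: int = 40):
        self.patience = patience
        self.best_metric = None
        self.best_state = None
        self.counter = 0
        self.early_stop = False

    def __call__(self, metric, net):
        # Remember the best metric seen so far and the corresponding weights
        if self.best_metric is None or metric > self.best_metric:
            self.best_metric = metric
            self.best_state = copy.deepcopy(net.state_dict())
            self.counter = 0
        else:
            self.counter += 1
            if self.counter >= self.patience:
                self.early_stop = True

    def load_best_model(self, net):
        if self.best_state is not None:
            net.load_state_dict(self.best_state)
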
    def init_center_c(self, train_loader: DataLoader, net: BaseNet, eps=0.1):
        """Initialize hypersphere center c as the mean from an initial forward pass on the data."""
        n_samples = 0
        c = torch.zeros(net.rep_dim, device=self.device)

        net.eval()
        with torch.no_grad():
            for data in train_loader:
                # get the inputs of the batch
                inputs, _, _ = data
                inputs = inputs.to(self.device)
                outputs = net(inputs)
                n_samples += outputs.shape[0]
                c += torch.sum(outputs, dim=0)

        c /= n_samples

        # If c_i is too close to 0, set to +-eps. Reason: a zero unit can be trivially matched with zero weights.
        c[(abs(c) < eps) & (c < 0)] = -eps
        c[(abs(c) < eps) & (c > 0)] = eps

        return c
Example #10
    def train(self, dataset: BaseADDataset, net: BaseNet):
        logger = logging.getLogger()

        # Set device for network
        net = net.to(self.device)

        # Get train data loader
        train_loader, _ = dataset.loaders(batch_size=self.batch_size,
                                          num_workers=self.n_jobs_dataloader)

        # Set optimizer (Adam optimizer for now)
        optimizer = optim.Adam(net.parameters(),
                               lr=self.lr,
                               weight_decay=self.weight_decay,
                               amsgrad=self.optimizer_name == 'amsgrad')

        # Set learning rate scheduler
        scheduler = optim.lr_scheduler.MultiStepLR(
            optimizer, milestones=self.lr_milestones, gamma=0.1)

        # Initialize hypersphere center c (if c not loaded)
        if self.c is None:
            logger.info('Initializing center c...')

            # Note that c holds one center coordinate per unit of the network's final output layer.
            # Also, c is not updated during training; it is computed only once here at the start,
            # which is presumably why the autoencoder pretraining beforehand is required(?)
            self.c = self.init_center_c(train_loader, net)
            logger.info('Center c initialized.')

        # The computed c has size 32; this run uses a batch size of 200

        # Training
        logger.info('Starting training...')
        start_time = time.time()
        net.train()
        for epoch in range(self.n_epochs):

            scheduler.step()
            if epoch in self.lr_milestones:
                logger.info('  LR scheduler: new learning rate is %g' %
                            float(scheduler.get_lr()[0]))

            loss_epoch = 0.0
            n_batches = 0
            epoch_start_time = time.time()
            for data in train_loader:
                # data unpacks into the images, their labels (all 0 here), and the images' indices in the dataset
                inputs, _, _ = data
                inputs = inputs.to(self.device)

                # Zero the network parameter gradients
                optimizer.zero_grad()

                # Update network parameters via backpropagation: forward + backward + optimize
                outputs = net(inputs)
                # outputs has shape [200, 32], where 200 is the batch size
                # dist sums the squared differences over the 32 output dimensions for each sample,
                # i.e. each sample's squared distance to the center in the 32-dimensional output space,
                # so dist has size 200; taking the mean of dist averages the objective over the n samples

                # This corresponds to the loss in the paper
                dist = torch.sum((outputs - self.c)**2, dim=1)

                if self.objective == 'soft-boundary':
                    scores = dist - self.R**2
                    loss = self.R**2 + (1 / self.nu) * torch.mean(
                        torch.max(torch.zeros_like(scores), scores))
                else:
                    # This is the one-class case used here
                    loss = torch.mean(dist)
                loss.backward()
                optimizer.step()

                # Update hypersphere radius R on mini-batch distances
                if (self.objective == 'soft-boundary') and (
                        epoch >= self.warm_up_n_epochs):
                    self.R.data = torch.tensor(get_radius(dist, self.nu),
                                               device=self.device)

                loss_epoch += loss.item()
                n_batches += 1

            # log epoch statistics
            epoch_train_time = time.time() - epoch_start_time
            logger.info('  Epoch {}/{}\t Time: {:.3f}\t Loss: {:.8f}'.format(
                epoch + 1, self.n_epochs, epoch_train_time,
                loss_epoch / n_batches))

        self.train_time = time.time() - start_time
        logger.info('Training time: %.3f' % self.train_time)

        logger.info('Finished training.')

        return net
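
The soft-boundary branch above updates the radius via get_radius(dist, self.nu), which is not defined anywhere in the listing. One reasonable definition, consistent with the soft-boundary objective (and with how Deep SVDD implementations typically solve for R), is the (1 - nu)-quantile of the per-sample distances; a sketch under that assumption:

import numpy as np
import torch

def get_radius(dist: torch.Tensor, nu: float) -> float:
    """Radius R as the (1 - nu)-quantile of the distances to the center.
    dist holds squared distances, so take the square root first."""
    return np.quantile(np.sqrt(dist.clone().data.cpu().numpy()), 1 - nu)
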
Example #11
    def fit(self, dataset: BaseDataset, net: BaseNet):
        logger = logging.getLogger()

        # Get train data loader
        train_loader, _ = dataset.loaders(batch_size=self.batch_size,
                                          num_workers=self.n_jobs_dataloader)

        # Set optimizer (Adam optimizer for now)
        optimizer = optim.Adam(net.parameters(),
                               lr=self.lr,
                               weight_decay=self.weight_decay,
                               amsgrad=self.optimizer_name == 'amsgrad')

        # Set learning rate scheduler
        scheduler = optim.lr_scheduler.MultiStepLR(
            optimizer, milestones=self.lr_milestones, gamma=0.1)

        # Early stopping

        # Loss criterion
        self.criterion = nn.CrossEntropyLoss()

        # Training
        logger.info('Starting training...')
        start_time = time.time()
        net.train()
        for epoch in range(self.n_epochs):

            scheduler.step()
            if epoch in self.lr_milestones:
                logger.info('  LR scheduler: new learning rate is %g' %
                            float(scheduler.get_lr()[0]))

            loss_epoch = 0.0
            success_rate = 0
            n_batches = 0
            epoch_start_time = time.time()
            for data in train_loader:
                inputs, y = data

                # Zero the network parameter gradients
                optimizer.zero_grad()

                # Update network parameters via backpropagation: forward + backward + optimize
                outputs = self.predict(inputs, net)
                loss = self.criterion(outputs, y)
                loss.backward()
                optimizer.step()

                loss_epoch += loss.item()
                success_rate += inputs.size(0) - torch.nonzero(
                    torch.max(outputs, dim=1)[1] - y).size(0)
                n_batches += 1

            # log epoch statistics
            epoch_train_time = time.time() - epoch_start_time
            logger.info(
                '  Epoch {}/{}\t Time: {:.3f}\t Loss: {:.8f}\t Success Rate:{:.5f}'
                .format(epoch + 1, self.n_epochs, epoch_train_time,
                        loss_epoch / n_batches,
                        success_rate / train_loader.dataset.len))

        self.train_time = time.time() - start_time
        logger.info('Training time: %.3f' % self.train_time)

        logger.info('Finished training.')

        return net
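
The success_rate update in the loop above counts correct predictions by subtracting the argmax indices from y and counting non-zero entries. An equivalent and more direct formulation, shown on toy tensors:

import torch

outputs = torch.tensor([[2.0, 0.5], [0.1, 1.2], [3.0, 0.2]])  # toy logits
y = torch.tensor([0, 1, 1])

# Same count as: inputs.size(0) - torch.nonzero(torch.max(outputs, dim=1)[1] - y).size(0)
n_correct = (outputs.argmax(dim=1) == y).sum().item()
print(n_correct)  # 2
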
    def train(self, dataset: BaseADDataset, net: BaseNet):
        logger = logging.getLogger()

        # Set device for network
        net = net.to(self.device)

        # Get train data loader
        train_loader, _ = dataset.loaders(batch_size=self.batch_size,
                                          num_workers=self.n_jobs_dataloader)

        # Set optimizer (Adam optimizer for now)
        optimizer = optim.Adam(net.parameters(),
                               lr=self.lr,
                               weight_decay=self.weight_decay,
                               amsgrad=self.optimizer_name == 'amsgrad')

        # Set learning rate scheduler
        scheduler = optim.lr_scheduler.MultiStepLR(
            optimizer, milestones=self.lr_milestones, gamma=0.1)

        # Initialize hypersphere center c (if c not loaded)
        if self.c is None:
            logger.info('Initializing center c...')
            self.c = self.init_center_c(train_loader, net)
            logger.info('Center c initialized.')

        # Training
        logger.info('Starting training...')
        start_time = time.time()
        net.train()
        for epoch in range(self.n_epochs):

            scheduler.step()
            if epoch in self.lr_milestones:
                logger.info('  LR scheduler: new learning rate is %g' %
                            float(scheduler.get_lr()[0]))

            loss_epoch = 0.0
            n_batches = 0
            epoch_start_time = time.time()
            for data in train_loader:
                inputs, _, _ = data
                inputs = inputs.to(self.device)

                # Zero the network parameter gradients
                optimizer.zero_grad()

                # Update network parameters via backpropagation: forward + backward + optimize
                outputs = net(inputs)
                dist = torch.sum((outputs - self.c)**2, dim=1)
                if self.objective == 'soft-boundary':
                    scores = dist - self.R**2
                    loss = self.R**2 + (1 / self.nu) * torch.mean(
                        torch.max(torch.zeros_like(scores), scores))
                else:
                    loss = torch.mean(dist)
                loss.backward()
                optimizer.step()

                # Update hypersphere radius R on mini-batch distances
                if (self.objective == 'soft-boundary') and (
                        epoch >= self.warm_up_n_epochs):
                    self.R.data = torch.tensor(get_radius(dist, self.nu),
                                               device=self.device)

                loss_epoch += loss.item()
                n_batches += 1

            # log epoch statistics
            epoch_train_time = time.time() - epoch_start_time
            logger.info('  Epoch {}/{}\t Time: {:.3f}\t Loss: {:.8f}'.format(
                epoch + 1, self.n_epochs, epoch_train_time,
                loss_epoch / n_batches))

        self.train_time = time.time() - start_time
        logger.info('Training time: %.3f' % self.train_time)

        logger.info('Finished training.')

        return net
Example #13
    def fit(self, dataset: BaseDataset, net: BaseNet):
        logger = logging.getLogger()

        # Get train data loader
        train_loader, _ = dataset.loaders(batch_size=self.batch_size,
                                          num_workers=self.n_jobs_dataloader)

        # Set optimizer (Adam optimizer for now)
        optimizer = optim.Adam(net.parameters(),
                               lr=self.lr,
                               weight_decay=self.weight_decay,
                               amsgrad=self.optimizer_name == 'amsgrad')

        # Set learning rate scheduler
        scheduler = optim.lr_scheduler.MultiStepLR(
            optimizer, milestones=self.lr_milestones, gamma=0.1)

        # Early stopping

        # Loss criterion
        self.criterion = self.weighted_mse
        #self.criterion = nn.MSELoss()
        # Training
        logger.info('Starting training...')
        start_time = time.time()
        net.train()
        for epoch in range(self.n_epochs):

            scheduler.step()
            if epoch in self.lr_milestones:
                logger.info('  LR scheduler: new learning rate is %g' %
                            float(scheduler.get_lr()[0]))

            loss_epoch = 0.0
            n_batches = 0
            dist = 0
            epoch_start_time = time.time()
            for data in train_loader:
                inputs, y = data

                # Zero the network parameter gradients
                optimizer.zero_grad()

                # Update network parameters via backpropagation: forward + backward + optimize
                outputs = self.predict(inputs, net)
                loss = self.criterion(outputs, y)
                loss.backward()
                optimizer.step()

                loss_epoch += loss.item()
                n_batches += 1

                diff = torch.abs((outputs - y) / (y + 1e-3))
                dist += torch.mean(diff)

            # log epoch statistics
            epoch_train_time = time.time() - epoch_start_time
            logger.info(
                '  Epoch {}/{}\t Time: {:.3f}\t Loss: {:.8f}\t Accuracy: {:.4f}'
                .format(epoch + 1, self.n_epochs, epoch_train_time,
                        loss_epoch / n_batches, 1 - dist.item() / n_batches))
        self.train_time = time.time() - start_time
        logger.info('Training time: %.3f' % self.train_time)

        logger.info('Finished training.')

        return net
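
The "Accuracy" logged by Example #13 is 1 minus the mean relative error |outputs - y| / (y + 1e-3), averaged over batches. A standalone illustration of that metric on toy tensors:

import torch

outputs = torch.tensor([0.9, 2.2, 3.0])
y = torch.tensor([1.0, 2.0, 3.0])

# Mean relative error, as accumulated in the epoch loop of Example #13
rel_err = torch.mean(torch.abs((outputs - y) / (y + 1e-3)))
print(1 - rel_err.item())  # roughly 0.93
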
    def validate(self, dataset: BaseADDataset, net: BaseNet, best_metric):
        logger = logging.getLogger()

        # Get validation loaders
        val_all_loader, val_normal_loader = dataset.val_loaders(
            batch_size=self.batch_size, num_workers=self.n_jobs_dataloader)

        n_batches_all = 0
        accuracy_all = 0
        net.eval()
        start_time = time.time()
        with torch.no_grad():
            for data_all in val_all_loader:
                inputs, labels, idx = data_all
                inputs = inputs.to(self.device)
                labels = labels.to(self.device)
                outputs = net(inputs)
                dist = torch.sum((outputs - self.c)**2, dim=1)
                if self.objective == 'soft-boundary':
                    scores = dist - self.R**2
                else:
                    scores = dist - 1

                classifications = scores.clone()
                classifications[classifications <= 0] = 0
                classifications[classifications > 0] = 1
                accuracy = torch.mean(
                    (classifications == labels.float()).float())
                accuracy_all += accuracy.item()
                n_batches_all += 1

            loss_val = 0
            n_batches_normal = 0
            accuracy_normal = 0
            for data_normal in val_normal_loader:
                inputs, labels, idx = data_normal
                inputs = inputs.to(self.device)
                labels = labels.to(self.device)
                outputs = net(inputs)
                dist = torch.sum((outputs - self.c)**2, dim=1)
                if self.objective == 'soft-boundary':
                    scores = dist - self.R**2
                    loss = self.R**2 + (1 / self.nu) * torch.mean(
                        torch.max(torch.zeros_like(scores), scores))
                else:
                    scores = dist - 1
                    loss = torch.mean(dist)

                classifications = scores.clone()
                classifications[classifications <= 0] = 0
                classifications[classifications > 0] = 1
                accuracy = torch.mean(
                    (classifications == labels.float()).float())
                accuracy_normal += accuracy.item()
                loss_val += loss.item()
                n_batches_normal += 1

        # log val statistics
        val_time = time.time() - start_time
        logger.info(
            '  Validation Time: {:.3f}\t Acc_all: {:.8f}\t Acc_Normal: {:.8f}\t Loss_Normal: {:.8f}'
            .format(val_time, accuracy_all / n_batches_all,
                    accuracy_normal / n_batches_normal,
                    loss_val / n_batches_normal))