import time

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from tqdm import tqdm

# Project-local helpers (import paths assumed from usage below): pytutils
# provides to_np()/argmax(); AverageMeter tracks running batch timings.
import pytutils
from logger import AverageMeter


def predict(self, data_loader):
    """Compute class probabilities for every sample in the loader.

    Returns (Ids, Yhat), where Yhat[k] holds the softmax scores of sample Ids[k].
    """
    # Upper bound on the sample count; trimmed to k at the end because the
    # last batch may be smaller than batch_size.
    n = len(data_loader) * data_loader.batch_size
    Yhat = np.zeros((n, self.num_output_channels))
    Ids = np.zeros((n, 1))
    k = 0

    # switch to evaluate mode
    self.net.eval()
    with torch.no_grad():
        for i, (Id, inputs) in enumerate(tqdm(data_loader)):
            x = inputs.cuda() if self.cuda else inputs
            # forward pass + softmax over the class dimension
            yhat = self.net(x)
            yhat = F.softmax(yhat, dim=1)
            yhat = pytutils.to_np(yhat)
            for j in range(yhat.shape[0]):
                Yhat[k, :] = yhat[j]
                Ids[k] = Id[j]
                k += 1

    # drop the unused tail of the preallocated buffers
    Yhat = Yhat[:k, :]
    Ids = Ids[:k]
    return Ids, Yhat
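
# Usage sketch for predict(). `model` and `dataset` are illustrative names,
# not part of this module; the loader must yield (Id, inputs) batches, since
# that is what the loop above unpacks.
#
#   from torch.utils.data import DataLoader
#   loader = DataLoader(dataset, batch_size=64, shuffle=False)
#   ids, yhat = model.predict(loader)
#   pred = yhat.argmax(axis=1)  # hard class decision per sample
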
def test(self, data_loader):
    """Run inference over the loader, collecting per-batch masks and ids."""
    masks = []
    ids = []

    # switch to evaluate mode
    self.net.eval()
    with torch.no_grad():
        for i, sample in enumerate(tqdm(data_loader)):
            # get data (image, metadata); the id is the first metadata column
            inputs, meta = sample['image'], sample['metadata']
            idd = meta[:, 0]
            x = inputs.cuda() if self.cuda else inputs
            # forward pass + softmax over the class dimension
            yhat = self.net(x)
            yhat = F.softmax(yhat, dim=1)
            yhat = pytutils.to_np(yhat)
            masks.append(yhat)
            ids.append(idd)

    return ids, masks
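
# Usage sketch for the mask-collecting test(). Each element of `masks` is one
# batch of shape (B, C, H, W), so a single array is recovered by concatenating
# along the batch axis; `model` and `loader` are illustrative names.
#
#   ids, masks = model.test(loader)
#   scores = np.concatenate(masks, axis=0)  # (N, C, H, W) softmax scores
#   labels = scores.argmax(axis=1)          # (N, H, W) per-pixel classes
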
def test(self, data_loader):
    """Evaluate the network, returning (Yhat, Y): softmax scores and labels."""
    n = len(data_loader) * data_loader.batch_size
    Yhat = np.zeros((n, self.num_output_channels))
    Y = np.zeros((n, 1))
    k = 0

    # switch to evaluate mode
    self.net.eval()
    with torch.no_grad():
        for i, sample in enumerate(tqdm(data_loader)):
            # get data (image, label); labels are one-hot, so take the argmax
            x, y = sample['image'], sample['label'].argmax(1).long()
            x = x.cuda() if self.cuda else x
            # forward pass + softmax over the class dimension
            yhat = self.net(x)
            yhat = F.softmax(yhat, dim=1)
            yhat = pytutils.to_np(yhat)
            for j in range(yhat.shape[0]):
                Y[k] = y[j]
                Yhat[k, :] = yhat[j]
                k += 1

    Yhat = Yhat[:k, :]
    Y = Y[:k]
    return Yhat, Y
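
# Usage sketch for test(): turning (Yhat, Y) into an accuracy figure.
# `model` and `loader` are illustrative names.
#
#   yhat, y = model.test(loader)
#   acc = (yhat.argmax(axis=1) == y.ravel()).mean()
#   print('accuracy: {:.4f}'.format(acc))
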
def predict(self, data_loader):
    """Compute class probabilities for every sample in the loader.

    Returns (Ids, Yhat), where Yhat[k] holds the softmax scores of sample Ids[k].
    """
    n = len(data_loader) * data_loader.batch_size
    Yhat = np.zeros((n, self.num_output_channels))
    Ids = np.zeros((n, 1))
    k = 0

    # switch to evaluate mode
    self.net.eval()
    with torch.no_grad():
        for i, (Id, inputs) in enumerate(tqdm(data_loader)):
            x = inputs.cuda() if self.cuda else inputs
            # torch.no_grad() already disables autograd, so the deprecated
            # Variable(..., volatile=True) wrapper is not needed here.
            yhat = self.net(x)
            yhat = F.softmax(yhat, dim=1)
            yhat = pytutils.to_np(yhat)
            for j in range(yhat.shape[0]):
                Yhat[k, :] = yhat[j]
                Ids[k] = Id[j]
                k += 1

    Yhat = Yhat[:k, :]
    Ids = Ids[:k]
    return Ids, Yhat
def __call__(self, image):
    """Single-image inference: return softmax scores as a NumPy array."""
    # switch to evaluate mode
    self.net.eval()
    with torch.no_grad():
        x = image.cuda() if self.cuda else image
        # nn.Softmax needs an explicit dim; class scores live on dim=1
        msoft = nn.Softmax(dim=1)
        yhat = msoft(self.net(x))
        yhat = pytutils.to_np(yhat)
    return yhat
def __call__(self, image):
    """Single-image inference for segmentation: return an HxWxC score map."""
    # switch to evaluate mode
    self.net.eval()
    with torch.no_grad():
        x = image.cuda() if self.cuda else image
        yhat = F.softmax(self.net(x), dim=1)
        # NCHW -> HWCN, then drop the (single) batch axis: an HxWxC score map
        yhat = pytutils.to_np(yhat).transpose(2, 3, 1, 0)[..., 0]
    return yhat
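
# Usage sketch for the segmentation __call__(). The network expects a batched
# NCHW tensor, so a single image needs a leading batch axis; `model` is an
# illustrative name and the 3x256x256 shape is arbitrary.
#
#   image = torch.randn(1, 3, 256, 256)  # one RGB image, NCHW
#   score_map = model(image)             # (256, 256, C) after the transpose
#   mask = score_map.argmax(axis=2)      # per-pixel class labels
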
def __call__(self, image):
    """Single-image inference: return softmax scores as a NumPy array."""
    # switch to evaluate mode
    self.net.eval()
    with torch.no_grad():
        x = image.cuda() if self.cuda else image
        # torch.no_grad() already disables autograd, so the deprecated
        # Variable(..., volatile=True) wrapper is not needed here.
        msoft = nn.Softmax(dim=1)
        yhat = msoft(self.net(x))
        yhat = pytutils.to_np(yhat)
    return yhat
def representation(self, data_loader): """" Representation -data_loader: simple data loader for image """ # switch to evaluate mode self.net.eval() n = len(data_loader) * data_loader.batch_size k = 0 # embebed features embX = np.zeros([n, self.net.dim]) embY = np.zeros([n, 1]) batch_time = AverageMeter() end = time.time() for i, sample in enumerate(data_loader): # get data (image, label) x, y = sample['image'], sample['label'].argmax(1).long() x = x.cuda() if self.cuda else x # representation emb = self.net.representation(x) emb = pytutils.to_np(emb) for j in range(emb.shape[0]): embX[k, :] = emb[j, :] embY[k] = y[j] k += 1 # measure elapsed time batch_time.update(time.time() - end) end = time.time() print( 'Representation: |{:06d}/{:06d}||{batch_time.val:.3f} ({batch_time.avg:.3f})|' .format(i, len(data_loader), batch_time=batch_time)) embX = embX[:k, :] embY = embY[:k] return embX, embY
def test(self, data_loader):
    """Evaluate the network, returning (Yhat, Y): softmax scores and labels."""
    n = len(data_loader) * data_loader.batch_size
    Yhat = np.zeros((n, self.num_output_channels))
    Y = np.zeros((n, 1))
    k = 0

    # switch to evaluate mode
    self.net.eval()
    with torch.no_grad():
        for i, sample in enumerate(tqdm(data_loader)):
            # get data (image, label); labels are one-hot, so take the argmax
            inputs = sample['image']
            targets = pytutils.argmax(sample['label'])
            x = inputs.cuda() if self.cuda else inputs
            # torch.no_grad() already disables autograd, so the deprecated
            # Variable(..., volatile=True) wrapper is not needed here.
            yhat = self.net(x)
            yhat = F.softmax(yhat, dim=1)
            yhat = pytutils.to_np(yhat)
            for j in range(yhat.shape[0]):
                Y[k] = targets[j]
                Yhat[k, :] = yhat[j]
                k += 1

    Yhat = Yhat[:k, :]
    Y = Y[:k]
    return Yhat, Y