import math
import os

import numpy as np
import torch
from torch.utils.data import DataLoader

# Common imports for the evaluation routines below; datautil, matloader,
# npmetrics and torch_util are project-local helpers (import paths may differ
# in the actual package layout).
import datautil
import matloader
import npmetrics
import torch_util


def run_test(model, dloader, test_data, result):
    model.eval()
    with torch.no_grad():
        # Run the whole test set as a single batch.
        for item in dloader.batch_fv(test_data, len(test_data)):
            genes, nimgs, labels, timesteps = item
            inputs = torch.from_numpy(nimgs).type(torch.cuda.FloatTensor)
            pd = model(inputs)
            test_pd = torch_util.threshold_tensor_batch(pd)
            np_pd = test_pd.data.cpu().numpy()
            npmetrics.write_metrics(labels, np_pd, result)
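
# torch_util.threshold_tensor_batch is not shown in this section. A minimal
# sketch of one plausible implementation (an assumption for illustration, not
# the project's actual helper): binarize per-sample scores at a fixed cutoff,
# falling back to the top-scoring label when nothing clears the cutoff so every
# sample gets at least one predicted class.
def threshold_tensor_batch_sketch(pd, cutoff=0.5):
    """Hypothetical per-batch thresholding for multi-label predictions."""
    binary = (pd >= cutoff).float()
    empty = binary.sum(dim=1) == 0
    if empty.any():
        top = pd[empty].argmax(dim=1)
        binary[empty.nonzero(as_tuple=True)[0], top] = 1.0
    return binary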
def run_test(rnn, test_data, result):
    rnn.eval()
    with torch.no_grad():
        # for item in fvloader.batch_fv(test_data, len(test_data)):
        for item in matloader.batch_fv(test_data, len(test_data)):
            genes, nimgs, labels, timesteps = item
            # Sort samples by decreasing sequence length before the RNN pass.
            idx = np.argsort(np.array(-timesteps))
            s_nimgs = torch.from_numpy(np.stack(nimgs[idx])).type(
                torch.cuda.FloatTensor)
            s_timesteps = timesteps[idx]
            out_pack, hidden = rnn(s_nimgs, s_timesteps)
            test_pd = torch_util.threshold_tensor_batch(out_pack)
            npmetrics.write_metrics(labels[idx], test_pd.data.cpu().numpy(),
                                    result)
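
# The descending sort by timestep count matches what
# torch.nn.utils.rnn.pack_padded_sequence expects when enforce_sorted=True
# (its default). A self-contained illustration with a made-up padded batch;
# the shapes and the GRU here are placeholders, not the project's model:
from torch.nn.utils.rnn import pack_padded_sequence

padded = torch.randn(3, 5, 8)       # 3 sequences, max length 5, feature dim 8
lengths = torch.tensor([5, 3, 2])   # already in descending order
packed = pack_padded_sequence(padded, lengths, batch_first=True)
gru = torch.nn.GRU(input_size=8, hidden_size=16, batch_first=True)
out_pack, hidden = gru(packed)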
def evaluate_ilocator():
    test_genes = datautil.get_test_gene_list(size=0)
    pd = []
    gt = []
    d = datautil.load_enhanced_label()
    for g in test_genes:
        pics = datautil.get_gene_pics(g)
        g_scores = []
        for pic in [x.replace(".", "_") for x in pics]:
            spath = os.path.abspath(
                os.path.join("util/testimgs_ilocator_result/%s.txt" % pic))
            with open(spath, 'r') as f:
                score = [float(x) for x in f.readline().strip().split()]
            # Skip images whose score file contains NaN values.
            if not any(math.isnan(x) for x in score):
                g_scores.append(score)
        # Average image-level scores into one gene-level prediction.
        g_scores = np.mean(np.stack(g_scores), axis=0)
        pd.append(g_scores)
        gene_label = np.zeros(6)
        for l in d[g]:
            gene_label[l] = 1
        gt.append(gene_label)
    gt = np.stack(gt)
    pd = np.stack(pd)
    # Rearrange prediction columns to match the label order.
    idx = np.array([5, 0, 6, 4, 2, 1])
    pd = pd[:, idx]
    # Threshold at 0, but fall back to the per-gene maximum when all scores
    # are negative, so every gene keeps at least one predicted location.
    thr = pd.max(axis=1)
    zeros = np.zeros(thr.shape[0])
    thr = np.min(np.stack([zeros, thr], axis=1), axis=1)
    pd = np.greater_equal(pd, thr[:, np.newaxis]).astype(int)
    npmetrics.write_metrics(gt, pd, "util/ilocator.txt")
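
# Toy walk-through of the per-gene aggregation above (illustration only; the
# file layout, class count and column order come from the function, the
# numbers here are made up):
image_scores = np.array([[0.1, 0.7, 0.2],   # scores for image 1 of a gene
                         [0.3, 0.5, 0.4]])  # scores for image 2
gene_score = image_scores.mean(axis=0)      # -> array([0.2, 0.6, 0.3])

label_indices = [0, 2]                      # annotated locations for the gene
gene_label = np.zeros(3)
gene_label[label_indices] = 1               # -> array([1., 0., 1.])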
def run_test(model, test_dataset, result, dcfg):
    model.eval()
    with torch.no_grad():
        test_loader = DataLoader(test_dataset, batch_size=dcfg['bsize'],
                                 shuffle=False, num_workers=dcfg['nworker'],
                                 collate_fn=dcfg['collate'])
        np_label = []
        np_pd = []
        for i_batch, sample_batched in enumerate(test_loader):
            (img, label) = sample_batched
            inputs = img.type(torch.cuda.FloatTensor)
            gt = label.type(torch.cuda.FloatTensor)
            pd = model(inputs)
            test_pd = torch_util.threshold_tensor_batch(pd)
            np_pd.append(test_pd.data.cpu().numpy())
            np_label.append(gt.data.cpu().numpy())
        np_label = np.concatenate(np_label)
        np_pd = np.concatenate(np_pd)
        npmetrics.write_metrics(np_label, np_pd, result)
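
# Sketch of how this DataLoader-based variant might be driven. The dataset,
# shapes and dcfg values below are placeholders (assumptions), and a CUDA
# device is required because the function casts inputs to
# torch.cuda.FloatTensor:
from torch.utils.data import TensorDataset

toy_imgs = torch.randn(8, 3, 64, 64)
toy_labels = torch.randint(0, 2, (8, 10)).float()
toy_dataset = TensorDataset(toy_imgs, toy_labels)

dcfg = {'bsize': 4, 'nworker': 0, 'collate': None}
# run_test(model, toy_dataset, "result/test_metrics.txt", dcfg)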
def run_test(model, dloader, test_data, result):
    model.eval()
    with torch.no_grad():
        # Seed with a dummy zero row; it is stripped again after the loop.
        gt = np.zeros((1, 10), dtype=int)
        pd = np.zeros((1, 10), dtype=int)
        for item in dloader.batch_fv(test_data, batch=1):
            genes, nimgs, labels, timesteps = item
            inputs = torch.from_numpy(nimgs).type(torch.cuda.FloatTensor)
            apd = model(inputs)
            test_pd = torch_util.threshold_tensor_batch(apd)
            np_pd = test_pd.data.cpu().numpy()
            gt = np.concatenate((gt, labels))
            pd = np.concatenate((pd, np_pd))
        gt = gt[1:, :]
        pd = pd[1:, :]
        npmetrics.write_metrics(gt, pd, result)
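
# Design note: the dummy-row seeding above is equivalent to accumulating
# batches in Python lists and concatenating once, as the DataLoader-based
# variant does. A tiny self-contained comparison with made-up arrays:
batches = [np.ones((2, 10)), np.zeros((3, 10))]

seeded = np.zeros((1, 10))
for b in batches:
    seeded = np.concatenate((seeded, b))
seeded = seeded[1:, :]

collected = np.concatenate(batches)
assert np.array_equal(seeded, collected)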