def test(model, opt, device):
    """Generate predictions for the test set.

    Writes one line per test record to ./submit/subB_1.txt: the original
    label line followed by a tab-separated arrhythmia name for every
    class whose score exceeds 0.5.

    :param model: trained network, already on *device*
    :param opt: options object with arrythmia_path / test_label / test_root
    :param device: torch device used for inference
    """
    model.eval()
    # Reverse map: class index -> arrhythmia name, for the output file.
    idx2name = {v: k for k, v in name2index(opt.arrythmia_path).items()}
    sub_dir = './submit'
    if not os.path.exists(sub_dir):
        os.makedirs(sub_dir)
    sub_file = '%s/subB_%s.txt' % (sub_dir, '1')
    with open(sub_file, 'w', encoding='utf-8') as fout, torch.no_grad():
        with open(opt.test_label, 'r', encoding='utf-8') as f:
            lines = f.readlines()
        for line in tqdm(lines):
            fout.write(line.strip('\n'))
            record_id = line.split('\t')[0]
            signal = pd.read_csv(os.path.join(opt.test_root, record_id),
                                 sep=' ').values
            batch = transform(signal).unsqueeze(0).to(device)
            # NOTE(review): raw model output is thresholded at 0.5 with no
            # sigmoid here -- presumably the model applies one; confirm.
            scores = model(batch).squeeze().cpu().numpy()
            for idx, score in enumerate(scores):
                if score > 0.5:
                    fout.write("\t" + idx2name[idx])
            fout.write('\n')
    return
def test_new(args):
    """Predict test-set labels with a ResMlp that also consumes per-record
    sex and age indices, writing results to a timestamped subA file.

    :param args: namespace with ``ckpt`` -- path to the checkpoint file
    """
    from dataset import transform
    from data_process import name2index, sex2Index, age2Index
    name2idx = name2index(config.arrythmia)
    # Reverse map: class index -> arrhythmia name, for the output file.
    idx2name = {idx: name for name, idx in name2idx.items()}
    # Lookup tables keyed by record id, built from the test label file.
    sex2idx = sex2Index(config.test_label)
    age2idx = age2Index(config.test_label)
    utils.mkdirs(config.sub_dir)
    # model
    model = ResMlp(ResMlpParams)
    model.load_state_dict(
        torch.load(args.ckpt, map_location='cpu')['state_dict'])
    model = model.to(device)
    model.eval()
    sub_file = '%s/subA_%s.txt' % (config.sub_dir, time.strftime("%Y%m%d%H%M"))
    fout = open(sub_file, 'w', encoding='utf-8')
    with torch.no_grad():
        for line in open(config.test_label, encoding='utf-8'):
            # Echo the input line, then append predicted names below.
            fout.write(line.strip('\n'))
            row = line.split('\t')
            id = row[0]
            sex, age = sex2idx[id], age2idx[id]
            file_path = os.path.join(config.test_dir, id)
            df = pd.read_csv(file_path, sep=' ').values
            # conv1d needs a 3-D tensor, hence the added batch dimension
            x = transform(df).unsqueeze(0).to(device)
            sex, age = sex.unsqueeze(0).to(device), age.unsqueeze(0).to(device)
            output = torch.sigmoid(model(x, sex, age)).squeeze().cpu().numpy()
            # Multi-label decision: every class above 0.5 is predicted.
            ixs = [i for i, out in enumerate(output) if out > 0.5]
            for i in ixs:
                fout.write("\t" + idx2name[i])
            fout.write('\n')
    fout.close()
def toplayer(args):
    """Dump the model's raw sigmoid probabilities for every *training*
    record to <args.ex>.txt (one tab-separated row per record), e.g. as
    features for a downstream stacking model.

    :param args: namespace with ``ckpt`` (checkpoint path) and ``ex``
        (output file stem)
    """
    from dataset import transform
    from data_process import name2index
    name2idx = name2index(config.arrythmia)
    idx2name = {idx: name for name, idx in name2idx.items()}
    utils.mkdirs(config.sub_dir)
    net = models.myecgnet()
    net.load_state_dict(
        torch.load(args.ckpt, map_location='cpu')['state_dict'])
    net = net.to(device)
    net.eval()
    out_path = '%s.txt' % args.ex
    with open(out_path, 'w', encoding='utf-8') as fout, torch.no_grad():
        for record in tqdm(open(config.train_label, encoding='utf-8')):
            fout.write(record.strip('\n'))
            record_id = record.split('\t')[0]
            signal = pd.read_csv(os.path.join(config.train_dir, record_id),
                                 sep=' ').values
            # 8 leads x 2500 samples, with batch and channel dims prepended.
            batch = transform(signal).reshape((1, 1, 8, 2500)).to(device)
            probs = torch.sigmoid(net(batch)).squeeze().cpu().numpy()
            fout.write(''.join("\t" + str(p) for p in probs))
            fout.write('\n')
def test(args):
    """Predict arrhythmia labels for the test set with myecgnet and write
    them to result.txt; predicted class indices are also echoed to stdout.

    :param args: namespace with ``ckpt`` -- path to the checkpoint file
    """
    from dataset import transform
    from data_process import name2index
    name2idx = name2index(config.arrythmia)
    idx2name = {idx: name for name, idx in name2idx.items()}
    utils.mkdirs(config.sub_dir)
    net = models.myecgnet()
    net.load_state_dict(
        torch.load(args.ckpt, map_location='cpu')['state_dict'])
    net = net.to(device)
    net.eval()
    sub_file = 'result.txt'
    with open(sub_file, 'w', encoding='utf-8') as fout, torch.no_grad():
        for record in open(config.test_label, encoding='utf-8'):
            fout.write(record.strip('\n'))
            record_id = record.split('\t')[0]
            signal = pd.read_csv(os.path.join(config.test_dir, record_id),
                                 sep=' ').values
            batch = transform(signal).unsqueeze(0).to(device)
            probs = torch.sigmoid(net(batch)).squeeze().cpu().numpy()
            # Multi-label decision at 0.5; echo indices for quick eyeballing.
            for idx, p in enumerate(probs):
                if p > 0.5:
                    fout.write("\t" + idx2name[idx])
                    print(idx, end=',')
            fout.write('\n')
    print('\n', end='')
def detect(Img):
    """Run the two-headed recognition model on *Img* and return the
    decoded label string."""
    logger.info(f"Detect: Detecting...")
    batch = transform(Img).unsqueeze(0)
    head1, head2 = model(batch)
    # Each head yields per-class scores; argmax picks the class index.
    indices = [
        np.argmax(head1.data.numpy()[0]),
        np.argmax(head2.data.numpy()[0]),
    ]
    result = LabeltoStr(indices)
    logger.info(f"Detect: Result {result}")
    return result
def test(args):
    """Test-set inference entry point.

    When ``config.kind == 2`` and the catboost path is enabled, delegates
    to :func:`top4_catboost_test`; otherwise loads the configured deep
    model and writes thresholded predictions to a timestamped subA file.

    :param args: namespace with ``ckpt`` -- path to the checkpoint file
    """
    if config.kind == 2 and config.top4_catboost:
        top4_catboost_test(args)  # catboost
    else:
        from dataset import transform
        from data_process import name2index
        name2idx = name2index(config.arrythmia)
        # Reverse map: class index -> arrhythmia name, for the output file.
        idx2name = {idx: name for name, idx in name2idx.items()}
        utils.mkdirs(config.sub_dir)
        # model
        model = getattr(models, config.model_name)(num_classes=config.num_classes, channel_size=config.channel_size)
        model.load_state_dict(
            torch.load(args.ckpt, map_location='cpu')['state_dict'])
        model = model.to(device)
        model.eval()
        sub_file = '%s/subA_%s.txt' % (config.sub_dir, time.strftime("%Y%m%d%H%M"))
        fout = open(sub_file, 'w', encoding='utf-8')
        print(sub_file)
        with torch.no_grad():
            for line in open(config.test_label, encoding='utf-8'):
                fout.write(line.strip('\n'))
                line = line.strip('\n')
                # Label line layout: id <TAB> age <TAB> sex [...]
                id = line.split('\t')[0]
                age = line.split('\t')[1]
                sex = line.split('\t')[2]
                # Missing age/sex are encoded with a -999 sentinel.
                if len(age) < 1:
                    age = '-999'
                age = int(age)
                sex = {'FEMALE': 0, 'MALE': 1, '': -999}[sex]
                file_path = os.path.join(config.test_dir, id)
                df = utils.read_csv(file_path, sep=' ', channel_size=config.channel_size)
                x = transform(df.values).unsqueeze(0).to(device)
                # Extra (age, sex) feature vector for kind-1 models.
                fr = torch.tensor([age, sex], dtype=torch.float32).unsqueeze(0).to(device)
                if config.kind == 1:
                    # kind 1: model also consumes the age/sex features.
                    output = torch.sigmoid(model(x, fr)).squeeze().cpu().numpy()
                elif config.kind == 2:
                    # kind 2: model returns two heads; only the first is
                    # used here, optionally restricted to the top-4 tags.
                    output, out2 = model(x)
                    output = torch.sigmoid(output).squeeze().cpu().numpy()
                    if config.top4_DeepNN:
                        output = output[config.top4_tag_list]
                else:
                    output = torch.sigmoid(model(x)).squeeze().cpu().numpy()
                # Multi-label decision: every class above 0.5 is predicted.
                ixs = [i for i, out in enumerate(output) if out > 0.5]
                for i in ixs:
                    fout.write("\t" + idx2name[i])
                fout.write('\n')
        fout.close()
def process_PIL_image(frame, do_corrections=True, clahe=None, table=None):
    """Grayscale *frame*, optionally apply gamma + CLAHE contrast
    corrections, and run the module-level transform.

    :param frame: numpy image array
    :param do_corrections: apply the LUT/CLAHE pipeline when True
    :param clahe: optional pre-built CLAHE operator (created on demand)
    :param table: optional 256-entry LUT (gamma 0.8 curve by default)
    :return: the transformed image tensor
    """
    if clahe is None:
        clahe = cv2.createCLAHE(clipLimit=1.5, tileGridSize=(8,8))
    if table is None:
        # Gamma curve (gamma = 0.8) stretched over the 0-255 range.
        table = 255.0*(np.linspace(0, 1, 256)**0.8)
    gray = Image.fromarray(frame).convert("L")
    if not do_corrections:
        return transform(gray)
    corrected = cv2.LUT(np.array(gray), table)
    corrected = clahe.apply(np.array(np.uint8(corrected)))
    return transform(Image.fromarray(corrected))
def test(args):
    """Run the 9-class model over the test set and append predicted label
    names to each input line of result.txt.

    :param args: namespace with ``ckpt`` -- path to the checkpoint file
    """
    from dataset import transform
    # Fixed 9-class label set.  The reverse map is derived from name2idx
    # instead of being hand-maintained, so the two can never drift apart.
    name2idx = {
        "AF": 0,
        "I-AVB": 1,
        "LBBB": 2,
        "Normal": 3,
        "PAC": 4,
        "PVC": 5,
        "RBBB": 6,
        "STD": 7,
        "STE": 8
    }
    idx2name = {idx: name for name, idx in name2idx.items()}
    # model
    model = getattr(models, config.model_name)()
    print(model)
    model.load_state_dict(
        torch.load(args.ckpt, map_location='cpu')['state_dict'])
    model = model.to(device)
    model.eval()
    sub_file = 'result.txt'
    # Context managers guarantee the output file is closed (and flushed)
    # even if a record fails to parse mid-run.
    with open(sub_file, 'w', encoding='utf-8') as fout, torch.no_grad():
        for line in tqdm(open(config.test_label, encoding='utf-8')):
            fout.write(line.strip('\n'))
            id = line.split('\t')[0]
            file_path = os.path.join(config.test_dir, id)
            df = pd.read_csv(file_path, sep=' ')
            # Derive the four missing limb leads from leads I and II.
            df['III'] = df['II'] - df['I']
            df['aVR'] = -(df['I'] + df['II']) / 2
            df['aVL'] = df['I'] - df['II'] / 2
            df['aVF'] = df['II'] - df['I'] / 2
            x = transform(df.values).unsqueeze(0).to(device)
            output = torch.sigmoid(model(x)).squeeze().cpu().numpy()
            # Multi-label decision: every class above 0.5 is predicted.
            ixs = [i for i, out in enumerate(output) if out > 0.5]
            for i in ixs:
                fout.write("\t" + idx2name[i])
            fout.write('\n')
def forward(self, input, itr=8):
    """Recurrent decoding over a frame sequence.

    Training: encode the whole clip once, then feed per-frame features
    through the recurrent decoder, returning (saliency, stacked outputs).
    Eval: start from the first frame only and autoregressively synthesize
    *itr* steps, rendering each next input from the previous output.

    :param input: 4-tuple; first element is a (t, c, h, w) frame tensor,
        last element is an image path in eval mode
    :param itr: number of autoregressive steps in eval mode
    """
    if self.training:
        images, _, _, _ = input
        t, c, h, w = images.size()
        assert (h, w) == (600, 800)
        result = list()
        hidden_c = None  # decoder recurrent state, carried across frames
        # features = self.encoder.features(images)#.data
        features, sal = self.encoder(images, layers=[4])
        features = features[-1]  # deepest encoder layer only
        # features = self.readout(features)
        for idx in range(t):
            # features = self.encoder.features(images[[idx]])
            # feat = Variable(features[[idx]].unsqueeze(1).data).cuda()
            # feat = features[[idx]].unsqueeze(1)#.cuda()
            # feat_copy = Variable(feat.unsqueeze(1)).cuda()
            feat = features[[idx]].unsqueeze(1)
            output, [_, hidden_c] = self.decoder(feat, hidden_c)
            result.append(output[0, 0])
        return sal, torch.stack(result)
    else:
        # eval mode --- supports batch?
        images, _, _, img = input
        t, c, h, w = images.size()
        assert (h, w) == (600, 800)
        img = Image.open(img)  #.resize((800,300))
        result = list()
        hidden_c = None
        image = images[[0]]  # seed with the first frame only
        for idx in range(itr):
            features, _ = self.encoder(image, layers=[4])
            # features = self.readout(features[-1])
            # NOTE(review): volatile is a legacy (pre-0.4) autograd flag.
            features = Variable(features[-1].data.unsqueeze(1),
                                volatile=True).cuda()
            output, [_, hidden_c] = self.decoder(features, hidden_c)
            result.append(output[0, 0])
            # Render the next input frame from the current prediction.
            image = self._eval_next_frame(
                img, output[0, 0, 0].data.cpu().numpy())
            image = Image.fromarray(image)
            image = transform(image)
            image = Variable(image.unsqueeze(0)).cuda()
        return torch.stack(result)
def predict_image(image):
    """Classify *image* and return its human-readable category name."""
    index_dict = {
        0: '1_asset_tags',
        1: '2_tapedrop',
        2: '4_sector_view',
        3: '3_tilt',
        4: '0_colored_cables'
    }
    # Batchify the transformed image and move it to the model's device.
    tensor = transform(image).float().unsqueeze_(0)
    logits = model(tensor.to(device))
    best = logits.data.cpu().numpy().argmax()
    return index_dict[best]
def forward(self, input, itr=15):
    """Recurrent decoding over a frame sequence.

    Training: encode the clip, decode frame-by-frame, return the stacked
    per-frame outputs.  Eval: seed with the first frame and autoregress
    for *itr* steps, rendering each next input from the previous output.

    :param input: 4-tuple (images, sal, target, path/img); images is a
        (t, c, h, w) tensor, last element is an image path in eval mode
    :param itr: number of autoregressive steps in eval mode
    """
    if self.training:
        images, sal, target, path = input
        t, c, h, w = images.size()
        assert (h, w) == (600, 800)
        result = list()
        hidden_c = None  # decoder recurrent state, carried across frames
        # .data detaches the encoder features from the autograd graph.
        features = self.encoder.features(images).data
        for idx in range(t):
            # features = self.encoder.features(images[[idx]])
            feat = Variable(features[[idx]].unsqueeze(1)).cuda()
            # feat_copy = Variable(feat.unsqueeze(1)).cuda()
            output, [_, hidden_c] = self.decoder(feat, hidden_c)
            result.append(output[0, 0])
        return torch.stack(result)
    else:
        # eval mode --- supports batch?
        images, sal, target, img = input
        t, c, h, w = images.size()
        # NOTE(review): eval expects a different resolution (300x400)
        # than training (600x800) -- confirm this asymmetry is intended.
        assert (h, w) == (300, 400)
        img = Image.open(img)
        result = list()
        hidden_c = None
        image = images[[0]]  # seed with the first frame only
        for idx in range(itr):
            features = self.encoder.features(image).data
            # NOTE(review): volatile is a legacy autograd flag; False here
            # vs True in the sibling forward -- confirm which is intended.
            features = Variable(features.unsqueeze(1), volatile=False).cuda()
            output, [_, hidden_c] = self.decoder(features, hidden_c)
            result.append(output[0, 0])
            # Render the next input frame from the current prediction.
            image = self._eval_next_frame(
                img, output[0, 0, 0].data.cpu().numpy())
            image = Image.fromarray(image)
            image = transform(image)
            image = Variable(image.unsqueeze(0)).cuda(0)
        return torch.stack(result)
def test_ensemble(args):
    """Average the sigmoid outputs of a k-fold model ensemble over the
    test set and write thresholded predictions to ./result.txt.

    :param args: namespace with ``ckpt`` -- directory containing one
        sub-directory per fold (``config.model_ckpts[fold]``), each
        holding a ``best_weight.pth`` checkpoint.
    """
    from dataset import transform
    from data_process import name2index, get_arrythmias, get_dict
    # arrythmias = get_arrythmias(config.arrythmia)
    # name2idx,idx2name = get_dict(arrythmias)
    name2idx = name2index(config.arrythmia)
    idx2name = {idx: name for name, idx in name2idx.items()}
    kfold = len(config.model_names)
    # model -- one per fold.
    # BUG FIX: ``config.model_names`` is a per-fold sequence (it is
    # len()-ed above and ``config.model_ckpts`` is indexed per fold), so
    # each fold must look up its own architecture name; passing the whole
    # sequence to getattr() raises TypeError.
    model = []
    for fold in range(kfold):
        model.append(getattr(models, config.model_names[fold])())
    for fold in range(kfold):
        model[fold].load_state_dict(
            torch.load(os.path.join(args.ckpt, config.model_ckpts[fold],
                                    "best_weight.pth"),
                       map_location='cpu')['state_dict'])
        model[fold] = model[fold].to(device)
        model[fold].eval()
    sub_file = './result.txt'
    # Context managers guarantee the output file is closed even on error.
    with open(sub_file, 'w', encoding='utf-8') as fout, torch.no_grad():
        for line in tqdm(open(config.test_label, encoding='utf-8')):
            fout.write(line.strip('\n'))
            id = line.split('\t')[0]
            file_path = os.path.join(config.test_dir, id)
            df = pd.read_csv(file_path, sep=' ')
            # Derive the four missing limb leads from leads I and II.
            df['III'] = df['II'] - df['I']
            df['aVR'] = -(df['I'] + df['II']) / 2
            df['aVL'] = df['I'] - df['II'] / 2
            df['aVF'] = df['II'] - df['I'] / 2
            x = transform(df.values).unsqueeze(0).to(device)
            # Mean of per-fold sigmoid probabilities.
            output = 0
            for fold in range(kfold):
                output += torch.sigmoid(model[fold](x)).squeeze().cpu().numpy()
            output = output / kfold
            # Multi-label decision: every class above 0.5 is predicted.
            ixs = [i for i, out in enumerate(output) if out > 0.5]
            for i in ixs:
                fout.write("\t" + idx2name[i])
            fout.write('\n')
def test(model_list, model_weight, opt, device):
    '''
    Generate predictions with a weighted ensemble of models.

    :param model_list: list of trained models, each called as
        model(data, age, sex)
    :param model_weight: list of per-model weights used for the weighted
        sum of outputs
    :param opt: options object with arrythmia_path / test_label / test_root
    :param device: torch device used for inference
    :return: None; writes ../prediction_result/result_1.txt
    '''
    model_weight = torch.Tensor(model_weight).to(device)
    name2idx = name2index(opt.arrythmia_path)
    # Reverse map: class index -> arrhythmia name, for the output file.
    idx2name = {idx: name for name, idx in name2idx.items()}
    sub_dir = '../prediction_result'
    if not os.path.exists(sub_dir):
        os.makedirs(sub_dir)
    sub_file = '%s/result_%s.txt' % (sub_dir, '1')
    with open(sub_file, 'w', encoding='utf-8') as fout:
        with torch.no_grad():
            with open(opt.test_label, 'r', encoding='utf-8') as f:
                lines = f.readlines()
            for line in tqdm(lines):
                fout.write(line.strip('\n'))
                # Label line layout: id <TAB> age <TAB> sex
                ind, age, sex = line[:-1].split('\t')
                file_path = os.path.join(opt.test_root, ind)
                df = pd.read_csv(file_path, sep=' ').values
                data = transform(df).unsqueeze(0)
                data = data.to(device)
                age = transfer_age(age)
                age = torch.Tensor(age).unsqueeze(0).to(device)
                sex = transfer_sex(sex)
                sex = torch.FloatTensor([sex]).to(device)
                output_list = []
                for index, model in enumerate(model_list):
                    outputs = model(data, age, sex)
                    weight = model_weight[index]
                    # In-place scaling of this model's fresh output tensor.
                    outputs *= weight
                    output_list.append(outputs)
                # Weighted sum over models (weights are assumed to sum
                # to 1 for this to be a mean -- TODO confirm).
                outputs_mean = torch.sum(torch.stack(output_list), dim=0)
                output = outputs_mean.squeeze().cpu().numpy()
                # Multi-label decision: every class above 0.5 is predicted.
                ixs = [i for i, out in enumerate(output) if out > 0.5]
                for i in ixs:
                    fout.write("\t" + idx2name[i])
                fout.write('\n')
    return
def test(args):
    """Predict test-set labels with a model that also consumes pickled
    per-record age/sex features, writing a timestamped subA file.

    :param args: namespace with ``ckpt`` -- path to the checkpoint file
    """
    from dataset import transform
    from data_process import name2index
    import pickle
    from tqdm import tqdm
    # Per-record demographics, keyed by record id without extension.
    test_age_sex = pickle.load(open(config.test_age_sex, 'rb'))
    idx2name = {v: k for k, v in name2index(config.arrythmia).items()}
    utils.mkdirs(config.sub_dir)
    # model
    net = getattr(models, config.model_name)()
    net.load_state_dict(torch.load(args.ckpt, map_location='cpu')['state_dict'])
    net = net.to(device)
    net.eval()
    sub_file = '%s/subA_%s.txt' % (config.sub_dir, time.strftime("%Y%m%d%H%M"))
    with open(sub_file, 'w', encoding='utf-8') as fout, torch.no_grad():
        for line in tqdm(open(config.test_label, encoding='utf-8')):
            fout.write(line.strip('\n'))
            record_id = line.split('\t')[0]
            signal = pd.read_csv(os.path.join(config.test_dir, record_id),
                                 sep=' ').values
            signal = add_4(signal)
            key = record_id.split('.')[0]
            demo = test_age_sex[key]
            age = torch.tensor(demo['age'].copy(),
                               dtype=torch.float).unsqueeze(0).to(device)
            sex = torch.tensor(demo['sex'].copy(),
                               dtype=torch.float).unsqueeze(0).to(device)
            x = transform(signal).unsqueeze(0).to(device)
            probs = torch.sigmoid(net(x, age, sex)).squeeze().cpu().numpy()
            # Multi-label decision at the 0.5 threshold.
            for idx, p in enumerate(probs):
                if p > 0.5:
                    fout.write("\t" + idx2name[idx])
            fout.write('\n')
def infer(img_path, save_output, model_weights):
    """Run landmark inference on a single image and save a scatter-plot
    visualisation of the predicted points.

    :param img_path: path of the input image
    :param save_output: path the matplotlib figure is written to
    :param model_weights: checkpoint file containing a ``state_dict`` entry
    """
    cfg = config.get_cfg_defaults()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = levelnet.Model()
    # map_location keeps GPU-saved checkpoints loadable on CPU-only hosts
    # (consistent with the other loaders in this codebase).
    model.load_state_dict(
        torch.load(model_weights, map_location=device)["state_dict"])
    model.to(device)
    model.eval()
    img = cv2.imread(img_path)
    # dataset.transform resizes to 256x256 and persists the result so it
    # can be re-read as a PIL image below.
    img = dataset.transform(img, [256, 256], save_img=True,
                            save_file_name='out.jpg')
    image = PIL.Image.open('out.jpg')
    trans = transforms.ToTensor()
    img = trans(image)
    img = img.unsqueeze(0)
    with torch.no_grad():
        output = model.forward(img.to(device))
    i = 0  # single-image batch: visualise the first (only) sample
    pred = tools.get_preds(np.array(output.cpu()), img_h=256, img_w=256)
    plt.figure()
    plt.imshow(img[i].permute(1, 2, 0))
    plt.scatter(pred[i][:, 0], pred[i][:, 1])
    plt.savefig(save_output)
def test(self):
    """Predict test-set labels with the configured model (weights read
    from ./best_w.pth) and write them to ./result.txt, echoing each
    record's predicted indices to stdout."""
    name2idx = name2index('./hf_round2_arrythmia.txt')
    # Reverse map: class index -> arrhythmia name, for the output file.
    idx2name = {idx: name for name, idx in name2idx.items()}
    print(idx2name)
    # utils.mkdirs(self.opt.sub_dir)
    # model
    # model_save_dir = self.opt.ckpt
    # best_w = torch.load(os.path.join(model_save_dir, self.opt.best_w))
    # best_w = torch.load('./best_w.pth')
    best_w = './best_w.pth'
    model = getattr(models, self.opt.model_name)()
    model.load_state_dict(
        torch.load(best_w, map_location='cpu')['state_dict'])
    model = model.to(self.device)
    model.eval()
    # sub_file = '%s/result.txt' % (self.opt.sub_dir)
    sub_file = './result.txt'
    fout = open(sub_file, 'w', encoding='utf-8')
    with torch.no_grad():
        for line in open(self.opt.test_label, encoding='utf-8'):
            fout.write(line.strip('\n'))
            id = line.split('\t')[0]
            file_path = os.path.join(self.opt.test_dir, id)
            # df = pd.read_csv(file_path, sep=' ').values
            df = pd.read_csv(file_path, sep=' ')
            # Augment the raw leads with engineered features before transform.
            df = add_feature(df).values
            x = transform(df).unsqueeze(0).to(self.device)
            output = torch.sigmoid(model(x)).squeeze().cpu().numpy()
            # Multi-label decision: every class above 0.5 is predicted.
            ixs = [i for i, out in enumerate(output) if out > 0.5]
            print(id, ixs)
            for i in ixs:
                # print(i, idx2name[i])
                # Guard against indices beyond the known label set.
                if i < len(idx2name):
                    fout.write("\t" + idx2name[i])
            fout.write('\n')
    print("writing finish.")
    fout.close()
def top4_catboost_test(args):
    """Test-set inference for the catboost pipeline.

    Optionally runs the deep model to produce DNN features, combines them
    with hand-crafted / R-peak features and demographics into a one-row
    DataFrame per record, then scores it with the loaded catboost model
    list and writes thresholded predictions to a timestamped subA file.

    :param args: namespace with ``ckpt`` -- path to the DNN checkpoint
    """
    print('top4_catboost_test')
    from dataset import transform
    from data_process import name2index
    name2idx = name2index(config.arrythmia)
    # Reverse map: class index -> arrhythmia name, for the output file.
    idx2name = {idx: name for name, idx in name2idx.items()}
    utils.mkdirs(config.sub_dir)
    if config.top4_DeepNN:
        # model
        model = getattr(models, config.model_name)(num_classes=config.num_classes, channel_size=config.channel_size)
        model.load_state_dict(
            torch.load(args.ckpt, map_location='cpu')['state_dict'])
        model = model.to(device)
        model.eval()
        print(config.model_name, args.ckpt)
    else:
        # Feature extraction proceeds without DNN features.
        model = None
        print('no', config.model_name)
    sub_file = '%s/subA_%s.txt' % (config.sub_dir, time.strftime("%Y%m%d%H%M"))
    print(sub_file)
    fout = open(sub_file, 'w', encoding='utf-8')
    if config.kind == 2:
        model_list = load_model_list(
            os.path.join(config.ckpt, config.top4_catboost_model))
    with torch.no_grad():
        for line in open(config.test_label, encoding='utf-8'):
            fout.write(line.strip('\n'))
            line = line.strip('\n')
            # Label line layout: id <TAB> age <TAB> sex [...]
            id = line.split('\t')[0]
            age = line.split('\t')[1]
            sex = line.split('\t')[2]
            # Missing age/sex are encoded with a -999 sentinel.
            if len(age) < 1:
                age = '-999'
            age = int(age)
            sex = {'FEMALE': 0, 'MALE': 1, '': -999}[sex]
            file_path = os.path.join(config.test_dir, id)
            df = utils.read_csv(file_path, sep=' ', channel_size=config.channel_size)
            fr = torch.tensor([age, sex], dtype=torch.float32)
            if config.top4_DeepNN:
                # Two-headed DNN: sigmoid scores plus an auxiliary head,
                # both flattened into the catboost feature row below.
                x = transform(df.values).unsqueeze(0).to(device)
                output, out1 = model(x)
                output = torch.sigmoid(output).squeeze().cpu().numpy()
                out1 = out1.squeeze().cpu().numpy()
                if config.top4_DeepNN_tag:
                    output = output[config.top4_tag_list]
            else:
                # No DNN: empty feature vectors keep the layout consistent.
                output, out1 = torch.zeros(0).numpy(), torch.ones(0).numpy()
            # Pre-computed R-peak feature file paired with this record.
            r_features_file = os.path.join(config.r_test_dir,
                                           id.replace('.txt', '.fea'))
            other_f = get_other_features(df, r_features_file)
            df_values = np.concatenate((output, out1, other_f, fr))
            # Column names mirror the concatenation order above.
            columnslist = []
            columnslist += ['dnn1_%d' % i for i in range(len(output))]
            columnslist += ['dnn2_%d' % i for i in range(len(out1))]
            # print('len_dnn_feature', len(columnslist))
            columnslist += ['other_f_%d' % i for i in range(len(other_f))]
            columnslist += ['sex', 'age']
            df = pd.DataFrame(df_values.reshape(1, -1), columns=columnslist)
            # Catboost categorical columns must be integer-typed.
            df[df.columns[config.top4_cat_features]] = df[df.columns[
                config.top4_cat_features]].astype(int)
            # for cindex in config.top4_cat_features:
            #     df[df.columns[cindex]] = df[df.columns[cindex]].astype(int)
            output = model_list_predict(model_list, df).squeeze()
            # print(output)
            # Multi-label decision: every class above 0.5 is predicted.
            ixs = [i for i, out in enumerate(output) if out > 0.5]
            for i in ixs:
                fout.write("\t" + idx2name[i])
            fout.write('\n')
    fout.close()
def run_12ECG_classifier(data, header_data, loaded_model):
    """Score one 12-lead ECG recording for the PhysioNet/CinC challenge.

    Normalises the signal to 500 Hz / 10 s / unit gain, runs a 5-fold
    model ensemble, and thresholds the averaged sigmoid scores at 0.25.

    :param data: (12, siglen) signal array
    :param header_data: WFDB-style header lines (fs, siglen, ADC gain)
    :param loaded_model: indexable collection of 5 fold models
    :return: (binary labels, scores, sorted SNOMED class codes)
    """
    # Use your classifier here to obtain a label and score for each class.
    dx_mapping_scored = pd.read_csv('./evaluation/dx_mapping_scored.csv'
                                    )['SNOMED CT Code'].values.tolist()
    with torch.no_grad():
        sig = data
        FS = 500  # target sampling rate (Hz)
        SIGLEN = FS * 10  # target length: 10 seconds
        fs = int(header_data[0].split(' ')[2])
        siglen = int(header_data[0].split(' ')[3])
        adc_gain = int(header_data[1].split(' ')[2].split('/')[0])
        # print(fs,siglen,adc_gain)
        if fs == FS * 2:
            # Exactly 1000 Hz: cheap decimation by 2 instead of resampling.
            # sig = signal.resample(sig.T, int(annot.siglen/annot.fs * FS)).T
            sig = sig[:, ::2]
        elif fs == FS:
            pass
            #raise ValueError("fs wrong")
        elif fs != FS:
            # Any other rate: polyphase-free FFT resampling to 500 Hz.
            sig = signal.resample(sig.T, int(siglen / fs * FS)).T
        siglen = sig.shape[1]
        # print(siglen)
        if siglen != SIGLEN:
            # Zero-pad short recordings, crop long ones, to exactly 10 s.
            sig_ext = np.zeros([12, SIGLEN])
            if siglen < SIGLEN:
                sig_ext[:, :siglen] = sig
            if siglen > SIGLEN:
                sig_ext = sig[:, :SIGLEN]
            if siglen != SIGLEN:
                sig = sig_ext
        # Convert ADC units to physical units.
        sig = sig / adc_gain
        x = transform(sig.T, train=False).unsqueeze(0).to(device)
        # k-fold
        '''
        '''
        # Average sigmoid scores over the 5 fold models.
        output = 0
        kfold = 5
        for fold in range(kfold):
            output += torch.sigmoid(
                loaded_model[fold](x)).squeeze().cpu().numpy()
        output = output / kfold
        # Re-order scores by sorted SNOMED code string, as required by
        # the challenge output format.
        mapping = dict(zip([str(i) for i in dx_mapping_scored], output))
        output = [mapping[key] for key in sorted(mapping.keys())]
        # 0.25 threshold chosen from the tuning results noted below.
        ixs = [1 if out > 0.25 else 0 for out in output]
        '''
        # 0.2 —— 0.990,0.881,0.629,0.792,0.826,0.605,0.792
        # 0.25 —— 0.990,0.881,0.669,0.806,0.825,0.620,0.796
        # one-fold
        output = torch.sigmoid(loaded_model(x)).squeeze().cpu().numpy()
        mapping = dict(zip([str(i) for i in dx_mapping_scored],output))
        output = [mapping[key] for key in sorted(mapping.keys())]
        ixs = [1 if out>0.25 else 0 for out in output]
        '''
        # 0.5 —— 0.975,0.745,0.621,0.679,0.657,0.460,0.654
        # 0.4 —— 0.975,0.745,0.610,0.697,0.689,0.477,0.696
        # 0.3 —— 0.975,0.745,0.579,0.704,0.713,0.482,0.723
        # 0.2 —— 0.975,0.745,0.517,0.694,0.728,0.470,0.733
        # 0.1 —— 0.975,0.745,0.396,0.637,0.711,0.417,0.701
        current_score = output
        current_label = ixs
        classes = sorted([str(i) for i in
                          dx_mapping_scored])  #[str(c) for c in dx_mapping_scored]
    return current_label, current_score, classes
'--input_pic', type=str, default='test/berlin/berlin_000000_000019_leftImg8bit.png', help='Path to the input picture') # args parse args = parser.parse_args() data_path, model_weight, input_pic = args.data_path, args.model_weight, args.input_pic image = Image.open('{}/leftImg8bit/{}'.format(data_path, input_pic)).convert('RGB') image_height, image_width = image.height, image.width num_width = 2 if 'test' in input_pic else 3 target = Image.new('RGB', (image_width * num_width, image_height)) images = [image] image = transform(image).unsqueeze(dim=0).cuda() # model load model = FastSCNN(in_channels=3, num_classes=19) model.load_state_dict( torch.load(model_weight, map_location=torch.device('cpu'))) model = model.cuda() model.eval() # predict and save image with torch.no_grad(): output = model(image) pred = torch.argmax(output, dim=1) pred_image = ToPILImage()(pred.byte().cpu()) pred_image.putpalette(palette) if 'test' not in input_pic:
def test(image, model):
    """Segment one image with optional test-time augmentation (TTA) and
    write the colour-decoded parsing result to a hard-coded output path.

    TTA variants (inversion, rotation, shift, and their inverted forms)
    are each run through the model, mapped back to the original frame,
    and averaged with the plain prediction.

    :param image: path to the input image (also used to build the output name)
    :param model: segmentation network already on the GPU
    """
    #get image
    im = cv2.imread(image, cv2.IMREAD_COLOR)
    H, W, _ = im.shape  # remember original size for the final resize
    # NOTE(review): w and h are module-level globals here -- confirm.
    im = cv2.resize(im, (w, h), interpolation=cv2.INTER_LINEAR)
    imgs = {}
    if config.TEST.TTA == True:
        # TTA() returns a dict of augmented image lists keyed by variant.
        imgs = TTA(config, im)
        if transform:
            for key in imgs.keys():
                for i, img in enumerate(imgs[key]):
                    imgs[key][i] = transform(imgs[key][i])
    else:
        if transform:
            imgs['img'] = [transform(im)]
        else:
            imgs['img'] = im
    # Plain (un-augmented) forward pass.
    img = imgs['img'][0].unsqueeze(0)
    img = img.float().cuda()
    out = model(img)
    out = nn.functional.interpolate(input=out,
                                    size=(384, 384),
                                    mode='bilinear',
                                    align_corners=True)
    if config.TEST.INV == True:
        # Inverted variant: predict, map back, average with the plain pass.
        img_inv = imgs['img_inv'][0].unsqueeze(0).cuda()
        img_out_inv = model(img_inv)
        img_out_inv = nn.functional.interpolate(input=img_out_inv,
                                                size=(384, 384),
                                                mode='bilinear',
                                                align_corners=True)
        img_out_inv = inv_back([img_out_inv])
        out = (out + img_out_inv[0]) * 0.5
    if config.TEST.ROTATION == True:
        # Rotation variants: predict each, rotate predictions back, average.
        r_outs = []
        r_imgs = imgs['rotation']
        for i, img in enumerate(r_imgs):
            r_out = model(img.unsqueeze(0).cuda())
            r_out = nn.functional.interpolate(input=r_out,
                                              size=(384, 384),
                                              mode='bilinear',
                                              align_corners=True)
            r_outs.append(r_out)
        r_outs = rotation_back(r_outs)  #numpy
        r_output = get_avg(r_outs)
        r_output = np.transpose(r_output, (2, 0, 1))
        r_output = torch.from_numpy(r_output).unsqueeze(0).cuda()
        if config.TEST.INV == True:
            # Rotation + inversion variants.
            r_outs_inv = []
            r_imgs_inv = imgs['rotation_inv']
            for i, img in enumerate(r_imgs_inv):
                r_out_inv = model(img.unsqueeze(0).cuda())
                r_out_inv = nn.functional.interpolate(input=r_out_inv,
                                                      size=(384, 384),
                                                      mode='bilinear',
                                                      align_corners=True)
                r_outs_inv.append(r_out_inv)
            r_outs_inv = inv_back(r_outs_inv)
            r_outs_inv = rotation_back(r_outs_inv)  #numpy
            r_output_inv = get_avg(r_outs_inv)
            r_output_inv = np.transpose(r_output_inv, (2, 0, 1))
            r_output_inv = torch.from_numpy(r_output_inv).unsqueeze(
                0).cuda()
    if config.TEST.SHIFT == True:
        # Shift variants: predict each, shift predictions back, average.
        s_outs = []
        s_imgs = imgs['shift']
        for i, img in enumerate(s_imgs):
            #print('aaa')
            #print('when shift,the shape of input is :{}'.format(img.shape))
            s_out = model(img.unsqueeze(0).cuda())
            s_out = nn.functional.interpolate(input=s_out,
                                              size=(384, 384),
                                              mode='bilinear',
                                              align_corners=True)
            #print(',the shape output is {}'.format(s_out.shape))
            # show_pic(image,i,'before,',s_out)
            #print(s_out.shape)
            s_outs.append(s_out)
        s_outs = shift_back(s_outs)
        for i, img in enumerate(s_outs):
            img = torch.from_numpy(np.transpose(img,
                                                (2, 0, 1))).unsqueeze(0)
            #print('after shift back')
            #print(img.shape)
            # show_pic(image,i,'after',img)
        s_output = get_avg(s_outs)
        s_output = np.transpose(s_output, (2, 0, 1))
        s_output = torch.from_numpy(s_output).unsqueeze(0).cuda()
        if config.TEST.INV == True:
            # Shift + inversion variants.
            s_outs_inv = []
            s_imgs_inv = imgs['shift_inv']
            for i, img in enumerate(s_imgs_inv):
                #print('aaa')
                #print('when shift,the shape of input is :{}'.format(img.shape))
                s_out_inv = model(img.unsqueeze(0).cuda())
                # NOTE(review): this branch interpolates to (h, w) while
                # every other branch uses (384, 384) -- confirm intended.
                s_out_inv = nn.functional.interpolate(input=s_out_inv,
                                                      size=(h, w),
                                                      mode='bilinear',
                                                      align_corners=True)
                #print(',the shape output is {}'.format(s_out_inv.shape))
                # show_pic(image,i,'before,',s_out_inv)
                #print(s_out_inv.shape)
                s_outs_inv.append(s_out_inv)
            s_outs_inv = inv_back(s_outs_inv)
            s_outs_inv = shift_back(s_outs_inv)
            for i, img in enumerate(s_outs_inv):
                img = torch.from_numpy(np.transpose(img,
                                                    (2, 0, 1))).unsqueeze(0)
                #print('after shift back')
                #print(img.shape)
                # show_pic(image,i,'after',img)
            s_output_inv = get_avg(s_outs_inv)
            s_output_inv = np.transpose(s_output_inv, (2, 0, 1))
            s_output_inv = torch.from_numpy(s_output_inv).unsqueeze(0).cuda()
    # NOTE(review): this average assumes ROTATION, SHIFT and INV are all
    # enabled; otherwise some of these names are unbound -- confirm.
    out = (out + r_output + r_output_inv + s_output + s_output_inv) / 5
    #print(out.shape)
    # Decode per-pixel class scores into a colour image.
    result = decode_parsing(out, num_images=1, num_classes=20, is_pred=True)
    #print(result.type(),result.shape)
    #print(result.shape)
    result = result.squeeze(0)
    result = np.asarray(result)
    result = np.transpose(result, (1, 2, 0))
    # Restore the original image resolution.
    result = cv2.resize(result, (W, H), interpolation=cv2.INTER_NEAREST)
    # result = get_result(result,H,W)
    #print(result.shape)
    out_name = '/home/aniki/code/test/out' + str(image[1:-4]) + '.png'
    print(out_name)
    cv2.imwrite(out_name, result)