def demo(config):
    """Run single-image semantic segmentation and save a color palette mask.

    Args:
        config: namespace with attributes ``outdir``, ``input_pic``,
            ``model``, ``save_folder`` and ``dataset``.

    Side effects:
        Creates ``config.outdir`` if missing and writes ``<input stem>.png``
        into it.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # output folder
    if not os.path.exists(config.outdir):
        os.makedirs(config.outdir)
    # image transform — ImageNet statistics to match the pretrained backbone
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])
    image = Image.open(config.input_pic).convert('RGB')
    images = transform(image).unsqueeze(0).to(device)

    # BUG FIX: the function received ``config`` but kept reading the global
    # ``args`` below; use the parameter consistently so the function is
    # self-contained and callable with any namespace.
    model = get_model(config.model, pretrained=True, root=config.save_folder).to(device)
    print('Finished loading model!')

    model.eval()
    with torch.no_grad():  # inference only — no autograd graph needed
        output = model(images)

    # outputs[0] is the primary head; argmax over the class dimension.
    pred = torch.argmax(output[0], 1).squeeze(0).cpu().data.numpy()
    mask = get_color_pallete(pred, config.dataset)
    outname = os.path.splitext(os.path.split(config.input_pic)[-1])[0] + '.png'
    mask.save(os.path.join(config.outdir, outname))
def eval(self):
    """Validate ``self.model`` over ``self.val_loader``.

    Logs the running pixAcc/mIoU after each sample and, when
    ``self.args.save_pred`` is set, writes a color palette mask per image.
    Calls ``synchronize()`` at the end for distributed runs.
    """
    self.metric.reset()
    self.model.eval()
    logger.info("Start validation, Total sample: {:d}".format(
        len(self.val_loader)))
    for i, (image, target, filename) in enumerate(self.val_loader):
        image = image.to(self.device)
        target = target.to(self.device)
        with torch.no_grad():  # inference only
            outputs = self.model(image)
        self.metric.update(outputs[0], target)
        pixAcc, mIoU = self.metric.get()
        logger.info(
            "Sample: {:d}, validation pixAcc: {:.3f}, mIoU: {:.3f}".format(
                i + 1, pixAcc * 100, mIoU * 100))
        if self.args.save_pred:
            pred = torch.argmax(outputs[0], 1)
            pred = pred.cpu().data.numpy()
            predict = pred.squeeze(0)
            # BUG FIX: read the dataset name from ``self.args`` — the method
            # previously reached for a global ``args`` while every other
            # access in this method goes through ``self.args``.
            mask = get_color_pallete(predict, self.args.dataset)
            # NOTE(review): ``outdir`` is a module-level name — confirm it is
            # defined before eval() is called.
            mask.save(
                os.path.join(outdir,
                             os.path.splitext(filename[0])[0] + '.png'))
    synchronize()
def demo(config):
    """Segment one image, optionally restoring weights from a checkpoint.

    Args:
        config: namespace providing ``outdir`` and ``input_pic``; model and
            checkpoint options are still read from the module-level ``args``.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # output folder
    if not os.path.exists(config.outdir):
        os.makedirs(config.outdir)
    # image transform — ImageNet statistics for the pretrained backbone
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])
    image = Image.open(config.input_pic).convert('RGB')
    images = transform(image).unsqueeze(0).to(device)

    model = get_model(args.model, pretrained=True, root=args.save_folder).to(device)
    print('Finished loading model!')

    # Restore checkpoint weights AFTER the model exists.  BUG FIXES:
    #  * the original loaded into ``self.model`` inside a plain function
    #    (NameError) and did so before ``model`` was even created;
    #  * ``assert ext == '.pkl' or '.pth'`` was always true because the
    #    non-empty string '.pth' is truthy — use membership instead.
    if args.resume:
        if os.path.isfile(args.resume):
            name, ext = os.path.splitext(args.resume)
            assert ext in ('.pkl', '.pth'), 'Sorry only .pth and .pkl files supported.'
            print('Resuming training, loading {}...'.format(args.resume))
            # map_location keeps CPU-only hosts working with GPU checkpoints.
            model.load_state_dict(
                torch.load(args.resume,
                           map_location=lambda storage, loc: storage))

    model.eval()
    with torch.no_grad():
        output = model(images)

    pred = torch.argmax(output[0], 1).squeeze(0).cpu().data.numpy()
    mask = get_color_pallete(pred, args.dataset)
    outname = os.path.splitext(os.path.split(args.input_pic)[-1])[0] + '.png'
    mask.save(os.path.join(args.outdir, outname))
def train(self):
    """Training loop over a single cached (image, target) pair.

    NOTE(review): each "epoch" performs exactly one optimization step on
    ``self.img`` / ``self.target`` — there is no inner dataloader loop;
    confirm this single-sample overfitting setup is intentional.
    """
    self.model.train()
    start_time = time.time()
    for epoch in range(self.args.epochs):
        # Step the scheduler for this epoch, then read its current rate...
        self.lr_scheduler(self.optimizer, epoch)
        cur_lr = self.lr_scheduler.learning_rate
        # self.lr_scheduler(self.optimizer, epoch)
        # ...and push that rate into every optimizer parameter group.
        for param_group in self.optimizer.param_groups:
            param_group['lr'] = cur_lr
        images = self.img.to(self.args.device)
        targets = self.target.to(self.args.device)
        outputs = self.model(images)
        # The criterion returns a dict; the total loss is under key 'loss'.
        loss = self.criterion(outputs, targets)
        self.optimizer.zero_grad()
        loss['loss'].backward()
        self.optimizer.step()
        # Save the current prediction as a color mask for visual tracking.
        pred = torch.argmax(outputs[0], 1).cpu().data.numpy()
        mask = get_color_pallete(pred.squeeze(0), self.args.dataset)
        save_pred(self.args, epoch, mask)
        # Confusion-histogram metrics; 21 classes — matches PASCAL VOC,
        # TODO confirm against the dataset actually in use.
        hist, labeled, correct = hist_info(pred, targets.cpu().numpy(), 21)
        _, mIoU, _, pixAcc = compute_score(hist, correct, labeled)
        print(
            'Epoch: [%2d/%2d] || Time: %4.4f sec || lr: %.8f || Loss: %.4f || pixAcc: %.3f || mIoU: %.3f'
            % (epoch, self.args.epochs, time.time() - start_time, cur_lr,
               loss['loss'].item(), pixAcc, mIoU))
def demo(config):
    """Segment one image and save its color mask alpha-blended over the input.

    Args:
        config: namespace with ``outdir``, ``input_pic``, ``model``,
            ``save_folder`` and ``dataset`` attributes.

    Side effects:
        Writes ``<input stem>.png`` into ``config.outdir``; the blended
        overlay overwrites the plain palette mask on disk.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # output folder
    if not os.path.exists(config.outdir):
        os.makedirs(config.outdir)
    # image transform — ImageNet statistics for the pretrained backbone
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])
    image = Image.open(config.input_pic).convert('RGB')
    images = transform(image).unsqueeze(0).to(device)

    # BUG FIX: the function received ``config`` but read the global ``args``
    # for every model/output option; use the parameter consistently.
    model = get_model(config.model, pretrained=True, root=config.save_folder).to(device)
    print('Finished loading model!')

    model.eval()
    with torch.no_grad():
        output = model(images)

    pred = torch.argmax(output[0], 1).squeeze(0).cpu().data.numpy()
    print('predicted masks : ', np.unique(pred))
    mask = get_color_pallete(pred, config.dataset)
    outname = os.path.splitext(os.path.split(config.input_pic)[-1])[0] + '.png'
    mask.save(os.path.join(config.outdir, outname))

    # Round-trip through disk to rasterize the palette mask, then blend it
    # 50/50 over the original image and overwrite the saved file.
    mask = cv2.imread(os.path.join(config.outdir, outname), cv2.IMREAD_COLOR)
    mask = cv2.cvtColor(mask, cv2.COLOR_BGR2RGB)
    blended = cv2.addWeighted(np.array(image), 0.5, mask, 0.5, 0.0)
    blended = cv2.cvtColor(blended, cv2.COLOR_RGB2BGR)
    cv2.imwrite(os.path.join(config.outdir, outname), blended)
def eval(self):
    """Benchmark-oriented validation pass: times 100 forward passes per
    batch and optionally dumps prediction masks.

    NOTE(review): metric bookkeeping and target handling are commented out,
    so this variant measures inference latency, not accuracy.
    """
    #self.metric.reset()
    self.model.eval()
    # Unwrap DistributedDataParallel so we call the raw module directly.
    if self.args.distributed:
        model = self.model.module
    else:
        model = self.model
    logger.info("Start validation, Total sample: {:d}".format(
        len(self.val_loader)))
    counter = 0
    for i, (image, target) in enumerate(self.val_loader):
        # Hard-coded debug image load; NOTE(review): ``oimage`` is built but
        # never fed to the model below — only the loader's ``image`` is used.
        oimage = cv2.imread("../dataset/rgb/train/1.png", 1)
        oimage = cv2.cvtColor(oimage, cv2.COLOR_BGR2RGB)
        oimage = np.transpose(oimage, (2, 0, 1))  # HWC -> CHW
        oimage = oimage.astype(np.float32) / 255.
        oimage = oimage[np.newaxis, ...]  # add batch dimension
        oimage = torch.tensor(oimage).cuda()
        #print(oimage.shape)
        #from PIL import Image
        #oimage = Image.open("../dataset/rgb/val/1.png").convert('RGB')
        #image_transform = transforms.Compose([transforms.ToTensor()])
        #oimage = image_transform(oimage).unsqueeze(0)
        #oimage = oimage.to(self.device)
        #print(oimage.shape)
        image = image.to(self.device)
        #print(image)
        #print(type(image), image.shape)
        #target = target.to(self.device)
        with torch.no_grad():
            import time
            time_start = time.time()
            # Average latency over 100 repeated forward passes.
            # NOTE(review): this inner loop shadows the outer loop index
            # ``i``; harmless while the commented logging below stays off.
            for i in range(100):
                outputs = model(image)
            print((time.time() - time_start) / 100.)
        #self.metric.update(outputs[0], target)
        #pixAcc, mIoU = self.metric.get()
        #logger.info("Sample: {:d}, validation pixAcc: {:.3f}, mIoU: {:.3f}".format(
        #    i + 1, pixAcc * 100, mIoU * 100))
        if self.args.save_pred:
            pred = torch.argmax(outputs[0], 1)
            pred = pred.cpu().data.numpy()
            predict = pred.squeeze(0)
            #predict = cv2.resize(predict, (oimage.shape[1], oimage.shape[0]), interpolation = cv2.INTER_NEAREST)
            mask = get_color_pallete(predict, self.args.dataset)
            #cv2.imshow("image", np.array(mask, dtype=np.uint8))
            #cv2.waitKey(2000)
            # Grayscale debug dump: class ids scaled by 20 for visibility.
            cv2.imwrite(
                os.path.join(outdir, 'test_mask_' + str(counter) + '.png'),
                np.array(predict * 20, dtype=np.uint8))
            mask.save(os.path.join(outdir, 'test_' + str(counter) + '.png'))
            counter += 1
    synchronize()
def demo(config):
    """Segment a video stream frame by frame and write a blended overlay video.

    NOTE(review): relies on module-level ``cap`` (VideoCapture), ``out``
    (VideoWriter), ``args`` and ``tqdm`` being set up by the surrounding
    script — confirm before reuse.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # output folder
    if not os.path.exists(config.outdir):
        os.makedirs(config.outdir)
    # image transform — ImageNet statistics for the pretrained backbone
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])
    #read the video file here
    model = get_model(args.model, pretrained=True, root=args.save_folder).to(device)
    model.eval()
    print('Finished loading model!')
    count = 0
    pbar = tqdm(total=150)
    while cap.isOpened():
        count += 1
        ret, image = cap.read()
        # image = Image.open(config.input_pic).convert('RGB')
        # BUG FIX: the original ignored ``ret`` and relied on a bare
        # ``except: continue`` to skip bad frames, which spins forever once
        # the stream is exhausted (cap.isOpened() stays True). Break cleanly
        # at end of stream and only skip frames OpenCV fails to process.
        if not ret or image is None:
            break
        try:
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # BGR -> RGB
            image = cv2.resize(image, (640, 480))
            # image = cv2.flip(image, 1)
        except cv2.error:
            continue
        image = cv2.GaussianBlur(image, (5, 5), 0)
        images = transform(image).unsqueeze(0).to(device)
        with torch.no_grad():
            output = model(images)
        pred = torch.argmax(output[0], 1).squeeze(0).cpu().data.numpy()
        mask = get_color_pallete(pred, args.dataset)
        # Round-trip through disk to rasterize the palette mask to BGR.
        outname = os.path.splitext(os.path.split('tmp')[-1])[0] + '.png'
        mask.save(os.path.join(args.outdir, outname))
        mask = cv2.imread(os.path.join(args.outdir, outname))  # in BGR
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        blended = cv2.addWeighted(image, 0.5, mask, 0.5, 0.0)
        if args.display:
            cv2.imshow('output', blended)
            cv2.waitKey(1)
        out.write(blended)
        pbar.update(1)
        # if count==300: break
    cap.release()
    out.release()
    print('Done. Video file generated')
def eval(args):
    """Evaluate a pretrained segmentation model on the 'testval' split.

    Builds the dataset and loader, runs inference one image at a time,
    accumulates pixAcc/mIoU through SegmentationMetric, and optionally
    saves color masks under ./test_result.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # output folder
    outdir = 'test_result'
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    # ImageNet normalization, matching the pretrained backbone.
    input_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([.485, .456, .406], [.229, .224, .225]),
    ])
    # dataset and dataloader (deterministic order, one image per batch)
    test_dataset = get_segmentation_dataset(args.dataset,
                                            split='val',
                                            mode='testval',
                                            transform=input_transform)
    test_loader = data.DataLoader(dataset=test_dataset,
                                  batch_size=1,
                                  shuffle=False)
    # create network
    model = get_segmentation_model(model=args.model,
                                   dataset=args.dataset,
                                   backbone=args.backbone,
                                   aux=args.aux,
                                   pretrained=True,
                                   pretrained_base=False).to(device)
    print('Finished loading model!')
    metric = SegmentationMetric(test_dataset.num_class)

    model.eval()
    for sample_idx, (image, label) in enumerate(test_loader):
        image = image.to(device)
        with torch.no_grad():
            outputs = model(image)
        pred_np = torch.argmax(outputs[0], 1).cpu().data.numpy()
        metric.update(pred_np, label.numpy())
        pixAcc, mIoU = metric.get()
        print('Sample %d, validation pixAcc: %.3f%%, mIoU: %.3f%%' %
              (sample_idx + 1, pixAcc * 100, mIoU * 100))
        if args.save_result:
            mask = get_color_pallete(pred_np.squeeze(0), args.dataset)
            mask.save(os.path.join(outdir, 'seg_{}.png'.format(sample_idx)))
def eval(self):
    """Validate and report per-class IoU for a 3-class task.

    Logs running pixAcc/mIoU/IoU_0..2 after each sample, optionally saves
    palette masks, and prints the mean of each running metric at the end.
    """
    self.metric.reset()
    self.model.eval()
    # Unwrap DistributedDataParallel so the raw module is called directly.
    model = self.model.module if self.args.distributed else self.model
    logger.info("Start validation, Total sample: {:d}".format(
        len(self.val_loader)))
    # Running sums of the metric values reported after each sample.
    sums = {'pixAcc': 0, 'mIoU': 0, 'IoU_0': 0, 'IoU_1': 0, 'IoU_2': 0}
    for idx, (image, target, filename) in enumerate(self.val_loader):
        image = image.to(self.device)
        target = target.to(self.device)
        with torch.no_grad():
            outputs = model(image)
        self.metric.update(outputs[0], target)
        pixAcc, mIoU, IoU_0, IoU_1, IoU_2 = self.metric.get()
        logger.info(
            "Sample: {:d}, validation pixAcc: {:.3f}, mIoU: {:.3f}, IoU_0: {:.3f}, IoU_1: {:.3f}, IoU_2: {:.3f}"
            .format(idx + 1, pixAcc * 100, mIoU * 100, IoU_0 * 100,
                    IoU_1 * 100, IoU_2 * 100))
        for key, value in zip(sums, (pixAcc, mIoU, IoU_0, IoU_1, IoU_2)):
            sums[key] += value
        if self.args.save_pred:
            prediction = torch.argmax(outputs[0], 1).cpu().data.numpy().squeeze(0)
            mask = get_color_pallete(prediction, self.args.dataset)
            mask.save(os.path.join(
                outdir, os.path.splitext(filename[0])[0] + '.png'))
    n_samples = len(self.val_loader)
    print('mean pixAcc: ', sums['pixAcc'] / n_samples)
    print('mean mIoU: ', sums['mIoU'] / n_samples)
    print('mean IoU_0: ', sums['IoU_0'] / n_samples)
    print('mean IoU_1: ', sums['IoU_1'] / n_samples)
    print('mean IoU_2: ', sums['IoU_2'] / n_samples)
    synchronize()
def main(args):
    """Load a trained Net with partial-state-dict support, segment one test
    image and save the resulting ADE20K color mask next to the script.

    Args:
        args: namespace with ``loadDir``, ``loadModel``, ``loadWeights``,
            ``cpu`` and ``test_img`` attributes.
    """
    modelpath = args.loadDir + args.loadModel
    weightspath = args.loadDir + args.loadWeights
    print("Loading model: " + modelpath)
    print("Loading weights: " + weightspath)

    model = Net(NUM_CLASSES)
    model = torch.nn.DataParallel(model)
    if (not args.cpu):
        model = model.cuda()

    def load_my_state_dict(model, state_dict):
        # Custom loader: copy only the parameters whose names exist in the
        # model, silently skipping checkpoint entries the model lacks.
        own_state = model.state_dict()
        for name, param in state_dict.items():
            if name not in own_state:
                continue
            own_state[name].copy_(param)
        return model

    model = load_my_state_dict(model, torch.load(weightspath))
    print("Model and weights LOADED successfully")
    model.eval()

    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])
    image = Image.open(args.test_img).convert('RGB')
    image = image.resize((480, 480))
    # BUG FIX: ``device`` was commented out but still used below, raising a
    # NameError at runtime. Honor args.cpu so the input tensor lands on the
    # same device the model was placed on above.
    device = torch.device("cpu" if args.cpu else "cuda")
    images = transform(image).unsqueeze(0).to(device)
    with torch.no_grad():  # inference only
        output = model(images)

    # Per-pixel class ids from the first output; byte() is safe for <=255 classes.
    label = output[0].max(0)[1].byte().cpu().data
    label = label.numpy()
    mask = get_color_pallete(label, 'ade20k')
    outname = args.test_img.split('.')[0] + '.png'
    mask.save(os.path.join('./', outname))
def eval(self):
    """Run validation over the whole loader, logging accuracy and FPS.

    Per-sample FPS covers device transfer, forward pass and metric update;
    the average FPS over the loader is logged at the end.
    """
    self.metric.reset()
    self.model.eval()
    # Unwrap DistributedDataParallel so the raw module is called directly.
    model = self.model.module if self.args.distributed else self.model
    logger.info("Start validation, Total sample: {:d}".format(
        len(self.val_loader)))
    total_fps = 0.0
    for idx, (image, target, filename) in enumerate(self.val_loader):
        tic = time.time()
        image = image.to(self.device)
        target = target.to(self.device)
        with torch.no_grad():
            outputs = model(image)
        self.metric.update(outputs[0], target)
        pixAcc, mIoU = self.metric.get()
        fps = 1.0 / (time.time() - tic)
        total_fps += fps
        logger.info(
            "Sample: {:d}, validation pixAcc: {:.3f}, mIoU: {:.3f}, FPS: {:.3f}"
            .format(idx + 1, pixAcc * 100, mIoU * 100, fps))
        if self.args.save_pred:
            prediction = torch.argmax(outputs[0], 1).cpu().data.numpy().squeeze(0)
            mask = get_color_pallete(prediction, self.args.dataset)
            mask.save(os.path.join(
                outdir, os.path.splitext(filename[0])[0] + '.png'))
        # A commented-out early break lived here for DANet out-of-memory runs.
        #if i + 1 > 302:
        #    break
    logger.info("avgFPS: {:.3f}".format(total_fps / len(self.val_loader)))
    synchronize()
def test(model, image, name):
    """Segment one preprocessed image tensor, blend the color mask over the
    original JPEG and save the result, printing end-to-end FPS.

    Args:
        model: segmentation network, already on the right device.
        image: input tensor for ``model`` (presumably 1xCxHxW — TODO confirm).
        name: path-like string; only its basename (without extension) is
            used to locate the source JPEG and to name the output PNG.
    """
    starttime = time.time()
    model.eval()
    with torch.no_grad():
        output = model(image)
    # endtime = time.time()
    #
    # fps = 1. / (endtime - starttime)
    # print("fps:", fps)
    pred = torch.argmax(output[0], 1).squeeze(0).cpu().data.numpy()
    mask = get_color_pallete(pred, args.dataset)
    file = os.path.splitext(os.path.split(name)[-1])[0]
    outname1 = file + '.jpg'  # original image file name
    outname2 = file + '.png'  # mask file name
    # mask.save(os.path.join(args.outdir, outname2))
    # image1: original photo; image2: segmentation map
    image1 = Image.open(
        os.path.join('../datasets/own/VOC2012/JPEGImages', outname1))  # open the original image
    #image2 = Image.open(os.path.join(args.outdir, outname2))
    image1 = image1.convert('RGBA')
    mask = mask.convert('RGBA')
    # Blend the two images: blended_img = img1 * (1 - alpha) + img2 * alpha
    image = Image.blend(image1, mask, 0.6)
    # # image.save("test.png")
    # # gam2 = exposure.adjust_gamma(img_as_float(image), 0.5)  # brightness: 0.5 brighter, 1 unchanged, 2 darker
    # plt.imsave(os.path.join(args.outdir, outname2), image, cmap='plasma')  # blended image saved under ../scripts/eval
    image.save(os.path.join(args.outdir, outname2))
    endtime = time.time()
    # FPS includes inference, disk reads and the final save.
    fps = 1. / (endtime - starttime)
    print("fps:", fps)
def eval(self):
    """Validation pass that additionally exports two intermediate feature
    maps (512-channel and 3-channel) per image to .mat files.

    NOTE(review): the .mat output directories are hard-coded absolute
    paths — this only runs on the original author's machine as-is.
    """
    self.metric.reset()
    self.model.eval()
    # Unwrap DistributedDataParallel when running distributed.
    if self.args.distributed:
        model = self.model.module
    else:
        model = self.model
    logger.info("Start validation, Total sample: {:d}".format(
        len(self.val_loader)))
    # Running sums of per-sample metric values for end-of-run means.
    all_pixAcc = 0
    all_mIoU = 0
    all_IoU_0 = 0
    all_IoU_1 = 0
    all_IoU_2 = 0
    for i, (image, target, filename) in enumerate(self.val_loader):
        image = image.to(self.device)
        target = target.to(self.device)
        with torch.no_grad():
            #outputs = model(image)
            # Model variant that also returns two intermediate feature maps.
            outputs, mat_result_512, mat_result_3 = model(image)
        # CHW -> HWC. The tuple argument implies numpy arrays — TODO confirm
        # (torch.Tensor.transpose takes two ints, not a tuple).
        mat_result_512 = mat_result_512.transpose((1, 2, 0))
        mat_result_3 = mat_result_3.transpose((1, 2, 0))
        print('mat_result_512.type:', type(mat_result_512))
        print('mat_result_512.shape:', mat_result_512.shape)
        print('mat_result_3.type:', type(mat_result_3))
        print('mat_result_3.shape:', mat_result_3.shape)
        filename_mat = os.path.splitext(filename[0])[0] + '.mat'
        print('This is:', filename_mat)
        datapath_512 = '/home/pzn/pzncode/non-local/awesome-semantic-segmentation-pytorch/runs/mat_result_512_121/' + filename_mat
        print(datapath_512)
        result_512 = {'pzn_feature_512': mat_result_512}
        scio.savemat(datapath_512, {'feature512': result_512},
                     appendmat=True, do_compression=True)
        print('This is:', filename_mat)
        datapath_3 = '/home/pzn/pzncode/non-local/awesome-semantic-segmentation-pytorch/runs/mat_result_3_121/' + filename_mat
        print(datapath_3)
        result_3 = {'pzn_feature_3': mat_result_3}
        scio.savemat(datapath_3, {'feature3': result_3},
                     appendmat=True, do_compression=True)
        self.metric.update(outputs[0], target)
        #pixAcc, mIoU = self.metric.get()
        pixAcc, mIoU, IoU_0, IoU_1, IoU_2 = self.metric.get()
        #logger.info("Sample: {:d}, validation pixAcc: {:.3f}, mIoU: {:.3f}".format(
        #    i + 1, pixAcc * 100, mIoU * 100))
        logger.info(
            "Sample: {:d}, validation pixAcc: {:.3f}, mIoU: {:.3f}, IoU_0: {:.3f}, IoU_1: {:.3f}, IoU_2: {:.3f}"
            .format(i + 1, pixAcc * 100, mIoU * 100, IoU_0 * 100,
                    IoU_1 * 100, IoU_2 * 100))
        all_pixAcc = all_pixAcc + pixAcc
        all_mIoU = all_mIoU + mIoU
        all_IoU_0 = all_IoU_0 + IoU_0
        all_IoU_1 = all_IoU_1 + IoU_1
        all_IoU_2 = all_IoU_2 + IoU_2
        if self.args.save_pred:
            pred = torch.argmax(outputs[0], 1)
            pred = pred.cpu().data.numpy()
            predict = pred.squeeze(0)
            mask = get_color_pallete(predict, self.args.dataset)
            mask.save(
                os.path.join(outdir,
                             os.path.splitext(filename[0])[0] + '.png'))
    # Means of the running metric values logged above.
    print('mean pixAcc: ', all_pixAcc / len(self.val_loader))
    print('mean mIoU: ', all_mIoU / len(self.val_loader))
    print('mean IoU_0: ', all_IoU_0 / len(self.val_loader))
    print('mean IoU_1: ', all_IoU_1 / len(self.val_loader))
    print('mean IoU_2: ', all_IoU_2 / len(self.val_loader))
    synchronize()
def eval(self):
    """Validate, log per-sample metrics and export a per-image CSV report.

    Side effects: optionally writes palette masks under ``outdir`` and
    always writes ``name_demo.csv`` with one row per validation image.
    """
    self.metric.reset()
    self.model.eval()
    # Unwrap DistributedDataParallel when running distributed.
    if self.args.distributed:
        model = self.model.module
    else:
        model = self.model
    logger.info("Start validation, Total sample: {:d}".format(
        len(self.val_loader)))
    # Per-image records for the CSV, plus running sums for the final means.
    name_list = []
    mIOU_list = []
    acc_list = []
    all_pixAcc = 0
    all_mIoU = 0
    all_IoU_0 = 0
    all_IoU_1 = 0
    all_IoU_2 = 0
    for i, (image, target, filename) in enumerate(self.val_loader):
        image = image.to(self.device)
        target = target.to(self.device)
        with torch.no_grad():
            outputs = model(image)
        self.metric.update(outputs[0], target)
        pixAcc, mIoU, IoU_0, IoU_1, IoU_2 = self.metric.get()
        logger.info(
            "Sample: {:d}, validation pixAcc: {:.3f}, mIoU: {:.3f}, IoU_0: {:.3f}, IoU_1: {:.3f}, IoU_2: {:.3f}"
            .format(i + 1, pixAcc * 100, mIoU * 100, IoU_0 * 100,
                    IoU_1 * 100, IoU_2 * 100))
        all_pixAcc = all_pixAcc + pixAcc
        all_mIoU = all_mIoU + mIoU
        all_IoU_0 = all_IoU_0 + IoU_0
        all_IoU_1 = all_IoU_1 + IoU_1
        all_IoU_2 = all_IoU_2 + IoU_2
        mIOU_list.append(mIoU)
        acc_list.append(pixAcc)
        print('image_pre_i:', filename)
        name_list.append(filename[0])
        if self.args.save_pred:
            pred = torch.argmax(outputs[0], 1)
            pred = pred.cpu().data.numpy()
            predict = pred.squeeze(0)
            mask = get_color_pallete(predict, self.args.dataset)
            mask.save(
                os.path.join(outdir,
                             os.path.splitext(filename[0])[0] + '.png'))
    print('mean pixAcc: ', all_pixAcc / len(self.val_loader))
    print('mean mIoU: ', all_mIoU / len(self.val_loader))
    print('mean IoU_0: ', all_IoU_0 / len(self.val_loader))
    print('mean IoU_1: ', all_IoU_1 / len(self.val_loader))
    print('mean IoU_2: ', all_IoU_2 / len(self.val_loader))
    print('name_list: ', name_list)
    print('mIOU_list: ', mIOU_list)
    print('acc_list: ', acc_list)
    # BUG FIX: the original concatenated the three parallel lists into one
    # flat column (name_list + mIOU_list + acc_list) under a single
    # 'image_name' header, producing a garbled CSV three times too long.
    # Emit one row per image with a column per metric instead.
    df = pd.DataFrame({
        'image_name': name_list,
        'mIoU': mIOU_list,
        'pixAcc': acc_list,
    })
    df.to_csv('name_demo.csv')
    synchronize()