def expand_resize_color_data(prediction=None, submission_size=(3384, 1710), offset=690):
    """Convert a class-index prediction into a full-size color submission image.

    Args:
        prediction: 2-D array of per-pixel class indices (network output
            after argmax).
        submission_size: (width, height) of the final submission image.
            NOTE: changed from a mutable list default to a tuple — a shared
            list default could be mutated across calls; indexing is unchanged.
        offset: number of top rows that were cropped away before inference;
            they are left black in the output.

    Returns:
        uint8 image of shape (height, width, 3): the colorized prediction
        pasted below the cropped offset, black above it.
    """
    # Map class indices to color codes; decode_color_labels returns CHW.
    color_pred_mask = decode_color_labels(prediction)
    # CHW -> HWC for OpenCV.
    color_pred_mask = np.transpose(color_pred_mask, (1, 2, 0))
    # Resize back to submission width and the non-cropped height.
    # INTER_NEAREST keeps label colors exact (no interpolation blending).
    color_expand_mask = cv2.resize(
        color_pred_mask,
        (submission_size[0], submission_size[1] - offset),
        interpolation=cv2.INTER_NEAREST)
    color_submission_mask = np.zeros((submission_size[1], submission_size[0], 3), dtype='uint8')
    # Top `offset` rows stay zero (black); paste the prediction below them.
    color_submission_mask[offset:, :, :] = color_expand_mask
    return color_submission_mask
def get_color_mask(pred):
    """Turn raw network logits into an HWC color mask (numpy array).

    `pred` is expected to be a torch tensor with the class dimension at
    dim=1 (e.g. NCHW logits) — TODO confirm against callers.
    """
    # Probabilities -> per-pixel class index; squeeze drops size-1 dims.
    labels = torch.softmax(pred, dim=1).argmax(dim=1).squeeze()
    labels = labels.detach().cpu().numpy()
    # Colorize the index map (CHW) and move channels last for display.
    colored = decode_color_labels(labels)
    return np.transpose(colored, (1, 2, 0))
def get_color_mask(pred): pred = torch.softmax(pred, dim=1) # 将 channel 取得的最大的响应作为标签 pred = torch.argmax(pred, dim=1) # squeeze 将某些维度上的1 去掉 pred = torch.squeeze(pred) pred = pred.detach().cpu().numpy() # 转换成 color 的 label pred = decode_color_labels(pred) # 将通道数返回来 pred = np.transpose(pred, (1, 2, 0)) return pred
def main():
    """Run validation (program_choice == 1) or produce test-set submission
    PNGs (program_choice == 2) for the lane-segmentation model.

    All configuration is hard-coded below; the function returns nothing and
    works through file I/O, stdout and (optionally) OpenCV preview windows.
    """
    IMG_SIZE = [1536, 512]            # network input size (width, height)
    SUBMISSION_SIZE = [3384, 1710]    # full-resolution submission (width, height)
    save_test_logits = False          # also dump raw logits as .npy files
    batch_size = 4
    log_iters = 100
    network = 'unet_simple'

    # Define paths for each model
    if network == 'deeplabv3p':
        model_path = "./model_weights/paddle_deeplabv3p_8_end_060223"
        npy_dir = '/npy_save/deeplabv3p/'
    elif network == 'unet_base':
        model_path = "./model_weights/paddle_unet_base_10_end_059909"
        npy_dir = '/npy_save/unet_base/'
    elif network == 'unet_simple':
        model_path = "./model_weights/paddle_unet_simple_12_end_060577"
        npy_dir = '/npy_save/unet_simple/'

    program_choice = 2  # 1 - Validation; 2 - Test
    show_label = False
    crop_offset = 690   # rows cropped from the top of the full-size image
    data_dir = './data_list/val.csv'
    test_dir = '/root/private/LaneDataSet/TestSet/Image_Data/ColorImage/'
    sub_dir = './test_submission/'

    # Get data list and split it into train and validation set.
    val_list = pd.read_csv(data_dir)

    iter_id = 0
    total_loss = 0.0
    total_miou = 0.0
    prev_time = time.time()

    # Validation
    if program_choice == 1:
        val_reader = val_image_gen(val_list, batch_size=batch_size,
                                   image_size=IMG_SIZE, crop_offset=crop_offset)
        model = create_network(network=network)
        model.load_weights(model_path)
        print("loaded model from: %s" % model_path)
        print('Start Validation!')
        # // instead of int(a / b): same result, no float round-trip.
        for iteration in range(len(val_list) // batch_size):
            val_data, val_label = next(val_reader)
            results = model.evaluate(val_data, val_label)
            if iter_id % log_iters == 0:
                print('Finished Processing %d Images.' % (iter_id * batch_size))
            iter_id += 1
            total_loss += np.mean(results[0])
            total_miou += np.mean(results[1])
            # label to mask: side-by-side ground truth vs. prediction preview
            if show_label:
                label_image = val_label[0]
                color_label_mask = decode_color_labels(label_image)
                color_label_mask = np.transpose(color_label_mask, (1, 2, 0))
                cv2.imshow('gt_label', cv2.resize(color_label_mask, (IMG_SIZE[0], IMG_SIZE[1])))
                prediction = np.argmax(results[2][0], axis=0)
                color_pred_mask = decode_color_labels(prediction)
                color_pred_mask = np.transpose(color_pred_mask, (1, 2, 0))
                cv2.imshow('pred_label', cv2.resize(color_pred_mask, (IMG_SIZE[0], IMG_SIZE[1])))
                cv2.waitKey(0)
        end_time = time.time()
        # Guard against ZeroDivisionError when val_list has fewer rows than batch_size.
        n_batches = max(iter_id, 1)
        print("validation loss: %.3f, mean iou: %.3f, time cost: %.3f s"
              % (total_loss / n_batches, total_miou / n_batches, end_time - prev_time))
    # Test
    elif program_choice == 2:
        model = create_network(network=network)
        model.load_weights(model_path)
        print("loaded model from: %s" % model_path)
        print('Start Making Submissions!')
        test_list = os.listdir(test_dir)
        for test_name in test_list:
            test_ori_image = cv2.imread(os.path.join(test_dir, test_name))
            test_image = crop_resize_data(test_ori_image, label=None,
                                          image_size=IMG_SIZE, offset=crop_offset)
            # HWC BGR uint8 -> NCHW RGB float32 scaled to [-1, 1].
            out_image = np.expand_dims(np.array(test_image), axis=0)
            out_image = out_image[:, :, :, ::-1].transpose(0, 3, 1, 2).astype(np.float32) / (255.0 / 2) - 1
            results_1 = model.evaluate(out_image, None)
            if iter_id % 20 == 0:
                print('Finished Processing %d Images.' % (iter_id))
            iter_id += 1
            prediction = np.argmax(results_1[0][0], axis=0)
            # Save npy files (os.path.join instead of string concatenation)
            if save_test_logits:
                np.save(os.path.join(npy_dir, test_name.replace('.jpg', '.npy')), results_1[0][0])
            # Save Submission PNG
            submission_mask = expand_resize_data(prediction, SUBMISSION_SIZE, crop_offset)
            cv2.imwrite(os.path.join(sub_dir, test_name.replace('.jpg', '.png')), submission_mask)
            # Show Label
            if show_label:
                cv2.imshow('test_image', cv2.resize(test_ori_image, (IMG_SIZE[0], IMG_SIZE[1])))
                cv2.imshow('pred_label', cv2.resize(submission_mask, (IMG_SIZE[0], IMG_SIZE[1])))
                cv2.waitKey(0)
            sys.stdout.flush()
# Smoke-test script: load one sample from BaiduDataset and visualize its
# color-decoded label mask with matplotlib.
# from PIL import Image
import matplotlib as mpl
mpl.use('TkAgg')  # or whatever other backend that you want
import matplotlib.pyplot as plt
# import torch
import numpy as np  # fix: np.array is used below but numpy was never imported
import pandas as pd
from utils.process_labels import code_init, encode_labels, decode_labels, decode_color_labels, verify_labels
from utils.image_process import crop_resize_data, CLAHE_nomalization, contrast, random_filp
from Baidureader import BaiduDataset

data = BaiduDataset('./train.csv', [1024, 384])
image, label, ins = data[0]
print(image.size())
print(label.size())
# Labels must be uint8 for the decode/verify helpers.
label_ = np.array(label, dtype='uint8')
verify_labels(label_)
decode_mask = decode_color_labels(label_)
# decode_color_labels returns CHW; matplotlib wants HWC.
plt.imshow(decode_mask.transpose(1, 2, 0))
plt.show()
# cv2.imshow('img', image)
# cv2.imshow('label', label)
# cv2.imshow('ins', ins[0])
# cv2.waitKey(0)
# cv2.destroyAllWindows()
# a = list()
# a.append('./170927_063811892_Camera_5.jpg')
# b = list()
# b.append('./170927_063811892_Camera_5_bin.png')
# print(len(a))
# print(len(b))
# c = {'image': a, 'label': b}  # merge into a new dict c
def main():
    """Validate (program_choice == 1) or generate test submissions
    (program_choice == 2) with a PaddlePaddle (fluid) lane-segmentation net.

    Configuration is hard-coded below; output goes to stdout, sub_dir PNGs
    and, optionally, .npy logit dumps under npy_dir.
    """
    IMG_SIZE = [1536, 512]             # network input size (width, height)
    SUBMISSION_SIZE = [3384, 1710]     # full-resolution output size (width, height)
    save_test_logits = False           # also dump raw logits to npy_dir
    num_classes = 8
    batch_size = 4
    log_iters = 100
    network = 'unet_simple'
    # Define paths for each model
    if network == 'deeplabv3p':
        model_path = "./model_weights/paddle_deeplabv3p_8_end_060223"
        npy_dir = '/npy_save/deeplabv3p/'
    elif network == 'unet_base':
        model_path = "./model_weights/paddle_unet_base_10_end_059909"
        npy_dir = '/npy_save/unet_base/'
    elif network == 'unet_simple':
        model_path = "./model_weights/paddle_unet_simple_12_end_060577"
        npy_dir = '/npy_save/unet_simple/'
    program_choice = 2  # 1 - Validation; 2 - Test
    show_label = False
    crop_offset = 690   # rows cropped from the top of the full-size image
    data_dir = './data_list/val.csv'
    test_dir = '../PaddlePaddle/TestSet_Final/ColorImage/'
    sub_dir = './test_submission/'
    # Get data list and split it into train and validation set.
    val_list = pd.read_csv(data_dir)
    # Initialization: symbolic input/label placeholders for the fluid graph.
    images = fluid.layers.data(name='image', shape=[3, IMG_SIZE[1], IMG_SIZE[0]], dtype='float32')
    labels = fluid.layers.data(name='label', shape=[1, IMG_SIZE[1], IMG_SIZE[0]], dtype='float32')
    iter_id = 0
    total_loss = 0.0
    total_miou = 0.0
    prev_time = time.time()
    # Validation
    if program_choice == 1:
        val_reader = val_image_gen(val_list, batch_size=batch_size, image_size=IMG_SIZE, crop_offset=crop_offset)
        reduced_loss, miou, pred = create_network(images, labels, num_classes, network=network,
                                                  image_size=(IMG_SIZE[1], IMG_SIZE[0]), for_test=False)
        place = fluid.CUDAPlace(0)
        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        fluid.io.load_params(exe, model_path)
        print("loaded model from: %s" % model_path)
        # Parallel Executor to use multi-GPUs
        exec_strategy = fluid.ExecutionStrategy()
        exec_strategy.allow_op_delay = True
        build_strategy = fluid.BuildStrategy()
        build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce
        train_exe = fluid.ParallelExecutor(use_cuda=True, build_strategy=build_strategy, exec_strategy=exec_strategy)
        print('Start Validation!')
        for iteration in range(int(len(val_list) / batch_size)):
            val_data = next(val_reader)
            results = train_exe.run(feed=get_feeder_data(val_data, place),
                                    fetch_list=[reduced_loss.name, miou.name, pred.name])
            if iter_id % log_iters == 0:
                print('Finished Processing %d Images.' % (iter_id * batch_size))
            iter_id += 1
            total_loss += np.mean(results[0])
            total_miou += np.mean(results[1])
            # label to mask: preview ground truth vs. prediction when enabled
            if show_label == True:
                label_image = val_data[1][0]
                color_label_mask = decode_color_labels(label_image)
                color_label_mask = np.transpose(color_label_mask, (1, 2, 0))
                cv2.imshow('gt_label', cv2.resize(color_label_mask, (IMG_SIZE[0], IMG_SIZE[1])))
                prediction = np.argmax(results[2][0], axis=0)
                color_pred_mask = decode_color_labels(prediction)
                color_pred_mask = np.transpose(color_pred_mask, (1, 2, 0))
                cv2.imshow('pred_label', cv2.resize(color_pred_mask, (IMG_SIZE[0], IMG_SIZE[1])))
                cv2.waitKey(0)
        end_time = time.time()
        print("validation loss: %.3f, mean iou: %.3f, time cost: %.3f s"
              % (total_loss / iter_id, total_miou / iter_id, end_time - prev_time))
    # Test
    elif program_choice == 2:
        predictions = create_network(images, labels, num_classes, network=network,
                                     image_size=(IMG_SIZE[1], IMG_SIZE[0]), for_test=True)
        place = fluid.CUDAPlace(0)
        # place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        fluid.io.load_params(exe, model_path)
        print("loaded model from: %s" % model_path)
        print('Start Making Submissions!')
        test_list = os.listdir(test_dir)
        for test_name in test_list:
            test_ori_image = cv2.imread(os.path.join(test_dir, test_name))
            test_image = crop_resize_data(test_ori_image, label=None, image_size=IMG_SIZE, offset=crop_offset)
            # HWC BGR uint8 -> NCHW RGB float32 scaled to [-1, 1].
            out_image = np.expand_dims(np.array(test_image), axis=0)
            out_image = out_image[:, :, :, ::-1].transpose(0, 3, 1, 2).astype(np.float32) / (255.0 / 2) - 1
            feed_dict = {}
            feed_dict["image"] = out_image
            results_1 = exe.run(feed=feed_dict, fetch_list=[predictions])
            if iter_id % 20 == 0:
                print('Finished Processing %d Images.' % (iter_id))
            iter_id += 1
            prediction = np.argmax(results_1[0][0], axis=0)
            # Save npy files
            if save_test_logits == True:
                np.save(npy_dir + test_name.replace('.jpg', '.npy'), results_1[0][0])
            # Save Submission PNG
            submission_mask = expand_resize_data(prediction, SUBMISSION_SIZE, crop_offset)
            cv2.imwrite(os.path.join(sub_dir, test_name.replace('.jpg', '.png')), submission_mask)
            # Show Label
            if show_label == True:
                cv2.imshow('test_image', cv2.resize(test_ori_image, (IMG_SIZE[0], IMG_SIZE[1])))
                cv2.imshow('pred_label', cv2.resize(submission_mask, (IMG_SIZE[0], IMG_SIZE[1])))
                cv2.waitKey(0)
            sys.stdout.flush()