def method_two(image):
    """Segment the inner and outer heart walls with active contours.

    Parameters
    ----------
    image : BGR image (numpy array).

    Returns
    -------
    tuple
        (snake_in, snake_out, area): the inner-wall contour, the outer-wall
        contour, and the cross-sectional area enclosed by the inner wall.
    """
    original = np.copy(image)

    # Preprocess: grayscale -> gamma boost -> light blur -> Otsu binarisation.
    gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
    gray = utils.adjust_gamma(gray, 1.5)
    gray = cv.GaussianBlur(gray, (3, 3), 0)
    _, binary = cv.threshold(gray, 0, 255, cv.THRESH_OTSU)

    # Morphology: thicken the foreground, then close small gaps.
    struct = cv.getStructuringElement(cv.MORPH_RECT, (3, 3))
    mask = cv.dilate(binary, struct, iterations=3)
    mask = cv.morphologyEx(mask, cv.MORPH_CLOSE, struct, iterations=3)

    # Active contours: radius 60 traces the inner wall, radius 80 the outer.
    snake_in = snakes_algorithm(mask, 60, alpha=0.003, beta=3, gamma=0.1)
    snake_out = snakes_algorithm(mask, 80, alpha=0.003, beta=3, gamma=0.1)

    # Cross-sectional area of the heart encircled by the inner wall;
    # contourArea expects an Nx1x2 float32 contour.
    contour = cv.UMat(np.expand_dims(snake_in.astype(np.float32), 1))
    area = cv.contourArea(contour)

    # Plot the result on the untouched copy of the input.
    utils.show_figure_snakes(original, snake_in, snake_out, save=False)
    return snake_in, snake_out, area
def method_one(image, point1, point2):
    """Threshold-and-morphology segmentation inside a rectangular ROI.

    Parameters
    ----------
    image : BGR image (numpy array).
    point1, point2 : opposite corners of the region of interest.

    Returns
    -------
    tuple
        (orig_copy, area): the original image annotated in place by
        get_cross_sectional_area, and the measured cross-sectional area.
    """
    orig_img = np.copy(image)

    # Preprocess the ROI only: crop -> grayscale -> gamma -> blur -> Otsu.
    roi = utils.get_crop(image, point1, point2)
    roi = cv.cvtColor(roi.copy(), cv.COLOR_BGR2GRAY)
    roi = utils.adjust_gamma(roi, 1.5)
    roi = cv.GaussianBlur(roi, (3, 3), 0)
    _, threshold = cv.threshold(roi, 0, 255, cv.THRESH_OTSU)

    # Morphology on the binary ROI.
    struct = cv.getStructuringElement(cv.MORPH_RECT, (3, 3))
    dilation = cv.dilate(threshold, struct, iterations=3)
    closing = cv.morphologyEx(dilation, cv.MORPH_CLOSE, struct, iterations=3)

    # Offset of the ROI inside the full frame, needed to place the result.
    dx = point1[0]
    dy = point1[1]
    orig_copy = np.copy(orig_img)
    area = get_cross_sectional_area(orig_copy, closing, dx, dy)

    utils.show_figures(images=[
        orig_img, threshold, dilation, closing,
        utils.get_crop(orig_copy, point1, point2), orig_copy
    ],
                       titles=[
                           "Original", "Threshold", "Dilation", "Closing",
                           "ROI", "Result"
                       ],
                       save=False)
    return orig_copy, area
def make_random_manipulation(img, rng, crop_policy=args.crop_policy, crop_size=args.crop_size):
    """Apply one randomly chosen manipulation to an image.

    Parameters
    ----------
    img : 1024x1024 PIL image

    Returns
    -------
    img_manip : (args.crop_size, args.crop_size) PIL image
    """
    def crop(x):
        # Shared crop step for the JPEG / gamma manipulations.
        return make_crop(x, crop_size, rng, crop_policy=crop_policy)

    # Eight candidate manipulations, chosen uniformly at random:
    # two JPEG qualities, two gamma adjustments, four resize ratios.
    manipulations = [
        lambda x: jpg_compress(crop(x), quality=70),
        lambda x: jpg_compress(crop(x), quality=90),
        lambda x: adjust_gamma(crop(x), gamma=0.8),
        lambda x: adjust_gamma(crop(x), gamma=1.2),
    ]
    for ratio in ('0.5', '0.8', '1.5', '2.0'):
        manipulations.append(
            lambda x, r=ratio: interp(
                x, ratio=r, rng=rng, crop_policy=crop_policy, crop_size=crop_size))
    return rng.choice(manipulations)(img)
# frame_origin = camera_capture(0)
# except:
#     print('Erro: Captura da camera')
# frame = frame_origin[180:400, 120:500]

now = datetime.datetime.now()
# date_stamp = (f'{now.year}.{now.month}.{now.day}._{now.hour}h{now.minute}m{now.second}s')
date_stamp = (f'{current_iter}')

# utils.show_img(frame, 'Imagem')
# utils.save_img(frame, 'Cropped')

# ------------------------------------------------------------------------------------------- Gamma
# NOTE(review): `frame` is only assigned in the commented-out capture code above;
# if it is not defined elsewhere this step raises NameError -- confirm.
try:
    frame_gamma = utils.adjust_gamma(frame, gamma=2.0)
except Exception:  # narrowed from bare `except:`, which also swallowed KeyboardInterrupt/SystemExit
    print('Erro: Gamma')
else:
    # Display only when the gamma step succeeded; previously this ran
    # unconditionally and raised NameError on `frame_gamma` after a failure.
    utils.show_img(frame_gamma, 'Gamma')
# utils.save_img(frame, 'Gamma', date_stamp)

# ------------------------------------------------------------------------------------------- CLACHE
try:
    frame_clache = utils.apply_clache(frame_gamma, clip_Limit=2.0, GridSize=8)
except Exception:  # narrowed from bare `except:`
    print('Erro: Clache')
# utils.show_img(frame_clache, 'Clache')
def predict(optimizer, **kwargs):
    """Run test-time-augmented (TTA) inference and write two CSVs.

    Loads X_test.npy from kwargs['data_path'], builds one deterministic
    transform plus 9 random augmented views per image, averages the
    per-group softmax probabilities, and saves 'proba.csv' (raw probs)
    and 'submission.csv' (predicted camera labels) next to
    kwargs['predict_from'].

    Expected kwargs: 'data_path', 'batch_size', 'not_lazy', 'predict_from'.

    NOTE(review): written for Python 2 (`xrange`, integer `/` in the
    reshape, `range(10)` used as a list) -- consistent with the prints in
    train() elsewhere in this file.
    """
    # load data
    X_test = np.load(os.path.join(kwargs['data_path'], 'X_test.npy'))
    # Dummy labels: the dataset API requires targets, but they are unused here.
    y_test = np.zeros((len(X_test), ), dtype=np.int64)
    # Deterministic view of each image (no augmentation).
    test_transform = transforms.Compose([
        transforms.Lambda(lambda x: Image.fromarray(x)),
        transforms.ToTensor(),
        transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
    ])
    # TTA: random flips, optional 90-degree rotation, gamma jitter and JPEG
    # re-compression, seeded for reproducibility.
    rng = RNG(seed=1337)
    base_transform = transforms.Compose([
        transforms.Lambda(lambda x: Image.fromarray(x)),
        transforms.RandomHorizontalFlip(),
        transforms.RandomVerticalFlip(),
        transforms.Lambda(
            lambda img: [img, img.transpose(Image.ROTATE_90)][int(rng.rand() < 0.5)]),
        transforms.Lambda(
            lambda img: adjust_gamma(img, gamma=rng.uniform(0.8, 1.25))),
        transforms.Lambda(
            lambda img: jpg_compress(img, quality=rng.randint(70, 100 + 1))),
        transforms.ToTensor(),
        transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
    ])
    tta_n = 10

    def tta_f(img, n=tta_n - 1):
        # One clean view + (tta_n - 1) augmented views, stacked on a new dim.
        out = [test_transform(img)]
        for _ in xrange(n):
            out.append(base_transform(img))
        return torch.stack(out, 0)

    tta_transform = transforms.Compose([
        transforms.Lambda(lambda img: tta_f(img)),
    ])
    test_loader = DataLoader(dataset=make_numpy_dataset(X_test, y_test, tta_transform),
                             batch_size=kwargs['batch_size'],
                             shuffle=False,
                             num_workers=4)
    # Full test dataset is loaded only to recover the original file names.
    test_dataset = KaggleCameraDataset(kwargs['data_path'],
                                       train=False,
                                       lazy=not kwargs['not_lazy'])

    # compute predictions
    logits, _ = optimizer.test(test_loader)

    # compute and save raw probs
    logits = np.vstack(logits)
    proba = softmax(logits)

    # group and average predictions
    # NOTE(review): 16 presumably = patches per test image, so each original
    # image contributes 16 * tta_n rows -- confirm against the patching code.
    K = 16 * tta_n
    # Python 2 integer division: len(proba) / K yields an int group count.
    proba = proba.reshape(len(proba) / K, K, -1).mean(axis=1)

    fnames = [os.path.split(fname)[-1] for fname in test_dataset.X]
    df = pd.DataFrame(proba)
    df['fname'] = fnames
    # Reorder columns: file name first, then the 10 class-probability columns.
    df = df[['fname'] + range(10)]
    dirpath = os.path.split(kwargs['predict_from'])[0]
    df.to_csv(os.path.join(dirpath, 'proba.csv'), index=False)

    # compute predictions and save in submission format
    index_pred = unhot(one_hot_decision_function(proba))
    data = {
        'fname': fnames,
        'camera': [KaggleCameraDataset.target_labels()[int(c)] for c in index_pred]
    }
    df2 = pd.DataFrame(data, columns=['fname', 'camera'])
    df2.to_csv(os.path.join(dirpath, 'submission.csv'), index=False)
def train(optimizer, **kwargs):
    """Load (or create) a stratified train/val split and run training.

    If cached X_train/y_train/X_val/y_val .npy files exist under
    kwargs['data_path'], they are reused; otherwise the patch arrays are
    split once with StratifiedShuffleSplit and the split is cached so
    subsequent runs see the same data.

    Expected kwargs: 'data_path', 'n_val', 'random_seed', 'batch_size'.

    NOTE(review): Python 2 code (`print` statements) -- consistent with
    predict() elsewhere in this file.
    """
    # load training data
    print 'Loading and splitting data ...'
    if os.path.isfile(os.path.join(kwargs['data_path'], 'X_train.npy')):
        # Cached split found: reuse it for reproducibility across runs.
        X_train = np.load(os.path.join(kwargs['data_path'], 'X_train.npy'))
        y_train = np.load(os.path.join(kwargs['data_path'], 'y_train.npy'))
        X_val = np.load(os.path.join(kwargs['data_path'], 'X_val.npy'))
        y_val = np.load(os.path.join(kwargs['data_path'], 'y_val.npy'))
    else:
        X = np.load(os.path.join(kwargs['data_path'], 'X_patches.npy'))
        y = np.load(os.path.join(kwargs['data_path'], 'y_patches.npy'))
        # split into train, val in stratified fashion
        sss = StratifiedShuffleSplit(n_splits=1,
                                     test_size=kwargs['n_val'],
                                     random_state=kwargs['random_seed'])
        # Only the labels matter for stratification, hence the zeros placeholder.
        train_ind, val_ind = list(sss.split(np.zeros_like(y), y))[0]
        X_train = X[train_ind]
        y_train = y[train_ind]
        X_val = X[val_ind]
        y_val = y[val_ind]
        # Cache the split so later runs (and predict-time sanity checks) match.
        np.save(os.path.join(kwargs['data_path'], 'X_train.npy'), X_train)
        np.save(os.path.join(kwargs['data_path'], 'y_train.npy'), y_train)
        np.save(os.path.join(kwargs['data_path'], 'X_val.npy'), X_val)
        np.save(os.path.join(kwargs['data_path'], 'y_val.npy'), y_val)

    rng = RNG()
    # Train-time augmentation: flips, optional 90-degree rotation, gamma jitter,
    # JPEG re-compression -- mirrors the TTA transform used in predict().
    # noinspection PyTypeChecker
    train_transform = transforms.Compose([
        transforms.Lambda(lambda x: Image.fromarray(x)),
        transforms.RandomHorizontalFlip(),
        transforms.RandomVerticalFlip(),
        transforms.Lambda(
            lambda img: [img, img.transpose(Image.ROTATE_90)][int(rng.rand() < 0.5)]),
        transforms.Lambda(
            lambda img: adjust_gamma(img, gamma=rng.uniform(0.8, 1.25))),
        transforms.Lambda(
            lambda img: jpg_compress(img, quality=rng.randint(70, 100 + 1))),
        transforms.ToTensor(),
        transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
    ])
    # Validation uses the deterministic pipeline only.
    val_transform = transforms.Compose([
        transforms.Lambda(lambda x: Image.fromarray(x)),
        transforms.ToTensor(),
        transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
    ])

    train_dataset = make_numpy_dataset(X_train, y_train, train_transform)
    val_dataset = make_numpy_dataset(X_val, y_val, val_transform)

    # define loaders
    # shuffle=False because ordering is delegated to the StratifiedSampler,
    # which keeps class proportions within each batch.
    train_loader = DataLoader(dataset=train_dataset,
                              batch_size=kwargs['batch_size'],
                              shuffle=False,
                              num_workers=4,
                              sampler=StratifiedSampler(
                                  class_vector=y_train,
                                  batch_size=kwargs['batch_size']))
    val_loader = DataLoader(dataset=val_dataset,
                            batch_size=kwargs['batch_size'],
                            shuffle=False,
                            num_workers=4)

    print 'Starting training ...'
    optimizer.train(train_loader, val_loader)
# Load the raw training images and their masks via the project helper;
# the flags (homomorphic / max_pool / morphology / bit_wise) select the
# preprocessing applied inside utils.get_imgs.
imgs = utils.get_imgs(['init_imgs'],
                      file_type_list=['.bmp', '.png'],
                      homomorphic=True,
                      max_pool=False,
                      morphology=True,
                      bit_wise=True)
mask = utils.get_imgs(['mask_imgs'],
                      file_type_list=['.bmp', '.png'],
                      max_pool=False,
                      morphology=False,
                      bit_wise=True)

# Preprocess the input images and mask images used for training, and save them.
if not os.path.exists('Q2'):
    # NOTE(review): `TRAIN` (and `kernel` below) are defined elsewhere in the
    # file -- confirm. `mode` is only set on the very first run, when 'Q2'
    # does not yet exist.
    mode = TRAIN
    os.mkdir('Q2')
if not os.path.exists('Q2/imgs'):
    os.mkdir('Q2/imgs')
if not os.path.exists('Q2/mask_imgs'):
    os.mkdir('Q2/mask_imgs')
for i in range(imgs.shape[0]):
    tmp = imgs[i].astype(np.uint8)
    # Strong gamma (6) followed by an erode/dilate pair (morphological opening).
    tmp = utils.adjust_gamma(tmp, gamma=6)
    tmp = cv2.erode(tmp, kernel)
    tmp = cv2.dilate(tmp, kernel)
    cv2.imwrite('Q2/imgs/' + str(i) + '.bmp', tmp)
for i in range(mask.shape[0]):
    tmp = mask[i].astype(np.uint8)
    # Binarise the mask: keep only pure-white (255) pixels, zero everything else.
    # tmp [tmp > 0] = 255
    tmp[tmp < 255] = 0
    cv2.imwrite('Q2/mask_imgs/' + str(i) + '.bmp', tmp)

# Preprocess the images used for testing and save them.
if not os.path.exists('Q2/predictions'):
    os.mkdir('Q2/predictions')
if not os.path.exists('Q2/test_imgs'):
    os.mkdir('Q2/test_imgs')
father_path = os.path.join(os.getcwd(), "..")