Example #1
import numpy as np
import pandas as pd
from tqdm import tqdm

from augmentators import randomHueSaturationValue, randomHorizontalFlip, randomShiftScaleRotate
from u_net import get_unet_128
import glob

orig_width = 240
orig_height = 320

threshold = 0.5

epochs = 10
batch_size = 1
input_size, model = get_unet_128()
model.load_weights(filepath='weights/best_weights.hdf5')

print(input_size)

test_filenames = glob.glob("input/test/*.jpg")
# Normalize Windows path separators, then strip the extension and directory,
# leaving only the bare image ids.
test_filenames = [
    filename.replace('\\', '/').replace('.jpg', '')
    for filename in test_filenames
]
test_filenames = [filename.split('/')[-1] for filename in test_filenames]

print('Predicting on {} samples with batch_size = {}...'.format(
    len(test_filenames), batch_size))
for start in tqdm(range(0, len(test_filenames), batch_size)):
    x_batch = []
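    # The snippet is truncated here; what follows is a minimal sketch of how
    # such a batch-prediction loop typically continues, assuming cv2 for image
    # I/O, an 'input/test/{}.jpg' path template, and a sigmoid mask output.
    # None of this is confirmed by the source.
    import cv2  # assumed to be imported at the top of the full script
    end = min(start + batch_size, len(test_filenames))
    for name in test_filenames[start:end]:
        # Load each test image and resize it to the network input size.
        img = cv2.imread('input/test/{}.jpg'.format(name))
        img = cv2.resize(img, (input_size, input_size))
        x_batch.append(img)
    x_batch = np.array(x_batch, np.float32) / 255
    preds = model.predict_on_batch(x_batch)
    for pred in preds:
        # Resize each mask back to the original resolution and binarize it
        # with the 0.5 threshold defined above.
        mask = cv2.resize(np.squeeze(pred), (orig_width, orig_height))
        mask = mask > threshold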
Example #2
import glob
from datetime import datetime
import time

# Assumed import: the net factories are not imported in this truncated snippet;
# they presumably come from u_net, as in the other examples.
from u_net import get_unet_128, get_unet_128_modified, get_unet_128_small

# epochs = 50
# batch_size = 4  # 1 or 2 or 4
# input_size, model = get_unet_128(input_shape=(256, 256, 3))

epochs = int(input("Number of epochs (25)? ") or '25')
batch_size = int(input("Batch size (2)? ") or '2')
input_size = int(input("Input size (128)? ") or '128')
net_name = input("Which net do you want to use?\nnormal (default)\nbig\nsmall\n: ") or 'normal'

input_shape = (input_size, input_size, 3)
if net_name == 'normal': # normal
    input_size, model = get_unet_128(input_shape=input_shape)
elif net_name == 'big': # big
    input_size, model = get_unet_128_modified(input_shape=input_shape)
elif net_name == 'small': # small
    input_size, model = get_unet_128_small(input_shape=input_shape)
else:
    raise ValueError('Unknown net: ' + net_name)

# model.load_weights(filepath='weights/best_weights.hdf5') # For resuming train

weight_name = datetime.now().strftime('weights/date-%Y%m%d%H%M_') + (
        'epochs-%d_batch-%d_inputsize-%d_net-%s.hdf5' % (epochs, batch_size, input_size, net_name))

train_img_path_template = 'input/train/{}.png'
train_img_mask_path_template = 'input/train/segmentation/{}.png'
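# The snippet stops after the path templates. Below is a minimal sketch of how
# the timestamped weight_name would typically feed a Keras checkpoint callback;
# the callback setup is an assumption, not part of this snippet.
from keras.callbacks import ModelCheckpoint

callbacks = [
    # Save the best weights (by validation loss) under the timestamped name.
    ModelCheckpoint(filepath=weight_name,
                    monitor='val_loss',
                    save_best_only=True,
                    save_weights_only=True),
]
# model.fit(..., epochs=epochs, callbacks=callbacks)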
Example #3
import numpy as np
import pandas as pd
from tqdm import tqdm

from u_net import get_unet_128, get_unet_256, get_unet_512

df_test = pd.read_csv('input/sample_submission.csv')
ids_test = df_test['img'].map(lambda s: s.split('.')[0])

input_size = 128
batch_size = 16

orig_width = 1918
orig_height = 1280

threshold = 0.5

model = get_unet_128()
model.load_weights(filepath='weights/best_weights.hdf5')

names = []
for id in ids_test:
    names.append('{}.jpg'.format(id))


# https://www.kaggle.com/stainsby/fast-tested-rle
def run_length_encode(mask):
    '''
    mask: numpy array, 1 - mask, 0 - background
    Returns run length as a formatted string
    '''
    inds = mask.flatten()
    runs = np.where(inds[1:] != inds[:-1])[0] + 2
    # Convert change points into (start, length) pairs, as in the linked kernel.
    runs[1::2] = runs[1::2] - runs[:-1:2]
    rle = ' '.join([str(r) for r in runs])
    return rle
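
# Quick sanity check on a hypothetical toy mask (not from the source): the
# encoder emits 1-based (start, length) pairs for each run of foreground pixels.
toy_mask = np.array([0, 1, 1, 0, 1, 0])
print(run_length_encode(toy_mask))  # '2 2 5 1': start 2 length 2, start 5 length 1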
Example #4
import cv2
import numpy as np
import pandas as pd
from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint, TensorBoard
from sklearn.model_selection import train_test_split
from augmentators import randomHueSaturationValue, randomHorizontalFlip, randomShiftScaleRotate
import matplotlib.pyplot as plt
from u_net import get_unet_128
from gauss import generate_hm
import glob

# epochs = 200
epochs = 50
batch_size = 1
nClasses = 9
input_size, model = get_unet_128(input_shape=(128, 128, 3), num_classes=1)
# model.load_weights(filepath='weights/best_weights.hdf5') # For resuming train

print(model.summary())

train_img_path_template = 'input/Drill_train/Cropped_384/train/color_back/img_{}.png'
train_img_mask_path_template = 'input/Drill_train/Cropped_384/train/mask/mask_{}.png'
train_img_keypoints_path_template = 'input/Drill_train/Cropped_384/train/train_keypoints'

train_filenames = glob.glob(
    "input/Drill_train/Cropped_384/train/color_back/*.png")
train_filenames = [
    filename.replace('\\', '/').replace('.png', '')
    for filename in train_filenames
]
# Strip the directory and the 'img_' prefix, keeping only the numeric id.
train_filenames = [filename.split('/')[-1][4:] for filename in train_filenames]
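
# The snippet is truncated here. Below is a minimal sketch of how the recovered
# ids would plug back into the path templates above to pair each image with its
# mask; the loading itself is an assumption, not shown in the source.
for file_id in train_filenames:
    img = cv2.imread(train_img_path_template.format(file_id))
    mask = cv2.imread(train_img_mask_path_template.format(file_id),
                      cv2.IMREAD_GRAYSCALE)
    # Resize both to the network input size before batching.
    img = cv2.resize(img, (input_size, input_size))
    mask = cv2.resize(mask, (input_size, input_size))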