# Example #1
def predict_image(im_path):
    """Predict multi-label classes for a single image.

    Segments the image, extracts hand-crafted features, and feeds both the
    image tensor and the feature vector to the two-input model.

    Returns a flat list [im_path, predicted_label, prob_0, prob_1, ...]
    (probabilities stringified) when at least one class probability exceeds
    the 0.5 threshold, otherwise None.
    """
    plankton = Segmentation(im_path, target_shape=(75, 75, 3))
    plankton.segment()
    padded = plankton.get_padded()
    feat = np.array(plankton.get_features())
    padded = preprocess_input(np.array(padded, dtype=np.float32))
    # Add the leading batch dimension expected by the model.
    x_img = padded.reshape(1, padded.shape[0], padded.shape[1], padded.shape[2])
    x_feat = feat.reshape(1, feat.shape[0])
    x_feat = mms.transform(x_feat)
    y_hat = model.predict([x_img, x_feat])

    # Keep only classes whose probability clears the 0.5 threshold.
    labels = {i: p for i, p in enumerate(y_hat.flatten()) if p > 0.5}
    if not labels:
        return None

    # Sort ascending by probability; save_image receives this order.
    labels = sorted(labels.items(), key=lambda kv: kv[1])
    save_image(im_path, labels)
    # BUG FIX: the list is sorted low -> high, so labels[0] was the *least*
    # confident class above threshold; the prediction must be the most
    # confident one, i.e. labels[-1].
    label_hat = class_map[labels[-1][0]]

    results = [im_path, label_hat]
    results.extend(str(p) for p in y_hat.flatten())
    return results
# Example #2
def predict_image(im_path):
    """Classify one image with the two-input (image + features) model.

    Segments the image, preprocesses the padded crop, normalizes the
    extracted feature vector, and returns a flat list of the form
    [im_path, predicted_label, prob_0, prob_1, ...] with every probability
    converted to a string.
    """
    seg = Segmentation(im_path, target_shape=(75, 75, 3))
    seg.segment()

    img = preprocess_input(np.array(seg.get_padded(), dtype=np.float32))
    features = np.array(seg.get_features())

    # Prepend a batch axis to each of the model's two inputs.
    batch_img = img.reshape((1,) + img.shape)
    batch_feat = mms.transform(features.reshape(1, features.shape[0]))

    y_hat = model.predict([batch_img, batch_feat])
    # Single-label decision: the class with the highest probability wins.
    label_hat = class_map[np.argmax(y_hat)]
    save_image(im_path, y_hat)

    probs = [str(p) for p in y_hat.flatten()]
    return [im_path, label_hat] + probs
# Combine the per-class DataFrames into one table of (im_name, label) rows.
df = pd.concat(frames)

# Load the feature normalizer fitted at training time.
# NOTE(review): pickle.load is unsafe on untrusted files — fine only if
# 'normalizer.pickle' is produced by this project.
with open('normalizer.pickle', 'rb') as handle:
    mms = pickle.load(handle)

# Pre-allocate the dataset arrays: raw images, hand-crafted feature
# vectors, and one-hot label rows (one row per DataFrame entry).
X_img = np.empty((df.shape[0], input_shape[0], input_shape[1], input_shape[2]), dtype=np.uint8)
X_feat = np.empty((df.shape[0], feat_shape[0]))
y = np.empty((df.shape[0], n_classes), dtype=int)
data_path = os.path.join(os.getcwd(), 'data')

# Segment each image, extract its features, and one-hot encode its label.
for i, (im_name, label) in tqdm(enumerate(zip(df.im_name, df.label))):
    im_dir = os.path.join(data_path, class_map[label])
    im_path = os.path.join(im_dir, im_name)
    plankton = Segmentation(im_path, target_shape=input_shape)
    plankton.segment()
    padded = plankton.get_padded()
    # NOTE(review): `aug` is computed but never used — X_img stores the
    # un-augmented `padded`. Confirm whether augmentation was intended here
    # (if not, this line is dead work per image).
    aug = seq.augment_image(padded)
    X_img[i,] = preprocess_input(padded)
    X_feat[i,] = plankton.get_features()
    y[i,] = to_categorical(label, num_classes=n_classes)

# Scale features with the training-time normalizer, then shuffle all three
# arrays in unison (fixed seed for reproducibility).
X_feat = mms.transform(X_feat)
X_img, X_feat, y = shuffle(X_img, X_feat, y, random_state=0)

print(X_img.shape)
print(X_feat.shape)
print(y.shape)

# The model takes two inputs: the image tensor and the feature vector.
X = [X_img, X_feat]
checkpoint = ModelCheckpoint('./models/inception_v3_3k_cached.model', monitor='val_acc', verbose=1, mode='max',