def process(self, image_array):
    """Preprocess a drawn digit image and return the model's raw prediction.

    Parameters
    ----------
    image_array : array-like
        Raw pixel data of the drawn digit image.

    Returns
    -------
    The prediction array produced by the loaded Keras model.
    """
    # BUG FIX: the original read `model = model.load('my_model.h5')`,
    # referencing the local name `model` before it was assigned
    # (UnboundLocalError). Load the saved Keras model explicitly, matching
    # how the sibling process() function in this file loads its model.
    model = load_model('my_model.h5')

    # Preprocessing pipeline: crop twice, center, resize, scale to [0, 1],
    # then reshape to the layout the network expects.
    digit = prep.crop_image(image_array)
    digit = prep.crop_image(digit)
    digit = prep.center_image(digit)
    digit = prep.resize_image(digit)
    digit = prep.min_max_scaler(digit)
    digit = prep.reshape_array(digit)
    return model.predict(digit)
Beispiel #2
0
def process(image_array):
    """Predict the digit drawn in *image_array*.

    Parameters
    ----------
    image_array : array-like
        Raw pixel data of the drawn digit image.

    Returns
    -------
    int or str
        The predicted class index when the model is confident
        (max probability > 0.70), otherwise the string "Draw Again!".
    """
    model = load_model('my_model2.h5')

    # Preprocessing pipeline: crop twice, center, resize, scale, reshape.
    digit = prep.crop_image(image_array)
    digit = prep.crop_image(digit)
    digit = prep.center_image(digit)
    digit = prep.resize_image(digit)
    digit = prep.min_max_scaler(digit)
    digit = prep.reshape_array(digit)
    digit = model.predict(digit)

    # The original scanned the scores manually with a flag/counter and a
    # `predict == True` comparison. np.argmax gives the same first index
    # of the maximum; behaviour (including the printed score and the 0.70
    # confidence cutoff) is unchanged.
    scores = digit[0]
    best = int(np.argmax(scores))
    if scores[best] > 0.70:
        print(scores[best])
        return best
    return "Draw Again!"
# Plot Training Summaries
plot_summaries(history, PLOT_NAME1, PLOT_NAME2)

# Create Predictions
row_ids, targets = [], []
sample_id = 0  # renamed from `id`, which shadowed the builtin

# Loop through the four test parquet files
for i in range(4):
    img_df = pd.read_parquet(
        os.path.join(DATA_DIR, 'test_image_data_' + str(i) + '.parquet'))
    img_df = img_df.drop('image_id', axis=1)

    # Loop through rows (one image per row) in the parquet file
    for _, row in img_df.iterrows():
        # Resize to the network's input resolution, replicate the single
        # channel CHANNELS times, and add the leading batch dimension.
        img = resize_image(row.values, WIDTH, HEIGHT, WIDTH_NEW, HEIGHT_NEW)
        img = np.stack((img, ) * CHANNELS, axis=-1)
        image = img.reshape(-1, HEIGHT_NEW, WIDTH_NEW, 3)

        # Predict: one submission row per target column (3 per image).
        preds = model.predict(image, verbose=1)
        for k in range(3):
            row_ids.append('Test_' + str(sample_id) + '_' + tgt_cols[k])
            targets.append(np.argmax(preds[k]))
        sample_id += 1
# Create and Save Submission File
submission = pd.DataFrame({
    'row_id': row_ids,
    'target': targets
},
Beispiel #4
0
    def generator(self):
        """Yield (image_batch, one_hot_labels) tuples indefinitely.

        Training mode applies random cropping and augmentation; otherwise a
        deterministic center crop of ``self.input_size`` is taken. Pixel
        values are scaled from [0, 255] to [-1, 1]. Labels are derived from
        the first path component of each image path via ``self.cls2id``.
        """
        while True:
            batches = _make_batches(size=self.total_images,
                                    batch_size=self.batch_size)
            for start, end in batches:
                arr = []
                labels = []
                cur_batch = self.image_paths[start:end]

                for image_path in cur_batch:
                    img = imread(
                        fname=os.path.join(self.data_path, image_path))

                    # Grayscale (H, W) -> replicate to 3 channels.
                    if img.ndim == 2:
                        img = img[..., np.newaxis]
                        img = np.tile(A=img, reps=(1, 1, 3))

                    # BUG FIX: the original tested `ndim == 4`, which a
                    # single imread result never is (at most ndim 3), so
                    # RGBA images silently kept their alpha channel. Test
                    # the channel count and drop alpha instead.
                    if img.shape[-1] == 4:
                        img = img[..., :3]

                    # Resize while maintaining aspect ratio.
                    img = resize_image(img=img, size=self.input_size)

                    if self.training:
                        # Random cropping + augmentation while training.
                        img = random_crop_image(img=img, size=self.input_size)
                        img = augment(img=img,
                                      horizontal_flip=True,
                                      vertical_flip=True,
                                      brightness=True,
                                      contrast=True,
                                      rotation=True,
                                      translation=True,
                                      blur=True,
                                      noise=True)
                    else:
                        # Deterministic center crop for evaluation.
                        # BUG FIX: the original used `/`, which under
                        # Python 3 yields float slice bounds (TypeError).
                        # `//` preserves the intended Python-2 semantics.
                        h, w, _ = img.shape
                        center_h = h // 2
                        center_w = w // 2
                        half = self.input_size // 2
                        y1 = center_h - half
                        x1 = center_w - half
                        y2 = center_h + half
                        x2 = center_w + half
                        if self.input_size % 2 == 1:
                            # Odd crop size: extend one pixel so the crop
                            # is exactly input_size wide/tall.
                            y2 += 1
                            x2 += 1
                        img = img[y1:y2, x1:x2]

                    arr.append(img)
                    # Class name is the first path component.
                    cls = image_path.split('/')[0]
                    labels.append(self.cls2id[cls])

                # BUG FIX: `astype` returns a NEW array; the original
                # discarded it (`arr.astype('float32')` on its own line),
                # so the in-place scaling below ran on the original
                # (likely uint8) dtype and could fail or truncate.
                arr = np.array(arr).astype('float32')

                # Scale [0, 255] -> [-1, 1] (zero-centered).
                arr /= 255.
                arr -= 0.5
                arr *= 2.

                # One-hot encode the labels.
                labels = to_categorical(y=labels,
                                        num_classes=self.total_classes)
                yield (arr, labels)