def test_preprocessing_legacy(self, augname):
     image = tf.zeros((300, 300, 3), dtype=tf.float32)
     try:
         preprocessing.preprocess_image(image, 224, False, None, augname)
     except tf.errors.InvalidArgumentError as e:
         if 'ExtractJpegShape' not in str(e):
             raise e
Example #2
def pre_processing(image, label, attr, H=h, W=w, is_training=True):
    image = preprocessing.preprocess_image(
        image, H, W,
        is_training=is_training,
        resize_side_min=H,
        resize_side_max=W)

    return image, label, attr
Example #3
def record_parser(value, is_training):
    keys_to_features = {
        'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
        'image/format': tf.FixedLenFeature((), tf.string,
                                           default_value='jpeg'),
        'image/class/label': tf.FixedLenFeature((), tf.int64,
                                                default_value=-1),
        'image/class/text': tf.FixedLenFeature((), tf.string,
                                               default_value=''),
        'image/object/bbox/xmin': tf.FixedLenFeature((), tf.float32),
        'image/object/bbox/ymin': tf.FixedLenFeature((), tf.float32),
        'image/object/bbox/xmax': tf.FixedLenFeature((), tf.float32),
        'image/object/bbox/ymax': tf.FixedLenFeature((), tf.float32),
        'image/object/class/label': tf.FixedLenFeature((), tf.int64)
    }
    parsed = tf.parse_single_example(value, keys_to_features)
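    # tf.image.decode_image handles JPEG and PNG payloads alike.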
    image = tf.image.decode_image(
        tf.reshape(parsed['image/encoded'], shape=[]), _NUM_CHANNELS)
    image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    image = preprocessing.preprocess_image(image=image,
                                           output_height=_DEFAULT_IMAGE_SIZE,
                                           output_width=_DEFAULT_IMAGE_SIZE,
                                           is_training=is_training)
    label = tf.cast(tf.reshape(parsed['image/class/label'], shape=[]),
                    dtype=tf.int32)
    return image, tf.one_hot(label, _LABEL_CLASSED)
Example #4
 def _classify_image(self):
     fname = "{}.jpeg".format(uuid.uuid4().hex)
     image = preprocess_image(self._input)
     image.save(fname)
     image.close()
     self._pred = predict(fname, self._img_model, self._img_labels)
     os.remove(fname)
Example #5
def telemetry(sid, data):
    if data:
        # The current steering angle of the car
        steering_angle = data["steering_angle"]
        # The current throttle of the car
        throttle = data["throttle"]
        # The current speed of the car
        speed = data["speed"]
        # The current image from the center camera of the car
        imgString = data["image"]
        image = Image.open(BytesIO(base64.b64decode(imgString)))
        image_array = np.asarray(image)
        image_array = preprocess_image(image_array)
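        # Add a leading batch dimension so the model sees a (1, H, W, C) array.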
        steering_angle = float(
            model.predict(image_array[None, :, :, :], batch_size=1))

        # throttle = controller.update(float(speed))
        throttle = .12

        print(steering_angle, throttle)
        send_control(steering_angle, throttle)

        # save frame
        if args.image_folder != '':
            timestamp = datetime.utcnow().strftime('%Y_%m_%d_%H_%M_%S_%f')[:-3]
            image_filename = os.path.join(args.image_folder, timestamp)
            image.save('{}.jpg'.format(image_filename))
    else:
        # NOTE: DON'T EDIT THIS.
        sio.emit('manual', data={}, skip_sid=True)
Example #6
    def predict(self, image_path='./dataset/test/text.jpg', text=""):

        try:
            model = self.build()
            model.load_weights('./model/MemSem')
            input_id, token_type_id, attention_mask = self.encode(
                [preprocess_txt(text)])
            image_data = preprocess_image(
                keras_image.load_img(image_path,
                                     target_size=(224, 224),
                                     interpolation='bicubic'))
            image_data = np.expand_dims(image_data, axis=0)
            value = model.predict(
                [input_id, token_type_id, attention_mask, image_data])

            prediction = np.argmax(value)
            if prediction == 2:  # negative = [0,0,1]
                print("It's a bad meme")
            elif prediction == 1:  # positive = [0,1,0]
                print("It's not bad XD")
            elif prediction == 0:  # neutral = [1,0,0]
                print("It's meaningless")

        except Exception as e:
            print(e)
Example #7
 def _preprocess_image(image_bytes):
     """Preprocess a single raw image."""
     image = preprocessing.preprocess_image(image_bytes=image_bytes,
                                            is_training=False,
                                            image_size=image_size,
                                            resize_method=resize_method)
     return image
Example #8
def generate_samples():
    X, y = [], []
    with open('driving_log.csv') as csvfile:
        # counter = 0
        for center_image, _, _, steering_angle, _, _, speed in csv.reader(
                csvfile):
            # if counter >2000: break
            # counter +=1
            # if float(steering_angle)**2 > 0.02 or random.random() < .2:
            X.append(preprocess_image(cv2.imread(center_image)))
            y.append(float(steering_angle))
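            # For larger steering angles, add an augmented copy (the extra True
            # argument presumably flips the image) with the angle negated.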
            if float(steering_angle)**2 > .02:
                X.append(preprocess_image(cv2.imread(center_image), True))
                y.append(-float(steering_angle))

    return np.asarray(X), np.asarray(y)
Example #9
 def _parse_function(filename):
     image_string = tf.read_file(filename)
     image_decoded = preprocessing.preprocess_image(image_string,
                                                    is_training,
                                                    image_size=image_size)
     image = tf.cast(image_decoded, tf.float32)
     return image
Example #10
def show_img_mask(train_df, idx, PATH='../data/pku-autonomous-driving/'):
    img0 = imread(PATH + 'train_images/' + train_df['ImageId'][idx] + '.jpg')
    img = preprocess_image(img0)

    mask, regr, mask_gaus = get_mask_and_regr(
        img0, train_df['PredictionString'][idx])

    print('img.shape', img.shape, 'std:', np.std(img))
    print('mask.shape', mask.shape, 'std:', np.std(mask))
    print('regr.shape', regr.shape, 'std:', np.std(regr))

    plt.figure(figsize=(16, 16))
    plt.title('Processed image')
    plt.imshow(img)
    plt.show()

    plt.figure(figsize=(16, 16))
    plt.title('Detection Mask')
    plt.imshow(mask)
    plt.show()

    plt.figure(figsize=(16, 16))
    plt.title('Detection Mask Gaussian')
    plt.imshow(mask_gaus)
    plt.show()

    plt.figure(figsize=(16, 16))
    plt.title('Yaw values')
    plt.imshow(regr[:, :, -2])
    plt.show()
Example #11
    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()

        # Get image name
        idx, labels, _ = self.df.values[idx]
        img_name = self.root_dir.format(idx)

        # Augmentation
        flip = False
        if self.training:
            flip = np.random.randint(10) == 1

        # Read image
        img0 = imread(img_name, True)
        img = preprocess_image(img0,
                               img_w=self.img_w,
                               img_h=self.img_h,
                               flip=flip)
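        # Move the channel axis to the front (HWC -> CHW) for PyTorch.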
        img = np.rollaxis(img, 2, 0)

        # Get mask and regression maps

        if self.training:
            mask, regr, mask_gaus = get_mask_and_regr(img0,
                                                      labels,
                                                      img_w=self.img_w,
                                                      img_h=self.img_h,
                                                      flip=flip)
            regr = np.rollaxis(regr, 2, 0)
            dropmask = 0
        else:
            mask, regr, mask_gaus = 0, 0, 0
            dropmask_name = self.root_dir_dropmasks.format(idx)
            if os.path.isfile(dropmask_name):
                dropmask = imread(dropmask_name, True)
                dropmask = preprocess_image(dropmask, self.img_w, self.img_h)
            else:
                dropmask = np.zeros((self.img_h, self.img_w, 3))

        img = torch.as_tensor(img, dtype=torch.float32)
        mask = torch.as_tensor(mask, dtype=torch.float32)
        mask_gaus = torch.as_tensor(mask_gaus, dtype=torch.float32)
        regr = torch.as_tensor(regr, dtype=torch.float32)
        dropmask = torch.as_tensor(dropmask, dtype=torch.float32)

        return [img, mask, regr, mask_gaus, dropmask]
Example #12
 def image_preprocessing(self, image):
     return preprocessing.preprocess_image(image,
                                           image_size=self.image_size,
                                           is_training=self.is_training,
                                           image_dtype=self.image_dtype,
                                           augname=self.augname,
                                           ra_num_layers=self.ra_num_layers,
                                           ra_magnitude=self.ra_magnitude)
Example #13
 def crop_preprocess_image(self, full_img, bbox):
     # bbox is a space-separated string: "left top right bottom"
     # (German variable names: links/oben/rechts/unten).
     bbox_array = bbox.split(" ")
     links = int(bbox_array[0])  # left
     oben = int(bbox_array[1])  # top
     rechts = int(bbox_array[2])  # right
     unten = int(bbox_array[3])  # bottom
     crop_img = full_img[oben:unten, links:rechts]
     data = preprocess_image(crop_img)
     return data
Example #14
    def _preprocess_image(image, label):
      image = preprocessing.preprocess_image(
          image,
          output_size=self.output_size,
          is_training=self.is_training,
          resize_with_pad=self.resize_with_pad,
          randaug_num_layers=self.randaug_num_layers,
          randaug_magnitude=self.randaug_magnitude)

      return image, label
Example #15
 def _parse_function(filename, label):
   image_string = tf.read_file(filename)
   image_decoded = preprocessing.preprocess_image(
       image_bytes=image_string,
       is_training=is_training,
       use_bfloat16=False,
       image_size=self.image_size)
   image = tf.cast(image_decoded, tf.float32)
   return image, label
Example #16
 def __init__(self, image_size=299, is_training=True, is_show=False):
     self.sess = tf.Session()
     with self.sess.as_default():
         self.pl = tf.placeholder(dtype=tf.uint8)
         self.result = preprocess_image(self.pl,
                                        image_size,
                                        image_size,
                                        is_training=is_training)
         # here result.dtype should be tf.float32
         if is_show and self.result.dtype != tf.uint8:
             self.result = tf.cast(self.result, dtype=tf.uint8)
Example #17
async def image_to_text(data: Data) -> dict[str, str]:
    """Extract the text from an image, and preprocess it using the received points."""
    np_image = decode_image(data.b64image)

    if data.points:
        points = [(pt.x, pt.y) for pt in data.points]
    else:
        points = None
    preprocessed = preprocess_image(np_image, points)

    text = text_from_image(preprocessed)
    return {'result': text}
Example #18
 def _image_tta(image):
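     # Test-time augmentation: a plain rescale plus 'leftup' and 'rightdown'
     # crops, each also flipped left-right.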
     rescale = preprocessing.preprocess_image(
         image,
         output_size=self.output_size,
         is_training=False,
         resize_with_pad=self.resize_with_pad)
     rescale_flip = tf.image.flip_left_right(rescale)
     leftup = preprocessing.preprocess_image(
         image,
         output_size=self.output_size,
         is_training=False,
         resize_with_pad=self.resize_with_pad,
         tta='leftup')
     leftup_flip = tf.image.flip_left_right(leftup)
     rightdown = preprocessing.preprocess_image(
         image,
         output_size=self.output_size,
         is_training=False,
         resize_with_pad=self.resize_with_pad,
         tta='rightdown')
     rightdown_flip = tf.image.flip_left_right(rightdown)
     return (rescale, rescale_flip, leftup, leftup_flip, rightdown,
             rightdown_flip)
Example #19
def preprocess(parsed):
    """Preprocess image for inference."""
    features = {}
    if FLAGS.data_type == 'tfrecord':
        image = tf.image.decode_jpeg(parsed['image/encoded'], channels=3)

    features['image'] = preprocessing.preprocess_image(
        image,
        is_training=False,
        image_size=FLAGS.input_image_size,
        use_bfloat16=FLAGS.use_bfloat16,
        is_image_bytes=False,
    )
    return features
Example #20
        def _parse_single_example(example_proto):
            features = tf.io.parse_single_example(example_proto,
                                                  self.feature_description)
            image = tf.io.decode_jpeg(features['image/encoded'])
            label = _parse_label(features)
            instance_id = features['image/source_id']
            latitude = features['image/latitude']
            longitude = features['image/longitude']
            date = features['image/date']
            valid = features['image/valid']

            if self.use_tta:
                image = _image_tta(image)
            else:
                image = preprocessing.preprocess_image(
                    image,
                    output_size=self.output_size,
                    is_training=self.preprocess_for_train,
                    resize_with_pad=self.resize_with_pad,
                    randaug_num_layers=self.randaug_num_layers,
                    randaug_magnitude=self.randaug_magnitude)

            coordinates = tf.stack([latitude, longitude], 0)
            if self.is_training and FLAGS.use_coordinates_augment:
                coordinates = _drop_coordinates(coordinates)

            if self.provide_coord_date_encoded_input:
                lat = _encode_feat(latitude, FLAGS.loc_encode)
                lon = _encode_feat(longitude, FLAGS.loc_encode)
                if FLAGS.use_date_feats:
                    date = date * 2.0 - 1.0
                    date = _encode_feat(date, FLAGS.date_encode)
                    coord_date_encoded = tf.concat([lon, lat, date], axis=0)
                else:
                    coord_date_encoded = tf.concat([lon, lat], axis=0)
                inputs = (image, coordinates, coord_date_encoded) \
                          if self.provide_coordinates_input \
                          else (image, coord_date_encoded)
            else:
                inputs = (image, coordinates) if self.provide_coordinates_input \
                                              else image

            if self.provide_validity_info_output:
                outputs = (label, valid, instance_id) if self.provide_instance_id \
                                                      else (label, valid)
            else:
                outputs = (label,
                           instance_id) if self.provide_instance_id else label

            return inputs, outputs
Example #21
def read_and_decode(filename_queue):
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        # Defaults are not specified since both keys are required.
        features={
            'image/encoded': tf.FixedLenFeature([], tf.string),
            'image/class/label': tf.FixedLenFeature([], tf.int64),
        })

    image = preprocessing.preprocess_image(features['image/encoded'])
    label = features['image/class/label']
    return image, label
Example #22
def canonize(img_file, depth_file, opts, params):
    img = preprocess_image(img_file)
    segmask = segmentation(depth_file, opts["segmentation"])

    img, segmask = _canonize(img, segmask, opts)

    # Determine if image is upside down.
    diff = np.sum(np.abs(img - params["img_avg"]))
    diff_upsidedown = np.sum(np.abs(img - params["img_avg_upsidedown"]))
    upsidedown = diff > diff_upsidedown
    if dataset.is_upside_down(img_file) != upsidedown:
        print "Canonization failed!"
    if upsidedown:
        img = np.fliplr(np.flipud(img))
        segmask = np.fliplr(np.flipud(segmask))

    return img, segmask
Example #23
def canonization_training(opts):
    print "# Canonization training"
    params = caching.nul_repr_dict()
    # Generate average intensity image from a subset of the dataset.
    img_avg = np.zeros(opts["img_shape"], dtype=int)
    files = dataset.training_files(opts["num_train_images"])
    for img_file, depth_file in print_progress(files):
        img = preprocess_image(img_file)
        segmask = segmentation(depth_file, opts["segmentation"])
        img, segmask = _canonize(img, segmask, opts)
        # Orient correctly if image is upside down
        if dataset.is_upside_down(img_file):
            img = np.fliplr(np.flipud(img))
        img_avg += img
    img_avg /= len(files)
    params["img_avg"] = img_avg
    params["img_avg_upsidedown"] = np.fliplr(np.flipud(img_avg))
    return params
Example #24
def pre_reconst(img, verbose=False, is_simple=False, threaded=True):
    if verbose: print_msg("preprocessing..")
    if is_simple:
        layouts = [
            Paragraph(img=simple_preproc(img),
                      rect=(0, 0, img.shape[0], img.shape[1]))
        ]
    else:
        layouts = preprocess_image(img)

    if verbose: print_msg("detecting..")
    graphs = detection.get_graphs(layouts, threaded=threaded)

    if verbose: print_msg("recognizing..")
    graphs = chrecog.predict.get_pred(graphs)

    if verbose: print_msg("semantic..")
    graphs = semantic.analyze(graphs)
    return graphs
Example #25
def decaptcha(filenames):
    model = load_model('model.h5')
    codes = []
    numChars = np.zeros((len(filenames), ))
    index = 0
    for filename in filenames:
        image = cv2.imread(filename)
        letters = preprocessing.preprocess_image(image)
        outputs = []
        for letter in letters:
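            # Each letter image is reshaped to (1, height, width, 1) so the
            # model receives a single-sample batch.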
            outputs.append(
                one_hot_to_char(
                    model.predict(
                        letter.reshape(1, letter.shape[0], letter.shape[1],
                                       1))))
        codes.append(''.join(outputs))
        numChars[index] = len(outputs)
        index += 1
    return (numChars, codes)
Example #26
def main(args):
    thetas = args.thetas
    num_b_lines = args.num_b_lines
    num_horizontal_lines = args.num_horizontal_lines
    cap = cv2.VideoCapture('Cov-Atlas-Day+1.avi')
    ret, frame = cap.read()
    # # Our operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = preprocess_image(gray)
    regularized_image = fb_algorithm(gray, thetas=thetas)
    fig = plt.figure()
    ax1 = fig.add_subplot(1, 2, 1)
    ax1.imshow(gray, cmap='gray')
    a_line_points = get_horizontal_peaks(regularized_image,
                                         num_peaks=num_horizontal_lines)
    b_line_points = get_b_line_peaks(regularized_image, num_peaks=num_b_lines)
    draw_lines(b_line_points, gray)
    draw_lines(a_line_points, gray)
    ax2 = fig.add_subplot(1, 2, 2)
    ax2.imshow(regularized_image, cmap='gray')
    plt.show()
Example #27
def test_model(csv_file,
               test_dir,
               n_classes=5,
               model="best_model.hdf5",
               test_cache_file="x_test.npy",
               cached_predictions="predictions.npy",
               submission_file="submission.csv"):

    data, N = read_csv(csv_file)

    if not os.path.exists(test_cache_file):

        x_test = np.empty((N, 224, 224, 3), dtype=np.uint8)

        for i, image_id in tqdm(enumerate(data['id_code'])):
            f_path = os.path.join(test_dir, image_id + ".png")
            x_test[i, :, :, :] = preprocessing.preprocess_image(
                f_path, grayscale=False, output_channels=3)

        np.save(test_cache_file, x_test)

    else:
        x_test = np.load(test_cache_file)

    if not os.path.exists(cached_predictions):
        classifier = Classifier(labels=["0", "1", "2", "3", "4"])
        classifier.load(model)
        predictions = classifier.predict(x_test)
        np.save(cached_predictions, predictions)
    else:
        predictions = np.load(cached_predictions)

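    # Ordinal decoding: threshold each output at 0.5 and count the positives
    # to recover the class index.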
    y_test = predictions > 0.5
    y_test = y_test.astype(int).sum(axis=1) - 1

    submission_df = pd.read_csv(csv_file)
    submission_df['diagnosis'] = y_test
    submission_df.to_csv(submission_file, index=False)
Example #28
def parse_tf_example(example, is_training, image_size):
    features = {
        'image/label': tf.FixedLenFeature([], dtype=tf.int64),
        'image/encoded': tf.FixedLenFeature([], dtype=tf.string)
    }
    parsed = tf.parse_single_example(serialized=example, features=features)
    label = parsed['image/label']
    encode_image = parsed['image/encoded']

    # decode_image returns an image in [0, 1) with shape (?, ?, 3).
    image = preprocessing.decode_image(encode_image)
    # preprocess_image returns an image in [-1, 1); it nominally expects a 4-D
    # input, but a (?, ?, 3) tensor also works here.
    image = preprocessing.preprocess_image(
        image,
        is_training=is_training,
        height=image_size[0],
        width=image_size[1],
        min_scale=0.8,
        max_scale=1,
        p_scale_up=0.5,
        aug_color=True,
    )
    return image, label
Example #29
def train_model(
        csv_file,
        train_dir,
        n_classes=5,
        train_cache_file="x_train.npy",
        epochs=20,
        densenet_weights='/media/hdd/data/densenet/DenseNet-BC-121-32-no-top.h5'
):

    data, N = read_csv(csv_file)

    if not os.path.exists(train_cache_file):

        x_train = np.empty((N, 224, 224, 3), dtype=np.uint8)

        for i, (image_id, diagnosis) in tqdm(enumerate(
                zip(data['id_code'], data['diagnosis'])),
                                             total=len(data['id_code'])):
            f_path = os.path.join(train_dir, diagnosis, image_id + ".png")
            x_train[i, :, :, :] = preprocessing.preprocess_image(
                f_path, grayscale=False, output_channels=3)

        np.save(train_cache_file, x_train)

    else:
        x_train = np.load(train_cache_file)

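    # Encode each diagnosis as a multi-label target via get_multilabel
    # (decoded at test time by counting thresholded positives).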
    y_train = np.zeros((N, n_classes))
    for i, diagnosis in enumerate(data["diagnosis"]):
        y_train[i, :] = get_multilabel(diagnosis, n_classes)

    train_data = {"x": x_train, "y": y_train}

    classifier = Classifier(labels=["0", "1", "2", "3", "4"])
    classifier.build(densenet_weights=densenet_weights)
    classifier.train(train_data, epochs=epochs)
Example #30
if __name__ == '__main__':
    model_name = 'efficientnet-b0'
    labels_map_file = 'eval_data/labels_map.txt'
    image_file = 'eval_data/panda.jpg'
    training = False
    blocks_args, global_params = eb.get_model_params(model_name, None)
    model = keras_efficientnet(blocks_args, global_params, training)
    model.load_weights(
        'models/efficientnet_b0_weights_tf_dim_ordering_tf_kernels.h5')

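    # Per-channel ImageNet mean / stddev, scaled from [0, 1] to the [0, 255] pixel range.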
    MEAN_RGB = [0.485 * 255, 0.456 * 255, 0.406 * 255]
    STDDEV_RGB = [0.229 * 255, 0.224 * 255, 0.225 * 255]

    image_string = tf.read_file(image_file)
    image_decoded = preprocessing.preprocess_image(image_string, training, 224)
    image = tf.cast(image_decoded, tf.float32)
    image -= tf.constant(MEAN_RGB, shape=[1, 1, 3], dtype=image.dtype)
    image /= tf.constant(STDDEV_RGB, shape=[1, 1, 3], dtype=image.dtype)
    y = model.predict(tf.expand_dims(image, 0), steps=1)[0]

    label_map = json.loads(open(labels_map_file).read())
    pred_idx = np.argsort(y)[::-1]
    print(np.argmax(y))
    print('truth: 388')
    print(pred_idx[:5])
    print([y[pid] for pid in pred_idx[:5]])
    for i in range(5):
        print('  -> top_{} ({:4.2f}%): {}  '.format(
            i + 1, y[pred_idx[i]] * 100, label_map[str(pred_idx[i])]))
Example #31
try:
    from PIL import Image
except ImportError:
    import Image
data = {"image": [], "filepath": [], "text": [], "label": []}
for dirname, _, filenames in os.walk('./dataset/'):
    for filename in filenames:
        try:
            if dirname == './dataset/positive':
                data['label'].append(int(1))
            if dirname == './dataset/neutral':
                data['label'].append(int(2))
            if dirname == './dataset/negative':
                data['label'].append(int(0))

            data['image'].append(
                preprocess_image(
                    os.path.join(dirname, filename)))

            data['filepath'].append(
                os.path.join(dirname, filename))

            data['text'].append(
                preprocess_txt(
                    pytesseract.image_to_string(
                        Image.open(
                            os.path.join(dirname, filename)))))

        except Exception as e:
            print(e)
            continue

        print('\r images: {} texts: {} labels: {}'.format(
            len(data['image']), len(data['text']), len(data['label'])), end='')
Example #32
 def _preprocess_image(image_bytes):
     """Preprocess a single raw image."""
     image = preprocessing.preprocess_image(image_bytes=image_bytes,
                                            is_training=False)
     return image