Example #1
def telemetry(sid, data):
    if data:
        # The current steering angle of the car
        old_steering_angle = float(data["steering_angle"])
        # The current throttle of the car (falls back to 1.2 if the reported value is 0)
        old_throttle = float(data["throttle"]) or 1.2
        # The current speed of the car
        speed = float(data["speed"])
        # The current image from the center camera of the car
        imgString = data["image"]
        image = Image.open(BytesIO(base64.b64decode(imgString)))
        image_array = np.asarray(image)
        image_array = preprocess_image(image_array)
        steering_angle = float(
            model.predict(image_array[None, :, :, :], batch_size=1))
        #throttle = 0.6 if speed < 21 else 0.2
        #throttle = 1.8/(1+2*(abs(old_steering_angle - steering_angle)))
        #throttle = 0.2/(1+(abs(old_steering_angle - steering_angle)/50))
        throttle = 0.2
        print(steering_angle, throttle)
        send_control(steering_angle, throttle)

        # save frame
        if args.image_folder != '':
            timestamp = datetime.utcnow().strftime('%Y_%m_%d_%H_%M_%S_%f')[:-3]
            image_filename = os.path.join(args.image_folder, timestamp)
            image.save('{}.jpg'.format(image_filename))
    else:
        # NOTE: DON'T EDIT THIS.
        sio.emit('manual', data={}, skip_sid=True)
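
For context, a telemetry handler like the one above is usually registered on a python-socketio server, and send_control typically emits a "steer" event back to the simulator. A minimal sketch of that wiring, assuming the conventional drive.py layout (the event names and port are assumptions, not part of this example):

import socketio
import eventlet
from flask import Flask

sio = socketio.Server()
app = Flask(__name__)

def send_control(steering_angle, throttle):
    # Send the predicted steering angle and throttle back to the simulator
    sio.emit("steer",
             data={"steering_angle": str(steering_angle),
                   "throttle": str(throttle)},
             skip_sid=True)

# telemetry() from the example above would be registered with @sio.on('telemetry')

if __name__ == '__main__':
    # Wrap the Flask app in socketio middleware and serve it (port is an assumption)
    app = socketio.WSGIApp(sio, app)
    eventlet.wsgi.server(eventlet.listen(('', 4567)), app)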
Example #2
def predict():
    try:
        current_time = time.strftime("%Y-%m-%d-%H-%M-%S")
        img_file = request.files['file']
        mimetype = img_file.mimetype
        user_id = request.form['user']
        if "image" in mimetype or "octet-stream" in mimetype:
            # Decode the uploaded bytes into an OpenCV image array
            image = cv2.imdecode(np.frombuffer(img_file.read(), np.uint8),
                                 cv2.IMREAD_UNCHANGED)

            preprocessed_image = preprocess_image(image)

            base64_img = get_base64_image(preprocessed_image)

            add_data = None
            if "lat" in request.form:
                add_data = torch.from_numpy(get_add_data(request.form))
                add_data = add_data.to(device)

            values = get_prediction(preprocessed_image, add_data)
            if isinstance(values, dict) and "classId" in values:
                species_val = get_species_info(values["classId"])
                if "lat" in request.form:
                    save_to_firestore_norm(user_id, values,
                                           request.form['lat'],
                                           request.form['lng'], current_time)
                else:
                    save_to_firestore(user_id, values, current_time)

                return jsonify({
                    'data': {
                        **species_val, "confidence": values["confidence"],
                        "standardDev": values["standardDev"],
                        "time": current_time,
                        "processedImage": base64_img
                    }
                }), 200
            else:
                return jsonify({"error": values}), 400
        else:
            return jsonify({
                "error": {
                    "message": "Wrong file type",
                    "currentType": filename
                }
            }), 400
    except Exception:
        return jsonify({"error": {"message": "An error has occurred"}}), 400
Example #3
def read_image(base_path, image_name, num_channels, preprocess):
    """Read a single image from disk.

    After the image is read, it will be preprocessed if desired.

    Args:
        base_path: The path that contains the image to be read.
        image_name: The name of the image to read, with no filename extension.
        num_channels: The number of channels from the input image to read.
        preprocess: True to pre-process the image after reading.

    Returns:
        The image, possibly preprocessed, that was read from disk.
    """
    img = cv2.imread(os.path.join(base_path, image_name + '.png'))[:,:,:num_channels]
    if preprocess:
        img = preprocess_image(img)
    return img
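
A call like the following (the directory and frame name are hypothetical) illustrates the intended usage:

# Read the first three channels of frames/0001.png and preprocess the result
img = read_image("frames", "0001", num_channels=3, preprocess=True)
print(img.shape)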
Example #4
                        required=True,
                        help="Path to the ONNX model.")
    parser.add_argument(
        "-o",
        "--output",
        required=True,
        choices={"calyx", "tvm", "relay", "all"},
        help="Choices: `calyx`, `tvm`, `relay`, or `all` the above.",
    )

    args = vars(parser.parse_args())

    # The name of your net.
    net_name = args["net_name"]
    # The filepath to your input data.
    input_path = args["image"]

    # The dataset for which the classification is occurring, e.g. "mnist".
    dataset = args["dataset"]
    # Preprocess the data for classification.
    data = preprocess_image(input_path, dataset)

    # The filepath to the ONNX model.
    onnx_model_path = args["onnx_model"]

    # Determines which output you want.
    output = args["output"]

    # Runs the net and prints the classification output.
    run_net(net_name, data, onnx_model_path, output)
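
This example passes a file path and a dataset name to preprocess_image, whose body is not shown in the listing. A purely hypothetical sketch of what such a function might do for an "mnist"-style input, not the project's actual implementation:

import numpy as np
from PIL import Image

def preprocess_image(input_path, dataset):
    """Hypothetical sketch: load an image and shape it for the given dataset."""
    img = Image.open(input_path).convert("L")           # read as grayscale
    if dataset == "mnist":
        img = img.resize((28, 28))                      # MNIST models expect 28x28 inputs
    data = np.asarray(img, dtype=np.float32) / 255.0    # scale pixel values to [0, 1]
    return data[None, None, :, :]                       # add batch and channel dimensions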
Example #5
import numpy as np
from keras.applications import vgg16
from keras import backend as K
from keras.preprocessing.image import load_img, img_to_array
#Helper Module
import image_processing as img
#Image Display
from IPython.display import Image

base_image = K.variable(img.preprocess_image('./base_image.png'))
style_reference_image = K.variable(img.preprocess_image('./style_image.png'))
combination_image = K.placeholder((1, 400, 711, 3))

Image('./style_image.png')

#Combine 3 Images Into A Single Keras Tensor
input_tensor = K.concatenate(
    [base_image, style_reference_image, combination_image], axis=0)

#Build The VGG16 Network With Our 3 Images As Input
model = vgg16.VGG16(input_tensor=input_tensor,
                    weights='imagenet',
                    include_top=False)
print('Model Loaded!')

#Combine Loss Functions Into A Singular Scalar
loss = img.combination_loss(model, combination_image)
print(loss)

Tensor("add_16:0", shape=(), dtype=float32)
#Get The Gradients Of The Generated Image Wrt The Loss
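#The listing cuts off here; with the Keras backend API used above, that step would
#typically look like the following sketch (not necessarily the original code)
grads = K.gradients(loss, combination_image)

#Bundle Loss And Gradients Into One Callable For The Optimizer
outputs = [loss] + grads
f_outputs = K.function([combination_image], outputs)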