def detect_from_dir(input_dir, output_dir, model: modellib.MaskRCNN):
    # Prepare the images
    prepared_images_dir = output_dir
    os.makedirs(prepared_images_dir, exist_ok=True)
    utils.prepare_images(input_dir, prepared_images_dir)
    images = utils.load_images(prepared_images_dir)
    begin = time.time()
    detect(images, output_dir, model)
    end = time.time()
    n = len(images)
    duration = end - begin
    print(f'Processed {n} images in {duration:.2f} sec')
    print(f'{duration / n:.3f} sec per image')
    utils.write_images(images, os.path.join(output_dir, 'vis'))
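For context, a hypothetical driver for detect_from_dir might look like the sketch below, assuming modellib is the Matterport Mask R-CNN package; the config values, weights file and directory names are placeholders, not part of the original example.

# Hypothetical invocation of detect_from_dir; config, weights and paths are assumptions.
import mrcnn.model as modellib
from mrcnn.config import Config

class InferenceConfig(Config):
    NAME = 'demo'
    NUM_CLASSES = 1 + 1      # background + one object class (assumption)
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1

model = modellib.MaskRCNN(mode='inference', config=InferenceConfig(), model_dir='logs')
model.load_weights('mask_rcnn_weights.h5', by_name=True)
detect_from_dir('data/input', 'data/output', model)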
Example #2
def predict():

    # initialize the data dictionary that will be returned from the view
    data = {"success": False}

    # ensure an image was properly uploaded to our endpoint
    if flask.request.method == "POST":
        if flask.request.files.get("image"):
            # read the image in PIL format
            image = flask.request.files["image"].read()
            image = Image.open(io.BytesIO(image))

            # preprocess the image and prepare it for classification
            image = img_to_array(image)
            image = np.expand_dims(image, axis=0)
            image, _ = prepare_images(image)

            # classify the input image and return the predicted number along with its probability.
            preds = model.predict(image)
            # indicate that the request was a success
            data["success"] = True
            data["predicted_number"] = str(preds.argmax(axis=-1)[0])
            data["probability"] = str(preds.max(axis=-1)[0])

    # return the data dictionary as a JSON response
    return flask.jsonify(data)
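A client call against a view like the one above might look as follows; the host, route path and file name are assumptions for illustration only.

# Hypothetical client for the Flask view above; URL and file are placeholders.
import requests

with open('digit.png', 'rb') as f:
    resp = requests.post('http://localhost:5000/predict', files={'image': f})
print(resp.json())  # e.g. {'success': True, 'predicted_number': '7', 'probability': '0.98'}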
Example #3
def get_data():
    '''
    The training and test MNIST datasets used to train and evaluate the model.
    '''

    # the data, split between train and test sets
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    # preprocess images before training.
    x_train, train_input_shape = prepare_images(x_train)
    x_test, test_input_shape = prepare_images(x_test)
    assert train_input_shape == test_input_shape

    # convert class vectors to binary class matrices
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)
    return (x_train, y_train, x_test, y_test, train_input_shape)
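A sketch of how the tuple returned by get_data could feed a training loop; the model architecture below is an assumption for illustration, not part of the original example.

# Hypothetical consumer of get_data(); layer sizes are illustrative only.
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Flatten, Dense

x_train, y_train, x_test, y_test, input_shape = get_data()
model = Sequential([
    Flatten(input_shape=input_shape),
    Dense(128, activation='relu'),
    Dense(10, activation='softmax'),
])
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=1)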
Example #4
def evaluate(args: Arguments):
    np.random.seed(1)
    tf.random.set_seed(1)
    path, model_path = get_path(args)
    if not os.path.exists(model_path + '.index'):
        return None
    ds = MNIST()
    model = BetaGammaVAE(**get_dense_networks(args),
                         gamma=float(args.gamma),
                         beta=float(args.beta),
                         name=f'Z{args.zdim}B{args.beta}G{args.gamma}'.replace(
                             '.', ''))
    model.build(ds.full_shape)
    model.load_weights(model_path, raise_notfound=True, verbose=True)
    # reconstruct one test batch and convert it to displayable images
    test = ds.create_dataset('test', batch_size=32)
    for x in test.take(1):
        px, qz = model(x, training=False)
    x = prepare_images(px.mean().numpy(), True)[0]

    # average test log-likelihood over 200 batches
    llk = tf.reduce_mean(
        tf.concat(
            [model(x, training=False)[0].log_prob(x) for x in test.take(200)],
            0)).numpy()
    return dict(beta=args.beta,
                gamma=args.gamma,
                zdim=args.zdim,
                finetune=args.finetune,
                step=model.step.numpy(),
                llk=llk,
                image=x)
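A hedged sketch of calling evaluate over a small hyper-parameter grid; it assumes Arguments can be constructed from the four fields read above (beta, gamma, zdim, finetune), which is not shown in the original snippet.

# Hypothetical sweep over evaluate(); the Arguments constructor is an assumption.
results = []
for beta in (1.0, 10.0):
    for gamma in (1.0, 6.0):
        r = evaluate(Arguments(beta=beta, gamma=gamma, zdim=32, finetune=False))
        if r is not None:
            results.append(r)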
Example #5
def step0(self):
    self.vars.get_paths()
    print("Images are cropped and are being prepared for cellprofiler")
    # TODO: these window=100 values are cryptic and not easy to understand
    utils.prepare_images(self.vars.MF, window=100)
    print("Image preparation is done")
Example #6
from functools import lru_cache
import random
from urllib.parse import urlparse
import itertools

import network
import utils

bugs = utils.get_bugs()

utils.prepare_images()
all_images = utils.get_all_images()[:3000]  # cap the dataset at 3000 images
image = utils.load_image(all_images[0])
input_shape = image.shape
BATCH_SIZE = 32
EPOCHS = 50

bugs_to_website = {}
for bug in bugs:
    bugs_to_website[bug['id']] = urlparse(bug['url']).netloc


@lru_cache(maxsize=len(all_images))
def site_for_image(image):
    # the bug id is the file name prefix before the first underscore
    bug = image[:image.index('_')]
    return bugs_to_website[int(bug)]


def are_same_site(image1, image2):
    return site_for_image(image1) == site_for_image(image2)
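As an illustration only, the helpers above could be used to draw same-site or cross-site image pairs, e.g. for a siamese-style training set; this sketch is not part of the original module.

# Hypothetical pair sampling built on are_same_site(); illustrative only.
def sample_pair(positive=True):
    while True:
        a, b = random.sample(all_images, 2)
        if are_same_site(a, b) == positive:
            return a, b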
Example #7
    model = model_from_json(model_json)

    # load weights into new model
    model.load_weights(os.path.join('models', MODEL_NAME + ".h5"))
    print("Loaded model from disk")

    colors = [(255, 255, 255), (0, 128, 0)]

    while True:

        # get a frame from RGB camera
        frame = get_video()
        # get a frame from depth sensor
        depth = get_depth()

        X = prepare_images(frame, image_size, ratio, n_slices)
        X = X.astype(np.float32)

        preds = model.predict(X)
        labels = np.argmax(preds, axis=1).reshape((n_slices_h, n_slices_v))

        # Initialize the class assignment mask.
        assignment_mask = np.zeros((labels.shape[0], labels.shape[1], 3))

        # Fill class labels into assignment mask.
        for label in range(len(classes)):
            assignment_mask[np.isin(labels, label)] = colors[label]

        assignment_mask = cv2.resize(assignment_mask,
                                     image_size,
                                     interpolation=cv2.INTER_NEAREST)
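        # Hypothetical visualisation step, not part of the original snippet:
        # show the resized class-assignment mask and stop the loop on Esc.
        cv2.imshow('assignment mask', assignment_mask.astype(np.uint8))
        if cv2.waitKey(1) & 0xFF == 27:
            break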