Example #1
def show_google_map(paths, API_key, region):
    """Builds a gmaps figure with the given region polygon, the paths'
    line segments and markers at the resampled sampling locations."""

    lines = []
    for f in pbar()(paths.fragments):
        flines = []
        for l in f:
            line_coords = np.r_[list(l.coords.xy)].T
            for i in range(len(line_coords) - 1):
                flines.append(
                    gmaps.Line(start=tuple(line_coords[i][::-1]),
                               end=tuple(line_coords[i + 1][::-1])))
        lines.append(flines)
    lines = flatten(lines)
    print "found", len(lines), "line segments"

    markers = []

    for o, f in pbar()(zip(flatten(paths.resampled_orientations),
                           flatten(paths.resampled_fragments))):
        coords = np.r_[list(f.xy)].T
        markers.append([
            gmaps.Marker((coords[i][1], coords[i][0]),
                         info_box_content=str(o[i]))
            for i in range(len(coords))
        ])
    markers = flatten(markers)
    print "found", len(markers), "sampling locations"

    gmaps.configure(api_key=API_key)
    gmap_b = gmaps.Polygon([(i[1], i[0]) for i in region])
    fig = gmaps.figure(center=tuple(region.mean(axis=0)[::-1]), zoom_level=16)
    fig.add_layer(gmaps.drawing_layer(features=[gmap_b] + lines + markers))
    return fig
Example #2
def scale_images(X_imgs, scales, show_progress_bar=False):
    """
    X_imgs: shape [n_imgs, size_x, size_y, n_channels]
    scales: e.g. [.9, .5] produces 2 new images per input (a scale < 1 crops centrally, so the content appears larger)
    """
    from rlx.utils import pbar
    IMAGE_SIZE_1, IMAGE_SIZE_2 = X_imgs.shape[1], X_imgs.shape[2]
    # Various settings needed for Tensorflow operation
    boxes = np.zeros((len(scales), 4), dtype=np.float32)
    for index, scale in enumerate(scales):
        x1 = y1 = 0.5 - 0.5 * scale  # To scale centrally
        x2 = y2 = 0.5 + 0.5 * scale
        boxes[index] = np.array([y1, x1, y2, x2], dtype=np.float32)
    box_ind = np.zeros((len(scales)), dtype=np.int32)
    crop_size = np.array([IMAGE_SIZE_1, IMAGE_SIZE_2], dtype=np.int32)

    X_scale_data = []
    tf.reset_default_graph()
    X = tf.placeholder(tf.float32, shape=(1, IMAGE_SIZE_1, IMAGE_SIZE_2, 3))
    # Define Tensorflow operation for all scales but only one base image at a time
    tf_img = tf.image.crop_and_resize(X, boxes, box_ind, crop_size)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        for img_data in (pbar()(X_imgs) if show_progress_bar else X_imgs):
            batch_img = np.expand_dims(img_data, axis=0)
            scaled_imgs = sess.run(tf_img, feed_dict={X: batch_img})
            X_scale_data.extend(scaled_imgs)


    # X_scale_data = np.array(X_scale_data, dtype=np.float32)
    return X_scale_data
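A minimal usage sketch (assuming TensorFlow 1.x, NumPy and the rlx package are installed; the image batch below is made-up data):

import numpy as np

X_imgs = np.random.rand(4, 128, 128, 3).astype(np.float32)  # hypothetical batch of 4 RGB images
scaled = scale_images(X_imgs, scales=[0.90, 0.75])           # 2 scales per image
print(len(scaled), scaled[0].shape)                          # 8 (128, 128, 3)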
Example #3
def rotate_images(X_imgs,
                  start_angle,
                  end_angle,
                  n_images,
                  show_progress_bar=False):
    from rlx.utils import pbar, flatten
    IMAGE_SIZE_1, IMAGE_SIZE_2 = X_imgs.shape[1], X_imgs.shape[2]

    X_rotate = []
    iterate_at = (end_angle - start_angle) / (n_images - 1)

    tf.reset_default_graph()
    X = tf.placeholder(tf.float32, shape=(None, IMAGE_SIZE_1, IMAGE_SIZE_2, 3))
    radian = tf.placeholder(tf.float32, shape=(len(X_imgs)))
    tf_img = tf.contrib.image.rotate(X, radian)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        for index in (pbar()(range(n_images))
                      if show_progress_bar else range(n_images)):
            degrees_angle = start_angle + index * iterate_at
            radian_value = degrees_angle * np.pi / 180  # Convert to radian
            radian_arr = [radian_value] * len(X_imgs)
            rotated_imgs = sess.run(tf_img,
                                    feed_dict={
                                        X: X_imgs,
                                        radian: radian_arr
                                    })
            X_rotate.extend(rotated_imgs)

    X_rotate = np.array(X_rotate, dtype=np.float32)
    # reorder so that the rotated copies of each input image end up adjacent
    # (as written, this indexing assumes n_images == 2)
    X_rotate = X_rotate[np.r_[flatten([[i, i + len(X_imgs)]
                                       for i in np.arange(len(X_imgs))])]]
    return X_rotate
    return X_rotate
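A hedged usage sketch with made-up data (assuming TensorFlow 1.x and rlx are available); n_images=2 is used here because the final reordering step interleaves exactly two rotated copies per input:

import numpy as np

X_imgs = np.random.rand(4, 64, 64, 3).astype(np.float32)    # hypothetical batch
rotated = rotate_images(X_imgs, start_angle=-10, end_angle=10, n_images=2)
print(rotated.shape)                                         # (8, 64, 64, 3)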
Example #4
def mcmc(n_samples,
         s,
         q_sampler,
         q_pdf,
         init_state_sampler,
         use_logprobs=False,
         verbose=True):
    """Metropolis-Hastings sampler.

    s: target density (or log-density when use_logprobs=True)
    q_sampler(x): draws a proposed state from the current state x
    q_pdf(a, b): proposal (log-)density of b given the current state a
    returns: array of samples (including the initial state) and the acceptance rate
    """
    xi = init_state_sampler()
    r = [xi]
    c = 0
    loop_values = range(n_samples)
    for i in utils.pbar()(loop_values) if verbose else loop_values:
        proposed_state = q_sampler(xi)
        if use_logprobs:
            acceptance_probability = np.exp(
                s(proposed_state) - s(xi) + q_pdf(proposed_state, xi) -
                q_pdf(xi, proposed_state))
        else:
            acceptance_probability = s(proposed_state) / s(xi) * q_pdf(
                proposed_state, xi) / q_pdf(xi, proposed_state)
        acceptance_probability = np.min((1, acceptance_probability))
        if np.random.random() < acceptance_probability:
            xi = proposed_state
            c += 1
        r.append(xi)
    return np.r_[r], c * 1. / n_samples
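A small usage sketch with illustrative names, using SciPy for the Gaussian density and a symmetric random-walk proposal (so the q_pdf ratio cancels); verbose=False avoids the rlx dependency:

import numpy as np
from scipy.stats import norm

s = lambda x: norm.pdf(x)                              # target: standard normal density
q_sampler = lambda x: x + np.random.normal(scale=0.5)  # symmetric random-walk proposal
q_pdf = lambda a, b: norm.pdf(b, loc=a, scale=0.5)     # density of proposing b from state a
init_state_sampler = lambda: 0.0

samples, acc_rate = mcmc(5000, s, q_sampler, q_pdf, init_state_sampler, verbose=False)
print(samples.mean(), samples.std(), acc_rate)         # roughly 0, roughly 1, acceptance rate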
Example #5
def show_filters(model, layer_name="conv1/W:0"):
    vars = {i.name: i for i in tflearn.variables.get_all_variables()}
    w1 = model.get_weights(vars[layer_name])
    from rlx.utils import pbar
    plt.figure(figsize=(6, 6))
    for i in pbar()(range(w1.shape[-1])):
        plt.subplot(10, 10, i + 1)
        img = w1[:, :, :, i]
        img = (img - np.min(img)) / (np.max(img) - np.min(img))
        plt.imshow(img)
        plt.axis("off")
Example #6
    def fit_mcmc(self, X_train, n_cycles=1, n_iters=100):
        k = self.get_vectorized_params()

        X1 = X_train
        self.log_data_energy = []

        for it in utils.pbar()(range(n_iters)):
            mgrad, X1 = self.compute_mcmc_gradient(n=n_cycles,
                                                   X_train=X_train,
                                                   X_init=X1)
            k -= mgrad
            self.set_vectorized_params(k)
            self.log_data_energy.append(
                (it, np.mean([self.n_free_energy(xi) for xi in X_train])))
Example #7
def get_streetview_images(requests, dest_dir, API_key):
    from skimage.io import imsave
    import os
    skipped = 0
    for reqs in pbar()(requests):
        for _, req in reqs.iterrows():
            for k in ["front", "right", "left"]:
                fname = dest_dir + "/sv_lat_%f_lon_%f_%s.jpg" % (req.lat,
                                                                 req.lon, k)
                if not os.path.isfile(fname):
                    img = utils.get_http_image(req[k])
                    if np.max(np.histogram(
                            img.flatten())[0]) < np.prod(img.shape) * .9:
                        imsave(fname, img)
                    else:
                        skipped += 1
    print "skipped", skipped, "images with more than 90% of pixels with the same value"
Example #8
def flip_images(X_imgs, show_progress_bar=False):
    from rlx.utils import pbar
    IMAGE_SIZE_1, IMAGE_SIZE_2 = X_imgs.shape[1], X_imgs.shape[2]

    X_flip = []
    tf.reset_default_graph()
    X = tf.placeholder(tf.float32, shape=(IMAGE_SIZE_1, IMAGE_SIZE_2, 3))
    tf_img1 = tf.image.flip_left_right(X)
    tf_img2 = tf.image.flip_up_down(X)
    tf_img3 = tf.image.transpose_image(X)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for img in (pbar()(X_imgs) if show_progress_bar else X_imgs):
            flipped_imgs = sess.run([tf_img1, tf_img2, tf_img3],
                                    feed_dict={X: img})
            X_flip.extend(flipped_imgs)
    X_flip = np.array(X_flip, dtype=np.float32)
    return X_flip
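A minimal usage sketch with made-up square images (assuming TensorFlow 1.x and rlx); each input yields a left-right flip, an up-down flip and a transpose:

import numpy as np

X_imgs = np.random.rand(4, 64, 64, 3).astype(np.float32)  # hypothetical batch
flipped = flip_images(X_imgs)
print(flipped.shape)                                       # (12, 64, 64, 3)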
Example #9
def show_image_mosaic(imgs, labels, figsize=(12, 12), idxs=None):
    from rlx.utils import pbar

    plt.figure(figsize=figsize)
    for labi, lab in pbar()(list(enumerate(np.unique(labels)))):
        k = imgs[labels == lab]
        _idxs = idxs[:10] if idxs is not None else np.random.permutation(
            len(k))[:10]
        for i, idx in enumerate(_idxs):
            if i == 0:
                plt.subplot(10, 11, labi * 11 + 1)
                plt.title("LABEL %d" % lab)
                plt.plot(0, 0)
                plt.axis("off")

            img = k[idx]
            plt.subplot(10, 11, labi * 11 + i + 2)
            plt.imshow(img, cmap=plt.cm.Greys_r)
            plt.axis("off")
Example #10
    def fit_symbolic(self, X_train, n_iters=10, verbose=False):
        self.s_compute_likelihood(X_train, verbose)

        if verbose:
            print "gradient descent"
        k = self.get_vectorized_params()

        self.log_data_energy = []

        for it in utils.pbar()(range(n_iters)):
            k += np.r_[[
                self.l_log_likelihood_grads[i](*self.set_vectorized_params(k))
                for i in self.sykeys
            ]]
            self.log_data_energy.append(
                (it, np.mean([self.n_free_energy(xi) for xi in X_train])))

        self.set_vectorized_params(k)
        self.compute_probs_exhaustive()
Example #11
def augment_imgs(imgs, dataset, prob_augment, op, opname):
    """
    dataset must be indexed with the image file name and must have a field
    named "path" with the full path to the image.
    a one-to-one correspondence is assumed between imgs and entries in dataset.

    returns: augmented_imgs
             augmented_dataset: where corresponding records in the dataset
             are copied and paths and indexes appropriately updated.
    """
    assert len(imgs) == len(
        dataset), "dataset and imgs must have the same length"

    from rlx.utils import pbar
    from skimage.io import imsave

    imgs_to_augment = np.random.permutation(
        len(imgs))[:int(len(imgs) * prob_augment)]
    print "applying operation", opname
    augmented_imgs = op(imgs[imgs_to_augment])

    r = len(augmented_imgs) // len(imgs_to_augment)  # augmented copies per source image

    augmented_dataset = None

    print("saving imgs and building dataset")
    for t in pbar()(range(len(imgs_to_augment))):
        z = dataset.iloc[imgs_to_augment[t]]
        zd = pd.DataFrame(
            [z] * r,
            index=["%s__%s_%d.jpg" % (z.name, opname, i) for i in range(r)])
        zd["path"] = [
            "/".join(i.path.split("/")[:-1] + [i.name])
            for _, i in zd[["path"]].iterrows()
        ]
        for i, (_, item) in enumerate(zd.iterrows()):
            imsave(item.path, augmented_imgs[r * t + i])
        augmented_dataset = zd if augmented_dataset is None else pd.concat(
            (augmented_dataset, zd))

    return augmented_imgs, augmented_dataset
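A hedged usage sketch under assumed inputs (rlx, scikit-image and pandas installed; a writable temporary directory; a simple NumPy horizontal flip standing in for the augmentation op). Every name below is illustrative, not part of the original API:

import os, tempfile
import numpy as np
import pandas as pd

tmp_dir = tempfile.mkdtemp()
imgs = (np.random.rand(4, 32, 32, 3) * 255).astype(np.uint8)       # hypothetical images
names = ["img_%d.jpg" % i for i in range(len(imgs))]
dataset = pd.DataFrame({"path": [tmp_dir + "/" + n for n in names]},
                       index=names)                                 # indexed by file name

hflip = lambda x: x[:, :, ::-1]                                     # one augmented copy per image

aug_imgs, aug_dataset = augment_imgs(imgs, dataset, prob_augment=0.5,
                                     op=hflip, opname="hflip")
print(len(aug_imgs), list(aug_dataset.index))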
Example #12
    def s_compute_likelihood(self, X, verbose=False):
        if verbose:
            print "building symbolic likelihood expression"
        self.s_create_probability_symbols(verbose)
        dataX_ = [{self.s_x[i]: xi[i] for i in range(len(xi))} for xi in X]

        self.s_log_likelihood = sy.log(
            np.prod([self.s_prob_x.subs(xi) for xi in dataX_]))

        if verbose:
            print "building symbolic likelihood gradient expression"
        self.s_W_grad = {
            i[1]: self.s_log_likelihood.diff(i[1])
            for i in np.ndenumerate(self.s_W)
        }
        self.s_b_grad = {
            i[1]: self.s_log_likelihood.diff(i[1])
            for i in np.ndenumerate(self.s_b)
        }
        self.s_c_grad = {
            i[1]: self.s_log_likelihood.diff(i[1])
            for i in np.ndenumerate(self.s_c)
        }

        self.s_grads = utils.merge_dicts(self.s_W_grad, self.s_b_grad,
                                         self.s_c_grad)

        if verbose:
            print "compiling likelihood"
        self.l_log_likelihood = sy.lambdify((self.mW, self.mc, self.mb),
                                            self.s_log_likelihood, "numpy")

        if verbose:
            print "compiling likelihood gradient, with", len(
                self.sykeys), "parameters"
        self.l_log_likelihood_grads = {
            k: sy.lambdify((self.mW, self.mc, self.mb), self.s_grads[k],
                           "numpy")
            for k in utils.pbar()(self.sykeys)
        }
Example #13
def get_streetview_requests(b_full, API_key):
    sv_requests = []
    for o, f in pbar()(zip(flatten(b_full.resampled_orientations),
                           flatten(b_full.resampled_fragments))):
        sv_item = []
        for i in range(len(o)):
            s_right = streetview_http_request(API_key, f.xy[1][i], f.xy[0][i],
                                              (o[i] + 90) % 360)
            s_front = streetview_http_request(API_key, f.xy[1][i], f.xy[0][i],
                                              o[i])
            s_left = streetview_http_request(API_key, f.xy[1][i], f.xy[0][i],
                                             (o[i] - 90) % 360)
            sv_item.append(
                [o[i], f.xy[0][i], f.xy[1][i], s_front, s_right, s_left])
        sv_requests.append(
            pd.DataFrame(sv_item,
                         columns=[
                             "orientation", "lon", "lat", "front", "right",
                             "left"
                         ]))
    print "total number of street view requests", np.sum(
        [len(i) for i in sv_requests]) * 3
    return sv_requests