Example #1
def show_google_map(paths, API_key, region):
    import numpy as np
    import gmaps  # the jupyter-gmaps package
    from rlx.utils import pbar, flatten

    lines = []
    for f in pbar()(paths.fragments):
        flines = []
        for l in f:
            # fragments store (lon, lat); gmaps wants (lat, lon), hence the [::-1] flips below
            line_coords = np.r_[list(l.coords.xy)].T
            for i in range(len(line_coords) - 1):
                flines.append(
                    gmaps.Line(start=tuple(line_coords[i][::-1]),
                               end=tuple(line_coords[i + 1][::-1])))
        lines.append(flines)
    lines = flatten(lines)
    print "found", len(lines), "line segments"

    markers = []

    for o, f in pbar()(zip(flatten(paths.resampled_orientations),
                           flatten(paths.resampled_fragments))):
        coords = np.r_[list(f.xy)].T
        markers.append([
            gmaps.Marker((coords[i][1], coords[i][0]),
                         info_box_content=str(o[i]))
            for i in range(len(coords))
        ])
    markers = flatten(markers)
    print "found", len(markers), "sampling locations"

    gmaps.configure(api_key=API_key)
    gmap_b = gmaps.Polygon([(i[1], i[0]) for i in region])
    fig = gmaps.figure(center=tuple(region.mean(axis=0)[::-1]), zoom_level=16)
    fig.add_layer(gmaps.drawing_layer(features=[gmap_b] + lines + markers))
    return fig
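A minimal usage sketch (not part of the original project): it assumes `paths` is the output of get_intersecting_paths (Example #6 below) after the project's resampling step has added the resampled_* columns, and that a valid Google Maps API key is supplied; the region vertices are placeholders in the (lon, lat) order the function flips internally.

import numpy as np

# placeholder polygon vertices; each row is treated as (lon, lat)
region = np.array([[-74.08, 4.60], [-74.07, 4.60],
                   [-74.07, 4.61], [-74.08, 4.61]])

fig = show_google_map(paths, API_key="YOUR_API_KEY", region=region)
fig  # renders the map inline in a Jupyter notebook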
Example #2
File: ml.py Project: rramosp/rlx
def rotate_images(X_imgs,
                  start_angle,
                  end_angle,
                  n_images,
                  show_progress_bar=False):
    import numpy as np
    import tensorflow as tf  # TF 1.x API assumed (tf.placeholder, tf.contrib, tf.Session)
    from rlx.utils import pbar, flatten
    IMAGE_SIZE_1, IMAGE_SIZE_2 = X_imgs.shape[1], X_imgs.shape[2]

    X_rotate = []
    iterate_at = (end_angle - start_angle) / float(n_images - 1)  # angle step in degrees

    tf.reset_default_graph()
    X = tf.placeholder(tf.float32, shape=(None, IMAGE_SIZE_1, IMAGE_SIZE_2, 3))
    radian = tf.placeholder(tf.float32, shape=(len(X_imgs)))
    tf_img = tf.contrib.image.rotate(X, radian)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        for index in (pbar()(range(n_images))
                      if show_progress_bar else range(n_images)):
            degrees_angle = start_angle + index * iterate_at
            radian_value = degrees_angle * np.pi / 180  # Convert to radian
            radian_arr = [radian_value] * len(X_imgs)
            rotated_imgs = sess.run(tf_img,
                                    feed_dict={
                                        X: X_imgs,
                                        radian: radian_arr
                                    })
            X_rotate.extend(rotated_imgs)

    X_rotate = np.array(X_rotate, dtype=np.float32)
    # reorder so all rotated copies of each input image are contiguous
    # (the original pairing [i, i + len(X_imgs)] only covered n_images == 2)
    order = flatten([[i + j * len(X_imgs) for j in range(n_images)]
                     for i in range(len(X_imgs))])
    X_rotate = X_rotate[np.r_[order]]
    return X_rotate
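A minimal usage sketch on dummy data, assuming TensorFlow 1.x is installed; shapes and angles are placeholders.

import numpy as np

# four dummy 64x64 RGB images with values in [0, 1]
X_imgs = np.random.rand(4, 64, 64, 3).astype(np.float32)

# five rotations evenly spaced between -30 and +30 degrees per image
X_aug = rotate_images(X_imgs, start_angle=-30, end_angle=30, n_images=5)
print(X_aug.shape)  # (20, 64, 64, 3): n_images rotated copies per input image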
Example #3
File: ml.py Project: rramosp/rlx
def load_alexnet_weights(fname):
    import numpy as np
    from rlx.utils import flatten
    # allow_pickle is required on NumPy >= 1.16.3 to load a pickled dict
    w = np.load(fname, encoding='bytes', allow_pickle=True).item()
    weights = {
        i[0]: i[1]
        for i in flatten([[[k + "/W:0", w[k][0]], [k + "/b:0", w[k][1]]]
                          for k in w.keys()])
    }

    # these weights are duplicated (AlexNet has two groups of convolutions for parallelization)
    for k in ["conv2/W:0", "conv4/W:0", "conv5/W:0"]:
        weights[k] = np.concatenate((weights[k], weights[k]), axis=2)
    return weights
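A usage sketch, assuming a NumPy dump of the Caffe AlexNet weights is available locally (the file name below is a placeholder for the commonly used bvlc_alexnet.npy):

weights = load_alexnet_weights("bvlc_alexnet.npy")
print(sorted(weights.keys())[:4])    # e.g. ['conv1/W:0', 'conv1/b:0', 'conv2/W:0', 'conv2/b:0']
print(weights["conv2/W:0"].shape)    # input depth doubled by the concatenation above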
Example #4
def get_streetview_requests(b_full, API_key):
    import numpy as np
    import pandas as pd
    from rlx.utils import pbar, flatten
    # streetview_http_request is assumed to be defined elsewhere in this project
    sv_requests = []
    for o, f in pbar()(zip(flatten(b_full.resampled_orientations),
                           flatten(b_full.resampled_fragments))):
        sv_item = []
        for i in range(len(o)):
            s_right = streetview_http_request(API_key, f.xy[1][i], f.xy[0][i],
                                              (o[i] + 90) % 360)
            s_front = streetview_http_request(API_key, f.xy[1][i], f.xy[0][i],
                                              o[i])
            s_left = streetview_http_request(API_key, f.xy[1][i], f.xy[0][i],
                                             (o[i] - 90) % 360)
            sv_item.append(
                [o[i], f.xy[0][i], f.xy[1][i], s_front, s_right, s_left])
        sv_requests.append(
            pd.DataFrame(sv_item,
                         columns=[
                             "orientation", "lon", "lat", "front", "right",
                             "left"
                         ]))
    print "total number of street view requests", np.sum(
        [len(i) for i in sv_requests]) * 3
    return sv_requests
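A usage sketch, assuming `b_full` already carries the resampled_orientations and resampled_fragments columns added by the project's resampling step (not shown in these examples):

requests = get_streetview_requests(b_full, API_key="YOUR_API_KEY")
print(len(requests), "paths,",
      sum(len(df) for df in requests) * 3, "street view requests in total")
print(requests[0].head())  # orientation, lon, lat plus the three per-direction requests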
Example #5
File: gps.py Project: rramosp/rlx
def augment_data_to_reference_stations(dfree, vfree, ref_sds, target_sds):
    """
    # this was used to check whether a target SD was also used as a reference

    for r,t in itertools.product(ref_sds, target_sds):
        if r==t:
            raise ValueError("SD %d is both in reference and target sets"%r)
    """
    import numpy as np
    import pandas as pd
    from tqdm import tqdm
    from rlx import utils as ru
    # `rm` (angle_vector) is assumed to be another module of this project

    xvfree = {i.name: i[["X", "Y", "Z"]].values for _, i in vfree.iterrows()}

    # add, for each reference station, the distance and angle from the GPS
    # reading to that station's actual position
    r = {target_sd: pd.DataFrame([ru.flatten([[xvfree[ref_sd][0] - gps_pos[0],
                                               xvfree[ref_sd][1] - gps_pos[1],  # fixed: the original reused index 0 here
                                               np.linalg.norm(gps_pos - xvfree[ref_sd]),
                                               rm.angle_vector(gps_pos[:2][::-1] - xvfree[ref_sd][:2][::-1])]
                                              for ref_sd in ref_sds])
                                  for gps_pos in dfree[target_sd][["gps_X", "gps_Y", "gps_Z"]].values],
                                 index=dfree[target_sd].index,
                                 columns=ru.flatten([["GDISTX_" + str(k), "GDISTY_" + str(k),
                                                      "L_" + str(k), "A_" + str(k)]
                                                     for k in ref_sds])
                                 ).join(dfree[target_sd])
         for target_sd in tqdm(target_sds)}

    # add observed GPS error at reference positions
    for target_sd in r.keys():
        for ref_sd in ref_sds:
            cols = ["dX", "dY", "dZ"] + [
                i
                for i in dfree[ref_sd].columns if "smooth" in i or "kalm" in i
            ]
            r[target_sd] = r[target_sd].join(dfree[ref_sd][cols],
                                             rsuffix="_" + str(ref_sd))

    # dict views are not indexable in Python 3, so materialize the keys first
    sds = list(r.keys())
    k = r[sds[0]]
    for sd in sds[1:]:
        k = pd.concat((k, r[sd]))
    k.sort_index(inplace=True)
    return k.dropna()
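A usage sketch; `dfree` is assumed to map each station id to that station's DataFrame, `vfree` comes from compute_positions_and_GPS_errors (Example #8 below), and the station ids are placeholders:

ref_sds = [101, 102, 103]   # hypothetical reference stations
target_sds = [201, 202]     # hypothetical target stations
augmented = augment_data_to_reference_stations(dfree, vfree, ref_sds, target_sds)
# one GDISTX/GDISTY/L/A column group is added per reference station
print(augmented.filter(regex="^(GDISTX_|GDISTY_|L_|A_)").columns.tolist())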
Example #6
def get_intersecting_paths(vias, region):
    """
    vias: a dataframe with a columns 'coords', containing a list of tuples with lat/lon
    region: a 2d np array with two columns (lat/lon) defining a polygon

    returns:
        path: a dataframe which augments vias with a column named 'fragments'
              containing only the rows in vias which with fragments falling
              within the region
    """
    print "obtaining paths within region"
    b_clipped, b_intersects = geo.linespol_intersects(vias.coords.values,
                                                      region,
                                                      scale=1.1)
    b_full = vias.coords.iloc[b_intersects]
    print "paths intersecting", len(b_full)
    print "clipped paths", len(b_clipped)
    print "clipped path fragments", len(flatten(b_clipped))

    b_full = vias.iloc[b_intersects].copy()
    b_full["fragments"] = b_clipped
    return b_full
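A minimal sketch with made-up coordinates, assuming the project's geo module is importable; tuple order follows the docstring above:

import numpy as np
import pandas as pd

vias = pd.DataFrame({"coords": [[(4.60, -74.08), (4.61, -74.07)],
                                [(4.70, -74.20), (4.71, -74.19)]]})
region = np.array([[4.59, -74.09], [4.59, -74.06],
                   [4.62, -74.06], [4.62, -74.09]])
paths = get_intersecting_paths(vias, region)
print(paths.fragments)  # clipped fragments for the intersecting rows only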
Example #7
def plot_cross_refval_results(l,
                              title="",
                              with_std=True,
                              with_mean=True,
                              vmin=None,
                              vmax=None):
    import itertools
    import numpy as np
    import pandas as pd
    import matplotlib.pyplot as plt
    from tqdm import tqdm
    from rlx import utils as ru
    sds_val = np.unique(l.val_sd).astype(int)
    sds_ref = np.unique(ru.flatten([eval(i) for i in l.ref_sds])).astype(int)
    """
    r = pd.DataFrame([ ['[]']*len(sds_ref)]*len(sds_val),
                     columns=pd.Series(sds_ref, name="ref_sd"),
                     index=pd.Index(sds_val, name="val_sd"))

    for _,i in tqdm(l.iterrows(), total=len(l)):
        for ref_sd in eval(i.ref_sds):
            r.loc[i.val_sd, ref_sd]=str(eval(r.loc[i.val_sd,ref_sd])+[i.val_score])
    """

    t = {str((i, j)): [] for i, j in itertools.product(sds_val, sds_ref)}
    for _, item in tqdm(l.iterrows(), total=len(l)):
        for sd in eval(item.ref_sds):
            t[str((int(item.val_sd), int(sd)))].append(item.val_score)

    # collect each (val_sd, ref_sd) score list as a string; means/stds are derived below
    r = pd.DataFrame(np.empty((len(sds_val), len(sds_ref)), dtype=object),
                     index=pd.Index(sds_val, name="val_sd"),
                     columns=pd.Series(sds_ref, name="ref_sd"))
    for i, j in itertools.product(sds_val, sds_ref):
        r.loc[i, j] = str(t[str((i, j))])

    def f(x, function):
        x = eval(x)
        return np.nan if len(x) == 0 else function(x)

    rm = r.applymap(lambda x: f(x, np.mean))
    rs = r.applymap(lambda x: f(x, np.std))

    if with_std and with_mean:
        plt.figure(figsize=(12, 6))
        plt.subplot(121)
    if with_mean:
        plt.imshow(rm,
                   origin="bottom",
                   interpolation="none",
                   vmin=vmin,
                   vmax=vmax,
                   cmap=plt.cm.gnuplot)
        plt.colorbar(fraction=.04)
        plt.xticks(range(len(rm.columns)), [int(i) for i in rm.columns],
                   rotation="vertical")
        plt.yticks(range(len(rm.index)), [int(i) for i in rm.index])
        plt.xlabel("ref sd")
        plt.ylabel("target sd")
        plt.title("mean err\n" + "\n".join(rxu.split_str(title, 45)))

    if with_std and with_mean:
        plt.subplot(122)
    if with_std:
        plt.imshow(rs,
                   origin="bottom",
                   interpolation="none",
                   vmin=vmin,
                   vmax=vmax,
                   cmap=plt.cm.gnuplot)
        plt.colorbar(fraction=.04)
        plt.xticks(range(len(rs.columns)), [int(i) for i in rs.columns],
                   rotation="vertical")
        plt.yticks(range(len(rs.index)), [int(i) for i in rs.index])
        plt.xlabel("ref sd")
        plt.ylabel("target sd")
        plt.title("std err\n" + "\n".join(rxu.split_str(title, 45)))

    return r, rm, rs
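A usage sketch on a toy results frame; note that `ref_sds` holds the string form of a list, as the eval() calls above expect:

import pandas as pd

l = pd.DataFrame({"val_sd": [201, 201, 202],
                  "ref_sds": ["[101, 102]", "[101]", "[102]"],
                  "val_score": [0.80, 0.72, 0.91]})
r, rm, rs = plot_cross_refval_results(l, title="toy cross-validation run")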
Example #8
File: gps.py Project: rramosp/rlx
def compute_positions_and_GPS_errors(d, init_date, calcpos_to):
    import numpy as np
    import pandas as pd
    from tqdm import tqdm
    from rlx import utils as ru
    dx = d.loc[init_date:calcpos_to]
    dd = d.loc[calcpos_to:]

    # compute fixed position with first period of data
    pos = dx.groupby("station_id")[["gps_X", "gps_Y",
                                    "gps_Z"]].agg([np.mean, np.std,
                                                   len]).dropna()

    # identify stations in free mode
    sfree = pos[(
        (pos["gps_X", "std"] + pos["gps_Y", "std"] + pos["gps_Z", "std"]) != 0)
                & (pos["gps_X", "len"] > 1000)].copy()
    vfree = {
        i.name:
        np.r_[[i["gps_X", "mean"], i["gps_Y", "mean"], i["gps_Z", "mean"]]]
        for _, i in sfree.iterrows()
    }

    # identify stations in fixed mode
    sfixed = pos.loc[[i for i in pos.index if i not in sfree.index]]
    vfixed = {
        i.name:
        np.r_[[i["gps_X", "mean"], i["gps_Y", "mean"], i["gps_Z", "mean"]]]
        for _, i in sfixed.iterrows()
    }

    # compute sd groups and centroids
    X = sfree[[("gps_X", "mean"), ("gps_Y", "mean")]].values
    from sklearn.cluster import KMeans
    km = KMeans(n_clusters=6, random_state=0)
    km.fit(X)
    sfree["cluster"] = km.predict(X)
    sd_clusters = {
        i: np.unique(sfree[sfree.cluster == i].index)
        for i in np.unique(sfree.cluster)
    }
    clusters_sd = {
        i: j
        for i, j in ru.flatten([[[sd, k] for sd in sd_clusters[k]]
                                for k in sd_clusters.keys()])
    }

    sds = np.unique(sfree.index.values)
    dd = dd[[i in sds for i in dd.station_id.values]]
    print "datapoints to compute fix positions", len(dx), "for", len(
        np.unique(dx.station_id)), "SDs (free and fixed)"
    print "datapoints for experiments         ", len(dd), "for", len(
        np.unique(dd.station_id)), "SDs (in free mode)"

    print "computing GPS position errors"
    diffs = np.r_[[(np.r_[i.gps_X, i.gps_Y, i.gps_Z] - vfree[i.station_id])
                   for _, i in tqdm(dd.iterrows(), total=len(dd))]]
    dd = pd.DataFrame(np.hstack((dd.values, diffs)),
                      columns=list(dd.columns) + ["dX", "dY", "dZ"],
                      index=dd.index)
    dd["station_id"] = dd.station_id.values.astype(int)
    vfree = pd.DataFrame([
        np.r_[[
            i["gps_X", "mean"], i["gps_Y", "mean"], i["gps_Z", "mean"],
            i["cluster"]
        ]] for _, i in sfree.iterrows()
    ],
                         columns=["X", "Y", "Z", "cluster"],
                         index=sfree.index)
    vfree["cluster"] = vfree.cluster.values.astype(int)

    vfixed = pd.DataFrame([
        np.r_[[i["gps_X", "mean"], i["gps_Y", "mean"], i["gps_Z", "mean"]]]
        for _, i in sfixed.iterrows()
    ],
                          columns=["X", "Y", "Z"],
                          index=sfixed.index)

    return dd, vfree, vfixed
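A usage sketch; `d` is assumed to be a datetime-indexed DataFrame of raw readings with station_id and gps_X/gps_Y/gps_Z columns, and the date boundaries are placeholders:

dd, vfree, vfixed = compute_positions_and_GPS_errors(
    d, init_date="2017-01-01", calcpos_to="2017-02-01")
print(len(vfree), "stations in free mode,", len(vfixed), "in fixed mode")
print(dd[["station_id", "dX", "dY", "dZ"]].head())  # per-reading GPS position errors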