Example #1
def readpath(paths):
    ######################################################################################
    # Extracts all features from the input image paths.
    #
    # input:  paths: list of N image paths
    # output: N x M matrix, where M is the dimensionality of the feature representation.
    ######################################################################################
    print('getting sharpness')
    temp = sharpness(paths)
    temp = temp / np.linalg.norm(temp)
    features = np.copy(temp)
    print(features.shape)

    print('getting exposureness')
    temp = exposureness(paths)
    features = np.concatenate((features, temp), axis=1)
    print(features.shape)

    print('getting object position')
    temp = object(paths)
    features = np.concatenate((features, temp), axis=1)
    print(features.shape)

    print('getting orientation')
    temp = orientation(paths)
    features = np.concatenate((features, temp), axis=1)
    print(features.shape)

    print('getting facial expression')
    temp = facial(paths)
    features = np.concatenate((features, temp), axis=1)
    print(features.shape)

    return features
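
The function builds its N x M matrix by concatenating one block per feature extractor along axis=1 (only the sharpness block is additionally scaled by its norm). A minimal sketch of that pattern, with hypothetical dummy extractors standing in for the real sharpness(), exposureness(), object(), orientation() and facial():

import numpy as np

def stack_features(paths, extractors):
    # each extractor is assumed to map a list of N paths to an (N, k) block;
    # the blocks are concatenated column-wise into an N x M matrix
    blocks = []
    for name, extract in extractors:
        print('getting', name)
        blocks.append(extract(paths))
    return np.concatenate(blocks, axis=1)

# purely illustrative dummy extractors
dummy = lambda p: np.random.rand(len(p), 2)
print(stack_features(['a.jpg', 'b.jpg'],
                     [('sharpness', dummy), ('exposure', dummy)]).shape)  # (2, 4)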
Example #2
def handle_navigation(req):
    coordinates = [latitude, longitude, req.c, req.d]
    start = str(coordinates[0]) + "," + str(coordinates[1])
    goal = str(coordinates[2]) + "," + str(coordinates[3])

    closestStart = findClosestWaypoints(start)
    closestGoal = findClosestWaypoints(goal)

    searchResult = []

    shortestDistance = 10000000
    shortestResult = []
    for i in range(0, 4):
        result, distance = aStarSearch(G, closestStart[i // 2],
                                       closestGoal[1 - (i % 2)])
        if distance < shortestDistance:
            shortestDistance = distance
            shortestResult = result
            shortestResult.insert(0, closestStart[i // 2])
            shortestResult.insert(0, start)
            shortestResult.append(closestGoal[1 - (i % 2)])
            shortestResult.append(goal)

    pin = run(shortestResult)

    route = []
    for j in range(0, 4):
        short = shortestResult[j]
        dots = short.split(',')
        route.append([float(dots[0]), float(dots[1])])

    p = pyproj.Proj(proj='utm', zone=31, ellps='WGS84')

    rd_route = []
    for coord in route:
        x, y = p(coord[1], coord[0])
        rd_route.append([x, y])

    os.system(
        "rosrun map_server map_server /home/ubuntu/ROS/nautonomous_ws4/src/WaternetNautonomous/WaternetNautonomousNavigation/nautonomous_navigation_navigate/scripts/amsterdam.yaml&"
    )
    os.system("")

    z, w = orientation(rd_route)
    #print "x:", rd_route[1][0] - 121000, "y:", rd_route[1][1] - 486500, "z:", z, "w:", w

    #return pin[0][0], pin[0][1], z, w

    #node_colors = ["green" if n in shortestResult else "white" for n in G.nodes()]

    #pos=nx.get_node_attributes(G,'pos')
    #nx.draw_networkx_nodes(G, pos=pos, node_color=node_colors)
    #nx.draw_networkx_edges(G, pos=pos)
    #plt.show()
    print "Dit is een test..."
    print rd_route[1][0]
    return rd_route[1][0] - 628550, rd_route[1][1] - 5803351, z, w
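
The pyproj projection above maps WGS84 latitude/longitude to UTM zone 31 easting/northing; Proj objects take longitude first, which is why the code passes coord[1] before coord[0]. A standalone sketch with an arbitrary point near Amsterdam:

import pyproj

p = pyproj.Proj(proj='utm', zone=31, ellps='WGS84')
lat, lon = 52.37, 4.90   # arbitrary WGS84 point near Amsterdam
x, y = p(lon, lat)       # UTM easting/northing in metres (longitude first)
print(x, y)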

Example #3
#Tests Arduino Reader -> observer interface
#arduino_tester()

#Test serial comms to




home = reset()
ard = setup_serial(arduino_port, 115200)
counter = time.time()
#f = open("log_"+str(float(ephem.now()))+".csv", 'w')
#f.write("Time,Lat,Lon,Heading\n")
orient = orientation.orientation("$IMU,0,0,0,0,0,0,0,0,0")
position = nmea.nmea("$GPRMC,0,V,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0")
while True:
    mes = read_message(ard)
    if mes[:2] == "$G":
        try:
            position = nmea.nmea(mes)
        except Exception:
            # ignore malformed GPS sentences
            pass
    elif mes[:2] == "$I":
        try:
            orient = orientation.orientation(mes)
        except Exception:
            # ignore malformed IMU sentences
            pass
    # home.date = "2016-06-28 12:00:00"
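
For context, the $GPRMC sentences dispatched above are comma-separated NMEA 0183 fields; the project's nmea.nmea class is assumed to do something along these lines. A self-contained sketch that pulls latitude/longitude out of one sentence (the sample values are arbitrary):

def parse_gprmc(sentence):
    # $GPRMC,hhmmss,status,lat,N/S,lon,E/W,speed,course,date,...
    fields = sentence.split(',')
    lat_raw, lat_hem = fields[3], fields[4]
    lon_raw, lon_hem = fields[5], fields[6]
    # NMEA packs coordinates as (d)ddmm.mmmm; convert to decimal degrees
    lat = int(lat_raw[:2]) + float(lat_raw[2:]) / 60.0
    lon = int(lon_raw[:3]) + float(lon_raw[3:]) / 60.0
    if lat_hem == 'S':
        lat = -lat
    if lon_hem == 'W':
        lon = -lon
    return lat, lon

print(parse_gprmc("$GPRMC,123519,A,5217.000,N,00455.000,E,022.4,084.4,230394,,,A"))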
Example #4
from detect import detect_code
from orientation import orientation

import cv2

cam = cv2.VideoCapture(0)
while True:
    ret, img = cam.read()
    #if ret == 1: detect_code(img)
    if ret == 1:
        orientation(img)
    if cv2.waitKey(1) == 27:  # ESC key quits
        break

cam.release()
cv2.destroyAllWindows()
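
A slightly more defensive variant of the same capture loop, checking that the camera actually opened and releasing it even if the loop is interrupted; purely illustrative, the behaviour is otherwise unchanged:

import cv2
from orientation import orientation

cam = cv2.VideoCapture(0)
if not cam.isOpened():
    raise RuntimeError("camera 0 could not be opened")
try:
    while True:
        ret, img = cam.read()
        if ret:
            orientation(img)
        if cv2.waitKey(1) == 27:  # ESC key quits
            break
finally:
    cam.release()
    cv2.destroyAllWindows()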
Example #5
def getFeatureFromWindow(myF, index_start, index_end, video_par, model_par):
    # pull out the data up to the current frame
    # myF = myF.iloc[0:index_end, :]
    # fid= myF.iloc[-1, 0]
    # myidF = myF[myF[0] == fid]
    # myF = myF[myF[0] <= fid]
    # F = myF.values
    # # extract the unique track_ids of the current frame, i.e. the number of people
    # idF = myidF.values
    # track_id = sorted(set(idF[:,1].astype(int)))

    my = myF.iloc[index_start:index_end, :]
    F = my.values
    track_id = sorted(set(F[:, 1].astype(int)))
    myF = myF.iloc[0:index_end, :]
    # print(track_id)
    # for this 10 s window, count how many frames each person stays in the scene;
    # if the stay is too short (fewer than 4 frames), drop them from the data under consideration
    deleteid = []
    for i in range(len(track_id)):
        if len(myF[myF[1] == track_id[i]]) < 4:
            deleteid.append(track_id[i])
            # select the rows belonging to this track for deletion
            choosebytrackid = myF[myF[1] == track_id[i]]
            # get the row indices, then drop them
            row = choosebytrackid.iloc[:, 0].index.values
            myF = myF.drop(index=row)

    # re-index and update track_id
    myF = myF.reset_index(drop=True)
    # F = myF.values
    # track_id = sorted(set(myF.values[:, 1].astype(int)))
    track_id = sorted(list(set(track_id) - set(deleteid)))
    # print(track_id)
    # print(len(track_id))
    # print(myF)
    """
        PERSONAL FEATURES -----------------------------------------------------------------------------
        (i.e. time, position and velocity)
    """
    """
        path[i] holds the trajectory of pedestrian i; path itself is a dict whose entries are:
                with velocity:    {int_path_id: dataframe_path(frameid, position_X, position_Y, v_X, v_Y)}
                without velocity: {int_path_id: dataframe_path(frameid, position_X, position_Y)}
    """
    path = dict()
    # from the first to the last frame of the 10 s window, extract each person's trajectory as {int_path_id: dataframe_path}
    for t in track_id:
        choosebytrackid = myF[myF[1] == t]
        path[t] = choosebytrackid.iloc[:, [0, 2, 4]]

    # extract velocity features
    for t in track_id:
        trajectory = path[t].values
        trajectoryPlusVeloc = []
        for i in range(trajectory.shape[0]):
            if i == 0:
                # the first point of a trajectory has no predecessor, so its velocity is set to NaN
                v_nan = np.insert(trajectory[0],
                                  len(trajectory[0]),
                                  values=[np.nan, np.nan])
                trajectoryPlusVeloc.append(list(v_nan))
            else:
                # compute the velocity (x and y components) for every subsequent position
                vx = trajectory[i, 1] - trajectory[i - 1, 1]
                vy = trajectory[i, 2] - trajectory[i - 1, 2]
                velocity = np.insert(trajectory[i],
                                     len(trajectory[i]),
                                     values=[vx, vy])
                trajectoryPlusVeloc.append(list(velocity))
        # write the velocity-augmented trajectory back into path; trajectoryPlusVeloc is a
        # list of rows and is converted to a DataFrame; the first row is dropped because
        # its velocity is NaN
        path[t] = pd.DataFrame(np.delete(trajectoryPlusVeloc, 0, 0))

    # print(path)
    """
        PAIR-WISE FEATURES:-------------------------------------------------------------------------------------

        physical distance |  feature_pd  |  prox
        trajectory shape  |  feature_ts  |  dtw
        motion causality  |  feature_vs  |  velocity
        paths convergence |  feature_pc  |  heatmap
    """
    # form all initial pairwise couples of pedestrians from track_id; for the student003 dataset the size is (1128, 2)
    couples = group(track_id)
    print(couples.shape[0])

    feature_pd = np.zeros((couples.shape[0], 1))
    feature_ts = np.zeros((couples.shape[0], 1))
    feature_vs = np.zeros((couples.shape[0], 1))
    feature_pc = np.zeros((couples.shape[0], 1))

    # compute features for each couple  couples.shape[0]
    for i in range(couples.shape[0]):
        if len(track_id) == 1:
            break
        # extract the two trajectories of the i-th couple; both are DataFrames
        traj1 = path[couples[i, 0]]
        traj2 = path[couples[i, 1]]
        traj_1 = traj1.values
        traj_2 = traj2.values
        """
            1) compute proxemics: physical distance | feature_pd 
        """
        if model_par.features[0] == 1:
            traj1_frameid = traj1.iloc[:, 0].values
            traj2_frameid = traj2.iloc[:, 0].values
            feature_pd[i] = prox(traj1_frameid, traj2_frameid, traj_1, traj_2)
        """
            2) compute MD-DTW: trajectory shape  |  feature_ts
        """
        if model_par.features[1] == 1:
            # print(couples[i])
            feature_ts[i] = Hausdorff(traj_1, traj_2)
            # dist, k = dtw(traj_1, traj_2)
            # feature_ts[i] = dist/k
            # print(feature_ts[i])
        """
            3) compute : velocity similarity  |  feature_vs
        """
        if model_par.features[2] == 1:
            # F1 = granger(traj_1, traj_2)
            # F2 = granger(traj_2, traj_1)
            # F1 = granger(traj1, traj2)
            # F2 = granger(traj2, traj1)
            # feature_mc[i] = max(F1, F2)
            # print(feature_mc[i])
            dist, k = v_similar(traj_1, traj_2)
            feature_vs[i] = dist / k
        """
            4) compute HEAT MAPS: paths convergence |  feature_pc
            modification: assume constant-velocity straight-line motion to predict each future heading, then derive the feature from the angle between the two headings
        """
        # if model_par.features[3] == 1:
        #     allHeatMaps[i], feature_pc[i] = heatmap(traj_1[:, 0:3], traj_2[:, 0:3], video_par)
        #     # if model_par.features[3] != 1:
        # else:
        #     feature_pc[i] = 0
        if model_par.features[3] == 1:
            feature_pc[i] = orientation(traj_1[:, 0:5], traj_2[:, 0:5])
        # print(feature_pd[i])
        # print(feature_ts[i])
        # print(feature_vs[i])
        # print(feature_pc[i])
        # print(feature_sd[i])

    detectedGroup = couples
    # detectedGroup = group1(couples, feature_pd, track_id)
    # print(detectedGroup.shape[0])

    # combine the four feature column vectors into an n*4 matrix [feature_pd, feature_ts, feature_vs, feature_pc]
    myfeatures = np.concatenate((feature_pd, feature_ts), axis=1)
    myfeatures = np.concatenate((myfeatures, feature_vs), axis=1)
    myfeatures = np.concatenate((myfeatures, feature_pc), axis=1)
    print(myfeatures.shape[0])
    return [track_id, F, couples, myfeatures, detectedGroup]
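
The velocity step above appends frame-to-frame position differences and then drops the first row, whose velocity is undefined. A vectorised sketch of the same feature, assuming the [frame_id, x, y] column layout used for path[t]:

import numpy as np

def add_velocity(trajectory):
    # trajectory: (T, 3) array of [frame_id, x, y];
    # the velocity of row i is position(i) - position(i-1), so row 0 is dropped
    vel = np.diff(trajectory[:, 1:3], axis=0)
    return np.hstack([trajectory[1:], vel])

traj = np.array([[0, 1.0, 2.0],
                 [1, 1.5, 2.5],
                 [2, 2.5, 2.0]])
print(add_velocity(traj))  # two rows of [frame_id, x, y, v_x, v_y]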
Example #6
import cv2 as cv
from fixperspective import fixperspective
from preprocess import preprocess
from orientation import orientation

src = cv.imread("sampleImages/6.jpg", 1)

orientation(cv.resize(src, (0, 0), fx=1, fy=1))

# scale factor for image displays only
scalefactor = 0.1

# display the source image
cv.imshow("Source", cv.resize(src, (0, 0), fx=scalefactor, fy=scalefactor))

preproc = preprocess(src)

warped = fixperspective(preproc)

cv.imshow("Warped", cv.resize(warped, (0, 0), fx=scalefactor, fy=scalefactor))

cv.waitKey(0)
cv.destroyAllWindows()