Example No. 1
def orientation(coord1, coord2, coord3):
    c1 = vec.get_vector(coord1, coord2)
    c2 = vec.get_vector(coord2, coord3)
    d = vec.cross_product_direction(c1, c2)
    if d > 0:
        return "counter"
    elif d < 0:
        return "clock"
    else:
        return "collinear"
Example No. 2
def main():
    init_pos = Vectors.V4D(0.65, 0.05, 0.03, 0)
    bound = Vectors.V4D(0.65, 0.55, 0.53, 0)
    edges = {
        'init': init_pos,
        'bound': bound
    }
    sock = create_socket()
    try:
        move.run(sock, 'left', left_arm, edges)
    except KeyboardInterrupt:
        sock.close()
Example No. 3
def main():
    init_pos = Vectors.V4D(0.656982770038, -0.752598021641, 0.0388609422173, 0)
    bound = Vectors.V4D(0.656982770038, -0.252598021641, 0.5388609422173, 0)
    edges = {
        'init': init_pos,
        'bound': bound
    }
    sock = create_socket()
    try:
        move.run(sock, 'right', right_arm, edges)
    except KeyboardInterrupt:
        sock.close()
Example No. 4
def smallest_distance_pt_seg(opponent, sender, receiver):
    """
    Compute the smallest distance between the pass line segment and a given
    opponent.

    PARAMETERS
    opponent: pair, (int, int)
        The coordinates of the considered opponent of the sender.
    sender: pair, (int, int)
        The coordinates of the sender of the ball.
    receiver: pair, (int, int)
        The coordinates of the potential receiver of the ball.

    RETURN
    smallest_dist: float
        The smallest distance from the opponent to the pass line segment.
    """
    pass_vector = vec.vector(sender, receiver)
    snd2opp_vector = vec.vector(sender, opponent)
    pass_len = vec.norm(pass_vector)
    pass_vector_unit = vec.unit(pass_vector)
    snd2opp_vector_scaled = vec.scale(snd2opp_vector, 1.0/pass_len)
    t = vec.dot(pass_vector_unit, snd2opp_vector_scaled)
    if t < 0.0:
        t = 0.0
    elif t > 1.0:
        t = 1.0

    closest_match = vec.scale(pass_vector, t)
    smallest_dist = vec.distance(closest_match, snd2opp_vector)

    return smallest_dist
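
# A self-contained sketch of the same point-to-segment distance, assuming plain
# (x, y) tuples instead of the vec helpers; point_segment_distance is an
# illustrative name, not part of the original module.
import math

def point_segment_distance(p, a, b):
    ax, ay = a
    bx, by = b
    px, py = p
    seg = (bx - ax, by - ay)
    seg_len_sq = float(seg[0] ** 2 + seg[1] ** 2)
    if seg_len_sq == 0:
        return math.hypot(px - ax, py - ay)
    # projection parameter of p onto the segment, clamped to [0, 1]
    t = ((px - ax) * seg[0] + (py - ay) * seg[1]) / seg_len_sq
    t = max(0.0, min(1.0, t))
    cx, cy = ax + t * seg[0], ay + t * seg[1]
    return math.hypot(px - cx, py - cy)

# e.g. point_segment_distance((0, 1), (0, 0), (2, 0)) == 1.0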
Example No. 5
    def __init__(self):
        self.original = pygame.Surface((size, size), SRCALPHA)
        self.color = (random.randint(0, 255), random.randint(0, 255),
                      random.randint(0, 255))
        pygame.draw.polygon(self.original, self.color,
                            ((0, 0 + margin), (0, size - margin),
                             (size, size / 2)))
        self.pos = self.original.get_rect()
        self.pos = [random.randint(0, width), random.randint(0, height)]
        self.vel = v2.Vector()
        self.accel = v2.Vector()
        self.vel.setVectorC(random.randint(-5, 5), random.randint(-5, 5))
        self.accel.setVectorC(0, 0)
        self.radius = radius
        self.angle = self.vel.getAngle()
Example No. 6
def decide_increment(data):
    #TODO - decide based on data
    mutex.acquire()
    global direction

    global dz

    global last_data
    if last_data is None:
        last_data = data

    if (data is not None):
        wow = data['onset']
        dwow = data['onset'] - last_data['onset']
        if abs(dwow) > 0.01:
            dz = dwow / 10.0
        print "dwow", dwow

    print "dz: ", dz

    inc = Vectors.V4D(0.0, direction * 0.05, dz, 0.0)
    mutex.release()

    last_data = data

    return inc
Example No. 7
    def update(self):
        self.vel = v2.addVects(self.vel, self.accel)
        self.pos = [
            self.pos[0] + self.vel.components[0],
            self.pos[1] + self.vel.components[1]
        ]
        self.angle = self.vel.getAngle()
        self.image = pygame.transform.rotate(self.original, self.angle)
        self.rect = self.image.get_rect(center=self.pos)
        DispSurface.blit(self.image, self.rect)

        # Screen wrapping for boids, used when out-of-screen boids are not destroyed:
        # if self.pos[0] > width:
        #     self.pos[0] = self.pos[0] - width
        # if self.pos[0] < 0:
        #     self.pos[0] = width - self.pos[0]
        # if self.pos[1] > height:
        #     self.pos[1] = self.pos[1] - height
        # if self.pos[1] < 0:
        #     self.pos[1] = height - self.pos[1]

        if self.vel.getMag() > maxSpeed:
            self.vel.setMag(maxSpeed)
        if self.accel.getMag() > maxAccel:
            self.accel.setMag(maxAccel)
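
# A hedged sketch of the speed-limiting step above, assuming a plain (vx, vy)
# tuple instead of the v2.Vector class; limit_magnitude and max_mag are
# illustrative names, not part of the original code.
import math

def limit_magnitude(v, max_mag):
    # scale the vector down if its magnitude exceeds max_mag
    mag = math.hypot(v[0], v[1])
    if mag > max_mag and mag > 0:
        scale = max_mag / mag
        return (v[0] * scale, v[1] * scale)
    return v

# e.g. limit_magnitude((3.0, 4.0), 2.5) returns (1.5, 2.0).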
Example No. 8
def build_polygon_graph(polygons):
    polygon_graph = {}
    count_p = 0
    for p in polygons:
        previous = Node()
        first = Node()
        count_n = 0
        for n in p:
            new_id = "P{}.{}".format(count_p + 1, count_n)
            polygon_graph[new_id] = Node(vec.Coord(n.x, n.y), new_id)
            current = polygon_graph[new_id]
            if previous.is_proper():
                edge_id = "E{}.{}".format(count_p + 1, count_n)
                w = compute_weight(previous, current)
                previous.add_neighbour(Edge(w, previous, current, edge_id))
                current.add_neighbour(Edge(w, current, previous, edge_id))
            if count_n == 0:
                first = current
            previous = current
            count_n += 1
            if count_n == len(p):
                edge_id = "E{}.{}".format(count_p + 1, count_n)
                w = compute_weight(current, first)
                current.add_neighbour(Edge(w, current, first, edge_id))
                first.add_neighbour(Edge(w, first, current, edge_id))
        count_p += 1
    return polygon_graph
Example No. 9
    def __init__(self, coordinates=vec.Coord(-1, -1), id="undefined"):
        self.coord = coordinates
        self.neighbours = []
        self.id = id
        # Auxiliary attributes for Dijkstra
        self.distance = 0
        self.visited = False
        self.prev = ""
Example No. 10
def run(sock, arm, pose_func, edges):
    current_p = edges['init']
    i_bound = edges['init']
    o_bound = edges['bound']


    channel = Queue.Queue()

    # initial values
    increment = Vectors.V4D(0.00, 0.05, 0.00, 0)
    speed = 0.5

    # limit the amount of movement calls
    lim = 1000
    j = 0

    mdata = None

    while (j < lim):
        j += 1
        
        # socket call
        try:
            data, addr = sock.recvfrom(1024)
            mdata = recv_data(data)
            speed = mdata['energy']

            def swap_inc(data):
                # wait for the predicted beat, then flip the movement direction
                while time.time() < data['beat']:
                    time.sleep(0.01)
                global direction
                mutex.acquire()
                direction *= -1
                mutex.release()

            t = threading.Thread(target=swap_inc, args=(mdata,))
            t.daemon = True
            t.start()

        except socket.error:
            pass

        increment = decide_increment(mdata)

        current_p = current_p + increment

        current_p = clamp(current_p, i_bound, o_bound)

        pose_func(current_p)

        resp = ik_move(arm, pose_func(current_p))

        if resp is not None:
            make_move(resp, arm, speed)

        print "--------"

        time.sleep(0.1)
Example No. 11
    def calcCorners(self):
        """
        assumes we'll be facing from 2 units away on the z axis
        """
        targetZ = 2
        screenWidth = glutGet(GLUT_WINDOW_WIDTH)
        screenHeight = glutGet(GLUT_WINDOW_HEIGHT)
        fov = 45.0 / 360.0 * (2.0 * 3.14)
        worldHeight = targetZ * math.tan(fov / 2)
        worldWidth = worldHeight * float(self.imageWidth) / float(self.imageHeight)

        scaledWidth = self.width * worldWidth / self.height / 2
        quadCenter = [0, 0, 0]
        self.topLeft     = Vectors.add(quadCenter, [-worldWidth,  worldHeight, 0])
        self.topRight    = Vectors.add(quadCenter, [ worldWidth,  worldHeight, 0])
        self.bottomRight = Vectors.add(quadCenter, [ worldWidth, -worldHeight, 0])
        self.bottomLeft  = Vectors.add(quadCenter, [-worldWidth, -worldHeight, 0])

        print "w %s %s %s %s %s %s" % (screenWidth, screenHeight, fov, worldHeight, worldWidth, scaledWidth)
        print " %s\n %s\n %s\n %s\n" % (self.topLeft,
                                        self.topRight,
                                        self.bottomRight,
                                        self.bottomLeft)
Example No. 12
def left_arm(pos):
    quaternion = Vectors.V4D(0.36, 0.88, -0.10, 0.26)
    position = Pose(
        position=Point(
            x=pos.x(),
            y=pos.y(),
            z=pos.z(),
        ),
        orientation=Quaternion(
            x=quaternion.x(),
            y=quaternion.y(),
            z=quaternion.z(),
            w=quaternion.w(),
        ),
    )
    return position
Example No. 13
def clamp(p, inner, outer):
    if p.x() > outer.x():
        p = Vectors.V4D(outer.x(), p.y(), p.z(), p.w())
    elif p.x() < inner.x():
        p = Vectors.V4D(inner.x(), p.y(), p.z(), p.w())
    if p.y() > outer.y():
        p = Vectors.V4D(p.x(), outer.y(), p.z(), p.w())
    elif p.y() < inner.y():
        p = Vectors.V4D(p.x(), inner.y(), p.z(), p.w())
    if p.z() > outer.z():
        p = Vectors.V4D(p.x(), p.y(), outer.z(), p.w())
    elif p.z() < inner.z():
        p = Vectors.V4D(p.x(), p.y(), inner.z(), p.w())
    if p.w() > outer.w():
        p = Vectors.V4D(p.x(), p.y(), p.z(), outer.w())
    elif p.w() < inner.w():
        p = Vectors.V4D(p.x(), p.y(), p.z(), inner.w())

    return p
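
# The component-wise clamp above can also be written with a scalar helper; this
# is a sketch under the assumption (borne out by the code above) that V4D has
# x()/y()/z()/w() accessors and a 4-argument constructor, and that inner <= outer
# holds component-wise. clamp_compact and _clamp_scalar are illustrative names.
def _clamp_scalar(v, lo, hi):
    return max(lo, min(hi, v))

def clamp_compact(p, inner, outer):
    return Vectors.V4D(
        _clamp_scalar(p.x(), inner.x(), outer.x()),
        _clamp_scalar(p.y(), inner.y(), outer.y()),
        _clamp_scalar(p.z(), inner.z(), outer.z()),
        _clamp_scalar(p.w(), inner.w(), outer.w()),
    )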
Example No. 14
    def movepoints(self):
        for object in self.objects:
            for point in object:
                # Gets the vector from the old pos to the new pos
                # Multiplies it by the friction value to act as air resistance and friction
                velocity = (point - point.old) * self.friction
                self.evManager.push(MoveEvent(velocity.length()))
                # Set the old position to the current position
                point.old = Vectors.Vector3d(*point.list())

                # Compute the new position: add the gravity vector and multiply by the node's w value
                # A W value of 1 means that the point is normal and acts normally
                # A W value of 0 means that the point is 'pinned' and can't move
                newposition = point + (velocity + self.grav) * point.w

                # Sets the new node position
                point.x = newposition.x
                point.y = newposition.y
                point.z = newposition.z
Example No. 15
def get_information(meter, v_graph, p_graph, start_node):
	departure = Time.Time(7, 30, 0)
	end_node = gr.Node(vec.Coord(0, meter), "B")
	gr.update_visibility(v_graph, p_graph, end_node)
	gr.dijkstra(v_graph, start_node)
	distance = end_node.distance
	bt = shortest_runtime(meter, "Bus")
	lt = shortest_runtime(distance, "Lisa")
	meeting_time = Time.add_seconds(departure, bt)
	leaving_time = Time.subtract_seconds(meeting_time, lt)

	print("Startzeit: {}".format(leaving_time))
	print("Endzeit: {}".format(meeting_time))
	print("y-Koordinate Bus: {} m".format(meter))
	print("Laufdistanz: {} m".format(distance))
	print("Laufdauer: {} min".format(Time.sec_in_min(lt)))
	print(gr.traversed_nodes(v_graph, end_node))

	svg = Graphics.read_svg(case + ".svg")
	Graphics.visualise_path(svg, v_graph, end_node)
	Graphics.output_svg(svg, "shortest_path_final")
Example No. 16
def find_leaving_time(lower, higher, step, p_graph, v_graph, start_node, end_node):
	departure = Time.Time(7, 30, 0)
	meter = 0
	gr.dijkstra(v_graph, start_node)
	distance = end_node.distance
	# Time Lisa needs
	lt = shortest_runtime(distance, "Lisa")
	# Time the bus needs
	bt = shortest_runtime(meter, "Bus")
	# Time at the meeting point, i.e. the end time
	meeting_time = Time.add_seconds(departure, bt)
	# Start time
	leaving_time = Time.subtract_seconds(meeting_time, lt)
	# Initialise the best time (with its corresponding y coordinate)
	current_best_time = leaving_time
	current_best_meter = meter
	times = []

	# Find the latest possible time to leave the house
	for meter in range(lower, higher):
		if meter % step == 0:
			end_node = gr.Node(vec.Coord(0, meter), "B")
			gr.update_visibility(v_graph, p_graph, end_node)
			gr.dijkstra(v_graph, start_node)
			distance = end_node.distance
			lt = shortest_runtime(distance, "Lisa")
			bt = shortest_runtime(meter, "Bus")
			meeting_time = Time.add_seconds(departure, bt)
			leaving_time = Time.subtract_seconds(meeting_time, lt)
			if leaving_time > current_best_time:
				current_best_time = leaving_time
				current_best_meter = meter
			times.append(leaving_time.seconds_from_midnight())

	# Plot of possible leaving times
	x = np.arange(lower, higher, step)
	plt.plot(x, times)
	plt.show()

	return current_best_meter
Example No. 17
    Create goal Pose and call ik move
    '''
    pose_right = Pose(
        position=Point(
            x=pos.x(),
            y=pos.y(),
            z=pos.z(),
        ),
        orientation=Quaternion(x=0, y=1, z=0, w=0),
    )
    return pose_right


#time.sleep(2)

init_pos = Vectors.V4D(0.8, -0.47, 0.2, 0)

bound = Vectors.V4D(0.656982770038, -0.252598021641, 0.5388609422173, 0)

init_pos2 = Vectors.V4D(0.656982770038, -0.35, 0.1, 0)

init_pos3 = Vectors.V4D(0.656982770038, -0.35, 0.4, 0)

myps = PoseStamped(
    header=Header(stamp=rospy.Time.now(), frame_id='base'),
    pose=pose2(init_pos),
)

print 'initialisation move...'
resp = trac_ik_solve(mylimb, myps)
if resp is not None:
Example No. 18
def covariance(x, y):
    n = len(x)
    return Vectors.dot(de_mean(x), de_mean(y)) / (n-1)
def main():
    userID = systemArgHandler()

    global limbRight
    global limbLeft

    rospy.init_node('lift_ik_prototype', anonymous=True)
    """
	myps = PoseStamped(
		header=Header(stamp=rospy.Time.now(), frame_id='base'),
		pose=pose2(init_pos),
	)
	solve_move_trac(limbRight, myps)
	"""

    rate = rospy.Rate(5000.0)
    #main loop
    while not rospy.is_shutdown():
        try:
            #defines the pose of the child from the parent
            buffUserLeft = tf_buffer.lookup_transform(
                'cob_body_tracker/user_' + str(userID) + '/left_shoulder',
                'cob_body_tracker/user_' + str(userID) + '/left_hand',
                rospy.Time())
            buffUserRight = tf_buffer.lookup_transform(
                'cob_body_tracker/user_' + str(userID) + '/right_shoulder',
                'cob_body_tracker/user_' + str(userID) + '/right_hand',
                rospy.Time())
            #get translations
            xL = buffUserLeft.transform.translation.x
            yL = buffUserLeft.transform.translation.y
            zL = buffUserLeft.transform.translation.z
            xR = buffUserRight.transform.translation.x
            yR = buffUserRight.transform.translation.y
            zR = buffUserRight.transform.translation.z

            #translate the translations
            #add offsets to each joint
            tranXL = -zL * 1.2 + 0.2
            tranYL = xL * 1.5 - 0.28
            tranZL = -yL * 1.1 + 0.40

            tranXR = -zR * 1.2 + 0.2
            tranYR = xR * 1.5 + 0.28
            tranZR = -yR * 1.1 + 0.40

    #tranRightX = -z * 1.2
    #tranRightY =  x * 1.2
    #tranRightZ = -y * 0.9

    #catch and continue after refresh rate
        except (tf2_ros.LookupException, tf2_ros.ConnectivityException,
                tf2_ros.ExtrapolationException):
            rate.sleep()
            continue

        user_pos_left = Vectors.V4D(
            tranXL,  #depth (z?) (inline depth 0.2) (limit 1.0)
            tranYL,  #left-right (x?) (inline 0.28) (limit 1.0)
            tranZL,
            0)  #height (y?) (inline height 0.40)

        user_pos_right = Vectors.V4D(
            tranXR,  #depth (z?) (inline depth 0.2) (limit 1.0)
            tranYR,  #left-right (x?) (inline 0.28) (limit 1.0)
            tranZR,
            0)  #height (y?) (inline height 0.40)

        #user_rot = Vectors.V4D(math.pi/2,
        #	((math.atan2(y,x) + math.pi/2) % (math.pi * 2)) - 10*math.pi/180,
        #	0, 0)

        # rotation pointing the infront of Red
        user_rot = Vectors.V4D(0, math.pi / 2, 0, 0)

        rightArmPose = PoseStamped(
            header=Header(stamp=rospy.Time.now(), frame_id='base'),
            #header=Header(stamp=rospy.Time.now(), frame_id=limbRight+'_arm_mount'),
            #header=Header(stamp=rospy.Time.now(), frame_id=limbRight+'_lower_shoulder'),
            pose=userPose(user_pos_left, user_rot),
        )

        leftArmPose = PoseStamped(
            header=Header(stamp=rospy.Time.now(), frame_id='base'),
            #header=Header(stamp=rospy.Time.now(), frame_id=limbRight+'_arm_mount'),
            #header=Header(stamp=rospy.Time.now(), frame_id=limbRight+'_lower_shoulder'),
            pose=userPose(user_pos_right, user_rot),
        )

        #solve_move_trac(limbRight, leftArmPose)
        solve_move_trac(limbLeft, leftArmPose)

        #Refresh rate
        rate.sleep()
        #orientation=Quaternion(
        #    x=0,
        #    y=1,
        #    z=0,
        #    w=0
        #    ),
        orientation=normalize(
            Quaternion(x=q_rot[0], y=q_rot[1], z=q_rot[2], w=q_rot[3])))
    return pose_right


#NOTE: right hand
#index finger point left
init_pos = Vectors.V4D(
    0.6,  #depth (z?) (inline depth 0.2) (limit 1.0)
    -0.60,  #left-right (x?) (inline 0.28) (limit 1.0)
    0.40,
    0)  #height (y?) (inline height 0.40)


# Quaternion -> Quaternion
def normalize(quat):
    quatNorm = math.sqrt(quat.x * quat.x + quat.y * quat.y + quat.z * quat.z +
                         quat.w * quat.w)
    normQuat = Quaternion(quat.x / quatNorm, quat.y / quatNorm,
                          quat.z / quatNorm, quat.w / quatNorm)
    return normQuat
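
# A minimal self-contained check of the same normalisation, assuming a plain
# (x, y, z, w) tuple instead of the ROS geometry_msgs Quaternion used above;
# normalize_tuple is an illustrative name, not part of the original code.
import math

def normalize_tuple(q):
    x, y, z, w = q
    n = math.sqrt(x * x + y * y + z * z + w * w)
    return (x / n, y / n, z / n, w / n)

# e.g. normalize_tuple((0.36, 0.88, -0.10, 0.26)) has approximately unit length.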


def systemArgHandler():
    if len(sys.argv) == 2:
Example No. 21
def variance(x):
    n = len(x)
    deviations = de_mean(x)
    return Vectors.sum_of_squares(deviations) / (n-1)
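
# A self-contained sketch of the same sample statistics, assuming plain lists of
# floats; the inline means stand in for de_mean and the Vectors helpers above,
# and variance_plain/covariance_plain are illustrative names.
def variance_plain(x):
    n = len(x)
    mean = sum(x) / float(n)
    return sum((xi - mean) ** 2 for xi in x) / (n - 1)

def covariance_plain(x, y):
    n = len(x)
    mean_x = sum(x) / float(n)
    mean_y = sum(y) / float(n)
    return sum((xi - mean_x) * (yi - mean_y) for xi, yi in zip(x, y)) / (n - 1)

# e.g. variance_plain([1.0, 2.0, 3.0]) == 1.0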
Example No. 22
        tempBoids.remove(me)
        proximity = []
        for others in tempBoids:
            x1, y1 = me.pos
            x2, y2 = others.pos
            if (x1 - x2)**2 + (y1 - y2)**2 < radius**2:
                proximity.append(others)
        me.proximity = proximity

        # If anything is in the boid's proximity, it tries to align with neighbours, avoid bumping into them, and move towards their mean position
        if me.proximity:
            #alignment with direction of other boids
            sum_an1 = 0
            sum_an2 = 0
            for x in me.proximity:
                remVel = v2.Vector()
                remVel.setVectorC(0, 0)
                remVel = v2.addVects(remVel, x.vel)
                if remVel.getMag():
                    remVel.setMag(1 / remVel.getMag())
                sum_an1 += remVel.components[0]
                sum_an2 += remVel.components[1]
            sum_an1 /= len(me.proximity)
            sum_an2 /= len(me.proximity)
            angVector = v2.Vector()
            angVector.setVectorC(sum_an1, sum_an2)
            me.avgAngle = angVector.getAngle()  #Average angle for neighbors
            me.accel.setVectorA(me.avgAngle, 1)

            # separation
            me_post = (me.pos[0], me.pos[1])
Example No. 23
# Speeds in m/s
speed_lisa = 15.0 / 3.6
speed_bus = 30.0 / 3.6

# Read the input data
f = open(case+".txt", "r")
p = int(f.readline())
# Polygon coordinates
polygons = []
for i in range(p):
	line = f.readline()
	coord = []
	count = 0
	for number in line.split():
		if count % 2 != 0:
			coord.append(vec.Coord(pos_x=int(number)))
		if count % 2 == 0 and count != 0:
			coord[int((count / 2) - 1)].set_y(int(number))
		count += 1
	polygons.append(coord)
# House coordinates
line = f.readline()
coord_h = []
for number in line.split():
	coord_h.append(int(number))
start_x = coord_h[0]
start_y = coord_h[1]
f.close()

# Start node
start = vec.Coord(start_x, start_y)
Example No. 24
def main():
    global window, scene

    glutInit(sys.argv)

    # Select type of Display mode:   
    #  Double buffer 
    #  RGBA color
    # Alpha components supported 
    # Depth buffer
    glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_ALPHA | GLUT_DEPTH)
    
    # get a 960 x 480 window 
    glutInitWindowSize(960, 480)
    
    # the window starts at the upper left corner of the screen 
    glutInitWindowPosition(0, 0)
    
    # Okay, like the C version we retain the window id to use when closing, but for those of you new
    # to Python (like myself), remember this assignment would make the variable local and not global
    # if it weren't for the global declaration at the start of main.
    window = glutCreateWindow("ACCRE Cluster Status Monitor")

    # Register the drawing function with glut, BUT in Python land, at least using PyOpenGL, we need to
    # set the function pointer and invoke a function to actually register the callback, otherwise it
    # would be very much like the C version of the code.    
    glutDisplayFunc(DrawGLScene)
    #glutDisplayFunc()
    
    # Uncomment this line to get full screen.
    # glutFullScreen()

    # When we are doing nothing, redraw the scene.
    glutIdleFunc(doIdle)
        
    # Register the function called when our window is resized.
    glutReshapeFunc(ReSizeGLScene)

    # Register the function called when the keyboard is pressed.  
    glutKeyboardFunc(keyPressed)
    
    glutMouseFunc( mousePressed )
    
    # Load the stages
    storageStage   = StorageStage()
    globeStage     = TexturedGlobe()

    LHCStatusStage = CurlStage( pos = [40.9604490329, 580.455382799, 797.001287513],
                                url = "http://vistar-capture.web.cern.ch/vistar-capture/lhc1.png",
                                updateRate = 160 * 1000)
    DAQStatusStage = CurlStage( pos = [36.9604490329, 580.455382799, 797.001287513],
                                url = "http://cmsonline.cern.ch/daqStatusSCX/aDAQmon/DAQstatusGre.jpg",
                                updateRate = 150 * 1000)
    CMSStatusStage = CurlStage( pos = [32.9604490329, 580.455382799, 797.001287513],
                                url = "http://cmspage1.web.cern.ch/cmspage1/data/page1.png",
                                updateRate = 180 * 1000)
    
    scene['camera'].lookat = LHCStatusStage.pos
    scene['camera'].pos = [40.9604490329, 580.455382799, 799.001287513]
     
    scene['objects'].extend( [storageStage, globeStage, LHCStatusStage, DAQStatusStage, CMSStatusStage] )
    
    globalCameraTween    = MoveCameraTween( scene['camera'], [137.74360349518597, 1769.5965518451512, 2418.585277263117],
                                            [0,0,0],[0,1,0] )
    
    #globalViewTween = RotateCameraTween( scene['camera'], 36.1658, -86.7844, 3000, 1, [0,0,0], [0,1,0])
    globalViewTween = RotateCameraTween( scene['camera'], 36.1658,-86.7844, 3000, 1, [0,0,0], [0,1,0])

    storageCamTween  = MoveCameraTween( scene['camera'],
                                             [45.9604490329, 580.455382799, 799.001287513],
                                             [45.9604490329, 580.455382799, 797.001287513],
                                             [0,1,0] )

    
    hideGlobe = HideTween( [ globeStage] )
    showGlobe = ShowTween( [ globeStage] )
    
    storageTimeline = Timeline( name = "Vampire - Storage")
    storageTimeline.tweens.append( storageCamTween )
    storageTimeline.tweens.append( hideGlobe )
    storageToGlobal = Timeline( name = "Zoom to world")
    storageToGlobal.tweens.append( globalCameraTween )
    storageToGlobal.tweens.append( showGlobe )
    globalTimeline  = Timeline( name = "CMS - Global")
    globalTimeline.tweens.append( globalViewTween )
    #globalTimeline.duration = 50000
    globalToStorage = Timeline( name = "Zoom to ACCRE")
    globalToStorage.tweens.append( storageCamTween )
    
    plotsToMonitor = ["http://vistar-capture.web.cern.ch/vistar-capture/lhc1.png",
                      "http://cmsonline.cern.ch/daqStatusSCX/aDAQmon/DAQstatusGre.jpg",
                      "http://cmspage1.web.cern.ch/cmspage1/data/page1.png"]
    initialPlotPos = [40.9604490329, 580.455382799, 797.001287513]
    previousTimeline = storageTimeline
    for plot in plotsToMonitor:
        stage = CurlStage( pos = initialPlotPos,
                                url = plot,
                                updateRate = 160 * 1000)
        
        plotSwapTween  = MoveCameraTween( scene['camera'],
                                             Vectors.add(stage.pos, [0,0,2]),
                                             stage.pos,
                                             [0,1,0],
                                             arrivalAlpha = 0.1 )
        currentTimeline = Timeline( name = "Monitoring Plots")
        currentTimeline.tweens.append( plotSwapTween )
        previousTimeline.setNext( currentTimeline )
        previousTimeline = currentTimeline
        initialPlotPos = Vectors.add(initialPlotPos, [-4,0,0])

    currentTimeline.setNext(storageToGlobal)
    storageToGlobal.setNext( globalTimeline )
    globalTimeline.setNext(  globalToStorage )
    globalToStorage.setNext( storageTimeline )
    
    scene['currentTimeline'] = globalTimeline
    globalTimeline.start( 0 )
    # Initialize our window. 
    InitGL(960, 480)
Example No. 25
def main():
    dev = load()
    dev_inner = dev["data"]
    result = {}
    classifier = V.classifier()

    for documents in dev_inner:
        for paragraph in documents['paragraphs']:
            pos_dict = {}
            word_dict = {}
            entities = {}
            entity_dict = {}
            para = normalize(paragraph['context'])
            tokens = para.encode("utf-8").split()
            #encodetoken=[]
            #for x in tokens:
            #    encodetoken.append(x.encode("utf-8"))
            tagged = nltk.pos_tag(nltk.word_tokenize(paragraph['context']))
            #tagged = nltk.pos_tag(tokens)
            named_entities = nltk.chunk.ne_chunk(tagged)
            tags = named_entities.pos()
            for t in tags:
                word = t[0][0]
                if t[0][1] not in pos_dict:
                    pos_dict[t[0][1]] = []
                    pos_dict[t[0][1]].append(t[0][0])
                elif t[0][0] not in pos_dict[t[0][1]]:
                    pos_dict[t[0][1]].append(t[0][0])
                if t[0][0] not in word_dict:
                    word_dict[t[0][0]] = []
                    word_dict[t[0][0]].append(t[0][1])
                elif t[0][1] not in word_dict[t[0][0]]:
                    word_dict[t[0][0]].append(t[0][1])
                if t[1] not in entities:
                    entities[t[1]] = []
                    entities[t[1]].append(t[0][0])
                elif t[0][0] not in entities[t[1]]:
                    entities[t[1]].append(t[0][0])
                if word not in entity_dict:
                    entity_dict[word] = []
                    entity_dict[word].append(t[1])
                elif t[1] not in entity_dict[word]:
                    entity_dict[word].append(t[1])

            # for e in entities:
            #     print e
            #     print entities[e]

            paragraph_tokens = paragraph['context'].split()  ## get tokens from the context
            numofsen = 0  ## number of sentences
            sentence = {}
            sentence[numofsen] = []
            for x in paragraph_tokens:
                sentence[numofsen].append(normalize(x))
                ## treat any punctuation as the end of a sentence; otherwise sentences get very long
                if '.' in x or ',' in x or '?' in x or '!' in x or ';' in x or ':' in x:
                    numofsen += 1
                    sentence[numofsen] = []

            sentence_vectors = []
            for i in range(0,numofsen):
                sentence_vectors.append(classifier.create_avg_vector(sentence[i]))

            for qa in paragraph['qas']: ##for each question, compute the unigram overlap
                tag= "OTHER"
                question = qa['question'].lower()
                if "what time" in question or "which time" in question or "what year" in question or "which year" in question or "what century" in question or "which century" in question or "what month" in question or "which month" in question or "what decade" in question or "which decade" in question:
                    tag = "TIM"
                elif "what place" in question or "which place" in question or "what area" in question or "which area" in question or "what town" in question or "which town" in question or "what state" in question or  "which state" in question or "what city" in question or "which city" in question or "what country" in question or "which country" in question:
                    tag = "LOC"
                elif "what person" in question or "which person" in question:
                    tag = "PER"
                elif "what" in question or "which" in question:
                    tag = "OTHER1"
                #check for anytime what/how comes before when, tag as OTHER
                #check for "on what" tag as LOC
                #check for what followed by a LOC/PER/TIM tag
                #check for "what time" tag as TIM
                elif "where" in question:
                    tag = "LOC"
                elif "when" in question:
                    tag = "TIM"
                elif "who" in question:
                    #covers whom/whose as well
                    tag = "PER"
                else:
                    tag = "OTHER2"

                maxsim = -1  ## maximum similarity found so far
                maxnum = 0  ## index of the sentence with the maximal similarity
                qa_tokens = normalize(qa['question']).split()
                qa_vector = classifier.create_avg_vector(qa_tokens)
                for i in range(0, numofsen):
                    ## compute the similarity between this sentence and the question
                    total = classifier.cosine_similarity(sentence_vectors[i], qa_vector)
                    # print total
                    if total >= maxsim:  ## keep the maximum similarity
                        #print ("in if", i, total)
                        maxsim = total
                        maxnum = i
                better_qa(tag, sentence[maxnum], numofsen, qa['id'], qa['question'], entities, word_dict, result)
        store(result) ##write the answers to json file
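
# A self-contained sketch of the cosine similarity that classifier.cosine_similarity
# is assumed to compute above, using plain lists of floats; the classifier's own
# implementation may differ, and cosine_similarity_plain is an illustrative name.
import math

def cosine_similarity_plain(u, v):
    dot = sum(a * b for a, b in zip(u, v))
    norm_u = math.sqrt(sum(a * a for a in u))
    norm_v = math.sqrt(sum(b * b for b in v))
    if norm_u == 0 or norm_v == 0:
        return 0.0
    return dot / (norm_u * norm_v)

# e.g. cosine_similarity_plain([1.0, 0.0], [0.0, 1.0]) == 0.0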
Example No. 26
def compute_features(path, features):
    img = Image.open(path)
    data = np.asarray(img, np.float32)
    return Vectors.compute_feature_vector(data, features)
Example No. 27
def main():
    userID = systemArgHandler()

    global mylimb

    rospy.init_node('lift_ik_prototype', anonymous=True)
    """
	myps = PoseStamped(
		header=Header(stamp=rospy.Time.now(), frame_id='base'),
		pose=pose2(init_pos),
	)
	solve_move_trac(mylimb, myps)
	"""

    rate = rospy.Rate(5000.0)
    #main loop
    while not rospy.is_shutdown():
        try:
            #defines the pose of the child from the parent
            buffUserLeft = tf_buffer.lookup_transform(
                'cob_body_tracker/user_' + str(userID) + '/left_shoulder',
                'cob_body_tracker/user_' + str(userID) + '/left_hand',
                rospy.Time())
            #get translations
            x = buffUserLeft.transform.translation.x
            y = buffUserLeft.transform.translation.y
            z = buffUserLeft.transform.translation.z

            #translate the translations
            tranRightX = -z * 1.2 + 0.2
            tranRightY = x * 1.5 - 0.28
            tranRightZ = -y * 1.1 + 0.40
    #tranRightX = -z * 1.2
    #tranRightY =  x * 1.2
    #tranRightZ = -y * 0.9

    #catch and continue after refresh rate
        except (tf2_ros.LookupException, tf2_ros.ConnectivityException,
                tf2_ros.ExtrapolationException):
            rate.sleep()
            continue

        user_pos = Vectors.V4D(
            tranRightX,  #depth (z?) (inline depth 0.2) (limit 1.0)
            tranRightY,  #left-right (x?) (inline 0.28) (limit 1.0)
            tranRightZ,
            0)  #height (y?) (inline height 0.40)

        #user_rot = Vectors.V4D(math.pi/2,
        #	((math.atan2(y,x) + math.pi/2) % (math.pi * 2)) - 10*math.pi/180,
        #	0, 0)
        user_rot = Vectors.V4D(0, math.pi / 2, 0, 0)

        leftArmPose = PoseStamped(
            header=Header(stamp=rospy.Time.now(), frame_id='base'),
            #header=Header(stamp=rospy.Time.now(), frame_id=mylimb+'_arm_mount'),
            #header=Header(stamp=rospy.Time.now(), frame_id=mylimb+'_lower_shoulder'),
            pose=userPose(user_pos, user_rot),
        )

        solve_move_trac(mylimb, leftArmPose)

        #Refresh rate
        rate.sleep()
Example No. 28
    elif to_generate == 'v':
        vector_len = conf['vector_len']
        motion_type = conf['motion_type']  # Type of motion

        data_dir = conf['root'] + "Vectors_dataset/" + motion_type + '_' + str(
            n_samples)
        check_dirs(data_dir, True)

        for i in range(n_samples):
            if i % 100 == 0 or i == n_samples - 1:
                print(i)

            if motion_type == 'URM':
                if i == 0:
                    header = 'x0 u_x n_points gap motion noise(mean,std)\n'
                sample = Vectors.URM(noise_parameters, n_points, gap,
                                     vector_len)

            if split_flag:
                if i == 0:
                    train_path = data_dir + '/' + motion_type + '_' + str(gap) + '_' \
                                     + str(noise_parameters) + '_train'
                    check_dirs(train_path + '/samples')

                    test_path = data_dir + '/' + motion_type + '_' + str(gap) + '_' \
                                    + str(noise_parameters) + '_test'
                    check_dirs(test_path + '/samples')

                    val_path = data_dir + '/' + motion_type + '_' + str(gap) + '_' \
                                   + str(noise_parameters) + '_val'
                    check_dirs(val_path + '/samples')
Example No. 29
    def findCentre(self):
        meanX = sum([node.x for node in self.nodes]) / len(self.nodes)
        meanY = sum([node.y for node in self.nodes]) / len(self.nodes)
        meanZ = sum([node.z for node in self.nodes]) / len(self.nodes)
        return Vectors.Vector3d(meanX, meanY, meanZ)
Example No. 30
    with open('findsentence.json', 'w') as json_file:
        json_file.write(json.dumps(data))

## transform the json file into a dictionary
def load():
    with open('training.json') as json_file:
        data = json.load(json_file)
        return data

if __name__ == "__main__":

    data = {}
    data = load()
    dataset = data['data']
    result = {}
    classifier = V.classifier()
    ## We compute unigram overlap here: because the word order in questions often differs from the context, unigrams are more reliable
    for article in dataset:
        for paragraph in article['paragraphs']:
            paragraph_tokens = paragraph['context'].split()  ## get tokens from the context
            numofsen = 0  ## number of sentences
            sentence = {}
            sentence[numofsen] = []
            for x in paragraph_tokens:
                sentence[numofsen].append(normalize(x))
                ## treat any punctuation as the end of a sentence
                if '.' in x or ',' in x or '?' in x or '!' in x or ';' in x or ':' in x:
                    numofsen += 1
                    sentence[numofsen] = []

            #print('start_classifier')