Example #1
0
    def onRingRightDrag(self, event):
        """Scale the node's font size while its ring is right-dragged.

        The scale factor is the ratio of the cursor's current distance
        from the node centre to its distance when the drag started, applied
        to the font size captured at drag start (self.dragFontInit).
        """
        self.cursorPos = (event.x, event.y)

        # Cursor distance from the node centre at drag start vs. now.
        startDist = calc.dist(self.dragInit, self.pixLoc)
        nowDist = calc.dist(self.cursorPos, self.pixLoc)
        scale = float(nowDist) / startDist

        # Rescale the drag-start font size and its zoomed counterpart.
        self.fontSize = int(self.dragFontInit * scale)
        self.z_fontSize = self.fontSize * self.curZoom
        self.updateFont()

        self.resizeCircleForText()
Example #2
0
    def getCoords(self, dFrac=0.5):
        """Return (x0p, y0p, x1p, y1p) for the line between two thoughts.

        The endpoints are pulled in from the thought centres so the line
        spans only the gap between the white rings around thought A and
        thought B.  As a side effect, refreshes self.z_length (zoomed
        length of the trimmed line) and self.length (unzoomed).
        """
        xA, yA = self.tA.pixLoc[0], self.tA.pixLoc[1]
        xB, yB = self.tB.pixLoc[0], self.tB.pixLoc[1]

        # Centre-to-centre distance between the two thoughts.
        length = calc.dist((xA, yA), (xB, yB))

        # Fraction of the way from B back toward A where A's white ring sits.
        fracA = (1.0 * length - (self.tA.z_r + self.tA.z_ringSpacing[0])) / length
        x0p = fracA * xA + (1.0 - fracA) * xB
        y0p = fracA * yA + (1.0 - fracA) * yB

        # Fraction of the way from A toward B where B's white ring sits.
        fracB = (1.0 * length - (self.tB.z_r + self.tB.z_ringSpacing[0])) / length
        x1p = fracB * xB + (1.0 - fracB) * xA
        y1p = fracB * yB + (1.0 - fracB) * yA

        # Cache the trimmed length in zoomed and sheet coordinates.
        self.z_length = calc.dist((x0p, y0p), (x1p, y1p))
        self.length = self.z_length / self.parentSheet.curZoom

        return (x0p, y0p, x1p, y1p)
Example #3
0
 def interact(self, other, dt):
     """Apply the gravitational pull of *other* to this body over *dt*.

     Updates this body's velocity and position in place, then re-centres
     its drawing rect.  Force and distance come from the project's
     `calculations` helpers.
     """
     # Displacement vector from this body to the other.
     dx = other.position.x - self.position.x
     dy = other.position.y - self.position.y

     force = calculations.force(dx, dy, self.mass, self.size, other.mass,
                                other.size)
     dist = calculations.dist(dx, dy, self.size, other.size)

     # a = F / m, with an extra damping factor of 10.
     accel = force / (self.mass * 10)

     # Accelerate along the unit vector pointing at the other body.
     self.velocity.x += accel * (dx / dist)
     self.velocity.y += accel * (dy / dist)

     # Integrate position and keep the sprite rect in sync.
     self.position.x += self.velocity.x * dt
     self.position.y += self.velocity.y * dt
     self.rect.center = (self.position.x, self.position.y)
Example #4
0
    def onRingLeftDrag(self, event):
        """Resize the node by left-dragging its outer ring.

        The new radius tracks the cursor's distance from the node centre
        (converted to unzoomed units); dragging to within 10 units of the
        centre deletes the node instead.
        """
        zoom = self.curZoom

        # Cursor distance from the node centre, in unzoomed sheet units.
        d = calc.dist(self.pixLoc, (event.x, event.y)) / zoom

        # Collapsing the ring onto the centre removes the node entirely.
        if d <= 10:
            self.remove()
            return

        # The ring sits ringSpacing[0] outside the node's radius.
        self.r = d - self.ringSpacing[0]
        self.z_r = zoom * self.r

        self.reDraw()

        self.parentSheet.updateNodeEdges(self)
Example #5
0
	def nodeDist(self, tA, tB):
		"""Return the pixel-space distance between the centres of thoughts *tA* and *tB*."""
		return calc.dist(tA.pixLoc, tB.pixLoc)
Example #6
0
    def step(self, action):
        """Advance the simulation by one frame.

        Applies pairwise gravity between all bodies, rewards the agent for
        speed, penalizes touching the sun, applies the player action, and
        redraws the scene.  self.score is the reward observed by PLE.
        The episode resets when the agent leaves the screen (with a +1000
        escape bonus) or after 1000 ticks (with a -100 penalty if it never
        escaped).
        """
        # Push the previous frame to the display.
        pygame.display.update()

        # Coarse frame counter; kept mostly for future use.
        self.ticks += 1

        # Clear to black before redrawing.
        self.screen.fill((0, 0, 0))

        # Pairwise gravity: every body pulls on every other body.
        for bodyA in self.bodies:
            for bodyB in self.bodies:
                if bodyA != bodyB:
                    bodyA.interact(bodyB, self.dt)

        # Passive per-tick reward (currently configured to 0).
        self.score = self.rewards["tick"]

        # Heavy penalty for touching the sun: the surfaces meet when the
        # centre distance equals the sum of the two radii.
        dx = self.agent.position.x - self.sun.position.x
        dy = self.agent.position.y - self.sun.position.y
        self.dist_to_sun = calculations.dist(dx, dy, self.agent.size / 2,
                                             self.sun.size / 2)
        if (abs(self.dist_to_sun -
                (self.agent.size / 2 + self.sun.size / 2)) < 1):
            self.score -= 1000

        # Reward speed: add the magnitude of the agent's velocity vector.
        reward = self.agent.velocity.length
        self.score += reward

        # Apply the chosen action, then integrate the agent's motion.
        self._handle_player_events()
        self.agent.update(self.dx, self.dy, self.dt)

        # Redraw the agent and all celestial bodies.
        self.agent.draw(self.screen)
        for body in self.bodies:
            body.draw(self.screen)

        # Escape bonus + reset when the agent leaves the screen.
        # BUG FIX: the vertical bounds were previously compared against
        # self.width; the y coordinate must be checked against self.height.
        if (self.agent.position.x > self.width or self.agent.position.x < 0
                or self.agent.position.y > self.height
                or self.agent.position.y < 0):
            self.score += 1000
            self.escapes += 1
            self.reset()
            self.init()

        # Timeout: after 1000 ticks, penalize never having escaped, reset.
        if self.ticks > 1000:
            if self.escapes == 0:
                self.score -= 100
            self.reset()
            self.init()

        print(self.score)