def getWR(self, O):
    self.l = self.getL(self.l, self.wWriteList[-1], self.p)
    _w = self.wReadList[-1]
    assert helper.check(_w, [self.amountReadHeads, self.memory.length],
                        self.batchSize)

    # Forward/backward weightings via the temporal link matrix
    f = tf.matmul(_w, self.l)
    b = tf.matmul(_w, self.l, transpose_b=True)
    assert helper.check(f, [self.amountReadHeads, self.memory.length],
                        self.batchSize)
    assert helper.check(b, [self.amountReadHeads, self.memory.length],
                        self.batchSize)

    # Content addressing with keys kR and strengths bR
    kR = tf.reshape(
        helper.map("map_kR", O, self.amountReadHeads * self.memory.bitDepth),
        [-1, self.amountReadHeads, self.memory.bitDepth])
    bR = tf.nn.softplus(helper.map("map_bR", O, self.amountReadHeads)) + 1
    c = self.getCosSimSoftMaxExtra(kR, bR, self.amountReadHeads)

    # Interpolate between the backward, content, and forward weightings
    pi = tf.nn.softmax(
        tf.reshape(helper.map("map_pi", O, self.amountReadHeads * 3),
                   [-1, self.amountReadHeads, 3]))
    w = tf.expand_dims(pi[:, :, 0], axis=-1) * b \
        + tf.expand_dims(pi[:, :, 1], axis=-1) * c \
        + tf.expand_dims(pi[:, :, 2], axis=-1) * f
    assert helper.check(w, [self.amountReadHeads, self.memory.length],
                        self.batchSize)

    self.p = self.getP(self.p, self.wWriteList[-1])
    return w
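# A minimal NumPy sketch of the forward/backward weightings above. The toy
# link matrix orientation is a made-up convention, chosen so that with the
# row-vector products the code uses (f = w @ L, b = w @ L.T), f moves one
# step forward in write order and b one step back:
import numpy as np

L = np.array([[0., 1., 0., 0.],   # L[i, j] = 1: slot j was written right
              [0., 0., 1., 0.],   # after slot i (write order 0 -> 1 -> 2 -> 3)
              [0., 0., 0., 1.],
              [0., 0., 0., 0.]])
w = np.array([0., 1., 0., 0.])    # previous read: slot 1

f = w @ L      # forward:  mass moves to slot 2
b = w @ L.T    # backward: mass moves to slot 0
print(f, b)    # [0. 0. 1. 0.] [1. 0. 0. 0.]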
def buildTimeLayer(self, input, first=False):
    with tf.variable_scope(self.name):
        if first:
            if len(input.get_shape()) == 2:
                batchSize = tf.shape(input)[0]
            else:
                batchSize = None
            self.prevState = helper.getTrainableConstant(
                "startState", self.stateSize, batchSize)
            self.prevOutput = tf.tanh(self.prevState)

        cc = tf.concat([input, self.prevOutput], axis=-1)

        # Standard LSTM cell: three gates plus the candidate update
        forgetGate = tf.sigmoid(helper.map("forgetGate", cc, self.stateSize))
        saveGate = tf.sigmoid(helper.map("saveGate", cc, self.stateSize))
        outputGate = tf.sigmoid(helper.map("outputGate", cc, self.stateSize))
        update = tf.tanh(helper.map("update", cc, self.stateSize))

        self.prevState = (self.prevState * forgetGate) + (saveGate * update)
        self.prevOutput = outputGate * tf.tanh(self.prevState)
        return self.prevOutput
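# Sanity reference: the update above is the standard LSTM cell. A minimal
# NumPy sketch with random matrices standing in for the learned helper.map
# layers (the names Wf/Ws/Wo/Wu are hypothetical):
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

rng = np.random.default_rng(0)
inputSize, stateSize = 4, 8
Wf, Ws, Wo, Wu = (rng.normal(size=(inputSize + stateSize, stateSize))
                  for _ in range(4))

x = rng.normal(size=(1, inputSize))   # one batch element
state = np.zeros((1, stateSize))      # prevState
output = np.tanh(state)               # prevOutput

cc = np.concatenate([x, output], axis=-1)
state = state * sigmoid(cc @ Wf) + sigmoid(cc @ Ws) * np.tanh(cc @ Wu)
output = sigmoid(cc @ Wo) * np.tanh(state)
print(output.shape)  # (1, 8)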
def getWR(self, O):
    k = tf.nn.softplus(helper.map("map_k", O, self.memory.bitDepth))
    b = tf.nn.softplus(helper.map("map_b", O, 1))
    w = self.getCosSimSoftMax(k, b)
    assert helper.check(w, [self.memory.length], self.batchSize)
    return w
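# getCosSimSoftMax is defined elsewhere in the repo; content addressing of
# this kind is typically a softmax over cosine similarities scaled by the
# key strength b. A minimal NumPy sketch under that assumption:
import numpy as np

def cos_sim_softmax(k, b, M, eps=1e-8):
    sim = (M @ k) / (np.linalg.norm(M, axis=-1) * np.linalg.norm(k) + eps)
    e = np.exp(b * sim - np.max(b * sim))   # numerically stable softmax
    return e / e.sum()

M = np.array([[1.0, 0.0, 0.0],    # toy memory: length 3, bitDepth 3
              [0.0, 1.0, 0.0],
              [0.7, 0.7, 0.0]])
print(cos_sim_softmax(np.array([1.0, 0.0, 0.0]), b=5.0, M=M))
# peaks at slot 0, some mass on slot 2, little on slot 1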
def updateForce(self, rsy_val, lsx_val):
    """
    Updates locally stored thruster force values and direction.

    Arguments:
    rsy_val -- y value of the right joystick
    lsx_val -- x value of the left joystick
    """
    self.force[0] = helper.map(rsy_val, -100, 100, 1140, 1850)
    self.force[1] = helper.map(lsx_val, -100, 100, 1140, 1850)
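# Here helper.map is a numeric range remap (unlike the learned linear maps of
# the same name in the TensorFlow snippets above). A minimal sketch of such a
# helper, assuming Arduino-style linear remapping; the name remap is mine:
def remap(value, in_min, in_max, out_min, out_max):
    """Linearly remap value from [in_min, in_max] to [out_min, out_max]."""
    return out_min + (value - in_min) * (out_max - out_min) / (in_max - in_min)

print(remap(0, -100, 100, 1140, 1850))    # 1495.0 (stick centered -> midpoint)
print(remap(100, -100, 100, 1140, 1850))  # 1850.0 (stick full -> max pulse)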
def getWW(self, O):
    g = tf.sigmoid(helper.map("map_g", O, 1))
    b = tf.nn.softplus(helper.map("map_b", O, 1))

    # Differentiable approximation of the least-used (lu) weighting
    lu = tf.nn.softmax((1 - tf.sigmoid(self.u)) * b)
    w = g * self.wReadList[-1] + (1 - g) * lu

    # Decay usage and accumulate this step's read and write locations
    self.u = 0.95 * self.u + self.wReadList[-1] + w
    assert helper.check(w, [self.memory.length], self.batchSize)
    return w
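# A minimal NumPy sketch of the least-used approximation above: slots with
# low usage u receive most of the softmax mass, and b controls how peaked
# the distribution is (toy values, not from the repo):
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

u = np.array([3.0, 0.1, 2.0, -1.0])   # running usage, higher = more used
b = 4.0                               # sharpness (a softplus output)

scores = (1 - sigmoid(u)) * b
lu = np.exp(scores - scores.max())
lu /= lu.sum()
print(lu)  # most mass on the least-used slot (index 3)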
def getWR(self, O):
    mapping = [
        self.amountReadHeads * self.memory.bitDepth, self.amountReadHeads,
        self.amountReadHeads * 3, self.amountReadHeads
    ]
    o = helper.map("map_wro", O, np.sum(mapping))
    o1, o2, o3, o4 = tf.split(o, mapping, -1)

    self.l = self.getL(self.l, self.wWriteList[-1], self.p)
    _w = self.wReadList[-1]
    assert helper.check(_w, [self.amountReadHeads, self.memory.length],
                        self.batchSize)

    # Forward/backward weightings via the temporal link matrix
    f = tf.matmul(_w, self.l)
    b = tf.matmul(_w, self.l, transpose_b=True)
    assert helper.check(f, [self.amountReadHeads, self.memory.length],
                        self.batchSize)
    assert helper.check(b, [self.amountReadHeads, self.memory.length],
                        self.batchSize)

    kR = tf.nn.softplus(
        tf.reshape(o1, [-1, self.amountReadHeads, self.memory.bitDepth]))
    bR = tf.nn.softplus(o2) + 1

    if self.cosSimMask:
        mask = tf.reshape(
            tf.sigmoid(
                helper.map("map_wr_mask", O,
                           self.amountReadHeads * self.memory.bitDepth)),
            [-1, self.amountReadHeads, self.memory.bitDepth])
        c = self.getCosSimSoftMaxExtraMasked(kR, bR, self.amountReadHeads, mask)
    else:
        c = self.getCosSimSoftMaxExtra(kR, bR, self.amountReadHeads)

    # Interpolate between the backward, content, and forward weightings
    pi = tf.nn.softmax(tf.reshape(o3, [-1, self.amountReadHeads, 3]))
    w = tf.expand_dims(pi[:, :, 0], axis=-1) * b \
        + tf.expand_dims(pi[:, :, 1], axis=-1) * c \
        + tf.expand_dims(pi[:, :, 2], axis=-1) * f
    assert helper.check(w, [self.amountReadHeads, self.memory.length],
                        self.batchSize)

    self.p = self.getP(self.p, self.wWriteList[-1])

    # Free gate: queue forgetting of the memory that was just read
    # (renamed from f to avoid shadowing the forward weighting above)
    free = tf.sigmoid(o4)
    self.memory.queueForget(1 - (tf.expand_dims(free, axis=-1) * w))
    return w
def buildWriteHead(self, O):
    '''
    Build the write head: get the W from the child class and add the
    write operation to the memory queue. This queue is necessary if there
    are multiple write operations/heads; otherwise the second write would
    be based on the first write.
    '''
    with tf.variable_scope(self.name):
        with tf.variable_scope("write"):
            self.wWriteList.append(self.getWW(O))
            erase = tf.sigmoid(helper.map("map_erase", O, self.memory.bitDepth))
            write = helper.map("map_write", O, self.memory.bitDepth)
            self.memory.queueWrite(self.wWriteList[-1], erase, write)
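# The queued write is applied later by the memory class (not shown here). The
# usual NTM/DNC write is an erase followed by an add; a minimal NumPy sketch
# assuming that is what queueWrite ultimately performs:
import numpy as np

def apply_write(M, w, erase, write):
    # M: [length, bitDepth], w: [length], erase/write: [bitDepth]
    M = M * (1 - np.outer(w, erase))   # erase where the head attends
    return M + np.outer(w, write)      # then add the new content there

M = np.zeros((4, 3))
w = np.array([0.0, 1.0, 0.0, 0.0])
M = apply_write(M, w, erase=np.ones(3), write=np.array([0.5, -0.5, 1.0]))
print(M[1])  # [ 0.5 -0.5  1. ]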
def getWW(self, O):
    self.u = self.getU(O, self.u, self.wWriteList[-1], self.wReadList[-1])
    a = self.getA(self.u)

    kW = helper.map("map_kW", O, self.memory.bitDepth)
    bW = tf.nn.softplus(helper.map("map_bW", O, 1)) + 1
    c = self.getCosSimSoftMax(kW, bW)

    gw = tf.sigmoid(helper.map("map_gw", O, 1))
    ga = tf.sigmoid(helper.map("map_ga", O, 1))
    w = gw * (ga * a + (1 - ga) * c)
    assert helper.check(w, [self.memory.length], self.batchSize)
    return w
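# getA is defined elsewhere; in the DNC paper the allocation weighting sorts
# slots by usage and allocates the freest ones first. A sketch of that
# computation, assuming getA follows the paper:
import numpy as np

def get_allocation(u):
    phi = np.argsort(u)            # free list: least-used slots first
    a = np.zeros_like(u)
    prod = 1.0
    for j in phi:
        a[j] = (1 - u[j]) * prod   # allocate what is still free...
        prod *= u[j]               # ...discounted by the usage before it
    return a

print(get_allocation(np.array([0.9, 0.1, 0.5, 1.0])))
# [0.005 0.9   0.05  0.   ] -> nearly all mass on slot 1 (usage 0.1)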
def build(self, x, outputMask=None, outputSize=None):
    '''
    Builds the unit.
    outputMask: array of length equal to the number of time steps,
        consisting of 0s and 1s. If the entry is 1, the output of that
        time step is returned. If None, all time steps are returned.
    outputSize: if not None, apply a linear map to that dimension
    '''
    output = []

    # TODO: Check if unrolling can be optimized using unstack and stack
    # Loop over all the time steps
    for i in range(int(x.get_shape()[-2])):
        print("Building step: " + str(i + 1))

        # Get a slice of the input and build the network for this time step
        input = tf.squeeze(tf.slice(x, [0, i, 0], [-1, 1, -1]), [1])
        O = self.buildTimeLayer(input, first=(i == 0))

        # Process the output as defined by the parameters
        if outputMask is None or outputMask[i] == 1:
            if outputSize is not None:
                with tf.variable_scope(self.name):
                    O = helper.map("outputMap", O, outputSize)
            output.append(tf.expand_dims(O, -2))

    # Return the concatenated output of all requested time steps
    return tf.concat(output, axis=-2)
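# Hypothetical usage of build: unroll over 10 steps and keep only the last
# output, linearly mapped to 5 dimensions. `unit` stands for an instance of
# one of the time-layer classes in this section; its constructor is assumed.
x = tf.placeholder(tf.float32, [None, 10, 8])   # [batch, steps, features]
mask = [0] * 9 + [1]                            # return only the final step
y = unit.build(x, outputMask=mask, outputSize=5)
# y: [batch, 1, 5]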
def getW(self, O, w_):
    assert helper.check(w_, [self.memory.length], self.batchSize)

    k = tf.nn.softplus(helper.map("map_k", O, self.memory.bitDepth))
    b = tf.nn.softplus(helper.map("map_b", O, 1))
    g = tf.sigmoid(helper.map("map_g", O, 1))
    s = tf.nn.softmax(tf.sigmoid(helper.map("map_s", O, 5)))  # Added sigmoid
    y = tf.nn.softplus(helper.map("map_y", O, 1)) + 1

    wc = self.getCosSimSoftMax(k, b)
    wg = self.getWg(wc, g, w_)
    wm = self.getWmFast(wg, s)

    # wm can be negative -> the power would push it into the complex domain
    wp = tf.pow(wm, y)
    w = wp / (tf.reduce_sum(wp, axis=-1, keep_dims=True) + 0.001)
    assert helper.check(w, [self.memory.length], self.batchSize)
    return w
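# The 0.001 in the denominator guards against an all-zero weighting after
# the power. A minimal NumPy sketch of shift-then-sharpen, assuming
# getWmFast is the NTM circular-convolution shift (map_s's 5 outputs read
# as a shift kernel over offsets -2..+2):
import numpy as np

wg = np.array([0.05, 0.8, 0.05, 0.05, 0.05])   # gated weighting
s = np.array([0.0, 0.1, 0.8, 0.1, 0.0])        # shift kernel, offsets -2..+2
y = 3.0                                        # sharpening power, >= 1

wm = np.array([sum(wg[(i - j) % 5] * s[j + 2] for j in range(-2, 3))
               for i in range(5)])             # circular convolution

p = np.power(wm, y)
w = p / (p.sum() + 0.001)                      # renormalise with an epsilon
print(wm, w)                                   # w is more peaked than wm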
def buildTimeLayer(self, input, first=False):
    with tf.variable_scope(self.name):
        if first:
            if len(input.get_shape()) == 2:
                batchSize = tf.shape(input)[0]
            else:
                batchSize = None
            self.output = helper.getTrainableConstant(
                "startOutput", self.stateSize, batchSize)

        cc = tf.concat([input, self.output], axis=-1)

        # Standard GRU: update gate, reset gate, candidate state
        z = tf.sigmoid(helper.map("updateGate", cc, self.stateSize))
        r = tf.sigmoid(helper.map("resetGate", cc, self.stateSize))
        h = tf.tanh(
            helper.map("outputGate",
                       tf.concat([input, r * self.output], axis=-1),
                       self.stateSize))

        self.output = (1 - z) * self.output + z * h
        return self.output
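# The same NumPy sanity reference for the GRU update above (random matrices
# in place of the learned helper.map layers; the names Wz/Wr/Wh are
# hypothetical):
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

rng = np.random.default_rng(1)
inputSize, stateSize = 4, 8
Wz, Wr, Wh = (rng.normal(size=(inputSize + stateSize, stateSize))
              for _ in range(3))

x = rng.normal(size=(1, inputSize))
output = np.zeros((1, stateSize))   # startOutput

cc = np.concatenate([x, output], axis=-1)
z = sigmoid(cc @ Wz)
r = sigmoid(cc @ Wr)
h = np.tanh(np.concatenate([x, r * output], axis=-1) @ Wh)
output = (1 - z) * output + z * h
print(output.shape)  # (1, 8)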
def getWW(self, O):
    mapping = [self.memory.bitDepth, 1, 1, 1]
    o = helper.map("map_wwo", O, np.sum(mapping))
    o1, o2, o3, o4 = tf.split(o, mapping, -1)

    u = self.memory.getU()
    a = self.getA(u)

    kW = tf.nn.softplus(o1)
    bW = tf.nn.softplus(o2) + 1
    if self.cosSimMask:
        mask = tf.sigmoid(helper.map("map_ww_mask", O, self.memory.bitDepth))
        c = self.getCosSimSoftMax(kW, bW, mask)
    else:
        c = self.getCosSimSoftMax(kW, bW)

    gw = tf.sigmoid(o3)
    ga = tf.sigmoid(o4)
    w = gw * (ga * a + (1 - ga) * c)
    assert helper.check(w, [self.memory.length], self.batchSize)
    return w
def getU(self, O, _u, _wW, _wR):
    assert helper.check(_u, [self.memory.length], self.batchSize)
    assert helper.check(_wW, [self.memory.length], self.batchSize)
    assert helper.check(_wR, [self.amountReadHeads, self.memory.length],
                        self.batchSize)

    f = tf.sigmoid(helper.map("map_f", O, self.amountReadHeads))

    # If a read head read a memory address at t-1 and the free gate is
    # activated, release that memory
    v = tf.reduce_prod(1 - (tf.expand_dims(f, axis=-1) * _wR), axis=-2)
    assert helper.check(v, [self.memory.length], self.batchSize)

    # If you write to a memory address, reserve it
    u = (_u + _wW - (_u * _wW)) * v
    assert helper.check(u, [self.memory.length], self.batchSize)
    return u
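# A minimal NumPy sketch of this usage update: reading slot 1 with the free
# gate open releases it, while writing to slot 2 reserves it (toy values):
import numpy as np

u_prev = np.array([0.0, 1.0, 0.0, 0.5])   # previous usage
wW = np.array([0.0, 0.0, 1.0, 0.0])       # write weighting
wR = np.array([[0.0, 1.0, 0.0, 0.0]])     # one read head
f = np.array([1.0])                       # free gate fully open

v = np.prod(1 - f[:, None] * wR, axis=0)  # retention vector
u = (u_prev + wW - u_prev * wW) * v
print(u)  # [0.  0.  1.  0.5] -> slot 1 freed, slot 2 reserved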
cam.pos.x = max(movement_speed, min(cam.pos.x, map.width - movement_speed))
cam.pos.y = max(movement_speed, min(cam.pos.y, map.height - movement_speed))

# Reset window
window.fill(map.sky)
pygame.draw.rect(window, map.floor,
                 (0, (scr_height / 2) - viewpitch, scr_width, scr_height))
scene = cam.look(window, map.walls, show_map, offset=scr_width)

# First-person view
for i, item in enumerate(scene):
    if item[0] > scr_height:
        item[0] = scr_height

    # Fade the wall colour toward the render colour with squared distance
    sq = item[0] ** 2
    col_r = hl.map(sq, 0, map.r_distance, item[1][0], map.r_color[0])
    col_g = hl.map(sq, 0, map.r_distance, item[1][1], map.r_color[1])
    col_b = hl.map(sq, 0, map.r_distance, item[1][2], map.r_color[2])

    h = tbuih / item[0]
    to_draw = pygame.Rect(0, 0, w + 1, h)
    to_draw.center = ((i * w), half - viewpitch)

    # Clamp colour channels to the valid 0-255 range
    col_r = min(255, max(round(col_r), 0))
    col_g = min(255, max(round(col_g), 0))
    col_b = min(255, max(round(col_b), 0))
    pygame.draw.rect(window, (col_r, col_g, col_b), to_draw)

if show_map:
    pygame.draw.rect(window, (0, 0, 0),
                     (scr_width, 0, scr_height, mapview_width))
def buildTimeLayer(self, input, first=False):
    with tf.variable_scope(self.name):
        if self.AF is None:
            return helper.map("forward", input, self.outputSize)
        else:
            return self.AF(helper.map("forward", input, self.outputSize))