Code example #1
File: DNCHead.py Project: pks42/P-DNC
    def getWR(self, O):
        self.l = self.getL(self.l, self.wWriteList[-1], self.p)

        _w = self.wReadList[-1]
        assert helper.check(_w, [self.amountReadHeads, self.memory.length],
                            self.batchSize)

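        # Walk the temporal link matrix from the previous read weightings to
        # obtain the forward (f) and backward (b) read candidates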
        f = tf.matmul(_w, self.l)
        b = tf.matmul(_w, self.l, transpose_b=True)
        assert helper.check(f, [self.amountReadHeads, self.memory.length],
                            self.batchSize)
        assert helper.check(b, [self.amountReadHeads, self.memory.length],
                            self.batchSize)

        kR = tf.reshape(
            helper.map("map_kR", O,
                       self.amountReadHeads * self.memory.bitDepth),
            [-1, self.amountReadHeads, self.memory.bitDepth])
        bR = tf.nn.softplus(helper.map("map_bR", O, self.amountReadHeads)) + 1
        c = self.getCosSimSoftMaxExtra(kR, bR, self.amountReadHeads)

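        # pi holds the three read modes per head; mix the backward,
        # content-based and forward weightings accordingly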
        pi = tf.nn.softmax(
            tf.reshape(helper.map("map_pi", O, self.amountReadHeads * 3),
                       [-1, self.amountReadHeads, 3]))
        w = tf.expand_dims(pi[:, :, 0], axis=-1) * b + tf.expand_dims(
            pi[:, :, 1], axis=-1) * c + tf.expand_dims(pi[:, :, 2],
                                                       axis=-1) * f
        assert helper.check(w, [self.amountReadHeads, self.memory.length],
                            self.batchSize)

        self.p = self.getP(self.p, self.wWriteList[-1])

        return w
Code example #2
    def getW(self, O, w_):
        assert helper.check(w_, [self.memory.length], self.batchSize)

        k = tf.nn.softplus(helper.map("map_k", O, self.memory.bitDepth))
        b = tf.nn.softplus(helper.map("map_b", O, 1)) + 1
        g = tf.sigmoid(helper.map("map_g", O, 1))
        s = tf.nn.softmax(helper.map("map_s", O, 5))
        y = tf.nn.softplus(helper.map("map_y", O, 1)) + 1

        if self.cosSimMask:
            mask = tf.sigmoid(helper.map("map_y_mask", O,
                                         self.memory.bitDepth))
            wc = self.getCosSimSoftMax(k, b, mask)
        else:
            wc = self.getCosSimSoftMax(k, b)

        wg = self.getWg(wc, g, w_)
        wm = self.getWmFast(wg, s)

        # wm can be negative -> raising it to a power can push it into the complex domain
        sharpened = tf.pow(wm, y)
        w = sharpened / (tf.reduce_sum(sharpened, axis=-1, keepdims=True) + 0.001)

        assert helper.check(w, [self.memory.length], self.batchSize)
        return w
Code example #3
    def write(self, u):
        assert helper.check(u, [self.length], self.batchSize)

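        # Apply the queued write operation(s): multiply by the erase term, add
        # the write term, and mark the written locations as used in u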
        if len(self.ops) == 1:
            erase = 1 - tf.matmul(tf.expand_dims(self.ops[0]['w'], axis=-1),
                                  tf.expand_dims(self.ops[0]['e'], axis=-2))
            add = tf.matmul(tf.expand_dims(self.ops[0]['w'], axis=-1),
                            tf.expand_dims(self.ops[0]['a'], axis=-2))

            self.M.append(self.M[-1] * erase + add)

            u = u + self.ops[0]['w'] - (u * self.ops[0]['w'])

            assert helper.check(u, [self.length], self.batchSize)
            self.u.append(u)

        else:
            erase = tf.ones([self.batchSize, self.length, self.bitDepth])
            add = tf.zeros([self.batchSize, self.length, self.bitDepth])

            for op in self.ops:
                erase *= 1 - tf.matmul(tf.expand_dims(op['w'], axis=-1),
                                       tf.expand_dims(op['e'], axis=-2))
                add += tf.matmul(tf.expand_dims(op['w'], axis=-1),
                                 tf.expand_dims(op['a'], axis=-2))

                u = u + op['w'] - (u * op['w'])

            self.M.append(self.M[-1] * erase + add)

            assert helper.check(u, [self.length], self.batchSize)
            self.u.append(u)

        self.ops = []
Code example #4
    def getP(self, _p, w):
        assert helper.check(_p, [self.memory.length], self.batchSize)
        assert helper.check(w, [self.memory.length], self.batchSize)

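        # Precedence weighting: decay the previous precedence by the total
        # write strength and add the current write weighting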
        p = (1 - tf.reduce_sum(w, axis=-1, keepdims=True)) * _p + w
        assert helper.check(p, [self.memory.length], self.batchSize)

        return p
Code example #5
File: NTMHead.py Project: pks42/P-DNC
    def getWg(self, wc, g, w_):
        assert helper.check(wc, [self.memory.length], self.batchSize)
        assert helper.check(g, [1], self.batchSize)
        assert helper.check(w_, [self.memory.length], self.batchSize)

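        # Interpolation gate g blends the content-based weighting with the
        # previous weighting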
        result = g*wc + (1-g)*w_

        assert helper.check(result, [self.memory.length], self.batchSize)
        return result
Code example #6
    def queueForget(self, v):
        if len(v.get_shape()) == 2:
            assert helper.check(v, [self.length], self.batchSize)

            self.forgetQueue.append(tf.expand_dims(v, -2))

        elif len(v.get_shape()) == 3:
            assert helper.check(v, [v.get_shape()[1], self.length],
                                self.batchSize)

            self.forgetQueue.append(v)
Code example #7
    def getL(self, _l, w, _p):
        assert helper.check(_l, [self.memory.length, self.memory.length],
                            self.batchSize)
        assert helper.check(w, [self.memory.length], self.batchSize)
        assert helper.check(_p, [self.memory.length], self.batchSize)

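        # Link matrix update: scale the previous links by (1 - w_i - w_j), add
        # the outer product of the write weighting and the previous precedence,
        # then zero the diagonal with lMask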
        o = tf.ones([self.batchSize, self.memory.length, self.memory.length])
        o_w = o - tf.expand_dims(w, axis=-2)
        assert helper.check(o_w, [self.memory.length, self.memory.length],
                            self.batchSize)

        o_ww = o_w - tf.transpose(tf.expand_dims(w, axis=-2), perm=[0, 2, 1])
        assert helper.check(o_ww, [self.memory.length, self.memory.length],
                            self.batchSize)

        w_l = o_ww * _l
        assert helper.check(w_l, [self.memory.length, self.memory.length],
                            self.batchSize)

        w_p = tf.matmul(tf.expand_dims(w, axis=-1), tf.expand_dims(_p,
                                                                   axis=-2))
        assert helper.check(w_p, [self.memory.length, self.memory.length],
                            self.batchSize)

        l = (w_l + w_p) * self.lMask
        assert helper.check(l, [self.memory.length, self.memory.length],
                            self.batchSize)

        return l
Code example #8
    def getWR(self, O):
        mapping = [
            self.amountReadHeads * self.memory.bitDepth, self.amountReadHeads,
            self.amountReadHeads * 3, self.amountReadHeads
        ]
        o = helper.map("map_wro", O, np.sum(mapping))
        o1, o2, o3, o4 = tf.split(o, mapping, -1)

        self.l = self.getL(self.l, self.wWriteList[-1], self.p)

        _w = self.wReadList[-1]
        assert helper.check(_w, [self.amountReadHeads, self.memory.length],
                            self.batchSize)

        f = tf.matmul(_w, self.l)
        b = tf.matmul(_w, self.l, transpose_b=True)
        assert helper.check(f, [self.amountReadHeads, self.memory.length],
                            self.batchSize)
        assert helper.check(b, [self.amountReadHeads, self.memory.length],
                            self.batchSize)

        kR = tf.nn.softplus(
            tf.reshape(o1, [-1, self.amountReadHeads, self.memory.bitDepth]))
        bR = tf.nn.softplus(o2) + 1

        if self.cosSimMask:
            mask = tf.reshape(
                tf.sigmoid(
                    helper.map("map_wr_mask", O,
                               self.amountReadHeads * self.memory.bitDepth)),
                [-1, self.amountReadHeads, self.memory.bitDepth])
            c = self.getCosSimSoftMaxExtraMasked(kR, bR, self.amountReadHeads,
                                                 mask)
        else:
            c = self.getCosSimSoftMaxExtra(kR, bR, self.amountReadHeads)

        pi = tf.nn.softmax(tf.reshape(o3, [-1, self.amountReadHeads, 3]))
        w = tf.expand_dims(pi[:, :, 0], axis=-1) * b + tf.expand_dims(
            pi[:, :, 1], axis=-1) * c + tf.expand_dims(pi[:, :, 2],
                                                       axis=-1) * f
        assert helper.check(w, [self.amountReadHeads, self.memory.length],
                            self.batchSize)

        self.p = self.getP(self.p, self.wWriteList[-1])

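        # Free gates: queue per-head retention factors 1 - f*w so locations
        # that were just read can be released from usage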
        f = tf.sigmoid(o4)
        self.memory.queueForget(1 - (tf.expand_dims(f, axis=-1) * w))

        return w
Code example #9
File: NTMHead.py Project: pks42/P-DNC
    def getWmFast(self, wg, s):
        # The number of concat operations is proportional to the shift size instead of the memory length (only significantly faster with a large memory)
        assert helper.check(wg, [self.memory.length], self.batchSize)
        assert helper.check(s, [5], self.batchSize)

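        # Circularly shifted copies of wg; stacking them and multiplying by s
        # applies the shift distribution in a single matmul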
        w1 = tf.concat([wg[:,-2:], wg[:,:-2]], axis=-1)
        w2 = tf.concat([wg[:,-1:], wg[:,:-1]], axis=-1)
        w4 = tf.concat([wg[:,1:], wg[:,:1]], axis=-1)
        w5 = tf.concat([wg[:,2:], wg[:,:2]], axis=-1)

        w = tf.stack([w1,w2,wg,w4,w5], axis=-1)
        result = tf.squeeze(tf.matmul(w, tf.expand_dims(s, axis=-1)), axis=-1)

        assert helper.check(result, [self.memory.length], self.batchSize)
        return result
Code example #10
File: DNCHead.py Project: pks42/P-DNC
    def getU(self, O, _u, _wW, _wR):
        assert helper.check(_u, [self.memory.length], self.batchSize)
        assert helper.check(_wW, [self.memory.length], self.batchSize)
        assert helper.check(_wR, [self.amountReadHeads, self.memory.length],
                            self.batchSize)
        f = tf.sigmoid(helper.map("map_f", O, self.amountReadHeads))

        # If a read head read a memory address at t-1 and its free gate is activated, release that memory
        v = tf.reduce_prod(1 - (tf.expand_dims(f, axis=-1) * _wR), axis=-2)
        assert helper.check(v, [self.memory.length], self.batchSize)

        # If you write to a memory address, reserve it
        u = (_u + _wW - (_u * _wW)) * v
        assert helper.check(u, [self.memory.length], self.batchSize)

        return u
Code example #11
def user_profile_setemail(token, email):
    '''
    Update the authorised user's email address

    Parameters:
        token - The user's token that was generated from their user id
        email - The email the user wants to change to

    Returns:
        An empty dictionary

    Errors:
        InputError:
            The email is an invalid email
            The email has already been taken by another user
    '''
    check_token(token)
    data = get_data()
    # Checking if the email is valid
    if not check(email):
        raise InputError(description="Invalid email")
    # Checking if email is taken
    for user in data['users']:
        if user['email'] == email:
            raise InputError(description="Email is already taken")
    # Setting the email
    u_id = get_user_from_token(token)
    for user in data['users']:
        if user['u_id'] == u_id:
            user['email'] = email
    return {
    }
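A minimal usage sketch (not part of the original project): it assumes a token obtained from auth_register in Code example #24 and a hypothetical second account that already uses taken@example.com.

# Sketch only: the token comes from auth_register (Code example #24); the second
# call assumes another user already registered taken@example.com
account = auth_register("me@example.com", "password123", "First", "Last")
user_profile_setemail(account['token'], "new@example.com")  # succeeds, returns {}
try:
    user_profile_setemail(account['token'], "taken@example.com")
except InputError:
    pass  # email already taken by another user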
Code example #12
File: scheduler.py Project: monicadsong/hbc-webapp
    def heuristic(self, pieces):
        ordered = helper.order(pieces)
        names = [x.choreographer.name for x in ordered]
        print("the order of assignments is {}".format(names))
        while ordered:
            MRV = ordered[0]
            counts = helper.time_counts(ordered)
            values = [(x, counts[x]) for x in MRV.times]
            ordered = helper.order(ordered[1:])
            while len(values) != 0:
                # find the least constraining value
                LCV = min(values, key=lambda x: x[1])
                MRV.slot = LCV[0]
                if helper.check(ordered, MRV.slot):
                    for x in ordered:
                        if MRV.slot in x.times:
                            x.remove_time(MRV.slot)
                    # go to assigning the next piece
                    break
                else:
                    # go to the next least constraining value
                    MRV.slot = None
                    values.remove(LCV)
                    # if no more values remain
                    if not values:
                        print("Unable to assign time slot to {} rehearsal".
                              format(MRV.choreographer.name))
                        return False
        return True
Code example #13
def adjust_period(full_data, prev_period):
    """
    Helper function to adjust the lookback period slightly around its previous value;
    called every time the model is retrained.
    """

    if not prev_period: prev_period = 100
    max_acc, best_period, model = 0, prev_period, None

    for i in [-30, 0, 30]:
        new_period = prev_period + i
        if new_period < 50 or new_period > 500: new_period = 250

        try:
            accuracy, temp_model = check(full_data, new_period, period,
                                         test_period)
        except Exception:
            continue

        if accuracy > max_acc:
            best_period = new_period
            model = temp_model
            max_acc = accuracy

    print("{}-->{}".format(prev_period, best_period, period))
    return best_period, model, round(max_acc, 2)
Code example #14
File: LRUAHead.py Project: weiweivv2222/MANN
    def getWR(self, O):
        k = tf.nn.softplus(helper.map("map_k", O, self.memory.bitDepth))
        b = tf.nn.softplus(helper.map("map_b", O, 1))

        w = self.getCosSimSoftMax(k, b)

        assert helper.check(w, [self.memory.length], self.batchSize)
        return w
Code example #15
    def read(self, w):
        if len(w.get_shape()) == 2:
            assert helper.check(w, [self.length], self.batchSize)
            assert helper.check(self.M[-1], [self.length, self.bitDepth],
                                self.batchSize)

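            # Single read weighting: the read vector is the weighted sum of the
            # memory rows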
            r = tf.squeeze(tf.matmul(tf.expand_dims(w, axis=-2), self.M[-1]),
                           axis=-2)
            assert helper.check(r, [self.bitDepth], self.batchSize)

            return r
        else:
            multiple = w.get_shape()[1]

            assert helper.check(w, [multiple, self.length], self.batchSize)
            assert helper.check(self.M[-1], [self.length, self.bitDepth],
                                self.batchSize)

            r = tf.matmul(w, self.M[-1])
            assert helper.check(r, [multiple, self.bitDepth], self.batchSize)

            r = tf.reshape(r, [self.batchSize, multiple * self.bitDepth])
            assert helper.check(r, [multiple * self.bitDepth], self.batchSize)

            return r
Code example #16
File: LRUAHead.py Project: weiweivv2222/MANN
    def getWW(self, O):
        g = tf.sigmoid(helper.map("map_g", O, 1))
        b = tf.nn.softplus(helper.map("map_b", O, 1))

        # differentiable approximation of the least-used (lu) weighting
        lu = tf.nn.softmax((1 - tf.sigmoid(self.u)) * b)
        w = g * self.wReadList[-1] + (1 - g) * lu
        self.u = 0.95 * self.u + self.wReadList[-1] + w

        assert helper.check(w, [self.memory.length], self.batchSize)
        return w
Code example #17
    def getCosSimSoftMaxExtra(self, k, b, extra):
        '''
            Calculate the content-based weighting when there are multiple read heads
            TODO: merge with function above
        '''

        assert helper.check(k, [extra, self.memory.bitDepth], self.batchSize)
        assert helper.check(self.memory.M[-1],
                            [self.memory.length, self.memory.bitDepth],
                            self.batchSize)
        assert helper.check(b, [extra], self.batchSize)

        dot = tf.matmul(self.memory.M[-1], k, transpose_b=True)
        assert helper.check(dot, [self.memory.length, extra], self.batchSize)

        l1 = tf.sqrt(tf.reduce_sum(tf.pow(k, 2), axis=-1, keepdims=True))
        l2 = tf.expand_dims(tf.sqrt(
            tf.reduce_sum(tf.pow(self.memory.M[-1], 2), axis=-1)),
                            axis=-2)
        cosSim = tf.divide(tf.transpose(dot, perm=[0, 2, 1]),
                           tf.matmul(l1, l2) + 0.00001)
        assert helper.check(cosSim, [extra, self.memory.length],
                            self.batchSize)

        result = tf.nn.softmax((tf.expand_dims(b, axis=-1) * cosSim) + 0.00001)
        assert helper.check(result, [extra, self.memory.length],
                            self.batchSize)

        return result
Code example #18
    def getCosSimSoftMax(self, k, b, mask=None):
        '''
            Calculate the cosine similarity between the head key and each memory row
        '''

        assert helper.check(k, [self.memory.bitDepth], self.batchSize)
        assert helper.check(self.memory.M[-1],
                            [self.memory.length, self.memory.bitDepth],
                            self.batchSize)
        assert helper.check(b, [1], self.batchSize)

        if mask is not None:
            assert helper.check(mask, [self.memory.bitDepth], self.batchSize)

            M = self.memory.M[-1] * tf.expand_dims(mask, axis=-2) + 0.00001

            assert helper.check(M, [self.memory.length, self.memory.bitDepth],
                                self.batchSize)
        else:
            M = self.memory.M[-1]

        dot = tf.squeeze(tf.matmul(M, tf.expand_dims(k, axis=-1)), axis=-1)
        l1 = tf.sqrt(tf.reduce_sum(tf.pow(k, 2), axis=-1, keepdims=True))
        l2 = tf.sqrt(tf.reduce_sum(tf.pow(M, 2), axis=-1))
        cosSim = tf.divide(dot, l1 * l2 + 0.00001)

        result = tf.nn.softmax((b * cosSim) + 0.00001)
        assert helper.check(result, [self.memory.length], self.batchSize)

        return result
Code example #19
File: NTMHead.py Project: pks42/P-DNC
    def getWm(self, wg, s):
        assert helper.check(wg, [self.memory.length], self.batchSize)
        assert helper.check(s, [5], self.batchSize)

        size = self.memory.length
        shiftSize = 2

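        # Circular convolution of wg with the 5-element shift distribution s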
        def shift(i):
            if(i<0):
                return size+i
            if(i>=size):
                return i-size
            return i

        def indices(i):
            indices = [shift(i+j) for j in range(shiftSize, -shiftSize-1, -1)]
            return tf.reduce_sum(tf.gather(wg, indices, axis=-1) * s, axis=-1)

        result = tf.stack([indices(i) for i in range(0,size)], axis=-1)

        assert helper.check(result, [self.memory.length], self.batchSize)
        return result
Code example #20
    def getCosSimSoftMaxExtraMasked(self, k, b, extra, mask):
        '''
            Calculate the content-based weighting when there are multiple read heads
            TODO: merge with function above
        '''

        assert helper.check(k, [extra, self.memory.bitDepth], self.batchSize)
        assert helper.check(self.memory.M[-1],
                            [self.memory.length, self.memory.bitDepth],
                            self.batchSize)
        assert helper.check(b, [extra], self.batchSize)
        assert helper.check(mask, [extra, self.memory.bitDepth],
                            self.batchSize)

        M = tf.expand_dims(self.memory.M[-1], axis=-3) * tf.expand_dims(
            mask, axis=-2) + 0.00001
        assert helper.check(M,
                            [extra, self.memory.length, self.memory.bitDepth],
                            self.batchSize)

        dot = tf.squeeze(tf.matmul(M, tf.expand_dims(k, axis=-1)), axis=-1)
        assert helper.check(dot, [extra, self.memory.length], self.batchSize)

        l1 = tf.sqrt(tf.reduce_sum(tf.pow(k, 2), axis=-1, keepdims=True))
        l2 = tf.sqrt(tf.reduce_sum(tf.pow(M, 2), axis=-1))
        assert helper.check(l1, [extra, 1], self.batchSize)
        assert helper.check(l2, [extra, self.memory.length], self.batchSize)

        cosSim = tf.divide(dot, l1 * l2 + 0.00001)
        assert helper.check(cosSim, [extra, self.memory.length],
                            self.batchSize)

        result = tf.nn.softmax((tf.expand_dims(b, axis=-1) * cosSim) + 0.00001)
        assert helper.check(result, [extra, self.memory.length],
                            self.batchSize)

        return result
Code example #21
File: DNCHead.py Project: pks42/P-DNC
    def getWW(self, O):
        self.u = self.getU(O, self.u, self.wWriteList[-1], self.wReadList[-1])
        a = self.getA(self.u)

        kW = helper.map("map_kW", O, self.memory.bitDepth)
        bW = tf.nn.softplus(helper.map("map_bW", O, 1)) + 1
        c = self.getCosSimSoftMax(kW, bW)

        gw = tf.sigmoid(helper.map("map_gw", O, 1))
        ga = tf.sigmoid(helper.map("map_ga", O, 1))

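        # Write gate gw scales the write; ga blends allocation-based and
        # content-based addressing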
        w = gw * (ga * a + (1 - ga) * c)
        assert helper.check(w, [self.memory.length], self.batchSize)

        return w
Code example #22
    def forget(self):
        if len(self.forgetQueue) == 0:
            u = self.u[-1]
        elif len(self.forgetQueue) == 1:
            if self.forgetQueue[0].get_shape()[-2] == 1:
                u = self.u[-1] * tf.squeeze(self.forgetQueue[0], axis=-2)
            else:
                u = self.u[-1] * tf.reduce_prod(self.forgetQueue[0], axis=-2)
        else:
            u = self.u[-1] * tf.reduce_prod(
                tf.concat(self.forgetQueue, axis=-2), axis=-2)

        self.forgetQueue = []

        assert helper.check(u, [self.length], self.batchSize)
        return u
Code example #23
    def setupStartVariables(self):
        self.wWriteList = [tf.zeros([self.batchSize, self.memory.length])]
        self.wReadList = [
            tf.zeros(
                [self.batchSize, self.amountReadHeads, self.memory.length])
        ]

        self.p = tf.zeros([self.batchSize, self.memory.length])
        self.l = tf.zeros(
            [self.batchSize, self.memory.length, self.memory.length])

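        # lMask removes the diagonal of the link matrix (a slot never links to itself)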
        self.lMask = tf.ones([
            self.batchSize, self.memory.length, self.memory.length
        ]) - tf.eye(self.memory.length, batch_shape=[self.batchSize])
        assert helper.check(self.lMask,
                            [self.memory.length, self.memory.length],
                            self.batchSize)
Code example #24
File: auth.py Project: ShoreoNoSure/COMP1531
def auth_register(email, password, name_first, name_last):
    '''This program registers a user'''
    data = get_data()
    # check email
    if check(email) is False:
        raise InputError(description="Invalid Email")
    # Invalid password
    if len(password) < 6:
        raise InputError(description="Invalid Password")
    # Invalid firstname
    if not name_first or len(name_first) > 50:
        raise InputError(description="Invalid First Name")
    # Invalid Lastname
    if not name_last or len(name_last) > 50:
        raise InputError(description="Invalid Last Name")
    # Email already in use
    for user in data['users']:
        if user['email'] == email:
            raise InputError(description="Email already in use")

    # New user for backend
    u_id = get_max_u_id() + 1
    # Assume that you are logged in once you register
    token = generate_token(u_id)
    new_user = {
        'u_id': u_id,
        'name_first': name_first,
        'name_last': name_last,
        'password': str(hash_password(password)),
        'email': email,
        'token': token,
        'reset_code': 0,
        'logged_in': 1,
        'handle_str': get_handle(name_first, name_last),
        'permission_id': 2,
        'profile_img_url': ''
    }
    data['users'].append(new_user)
    # Owner permission id = 1, normal member id = 2
    if u_id == 1:
        new_user['permission_id'] = 1
    return {"u_id": u_id, "token": token}
Code example #25
def set_train_period(CLOSE, t_dict, a_dict):
    """
    Helper function that is called only once at the start to search for the lookback
    period with the highest accuracy in predicting the direction of change for each market.
    On subsequent days, call adjust_period() instead to tune the lookback period based on the previous one.
    """

    nMarkets = CLOSE.shape[1]
    for future_id in range(1, nMarkets):
        max_acc, train_period = 0, 0
        cur = np.array([k for k, g in groupby(CLOSE[:, future_id])])
        for i in range(50, 501, 50):
            accuracy, temp_model = check(cur, i, period, test_period)
            if accuracy > max_acc:
                max_acc = accuracy
                train_period = i
        if max_acc == 0: train_period = 0
        print('{}:({},{}%)'.format(markets[future_id - 1], train_period,
                                   int(max_acc * 100)))
        t_dict[future_id] = train_period
        a_dict[future_id] = max_acc
    return t_dict, a_dict
Code example #26
File: auth.py Project: ShoreoNoSure/COMP1531
def auth_login(email, password):
    '''This program logs a user in'''
    data = get_data()
    # Check email
    if check(email) is False:
        raise InputError(description="Invalid Email")
    #Password check
    password = hash_password(password)
    for user in data['users']:
        #if found a matching email
        if user['email'] == email:
            if user['password'] != password:
                #Not correct password
                raise InputError(description="Incorrect Password")
            else:
                #if password correct
                u_id = user['u_id']
                user['logged_in'] = 1
                #generate a token
                token = generate_token(u_id)
                return {'u_id': u_id, 'token': token}
    # No matching email
    raise InputError(description="No user found with this email")
Code example #27
    def getWW(self, O):
        mapping = [self.memory.bitDepth, 1, 1, 1]
        o = helper.map("map_wwo", O, np.sum(mapping))
        o1, o2, o3, o4 = tf.split(o, mapping, -1)

        u = self.memory.getU()
        a = self.getA(u)

        kW = tf.nn.softplus(o1)
        bW = tf.nn.softplus(o2) + 1
        if self.cosSimMask:
            mask = tf.sigmoid(
                helper.map("map_ww_mask", O, self.memory.bitDepth))
            c = self.getCosSimSoftMax(kW, bW, mask)
        else:
            c = self.getCosSimSoftMax(kW, bW)

        gw = tf.sigmoid(o3)
        ga = tf.sigmoid(o4)

        w = gw * (ga * a + (1 - ga) * c)
        assert helper.check(w, [self.memory.length], self.batchSize)

        return w
Code example #28
    def getA(self, u):
        assert helper.check(u, [self.memory.length], self.batchSize)

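        # Sort usage in ascending order (top_k on -u); the allocation below
        # favours the least-used slots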
        uSorted, uIndices = tf.nn.top_k(-1 * u, k=self.memory.length)
        uSorted *= -1
        assert helper.check(uSorted, [self.memory.length], self.batchSize)
        assert helper.check(uIndices, [self.memory.length], self.batchSize)

        cumProd = tf.cumprod(uSorted + 0.0001, axis=-1, exclusive=True)
        assert helper.check(cumProd, [self.memory.length], self.batchSize)

        aSorted = (1 - uSorted) * cumProd
        assert helper.check(aSorted, [self.memory.length], self.batchSize)

        a = tf.reshape(
            tf.gather(
                tf.reshape(aSorted, [self.batchSize * self.memory.length]),
                tf.reshape(uIndices, [self.batchSize * self.memory.length])),
            [self.batchSize, self.memory.length])
        assert helper.check(a, [self.memory.length], self.batchSize)

        return a
Code example #29
    def queueWrite(self, w, erase, add):
        assert helper.check(w, [self.length], self.batchSize)
        assert helper.check(erase, [self.bitDepth], self.batchSize)
        assert helper.check(add, [self.bitDepth], self.batchSize)

        self.ops.append({'w': w, 'e': erase, 'a': add})