Example #1
0
    def baselevel_activation(self, times, d, now=None):
        ''' Base level activation.

            Input: Vector of calling times (in milliseconds) for a chunk.
                   Like [45023, 45046, 45102]
                   Must be non-empty (log of an empty sum is undefined).

            Formula to compute baselevel activation:

            B_i = ln(sum_from_j=1_to_n(t_j^(-d)))    with

            n:  The number of presentations for chunk i = length of times.
            tj: The time (in seconds) since the jth presentation.
            d:  The decay parameter which is set using the
                :bll (base-level learning) parameter.
                This parameter is almost always set to 0.5.

            now: reference timestamp in milliseconds; defaults to the
                 current wall-clock time.

            Values range around 0
                - activation falls
                + activation rises
        '''
        # `is None` rather than truthiness: a legitimate timestamp of 0
        # must not trigger the default.
        if now is None:
            now = utilities.milliseconds(datetime.datetime.now())

        # Elapsed times are converted from milliseconds to seconds before
        # applying the power law.
        summed = sum(math.pow((now - t) / 1000.0, -d) for t in times)

        return math.log(summed)
Example #2
0
    def paired_associate(self):
        ''' Simulate the paired-associate experiment.

            Runs ``nr_runs`` presentation trials, repeats the whole
            simulation 100 times, and prints per-trial averages of the
            number of recalled pairs and the mean recall probability.
        '''
        from environment import Pair

        # NOTE(review): `pairs` is built but never referenced below —
        # verify whether get_times/simulate_run are supposed to receive it.
        pairs = [Pair('bank', '0'), Pair('card', '1'), Pair('dart', '2'),
                 Pair('face', '3'), Pair('game', '4'),
                 Pair('hand', '5'), Pair('jack', '6'), Pair('king', '7'),
                 Pair('lamb', '8'), Pair('mask', '9'),
                 Pair('neck', '0'), Pair('pipe', '1'), Pair('guip', '2'),
                 Pair('rope', '3'), Pair('sock', '4'),
                 Pair('tent', '5'), Pair('vent', '6'), Pair('wall', '7'),
                 Pair('xray', '8'), Pair('zinc', '9')]

        nr_runs = 8
        now = utilities.milliseconds(datetime.datetime.now())

        total_recalled = [0] * nr_runs
        total_probs = [0] * nr_runs
        for _ in range(100):

            times = self.get_times(nr_runs, now)
            probs = self.get_probabilities(times, now)
            mean_probs = self.get_mean(probs)
            recalled = self.simulate_run(probs)

            for j, r in enumerate(recalled):
                total_recalled[j] += r
                total_probs[j] += mean_probs[j]

        for i in range(nr_runs):
            # Divide by 100.0 so the recall averages are not truncated to
            # integers under Python 2 division semantics.
            print('{0} {1} {2}'.format(
                i + 1, total_recalled[i] / 100.0, total_probs[i] / 100.0))
Example #3
0
    def formulate_expectation(self, word):
        ''' Formulate an expectation (positive / none / negative) about
            whether the agent will recall `word`, based on its retrieval
            probability.

            Returns a (status message, emotion) tuple; the emotion is None
            when no expectation is formed.
        '''
        now = utilities.milliseconds(datetime.datetime.now())

        # Pass `now` down so the activation is evaluated at this single
        # fixed timestamp (previously `now` was computed but unused).
        activation = self.activation(word.times, now)
        retrieval_prob = self.retrieval_probability(activation, CogModule.THRESHOLD, CogModule.NOISE)
        retrieval_latency = self.retrieval_latency(CogModule.LATENCY, activation)

        self.expectation = self.get_expectation_name(retrieval_prob)
        status = str(retrieval_prob) + ': '
        emotion = None

        if self.expectation == 'positive':
            status += 'Expecting right answer.'
            emotion = CogModule.EXPECT_POS[0]
        elif self.expectation == 'none':
            status += 'Expecting nothing.'
        elif self.expectation == 'negative':
            status += 'Expecting wrong answer'
            emotion = CogModule.EXPECT_NEG[0]
        else:
            print('Wrong expectation value {0}'.format(self.expectation))

        self.logger.log('  Formulate expectation: prob={0:.2f}% latency={1:.2f}s : ({2},{3})'.format(retrieval_prob, retrieval_latency, self.expectation, emotion))

        return (status, utilities.emotion_by_name(emotion))
Example #4
0
    def optimized_learning(self, times, d, now=None):
        ''' Formula for base level learning. Optimizes computation time.

            B_i = ln(n / (1-d)) - d * ln(L)     with

            n = nr of presentations of chunk i (= len(times), non-empty)
            L = The lifetime of chunk i in seconds (the time since its
                creation, i.e. since times[0])
            d = The decay parameter

            now: reference timestamp in milliseconds; defaults to the
                 current wall-clock time.
        '''
        # `is None` rather than truthiness: a timestamp of 0 is valid.
        if now is None:
            now = utilities.milliseconds(datetime.datetime.now())

        n = len(times)
        # Convert the lifetime from milliseconds to seconds so this
        # formula uses the same time unit as baselevel_activation()
        # (the two are interchangeable alternatives in activation()).
        L = (now - times[0]) / 1000.0

        return math.log(n / (1 - d)) - d * math.log(L)
Example #5
0
    def present_number(self, now):
        ''' Present the number of the current pair to the agent and advance
            to the next pair.

            now -- datetime of the presentation.

            If no answer was given for the preceding word, it is first
            evaluated as a miss ('-1').
        '''
        time_delta = now - self.start_time
        print('{0} : PRESENT NR {1}'.format(time_delta, time_delta.seconds))
        # Check if an answer has been given. If not, evaluate as a miss.
        if not self.answer_given:
            self.logger.log('  No answer given')
            self.evaluate('-1', now)

        # Upper bound must be exclusive: self.pairs[len(self.pairs)] would
        # raise an IndexError (the original used <=).
        if 0 <= self.index < len(self.pairs):
            number = self.pairs[self.index].number
            self.pairs[self.index].number_called(utilities.milliseconds(now))
            self.index += 1

            return self.agent.present_number(number)
        else:
            print('Index Error')
Example #6
0
    def activation(self, times, now=None):
        ''' Computes the activation for the given chunk: base level
            activation (via the configured formula) plus logistic noise.

            times -- presentation timestamps in milliseconds; an empty
                     list yields 0.0 activation.
            now   -- reference timestamp in milliseconds; defaults to the
                     current wall-clock time.
        '''
        if not times:
            return 0.0

        d = CogModule.DECAY_RATE
        s = CogModule.NOISE

        if now is None:
            now = utilities.milliseconds(datetime.datetime.now())

        # Forward `now` so both formulas evaluate at this one reference
        # time instead of recomputing it (previously `now` was unused).
        if CogModule.FUNCTION == 'optimized':
            base_activation = self.optimized_learning(times, d, now)
        else:
            base_activation = self.baselevel_activation(times, d, now)

        noise = self.logistic_distribution(s)
        return base_activation + noise
Example #7
0
    def present_word(self, now):
        ''' Present the word of the current pair to the agent and start the
            answer timer.

            now -- datetime of the presentation.
        '''
        time_delta = now - self.start_time
        print('{0} : PRESENT WORD {1}'.format(time_delta, time_delta.seconds))

        if self.has_next():
            word = self.pairs[self.index].word
            number = self.pairs[self.index].number

            self.logger.log('\nTask [{0} : {1}] @ {2}s'.format(
                            word.word, number.word, time_delta))

            self.pairs[self.index].word_called(utilities.milliseconds(now))
            self.start_time_answer = now
            self.answer_given = False

            return self.agent.present_word(word, number)
        else:
            print('Index Error')