Example #1
    def put(self, sample):
        '''
        Online learning is used in this network: one sample is fed in at a time.
        :param sample: one sample
        :return:
        '''
        # Compute the sample's hidden-layer mapping vector
        h = self.__hidden_vector__(sample)

        # If every component of h is 0, add a new node
        if collections.all(h, lambda i: i <= 0):
            n = Node(center=sample, sigma=1.0)
            n.parent = self.root
            self.leafs.append(n)
            return

        # Find the owning node: the node with the highest firing probability for this sample
        i = np.argmax(h)  # when several entries of h are equal, argmax returns only the first
        self.leafs[i].put_sample(sample)

        # If this is the first sample, also evaluate the activation energy cost of all nodes
        if len(self.samples) <= 0:
            self.samples.append(sample)
            self.evulate()
            return
        # Add the new sample's activation energy cost to the total cost
        #self.activation_energy_cost += np.sum(h)
        # Compute how distinguishable the new sample is from the existing samples
        #for s in self.samples:
        #    h1 = self.__hidden_vector__(s)
        #    dis = np.linalg.norm(np.array(h1) - np.array(h))
        #    if dis > Net.eplison*max(1.0,math.sqrt(len(self.leafs))):
        #        self.differentablecount += 1
        # Add the new sample to the sample set
        self.samples.append(sample)
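The `collections` referenced throughout these examples is not the standard-library module but a project-specific helper module of predicate utilities. A minimal sketch consistent with how it is called in these snippets (hypothetical reconstruction; the real module may differ):

# collections.py -- hypothetical reconstruction of the project's utility module,
# inferred only from the call sites in these examples.
import builtins
import numpy as np

def all(iterable, predicate):
    # True if the predicate holds for every element (vacuously True when empty)
    return builtins.all(predicate(x) for x in iterable)

def any(iterable, predicate):
    # True if the predicate holds for at least one element
    return builtins.any(predicate(x) for x in iterable)

def foreach(iterable, action):
    # Apply an action to every element for its side effects
    for x in iterable:
        action(x)

def findall(iterable, predicate):
    # All elements satisfying the predicate
    return [x for x in iterable if predicate(x)]

def isEmpty(iterable):
    # True for None or a container with no elements
    return iterable is None or len(iterable) <= 0

def equals(a, b):
    # Element-wise equality that tolerates scalars, lists and numpy arrays
    return bool(np.all(np.asarray(a) == np.asarray(b)))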
Example #2
    def activate(self, inputs):
        '''
        One activation pass of the network.
        :param inputs: list of (box, value) tuples; box is a Box instance or its id and
                       value is the input value vector for that box. The list may cover
                       only a subset of the sensor boxes.
        :return:
        '''
        if inputs is None: return []

        allbox = self.allbox()
        count = len(allbox)

        for box, values in inputs:
            box.activate(values)
            allbox.remove(box)
            count -= 1

        max_depth = np.max([box.depth for box in allbox])
        for i in range(1, max_depth + 1):
            boxes = self.findBox(depth=i)
            for box in boxes:
                if box not in allbox: continue
                if box.type != Box.type_attention: continue
                if collections.all(box.inputs, lambda b: b in allbox): continue
                box.do_attention()
                allbox.remove(box)
                count -= 1
Example #3
    def execute(self, neuron, net, activeno, clockinfo, **context):
        '''
        Execute: for a neuron with no inputs, record value 0 and an inactive state, and return 0.
                 For a neuron whose inputs are incomplete, record nothing and return None.
                 For a neuron whose inputs are complete, record the computed value and
                 activation state, and return the value.
        :param neuron:    Neuron         the neuron to compute
        :param net:       NeuralNetwork  the network
        :param activeno:  float          activation number
        :param clockinfo: tuple          clock info (previous clock, current clock, time step)
        :param context:   context (reserved)
        :return:
        '''
        # Unpack the clock info
        lastclock, clock, clockstep = clockinfo

        # Get the input synapses of the neuron being computed
        synapses = net.getSynapses(toId=neuron.id)
        if synapses is None or len(synapses) <= 0: return None

        # Check that every input synapse has a value
        if not collections.all(synapses, lambda s: 'value' in s.states.keys()):
            return None

        # Get the neuron's attention expression object
        attentation = neuron.getVariableValue('attentation')

        # Sum all synapse input values (weights have already been applied)
        inputs = list(map(lambda s: s.states['value'], synapses))
        sum = np.sum(inputs)

        # Add the bias
        sum += neuron['bias']

        # Get the activation function
        activationFunctionConfig = neuron.modelConfiguration[
            'activationFunction']
        activationFunction = ActivationFunction.find(
            activationFunctionConfig.name)
        if activationFunction is None:
            raise RuntimeError('Neuron computation failed (CommonNeuronModel): '
                               'invalid activation function: ' + activationFunctionConfig.name)

        # Assemble the activation function parameters (a parameter may come from the network)
        activationParamNames = activationFunction.getParamNames()
        activationParams = {}
        for name in activationParamNames:
            if name in map(lambda v: v.nameInfo.name, neuron.variables):
                activationParams[name] = neuron[name]
            elif name in activationFunctionConfig:
                activationParams[name] = activationFunctionConfig[name]

        # Evaluate the activation function on the summed input
        value, activation = activationFunction.calculate(
            sum, activationParams)  #? Problem here: the activation function parameters cannot be passed in

        # Record the state
        neuron.states['value'] = value
        neuron.states['activation'] = activation

        return value
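`ActivationFunction` above is the project's own registry; only `find(name)`, `getParamNames()`, and `calculate(x, params)` returning a `(value, activation)` pair are visible from this call site. A hypothetical sketch of a registry with that interface, for illustration only (the class body and the example sigmoid registration are assumptions):

# Hypothetical sketch of an activation-function registry matching the interface
# used above (find / getParamNames / calculate); not the project's real code.
import numpy as np

class ActivationFunction:
    _registry = {}

    def __init__(self, name, func, param_names=()):
        self.name = name
        self._func = func
        self._param_names = list(param_names)
        ActivationFunction._registry[name] = self

    @classmethod
    def find(cls, name):
        # Return the registered function object, or None if unknown
        return cls._registry.get(name)

    def getParamNames(self):
        # Names of extra parameters the function expects (e.g. 'threshold')
        return list(self._param_names)

    def calculate(self, x, params):
        # Returns (value, activation) as the caller above expects
        value = self._func(x, params)
        return value, value > 0

# Example registration: a sigmoid shifted by an optional 'threshold' parameter
ActivationFunction('sigmoid',
                   lambda x, p: 1.0 / (1.0 + np.exp(-(x - p.get('threshold', 0.0)))),
                   param_names=['threshold'])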
Example #4
 def do_inference(self):
     if not collections.all(self.inputs,
                            lambda box: box.expection is not None):
         return None
     attention = self.getAttentionExpression()
     if attention is None: return
     expection = attention.do_expection(self)
     self.expection = expection
Example #5
    def execute(self, neuron, net, **context):
        '''
        Execute: for a neuron with no inputs, record value 0 and an inactive state, and return 0.
                 For a neuron whose inputs are incomplete, record nothing and return None.
                 For a neuron whose inputs are complete, record the computed value and
                 activation state, and return the value.
        :param neuron:  the neuron to compute
        :param net:     the network
        :param context: context (reserved)
        :return:
        '''

        # Get the input synapses of the neuron being computed
        synapses = net.getSynapses(toId=neuron.id)
        if synapses is None or len(synapses) <= 0: return

        # Check that every input synapse has a value
        if not collections.all(synapses, lambda s: 'value' in s.states):
            return None

        # Sum all synapse input values (weights have already been applied)
        inputs = list(map(lambda s: s.states['value'], synapses))
        sum = np.sum(inputs)

        # Add the bias
        sum += neuron['bias']

        # Get the activation function
        activationFunctionConfig = neuron.modelConfiguration[
            'activationFunction']
        if neuron.activationFunction is None:
            raise RuntimeError('Neuron computation failed (CommonNeuronModel): '
                               'invalid activation function: ' + activationFunctionConfig.name)

        # Assemble the activation function parameters (a parameter may come from the network)
        activationParamNames = neuron.activationFunction.getParamNames()
        activationParams = {}
        for name in activationParamNames:
            if name in map(lambda v: v.nameInfo.name, neuron.variables):
                activationParams[name] = neuron[name]
            elif name in activationFunctionConfig:
                activationParams[name] = activationFunctionConfig[name]

        # Evaluate the activation function on the summed input
        value, activation = neuron.activationFunction.calculate(
            sum, activationParams)  #? Problem here: the activation function parameters cannot be passed in

        # Record the state
        neuron.states['value'] = value
        neuron.states['activation'] = activation

        return value
Example #6
 def rearrange_depth(self):
     allbox = self.allbox()
     maxdepth = 0
     for b in allbox:
         b.depth = -1
     for b in self.get_sensor_box():
         b.depth = 0
         allbox.remove(b)
     while len(allbox) > 0:
         for b in list(allbox):  # iterate over a copy: allbox is modified inside the loop
             if len(b.inputs) <= 0: allbox.remove(b)
             elif collections.all(b.inputs, lambda x: x.depth != -1):
                 b.depth = np.max([t.depth for t in b.inputs]) + 1
                 maxdepth = b.depth if maxdepth < b.depth else maxdepth
                 allbox.remove(b)
     allbox = self.allbox()
     for b in allbox:
         if b.depth < 0: b.depth = maxdepth + 1
Example #7
    def doTestStat(self):
        '''
        Compute statistics over the test results.
        :return: None
        '''

        task = self.definition.runner.task
        if task.test_y is None or len(task.test_y) <= 0:
            return

        testcount = correctcount = 0
        mae = mse = 0.0
        for index, result in enumerate(self.taskTestResult):
            if task.test_y is None or len(task.test_y) <= index:
                continue
            if collections.equals(self.taskTestResult[index],
                                  task.test_y[index]):
                correctcount += 1
            else:
                diff = abs(self.taskTestResult[index] - task.test_y[index])
                if not task.kwargs[
                        'multilabel'] and diff <= task.kwargs['deviation']:
                    correctcount += 1
                elif task.kwargs['multilabel']:
                    if isinstance(task.kwargs['deviation'], float):
                        if np.average(diff) <= task.kwargs['deviation']:
                            correctcount += 1
                    elif isinstance(task.kwargs['deviation'], list):
                        if collections.all(diff - task.kwargs['deviation'],
                                           lambda t: t < 0):
                            correctcount += 1

            mae += abs(self.taskTestResult[index] - task.test_y[index])
            mse += pow(self.taskTestResult[index] - task.test_y[index], 2)
            testcount += 1

        self.taskstat[NeuralNetworkTask.INDICATOR_TEST_COUNT] = testcount
        self.taskstat[NeuralNetworkTask.INDICATOR_CORRECT_COUNT] = correctcount
        self.taskstat[
            NeuralNetworkTask.INDICATOR_ACCURACY] = correctcount / testcount
        self.taskstat[
            NeuralNetworkTask.INDICATOR_MEAN_ABSOLUTE_ERROR] = mae / testcount
        self.taskstat[
            NeuralNetworkTask.INDICATOR_MEAN_SQUARED_ERROR] = mse / testcount
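The correctness test above accepts a prediction either when it equals the label exactly or when its absolute deviation stays within `task.kwargs['deviation']` (a single float, or a per-dimension list in the multilabel case). A small standalone illustration with made-up numbers:

# Illustration of the deviation-based correctness rule used in doTestStat,
# with hypothetical predictions and labels.
import numpy as np

deviation = [0.5, 0.2]                      # per-dimension tolerance (multilabel case)
prediction = np.array([1.4, 0.35])
label = np.array([1.0, 0.30])

diff = abs(prediction - label)              # [0.4, 0.05]
correct = bool(np.all(diff - np.array(deviation) < 0))
print(correct)                              # True: every component is within tolerance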
Example #8
    def execute(self, effector, net, **context):
        # Get the input synapses of the effector being computed
        synapses = net.getSynapses(toId=effector.id)
        if synapses is None or len(synapses) <= 0: return

        # Check that every input synapse has a value
        if not collections.all(synapses, lambda s: 'value' in s.states):
            return None

        # Take the maximum of all synapse input values
        inputs = list(map(lambda s: s.states['value'], synapses))
        value = maxinput = max(inputs)

        # Check for inhibitory synapses (to be implemented)

        # Record the state
        effector.states['value'] = value
        effector.states['activation'] = value > 0

        return value
Example #9
    def isInExpressionParam(self, var_name, parts='', includelist=True):
        '''
        Whether the parameter part of the attention expression contains the given parameter(s).
        :param var_name:     Union(str, list) the parameter name(s)
        :param parts:        str which part of the expression parameters to check; currently only
                             meaningful for the 'T' operation, with values 'cause' or 'effect'
        :param includelist:  bool whether a time series of the parameter also counts
        :return: bool
        '''
        if isinstance(var_name, str):
            params = self.getExpressionParam()
            if parts is not None and parts != '':
                contents = self.getExpressionParam()
                if self.getExpressionOperation() == 'T':
                    return contents[
                        0] if parts == '0' or parts == 'cause' else contents[1]
            return var_name in params or '['+var_name+']' in params if includelist \
                    else var_name in params
        else:

            def f(var):
                return self.isInExpressionParam(var, parts, includelist)

            return collections.all(var_name, f)
Example #10
    def __spilt(self, node=None):
        '''
        Perform node splitting: find the node to split and divide it into child nodes.
        :return:
        '''
        # Find the node to split: the node with the highest total firing rate among all nodes
        if node is None:
            firerates = np.array(
                list(map(lambda leaf: leaf.firerate, self.leafs)))
            index = np.argmax(firerates)
            node = self.leafs[index]
        # Find the node's split areas: the grid cells where samples occur most frequently
        spiltareas = []
        spiltpositions = []
        while len(spiltareas) < Node.max_spilt_count:  # the number of split areas must not exceed the limit
            maxvalue = np.amax(node.grids)  # maximum of the node's grid counts
            if maxvalue <= 0: break
            position = np.where(node.grids == np.max(node.grids))
            if len(position[0]) >= 2:  # if several cells hit the maximum, keep only the first, e.g. [[0,0],[1,7]]
                if len(position) == 1:
                    position = tuple(np.array([position[0][0]]))
                elif len(position) >= 2:
                    position = tuple([
                        np.array([position[0][0]]),
                        np.array([position[1][0]])
                    ])

            pos_center = node.getgridcenter(position)
            #position = np.delete(position, range(1, len(position[0])), axis=1)
            # If this is the first split area, add it directly
            if len(spiltareas) <= 0:
                spiltareas.append(pos_center)
                spiltpositions.append(copy.deepcopy(position))
                node.grids[position] = 0.0
                continue
            # Otherwise check the distance to the existing split areas
            if not collections.any(
                    spiltpositions, lambda pos: np.sum(
                        [abs(p[0] - p[1])
                         for p in zip(pos, position)]) <= len(position)):
                spiltareas.append(pos_center)
                spiltpositions.append(copy.deepcopy(position))
            node.grids[position] = 0.0

        if len(spiltareas) <= 0:
            return
        # If there is only one split area, adjust the current node instead of creating a child
        if len(spiltareas) <= 1:
            node._center = spiltareas[0]
            node.scale(Node.width_scale)
            #self.__evulate()
            return

        # Create child nodes
        self.leafs.remove(node)
        for spiltarea in spiltareas:
            sigma = node._sigma * Node.width_scale
            n = Node(spiltarea, sigma)
            n.parent = node
            node.childs.append(n)
            self.leafs.append(n)

        # Distribute the parent node's samples to the child nodes
        for s in node.samples:
            index = np.argmin(
                list(map(lambda c: np.linalg.norm(c - s), spiltareas)))
            if collections.all(node.childs[index].samples,
                               lambda x: np.linalg.norm(np.array(s) - np.array(x)) != 0):
                node.childs[index].put_sample(s)

        node.samples = []
        self.nodescount += len(spiltareas)
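`np.where(grid == grid.max())` returns a tuple with one index array per grid dimension, so when the maximum occurs in several cells `__spilt` keeps only the first coordinate from each array. A short standalone illustration:

# How the split-position lookup behaves when the maximum appears more than once.
import numpy as np

grid = np.array([[3., 0., 3.],
                 [0., 1., 0.]])

position = np.where(grid == np.max(grid))
print(position)            # (array([0, 0]), array([0, 2])) -> two maxima at (0,0) and (0,2)

# Keep only the first hit, as __spilt does for a 2-D grid
first = (np.array([position[0][0]]), np.array([position[1][0]]))
grid[first] = 0.0          # zero that cell so the next iteration finds a new maximum
print(grid[0, 0])          # 0.0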
Example #11
 def do_expection(self, box):
     if box.inputs is None: return None
     if not collections.all(box.inputs, lambda b: b.expection is not None):
         return None
     activation_features = [input.expection for input in box.inputs]
     return np.average(activation_features)
Example #12
    def _activateByTime(self, net, inputs, activeno, **kwargs):
        '''
        Time-driven activation.
        :param net:      NeuralNetwork  the network
        :param inputs:   Union(float, list, tuple, ndarray...)  the inputs
        :param activeno: int   activation number
        :param kwargs:   dict  activation parameters
        :return: tuple (network outputs, end time); the outputs are a list whose items are
                 tuples (value, activation, firerate, time)
        '''
        # Parameter: iteration time step
        ticks = 0.01 if kwargs is None or 'ticks' not in kwargs else float(
            kwargs['ticks'])
        # Parameter: maximum clock
        maxclock = 0 if kwargs is None or 'maxclock' not in kwargs else float(
            kwargs['maxclock'])
        clock = 0.0

        # Reset the network's runtime state
        net.reset()

        # Get the input neurons
        inputNeurons = net.getInputNeurons()
        # Set the inputs
        for i, neuron in enumerate(inputNeurons):
            if i >= len(inputs): break
            model = neuron.getModel()
            model.execute(neuron, net, value=inputs[i], no=activeno)

        # Parameter: how many consecutive stable output checks are required before stopping
        outputMaxCheckCount = 1 if kwargs is None or 'outputMaxCheckCount' not in kwargs else int(
            kwargs['outputMaxCheckCount'])
        outputCheckCount = 0
        lastOutputs = []

        # Repeatedly step the network and check the results
        while 1:
            # Advance the clock
            lastclock, clock = clock, clock + ticks
            if maxclock > 0 and clock >= maxclock:
                return lastOutputs, clock

            # Iterate over all neurons
            neurons = net.getNeurons()
            outputNeurons = net.getOutputNeurons()
            outputno = 0
            outputs = [None] * len(outputNeurons)
            for i, neuron in enumerate(neurons):
                if neuron in inputNeurons:
                    continue
                # execute checks whether any state has changed and computes the change; if the computation is valid it returns the new result, otherwise the previous one
                model = neuron.getModel()
                value, activation, firerate, time = \
                    model.execute(neuron, net, value=inputs[i], no=activeno,time=(lastclock,clock,ticks))

                # If this execution produced a result and the neuron is an output neuron
                if neuron in outputNeurons:
                    if time == clock:
                        outputs[outputno] = (value, activation, firerate, time)
                    outputno += 1

            # If a maximum clock is set, keep running until it is reached
            if maxclock > 0:
                continue
            # Check whether all output neurons have produced output and whether it is still changing
            if not collections.all(
                    outputs,
                    lambda o: o is not None and o[0] is not None):  # outputs incomplete, keep looping
                lastOutputs = copy.deepcopy(outputs)
                continue

            # Check whether the outputs equal the previous outputs
            if operator.eq(lastOutputs, outputs):
                outputCheckCount += 1
                if outputCheckCount >= outputMaxCheckCount:
                    return lastOutputs, clock
            else:
                outputCheckCount = 0
                lastOutputs = copy.deepcopy(outputs)
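Both activation loops terminate once the output tuple has stayed unchanged for `outputMaxCheckCount` consecutive checks. A stripped-down sketch of that termination rule with hypothetical output sequences (the helper name `run_until_stable` is invented for the illustration):

# Minimal illustration of the output-stability termination used in
# _activateByTime / _activateByEvent, with made-up output sequences.
import copy
import operator

def run_until_stable(output_sequence, outputMaxCheckCount=2):
    outputCheckCount = 0
    lastOutputs = []
    for step, outputs in enumerate(output_sequence):
        if operator.eq(lastOutputs, outputs):      # unchanged since the last step
            outputCheckCount += 1
            if outputCheckCount >= outputMaxCheckCount:
                return step, lastOutputs           # stable long enough: stop
        else:
            outputCheckCount = 0
            lastOutputs = copy.deepcopy(outputs)
    return None, lastOutputs                       # never stabilized

steps = [[(0.1, False)], [(0.8, True)], [(0.8, True)], [(0.8, True)]]
print(run_until_stable(steps))                     # (3, [(0.8, True)])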
Example #13
    def _activateByEvent(self, net, inputs, activeno, **kwargs):
        '''
        Event-driven activation.
        :param net:      NeuralNetwork  the network
        :param inputs:   Union(float, list, tuple, ndarray...)  the inputs
        :param activeno: int   activation number
        :param kwargs:   dict  activation parameters
        :return: (network outputs, end time); the outputs are a list whose items are
                 tuples (value, activation, firerate, time)
        '''
        # Parameter: maximum number of iterations
        maxIterCount = 0 if kwargs is None or 'maxIterCount' not in kwargs else int(
            kwargs['maxIterCount'])
        iterCount = 0
        # Parameter: maximum clock
        maxclock = 0 if kwargs is None or 'maxclock' not in kwargs else float(
            kwargs['maxclock'])
        lastclock, clock = 0.0, 0.0

        # Reset the network's runtime state
        net.reset()

        # Get the input neurons
        inputNeurons = net.getInputNeurons()
        # Set the inputs
        for i, neuron in enumerate(inputNeurons):
            if i >= len(inputs): break
            model = neuron.getModel()
            model.execute(neuron, net, value=inputs[i], no=activeno)

        # Parameter: how many consecutive stable output checks are required before stopping
        outputMaxCheckCount = 1 if kwargs is None or 'outputMaxCheckCount' not in kwargs else int(
            kwargs['outputMaxCheckCount'])
        outputCheckCount = 0
        lastOutputs = []

        while 1:
            # Check whether the maximum clock has been reached
            if maxclock > 0 and clock >= maxclock:
                return lastOutputs, clock
            # Get all neurons
            neurons = net.getNeurons()
            outputNeurons = net.getOutputNeurons()
            outputno = 0
            outputs = [None] * len(outputNeurons)
            nexttimes = []
            # Iterate over all neurons and query the time of each one's next event
            for i, neuron in enumerate(neurons):
                if neuron in inputNeurons:
                    nexttimes.append(sys.maxsize)
                    continue
                # check returns the time of the next event at which this neuron's state changes
                model = neuron.getModel()
                time = model.check(neuron,
                                   net,
                                   value=inputs[i],
                                   no=activeno,
                                   time=(lastclock, clock, clock - lastclock))
                nexttimes.append(time)

            # Earliest next-event time and the indices of the neurons that fire then
            mintime = np.min(nexttimes)
            mintimeindex = np.where(np.array(nexttimes) == mintime)[0]
            lastclock, clock = clock, mintime

            # Execute the neurons whose next event occurs earliest
            for index in mintimeindex:
                neuron = neurons[index]
                model = neuron.getModel()
                value, activation, firerate, time = \
                    model.execute(neuron, net, value=inputs[i], no=activeno, time=(lastclock, clock, clock-lastclock))

            # Collect the outputs
            outputs = []
            for j, outputNeuron in enumerate(outputNeurons):
                value, activation, firerate, time = outputNeuron.getReturnState(
                )
                outputs.append((value, activation, firerate, time))

            # If a maximum clock is set, keep running until it is reached
            if maxclock > 0:
                continue

            # Check whether all output neurons have produced output and whether it is still changing
            if not collections.all(
                    outputs,
                    lambda o: o is not None and o[0] is not None):  # outputs incomplete, keep looping
                lastOutputs = copy.deepcopy(outputs)
                continue

            # Check whether the outputs equal the previous outputs
            if operator.eq(lastOutputs, outputs):
                outputCheckCount += 1
                if outputCheckCount >= outputMaxCheckCount:
                    return lastOutputs, clock
            else:
                outputCheckCount = 0
                lastOutputs = copy.deepcopy(outputs)
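The event-driven loop advances the clock to the earliest pending event and fires every neuron scheduled at that time. A small illustration of selecting those indices with numpy, using hypothetical event times:

# Selecting all neurons whose next event is the earliest, as _activateByEvent does.
import numpy as np

nexttimes = [0.7, 0.3, 0.3, 1.2]               # hypothetical next-event times per neuron
mintime = np.min(nexttimes)                    # 0.3
mintimeindex = np.where(np.array(nexttimes) == mintime)[0]
print(mintime, mintimeindex)                   # 0.3 [1 2] -> neurons 1 and 2 fire now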
Example #14
    def activate(self, net, inputs):
        '''
        Activate the network.
        :param net:    the network under test
        :param inputs: the input values
        :return: outputs
        '''
        # Get the input neurons
        inputNeurons = net.getInputNeurons()

        # Reset neuron and synapse states
        collections.foreach(net.getNeurons(), lambda n: n.reset())
        collections.foreach(net.getSynapses(), lambda s: s.reset())

        # Set the inputs
        for d, v in enumerate(inputs):
            if d >= len(inputNeurons): break
            model = models.nervousModels.find(
                inputNeurons[d].modelConfiguration.modelid)
            model.execute(inputNeurons[d], net, value=v)

            s = net.getOutputSynapse(inputNeurons[d].id)
            if collections.isEmpty(s): continue

            collections.foreach(s, lambda x: x.getModel().execute(x, net))

        # Iterate until all output neurons have a value or the iteration limit is reached
        ns = net.getNeurons()
        neuronCount = net.getNeuronCount()
        iterCount = 0
        outputNeurons = net.getOutputNeurons()
        while not collections.all(
                outputNeurons,
                lambda n: 'value' in n.states) and iterCount <= neuronCount:
            iterCount += 1
            uncomputeNeurons = collections.findall(
                ns, lambda n: 'value' not in n.states)
            if collections.isEmpty(uncomputeNeurons): break
            for n in uncomputeNeurons:
                model = n.getModel()
                synapses = net.getInputSynapse(n.id)
                if collections.isEmpty(synapses): continue
                if not collections.all(synapses,
                                       lambda s: 'value' in s.states):
                    continue
                model.execute(n, net)

                synapses = net.getOutputSynapse(n.id)
                if collections.isEmpty(synapses): continue
                collections.foreach(synapses,
                                    lambda s: s.getModel().execute(s, net))

        # Set the value of output neurons that produced no result to 0
        outputNeuronsWithNoResult = collections.findall(
            outputNeurons, lambda n: 'value' not in n.states)
        if not collections.isEmpty(outputNeuronsWithNoResult):
            collections.foreach(outputNeuronsWithNoResult,
                                lambda n: n.__setitem__('value', 0))
        # Collect the results
        outputs = list(map(lambda n: n['value'], outputNeurons))
        if len(outputs) == 1: outputs = outputs[0]
        return outputs