Example #1
def act_dyjs(env, socl_net, agent_no, agent, record, T, Tfi):
    assert isinstance(socl_net, SoclNet)
    global_arg = arg.init_global_arg()
    # Pick a reinforcement target according to how strongly other agents influence this one
    out_power = []
    agent.policy_now = 'dyjs_noaim'  # record the current action
    for x in range(global_arg["Nagent"]):
        if x != agent_no:
            out_power.append(socl_net.power[x][agent_no]['weight'])
        else:
            out_power.append(0)
    if max(out_power) > socl_net.arg['power_thld']:  # only consider role definition when someone's influence on this agent exceeds the threshold
        to_power = random_choice(norm_softmax(out_power))
        for aim in range(global_arg['Nagent']):
            if aim != to_power:
                delta = env.arg['ACT']['dyjs']['delta_power'](
                    socl_net.power[to_power][aim]['weight'])
                socl_net.power_delta(to_power, aim, delta)
                leadership_bill.leader_bill.add_record(
                    record_type='talking',
                    meeting="dyjs",
                    talking_type="dyjs",
                    speaker=Sign(agent_no, T + Tfi, 'act_dyjs'),
                    listener=Sign(aim, T + Tfi, 'act_dyjs'))
        agent.policy_now = 'dyjs'  # record the current action

    logging.debug(agent.policy_now)
    return socl_net, agent
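The helpers norm_softmax and random_choice are used above but not defined in this snippet; the stand-ins below are a minimal sketch of plausible implementations, an assumption rather than the project's actual code.

import math
import random

def norm_softmax(weights):
    # Softmax over the raw influence weights, yielding a probability distribution.
    exps = [math.exp(w) for w in weights]
    total = sum(exps)
    return [e / total for e in exps]

def random_choice(probs):
    # Draw an index according to the given probability distribution.
    r = random.random()
    acc = 0.0
    for i, p in enumerate(probs):
        acc += p
        if r < acc:
            return i
    return len(probs) - 1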
Example #2
    def __init__(self):
        # Environment initialization
        self.global_arg = arg.init_global_arg()
        env_arg = arg.init_env_arg(self.global_arg)
        # Added a load step for the NK model
        self.main_env = Env(env_arg)
        for model_type in ['st', 'ed']:
            if all_config['checkpoint']['env'][model_type]['enable']:
                self.main_env.nkmodel_load(all_config['checkpoint']['env']['path'], model_type)
            self.main_env.nkmodel_save(all_config["nkmodel_path"][model_type], model_type)
        # Agent initialization
        self.agents = []
        csv_head_agent = ['agent_no', 'st_state', 'st_value', 'insight', 'xplr', 'xplt', 'enable']
        moniter.AppendToCsv(csv_head_agent, all_config['agent_csv_path'])
        for i in range(self.global_arg["Nagent"]):
            # Random initial position for each agent
            start_st_label = [randint(0, self.main_env.P - 1) for j in range(self.main_env.N)]
            state_start = State(start_st_label)
            self.agents.append(Agent(arg.init_agent_arg(self.global_arg,
                                                        self.main_env.arg),
                                     self.main_env))
            self.agents[i].state_now = deepcopy(state_start)
            self.agents[i].agent_id = i

            # Instead of giving each agent a global area at the start, add a single-point area containing the start state
            start_area = Area(self.agents[i].state_now, [False] * self.main_env.N, 0)
            start_area.info = get_area_sample_distr(env=self.main_env, area=start_area, state=self.agents[i].state_now,
                                                    T_stmp=0, sample_num=1, dfs_r=1)
            start_area.sign = Sign(i, 0, 'start')
            self.agents[i].renew_m_info(start_area, 0)
            self.a_plan = None
            logging.info("state:%s, st_value:%s, insight:%.5s, xplr:%.5s, xplt:%.5s, enable:%.5s" % (
                str(self.agents[i].state_now),
                self.main_env.getValue(self.agents[i].state_now, 0),
                self.agents[i].agent_arg['a']['insight'],
                self.agents[i].agent_arg['a']['xplr'],
                self.agents[i].agent_arg['a']['xplt'],
                self.agents[i].agent_arg['a']['enable']))
            # Record agent info to CSV
            csv_info_agent = ['agent%d' % i,
                              self.agents[i].state_now,
                              self.main_env.getValue(self.agents[i].state_now, 0),
                              self.agents[i].agent_arg['a']['insight'],
                              self.agents[i].agent_arg['a']['xplr'],
                              self.agents[i].agent_arg['a']['xplt'],
                              self.agents[i].agent_arg['a']['enable']]
            moniter.AppendToCsv(csv_info_agent, all_config['agent_csv_path'])

        # Social network initialization
        soclnet_arg = arg.init_soclnet_arg(self.global_arg, env_arg)
        self.socl_net = SoclNet(soclnet_arg)
        self.socl_net.new_flat_init()  # changed the initialization method
        # self.socl_net.flat_init()
        if all_config['checkpoint']['socl_network']['enable']:
            self.socl_net.power_load(all_config['checkpoint']['socl_network']['power'])
            self.socl_net.relat_load(all_config['checkpoint']['socl_network']['relat'])
        self.record = Record()

        self.metric = metrics.register_all_metrics(metrics.Metrics())
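The all_config keys referenced in this constructor imply a layout roughly like the sketch below; the concrete paths, values, and file format are assumptions for illustration only.

# Hypothetical all_config layout inferred from the keys used above.
all_config_example = {
    'agent_csv_path': 'output/agents.csv',
    'nkmodel_path': {'st': 'output/nk_st.pkl', 'ed': 'output/nk_ed.pkl'},
    'checkpoint': {
        'env': {
            'path': 'checkpoints/env/',
            'st': {'enable': False},
            'ed': {'enable': False},
        },
        'socl_network': {
            'enable': False,
            'power': 'checkpoints/power.pkl',
            'relat': 'checkpoints/relat.pkl',
        },
    },
}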
Example #3
    def __init__(self):
        self.arg = arg.init_global_arg()
        self.T = self.arg['T']
        self.Ts = self.arg['Ts']
        self.Nagent = self.arg['Nagent']
        # Use a comprehension so each agent slot is its own list; [[]] * n would alias one shared list.
        self.agents = [[] for _ in range(self.Nagent)]
        self.env = []
        self.socl_net = []
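For reference, a quick illustration of the aliasing pitfall avoided above; this is standard Python behaviour, shown only to motivate the comprehension.

shared = [[]] * 3                     # three references to one list object
shared[0].append('x')
assert shared == [['x'], ['x'], ['x']]

independent = [[] for _ in range(3)]  # three distinct lists
independent[0].append('x')
assert independent == [['x'], [], []]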
Example #4
def act_whlj(env, socl_net, agent_no, agent, record, T, Tfi):
    assert isinstance(socl_net, SoclNet)
    global_arg = arg.init_global_arg()
    to_select = [x for x in range(global_arg['Nagent'])]
    del to_select[agent_no]
    to_whlj = sample(to_select, env.arg['ACT']['whlj']['k'])
    for aim in to_whlj:
        delta = env.arg['ACT']['whlj']['delta_relate'](
            socl_net.relat[aim][agent_no]['weight'])
        socl_net.relat_delta(aim, agent_no, delta)
        leadership_bill.leader_bill.add_record(
            record_type='talking',
            meeting="whlj",
            talking_type="whlj",
            speaker=Sign(agent_no, T + Tfi, 'act_whlj'),
            listener=Sign(aim, T + Tfi, 'act_whlj'))

    agent.policy_now = 'whlj'  # record the current action
    logging.debug(agent.policy_now)
    return socl_net, agent
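act_whlj reads a neighbour count k and a delta function from env.arg['ACT']['whlj']; the shape below is a hypothetical illustration of that configuration, not the project's actual values.

# Hypothetical ACT config consumed by act_whlj (and, analogously, act_dyjs above).
act_arg_example = {
    'whlj': {
        'k': 3,                                     # number of agents to contact per step
        'delta_relate': lambda w: 0.1 * (1.0 - w),  # smaller gain as the relation weight grows
    },
    'dyjs': {
        'delta_power': lambda w: 0.1 * (1.0 - w),
    },
}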
Example #5
    if state is None:
        state = area.center
    # logging.debug("start")
    state_values = get_area_sample_value(env, area, sample_num, state, dfs_r)
    all_value = sorted(state_values)
    return {
        "max": all_value[-1],
        "min": all_value[0],
        "avg": sum(all_value) / len(all_value),
        "mid": all_value[len(all_value) // 2],
        "p0.16": all_value[int(round((len(all_value) - 1) * 0.16))],
        "p0.84": all_value[int(round((len(all_value) - 1) * 0.84))],
        'T_stmp': T_stmp
    }
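
# Worked example (added for illustration) of the same index arithmetic on a
# hand-made sample; get_area_sample_value itself is not reproduced here.
_demo_values = sorted([0.2, 0.5, 0.1, 0.9, 0.4])                         # [0.1, 0.2, 0.4, 0.5, 0.9]
assert _demo_values[len(_demo_values) // 2] == 0.4                       # "mid"
assert _demo_values[int(round((len(_demo_values) - 1) * 0.16))] == 0.2   # "p0.16": index round(0.64) = 1
assert _demo_values[int(round((len(_demo_values) - 1) * 0.84))] == 0.5   # "p0.84": index round(3.36) = 3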


if __name__ == "__main__":
    import numpy as np

    all_config.load()
    moniter.LogInit()
    logging.info("Start")
    global_arg = arg.init_global_arg()
    env_arg = arg.init_env_arg(global_arg)
    N = env_arg['N']
    k = env_arg['K']
    P = env_arg['P']
    T = env_arg['T']
    env = Env(env_arg)
    print(env.getModelPeakDistri())
Example #6
    def __init__(self):
        self.global_arg = arg.init_global_arg()
        self.main_env = Env(arg.init_env_arg(self.global_arg))
        self.agents = []