Example no. 1
def manager() -> Manager:
    """Returns a Manager object.

    Returns:
        An instance of class ``core.Manager``.
    """
    manager = Manager()
    manager.schema = SchemaManager()
    manager.config = ConfigManager('tests/config/config.json', manager.schema)
    return manager
Example no. 2
def administrator():
    user = login("manager")
    if user and user["identified"] == "manager":
        admin = Manager('admin')
        while True:
            for index, i in enumerate(admin.manager_list, start=1):
                print("%s: %s" % (index, i[0]))
            choice = input("Please choose: ").strip()
            if choice.isdigit() and 1 <= int(choice) <= len(admin.manager_list):
                getattr(admin, admin.manager_list[int(choice) - 1][1])()
            else:
                print("Invalid choice! Please choose again")
Example no. 3
def manager():
    """
    Administrator login: list the available actions from the admin object's
    data attributes, read a menu number, and use getattr() to dispatch to the
    matching method of the Manager class.
    :return:
    """
    user = login('manager')
    if user and user['identity'] == 'manager':
        mana = Manager('admin')
        while True:
            for index, i in enumerate(mana.manager_dic, 1):
                print('%s. %s' % (index, i[0]))
            num = input('num>>>:').strip()
            if num.isdigit() and int(num) > 0:
                try:
                    getattr(mana, mana.manager_dic[int(num) - 1][1])()
                except Exception as e:
                    print(e)
                    print('\033[1;31mPlease enter a valid menu number\033[0m')
            else:
                print('\033[1;31mPlease enter a positive integer\033[0m')
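Examples 2 and 3 rely on the same reflection pattern: the Manager class stores its menu as a sequence of (label, method_name) pairs, and getattr() turns the stored string into a bound method at call time. A minimal, self-contained sketch of such a class (the attribute and method names here are illustrative, not taken from either project):

class Manager:
    # (label shown to the user, name of the method to dispatch to)
    manager_dic = [
        ('create a student', 'create_student'),
        ('list teachers', 'show_teacher'),
    ]

    def __init__(self, name):
        self.name = name

    def create_student(self):
        print('creating a student ...')

    def show_teacher(self):
        print('listing teachers ...')

mana = Manager('admin')
label, method_name = Manager.manager_dic[0]
getattr(mana, method_name)()  # resolves 'create_student' to a bound method and calls it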
Example no. 4
def command():
    action = request.args.get("action", "").lower()
    plug = request.args.get("plug")
    if action not in ["on", "off"]:
        return "Error - action must be on or off."
    with open(os.path.join(os.environ["MYH_HOME"], "data", "plugs.json"), 'r') as plugs_file:
        plug_data = json.load(plugs_file)

    if plug not in plug_data:
        return "Error - " + str(plug) + " is not a plug entry."
    else:
        plug_data[plug]["plug_state"] = action
        # save the new state for the manager
        with open(os.path.join(os.environ["MYH_HOME"], "data", "plugs.json"), 'w') as plugs_file:
            json.dump(plug_data, plugs_file)
        # do the action
        my_manager = Manager()
        my_manager.turn_on_off_plug(plug, action)

    return "done"
Example no. 5
def run():
    """Main entry point."""
    print('\033[1;42mWelcome to the course selection system\033[0m')
    auth_msg = auth.login()  # user info returned by the login step
    if auth_msg:
        if auth_msg['roleid'] == 0:  # 0 means administrator
            obj = Manager(auth_msg['username'])  # instantiate an administrator
            while True:
                for i, func in enumerate(Manager.menu, 1):  # print the class-level menu
                    print(i, func[1])
                try:
                    func_num = int(input("Enter a menu number: "))
                    # map the string to a method on the object and call it (reflection)
                    getattr(obj, Manager.menu[func_num - 1][0])()
                except Exception:
                    print("Invalid input")
        elif auth_msg['roleid'] == 1:  # 1 means teacher
            obj = Teacher(auth_msg['username'])  # instantiate a teacher
            while True:
                for i, func in enumerate(Teacher.menu, 1):
                    print(i, func[1])
                try:
                    func_num = int(input("Enter a menu number: "))
                    getattr(obj, Teacher.menu[func_num - 1][0])()
                except Exception:
                    print("Invalid input")
        elif auth_msg['roleid'] == 2:  # 2 means student
            obj = Student(auth_msg['username'])  # instantiate a student
            for i, func in enumerate(Student.menu, 1):
                print(i, func[1])
            try:
                func_num = int(input("Enter a menu number: "))
                getattr(obj, Student.menu[func_num - 1][0])()
            except Exception:
                print("Invalid input")
        else:
            print("Something is wrong with your role; please contact an administrator")
Example no. 6
    def __init__(self, config_file_path: str):
        """
        Args:
            config_file_path: The path to the OpenADMS Node configuration file.
        """
        self.logger = logging.getLogger('monitor')
        self._config_file_path = config_file_path
        manager = Manager()

        try:
            manager.schema = SchemaManager()
            manager.config = ConfigManager(self._config_file_path,
                                           manager.schema)
            manager.project = ProjectManager(manager)
            manager.node = NodeManager(manager)
            manager.sensor = SensorManager(manager.config)
            manager.module = ModuleManager(manager)
        except ValueError as e:
            self.logger.error(f'Fatal error: {e}')

        self._manager = manager
Example no. 7
import sys
import logging

from settings import config as cfg
from core.manager import Manager

logger = logging.getLogger('bot')

if __name__ == '__main__':
    # DEBUG = True if '-debug' in sys.argv else False

    manager = Manager(token=cfg.API_TOKEN)

    if cfg.DEBUG:
        logger.info("SetUp Polling")
        manager.start_polling()
    else:
        logger.info("SetUp Webhook")
        manager.start_webhook(cfg.WEBHOOK)
Example no. 8
from os import path

from core.metadata import Metadata
from core.manager import Manager

from components.channel import Channel
from components.series import Series
from components.episode import Episode

# Instantiate the manager with the path to the root of the file structure
m = Manager(path.abspath('./YouTube'))

# Add the types that the structure uses
m.add_resource(Channel, None)
m.add_resource(Series, Channel)
m.add_resource(Episode, Series)

# Scan to get the items from the structure
m.scan()

# The world is your canvas :)
Example no. 9
from core.manager import Manager

from reddit import RedditChannel
from imgur import ImgurChannel
from settings import BOT_TOKEN, DEBUG, PORT, HOST, HEROKU_APP_NAME, ADMINS

if __name__ == '__main__':
    manager = Manager(token=BOT_TOKEN, admins=ADMINS)

    manager.register(RedditChannel())
    manager.register(ImgurChannel())

    if DEBUG:
        manager.start_polling()
    else:
        manager.start_webhook(
            listen=HOST,
            port=PORT,
            url_path=BOT_TOKEN,
            webhook_url=f'https://{HEROKU_APP_NAME}.herokuapp.com/{BOT_TOKEN}')
Example no. 10
def register_student():
    print('Registration'.center(20, '-'))  # tuition is assumed paid O(∩_∩)O
    mana = Manager('admin')
    mana.create_student()
Example no. 11
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--adj_nodes_path",
                        help="Path to a list of ips of adjacent nodes",
                        required=True,
                        type=str)

    args = vars(parser.parse_args())

    self_ip = get_my_ip()
    adj_nodes_path = args['adj_nodes_path']

    adj_nodes_ips = None
    with open(adj_nodes_path, 'r') as f:
        adj_nodes_ips = f.read().splitlines()

    my_node = Node(self_ip, adj_nodes_ips)

    newstdin = os.fdopen(os.dup(sys.stdin.fileno()))
    manager = Manager()

    my_node.yet_to_submit = manager.dict()
    my_node.jobQ = manager.list()
    my_node.resources = manager.dict()
    my_node.job_pid = manager.dict()
    my_node.lost_resources = manager.dict()
    my_node.pids = manager.dict()

    my_node.leader_last_seen = manager.dict()

    my_node.log_q = manager.Queue()
    my_node.failed_msgs = manager.list()
    my_node.backup_state = manager.list()

    my_node.ip_dict = manager.dict()
    my_node.ip_dict['root'] = self_ip
    # my_node.backup_ip_dict = manager.dict()

    log_file = 'main_log_data.txt'
    logging_p = Process(target=start_logger,
                        args=(my_node.log_q, log_file, "INFO"))
    logging_p.start()
    time.sleep(5)
    my_node.pids['logging'] = logging_p.pid

    interface_p = Process(target=submit_interface, args=(my_node, newstdin))
    interface_p.start()
    my_node.submit_interface_pid = interface_p.pid

    # start receiving messages
    msg_socket = build_socket(self_ip)

    # Leader election
    initiate_leader_election(my_node)

    msg = Message()
    matchmaker_started = False

    while True:
        conn, recv_addr = msg_socket.accept()
        recv_addr = recv_addr[0]
        msg = recv_msg(conn)

        ty = "INFO"
        # if 'HEARTBEAT' in msg.msg_type:
        #     ty = "DEBUG"

        # print('received msg of type %s from %s' %(msg.msg_type, recv_addr))
        add_log(my_node,
                'received msg of type %s from %s' % (msg.msg_type, recv_addr),
                ty)

        if msg.msg_type == 'LE_QUERY':
            handlers.le_query_handler(my_node, recv_addr, msg.content)
        elif msg.msg_type == 'LE_ACCEPT':
            handlers.le_accept_handler(my_node, recv_addr, msg.content)
        elif msg.msg_type == 'LE_REJECT':
            handlers.le_reject_handler(my_node, recv_addr, msg.content)
        elif msg.msg_type == 'LE_TERMINATE':
            handlers.le_terminate_handler(my_node)
        elif msg.msg_type == 'BACKUP_QUERY':
            handlers.backup_query_handler(my_node)
            leader_crash_detector_p = Process(target=leader_crash_detect,
                                              args=(my_node, ))
            leader_crash_detector_p.start()
            my_node.pids['leader_crash_detector'] = leader_crash_detector_p.pid

        elif msg.msg_type == 'EXEC_JOB':
            handlers.exec_job_handler(my_node, msg.content)
        elif msg.msg_type == 'QUERY_FILES':
            handlers.query_files_handler(my_node, recv_addr, msg.content)
        elif msg.msg_type == 'HEARTBEAT':
            handlers.heartbeat_handler(my_node, recv_addr, msg.content,
                                       manager)
        elif msg.msg_type == 'FILES_CONTENT':
            handlers.files_content_handler(my_node, recv_addr, msg.content)
        elif msg.msg_type == 'ARE_YOU_ALIVE':
            handlers.send_heartbeat(my_node, recv_addr)
        elif msg.msg_type == 'HEARTBEAT_ACK':
            handlers.heartbeat_ack_handler(my_node)
        elif msg.msg_type == 'LOG_FILE':
            handlers.log_file_handler(my_node, msg.content)
        elif msg.msg_type == 'LOG_FILE_ACK':
            handlers.log_file_ack_handler(my_node, recv_addr, msg.content)
        elif msg.msg_type == 'COMPLETED_JOB':
            handlers.completed_job_handler(my_node, recv_addr, msg.content)
        elif msg.msg_type == 'PREEMPT_AND_EXEC':
            handlers.preempt_and_exec_handler(my_node, recv_addr, msg.content)
        elif msg.msg_type == 'PREEMPTED_JOB':
            handlers.preempted_job_handler(my_node, recv_addr, msg.content)
        elif msg.msg_type == 'STATUS_JOB':
            handlers.status_job_handler(my_node, recv_addr, msg.content)
        elif msg.msg_type == 'STATUS_REPLY':
            handlers.status_reply_handler(my_node, msg.content)
        elif msg.msg_type == 'GET_ALIVE_NODE':
            handlers.get_alive_node_handler(my_node, recv_addr, msg.content)
        elif msg.msg_type == 'GET_ALIVE_NODE_ACK':
            handlers.get_alive_node_ack_handler(my_node, msg.content)
        elif msg.msg_type == 'DISPLAY_OUTPUT':
            handlers.display_output_handler(my_node, recv_addr, msg.content)
        elif msg.msg_type == 'FWD_DISPLAY_OUTPUT':
            handlers.fwd_display_output_handler(my_node, msg.content)
        elif msg.msg_type == 'DISPLAY_OUTPUT_ACK':
            handlers.display_output_ack_handler(my_node, msg.content)
        elif msg.msg_type == 'FWD_DISPLAY_OUTPUT_ACK':
            handlers.fwd_display_output_ack_handler(my_node, msg.content)
        elif msg.msg_type == 'BACKUP_HEARTBEAT':
            handlers.backup_heartbeat_handler(my_node)
        elif msg.msg_type == 'BACKUP_HEARTBEAT_ACK':
            handlers.backup_heartbeat_ack_handler(my_node, msg.content)
        elif msg.msg_type == 'U_ARE_LEADER':
            my_node.running_jobs = manager.dict()
            my_node.leader_jobPQ = JobPQ(manager)
            my_node.last_heartbeat_ts = manager.dict()
            my_node.leader_joblist = manager.list()

            handlers.new_leader_handler(my_node, recv_addr, msg.content)
            matchmaker_p = Process(target=matchmaking, args=(my_node, ))
            matchmaker_p.start()

            matchmaker_started = True

            add_log(my_node, "Starting Matchmaker", ty="INFO")

            crash_detector_p = Process(target=crash_detect, args=(my_node, ))
            crash_detector_p.start()

            add_log(my_node, "Starting Crash Detector", ty="INFO")
            time.sleep(5)

            my_node.pids['matchmaker'] = matchmaker_p.pid
            my_node.pids['crash_detector'] = crash_detector_p.pid

        elif msg.msg_type == 'ELECT_NEW_LEADER':
            handlers.elect_new_leader_handler(my_node)
        elif msg.msg_type == 'I_AM_NEWLEADER':
            handlers.i_am_newleader_handler(my_node, recv_addr)
        elif msg.msg_type == 'LE_FORCE_LEADER':
            handlers.le_force_leader_handler(my_node, recv_addr, msg.content)
        else:
            add_log(my_node,
                    "Message of unexpected msg type" + msg.msg_type,
                    ty="DEBUG")

        if (my_node.le_elected and my_node.self_ip == my_node.ip_dict['root']
                and not matchmaker_started):

            my_node.running_jobs = manager.dict()
            my_node.leader_jobPQ = JobPQ(manager)
            my_node.last_heartbeat_ts = manager.dict()
            my_node.leader_joblist = manager.list()

            matchmaker_p = Process(target=matchmaking, args=(my_node, ))
            matchmaker_p.start()
            # time.sleep(5)

            add_log(my_node, "Starting Matchmaker", ty="INFO")

            matchmaker_started = True

            crash_detector_p = Process(target=crash_detect, args=(my_node, ))
            crash_detector_p.start()
            time.sleep(5)

            add_log(my_node, "Starting Crash Detector", ty="INFO")

            my_node.pids['matchmaker'] = matchmaker_p.pid
            my_node.pids['crash_detector'] = crash_detector_p.pid
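Unlike the other examples, the Manager here appears to be multiprocessing.Manager from the standard library (the imports are not shown): manager.dict(), manager.list() and manager.Queue() return proxy objects whose state is shared with the Process workers spawned above. A self-contained sketch of that pattern, with illustrative names:

from multiprocessing import Manager, Process

def worker(shared, q):
    # proxies can be mutated from a child process; the parent sees the change
    shared['status'] = 'running'
    q.put('hello from the worker')

if __name__ == '__main__':
    manager = Manager()
    shared = manager.dict()   # like my_node.pids above
    q = manager.Queue()       # like my_node.log_q above
    p = Process(target=worker, args=(shared, q))
    p.start()
    p.join()
    print(shared['status'], '-', q.get())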
Example no. 12
    exit(0)


def parse_options():
    if '-h' in sys.argv:
        usage()
    elif '-v' in sys.argv:
        version()
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hv")
    except getopt.GetoptError as e:
        om.error("Invalid parameter({})\n".format(str(e)))
        exit(1)
    return args


if __name__ == '__main__':
    os.chdir(os.path.dirname(os.path.abspath(__file__)))

    builtins.config = ConfigManager()
    builtins.om = OutputManager()
    builtins.em = ExploitsManager()

    banner()
    options = parse_options()

    manager = Manager()
    manager.run()

    exit(0)
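The notable trick here is attaching the managers to the builtins module: once builtins.om is set, every module in the process can refer to om without importing it, which is how om.error works inside parse_options. A minimal sketch of the mechanism, with a toy OutputManager standing in for the real one:

import builtins

class OutputManager:
    def error(self, text):
        print('[!]', text)

builtins.om = OutputManager()

def somewhere_else():
    # no import needed: name lookup falls through to builtins
    om.error('works, but it hides the dependency')

somewhere_else()

This works because Python resolves names not found in local or module scope against builtins as a last resort; the cost is that the dependency is invisible to readers and to static analysis.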
Example no. 13
    def show_course(self):
        """View courses"""
        obj = Manager(self.name)
        obj.show_teacher(self.name, "course")
Example no. 14
    def show_classes(self):
        """View classes"""
        obj = Manager(self.name)
        obj.show_teacher(self.name, "classes")
Example no. 15
    def learn_serial_critic(self):
        manager = Manager(self.logger)
        # report_clock = TrainReporterClock.remote()
        # manager.add_worker(report_clock, 'report_timeout')

        for worker in self.actor_workers:
            manager.add_worker(worker, 'actor_worker')

        training_kwargs = copy.copy(
            ray.get(
                self.actor_workers[0].get_default_training_kwargs.remote()))

        algorithm_state = 'population_ask'

        total_steps = 0
        episode_count = 0
        prev_actor_steps = 0
        eval_steps = 0
        current_actor_step = 0
        max_reward = -10000
        eval_max_reward = -10000

        individuals = []
        results = []
        individuals_queue = deque(maxlen=self.population_size)

        critic_training_index = 0

        ray.get([
            worker.set_eval.remote()
            for worker in self.critic_workers + self.actor_workers
        ])
        critic_names = ray.get(self.actor_workers[0].get_critic_names.remote())
        init_time = datetime.datetime.now()
        env_min, env_max = get_env_min_max(self.args.env_name)

        while True:
            if (total_steps >= self.max_timestep
                    and manager.num_running_worker('actor_worker') == 0
                    and algorithm_state == 'population_ask'):
                break

            if algorithm_state == 'population_ask':
                individuals = self.population.ask(self.population_size)
                results = [None for _ in range(self.population_size)]
                if total_steps >= self.args.initial_steps:
                    algorithm_state = 'critic_training'
                else:
                    for idx in range(self.population_size):
                        individuals_queue.append(idx)
                    algorithm_state = 'actor_evaluating'
                critic_training_index = 0
                current_actor_step = 0

            if algorithm_state == 'critic_training':
                if manager.get_worker_state_by_index('actor_worker',
                                                     0) == 'idle':
                    worker = manager.get_worker_by_index('actor_worker', 0)
                    worker.set_train.remote()

                    worker.set_weight.remote(
                        individuals[critic_training_index], name='main.actor')
                    worker.set_weight.remote(
                        individuals[critic_training_index],
                        name='target.actor')

                    training_kwargs['learn_critic'] = True
                    training_kwargs['learn_actor'] = False
                    training_kwargs['reset_optim'] = False
                    training_kwargs['batches'] = int(prev_actor_steps /
                                                     self.rl_population)

                    manager.new_job('train',
                                    specific_worker=worker,
                                    job_name='actor_worker',
                                    job_setting=None,
                                    **training_kwargs)

                result = manager.wait(name='actor_worker', remove=False)
                if result is not None:
                    finished_job, finished_job_id, finished_worker_dict = result
                    finished_worker = finished_worker_dict['worker']
                    finished_worker.set_eval.remote()

                    critic_training_index += 1
                    manager.done(finished_worker_dict)

                    if critic_training_index >= self.rl_population:
                        for idx in range(self.rl_population):
                            individuals_queue.append(idx)

                        critic_weight = ray.get(
                            finished_worker.get_weight.remote(
                                name=critic_names))
                        critic_weight_obj = ray.put(critic_weight)
                        set_critic_weight_obj = [
                            actor_worker.set_weight.remote(critic_weight_obj,
                                                           name=critic_names)
                            for actor_worker in self.actor_workers
                        ]
                        ray.get(set_critic_weight_obj)
                        algorithm_state = 'actor_training'
                        set_train_obj = [
                            worker.set_train.remote()
                            for worker in self.actor_workers
                        ]
                        ray.get(set_train_obj)

            if algorithm_state == 'actor_training':
                if len(individuals_queue) == 0 and manager.num_running_worker(
                        'actor_worker') == 0:
                    algorithm_state = 'actor_evaluating'
                    set_eval_obj = [
                        worker.set_eval.remote()
                        for worker in self.actor_workers
                    ]
                    ray.get(set_eval_obj)

                    for idx in range(self.population_size):
                        individuals_queue.append(idx)

                elif manager.num_idle_worker('actor_worker') > 0 and len(
                        individuals_queue) > 0:
                    individual_idx = individuals_queue.popleft()
                    worker, worker_idx = manager.get_idle_worker(
                        'actor_worker')
                    worker.set_weight.remote(individuals[individual_idx],
                                             name='main.actor')
                    worker.set_weight.remote(individuals[individual_idx],
                                             name='target.actor')
                    ray.get(worker.set_train.remote())

                    training_kwargs['learn_critic'] = False
                    training_kwargs['learn_actor'] = True
                    training_kwargs['reset_optim'] = True
                    training_kwargs['batches'] = int(prev_actor_steps)
                    training_kwargs['individual_id'] = individual_idx
                    manager.new_job(
                        'train',
                        job_name='actor_worker',
                        job_setting={'individual_idx': individual_idx},
                        **training_kwargs)

                result = manager.wait(name='actor_worker',
                                      remove=False,
                                      timeout=0)
                if result is not None:
                    finished_job, finished_job_id, finished_worker_dict = result
                    finished_worker = finished_worker_dict['worker']

                    finished_individual = finished_worker_dict['setting'][
                        'individual_idx']
                    trained_weight = ray.get(
                        finished_worker.get_weight.remote(name='main.actor'))
                    individuals[finished_individual] = trained_weight
                    manager.done(finished_worker_dict)

            if algorithm_state == 'actor_evaluating':
                if len(individuals_queue) == 0 and manager.num_running_worker(
                        'actor_worker') == 0:
                    algorithm_state = 'population_tell'

                elif manager.num_idle_worker('actor_worker') > 0 and len(
                        individuals_queue) > 0:
                    individual_idx = individuals_queue.popleft()
                    worker, worker_idx = manager.get_idle_worker(
                        'actor_worker')
                    worker.set_weight.remote(individuals[individual_idx],
                                             name='main.actor')
                    # worker.set_weight.remote(individuals[individual_idx], name='target.actor')

                    random_action = total_steps < self.args.initial_steps

                    manager.new_job(
                        'rollout',
                        job_name='actor_worker',
                        job_setting={'individual_idx': individual_idx},
                        random_action=random_action,
                        eval=False,
                        mid_train=False)

                result = manager.wait(name='actor_worker',
                                      remove=False,
                                      timeout=0)
                if result is not None:
                    finished_job, finished_job_id, finished_worker_dict = result
                    finished_worker = finished_worker_dict['worker']

                    finished_individual = finished_worker_dict['setting'][
                        'individual_idx']
                    episode_t, episode_reward = ray.get(finished_job_id)
                    results[finished_individual] = episode_reward

                    manager.done(finished_worker_dict)

                    total_steps += episode_t
                    current_actor_step += episode_t
                    eval_steps += episode_t
                    episode_count += 1

                    self.summary.add_scalar('train/individuals',
                                            episode_reward, total_steps)
                    if episode_reward > max_reward:
                        max_reward = episode_reward
                        self.summary.add_scalar('train/max', max_reward,
                                                total_steps)

            if algorithm_state == 'population_tell':
                self.population.tell(individuals, results)
                elapsed = (datetime.datetime.now() - init_time).total_seconds()
                result_str = [
                    prColor(f'{result:.2f}',
                            fore=prValuedColor(result, env_min, env_max, 40,
                                               "#600000", "#00F0F0"))
                    for result in results
                ]

                result_str = ', '.join(result_str)

                self.logger.log(
                    f'Total step: {total_steps}, time: {elapsed:.2f} s, '
                    f'max_reward: ' +
                    prColor(f'{max_reward:.3f}',
                            fore=prValuedColor(max_reward, env_min, env_max,
                                               40, "#600000", "#00F0F0")) +
                    f', results: {result_str}')

                prev_actor_steps = current_actor_step
                algorithm_state = 'mean_eval'
                # algorithm_state = 'population_ask'

            if algorithm_state == 'mean_eval':
                mean_weight, var_weight = self.population.get_mean()
                worker, worker_idx = manager.get_idle_worker('actor_worker')
                ray.get(
                    worker.set_weight.remote(mean_weight, name='main.actor'))
                manager.new_job('rollout',
                                job_name='actor_worker',
                                job_setting=None,
                                random_action=False,
                                eval=True,
                                mid_train=False)
                result = manager.wait(name='actor_worker',
                                      remove=False,
                                      timeout=None)
                if result is not None:
                    finished_job, finished_job_id, finished_worker_dict = result
                    finished_worker = finished_worker_dict['worker']

                    eval_t, eval_reward = ray.get(finished_job_id)
                    manager.done(finished_worker_dict)

                    if eval_reward > eval_max_reward:
                        eval_max_reward = eval_reward
                        self.summary.add_scalar('test/max', eval_reward,
                                                total_steps)

                    self.summary.add_scalar('test/mu', eval_reward,
                                            total_steps)

                    algorithm_state = 'population_ask'

            if eval_steps >= 50000:
                eval_t, eval_reward = self.run()
                self.logger.log(f'Evaluation: {eval_reward}')
                eval_steps = 0
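Stripped of the Ray and worker-pool plumbing, the state machine above ('population_ask' -> 'actor_evaluating' -> 'population_tell', with the critic and actor training states in between) is an ask/evaluate/tell evolutionary loop. A toy single-process version of that cycle (RandomSearchPopulation and evaluate are illustrative stand-ins, not project code):

import random

class RandomSearchPopulation:
    """Stand-in for self.population: ask() proposes candidates, tell() keeps the best."""
    def __init__(self, dim):
        self.best = [0.0] * dim

    def ask(self, n):
        return [[x + random.gauss(0, 0.1) for x in self.best] for _ in range(n)]

    def tell(self, candidates, scores):
        self.best = candidates[scores.index(max(scores))]

def evaluate(candidate):
    # stand-in for one 'rollout' job; maximised at the origin
    return -sum(x * x for x in candidate)

population = RandomSearchPopulation(dim=3)
for generation in range(20):
    individuals = population.ask(8)                    # 'population_ask'
    results = [evaluate(ind) for ind in individuals]   # 'actor_evaluating'
    population.tell(individuals, results)              # 'population_tell'
print('best candidate:', population.best)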