Esempio n. 1
0
def manager() -> Manager:
    """Build and return a fully wired ``Manager`` instance.

    Returns:
        A ``core.Manager`` with schema and config managers attached.
    """
    instance = Manager()
    instance.schema = SchemaManager()
    instance.config = ConfigManager('tests/config/config.json', instance.schema)
    return instance
Esempio n. 2
0
def uninstall():
    """Delete every account registered with the system."""
    # Four blank lines of visual padding before the banner.
    for _ in range(4):
        print("")
    print("UNINSTALLING")
    print("Removing all accounts linked to the system")

    account_manager = Manager()
    for account in account_manager.cuentas:
        account_manager.deleteAccount(account)
 def process_state(self):
     """Read the worker contract's current state, publish it to the
     Manager singleton, cache it on self and return it.

     Falls back to state 0 when the contract call raises.
     """
     current = 0
     try:
         current = self.contract_container.call().currentState()
     except Exception as ex:
         self.logger.info('Exception on process worker state.')
         self.logger.info(ex.args)
     state_name = self.state_table[current].name
     Manager.get_instance().set_job_contract_state(state_name)
     self.logger.info("Contract %s initial state is %s",
                      self.__class__.__name__, state_name)
     self.state = current
     return current
 def process_state(self, job_id_hex):
     """Fetch the state of cognitive job *job_id_hex*, publish it to the
     Manager singleton, cache it on self and return it.

     Falls back to state 0 when the contract call raises.
     """
     current = 0
     try:
         # State lives at index 6 of the job details tuple.
         details = self.job_controller_container.call(
         ).getCognitiveJobDetails(job_id_hex)
         current = details[6]
     except Exception as ex:
         self.logger.info('Exception on process job state.')
         self.logger.info(ex.args)
     state_name = self.state_table[current].name
     Manager.get_instance().set_job_contract_state(state_name)
     self.logger.info("Contract %s initial state is %s",
                      self.__class__.__name__, state_name)
     self.state = current
     return current
Esempio n. 5
0
    def init_kernel(self):
        """Read model and weights addresses from the kernel JSON, validate
        their types, and download the referenced files from IPFS.

        Returns:
            True on success, False on any structure, type or download error.
        """
        # get main kernel params
        try:
            self.model_address = self.json_kernel['model']
            Manager.get_instance().set_job_kernel_ipfs_address(
                self.model_address)
            self.weights_address = self.json_kernel['weights']
            # NOTE(review): the weights address is published through the
            # *dataset* setter — looks like a copy/paste slip; confirm
            # against Manager's API before changing.
            Manager.get_instance().set_job_dataset_ipfs_address(
                self.weights_address)
        except Exception as ex:
            self.logger.error("Wrong Kernel data file structure:")
            self.logger.error(ex.args)
            return False

        # ------------------------------
        # validate values by string type
        # ------------------------------
        # model address is necessary
        if not isinstance(self.model_address, str):
            self.logger.error("Wrong model address type : " +
                              str(type(self.model_address)))
            self.model_address = None
            self.weights_address = None
            return False
        # weights is not necessary and may be empty
        if self.weights_address is not None:
            if not isinstance(self.weights_address, str):
                self.logger.error("Wrong weights address type : " +
                                  str(type(self.weights_address)))
                self.weights_address = None
                return False

        try:
            self.logger.info("Downloading model file %s", self.model_address)
            self.ipfs_api.download_file(self.model_address)
            # Empty/None weights address is valid: skip the download.
            if self.weights_address:
                self.logger.info("Downloading weights file %s",
                                 self.weights_address)
                self.ipfs_api.download_file(self.weights_address)
            else:
                self.logger.info("Weights address is empty, skip downloading")
        except Exception as ex:
            self.logger.error("Can't download kernel files from IPFS: %s",
                              type(ex))
            self.logger.error(ex.args)
            return False

        return True
Esempio n. 6
0
def instantiate_contracts(abi_path, eth_hooks):
    """Load contract ABI definitions from *abi_path* into the Manager.

    Args:
        abi_path: Directory containing the compiled contract JSON files.
        eth_hooks: When the string 'True', load PandoraHooks instead of
            Pandora.

    Raises:
        ContractsAbiNotFound: If *abi_path* is not a directory.
    """
    manager = Manager.get_instance()
    print("ABI folder path              : " + str(abi_path))
    if not os.path.isdir(abi_path):
        print("ABI files not found, exiting")
        raise ContractsAbiNotFound()

    def load_abi(file_name):
        """Return the 'abi' section of *file_name* under abi_path, or None
        when the file does not exist."""
        # os.path.join also tolerates an abi_path without a trailing slash,
        # which the original string concatenation silently mishandled.
        file_path = os.path.join(abi_path, file_name)
        if not os.path.isfile(file_path):
            return None
        with open(file_path, encoding='utf-8') as contract_file:
            return json.load(contract_file)['abi']

    if eth_hooks == 'True':
        abi = load_abi("PandoraHooks.json")
        if abi is not None:
            manager.eth_pandora_contract = abi
            print('Pandora hooks abi loaded')
    else:
        abi = load_abi("Pandora.json")
        if abi is not None:
            manager.eth_pandora_contract = abi
            print('Pandora abi loaded')

    abi = load_abi("WorkerNode.json")
    if abi is not None:
        manager.eth_worker_contract = abi
        print('WorkerNode abi loaded')
    abi = load_abi("CognitiveJob.json")
    if abi is not None:
        manager.eth_cognitive_job_contract = abi
        print('CognitiveJob abi loaded')
    abi = load_abi("Kernel.json")
    if abi is not None:
        manager.eth_kernel_contract = abi
        print('Kernel abi loaded')
    abi = load_abi("Dataset.json")
    if abi is not None:
        manager.eth_dataset_contract = abi
        print('Dataset abi loaded')
Esempio n. 7
0
    def __init__(self, dataset_file, ipfs_api, batch_no: int):
        """Set up logging, manager access and empty job state.

        Args:
            dataset_file: Parsed JSON describing the dataset.
            ipfs_api: Client used to fetch files from IPFS.
            batch_no: Index of the batch this instance will work on.
        """
        # Logger wired to the shared socket handler.
        self.logger = logging.getLogger("Kernel")
        self.logger.addHandler(LogSocketHandler.get_instance())
        self.manager = Manager.get_instance()

        self.json_dataset = dataset_file
        self.ipfs_api = ipfs_api
        # Which operation this instance performs ("predict" or "fit");
        # decided later.
        self.process = None

        # Batched prediction state.
        self.data_address = None
        self.batch_no = batch_no
        self.dataset = None

        # Training (fit) state.
        self.train_x_address = None
        self.train_x_dataset = None
        self.train_y_address = None
        self.train_y_dataset = None
        self.loss = None
        self.optimizer = None
        self.batch_size = None
        self.epochs = None
        self.validation_split = 0
        self.shuffle = False
        self.initial_epoch = 0
Esempio n. 8
0
 def _manage(self, *args):
     """Administrator login: verify credentials and record the session."""
     credentials = args[0]
     ret = Manager.login(credentials["username"], credentials["password"])
     # Status 654 is treated as a successful administrator login here.
     if ret["status"] == 654:
         self.user["type"] = "管理员"
         self.user["user_id"] = ret["obj"].id
     self.send_msg(ret["status"])
Esempio n. 9
0
def command():
    """Handle an on/off command for a plug, given via query parameters.

    Query args:
        action: 'on' or 'off' (case-insensitive).
        plug: Name of the plug entry in plugs.json.

    Returns:
        'done' on success, otherwise a human-readable error string.
    """
    # Default '' keeps .lower() safe when the parameter is missing
    # (the original crashed with AttributeError on None).
    action = request.args.get("action", "").lower()
    plug = request.args.get("plug")
    if action not in ("on", "off"):
        return "Error - action must be on or off."

    plugs_path = os.path.join(os.environ["MYH_HOME"], "data", "plugs.json")
    with open(plugs_path, 'r') as plugs_file:
        plug_data = json.load(plugs_file)

    if plug not in plug_data:
        # str() keeps this from raising when plug was not supplied at all.
        return "Error - " + str(plug) + " is not a plug entry."

    plug_data[plug]["plug_state"] = action
    # save the action for manager
    with open(plugs_path, 'w') as plugs_file:
        json.dump(plug_data, plugs_file)
    # do the action
    my_manager = Manager()
    my_manager.turn_on_off_plug(plug, action)

    return "done"
Esempio n. 10
0
    def __init__(self, kernel_file, ipfs_api):
        """Store the kernel description and prepare empty model state.

        Args:
            kernel_file: Parsed JSON describing the kernel.
            ipfs_api: Client used to fetch files from IPFS.
        """
        # Logger wired to the shared socket handler.
        self.logger = logging.getLogger("Kernel")
        self.logger.addHandler(LogSocketHandler.get_instance())
        self.manager = Manager.get_instance()

        self.json_kernel = kernel_file
        self.ipfs_api = ipfs_api
        # Populated later from the kernel JSON.
        self.model_address = None
        self.weights_address = None
        self.model = None
Esempio n. 11
0
def administrator():
    """Administrator menu loop: list actions and dispatch the chosen one."""
    user = login("manager")
    if user and user["identified"] == "manager":
        admin = Manager('admin')
        while True:
            for index, entry in enumerate(admin.manager_list, start=1):
                print("%s: %s" % (index, entry[0]))
            choice = input("请选择:").strip()
            # Guard both non-numeric input (the original int() call crashed)
            # and out-of-range values (the original accepted 0/negatives,
            # which silently indexed the wrong entry).
            if choice.isdigit() and 1 <= int(choice) <= len(admin.manager_list):
                getattr(admin, admin.manager_list[int(choice) - 1][1])()
            else:
                print("选择错误!请重新选择")
Esempio n. 12
0
def main():
    """Entry point: uninstall everything, open a REPL, or run one sync."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--uninstall", help=argparse.SUPPRESS, action="store_true")
    parser.add_argument("--cli", help="Invoke a REPL", action="store_true")
    args = parser.parse_args()

    if args.uninstall:
        uninstall()
        return

    try:
        man = Manager()
        if args.cli:
            # Interactive mode.
            Repl(man).start()
        else:
            # First run: create an account before syncing.
            if man.cuentas == []:
                account_type = "dropbox"
                print("Select the account type: ", account_type)
                user = input("Enter a name for the new account: ")
                man.newAccount(account_type, user)
            man.updateLocalSyncFolder()
    except Exception as e:
        Logger(__name__).critical(str(e))
        sys.exit(1)
    finally:
        input("Press ENTER to quit.")
Esempio n. 13
0
def instantiate_contracts(abi_path, eth_hooks):
    """Load contract ABI definitions from *abi_path* into the Manager.

    Args:
        abi_path: Directory containing the compiled contract JSON files.
        eth_hooks: When the string 'True', load PandoraHooks instead of
            Pandora.

    Raises:
        ContractsAbiNotFound: If *abi_path* is not a directory.
    """
    manager = Manager.get_instance()
    print("ABI folder path              : " + str(abi_path))
    if not os.path.isdir(abi_path):
        print("ABI files not found, exiting.")
        print("pyrrha-pynode repo contains link to pyrrha-consensus project.")
        print("for complete clone project please provide git commands :")
        # Backslash doubled: the original "\p" was an invalid escape
        # sequence (SyntaxWarning on modern Python); runtime text unchanged.
        print('cd .\\pyrrha-pynode"')
        print("git submodule init")
        print("git submodule update --recursive --remote")
        raise ContractsAbiNotFound()

    def load_abi(file_name):
        """Return the 'abi' section of *file_name* under abi_path, or None
        when the file does not exist."""
        # os.path.join also tolerates an abi_path without a trailing slash,
        # which the original string concatenation silently mishandled.
        file_path = os.path.join(abi_path, file_name)
        if not os.path.isfile(file_path):
            return None
        with open(file_path, encoding='utf-8') as contract_file:
            return json.load(contract_file)['abi']

    if eth_hooks == 'True':
        abi = load_abi("PandoraHooks.json")
        if abi is not None:
            manager.eth_pandora_contract = abi
            print('Pandora hooks abi loaded')
    else:
        abi = load_abi("Pandora.json")
        if abi is not None:
            manager.eth_pandora_contract = abi
            print('Pandora abi loaded')

    abi = load_abi("WorkerNode.json")
    if abi is not None:
        manager.eth_worker_contract = abi
        print('WorkerNode abi loaded')
    abi = load_abi("CognitiveJobController.json")
    if abi is not None:
        manager.eth_job_controller_contract = abi
        print('CognitiveJobController abi loaded')
    abi = load_abi("Kernel.json")
    if abi is not None:
        manager.eth_kernel_contract = abi
        print('Kernel abi loaded')
    abi = load_abi("Dataset.json")
    if abi is not None:
        manager.eth_dataset_contract = abi
        print('Dataset abi loaded')
Esempio n. 14
0
    def __init__(self, eth_server: str, abi_path: str, pandora: str, node: str,
                 ipfs_server: str, ipfs_port: int, data_dir: str):
        """Wire up the broker: logging, manager access, saved configuration
        and empty runtime containers.

        Args:
            eth_server: Ethereum node endpoint.
            abi_path: Directory with contract ABI files.
            pandora: Pandora contract address.
            node: Worker node contract address.
            ipfs_server: IPFS API host.
            ipfs_port: IPFS API port.
            data_dir: Local directory used for IPFS file storage.
        """
        Broker.get_instance()
        Thread.__init__(self, daemon=True)

        # Logging and shared manager state.
        self.logger = logging.getLogger("Broker")
        self.logger.addHandler(LogSocketHandler.get_instance())
        self.manager = Manager.get_instance()
        self.mode = self.manager.launch_mode

        # Startup configuration.
        self.eth_server = eth_server
        self.abi_path = abi_path
        self.pandora = pandora
        self.node = node
        self.ipfs_server = ipfs_server
        self.ipfs_port = ipfs_port
        self.data_dir = data_dir

        # Pandora contract container (bound later).
        self.pandora_container = None

        # Worker-node containers (bound later).
        self.worker_node_container = None
        self.worker_node_state_machine = None
        self.worker_node_event_thread = None

        # Job containers (bound later).
        self.job_address = None
        self.job_container = None
        self.job_state_machine = None
        self.job_state_event_thread = None

        # Active jobs and their processors.
        self.jobs = {}
        self.processors = {}

        # Service connectors for Ethereum and IPFS.
        self.eth = EthService(strategic=EthConnector())
        self.ipfs = IpfsService(strategic=IpfsConnector())

        self.local_password = None
        self.key_tool = KeyTools()
        print('Pandora broker initialize success')
Esempio n. 15
0
 def __init__(self, ipfs_api, processor_id: str,
              delegate: ProcessorDelegate):
     """Initialise the processor with its id, IPFS client and delegate.

     Args:
         ipfs_api: Client used to fetch files from IPFS.
         processor_id: Identifier for this processor instance.
         delegate: Callback object receiving processor events.
     """
     super().__init__()
     # Logging and shared manager state.
     self.logger = logging.getLogger("Processor")
     self.logger.addHandler(LogSocketHandler.get_instance())
     self.manager = Manager.get_instance()
     # Identity, IPFS access and callback delegate.
     self.id = processor_id
     self.ipfs_api = ipfs_api
     self.delegate = delegate
     # Results placeholder.
     self.results_file = None
     # Kernel/dataset objects and their init results (filled later).
     self.kernel = None
     self.kernel_init_result = None
     self.dataset = None
     self.dataset_init_result = None
Esempio n. 16
0
    def __init__(self, config_file_path: str):
        """Create and wire the manager stack for the monitor.

        Args:
            config_file_path: The path to the OpenADMS Node configuration file.
        """
        self.logger = logging.getLogger('monitor')
        self._config_file_path = config_file_path

        mgr = Manager()
        try:
            # Attach the sub-managers in dependency order.
            mgr.schema = SchemaManager()
            mgr.config = ConfigManager(self._config_file_path, mgr.schema)
            mgr.project = ProjectManager(mgr)
            mgr.node = NodeManager(mgr)
            mgr.sensor = SensorManager(mgr.config)
            mgr.module = ModuleManager(mgr)
        except ValueError as e:
            # NOTE(review): after a failure the partially initialised manager
            # is still stored below — confirm this is intended.
            self.logger.error(f'Fatal error: {e}')

        self._manager = mgr
Esempio n. 17
0
def manager():
    """Administrator menu loop.

    After a successful 'manager' login, repeatedly list the operations
    declared on the Manager object, read a menu number, and dispatch to
    the matching method via getattr() reflection.
    """
    user = login('manager')
    if not (user and user['identity'] == 'manager'):
        return
    admin = Manager('admin')
    while True:
        for index, entry in enumerate(admin.manager_dic, 1):
            print('%s. %s' % (index, entry[0]))
        num = input('num>>>:').strip()
        if num.isdigit() and int(num) > 0:
            try:
                getattr(admin, admin.manager_dic[int(num) - 1][1])()
            except Exception as e:
                print(e)
                print('\033[1;31m请输入正确的序号\033[0m')
        else:
            print('\033[1;31m请输入整数型序号\033[0m')
Esempio n. 18
0
def run():
    """Entry point: authenticate, then run the role-specific menu.

    Role ids: 0 = administrator, 1 = teacher, 2 = student. Administrators
    and teachers loop until interrupted; students get a single pass.
    """
    print('\033[1;42m欢迎您登陆选课系统\033[0m')
    auth_msg = auth.login()  # user info returned by the login step
    if not auth_msg:
        return
    role_map = {0: Manager, 1: Teacher, 2: Student}
    roleid = auth_msg['roleid']
    if roleid not in role_map:
        print("你的角色出了问题,请联系管理员")
        return
    cls = role_map[roleid]
    obj = cls(auth_msg['username'])
    while True:
        # Print the class-level menu, then dispatch by reflection.
        for i, func in enumerate(cls.menu, 1):
            print(i, func[1])
        try:
            func_num = int(input("请输入功能序号:"))
            getattr(obj, cls.menu[func_num - 1][0])()
        except Exception:
            print("你输入的内容有误")
        if roleid == 2:
            # Students only get one pass, matching the original flow.
            break
Esempio n. 19
0
def run_pynode():
    """Build the Broker from global Manager settings and start it."""
    try:
        # All endpoints come from the globally configured Manager singleton.
        manager = Manager.get_instance()
        broker = Broker(eth_server=manager.eth_host,
                        abi_path=manager.eth_abi_path,
                        pandora=manager.eth_pandora,
                        node=manager.eth_worker,
                        ipfs_server=manager.ipfs_host,
                        ipfs_port=manager.ipfs_port,
                        data_dir=manager.ipfs_storage)
    except Exception as ex:
        logging.error("Error broker initialization: %s, exiting", type(ex))
        logging.error(ex.args)
        return

    if broker.connect() is True:
        return

    # Keep the main thread alive; remove this join() to run as a daemon
    # in the background.
    broker.join()
Esempio n. 20
0
import sys
import logging

from settings import config as cfg
from core.manager import Manager

# Module-level logger for the bot.
logger = logging.getLogger('bot')

if __name__ == '__main__':
    # DEBUG = True if '-debug' in sys.argv else False

    # Create the bot manager with the configured API token.
    manager = Manager(token=cfg.API_TOKEN)

    if cfg.DEBUG:
        # DEBUG set: poll the API for updates.
        logging.info("SetUp Polling")
        manager.start_polling()
    else:
        # Otherwise receive updates through the configured webhook.
        logging.info("SetUp Webhook")
        manager.start_webhook(cfg.WEBHOOK)
Esempio n. 21
0
from os import path

from core.metadata import Metadata
from core.manager import Manager

from components.channel import Channel
from components.series import Series
from components.episode import Episode

# Instantiate the manager with the path to the root of the file structure
m = Manager(path.abspath('./YouTube'))

# Add the types that the structure uses; the second argument appears to be
# the parent type (Channel is top-level) — confirm against Manager's API.
m.add_resource(Channel, None)
m.add_resource(Series, Channel)
m.add_resource(Episode, Series)

# Scan to get the items from the structure
m.scan()

# The world is your canvas :)
Esempio n. 22
0
from core.manager import Manager

from reddit import RedditChannel
from imgur import ImgurChannel
from settings import BOT_TOKEN, DEBUG, PORT, HOST, HEROKU_APP_NAME, ADMINS

if __name__ == '__main__':
    # Build the bot manager with its token and admin list.
    manager = Manager(token=BOT_TOKEN, admins=ADMINS)

    # Register the content channels the bot serves.
    manager.register(RedditChannel())
    manager.register(ImgurChannel())

    if DEBUG:
        # DEBUG set: poll for updates.
        manager.start_polling()
    else:
        # Otherwise receive updates through a Heroku-hosted webhook.
        manager.start_webhook(
            listen=HOST,
            port=PORT,
            url_path=BOT_TOKEN,
            webhook_url=f'https://{HEROKU_APP_NAME}.herokuapp.com/{BOT_TOKEN}')
Esempio n. 23
0
 def show_course(self):
     """Show courses by delegating to Manager.show_teacher."""
     Manager(self.name).show_teacher(self.name, "course")
Esempio n. 24
0
    exit(0)


def parse_options():
    """Handle -h/-v flags and validate the CLI arguments via getopt.

    Returns:
        The positional (non-option) command line arguments.
    """
    if '-h' in sys.argv:
        usage()
    elif '-v' in sys.argv:
        version()
    try:
        _opts, args = getopt.getopt(sys.argv[1:], "h:v")
    except getopt.GetoptError as e:
        om.error("Invalid parameter({})\n".format(str(e)))
        exit(1)
    return args


if __name__ == '__main__':
    # Run from the directory containing this script so relative paths work.
    os.chdir(os.path.dirname(os.path.abspath(__file__)))

    # Expose the shared managers globally through builtins.
    builtins.config = ConfigManager()
    builtins.om = OutputManager()
    builtins.em = ExploitsManager()

    banner()
    options = parse_options()

    # Start the main manager loop.
    manager = Manager()
    manager.run()

    exit(0)
Esempio n. 25
0
def main(argv):
    """Parse CLI options and the pynode .ini configuration, populate the
    global Manager, instantiate contracts, and optionally start the web
    socket listener and the pynode broker.

    Args:
        argv: Unused; argparse reads sys.argv directly.
    """
    help_message = """
        Pandora Boxchain python node realisation

                -/////////+++++/`
             `/ss:--:sds/d/````.hNso-
           /hNhsoosh+.   `h+     ss`/ss`
          so     yoh/      yy//+++dh- /h`
         y+    `h/  yo  -ohM+...`.d+/sssm-
       `hh++++od/    sds/-h:    `h:    -sN/
      `dN`````-Mh`  .d- -mo::::/m:     `h/yo
     .d:m     `m:d.-d/os++yds:-.:os+. .d-  oy`
    -d.`m     `m .dmy/`     -ossooshNdN-    +d
    so :Mhoooood: so          :Mh`    :d.  -NM
    so/h`y+     y++o          :d:d.    .d-:d.N
    sNs   os     oNo          -d .dsoooosNd``N`
    :d     dhmdsooyyo:      -odm: s+     os +y
     :h` `y/ `/oo/..-+hh+:os+ho`y+o+     osos
      .h:h:     `d+/////N/ `y+   oNo     +Mo
       `dh:    .h-    `h:-oyh    `dsoo++oN+
        `yyos+:d-````-mds/` /h` `h:    `h/
          oo .oN+++++od`     -h:d-    `h:
           /s/`/h`    :h.    -shyoosmmh-
             .+shd.    .d:/ss/` `/ss/`
                `/ooo+++oooo+++++-

        to see more https://pandoraboxchain.ai/

        Current configuration performs launch with different parameters and configurations

        example >python ./pynode.py -p <your vault password>
                                    - starts pynode with default config
                                      placed in pynode.ini file
        ATTENTION! - vault password is necessary, and if you use docker and if you use the docker,
        enter the password in the startup line in Dockerfile

        if we need launch with different ETH HOST and IPFS config use launch params
        example >python ./pynode.py -e (--ethereum) remote -i (--ipfs) pandora
                    with eth_connector instance to connect remote  = http://rinkeby.pandora.network:8545
                    and IPFS config server = http://ipfs.pandora.network
                                             port = 5001

        -c (--config) performs path for custom config file for launch params
        -a (--abi) performs change abi directory path

        -w (--worker) alow to set custom worker contract address
                      seted by console value will replace value in config file
                      for this launch
    """

    parser = argparse.ArgumentParser(description=help_message, formatter_class=argparse.RawTextHelpFormatter)

    parser.add_argument('-p',
                        '--password',
                        action="store",
                        dest='vault_key',
                        default='',
                        help='necessary parameter for launch pynode.'
                             '(used for encrypt vault and use private key to local transactions sign)',
                        metavar='')
    parser.add_argument('-c',
                        '--config',
                        action="store",
                        dest='configuration_file',
                        default='../pynode/core/config/pynode.ini',
                        help='startup pyrrha-pynode with custom configuration file '
                             '(default is ../pynode.ini strongly recommended for use)',
                        metavar='')
    parser.add_argument('-e',
                        '--ethereum',
                        action="store",
                        dest='ethereum_use',
                        default='remote',
                        help='setting up current used host for ethereum node '
                             '(default is remote)',
                        metavar='')
    parser.add_argument('-a',
                        '--abi',
                        action='store',
                        dest='abi_path',
                        default='../pyrrha-consensus/build/contracts/',
                        help='setting up path to folder with ABI files '
                             '(default is ../abi/ strongly recommended for use)',
                        metavar='')
    parser.add_argument('-i',
                        '--ipfs',
                        action='store',
                        dest='ipfs_use',
                        default='pandora',
                        help='setting up current used host for ipfs connection '
                             '(default is "pandora" strongly recommended for use)',
                        metavar='')
    # '-v' had a trailing space in the original ('-v '), which made the
    # flag impossible to type exactly; the intent is clearly '-v'.
    parser.add_argument('-v',
                        '--version',
                        action='version',
                        version='%(prog)s 0.1.2')

    results = parser.parse_args()

    # read configuration file and parse base settings
    print("Configuration file path      : " + str(results.configuration_file))
    if results.configuration_file:
        try:
            config = ConfigParser()
            config.read(results.configuration_file)
            eth_section = config['Ethereum']
            account_section = config['Account']
            eth_contracts = config['Contracts']
            ipfs_section = config['IPFS']
            web_section = config['Web']
            eth_host = eth_section[results.ethereum_use]
            eth_worker_node_account = account_section['worker_node_account']
            pandora_address = eth_contracts['pandora']
            worker_address = eth_contracts['worker_node']
            eth_hooks = eth_contracts['hooks']
            pynode_start_on_launch = eth_contracts['start_on_launch']
            ipfs_storage = ipfs_section['store_in']
            ipfs_use_section = config['IPFS.%s' % results.ipfs_use]
            ipfs_host = ipfs_use_section['server']
            ipfs_port = ipfs_use_section['port']
            socket_enable = web_section['enable']
            socket_host = web_section['host']
            socket_port = web_section['port']
            socket_listen = web_section['connections']
        except Exception as ex:
            # Fixed: the original passed type(ex) as a second print() argument,
            # leaving the %s placeholder unformatted in the output.
            print("Error reading config: %s, exiting" % type(ex))
            logging.error(ex.args)
            return
    print("Config reading success")
    manager = Manager.get_instance()
    if not results.vault_key:
        # Typo fixed: "provide if" -> "provide it".
        print('Vault key is necessary for launch (use -p key for provide it)')
        return
    manager.vault_key = results.vault_key
    # -------------------------------------
    # launch pynode
    # -------------------------------------
    worker_contract_address = worker_address

    manager.pynode_config_file_path = results.configuration_file
    manager.launch_mode = "0"  # results.launch_mode
    manager.eth_use = results.ethereum_use
    manager.eth_host = eth_host

    manager.eth_worker_node_account = eth_worker_node_account

    manager.eth_abi_path = results.abi_path
    manager.eth_pandora = pandora_address
    manager.eth_worker = worker_contract_address
    manager.ipfs_use = results.ipfs_use
    manager.ipfs_host = ipfs_host
    manager.ipfs_port = ipfs_port
    manager.ipfs_storage = ipfs_storage
    manager.pynode_start_on_launch = pynode_start_on_launch
    manager.web_socket_enable = socket_enable
    manager.web_socket_host = socket_host
    manager.web_socket_port = socket_port
    manager.web_socket_listeners = socket_listen

    print("Pynode production launch")
    print("Node launch mode             : " + str(manager.launch_mode))
    print("Ethereum use                 : " + str(results.ethereum_use))
    print("Ethereum host                : " + str(eth_host))
    print("Worker node account owner    : " + str(eth_worker_node_account))
    print("Primary contracts addresses")
    print("Pandora main contract        : " + str(pandora_address))
    print("Worker node contract         : " + str(worker_contract_address))
    print("IPFS configuration")
    print("IPFS use                     : " + str(results.ipfs_use))
    print("IPFS host                    : " + str(ipfs_host))
    print("IPFS port                    : " + str(ipfs_port))
    print("IPFS file storage            : " + str(ipfs_storage))
    print("Web socket enable            : " + str(socket_enable))
    # inst contracts
    instantiate_contracts(results.abi_path, eth_hooks)
    # launch socket web listener
    if socket_enable == 'True':
        print("Launch client socket listener")
        print("Web socket enable            : " + str(manager.web_socket_enable))
        print("Web socket host              : " + str(manager.web_socket_host))
        print("Web socket port              : " + str(manager.web_socket_port))
        print("Web socket listeners         : " + str(manager.web_socket_listeners))
        WebSocket(socket_host, socket_port, socket_listen)
    # launch pynode
    if pynode_start_on_launch == 'True':
        print("Launch pynode...")
        run_pynode()
Esempio n. 26
0
def main():
    """Entry point for a distributed-computing node.

    Reads the list of adjacent node IPs, spawns the logging and job-submit
    interface processes, triggers leader election, then serves incoming
    socket messages forever, dispatching each message type to its handler.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--adj_nodes_path",
                        help="Path to a list of ips of adjacent nodes",
                        required=True,
                        type=str)

    args = vars(parser.parse_args())

    self_ip = get_my_ip()
    adj_nodes_path = args['adj_nodes_path']

    # The adjacency file holds one IP address per line.
    with open(adj_nodes_path, 'r') as f:
        adj_nodes_ips = f.read().splitlines()

    my_node = Node(self_ip, adj_nodes_ips)

    # Duplicate stdin so the child submit-interface process keeps a usable
    # handle independent of this process.
    newstdin = os.fdopen(os.dup(sys.stdin.fileno()))
    manager = Manager()

    # Process-shared containers (multiprocessing.Manager proxies) so child
    # processes observe the same node state.
    my_node.yet_to_submit = manager.dict()
    my_node.jobQ = manager.list()
    my_node.resources = manager.dict()
    my_node.job_pid = manager.dict()
    my_node.lost_resources = manager.dict()
    my_node.pids = manager.dict()

    my_node.leader_last_seen = manager.dict()

    my_node.log_q = manager.Queue()
    my_node.failed_msgs = manager.list()
    my_node.backup_state = manager.list()

    my_node.ip_dict = manager.dict()
    my_node.ip_dict['root'] = self_ip
    # my_node.backup_ip_dict = manager.dict()

    log_file = 'main_log_data.txt'
    logging_p = Process(target=start_logger,
                        args=(my_node.log_q, log_file, "INFO"))
    logging_p.start()
    time.sleep(5)  # give the logger time to come up before anything logs
    my_node.pids['logging'] = logging_p.pid

    interface_p = Process(target=submit_interface, args=(my_node, newstdin))
    interface_p.start()
    my_node.submit_interface_pid = interface_p.pid

    # start receiving messages
    msg_socket = build_socket(self_ip)

    # Leader election
    initiate_leader_election(my_node)

    matchmaker_started = False

    while 1:
        conn, recv_addr = msg_socket.accept()
        recv_addr = recv_addr[0]
        msg = recv_msg(conn)

        ty = "INFO"
        # if 'HEARTBEAT' in msg.msg_type:
        #     ty = "DEBUG"

        # print('received msg of type %s from %s' %(msg.msg_type, recv_addr))
        add_log(my_node,
                'received msg of type %s from %s' % (msg.msg_type, recv_addr),
                ty)

        # Dispatch on message type.
        if msg.msg_type == 'LE_QUERY':
            handlers.le_query_handler(my_node, recv_addr, msg.content)
        elif msg.msg_type == 'LE_ACCEPT':
            handlers.le_accept_handler(my_node, recv_addr, msg.content)
        elif msg.msg_type == 'LE_REJECT':
            handlers.le_reject_handler(my_node, recv_addr, msg.content)
        elif msg.msg_type == 'LE_TERMINATE':
            handlers.le_terminate_handler(my_node)
        elif msg.msg_type == 'BACKUP_QUERY':
            handlers.backup_query_handler(my_node)
            # This node is now the backup: watch the leader for crashes.
            leader_crash_detector_p = Process(target=leader_crash_detect,
                                              args=(my_node, ))
            leader_crash_detector_p.start()
            my_node.pids['leader_crash_detector'] = leader_crash_detector_p.pid

        elif msg.msg_type == 'EXEC_JOB':
            handlers.exec_job_handler(my_node, msg.content)
        elif msg.msg_type == 'QUERY_FILES':
            handlers.query_files_handler(my_node, recv_addr, msg.content)
        elif msg.msg_type == 'HEARTBEAT':
            handlers.heartbeat_handler(my_node, recv_addr, msg.content,
                                       manager)
        elif msg.msg_type == 'FILES_CONTENT':
            handlers.files_content_handler(my_node, recv_addr, msg.content)
        elif msg.msg_type == 'ARE_YOU_ALIVE':
            handlers.send_heartbeat(my_node, recv_addr)
        elif msg.msg_type == 'HEARTBEAT_ACK':
            handlers.heartbeat_ack_handler(my_node)
        elif msg.msg_type == 'LOG_FILE':
            handlers.log_file_handler(my_node, msg.content)
        elif msg.msg_type == 'LOG_FILE_ACK':
            handlers.log_file_ack_handler(my_node, recv_addr, msg.content)
        elif msg.msg_type == 'COMPLETED_JOB':
            handlers.completed_job_handler(my_node, recv_addr, msg.content)
        elif msg.msg_type == 'PREEMPT_AND_EXEC':
            handlers.preempt_and_exec_handler(my_node, recv_addr, msg.content)
        elif msg.msg_type == 'PREEMPTED_JOB':
            handlers.preempted_job_handler(my_node, recv_addr, msg.content)
        elif msg.msg_type == 'STATUS_JOB':
            handlers.status_job_handler(my_node, recv_addr, msg.content)
        elif msg.msg_type == 'STATUS_REPLY':
            handlers.status_reply_handler(my_node, msg.content)
        elif msg.msg_type == 'GET_ALIVE_NODE':
            handlers.get_alive_node_handler(my_node, recv_addr, msg.content)
        elif msg.msg_type == 'GET_ALIVE_NODE_ACK':
            handlers.get_alive_node_ack_handler(my_node, msg.content)
        elif msg.msg_type == 'DISPLAY_OUTPUT':
            handlers.display_output_handler(my_node, recv_addr, msg.content)
        elif msg.msg_type == 'FWD_DISPLAY_OUTPUT':
            handlers.fwd_display_output_handler(my_node, msg.content)
        elif msg.msg_type == 'DISPLAY_OUTPUT_ACK':
            handlers.display_output_ack_handler(my_node, msg.content)
        elif msg.msg_type == 'FWD_DISPLAY_OUTPUT_ACK':
            handlers.fwd_display_output_ack_handler(my_node, msg.content)
        elif msg.msg_type == 'BACKUP_HEARTBEAT':
            handlers.backup_heartbeat_handler(my_node)
        elif msg.msg_type == 'BACKUP_HEARTBEAT_ACK':
            handlers.backup_heartbeat_ack_handler(my_node, msg.content)
        elif msg.msg_type == 'U_ARE_LEADER':
            # Promoted to leader: set up leader-only shared state, then
            # start matchmaking and crash detection.
            my_node.running_jobs = manager.dict()
            my_node.leader_jobPQ = JobPQ(manager)
            my_node.last_heartbeat_ts = manager.dict()
            my_node.leader_joblist = manager.list()

            handlers.new_leader_handler(my_node, recv_addr, msg.content)
            matchmaker_p = Process(target=matchmaking, args=(my_node, ))
            matchmaker_p.start()

            matchmaker_started = True

            add_log(my_node, "Starting Matchmaker", ty="INFO")

            crash_detector_p = Process(target=crash_detect, args=(my_node, ))
            crash_detector_p.start()

            add_log(my_node, "Starting Crash Detector", ty="INFO")
            time.sleep(5)

            my_node.pids['matchmaker'] = matchmaker_p.pid
            my_node.pids['crash_detector'] = crash_detector_p.pid

        elif msg.msg_type == 'ELECT_NEW_LEADER':
            handlers.elect_new_leader_handler(my_node)
        elif msg.msg_type == 'I_AM_NEWLEADER':
            handlers.i_am_newleader_handler(my_node, recv_addr)
        elif msg.msg_type == 'LE_FORCE_LEADER':
            # BUGFIX: 'content' was an undefined name here (NameError at
            # runtime); pass msg.content like every other handler call.
            handlers.le_force_leader_handler(my_node, recv_addr, msg.content)
        else:
            add_log(my_node,
                    "Message of unexpected msg type" + msg.msg_type,
                    ty="DEBUG")

        # If this node won the election as root and the matchmaker is not
        # running yet, start the leader-side services here.
        if my_node.le_elected and my_node.self_ip == my_node.ip_dict[
                'root'] and not matchmaker_started:

            my_node.running_jobs = manager.dict()
            my_node.leader_jobPQ = JobPQ(manager)
            my_node.last_heartbeat_ts = manager.dict()
            my_node.leader_joblist = manager.list()

            matchmaker_p = Process(target=matchmaking, args=(my_node, ))
            matchmaker_p.start()
            # time.sleep(5)

            add_log(my_node, "Starting Matchmaker", ty="INFO")

            matchmaker_started = True

            crash_detector_p = Process(target=crash_detect, args=(my_node, ))
            crash_detector_p.start()
            time.sleep(5)

            add_log(my_node, "Starting Crash Detector", ty="INFO")

            my_node.pids['matchmaker'] = matchmaker_p.pid
            my_node.pids['crash_detector'] = crash_detector_p.pid
Esempio n. 27
0
    def learn_serial_critic(self):
        """Run the serial critic-training evolutionary RL loop.

        Drives a state machine over ``algorithm_state``:

        * ``population_ask``   -- sample a population of actor weights.
        * ``critic_training``  -- train the shared critic on worker 0 using
          the first ``rl_population`` individuals, then broadcast the critic
          weights to every actor worker.
        * ``actor_training``   -- fine-tune each RL individual's actor.
        * ``actor_evaluating`` -- roll out every individual to score it.
        * ``population_tell``  -- feed scores back to the population.
        * ``mean_eval``        -- evaluate the population mean for logging.

        Workers are Ray actors coordinated through a job ``Manager``; the
        loop exits once ``self.max_timestep`` environment steps are reached
        and no rollout is still in flight.
        """
        manager = Manager(self.logger)
        # report_clock = TrainReporterClock.remote()
        # manager.add_worker(report_clock, 'report_timeout')

        for worker in self.actor_workers:
            manager.add_worker(worker, 'actor_worker')

        # Template kwargs for 'train' jobs; per-phase fields are overwritten
        # before each dispatch.
        training_kwargs = copy.copy(
            ray.get(
                self.actor_workers[0].get_default_training_kwargs.remote()))

        algorithm_state = 'population_ask'

        total_steps = 0
        episode_count = 0
        prev_actor_steps = 0
        eval_steps = 0
        current_actor_step = 0
        max_reward = -10000
        eval_max_reward = -10000

        individuals = []
        results = []
        # Queue of individual indices still awaiting the current phase.
        individuals_queue = deque(maxlen=self.population_size)

        critic_training_index = 0

        ray.get([
            worker.set_eval.remote()
            for worker in self.critic_workers + self.actor_workers
        ])
        critic_names = ray.get(self.actor_workers[0].get_critic_names.remote())
        init_time = datetime.datetime.now()
        env_min, env_max = get_env_min_max(self.args.env_name)

        while True:
            # Terminate only between generations, with no rollout pending.
            if total_steps >= self.max_timestep and manager.num_running_worker('actor_worker') == 0 \
                    and algorithm_state == 'population_ask':
                break

            if algorithm_state == 'population_ask':
                individuals = self.population.ask(self.population_size)
                results = [None for _ in range(self.population_size)]
                # Skip critic training until enough exploration steps exist.
                if total_steps >= self.args.initial_steps:
                    algorithm_state = 'critic_training'
                else:
                    for idx in range(self.population_size):
                        individuals_queue.append(idx)
                    algorithm_state = 'actor_evaluating'
                critic_training_index = 0
                current_actor_step = 0

            if algorithm_state == 'critic_training':
                # Critic is trained serially on actor worker 0 only.
                if manager.get_worker_state_by_index('actor_worker',
                                                     0) == 'idle':
                    worker = manager.get_worker_by_index('actor_worker', 0)
                    worker.set_train.remote()

                    worker.set_weight.remote(
                        individuals[critic_training_index], name='main.actor')
                    worker.set_weight.remote(
                        individuals[critic_training_index],
                        name='target.actor')

                    training_kwargs['learn_critic'] = True
                    training_kwargs['learn_actor'] = False
                    training_kwargs['reset_optim'] = False
                    training_kwargs['batches'] = int(prev_actor_steps /
                                                     self.rl_population)

                    manager.new_job('train',
                                    specific_worker=worker,
                                    job_name='actor_worker',
                                    job_setting=None,
                                    **training_kwargs)

                result = manager.wait(name='actor_worker', remove=False)
                if result is not None:
                    finished_job, finished_job_id, finished_worker_dict = result
                    finished_worker = finished_worker_dict['worker']
                    finished_worker.set_eval.remote()

                    critic_training_index += 1
                    manager.done(finished_worker_dict)

                    if critic_training_index >= self.rl_population:
                        # Critic done: broadcast its weights to all actor
                        # workers and move to actor training.
                        for idx in range(self.rl_population):
                            individuals_queue.append(idx)

                        critic_weight = ray.get(
                            finished_worker.get_weight.remote(
                                name=critic_names))
                        critic_weight_obj = ray.put(critic_weight)
                        set_critic_weight_obj = [
                            actor_worker.set_weight.remote(critic_weight_obj,
                                                           name=critic_names)
                            for actor_worker in self.actor_workers
                        ]
                        ray.get(set_critic_weight_obj)
                        algorithm_state = 'actor_training'
                        set_train_obj = [
                            worker.set_train.remote()
                            for worker in self.actor_workers
                        ]
                        ray.get(set_train_obj)

            if algorithm_state == 'actor_training':
                if len(individuals_queue) == 0 and manager.num_running_worker(
                        'actor_worker') == 0:
                    # All RL individuals trained: switch to evaluation of the
                    # whole population.
                    algorithm_state = 'actor_evaluating'
                    set_train_obj = [
                        worker.set_eval.remote()
                        for worker in self.actor_workers
                    ]
                    ray.get(set_train_obj)

                    for idx in range(self.population_size):
                        individuals_queue.append(idx)

                elif manager.num_idle_worker('actor_worker') > 0 and len(
                        individuals_queue) > 0:
                    individual_idx = individuals_queue.popleft()
                    worker, worker_idx = manager.get_idle_worker(
                        'actor_worker')
                    worker.set_weight.remote(individuals[individual_idx],
                                             name='main.actor')
                    worker.set_weight.remote(individuals[individual_idx],
                                             name='target.actor')
                    ray.get(worker.set_train.remote())

                    training_kwargs['learn_critic'] = False
                    training_kwargs['learn_actor'] = True
                    training_kwargs['reset_optim'] = True
                    training_kwargs['batches'] = int(prev_actor_steps)
                    training_kwargs['individual_id'] = individual_idx
                    manager.new_job(
                        'train',
                        job_name='actor_worker',
                        job_setting={'individual_idx': individual_idx},
                        **training_kwargs)

                result = manager.wait(name='actor_worker',
                                      remove=False,
                                      timeout=0)
                if result is not None:
                    finished_job, finished_job_id, finished_worker_dict = result
                    finished_worker = finished_worker_dict['worker']

                    # Write trained actor weights back into the population.
                    finished_individual = finished_worker_dict['setting'][
                        'individual_idx']
                    trained_weight = ray.get(
                        finished_worker.get_weight.remote(name='main.actor'))
                    individuals[finished_individual] = trained_weight
                    manager.done(finished_worker_dict)

            if algorithm_state == 'actor_evaluating':
                if len(individuals_queue) == 0 and manager.num_running_worker(
                        'actor_worker') == 0:
                    algorithm_state = 'population_tell'

                elif manager.num_idle_worker('actor_worker') > 0 and len(
                        individuals_queue) > 0:
                    individual_idx = individuals_queue.popleft()
                    worker, worker_idx = manager.get_idle_worker(
                        'actor_worker')
                    worker.set_weight.remote(individuals[individual_idx],
                                             name='main.actor')
                    # worker.set_weight.remote(individuals[individual_idx], name='target.actor')

                    # Use random actions until the initial exploration budget
                    # is consumed.
                    random_action = False if total_steps >= self.args.initial_steps else True

                    manager.new_job(
                        'rollout',
                        job_name='actor_worker',
                        job_setting={'individual_idx': individual_idx},
                        random_action=random_action,
                        eval=False,
                        mid_train=False)

                result = manager.wait(name='actor_worker',
                                      remove=False,
                                      timeout=0)
                if result is not None:
                    finished_job, finished_job_id, finished_worker_dict = result
                    finished_worker = finished_worker_dict['worker']

                    finished_individual = finished_worker_dict['setting'][
                        'individual_idx']
                    episode_t, episode_reward = ray.get(finished_job_id)
                    results[finished_individual] = episode_reward

                    manager.done(finished_worker_dict)

                    total_steps += episode_t
                    current_actor_step += episode_t
                    eval_steps += episode_t
                    episode_count += 1

                    self.summary.add_scalar('train/individuals',
                                            episode_reward, total_steps)
                    if episode_reward > max_reward:
                        max_reward = episode_reward
                        self.summary.add_scalar('train/max', max_reward,
                                                total_steps)

            if algorithm_state == 'population_tell':
                self.population.tell(individuals, results)
                elapsed = (datetime.datetime.now() - init_time).total_seconds()
                # Color each result by its value within the env's reward range.
                result_str = [
                    prColor(f'{result:.2f}',
                            fore=prValuedColor(result, env_min, env_max, 40,
                                               "#600000", "#00F0F0"))
                    for result in results
                ]

                result_str = ', '.join(result_str)

                self.logger.log(
                    f'Total step: {total_steps}, time: {elapsed:.2f} s, '
                    f'max_reward: ' +
                    prColor(f'{max_reward:.3f}',
                            fore=prValuedColor(max_reward, env_min, env_max,
                                               40, "#600000", "#00F0F0")) +
                    f', results: {result_str}')

                # Next generation's training batch count scales with the
                # steps collected this generation.
                prev_actor_steps = current_actor_step
                algorithm_state = 'mean_eval'
                # algorithm_state = 'population_ask'

            if algorithm_state == 'mean_eval':
                mean_weight, var_weight = self.population.get_mean()
                worker, worker_idx = manager.get_idle_worker('actor_worker')
                ray.get(
                    worker.set_weight.remote(mean_weight, name='main.actor'))
                manager.new_job('rollout',
                                job_name='actor_worker',
                                job_setting=None,
                                random_action=False,
                                eval=True,
                                mid_train=False)
                # timeout=None: block until the mean-eval rollout finishes.
                result = manager.wait(name='actor_worker',
                                      remove=False,
                                      timeout=None)
                if result is not None:
                    finished_job, finished_job_id, finished_worker_dict = result
                    finished_worker = finished_worker_dict['worker']

                    eval_t, eval_reward = ray.get(finished_job_id)
                    manager.done(finished_worker_dict)

                    if eval_reward > eval_max_reward:
                        eval_max_reward = eval_reward
                        self.summary.add_scalar('test/max', eval_reward,
                                                total_steps)

                    self.summary.add_scalar('test/mu', eval_reward,
                                            total_steps)

                    algorithm_state = 'population_ask'

            # Periodic full evaluation every 50k collected steps.
            if eval_steps >= 50000:
                eval_t, eval_reward = self.run()
                self.logger.log(f'Evaluation: {eval_reward}')
                eval_steps = 0
Esempio n. 28
0
    exit(0)


def parse_options():
    """Handle -h/-v flags, validate the remaining options, and return the
    positional command-line arguments.

    NOTE(review): the optstring "h:v" declares -h as taking an argument;
    bare -h is intercepted by the explicit check above getopt, so this is
    preserved as-is — confirm intent before changing.
    """
    if '-h' in sys.argv:
        usage()
    elif '-v' in sys.argv:
        version()
    try:
        _opts, positional = getopt.getopt(sys.argv[1:], "h:v")
    except getopt.GetoptError as err:
        om.error("Invalid parameter({})\n".format(str(err)))
        exit(1)
    return positional


if __name__ == '__main__':
    # Run relative to the script's own directory so bundled resources resolve.
    os.chdir(os.path.dirname(os.path.abspath(__file__)))

    # Expose the managers globally via builtins so other modules can use
    # them without imports (config, output, exploits).
    builtins.config = ConfigManager()
    builtins.om = OutputManager()
    builtins.em = ExploitsManager()

    banner()
    options = parse_options()

    # Main interactive loop.
    manager = Manager()
    manager.run()

    exit(0)
Esempio n. 29
0
    def __init__(self, eth_server: str, abi_path: str,
                 pandora: str,
                 node: str,
                 ipfs_server: str, ipfs_port: int, data_dir: str):
        """Initialize the Pandora broker daemon thread.

        Args:
            eth_server: Ethereum server endpoint (stored for later connection).
            abi_path: Path to the contract ABI definitions.
            pandora: Pandora main contract address.
            node: This worker-node's contract address.
            ipfs_server: IPFS server host.
            ipfs_port: IPFS server port.
            data_dir: Local directory used for IPFS file storage.
        """
        Broker.get_instance()
        Thread.__init__(self, daemon=True)

        # Initializing logger object
        self.logger = logging.getLogger("Broker")
        self.logger.setLevel(logging.INFO)
        self.logger.addHandler(LogSocketHandler.get_instance())

        self.manager = Manager.get_instance()
        self.mode = self.manager.launch_mode

        # Saving starter configs
        self.eth_server = eth_server
        self.abi_path = abi_path
        self.pandora = pandora
        self.node = node
        self.ipfs_server = ipfs_server
        self.ipfs_port = ipfs_port
        self.data_dir = data_dir

        # initial filtering current block number
        self.current_block_number = 0

        # Init empty container for pandora
        self.pandora_container = None
        # Init empty container for cognitive job manager
        self.job_controller = None
        self.job_controller_address = None
        self.job_controller_container = None

        # Init empty containers for worker node
        self.worker_node_container = None
        self.worker_node_state_machine = None
        self.worker_node_event_thread = None

        # Init empty containers for job
        self.job_id_hex = None
        self.job_container = None
        self.job_state_machine = None
        self.job_state_event_thread = None
        self.job_state_thread_flag = True

        # Init empty jobs and processor
        self.jobs = {}
        self.processors = {}

        # init connectors
        self.eth = EthService(strategic=EthConnector())
        self.ipfs = IpfsService(strategic=IpfsConnector())

        # init progress delegate
        self.send_progress = False
        self.start_training_time = None
        self.finish_training_time = None
        self.current_epoch = None
        self.start_epoch_time = None
        self.finish_epoch_time = None
        self.time_per_epoch = None
        self.sends_count = 1
        self.send_progress_interval = 300  # set to 5min by default

        self.local_password = None
        self.key_tool = KeyTools()
        print('Pandora broker initialize success')
Esempio n. 30
0
 def __init__(self, listener):
     """Bind to the global Manager singleton and remember the listener."""
     singleton = Manager.get_instance()
     self.manager = singleton
     self.mode = singleton.launch_mode
     self.listener = listener
Esempio n. 31
0
 def setUp(self):
     """This method is run once before _each_ test method is executed"""
     fresh_manager = Manager('user', 'password', config=self.config_default)
     fresh_manager.fileSystemModule = FileSystemModuleStub()
     self.man = fresh_manager
Esempio n. 32
0
class TestSecuritySelective(object):
    """Tests for selective per-file encryption handling in Manager.

    Each test builds a fresh Manager over a stubbed file system and an
    in-memory database, then exercises mark/unmark-for-encryption flows.
    """
    def __init__(self):
        super(TestSecuritySelective, self).__init__()
        self.config_default = {"sync_folder_name": "./test/sync_folder", "database": ":memory:"}
        self.man = None

    @classmethod
    def setup_class(klass):
        """This method is run once for each class before any tests are run"""

    @classmethod
    def teardown_class(klass):
        """This method is run once for each class _after_ all tests are run"""

    def setUp(self):
        """This method is run once before _each_ test method is executed"""
        self.man = Manager('user', 'password', config=self.config_default)
        self.man.fileSystemModule = FileSystemModuleStub()

    def teardown(self):
        """This method is run once after _each_ test method is executed"""
        self.man.databaseManager.cleanDatabase()

        self.man = None

    def test_manager(self):
        # Sanity check: the fixture produced a usable Manager.
        assert_true(self.man)

    def test_markEncryption(self):
        # A file marked for encryption is stored encrypted remotely but
        # remains plaintext locally.
        self.man.newAccount('dropbox_stub', 'user')
        filename = 'test_file.txt'
        self.man.fileSystemModule.createFile(filename)  # create a file
        self.man.updateLocalSyncFolder()

        self.man.markForEncription(filename)
        self.man.fileSystemModule.createFile(filename)  # modify the file

        self.man.updateLocalSyncFolder()  # it should try to encrypt

        remoteFile = self.man.cuentas[0].getFile(filename)
        localFile = self.man.fileSystemModule.openFile(filename)
        assert_equal(localFile.read(), b'text')
        assert_not_equal(remoteFile.read(), b'text')

    def test_unmarkEncryption(self):
        # An unmarked file is uploaded as-is: remote and local both plaintext.
        self.man.newAccount('dropbox_stub', 'user')
        filename = 'test_file.txt'
        self.man.fileSystemModule.createFile(filename)  # create a file
        self.man.updateLocalSyncFolder()

        self.man.unmarkForEncription(filename)
        self.man.fileSystemModule.createFile(filename)  # modify the file

        self.man.updateLocalSyncFolder()  # it should try to encrypt

        remoteFile = self.man.cuentas[0].getFile(filename)
        localFile = self.man.fileSystemModule.openFile(filename)
        assert_equal(remoteFile.read(), b'text')
        assert_equal(localFile.read(), b'text')

    def test_unmarked_decryption(self):
        # A remote-only encrypted file is decrypted on sync and then gets
        # marked for encryption automatically.
        self.man.newAccount('dropbox_stub', 'user')
        filename = 'test_file.txt'

        self.man.fileSystemModule.createFile(filename)  # temporally create a file
        # "text" -('user', 'password')->
        ctext = simplecrypt.encrypt(self.man.securityModule.hashPassword('user', 'password'), b'text')
        stream = tempfile.TemporaryFile()
        stream.write(ctext)
        stream.seek(0)
        self.man.cuentas[0].uploadFile(filename, 'different_revision', stream)  # we upload it to the second account
        self.man.fileSystemModule.remove(filename)  # we have the file only in the remote

        self.man.updateLocalSyncFolder()  # it should try to decrypt, and the file get marked to encrypt

        assert_true(self.man.databaseManager.shouldEncrypt(filename))

        remoteFile = self.man.cuentas[0].getFile(filename)
        localFile = self.man.fileSystemModule.openFile(filename)
        assert_equal(remoteFile.read(), ctext)
        assert_equal(localFile.read(), b'text')

    def test_deleteAccount_decrypt(self):
        # Deleting an account decrypts its remote files and clears the local
        # sync folder.
        self.test_markEncryption()
        filename = 'test_file.txt'

        account = self.man.cuentas[0]
        self.man.deleteAccount(account)

        remoteFile = account.getFile(filename)
        fileList = self.man.fileSystemModule.getFileList()

        expected_fileList = []

        compareFileLists(fileList, expected_fileList)
        assert_equal(remoteFile.read(), b'text')
Esempio n. 33
0
def register_student():
    """Show the registration banner and create a student via an admin Manager."""
    print('欢迎注册'.center(20, '-'))  # tuition is assumed paid by default O(∩_∩)O
    admin = Manager('admin')
    admin.create_student()
 def emit(self, record):
     """Forward a log record to the web-socket listener when sockets are on."""
     if Manager.get_instance().web_socket_enable != 'True':
         return
     # Imported lazily to avoid a circular import at module load time.
     from service.webapi.web_socket_listener import WebSocket
     WebSocket.get_instance().update_log_record(record)
Esempio n. 35
0
 def show_classes(self):
     """Show the classes list (teacher view) for this user."""
     viewer = Manager(self.name)
     viewer.show_teacher(self.name, "classes")