Example #1
    def handle(self, *args, **options):
        
        if options['daemonize']:
            daemonize()
        
        settings.LOG_TO_STREAM = True        
            
        r = redis.Redis(connection_pool=settings.REDIS_FEED_POOL)
        
        if options['initialize']:
            feeds = Feed.objects.filter(num_subscribers__gte=1).order_by('?')
            print 'Query feeds done with num of feeds', len(feeds)
            r.ltrim('freeze_feeds', 1, 0)

            pipeline = r.pipeline()
            for feed in feeds:
                pipeline.rpush('freeze_feeds', feed.pk)
            pipeline.execute()
            print 'Initialize freeze_feeds done'

        feed_id = r.lpop('freeze_feeds')
        while feed_id:
            try:
                frozen_num = MStory.freeze_feed(int(feed_id))
                if frozen_num > 0:
                    r.rpush('freeze_feeds', feed_id)
            except Exception, e:
                logging.error(str(e) +
                            traceback.format_exc() + '\n' +
                            'Error from: freeze_feeds\n')
            feed_id = r.lpop('freeze_feeds')
Example #2
def current(roster, cli, config):
    '''Print information on the current shift in the roster.'''
    # roster.current returns a *list* of all the people on duty
    shifts = roster.current
    if len(shifts) == 1:
        [current] = shifts
    elif len(shifts) == 0:
        log.error('Nobody is on duty.')
        now = datetime.datetime.now(tz=pytz.UTC)
        current = Shift(now, now, None, None, None)
    else:
        log.error('Several people were on duty; picking a random one.')
        for counter, shift in enumerate(shifts, 1):
            log.error('On duty #{}: {}'.format(counter, shift))
        current = choice(shifts)
    # Replace missing fields with fallback ones
    if not current.email:
        current.email = config['fallback.email']
        if current.name is not None:
            log.error('Missing email address for "{}"'.format(current.name))
    if not current.phone:
        current.phone = config['fallback.phone']
        if current.name is not None:
            log.error('Missing phone number for "{}"'.format(current.name))
    current.name = current.name or 'Fallback Contact Details'
    # Compute what fields to output
    fields = ('start', 'end', 'name', 'email', 'phone')
    mask = []
    for attr_name in fields:
        mask.append(cli[attr_name])
    if not any(mask):
        mask = [True] * 5  # No explicit fields means all fields
    bits = [val for val, flag in zip(current.as_string_tuple, mask) if flag]
    print('\t'.join(bits))
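For context, here is a hypothetical invocation of current(); the cli mask selects only name and email, and the roster and config objects are stand-ins, not from the original project:

cli = {'start': False, 'end': False, 'name': True, 'email': True, 'phone': False}
config = {'fallback.email': 'oncall@example.com', 'fallback.phone': '+1-555-0100'}
current(roster, cli, config)  # prints e.g. "Alice\talice@example.com"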
Example #3
def load_from_rawxls(file_path):
    """从 xlsx 文件恢复数据到 lang.csv 的格式

    xlsx 文件可能是 <name, desc> 模式,也可能是 list<text> 模式。

    Args:
        file_path (str): xlsx 文件路径

    Returns:
        category (str): category from lang_def
        csv_data (list[str]): list of [file_id, unknown, index, text],不带前导0
    """

    data = load_xls(file_path)[1:]
    # 判断文件模式
    id_split = data[0][1].split('-')
    if len(id_split) > 3 and id_split[-1].isdigit() and id_split[-2].isdigit() and id_split[-3].isdigit():
        # list of text
        return load_from_list_category(data)
    elif len(id_split) > 1 and id_split[-1].isdigit():
        # name_desc
        return load_from_pair_category(data)
    else:
        log.error('load %s failed.' % file_path)
        return '', []
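To illustrate the mode check above with made-up IDs: three trailing numeric fields mark a list<text> file, while a single trailing digit marks a <name, desc> file.

id_split = 'loading_tips-12-3-1'.split('-')  # hypothetical list<text> id
assert len(id_split) > 3 and all(p.isdigit() for p in id_split[-3:])
id_split = 'item_name-1'.split('-')          # hypothetical <name, desc> id
assert len(id_split) > 1 and id_split[-1].isdigit()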
Example #4
File: parser.py Project: muke5hy/macaw
    def _is_valid_link(self, link):
        """
        Return True if given link is non document, Since this is not a perfect way to check
        but it avoids a call to server. 
        """

        # Check ONLY_ROOTDOMAIN

        scheme, netloc, path, params, query, fragment = urlparse(link)

        try:
            if get_tld(self.base_url) == get_tld(link) and not ONLY_ROOTDOMAIN:
            # if get_tld(self.base_url) == get_tld(link):
                return False
        except Exception as e:
            log.error(str(e), self.base_url, link)


        # Need to add more
        DOC_EXT = [".pdf", ".xmls", ".docx", ".odt"]

        try:

            urlPath = [i for i in (path.split('/')) if i]

            file_name = urlPath[-1]
            ext = file_name.split(".")[-1]
        except IndexError:
            # It's just a root URL
            return True
        return ext not in DOC_EXT
Example #5
File: test.py Project: bweir/packthing2
def isType(key, value, expected):
    try:
        assert isinstance(value, expected)
    except AssertionError:
        log.error("Key '"+key+"' is of type '"+type(value).__name__+"';",
                   "expected '"+expected.__name__+"'.",
                   "(value: "+str(value)+")")
Example #6
def required(a, group=None, name=None):
    k = key(a, group)
    if k.required:
        if name:
            log.error("Missing '"+a+"' key in "+group+" '"+name+"'")
        else:
            log.error("Missing key '"+a+"' in group '"+group+"'")
Example #7
File: app.py Project: einalex/todo
    def push(self):
        """Push todo to gist.github.com"""
        github = Github()
        gist_id = GistId().get()
        token = GithubToken().get()

        github.login(token)

        if not self.todo.name:
            name = "Todo"
        else:
            name = self.todo.name

        files = {
            name: {
                "content": self.todo_content
            }
        }

        log.info(
            "Pushing '%s' to https://gist.github.com/%s .." % (name, gist_id)
        )
        response = github.edit_gist(gist_id, files=files)

        if response.status_code == 200:
            log.ok("Push succeeded.")
        elif response.status_code == 401:
            log.warning("Github token out of date, emptying the old token")
            GithubToken().save('')  # empty the token!
            self.push()  # and re-push
        else:
            log.error("Push failed. %d" % response.status_code)
Example #8
File: app.py Project: einalex/todo
 def get_task_by_id(self, index):
     """return task object by its index, if not found, fatal error."""
     index -= 1
     tasks = self.todo.tasks
     if index >= len(tasks):
         log.error("Task not found.")
     return tasks[index]
Example #9
def load_from_langxls(file_path, lang, need_check=False, load_ui=False):
    """从 xlsx 文件中读取 lang.csv 的翻译

    xlsx 文件可能是 <name, desc> 模式,也可能是 list<text> 模式。

    Args:
        file_path (str): xlsx 文件路径
        lang (str): "zh"/"en", 读中文还是英文
        need_check (bool): 是否检查
        load_ui (bool): 是否读取ui汉化文件的内容

    Returns:
        category (str): category from lang_def
        translated_data (list[str]): list of [file_id, unknown, index, text],不带前导0
    """

    data = load_xls(file_path)[1:]
    # 判断文件模式
    id_split = data[0][1].split('-')
    if len(id_split) > 3 and id_split[-1].isdigit() and id_split[-2].isdigit() and id_split[-3].isdigit():
        # list of text
        return load_from_list_category(data, lang, need_check)
    elif len(id_split) > 1 and id_split[-1].isdigit():
        # name_desc
        return load_from_pair_category(data, lang, need_check)
    elif len(id_split) == 1 and '_' in id_split[0] \
            and id_split[0][0].isalpha() and id_split[0][-1].isalpha():
        if load_ui:
            return load_from_ui_fake(data, lang, need_check)
        else:
            log.info('skip ui file %s.' % file_path)
            return '', []
    else:
        log.error('load %s failed.' % file_path)
        return '', []
Example #10
File: app.py Project: einalex/todo
    def get(self):
        """
          call this method to get a token::

            token = GithubToken().get()

          what the get() does:
            1) read from "~/.todo/token"
            2) check if the token read is empty.
               (yes)-> 1) if empty,
                          ask user for user&passwd to access api.github.com
                        2) fetch the token, set to this instance and store it.
            3) return the token (a string)
        """
        if self.is_empty:
            user = ask_input.text("Github user:")
            # the password prompt was redacted in the source; this call is an assumption
            password = ask_input.text("Password for %s:" % user)

            log.info("Authorizing to github.com..")
            response = Github().authorize(user, password)

            if response.status_code == 201:
                # 201 created
                log.ok("Authorization succeeded.")
                # get token from dict
                token = response.json()["token"]
                self.save(token)
            else:
                log.error("Authorization failed. %d" % response.status_code)
        return self.content
Example #11
 def fetch_request(self):
     try:
         proxies = {"http": settings.COW_PROXY_HANDLER, "https": settings.COW_PROXY_HANDLER}
         r = requests.get(self.story.story_permalink, headers=self.headers, verify=False, proxies=proxies)
         # r = requests.get(self.story.story_permalink, headers=self.headers, verify=False)
     except (
         AttributeError,
         SocketError,
         requests.ConnectionError,
         requests.models.MissingSchema,
         requests.sessions.InvalidSchema,
     ), e:
         logging.user(self.request, "~SN~FRFailed~FY to fetch ~FGoriginal text~FY: %s" % e)
         logging.error(
             "fetch_request with:ERROR \n"
             + str(e)
             + "  feed_id:"
             + str(self.story.story_feed_id)
              + "  story_link:"
             + str(self.story.story_permalink)
         )
         if settings.SEND_ERROR_MAILS:
             mail_admins(
                 "Error in text_importer fetch_request",
                 str(e)
                 + "\n"
                 + "  feed_id:"
                 + str(self.story.story_feed_id)
                 + "\n"
                  + "  story_link:"
                 + str(self.story.story_permalink)
                 + "\n"
                 + traceback.format_exc(),
             )
         return
Example #12
    def run(self, target=None, tid=None):
        if target is None:
            log.critical("Please set --target param")
            sys.exit()
        if tid is None:
            log.critical("Please set --tid param")
            sys.exit()

        # Statistic Code
        p = subprocess.Popen(
            ['cloc', target], stdout=subprocess.PIPE)
        (output, err) = p.communicate()
        rs = output.split("\n")
        for r in rs:
            r_e = r.split()
            if len(r_e) > 3 and r_e[0] == 'SUM:':
                t = CobraTaskInfo.query.filter_by(id=tid).first()
                if t is not None:
                    t.code_number = r_e[4]
                    try:
                        db.session.add(t)
                        db.session.commit()
                        log.info("Statistic code number done")
                    except Exception as e:
                        log.error("Statistic code number failed: " + str(e.message))
Example #13
 def fetch(self, skip_save=False):
     # =====================
     # =Modified by SongJun=
     # =====================
     try:
         request = urllib2.Request(self.story.story_permalink, headers=self.headers)
         opener = urllib2.build_opener(
             urllib2.ProxyHandler({"http": settings.COW_PROXY_HANDLER, "https": settings.COW_PROXY_HANDLER})
         )
         # opener = urllib2.build_opener()
         text = opener.open(request).read()
         # logging.info(text)
         # Add by Xinyan Lu: some websites always return gzip files
         if text[:6] == "\x1f\x8b\x08\x00\x00\x00":
             text = gzip.GzipFile(fileobj=cStringIO.StringIO(text)).read()
     except httplib.IncompleteRead as e:
         text = e.partial
     except (Exception), e:
         logging.user(self.request, "~SN~FRFailed~FY to fetch ~FGoriginal text~FY: %s" % e)
         logging.error(
             "error fetch_request"
             + str(e)
             +
             # '  feed_id:'+str(self.story.story_feed_id)+\
             "  stroy_link:"
             + str(self.story.story_permalink)
         )
         return
Example #14
 def on_receive(self, msg):
     try:
         return getattr(self, msg['func'])(msg['msg'])
     except Exception, e:
         log.error('Get error: %s', e, exc_info=True)
         # TODO fix return
         return None
Example #15
    def run(self, feed_pks, **kwargs):
        try:
            from apps.rss_feeds.models import Feed
            #from apps.statistics.models import MStatistics
            r = redis.Redis(connection_pool=settings.REDIS_FEED_POOL)
            #mongodb_replication_lag = int(MStatistics.get('mongodb_replication_lag', 0))
            #compute_scores = bool(mongodb_replication_lag < 10)

            options = {
            #    'quick': float(MStatistics.get('quick_fetch', 0)),
            #    'compute_scores': compute_scores,
            #    'mongodb_replication_lag': mongodb_replication_lag,
            }

            if not isinstance(feed_pks, list):
                feed_pks = [feed_pks]

            for feed_pk in feed_pks:
                feed = Feed.get_by_id(feed_pk)
                if not feed or feed.pk != int(feed_pk):
                    logging.info(" ---> ~FRRemoving feed_id %s from tasked_feeds queue, points to %s..." % (feed_pk, feed and feed.pk))
                    r.zrem('tasked_feeds', feed_pk)
                if feed:
                    feed.update(**options)
        except Exception, e:
            logging.error(str(e) +
                traceback.format_exc() + '\n' +
                'error from: UpdateFeeds\n')
            if settings.SEND_ERROR_MAILS:
                mail_admins("Error in UpdateFeeds", str(e) + '\n' + traceback.format_exc())
Example #16
def create_zip(archive, files):
    '''Creates a zip file containing the files being backed up.'''
    import zipfile
    from utils.misc import add_file_hash

    try:
        # zipfile always follows links
        with zipfile.ZipFile(archive, 'w') as zipf:
            zipf.comment = 'Created by s3-backup'
            for f in files:
                f = f.strip()
                if os.path.exists(f):
                    zipf.write(f)
                    add_file_hash(archive, f)
                    log.debug('Added %s.' % f)
                else:
                    log.error('%s does not exist.' % f)
                
                if zipf.testzip() is not None:
                    log.error('An error occurred creating the zip archive.')
    except zipfile.BadZipfile:
        # I assume this only happens on reads? Just in case...
        log.critical('The zip file is corrupt.')
    except zipfile.LargeZipFile:
        log.critical('The zip file is greater than 2 GB.'
                ' Enable zip64 functionality.')
Example #17
File: dottm.py Project: berenm/gentulu
  def parse(self, file_name):
    errors = []

    patterns = {}
    patterns['type'] = r'^(?P<name>\w+),\*,\*,\s*(?P<type>(?:struct |unsigned |const )?[*\w]+(?: \*| const)?),\*,\*,?$'
    patterns['comment'] = r'^#.*$'

    with open(file_name, 'r') as dottm_file:
      for line in [ l.strip() for l in dottm_file if len(l.strip()) > 0 ]:
        for k, regex in patterns.items():
          match = re.match(regex, line)
          if match:
            if k == 'type':
              type_name = match.group('type')
              if type_name == '*':
                type_name = match.group('name')

              type_mappings[match.group('name')] = type_name

            break
        else:
          errors.append('U: ' + line)

    for e in errors:
      log.error(file_name + ': ' + e)
Example #18
File: spider.py Project: muke5hy/macaw
    def get_links(self, url, page_depth=0):
        """
        Get the links from url
        """
        if (self._is_url(url) and 
            page_depth <= MAX_DEPTH and 
            not self._is_crawled(url)):

            try:
                redis.hset(URLS_CRAWLLING, base64.b64encode(url.encode('UTF-8')), "")

                response = requests.get(url)

                if (response.status_code//100 == 2 and
                    self._is_html(response)):

                    redis.hset(URLS_CRAWLLING, base64.b64encode(url.encode('UTF-8')), response.content)

                    status, links = Parser(self._base_url(url)).parse_href(response.content)

                    log.debug("Page Depth {0} URL {1} Total count {2}".format(page_depth, url, len(links)))
                    for link in links:
                        self.get_links(link, page_depth+1)

            except Exception as e:
                log.error(str(e))
                raise
Example #19
def module(name, parent=None):
    if parent is None:
        return importlib.import_module(name)
    else:
        if not isinstance(parent, ModuleType):
            log.error("Module '"+str(parent)+"' is not a module!")

        return importlib.import_module(parent.__name__+'.'+name)
Example #20
File: test.py Project: bweir/packthing2
def isMissingKeys(keylist):
    if len(_missingkeys):
        msg = "The following keys are missing from the packfile!\n"
        for k in _missingkeys:
            msg += "\n- %-16s %-50s (e.g. %s)" % (k,
                    keylist.description(k),
                    keylist.example(k))
        log.error(msg)
Example #21
 def verify(self, receipt=None):
     trans_receipt = receipt
     trans = simplejson.loads(trans_receipt)
     result = True
     result_code = PurchaseResultCode.Value("UNKNOWN")
     try:
         resp = self.do_verify(trans_receipt)
     except Exception, e:
         log.error("IAP - verify exception: %s" % str(e))
Example #22
def add_root(tree):
    if tree.group is not None:
        log.error("Not root tree:", tree.name)

    for t in tree_table:
        if t.name == tree.name:
            log.note("Root tree is defined twice:", tree.name)

    tree_table.append(tree)
Example #23
def run_server():
    host = settings.LISTEN_HOST
    port = settings.LISTEN_PORT
    #log.info('version=%s', settings.GAME_VERSION)
    try:
        log.info('serving on %s:%s...', host, port)
        WSGIServer((host, port), app).serve_forever()
    except Exception, e:
        log.error('Server', exc_info=e)
Example #24
    def run(self, target=None, tid=None, pid=None):
        if target is None:
            log.critical("Please set --target param")
            sys.exit()
        if tid is not None:
            task_id = tid
            # Start Time For Task
            t = CobraTaskInfo.query.filter_by(id=tid).first()
            if t is None:
                log.critical("Task id doesn't exist.")
                sys.exit()
            if t.status not in [0, 1]:
                log.critical("Task already scanned.")
                sys.exit()
            t.status = 1
            t.time_start = int(time.time())
            t.updated_at = time.strftime('%Y-%m-%d %X', time.localtime())
            try:
                db.session.add(t)
                db.session.commit()
            except Exception as e:
                log.error("Set start time failed: " + str(e.message))
        else:
            task_id = None

        target_type = self.parse_target(target)
        if target_type is False:
            log.error("""
                Git Repository: must end with .git
                SVN Repository: can be http:// or https://
                Directory: must be a local directory
                File: must be a single file or a tar.gz/zip/rar compressed file
                """)
        from engine import static
        s = static.Static(target, task_id=task_id, project_id=pid)
        if target_type == 'directory':
            s.analyse()
        elif target_type == 'compress':
            from utils.decompress import Decompress
            # Load a compressed file; only tar.gz, rar, and zip are supported.
            dc = Decompress(target)
            # Decompress it; this creates a directory named "222_test.tar".
            dc.decompress()
            s.analyse()
        elif target_type == 'file':
            s.analyse()
        elif target_type == 'git':
            from pickup.GitTools import Git
            g = Git(target, branch='master')
            g.get_repo()
            if g.clone() is True:
                s.analyse()
            else:
                log.critical("Git clone failed")
        elif target_type == 'svn':
            log.warning("SVN repositories are not supported")
Example #25
File: app.py Project: Willyfrog/compra
def call_action(module, action_name, method="GET", get_p=None, post_p=None, item=None):
    """
    Intenta llamar a la funcion y pasar los parametros apropiados
    """
    try:
        a = getattr(module, action_name)
    except AttributeError, e:  # diferencia entre fallo por modulo o accion
        log.error("module %s doesn't have a %s function" % (module.__name__, action_name))
        log.error("%s" % e)
        redirect("/400")
Example #26
File: app.py Project: Willyfrog/compra
def get_module(module_name):
    """
    recupera el modulo
    """
    try:
        m = import_module(module_name)
        log.debug("modulo %s cargado" % module_name)
    except ImportError, e:
        log.error("No se pudo importar %s: %s" % (module_name, e))
        m = None
Example #27
def parse_message(name, body):
    #cls = globals()[name]
    cls = getattr(protocol_pb2, name)
    event = None
    try:
        event = cls()
        msg = body
        Base64 = base64.b64decode(msg)
        event.ParseFromString(Base64)
    except Exception, e:
        log.error('Error parse_message: %s', e)
Example #28
    def setPriority(self, idx, priority):
        query = QSqlQuery(self.db)
        query.prepare("update TodoList set priority = ? where id = ?")
        query.addBindValue(priority)
        query.addBindValue(idx)
        ok = query.exec_()
        query.finish()

        if not ok:
            log.error("Update TodoList Priority Failed: " + query.lastError().text())
        return ok
Example #29
    def setContent(self, idx, content):
        query = QSqlQuery(self.db)
        query.prepare("update TodoList set content = ? where id = ?")
        query.addBindValue(content)
        query.addBindValue(idx)
        ok = query.exec_()
        query.finish()

        if not ok:
            log.error("Update TodoList Content Failed: " + query.lastError().text())
        return ok
Example #30
 def test_encode_error(self):
     for i, (data, expected) in enumerate(test_data_encode_error):
         try:
             encode(data)
         except expected:
             log.error(
                 "**IGNORE ERROR LOG** This exception was raised by a test")
         except (Exception), e:
             debug_print(i, data, expected, e)
             raise
         else:
             debug_print(i, data, expected, 'NO EXCEPTION RAISED')
             assert False
Example #31
    def fit(self):
        self.alpha = -1 if self.alpha is None else self.alpha
        self.beta = -1 if self.beta is None else self.beta
        self.l = -1 if self.l is None else self.l
        self.c = -1 if self.c is None else self.c
        if self.distance == self.SIM_ASYMCOSINE and not (0 <= self.alpha <= 1):
            log.error(
                'Invalid parameter alpha in asymmetric cosine Similarity_MFD!')
            return
        if self.distance == self.SIM_TVERSKY and not (0 <= self.alpha <= 1
                                                      and 0 <= self.beta <= 1):
            log.error(
                'Invalid parameter alpha/beta in tversky Similarity_MFD!')
            return
        if self.distance == self.SIM_P3ALPHA and self.alpha is None:
            log.error('Invalid parameter alpha in p3alpha Similarity_MFD')
            return
        if self.distance == self.SIM_RP3BETA and self.alpha is None and self.beta is None:
            log.error('Invalid parameter alpha/beta in rp3beta Similarity_MFD')
            return
        if self.distance == self.SIM_SPLUS and not (
                0 <= self.l <= 1 and 0 <= self.c <= 1 and 0 <= self.alpha <= 1
                and 0 <= self.beta <= 1):
            log.error(
                'Invalid parameter alpha/beta/l/c in s_plus Similarity_MFD')
            return

        # compute and stores the Similarity_MFD matrix using one of the distance metric: S = R•R'
        if self.distance == self.SIM_COSINE:
            self._sim_matrix = sim.cosine(self.matrix,
                                          k=self.k,
                                          shrink=self.shrink,
                                          threshold=self.threshold,
                                          binary=self.implicit)
        elif self.distance == self.SIM_ASYMCOSINE:
            self._sim_matrix = sim.asymmetric_cosine(self.matrix,
                                                     k=self.k,
                                                     shrink=self.shrink,
                                                     threshold=self.threshold,
                                                     binary=self.implicit,
                                                     alpha=self.alpha)
        elif self.distance == self.SIM_JACCARD:
            self._sim_matrix = sim.jaccard(self.matrix,
                                           k=self.k,
                                           shrink=self.shrink,
                                           threshold=self.threshold,
                                           binary=self.implicit)
        elif self.distance == self.SIM_DICE:
            self._sim_matrix = sim.dice(self.matrix,
                                        k=self.k,
                                        shrink=self.shrink,
                                        threshold=self.threshold,
                                        binary=self.implicit)
        elif self.distance == self.SIM_TVERSKY:
            self._sim_matrix = sim.tversky(self.matrix,
                                           k=self.k,
                                           shrink=self.shrink,
                                           threshold=self.threshold,
                                           binary=self.implicit,
                                           alpha=self.alpha,
                                           beta=self.beta)
        elif self.distance == self.SIM_P3ALPHA:
            self._sim_matrix = sim.p3alpha(self.matrix,
                                           k=self.k,
                                           shrink=self.shrink,
                                           threshold=self.threshold,
                                           binary=self.implicit,
                                           alpha=self.alpha)
        elif self.distance == self.SIM_RP3BETA:
            self._sim_matrix = sim.rp3beta(self.matrix,
                                           k=self.k,
                                           shrink=self.shrink,
                                           threshold=self.threshold,
                                           binary=self.implicit,
                                           alpha=self.alpha,
                                           beta=self.beta)
        elif self.distance == self.SIM_SPLUS:
            self._sim_matrix = prep.normalize(sim.s_plus(
                self.matrix,
                k=self.k,
                shrink=self.shrink,
                threshold=self.threshold,
                binary=self.implicit,
                l=self.l,
                t1=self.alpha,
                t2=self.beta,
                c=self.c),
                                              norm='l2',
                                              axis=0)
        else:
            log.error('Invalid distance metric: {}'.format(self.distance))
        return self._sim_matrix
Example #32
 def recommend(self, userid, urm=None, N=10, filter_already_liked=True, with_scores=False, items_to_exclude=[]):
     if not userid >= 0:
         log.error('Invalid user id')
         return None
     return self.recommend_batch([userid], urm, N, filter_already_liked, with_scores, items_to_exclude)
Example #33
def toc(visitor, block, headers=None, relations=None):
    """
    Format:
        
        {% toc max_depth=2,class=multiple %}
        file1.md
        file2.md
        {% endtoc %}
    """
    s = []
    s.append('<div class="toc">\n')
    depth = int(block['kwargs'].get('max_depth', '1'))
    if 'class' in block['kwargs']:
        s.append('<ul class=%s>\n' % block['kwargs']['class'])
    else:
        s.append('<ul>\n')
    prev = {'prev': {}, 'next': {}, 'current': {}}

    for fname in block['body'].splitlines():
        hs = headers.get(fname, [])
        if not hs:
            log.error("File %s can't be found, will be skipped" % fname)
            continue

        def make_title(x):
            if x['id']:
                _id = '#' + x['id']
            else:
                _id = ''
            return '<li><a href="%s%s">%s</a></li>' % (x['link'], _id,
                                                       x['title'])

        _fname, _ext = os.path.splitext(fname)
        last = 1
        title = _fname
        for x in hs:
            level = x['level']

            # fetch the title; an article should have a single h1 heading
            if level == 1:
                title = x['title']

            if level > depth:
                continue
            if level == last:  #same
                s.append(make_title(x))
            elif level == last - 1:  #reindent
                s.append('</ul>\n')
                s.append(make_title(x))
                last -= 1
            elif level == last + 1:  #indent
                s.append('<ul>\n')
                s.append(make_title(x))
                last += 1
            else:
                pass
        for i in range(last, 1, -1):
            s.append('</ul>\n')

        #process prev and next
        c = {'link': _fname + '.html', 'title': title}
        current = relations[_fname] = {
            'prev': {},
            'next': {},
            'current': c,
        }
        p = prev['current']
        if p:
            current['prev'] = {'link': p['link'], 'title': p['title']}
        prev['next'] = c
        prev = current
    s.append('</ul>\n')
    s.append('</div>\n')
    return ''.join(s)
Example #34
    async def post(cls,
                   url,
                   params=None,
                   body=None,
                   headers=None,
                   encode_type='utf-8',
                   decode_type='utf-8',
                   parse_json=True,
                   timeout=30):
        """ HTTP POST 请求
        @param url 请求url
        @param params 请求的uri qurey参数
        @param body 请求的body参数
        @param headers 请求的header参数
        @param encode_type 请求body编码格式,默认使用utf-8编码
        @param decode_type 返回body解码格式,默认使用utf-8解码
        @param parse_json 是否解析返回body为json格式,默认为True
        @param timeout 请求超时时间,默认30秒
        @return data 返回的http body
        """

        if params:
            url = url_concat(url, params)

        if body:
            if not encode_type:
                pass
            elif encode_type == 'utf-8':
                body = json.dumps(body)
            else:
                body = urlencode(body, encoding=encode_type)
        http_client = AsyncHTTPClient()
        response = await http_client.fetch(url,
                                           method='POST',
                                           body=body,
                                           headers=headers,
                                           request_timeout=timeout)

        if response.code not in (200, 201, 202, 203, 204, 205, 206):
            logger.error('url:',
                         url,
                         'post data:',
                         body,
                         'response code:',
                         response.code,
                         'response body:',
                         response.body,
                         caller=cls)
            msg = '请求url失败: {url}'.format(url=url)
            raise exceptions.CustomException(msg=msg)

        if response.body:
            data = response.body

            if decode_type:
                data = data.decode(decode_type)

            if parse_json:
                return json.loads(data)
            else:
                return data
        else:
            return None
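A minimal caller sketch for the helper above; HttpUtil is an assumed name for the class that defines post(), and the URL and payload are illustrative:

async def create_order():
    # HttpUtil stands in for the class defining post() above
    data = await HttpUtil.post('https://api.example.com/orders',
                               params={'source': 'web'},
                               body={'sku': 'A100', 'qty': 2},
                               timeout=10)
    return data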
Example #35
def exec_test(config_data):

    add_test_info = AddTestInfo(11, 'salt key and salt key id')
    add_test_info.started_info()

    try:

        api_salt_key = APISaltKeyOps(**config_data)
        api_salt_key.get_salt_key()
        api_salt_key.get_salt_key_id()

        add_test_info.status('ok')

    except Exception, e:
        log.error('test error')
        log.error(e)
        add_test_info.status('error')

    add_test_info.completed_info()


if __name__ == '__main__':
    config_data = config.get_config()

    if not config_data['auth']:
        log.error('auth failed')

    else:
        exec_test(config_data)
Example #36
    def _test_ping_error(self):
        outgoing_query = OutgoingPingQuery(tc.CLIENT_ID)
        #outgoing_query.my_id = CLIENT_ID
        #outgoing_query.tid = tc.TID
        # TID and ARGS ID are None
        assert_raises(MsgError, outgoing_query.encode)
        log.error(
            "**IGNORE 2 ERROR LOGS** This exception was raised by a test")

        outgoing_query = OutgoingPingQuery()
        outgoing_query.my_id = tc.CLIENT_ID
        #outgoing_query.tid = tc.TID
        assert_raises(MsgError, outgoing_query.encode)
        log.error(
            "**IGNORE 2 ERROR LOGS** This exception was raised by a test")

        outgoing_query = OutgoingPingQuery()
        #outgoing_query.my_id = tc.CLIENT_ID
        outgoing_query.tid = tc.TID
        assert_raises(MsgError, outgoing_query.encode)
        log.error(
            "**IGNORE 2 ERROR LOGS** This exception was raised by a test")
        
        outgoing_query = OutgoingPingQuery()
        assert_raises(MsgError, outgoing_query.__setattr__, 'my_id', '')
        log.error(
            "**IGNORE 2 ERROR LOGS** This exception was raised by a test")
                
        outgoing_query = OutgoingPingQuery()
        outgoing_query.my_id = tc.CLIENT_ID
        outgoing_query.tid = 567
        data = outgoing_query.encode()
        assert_raises(MsgError, decode, data)
        log.error(
            "**IGNORE 2 ERROR LOGS** This exception was raised by a test")

        outgoing_query = OutgoingPingQuery()
        outgoing_query.my_id = tc.CLIENT_ID
        outgoing_query.tid = tc.TID
        data = outgoing_query.encode()
        data += 'this string ruins the bencoded msg'
        assert_raises(MsgError, decode, data)
        log.error(
            "**IGNORE 2 ERROR LOGS** This exception was raised by a test")

        outgoing_response = OutgoingPingResponse(tc.TID, tc.SERVER_ID)
        outgoing_response.tid = None
        assert_raises(MsgError, outgoing_response.encode)
        log.error(
            "**IGNORE ERROR LOGS** This exception was raised by a test")
Example #37
    def run(self, **kwargs):
        try:
            from apps.rss_feeds.models import Feed
            settings.LOG_TO_STREAM = True
            now = datetime.datetime.utcnow()
            start = time.time()
            r = redis.Redis(connection_pool=settings.REDIS_FEED_POOL)
            tasked_feeds_size = r.zcard('tasked_feeds')

            hour_ago = now - datetime.timedelta(hours=1)
            r.zremrangebyscore('fetched_feeds_last_hour', 0, int(hour_ago.strftime('%s')))

            now_timestamp = int(now.strftime("%s"))
            queued_feeds = r.zrangebyscore('scheduled_updates', 0, now_timestamp)
            r.zremrangebyscore('scheduled_updates', 0, now_timestamp)
            if len(queued_feeds) > 0:
                r.sadd('queued_feeds', *queued_feeds)
            logging.debug(" ---> ~SN~FBQueuing ~SB%s~SN stale feeds (~SB%s~SN/~FG%s~FB~SN/%s tasked/queued/scheduled)" % (
                            len(queued_feeds),
                            r.zcard('tasked_feeds'),
                            r.scard('queued_feeds'),
                            r.zcard('scheduled_updates')))

            # Regular feeds
            if tasked_feeds_size < 5000:
                feeds = r.srandmember('queued_feeds',5000)
                Feed.task_feeds(feeds, verbose=True)
                active_count = len(feeds)
            else:
                logging.debug(" ---> ~SN~FBToo many tasked feeds. ~SB%s~SN tasked." % tasked_feeds_size)
                active_count = 0
            cp1 = time.time()

            # Force refresh feeds
            refresh_feeds = Feed.objects.filter(
                active=True,
                fetched_once=False,
                active_subscribers__gte=1
            ).order_by('?')[:100]
            refresh_count = refresh_feeds.count()
            cp2 = time.time()

            # Mistakenly inactive feeds
            hours_ago = (now - datetime.timedelta(minutes=10)).strftime('%s')
            old_tasked_feeds = r.zrangebyscore('tasked_feeds', 0, hours_ago)
            inactive_count = len(old_tasked_feeds)
            if inactive_count:
                r.zremrangebyscore('tasked_feeds', 0, hours_ago)
                # r.sadd('queued_feeds', *old_tasked_feeds)
                for feed_id in old_tasked_feeds:
                    r.zincrby('error_feeds', feed_id, 1)
                    feed = Feed.get_by_id(feed_id)
                    feed.set_next_scheduled_update()
                logging.debug(" ---> ~SN~FBRe-queuing ~SB%s~SN dropped feeds (~SB%s/%s~SN queued/tasked)" % (
                                inactive_count,
                                r.scard('queued_feeds'),
                                r.zcard('tasked_feeds')))
            cp3 = time.time()

            old = now - datetime.timedelta(days=1)
            old_feeds = Feed.objects.filter(
                next_scheduled_update__lte=old,
                active_subscribers__gte=1
            ).order_by('?')[:500]
            old_count = old_feeds.count()
            cp4 = time.time()

            logging.debug(" ---> ~FBTasking ~SB~FC%s~SN~FB/~FC%s~FB (~FC%s~FB/~FC%s~SN~FB) feeds... (%.4s/%.4s/%.4s/%.4s)" % (
                active_count,
                refresh_count,
                inactive_count,
                old_count,
                cp1 - start,
                cp2 - cp1,
                cp3 - cp2,
                cp4 - cp3
            ))

            Feed.task_feeds(refresh_feeds, verbose=True)
            Feed.task_feeds(old_feeds, verbose=True)

            logging.debug(" ---> ~SN~FBTasking took ~SB%s~SN seconds (~SB%s~SN/~FG%s~FB~SN/%s tasked/queued/scheduled)" % (
                            int((time.time() - start)),
                            r.zcard('tasked_feeds'),
                            r.scard('queued_feeds'),
                            r.zcard('scheduled_updates')))
        except Exception, e:
            import traceback
            traceback.print_exc()
            logging.error(str(e))
            if settings.SEND_ERROR_MAILS:
                mail_admins("Error in Task-Feeds",str(e)+'\n'+traceback.format_exc())
Example #38
def export_xls_full(raw):
    """
    Выгрузка полной (с внутренними номерами) статистики звонков
    :param raw: 
    :return: 
    """
    wb = xlwt.Workbook()
    ws = wb.add_sheet('Подробный список')

    # Заголовок
    ws.write(0, 0, 'Гор. номер')
    ws.write_merge(0, 0, 1, 5, 'Вх.')
    ws.write_merge(0, 0, 6, 10, 'Исх.')

    path = get_options('main', 'xls_path_full', True)

    if not path:
        log.critical('Ошибка чтения конфигурационного файла, см. ошибки выше')
        return

    line = 1

    for kc in sorted(raw):
        ws.write(line, 0, kc)
        ki = raw[kc]['inc']
        ko = raw[kc]['out']

        kiu = ki['users']
        kou = ko['users']

        ws.write(line, 2, format_time(ki['duration']))
        ws.write(line, 3, ki['count'])
        ws.write(line, 4, format_time(ki['billsec']))
        ws.write(line, 5, ki['answer'])

        ws.write(line, 7, format_time(ko['duration']))
        ws.write(line, 8, ko['count'])
        ws.write(line, 9, format_time(ko['billsec']))
        ws.write(line, 10, ko['answer'])

        inc_line = 0
        for inc in sorted(kiu):
            inc_line += 1
            ws.write(line + inc_line, 1, inc)
            ws.write(line + inc_line, 4, format_time(kiu[inc]['billsec']))
            ws.write(line + inc_line, 5, kiu[inc]['answer'])

        out_line = 0
        for out in sorted(kou):
            out_line += 1
            ws.write(line + out_line, 6, out)
            ws.write(line + out_line, 7, format_time(kou[out]['duration']))
            ws.write(line + out_line, 8, kou[out]['count'])
            ws.write(line + out_line, 9, format_time(kou[out]['billsec']))
            ws.write(line + out_line, 10, kou[out]['answer'])

        line += max([inc_line, out_line]) + 1

    try:
        wb.save(path)
    except PermissionError as e:
        log.error('Недостаточно прав для сохранения файла: %s' % e.filename)
        return
    except FileNotFoundError as e:
        log.error('Неверный путь или имя файла: %s' % e.filename)
        return

    return True
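The expected shape of raw can be read off the lookups above; a minimal, made-up input would be:

raw = {
    '74951234567': {
        'inc': {'duration': 120, 'count': 3, 'billsec': 90, 'answer': 2,
                'users': {'101': {'billsec': 90, 'answer': 2}}},
        'out': {'duration': 60, 'count': 1, 'billsec': 45, 'answer': 1,
                'users': {'101': {'duration': 60, 'count': 1, 'billsec': 45, 'answer': 1}}},
    },
}
export_xls_full(raw)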
Example #39
    def copy_to_my_pan(self, user_id, user_ref_id, share_log: ShareLogs,
                       default_pan_id):
        fs_id = share_log.fs_id
        key_prefix = "client:ready:"
        # the file has been transferred out of the share; waiting for it to be transferred in
        async_service.update_state(key_prefix, user_id, {"state": 0, "pos": 1})
        top_dir_item = self.checkout_root_item(user_id, user_ref_id,
                                               default_pan_id)
        if not top_dir_item:
            return -3, None  # could not create the top-level directory
        pan_acc: PanAccounts = auth_service.get_pan_account(
            default_pan_id, user_id)
        if not pan_acc:
            logger.error("PanAccount not exists![{}],user_id:{}".format(
                default_pan_id, user_id))
            return -3, None
        pan_acc = auth_service.check_pan_token_validation(pan_acc)
        # build the directory structure
        async_service.update_state(key_prefix, user_id, {"state": 0, "pos": 2})

        find_pan_file = False
        client_item_params = {}
        jsonrs = restapi.file_search(pan_acc.access_token,
                                     key=fs_id,
                                     parent_dir=top_dir_item.path)
        if jsonrs:
            for finfo in jsonrs:
                if "fs_id" in finfo:
                    md5_val = finfo["md5"]
                    if md5_val == share_log.md5_val:
                        find_pan_file = True
                        client_item_params = self.build_client_item_params(
                            finfo, top_dir_item.id, user_ref_id, pan_acc.id,
                            fs_id)
                        break
        if find_pan_file:
            client_data_item = ClientDataDao.new_data_item(client_item_params)
            return 0, client_data_item
        # print("target pan acc id:", pan_acc.id)

        jsonrs = restapi.transfer_share_files(pan_acc.access_token,
                                              share_id=share_log.share_id,
                                              from_uk=share_log.uk,
                                              randsk=share_log.randsk,
                                              fs_id=share_log.fs_id,
                                              path=top_dir_item.path)
        if "extra" in jsonrs and "list" in jsonrs["extra"]:
            file_path = jsonrs["extra"]["list"][0]["to"]
            key = self.parse_query_key(file_path)
            # after the transfer completes, fetch the file info
            async_service.update_state(key_prefix, user_id, {
                "state": 0,
                "pos": 3
            })
            time.sleep(3)
            client_data_item = self.check_file_by_key_search(
                key, top_dir_item.path, top_dir_item.id, share_log.md5_val,
                fs_id, user_ref_id, pan_acc)
            if client_data_item:
                return 0, client_data_item
            return -4, None  # could not transfer the file
        elif jsonrs.get('errno', 0) == -30 and 'path' in jsonrs:
            logger.info("exists jsonrs:{}".format(jsonrs))
            file_path = jsonrs.get('path', None)
            if file_path:
                # after the transfer completes, fetch the file info
                async_service.update_state(key_prefix, user_id, {
                    "state": 0,
                    "pos": 3
                })
                key = self.parse_query_key(file_path)
                client_data_item = self.check_file_by_key_search(
                    key, top_dir_item.path, top_dir_item.id, share_log.md5_val,
                    fs_id, user_ref_id, pan_acc)
                if client_data_item:
                    return 0, client_data_item
            return -4, None  # could not transfer the file
        elif "errno" in jsonrs:
            return -4, None  # could not transfer the file
Example #40
    def fit(self,
            matrix,
            k,
            distance,
            shrink=0,
            threshold=0,
            implicit=True,
            alpha=None,
            beta=None,
            l=None,
            c=None,
            verbose=False):
        """
        Initialize the model and compute the similarity matrix S with a distance metric.
        Access the similarity matrix using: self._sim_matrix

        Parameters
        ----------
        matrix : csr_matrix
            A sparse matrix. For example, it can be the URM of shape (number_users, number_items).
        k : int
            K nearest neighbour to consider.
        distance : str
            One of the supported distance metrics, check collaborative_filtering_base constants.
        shrink : float, optional
            Shrink term used in the normalization
        threshold: float, optional
            All the values under this value are cut from the final result
        implicit: bool, optional
            If true, treat the URM as implicit, otherwise consider explicit ratings (real values) in the URM
        alpha: float, optional, included in [0,1]
        beta: float, optional, included in [0,1]
        l: float, optional, balance coefficient used in s_plus distance, included in [0,1]
        c: float, optional, cosine coefficient, included in [0,1]
        """
        alpha = -1 if alpha is None else alpha
        beta = -1 if beta is None else beta
        l = -1 if l is None else l
        c = -1 if c is None else c
        if distance == self.SIM_ASYMCOSINE and not (0 <= alpha <= 1):
            log.error(
                'Invalid parameter alpha in asymmetric cosine similarity!')
            return
        if distance == self.SIM_TVERSKY and not (0 <= alpha <= 1
                                                 and 0 <= beta <= 1):
            log.error('Invalid parameter alpha/beta in tversky similarity!')
            return
        if distance == self.SIM_P3ALPHA and alpha is None:
            log.error('Invalid parameter alpha in p3alpha similarity')
            return
        if distance == self.SIM_RP3BETA and alpha is None and beta is None:
            log.error('Invalid parameter alpha/beta in rp3beta similarity')
            return
        if distance == self.SIM_SPLUS and not (0 <= l <= 1 and 0 <= c <= 1
                                               and 0 <= alpha <= 1
                                               and 0 <= beta <= 1):
            log.error('Invalid parameter alpha/beta/l/c in s_plus similarity')
            return

        # compute and stores the similarity matrix using one of the distance metric: S = R•R'
        if distance == self.SIM_COSINE:
            self._sim_matrix = sim.cosine(matrix,
                                          k=k,
                                          shrink=shrink,
                                          threshold=threshold,
                                          binary=implicit)
        elif distance == self.SIM_ASYMCOSINE:
            self._sim_matrix = sim.asymmetric_cosine(matrix,
                                                     k=k,
                                                     shrink=shrink,
                                                     threshold=threshold,
                                                     binary=implicit,
                                                     alpha=alpha)
        elif distance == self.SIM_JACCARD:
            self._sim_matrix = sim.jaccard(matrix,
                                           k=k,
                                           shrink=shrink,
                                           threshold=threshold,
                                           binary=implicit)
        elif distance == self.SIM_DICE:
            self._sim_matrix = sim.dice(matrix,
                                        k=k,
                                        shrink=shrink,
                                        threshold=threshold,
                                        binary=implicit)
        elif distance == self.SIM_TVERSKY:
            self._sim_matrix = sim.tversky(matrix,
                                           k=k,
                                           shrink=shrink,
                                           threshold=threshold,
                                           binary=implicit,
                                           alpha=alpha,
                                           beta=beta)
        elif distance == self.SIM_P3ALPHA:
            self._sim_matrix = sim.p3alpha(matrix,
                                           k=k,
                                           shrink=shrink,
                                           threshold=threshold,
                                           binary=implicit,
                                           alpha=alpha)
        elif distance == self.SIM_RP3BETA:
            self._sim_matrix = sim.rp3beta(matrix,
                                           k=k,
                                           shrink=shrink,
                                           threshold=threshold,
                                           binary=implicit,
                                           alpha=alpha,
                                           beta=beta)
        elif distance == self.SIM_SPLUS:
            self._sim_matrix = sim.s_plus(matrix,
                                          k=k,
                                          shrink=shrink,
                                          threshold=threshold,
                                          binary=implicit,
                                          l=l,
                                          t1=alpha,
                                          t2=beta,
                                          c=c)
        else:
            log.error('Invalid distance metric: {}'.format(distance))
        #self.SIM_DOTPRODUCT: sim.dot_product(matrix, k=k, shrink=shrink, threshold=threshold, binary=implicit)
        return self._sim_matrix
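A minimal fitting sketch, assuming SimilarityModel is the class that defines fit() above and that numpy/scipy are installed; the matrix is random toy data:

import numpy as np
from scipy.sparse import csr_matrix

urm = csr_matrix(np.random.randint(0, 2, size=(50, 100)))
model = SimilarityModel()  # assumed class name
s = model.fit(urm, k=10, distance=model.SIM_ASYMCOSINE, alpha=0.5)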
Example #41
                    use_salience=opts.use_salience,
                    salience_features=salience_features)
        else:
            assert model.pair_composition_network, \
                'pair_composition_network in the model cannot be None'
            assert model.pair_composition_network.use_salience \
                == opts.use_salience
            assert model.pair_composition_network.salience_features \
                == salience_features

        trainer = EventCompositionTrainer(model,
                                          saving_path=opts.output_path,
                                          log=log)

        if not os.path.isdir(opts.indexed_corpus):
            log.error('Cannot find indexed corpus at {}'.format(
                opts.indexed_corpus))
            exit(-1)

        log.info('Loading indexed corpus from: {}, with batch_size={}, '
                 'use_salience={}'.format(opts.indexed_corpus, opts.batch_size,
                                          opts.use_salience))
        corpus_it = PairTuningCorpusIterator(
            opts.indexed_corpus,
            batch_size=opts.batch_size,
            use_salience=opts.use_salience,
            salience_features=salience_features)
        log.info('Found {} lines in the corpus'.format(len(corpus_it)))

        val_corpus_it = None
        if opts.val_indexed_corpus and os.path.isdir(opts.val_indexed_corpus):
            log.info('Loading validation indexed corpus from: {}, '
Example #42
File: static.py Project: pekita1/cobra
    def analyse(self):
        if self.directory is None:
            log.critical("Please set directory")
            sys.exit()
        log.info('Start static code analysis...')

        d = directory.Directory(self.directory)
        files = d.collect_files()
        log.info('Scan Files: {0}, Total Time: {1}s'.format(
            files['file_nums'], files['collect_time']))

        ext_language = {
            # Image
            '.jpg': 'image',
            '.png': 'image',
            '.bmp': 'image',
            '.gif': 'image',
            '.ico': 'image',
            '.cur': 'image',
            # Font
            '.eot': 'font',
            '.otf': 'font',
            '.svg': 'font',
            '.ttf': 'font',
            '.woff': 'font',
            # CSS
            '.css': 'css',
            '.less': 'css',
            '.scss': 'css',
            '.styl': 'css',
            # Media
            '.mp3': 'media',
            '.swf': 'media',
            # Execute
            '.exe': 'execute',
            '.sh': 'execute',
            '.dll': 'execute',
            '.so': 'execute',
            '.bat': 'execute',
            '.pl': 'execute',
            # Edit
            '.swp': 'tmp',
            # Cert
            '.crt': 'cert',
            # Text
            '.txt': 'text',
            '.csv': 'text',
            '.md': 'markdown',
            # Backup
            '.zip': 'backup',
            '.bak': 'backup',
            '.tar': 'backup',
            '.rar': 'backup',
            '.tar.gz': 'backup',
            '.db': 'backup',
            # Config
            '.xml': 'config',
            '.yml': 'config',
            '.spf': 'config',
            '.iml': 'config',
            '.manifest': 'config',
            # Source
            '.psd': 'source',
            '.as': 'source',
            # Log
            '.log': 'log',
            # Template
            '.template': 'template',
            '.tpl': 'template',
        }
        for ext in files:
            if ext in ext_language:
                log.info('{0} - {1}'.format(ext, files[ext]))
            else:
                log.info(ext)

        languages = CobraLanguages.query.all()

        rules = CobraRules.query.filter_by(status=1).all()
        extensions = None
        for rule in rules:
            for language in languages:
                if language.id == rule.language:
                    extensions = language.extensions.split('|')

            if extensions is None:
                log.warning("Rule Language Error")
            # grep name is ggrep on mac
            grep = '/bin/grep'
            if 'darwin' == sys.platform:
                log.info('In Mac OS X System')
                for root, dir_names, file_names in os.walk(
                        '/usr/local/Cellar/grep'):
                    for filename in file_names:
                        if 'ggrep' == filename:
                            grep = os.path.join(root, filename)

            filters = []
            for e in extensions:
                filters.append('--include=*' + e)

            # White list
            white_list = []
            ws = CobraWhiteList.query.filter_by(project_id=self.project_id,
                                                rule_id=rule.id,
                                                status=1).all()
            if ws is not None:
                for w in ws:
                    white_list.append(w.path)

            try:
                log.info('Scan rule id: {0}'.format(rule.id))
                # -n Show Line number / -r Recursive / -P Perl regular expression
                p = subprocess.Popen([grep, "-n", "-r", "-P"] + filters +
                                     [rule.regex, self.directory],
                                     stdout=subprocess.PIPE)
                result = p.communicate()

                # Exists result
                if len(result[0]):
                    log.info('Found:')
                    per_line = str(result[0]).split("\n")
                    log.debug(per_line)
                    for r in range(0, len(per_line) - 1):
                        try:
                            rr = str(per_line[r]).replace(self.directory,
                                                          '').split(':', 1)
                            code = str(rr[1]).split(':', 1)
                            if self.task_id is None:
                                self.task_id = 0
                            rule_id = rule.id
                            current_time = datetime.now().strftime(
                                '%Y-%m-%d %H:%M:%S')
                            m_file = rr[0].strip()
                            m_line = code[0]
                            m_code = str(code[1].strip())
                            params = [
                                self.task_id, rule_id, m_file, m_line, m_code,
                                current_time, current_time
                            ]
                            try:
                                if m_file in white_list or ".min.js" in m_file:
                                    log.debug("In White list or min.js")
                                else:
                                    # Leading comment markers: # // /* *
                                    match_result = re.match(
                                        r"(#)?(//)?(\*)?(/\*)?", m_code)
                                    # The all-optional pattern always matches;
                                    # only a non-empty group(0) means a comment.
                                    if match_result.group(0) != "":
                                        log.debug("In Annotation")
                                    else:
                                        log.debug('In Insert')
                                        if rule.regex == "":
                                            # Didn't filter line when regex is empty
                                            r_content = CobraResults.query.filter_by(
                                                task_id=self.task_id,
                                                rule_id=rule_id,
                                                file=m_file).first()
                                            m_line = 0
                                        else:
                                            r_content = CobraResults.query.filter_by(
                                                task_id=self.task_id,
                                                rule_id=rule_id,
                                                file=m_file,
                                                line=m_line).first()
                                        if r_content is not None:
                                            log.warning("Exists Result")
                                        else:
                                            results = CobraResults(
                                                self.task_id, rule_id, m_file,
                                                m_line, m_code, current_time,
                                                current_time)
                                            db.session.add(results)
                                            db.session.commit()
                                            log.info('Insert Results Success')
                            except Exception as e:
                                log.error('Insert Results Failed: ' + str(e))
                            log.debug(params)
                        except Exception as e:
                            log.critical('Error parsing result: ' + str(e))

                else:
                    log.info('Not Found')

            except Exception as e:
                log.critical('Error calling grep: ' + str(e))

        # Set End Time For Task
        t = CobraTaskInfo.query.filter_by(id=self.task_id).first()
        t.status = 2
        t.file_count = files['file_nums']
        t.time_end = int(time.time())
        t.time_consume = t.time_end - t.time_start
        t.updated_at = time.strftime('%Y-%m-%d %X', time.localtime())
        try:
            db.session.add(t)
            db.session.commit()
        except Exception as e:
            log.critical("Set end time failed: " + str(e))

        log.info("Scan Done")
Example #43
0
    def _handle_ctrl_msg(self, m):
        """Handle all types of CTRL messages.
        May change inner state (see diagram).
        @m - a CTRL message
        """
        # switch case on msg type
        typ = m.get_type()

        # Reset Node
        if typ == Message.TYPE.Reset:
            self._io.reset()
            self.reset()
            self._state = self.State.UNINITIALIZED

        # Send debug data to Master
        elif typ == Message.TYPE.DebugData:
            self._send_to_master(Message.TYPE.DebugData, repr(self))

        # Read and set given serialized circuit
        elif typ == Message.TYPE.SetCircuit:
            self._assert_state("SetCircuit", self.State.UNINITIALIZED)
            if self._circuit is not None:
                log.warning("Setting new circuit without reset!")
            c = unserialize(m.get_msg(), Circuit)
            self._set_circuit(c)
            if self._fully_initialized():
                self._state = self.State.INITIALIZED

        # Read and set given parameters for secret sharing schemes
        elif typ == Message.TYPE.SetSecretSharing:
            self._assert_state("SetSecretSharing", self.State.UNINITIALIZED)
            ss_args, ss2_args = unserialize(m.get_msg())
            self._set_secret_sharing(ss_args, ss2_args)
            if self._fully_initialized():
                self._state = self.State.INITIALIZED

        # Read and set given input as party's input (FFE)
        elif typ == Message.TYPE.SetInput:
            self._assert_state("SetInput", self.State.UNINITIALIZED)
            secret_input = unserialize(m.get_msg(), FFE)
            self._set_input(secret_input)
            if self._fully_initialized():
                self._state = self.State.INITIALIZED

        # Set party's input to a random input in the given serialized field
        elif typ == Message.TYPE.SetRandInput:
            self._assert_state("SetRandInput", self.State.UNINITIALIZED)
            field = unserialize(m.get_msg())
            secret_input = field.rand()
            log.info("Node %d: Setting my input to %r (random)", self._id,
                     secret_input)
            self._set_input(secret_input)
            if self._fully_initialized():
                self._state = self.State.INITIALIZED

        # Share party's input. Node must be INITIALIZED (input and circuit received)
        elif typ == Message.TYPE.ShareInput:
            self._assert_state("ShareInput", self.State.INITIALIZED)
            self._input_shares = [None] * self._n_parties
            self._share(self._ss, self._input, Message.TYPE.InputShare)
            self._state = self.State.RECEIVING_INPUTS

        # Read and set given Truncinator as party's truncinator (for multiplication)
        elif typ == Message.TYPE.SetTruncinator:
            self._assert_state("SetTruncinator", [
                self.State.UNINITIALIZED, self.State.INITIALIZED,
                self.State.RUNNING
            ])
            msg = m.get_msg()
            self._truncinator = unserialize(msg, Truncinator)

        # Read and set given Resampler as party's resampler (for bulk random masks)
        elif typ == Message.TYPE.SetResampler:
            self._assert_state("SetResampler", [
                self.State.UNINITIALIZED, self.State.INITIALIZED,
                self.State.RUNNING
            ])
            msg = m.get_msg()
            self._resampler = unserialize(msg, Resampler)

        # Generate a bulk of masks for DIK10 multiplication
        elif typ == Message.TYPE.GenBulkMulMasks:
            self._assert_state("GenBulkMulMasks",
                               [self.State.INITIALIZED, self.State.RUNNING])
            assert self._resampler is not None, "Cannot generate bulk masks - Resampler not set!"
            rand_val = self._field.rand()
            self._share(self._ss, rand_val, Message.TYPE.DIKBulkShareLow)
            self._share(self._ss2, rand_val, Message.TYPE.DIKBulkShareHigh)
            self._dik_low_shares = [None] * self._n_parties
            self._dik_high_shares = [None] * self._n_parties
            self._old_state = self._state
            self._state = self.State.GEN_BULK_MUL_MASKS

        # Generation of a bulk of masks for node going down - Phase1
        elif typ == Message.TYPE.GenBulkRegenMasks:
            self._assert_state("GenBulkRegenMasks",
                               [self.State.INITIALIZED, self.State.RUNNING])
            assert self._resampler is not None, "Cannot generate bulk masks - Resampler not set!"

            # Choose a random polynomial with zero in the position to be regenerated & send shares
            self._regen_pos = unserialize(m.get_msg())
            zero = self._field.zero()
            self._share(self._ss,
                        zero,
                        Message.TYPE.RegenRandPolyShare,
                        pos=self._regen_pos)

            # Init structures for collecting shares and masks
            self._regen_shares = [None] * self._n_parties
            if self._regen_pos not in self._regen_masks:
                self._regen_masks[self._regen_pos] = []
            self._old_state = self._state
            self._state = self.State.GEN_BULK_REGEN_MASKS

        # Evaluate a linear gate. Gate's name received in message
        elif typ == Message.TYPE.EvalGate:
            self._assert_state("EvalGate", self.State.RUNNING)
            self._eval_linear_gate(m.get_msg())

        ### <DIK multiplication> ###

        # Initialize and start evaluation of multiplication gate. Gate's name received in message
        elif typ == Message.TYPE.EvalMulGateDIK:
            self._assert_state("EvalMulGateDIK", self.State.RUNNING)
            assert len(self._dik_low) == len(
                self._dik_high
            ), "DIK sanity check fail - low & high masks out of sync!"
            assert self._dik_ptr < len(
                self._dik_low
            ), "Cannot multiply DIK - Not enough masks generated!"

            # Parse message for gate's name and the party performing the calculation
            self._mul_gate, self._mul_party = unserialize(m.get_msg())

            # Calculate local multiplication, add current (high degree) random mask & send to "dealer"
            naive_mult = self._circuit.evaluate_gate(self._mul_gate)
            masked_mult = self._dik_high[self._dik_ptr] + naive_mult
            processed_share = self._ss2.preprocess(
                masked_mult, self._id, pos=None)  # pos=None for secret pos
            self._send_to_party(self._mul_party, Message.TYPE.DIKMulHighShare,
                                processed_share)

            # Am I the dealer?
            if self._id == self._mul_party:
                self._state = self.State.MUL_DIK_DEALER
                self._mul_shares = [None] * self._n_parties
            else:
                self._state = self.State.MUL_DIK_NON_DEALER

        ### </DIK multiplication> ###

        ### <BGW multiplication> ###

        # Initialize and start evaluation of multiplication gate. Gate's name received in message
        elif typ == Message.TYPE.EvalMulGateInit:
            self._assert_state("EvalMulGateInit", self.State.RUNNING)
            assert self._truncinator is not None, "Cannot multiply - Truncinator not set!"

            # Init data structures for multiplication
            self._mul_gate = m.get_msg()
            self._mul_shares = [None] * self._n_parties

            # Generate my part of the random mask & share it
            zero = self._field.zero()
            self._share(self._ss2, zero, Message.TYPE.BGWMulRandPolyShare)
            self._state = self.State.MUL_RANDPOLY_SHARE

        # Perform local multiplication and mask result using the joint mask by received shares
        elif typ == Message.TYPE.EvalMulGateReduce:
            self._assert_state("EvalMulGateReduce",
                               self.State.MUL_RANDPOLY_DONE)

            # Calculate regular multiplication and add joint random mask
            naive_mult = self._circuit.evaluate_gate(self._mul_gate)
            masked_mult = sum(self._mul_shares, naive_mult)

            # Share result (in high dimension) & reset data structure for new shares
            self._share(self._ss2, masked_mult,
                        Message.TYPE.BGWMulReductionShare)
            self._mul_shares = [None] * self._n_parties
            self._state = self.State.MUL_REDUCTION_SHARE

        # Perform degree reduction on shares received, and send each party back its appropriate part
        elif typ == Message.TYPE.EvalMulGateFinalize:
            self._assert_state("EvalMulGateFinalize",
                               self.State.MUL_REDUCTION_CALC)

            # Calculate linear degree reduction on the vector of evaluations
            reduced = self._truncinator.reduce(self._mul_shares)

            # Send back the reduced result & reset data structure for new shares
            processed_shares = [
                self._ss2.preprocess(s, self._id, pos=None) for s in reduced
            ]
            self._send_to_parties(Message.TYPE.BGWMulReductionResult,
                                  processed_shares)
            self._mul_shares = [None] * self._n_parties
            self._state = self.State.MUL_RECONSTRUCTION

        ### </BGW multiplication> ###

        # Send my final output to all of the parties
        elif typ == Message.TYPE.EvalOutput:
            self._assert_state("ShareOutput", self.State.RUNNING)
            self._output_shares = [None] * self._n_parties
            out_share = self._circuit.get_output()
            output_msg = self._ss.preprocess(
                out_share, self._id, pos=None)  # pos=None for secret pos
            msgs = [output_msg] * self._n_parties
            self._send_to_parties(Message.TYPE.OutputShare, msgs)
            self._state = self.State.RECEIVING_OUTPUTS

        else:
            log.error("Unknown ctrl msg received:\n%r", m)
Example #44
0
def wizard_hybrid():
    SIM_MATRIX = ['saved_sim_matrix', 'saved_sim_matrix_evaluation']
    R_HAT = ['saved_r_hat', 'saved_r_hat_evaluation']
    SAVE = ['saved_sim_matrix', 'saved_r_hat']
    EVALUATE = ['saved_sim_matrix_evaluation', 'saved_r_hat_evaluation']

    start = time.time()

    matrices_array, folder, models = hb.create_matrices_array()

    print('matrices loaded in {:.2f} s'.format(time.time() - start))
    log.success('You have loaded: {}'.format(models))

    NORMALIZATION_MODE = normalization_mode_selection()

    if folder in SAVE:
        WEIGHTS = weights_selection(models)

        if folder in SIM_MATRIX:
            name, urm_filter_tracks, rel_path = option_selection_save('SIM')
            hybrid_rec = HybridSimilarity(
                matrices_array,
                normalization_mode=NORMALIZATION_MODE,
                urm_filter_tracks=urm_filter_tracks)
            sps.save_npz('raw_data/' + rel_path + name,
                         hybrid_rec.get_r_hat(weights_array=WEIGHTS))
        if folder in R_HAT:
            name, urm_filter_tracks, rel_path, EXPORT = option_selection_save(
                'R_HAT')
            hybrid_rec = HybridRHat(matrices_array,
                                    normalization_mode=NORMALIZATION_MODE,
                                    urm_filter_tracks=urm_filter_tracks)
            if EXPORT:
                N = ask_number_recommendations()
                recommendations = hybrid_rec.recommend_batch(
                    weights_array=WEIGHTS,
                    target_userids=data.get_target_playlists(),
                    N=N)
                exportcsv(recommendations, path='submission', name=name)
            else:
                sps.save_npz('raw_data/' + rel_path + name,
                             hybrid_rec.get_r_hat(weights_array=WEIGHTS))

    elif folder in EVALUATE:
        log.success('|WHAT DO YOU WANT TO DO?|')
        log.warning('\'1\' BAYESIAN SEARCH VALIDATION')
        log.warning('\'2\' HAND CRAFTED WEIGHTS')
        mode = input()[0]

        # BAYESIAN SEARCH
        if mode == '1':
            log.success(
                '|SELECT A NUMBER OF |||ITERATIONS||| FOR THE ALGORITHM|')
            iterations = int(input())
            urm_filter_tracks = data.get_urm_train_1()
            if folder in SIM_MATRIX:
                hybrid_rec = HybridSimilarity(
                    matrices_array,
                    normalization_mode=NORMALIZATION_MODE,
                    urm_filter_tracks=urm_filter_tracks)
            if folder in R_HAT:
                hybrid_rec = HybridRHat(matrices_array,
                                        normalization_mode=NORMALIZATION_MODE,
                                        urm_filter_tracks=urm_filter_tracks)
            hybrid_rec.validate(iterations=iterations,
                                urm_test=data.get_urm_test_1(),
                                userids=data.get_target_playlists())

        # MANUAL WEIGHTS
        elif mode == '2':
            WEIGHTS = weights_selection(models)
            urm_filter_tracks = data.get_urm_train_1()
            choice = option_selection_evaluation_2()  # save, evaluate or csv
            if choice == 's':
                log.success('|CHOOSE A NAME FOR THE MATRIX...|')
                name = input()
                if folder in SIM_MATRIX:
                    type = 'SIM'
                    hybrid_rec = HybridSimilarity(
                        matrices_array,
                        normalization_mode=NORMALIZATION_MODE,
                        urm_filter_tracks=urm_filter_tracks)
                elif folder in R_HAT:
                    type = 'R_HAT'
                    hybrid_rec = HybridRHat(
                        matrices_array,
                        normalization_mode=NORMALIZATION_MODE,
                        urm_filter_tracks=urm_filter_tracks)

                sps.save_npz('raw_data/saved_r_hat_evaluation/' + name,
                             hybrid_rec.get_r_hat(weights_array=WEIGHTS))
                sym_rec = symmetric_recommender_creator(
                    models,
                    type,
                    NORMALIZATION_MODE,
                    urm_filter_tracks=data.get_urm_train_2())
                sps.save_npz('raw_data/saved_r_hat_evaluation_2/' + name,
                             sym_rec.get_r_hat(weights_array=WEIGHTS))

            elif choice == 'e':
                if folder in SIM_MATRIX:
                    type = 'SIM'
                    hybrid_rec = HybridSimilarity(
                        matrices_array,
                        normalization_mode=NORMALIZATION_MODE,
                        urm_filter_tracks=urm_filter_tracks)
                elif folder in R_HAT:
                    type = 'R_HAT'
                    hybrid_rec = HybridRHat(
                        matrices_array,
                        normalization_mode=NORMALIZATION_MODE,
                        urm_filter_tracks=urm_filter_tracks)
                N = ask_number_recommendations()
                print('Recommending...')
                recs = hybrid_rec.recommend_batch(
                    weights_array=WEIGHTS,
                    target_userids=data.get_target_playlists(),
                    N=N)
                hybrid_rec.evaluate(recommendations=recs,
                                    test_urm=data.get_urm_test_1())

                # export the recommendations
                log.success(
                    'Do you want to save the CSV with these recommendations? (y/n)'
                )
                if input()[0] == 'y':
                    export_csv_wizard(recs)

                sym_rec = symmetric_recommender_creator(
                    models,
                    type,
                    NORMALIZATION_MODE,
                    urm_filter_tracks=data.get_urm_train_2())
                recs2 = sym_rec.recommend_batch(
                    weights_array=WEIGHTS,
                    target_userids=data.get_target_playlists())
                sym_rec.evaluate(recommendations=recs2,
                                 test_urm=data.get_urm_test_2())

            elif choice == 'c':
                if folder in R_HAT:
                    hybrid_rec = HybridRHat(
                        matrices_array,
                        normalization_mode=NORMALIZATION_MODE,
                        urm_filter_tracks=urm_filter_tracks)
                    N = ask_number_recommendations()
                    print('Recommending...')
                    recs = hybrid_rec.recommend_batch(
                        weights_array=WEIGHTS,
                        target_userids=data.get_target_playlists(),
                        N=N)

                    export_csv_wizard(recs)
                else:
                    log.error('not implemented yet')
    else:
        log.error('WRONG FOLDER')
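
Several branches above read a menu choice with `input()[0]`, which raises IndexError on empty input. A minimal sketch of a safer prompt (a hypothetical helper, not part of the original wizard):

def ask_choice(prompt, allowed):
    # Keep asking until the first character of the answer is an allowed option.
    while True:
        answer = input(prompt).strip().lower()
        if answer and answer[0] in allowed:
            return answer[0]
        log.error('Wrong option!')

# e.g. mode = ask_choice('1/2: ', {'1', '2'})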
Example #45
0
def get_at_inc_list():
    """
    Импортируем список городских входящих номеров из БД АТС

    :return: {string: {string}}, {гор_номер: {вн_номер, ...}}
    """
    raw = defaultdict(set)

    options_list = get_options('asterisk', 'db')

    if options_list:
        host, user, password, db = options_list

        try:
            with pymysql.connect(host, user, password, db) as cur:

                re_group = re.compile(r'ext-group,(\d{3}),1')
                re_im = re.compile(r'from-did-direct,(\d{4}),1')
                re_ivr = re.compile(r'ivr-(\d+),s,1')

                # Query: how ring groups map to extensions
                cur.execute("SELECT grpnum, grplist FROM ringgroups ")

                # Build a dict {group: set(extensions)}
                groups_list = dict(
                    (k, v.replace('#', '').split('-')) for k, v in cur)

                cur.execute(
                    "SELECT ivr_id, selection, dest FROM ivr_dests WHERE ivr_ret = 0"
                )

                ivr_list = defaultdict(dict)

                for ivr_id, sel, dest in cur:
                    ivr_group = re_group.match(dest)

                    if ivr_group:
                        ivr_list[str(ivr_id)][sel] = groups_list[
                            ivr_group.group(1)]
                    else:
                        ivr_im = re_im.match(dest)

                        if ivr_im:
                            ivr_list[str(ivr_id)][sel] = [ivr_im.group(1)]

                # Query: how city numbers map to extensions or groups
                cur.execute(
                    "SELECT extension, destination FROM incoming "
                    "WHERE LENGTH(extension) = 7 OR LENGTH(extension) = 11")

                for ext, des in cur:

                    # City number format: "###-##-##"
                    cm = '%s-%s-%s' % (ext[-7:-4], ext[-4:-2], ext[-2:])

                    # Match ring groups only
                    group = re_group.match(des)

                    if group:
                        for x in groups_list[group.group(1)]:
                            # Add every extension of this group to the city number
                            raw[cm].add(x)
                    else:
                        # Match direct extensions only
                        im = re_im.match(des)

                        if im:
                            # Add the extension linked to this city number
                            # raw[im.group(1)].add(cm)
                            raw[cm].add(im.group(1))
                        else:
                            ivr = re_ivr.match(des)

                            if ivr and ivr.group(1) in ivr_list:
                                for ik, iv in ivr_list[ivr.group(1)].items():
                                    for x in iv:
                                        ivr_cm = '%s/%s' % (cm, ik)

                                        raw[ivr_cm].add(x)

        except OperationalError as e:
            log.error(e)

    return raw
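
A hypothetical usage sketch for `get_at_inc_list()`, printing each city number (or IVR branch) together with its extensions:

if __name__ == '__main__':
    for city_number, extensions in sorted(get_at_inc_list().items()):
        print('%s -> %s' % (city_number, ', '.join(sorted(extensions))))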
Example #46
0
    step_cmds.append(base_cmd + f" {poolname}")
    step_cmds.append(base_cmd + f" -p {poolname}")
    step_cmds.append(base_cmd + f" --pool {poolname}")

    for step in step_cmds:
        rbd_util.exec_cmd(f"rbd mirror pool enable {poolname} pool")
        if not rbd_util.exec_cmd(step):
            log.error(f"Test case Failed executing: {step}")
            exit(1)
        if not rbd_util.exec_cmd(
                f'rbd mirror pool info {poolname}|grep \\"Mode: disabled\\"'):
            log.error(f"command not worked: {step}")
            exit(1)

    log.info("Test Case Passed")


if __name__ == "__main__":

    parser = argparse.ArgumentParser(description="RBD CLI Test")
    parser.add_argument("-e", "--ec-pool-k-m", required=False)
    parser.add_argument("--test-case", required=True)

    args = parser.parse_args()

    try:
        globals()[args.test_case]()
        rbd_util.delete_pool(poolname=args.test_case)
    except KeyError:
        log.error(f"{args.test_case} not yet implemented")
Example #47
0
    try:

        api_event = APIEventOps(**config_data)
        api_event.get_event()

        api_event_cluster_event = APIClusterEventOps(**config_data)
        api_event_cluster_event.get_cluster_event()

        api_server_event = APIServerEventOps(**config_data)
        api_server_event.get_server_event_api()

        add_test_info.status("ok")

    except Exception as e:
        log.error("test error")
        log.error(e)
        add_test_info.status("error")

    add_test_info.completed_info()


if __name__ == "__main__":
    config_data = config.get_config()

    if not config_data["auth"]:
        log.error("auth failed")

    else:
        exec_test(config_data)
Example #48
0
def update_translation():
    """1. 更新翻译"""
    NEED_CLEAR = True
    log.info(os.getcwd())
    eso_path = ' '.join(sys.argv[2:])
    log.info('ESO PATH: %s' % eso_path)
    mnf_path = os.path.join(eso_path, 'depot/eso.mnf')
    extract_path = '../../temp/extract/'

    datestr = time.strftime('%Y%m%d')
    print("Enter the filename suffix and press Enter (defaults to today's date: %s)" % datestr)
    datestr_new = input().strip()
    if datestr_new != '':
        datestr = datestr_new
    log.info('filename suffix: %s' % datestr)

    if NEED_CLEAR:
        print('### Cleaning up...')
        log.debug('Cleaning up...')
        # Clean the output directories
        dirs = ('../../输出/更新翻译/1_new/',
                '../../输出/更新翻译/2_diff/',)
        for dir in dirs:
            if os.path.exists(dir):
                log.info('clear %s' % dir)
                shutil.rmtree(dir)
        # Clean the mnf extraction directory
        if os.path.exists(extract_path):
            log.info('clear %s' % extract_path)
            shutil.rmtree(extract_path)
        # Clean intermediate translation files
        log.info('clear csv and xlsx')
        for root, dirs, files in os.walk('../../translation/lang'):
            for f in files:
                if (f.startswith('en.') or f.startswith('jp.')) \
                        and (f.endswith('.lang.csv') or f.endswith('.lang.xlsx')):
                    filename = os.path.join(root, f)
                    log.debug('remove %s' % filename)
                    os.remove(filename)
            break
        log.info('clear ui text')
        files = (
            '../../translation/en_pregame.lua',
            '../../translation/en_client.lua',
            '../../translation/zh_translate.txt',
        )
        for f in files:
            if os.path.exists(f):
                log.debug('remove %s' % f)
                os.remove(f)

    print('### Creating directories...')
    log.debug('Creating directories...')
    dirs = (
        '../../输出/更新翻译/1_new/',
        '../../输出/更新翻译/2_diff/',
        '../../输出/更新翻译/4_old/',
    )
    for dir in dirs:
        if not os.path.isdir(dir):
            log.info('create %s' % dir)
            os.makedirs(dir)

    print('### Decoding...')
    log.debug('Decoding...')
    log.debug('extract eso.mnf -a 0')
    execute('EsoExtractData.exe "%s" -a 0 ../../temp/extract' % mnf_path)
    log.debug('extract eso.mnf -a 2')
    execute('EsoExtractData.exe "%s" -a 2 ../../temp/extract' % mnf_path)
    if not os.path.exists(extract_path):
        log.error('Extraction failed')
        sys.exit(-1)

    os.chdir('../../')
    log.info(os.getcwd())
    print('### Copying...')
    log.debug('Copying...')
    src_dst = (
        ('temp/extract/gamedata/lang/en.lang.csv', 'translation/lang/en.lang.csv',),
        ('temp/extract/gamedata/lang/jp.lang.csv', 'translation/lang/jp.lang.csv',),
        ('temp/extract/esoui/lang/en_pregame.lua', 'translation/en_pregame.lua',),
        ('temp/extract/esoui/lang/en_client.lua', 'translation/en_client.lua',),
    )
    for src, dst in src_dst:
        log.info('copy %s to %s' % (src, dst))
        shutil.copy(src, dst)

    os.chdir('scripts/')
    log.info(os.getcwd())
    print('### Analyzing...')
    log.debug('Analyzing...')
    execute('python split_lang_csv_by_id.py')
    execute('python split_lang_csv_by_id.py -l jp')

    print('### Exporting new xls files...')
    log.debug('Exporting new xls files...')
    execute('python prepare_lang.py --all')
    execute('python convert_lua_to_txt.py')
    execute('python convert_txt_to_xls.py')

    print('### Saving results...')
    log.debug('Saving results...')
    dst = '../输出/更新翻译/1_new/'
    for root, dirs, files in os.walk('../translation/lang'):
        for f in files:
            if f.startswith('en.') and f.endswith('.lang.xlsx'):
                filename = os.path.join(root, f)
                log.info('copy %s to %s' % (filename, dst))
                shutil.copy(filename, dst)
        break
    filename = '../translation/zh_translate.xlsx'
    log.info('copy %s to %s' % (filename, dst))
    shutil.copy(filename, dst)
    
    print('### Renaming...')
    log.debug('Renaming...')
    execute('python rename_lang_xls.py %s %s' % (datestr, dst))
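
The suffix prompt at the top of `update_translation` (whose original `datestr = datestr` assignment silently dropped the user's input; fixed above) generalizes into a small helper. A sketch, with the hypothetical name `prompt_with_default`:

def prompt_with_default(prompt, default):
    # Show the default, return the user's answer if non-empty, else the default.
    print('%s (default: %s)' % (prompt, default))
    answer = input().strip()
    return answer if answer else default

# e.g. datestr = prompt_with_default('Enter the filename suffix', time.strftime('%Y%m%d'))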
Example #49
0
    def query_history(self, req: HistoryRequest) -> Optional[List[BarData]]:
        """
        从tushare里查询历史数据
        :param req:查询请求
        :return: Optional[List[BarData]]
        """
        if self.symbols is None:
            return None

        symbol = req.symbol
        exchange = req.exchange
        interval = req.interval
        start = req.start.strftime(TS_DATE_FORMATE)
        end = req.end.strftime(TS_DATE_FORMATE)

        if interval is not Interval.DAILY:
            return None
        if exchange not in [Exchange.SSE, Exchange.SZSE]:
            return None

        tscode = to_ts_symbol(symbol, exchange)

        # Query in chunks: with the 5000-rows-per-call limit, one call may not fetch everything
        cnt = 0
        df: Optional[pd.DataFrame] = None
        while datetime.strptime(start, TS_DATE_FORMATE) <= datetime.strptime(
                end, TS_DATE_FORMATE):
            # Cap each query at 5000 days of data
            start_date = datetime.strptime(start, TS_DATE_FORMATE)
            simulate_end_date = min(
                datetime.strptime(end, TS_DATE_FORMATE),
                start_date + timedelta(days=MAX_QUERY_SIZE))
            simulate_end = simulate_end_date.strftime(TS_DATE_FORMATE)

            # Throttle: keep each call to at least 60/500 = 0.12 s so we stay under 500 calls per minute
            # begin_time = time.time()
            tushare_df = None
            while True:
                try:
                    tushare_df = self.pro.query('daily',
                                                ts_code=tscode,
                                                start_date=start,
                                                end_date=simulate_end)
                except (requests.exceptions.SSLError,
                        requests.exceptions.ConnectionError) as e:
                    log.error(e)
                    # traceback.print_exc()
                    # ('Connection aborted.', ConnectionResetError(10054, 'An existing connection was forcibly closed by the remote host.', None, 10054, None))
                    if '10054' in str(e):
                        sleep_time = 60.0
                        log.info("请求过于频繁,sleep:" + str(sleep_time) + "s")
                        time.sleep(sleep_time)
                        log.info("继续发送请求:" + tscode)
                        continue  # 继续发请求
                    else:
                        raise Exception(e)  # re-raise any other exception
                break
            if tushare_df is not None:
                if df is None:
                    df = tushare_df
                else:
                    df = pd.concat([df, tushare_df], ignore_index=True)
            # end_time = time.time()
            # delta = round(end_time - begin_time, 3)
            # if delta < 60 / MAX_QUERY_TIMES:
            sleep_time = 0.50
            log.info("sleep:" + str(sleep_time) + "s")
            time.sleep(sleep_time)

            cnt += 1
            start = (simulate_end_date +
                     timedelta(days=1)).strftime(TS_DATE_FORMATE)

        data: List[BarData] = []

        if df is not None:
            for ix, row in df.iterrows():
                date = datetime.strptime(row.trade_date, '%Y%m%d')
                date = CHINA_TZ.localize(date)

                if pd.isnull(row['open']):
                    log.info(symbol + '.' + EXCHANGE_VT2TS[exchange] +
                             row['trade_date'] + " open_price is None")
                elif pd.isnull(row['high']):
                    log.info(symbol + '.' + EXCHANGE_VT2TS[exchange] +
                             row['trade_date'] + " high_price is None")
                elif pd.isnull(row['low']):
                    log.info(symbol + '.' + EXCHANGE_VT2TS[exchange] +
                             row['trade_date'] + " low_price is None")
                elif pd.isnull(row['close']):
                    log.info(symbol + '.' + EXCHANGE_VT2TS[exchange] +
                             row['trade_date'] + " close_price is None")
                elif pd.isnull(row['amount']):
                    log.info(symbol + '.' + EXCHANGE_VT2TS[exchange] +
                             row['trade_date'] + " volume is None")

                row = row.fillna(0)  # fillna returns a new Series; assign it back
                bar = BarData(symbol=symbol,
                              exchange=exchange,
                              interval=interval,
                              datetime=date,
                              open_price=row['open'],
                              high_price=row['high'],
                              low_price=row['low'],
                              close_price=row['close'],
                              volume=row['amount'],
                              gateway_name='tushare')

                data.append(bar)
        return data
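
The chunking loop in `query_history` walks the requested range in steps of at most `MAX_QUERY_SIZE` days because tushare returns at most 5000 rows per call. The same logic as a standalone generator (a sketch with a hypothetical name; `fmt` stands in for `TS_DATE_FORMATE`):

from datetime import datetime, timedelta

def date_chunks(start, end, max_days, fmt):
    # Yield (chunk_start, chunk_end) date strings covering [start, end] inclusively.
    cur = datetime.strptime(start, fmt)
    stop = datetime.strptime(end, fmt)
    while cur <= stop:
        chunk_end = min(stop, cur + timedelta(days=max_days))
        yield cur.strftime(fmt), chunk_end.strftime(fmt)
        cur = chunk_end + timedelta(days=1)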
Example #50
0
def gen_chs():
    """1. 简体"""
    NEED_CLEAR = True
    log.info(os.getcwd())

    if NEED_CLEAR:
        print('### Cleaning up...')
        log.debug('Cleaning up...')
        # Clean the output directories
        dirs = ('../../输出/生成简体插件/', )
        for dir in dirs:
            if os.path.exists(dir):
                log.info('clear %s' % dir)
                shutil.rmtree(dir)
        # Clean intermediate translation files
        log.info('clear csv and xlsx')
        for root, dirs, files in os.walk('../../translation/lang/translated/'):
            for f in files:
                if f.endswith('.csv') or f.endswith('.xlsx') or f.endswith(
                        '.lang'):
                    filename = os.path.join(root, f)
                    log.debug('remove %s' % filename)
                    os.remove(filename)
            break
        for root, dirs, files in os.walk('../../translation/'):
            for f in files:
                if f.endswith('.csv') or f.endswith('.xlsx'):
                    filename = os.path.join(root, f)
                    log.debug('remove %s' % filename)
                    os.remove(filename)
            break
        files = (
            '../../translation/zh_translate.txt',
            '../../translation/zh_translate.xlsx',
        )
        for f in files:
            if os.path.exists(f):
                log.debug('remove %s' % f)
                os.remove(f)

    print('### Creating directories...')
    log.debug('Creating directories...')
    dirs = ('../../输出/生成简体插件/', )
    for dir in dirs:
        if not os.path.isdir(dir):
            log.info('create %s' % dir)
            os.makedirs(dir)

    print('### Copying files...')
    log.debug('Copying files...')
    for root, dirs, files in os.walk('汉化xlsx/'):
        for f in files:
            if f.endswith('.xlsx') and not f.startswith('~'):
                filename = os.path.join(root, f)
                data = load_xls(filename)
                if len(data[1]) == 8:  # UI localization file
                    dst = '../../translation/'
                    ui_xls_file = f
                    log.info('copy %s to %s' % (filename, dst))
                    shutil.copy(filename, dst)
                dst = '../../translation/lang/translated/'
                log.info('copy %s to %s' % (filename, dst))
                shutil.copy(filename, dst)

    os.chdir('../../scripts/')
    log.info(os.getcwd())
    print('### Converting UI text...')
    log.debug('Converting UI text...')
    execute('python export_uixls_to_txt.py ../translation/%s' % ui_xls_file)
    execute('python convert_txt_to_str.py -m translation')

    print('### Converting other text...')
    log.debug('Converting other text...')
    execute('python export_langxls_to_csv.py')

    os.chdir('../translation/lang/translated/')
    log.info(os.getcwd())
    print('### Encoding...')
    log.debug('Encoding...')
    execute('EsoExtractData.exe -x zh.lang.csv')

    print('### Validating...')
    log.debug('Validating...')
    shutil.copy('zh.lang', 'zh1.lang')
    execute('EsoExtractData -l zh1.lang')
    num1 = get_linenum('../en.lang.csv')
    num2 = get_linenum('zh.lang.csv')
    num3 = get_linenum('zh1.lang.csv')
    log.info('validate line num: %d, %d, %d' % (num1, num2, num3))
    if not num1 == num2 == num3:
        log.error('Validation failed')
        sys.exit(-1)

    os.chdir('../../../')
    log.info(os.getcwd())
    print('### Packaging...')
    log.debug('Packaging...')
    log.info('copy AddOns')
    shutil.copytree('AddOns/', '输出/生成简体插件/AddOns')
    log.info('copy lang')
    shutil.copy('translation/lang/translated/zh.lang',
                '输出/生成简体插件/AddOns/gamedata/lang/')
    log.info('copy readme')
    shutil.copy('工具/生成插件/README_chs.txt', '输出/生成简体插件/README.txt')
    log.info('clear AddOns')
    os.remove('输出/生成简体插件/AddOns/EsoUI/lang/.gitignore')
    os.remove('输出/生成简体插件/AddOns/EsoZH/fonts/README.md')
    os.remove('输出/生成简体插件/AddOns/gamedata/lang/.gitignore')

    os.chdir('输出/生成简体插件/')
    log.info(os.getcwd())
    with open('README.txt', 'rt', encoding='utf-8') as fp:
        desc = fp.read()
    zip_name = 'ESO汉化插件.zip'
    zipf = zipfile.ZipFile(zip_name, 'w', zipfile.ZIP_DEFLATED)
    zipf.comment = bytes(desc, encoding='gbk')
    for root, dirs, files in os.walk('.'):
        for f in files:
            if f != zip_name:
                zipf.write(os.path.join(root, f))
    zipf.close()
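
The validation step above compares line counts of the English source CSV, the generated zh.lang.csv, and the re-decoded zh1.lang.csv. `get_linenum` is an external helper in this project; a plausible minimal sketch (an assumption, not the actual implementation):

def get_linenum(path):
    # Count the lines of a text file without loading it all into memory.
    with open(path, 'rt', encoding='utf-8', errors='ignore') as fp:
        return sum(1 for _ in fp)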
Example #51
0
    def getImages(self, base_url, gid, token, imageCount, flag):
        headers = getRandomHead()
        pageCount = int(imageCount / 40)
        if imageCount % 40 != 0:
            pageCount += 1
        imageUrls = []
        beginPage = 0
        tsequence = 0
        sequence = 0
        if flag:
            sequence = self.db.getLastImageSequenceByGidAndToken(gid,
                                                                 token) + 1
            tsequence = self.db.getLastThumbImageSequenceByGidAndToken(
                gid, token) + 1
            beginPage = int(sequence / 40)
            if sequence % 40 != 0:
                beginPage += 1
        for i in range(beginPage, pageCount):
            info(
                "info", "excrawler.getImages", "Start crawling gallery detail page " +
                str(i + 1) + ", gid: " + str(gid) + " token: " + token)
            if i != 0:
                url = base_url + "?p=" + str(i)
            else:
                url = base_url

            errorCount = 0
            while True:
                if errorCount >= 5:
                    # Record the gallery that kept failing
                    self.db.insertError(gid, token)
                    error("exception", "excrawler.getImages",
                          "ParseError: failed 5 times in a row to get the thumbnail list from the gallery detail page")
                    # raise ParseError("failed 5 times in a row to get the thumbnail list from the gallery detail page")
                    break  # without this, the while loop would retry forever
                try:
                    res = invokeRequest("fetch gallery detail page " + str(i + 1),
                                        "excrawler.getImages",
                                        requests.get,
                                        url,
                                        cookies=COOKIE,
                                        headers=headers,
                                        timeout=30,
                                        proxies=self.pool.getProxysequence())
                except Networkerror as e:
                    self.db.insertError(gid, token)
                    error("exception", "excrawler.getImages",
                          "Networkerror: " + e.message)
                    errorCount += 1
                    continue  # res is undefined after a network error

                soup = BeautifulSoup(res.text, "html.parser")
                tags = soup.select("#gdt > .gdtm > div")
                if len(tags) == 0:
                    info("信息", "excrawler.getImages",
                         "本子详情页缩略图列表中没有图片,url:" + url + "html:" + res.text)
                    errorCount += 1
                    continue
                images = []
                for tag in tags:
                    timageUrl = re.findall(r"url\((.+)\)", tag['style'])[0]
                    a = tag.select("a")[0]
                    href = a['href']
                    img = {
                        'gid': gid,
                        'token': token,
                        'sequence': sequence,
                        'url': href
                    }
                    images.append(img)
                    sequence += 1
                    if timageUrl not in imageUrls:
                        timg = {
                            'gid': gid,
                            'token': token,
                            'sequence': tsequence,
                            'url': timageUrl
                        }
                        self.db.insertThumbimage(timg)
                        tsequence += 1
                        imageUrls.append(timageUrl)
                self.db.insertEroimage(images)
                break
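
`getImages` de-duplicates thumbnail URLs with a list and `not in`, an O(n) membership test per thumbnail. The same bookkeeping with a set, as an illustrative sketch (not the original code):

def dedupe_keep_order(urls):
    # Drop duplicate URLs while preserving first-seen order.
    seen = set()
    unique = []
    for u in urls:
        if u not in seen:  # O(1) with a set, O(n) with a list
            seen.add(u)
            unique.append(u)
    return unique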
Example #52
0
def gen_chs_force():
    """3. 强制简体,用原版做一次繁简转换"""
    NEED_CLEAR = True
    log.info(os.getcwd())

    if NEED_CLEAR:
        print('### Cleaning up...')
        log.debug('Cleaning up...')
        # Clean the output directories
        dirs = ('../../输出/生成简体插件/', )
        for dir in dirs:
            if os.path.exists(dir):
                log.info('clear %s' % dir)
                shutil.rmtree(dir)
        # Clean intermediate translation files
        files = (
            '../../translation/STOthers_ts.txt',
            '../../translation/lang/translated/zh1.lang',
            '../../translation/lang/translated/zh1.lang.csv',
        )
        for f in files:
            if os.path.exists(f):
                log.debug('remove %s' % f)
                os.remove(f)
        for root, dirs, files in os.walk('../../translation/'):
            for f in files:
                if f.endswith('.xlsx'):
                    filename = os.path.join(root, f)
                    log.debug('remove %s' % filename)
                    os.remove(filename)
            break

    print('### Creating directories...')
    log.debug('Creating directories...')
    dirs = ('../../输出/生成简体插件/', )
    for dir in dirs:
        if not os.path.isdir(dir):
            log.info('create %s' % dir)
            os.makedirs(dir)

    print('### Parsing the conversion table...')
    log.debug('Parsing the conversion table...')
    for root, dirs, files in os.walk('繁简对照'):
        for f in files:
            if f.endswith('.xlsx') and not f.startswith('~'):
                filename = os.path.join(root, f)
                dst = '../../translation/'
                log.info('copy %s to %s', filename, dst)
                shutil.copy(filename, dst)
                cht_to_chs_file = f
                break

    os.chdir('../../translation/')
    log.info(os.getcwd())
    execute('python ../scripts/xls2csv.py "%s" STOthers_ts.txt' %
            cht_to_chs_file)
    with open('STOthers_ts.txt', 'rt', encoding='utf-8') as fp:
        lines = fp.readlines()
    with open('STOthers_ts.txt', 'wt', encoding='utf-8') as fp:
        fp.write(''.join(lines[1:]))

    os.chdir('../scripts/')
    log.info(os.getcwd())
    print('### Converting Traditional to Simplified...')
    log.debug('Converting Traditional to Simplified...')
    # Overwrite the files in place
    src_dst = (
        (
            '../AddOns/EsoUI/lang/zh_pregame.str',
            '../AddOns/EsoUI/lang/zh_pregame.str',
        ),
        (
            '../AddOns/EsoUI/lang/zh_client.str',
            '../AddOns/EsoUI/lang/zh_client.str',
        ),
        (
            '../translation/lang/translated/zh.lang.csv',
            '../translation/lang/translated/zh.lang.csv',
        ),
    )
    for src, dst in src_dst:
        execute('python convert_to_chs.py %s %s' % (src, dst))

    os.chdir('../translation/lang/translated/')
    log.info(os.getcwd())
    print('### Encoding...')
    log.debug('Encoding...')
    execute('EsoExtractData.exe -x zh.lang.csv')

    print('### Validating...')
    log.debug('Validating...')
    shutil.copy('zh.lang', 'zh1.lang')
    execute('EsoExtractData -l zh1.lang')
    num1 = get_linenum('../en.lang.csv')
    num2 = get_linenum('zh.lang.csv')
    num3 = get_linenum('zh1.lang.csv')
    log.info('validate line num: %d, %d, %d' % (num1, num2, num3))
    if not num1 == num2 == num3:
        log.error('Validation failed')
        sys.exit(-1)

    os.chdir('../../../')
    log.info(os.getcwd())
    print('### Packaging...')
    log.debug('Packaging...')
    log.info('copy AddOns')
    shutil.copytree('AddOns/', '输出/生成简体插件/AddOns')
    log.info('copy lang')
    shutil.copy('translation/lang/translated/zh.lang',
                '输出/生成简体插件/AddOns/gamedata/lang/')
    log.info('copy readme')
    shutil.copy('工具/生成插件/README_chs.txt', '输出/生成简体插件/README.txt')
    log.info('clear AddOns')
    os.remove('输出/生成简体插件/AddOns/EsoUI/lang/.gitignore')
    os.remove('输出/生成简体插件/AddOns/EsoZH/fonts/README.md')
    os.remove('输出/生成简体插件/AddOns/gamedata/lang/.gitignore')

    os.chdir('输出/生成简体插件/')
    log.info(os.getcwd())
    with open('README.txt', 'rt', encoding='utf-8') as fp:
        desc = fp.read()
    zip_name = 'ESO汉化插件.zip'
    zipf = zipfile.ZipFile(zip_name, 'w', zipfile.ZIP_DEFLATED)
    zipf.comment = bytes(desc, encoding='gbk')
    for root, dirs, files in os.walk('.'):
        for f in files:
            if f != zip_name:
                zipf.write(os.path.join(root, f))
    zipf.close()
Example #53
0
                text = gzip.GzipFile(fileobj=cStringIO.StringIO(text)).read()
        except httplib.IncompleteRead as e:
            text = e.partial
        except Exception, e:
            logging.user(
                self.request,
                "~SN~FRFailed~FY to fetch ~FGoriginal text~FY: %s" % e)
            logging.error('error fetch_request: ' + str(e) +
                          # '  feed_id:' + str(self.story.story_feed_id) +
                          '  story_link: ' + str(self.story.story_permalink))
            return
        finally:
            opener.close()

        if not text:
            logging.error('error fetch text: text is null')
            return
        #soup = BeautifulSoup(text)
        #text = soup.renderContents()
        try:
            original_text_doc = readability.Document(
                text, url=self.story.story_permalink)
            content = original_text_doc.summary(html_partial=True)
            print "the length of content: %s" % len(content)
            #content = content.encode("utf-8")
        except readability.Unparseable, e:
            logging.error('error getting summary: ' + str(e) +
                          # '  feed_id:' + str(self.story.story_feed_id) +
                          '  story_link: ' + str(self.story.story_permalink))
            # if settings.SEND_ERROR_MAILS:
            #     mail_admins("Error in text_importer Build Document",str(e)+\
Example #54
0
def main():
    args = parse_args()

    # --------------------------- Step 1. Initialize OpenVINO Runtime Core ------------------------------------------------
    log.info('Creating OpenVINO Runtime Core')
    core = Core()

    # --------------------------- Step 2. Read a model --------------------------------------------------------------------
    if args.model:
        log.info(f'Reading the model: {args.model}')
        # (.xml and .bin files) or (.onnx file)
        model = core.read_model(args.model)

        # --------------------------- Step 3. Apply preprocessing -------------------------------------------------------------
        if args.output_layers:
            output_layer_names, output_layer_ports = parse_outputs_from_args(
                args)
            model.add_outputs(list(zip(output_layer_names,
                                       output_layer_ports)))

        if args.layout:
            layouts = parse_input_layouts(args, model.inputs)

        ppp = PrePostProcessor(model)

        for i in range(len(model.inputs)):
            ppp.input(i).tensor().set_element_type(Type.f32)

            input_name = model.input(i).get_any_name()

            if args.layout and input_name in layouts.keys():
                ppp.input(i).tensor().set_layout(Layout(layouts[input_name]))
                ppp.input(i).model().set_layout(Layout(layouts[input_name]))

        for i in range(len(model.outputs)):
            ppp.output(i).tensor().set_element_type(Type.f32)

        model = ppp.build()

        if args.batch_size:
            batch_size = args.batch_size if args.context_window_left == args.context_window_right == 0 else 1

            if any([not _input.node.layout.empty for _input in model.inputs]):
                set_batch(model, batch_size)
            else:
                log.warning(
                    'Layout is not set for any input, so custom batch size is not set'
                )

    # --------------------------- Step 4. Configure plugin ----------------------------------------------------------
    devices = args.device.replace('HETERO:', '').split(',')
    plugin_config = {}

    if 'GNA' in args.device:
        gna_device_mode = devices[0] if '_' in devices[0] else 'GNA_AUTO'
        devices[0] = 'GNA'

        plugin_config['GNA_DEVICE_MODE'] = gna_device_mode
        plugin_config['GNA_PRECISION'] = f'I{args.quantization_bits}'
        plugin_config['GNA_EXEC_TARGET'] = args.exec_target
        plugin_config['GNA_PWL_MAX_ERROR_PERCENT'] = str(args.pwl_me)

        # Set a GNA scale factor
        if args.import_gna_model:
            if args.scale_factor:
                log.warning(
                    f'Custom scale factor will be used for imported GNA model: {args.import_gna_model}'
                )
                set_scale_factors(plugin_config, parse_scale_factors(args))
            else:
                log.info(
                    f'Using scale factor from the imported GNA model: {args.import_gna_model}'
                )
        else:
            if args.scale_factor:
                set_scale_factors(plugin_config, parse_scale_factors(args))
            else:
                scale_factors = []

                for file_name in re.split(', |,', args.input):
                    _, utterances = read_utterance_file(file_name)
                    scale_factors.append(get_scale_factor(utterances[0]))

                log.info(
                    'Using scale factor(s) calculated from first utterance')
                set_scale_factors(plugin_config, scale_factors)

        if args.export_embedded_gna_model:
            plugin_config[
                'GNA_FIRMWARE_MODEL_IMAGE'] = args.export_embedded_gna_model
            plugin_config[
                'GNA_FIRMWARE_MODEL_IMAGE_GENERATION'] = args.embedded_gna_configuration

        if args.performance_counter:
            plugin_config['PERF_COUNT'] = 'YES'

    device_str = f'HETERO:{",".join(devices)}' if 'HETERO' in args.device else devices[
        0]

    # --------------------------- Step 5. Loading model to the device -----------------------------------------------------
    log.info('Loading the model to the plugin')
    if args.model:
        compiled_model = core.compile_model(model, device_str, plugin_config)
    else:
        with open(args.import_gna_model, 'rb') as f:
            buf = BytesIO(f.read())
            compiled_model = core.import_model(buf, device_str, plugin_config)

    # --------------------------- Exporting GNA model using InferenceEngine AOT API ---------------------------------
    if args.export_gna_model:
        log.info(f'Writing GNA Model to {args.export_gna_model}')
        user_stream = compiled_model.export_model()
        with open(args.export_gna_model, 'wb') as f:
            f.write(user_stream)
        return 0

    if args.export_embedded_gna_model:
        log.info(
            f'Exported GNA embedded model to file {args.export_embedded_gna_model}'
        )
        log.info(
            f'GNA embedded model export done for GNA generation {args.embedded_gna_configuration}'
        )
        return 0

    # --------------------------- Step 6. Set up input --------------------------------------------------------------
    if args.input_layers:
        input_layer_names = re.split(', |,', args.input_layers)
    else:
        input_layer_names = [
            _input.any_name for _input in compiled_model.inputs
        ]

    input_file_names = re.split(', |,', args.input)

    if len(input_layer_names) != len(input_file_names):
        log.error(
            f'Number of model inputs ({len(compiled_model.inputs)}) is not equal '
            f'to number of ark files ({len(input_file_names)})')
        sys.exit(-3)

    input_file_data = [
        read_utterance_file(file_name) for file_name in input_file_names
    ]

    infer_data = [{
        input_layer_names[j]: input_file_data[j].utterances[i]
        for j in range(len(input_layer_names))
    } for i in range(len(input_file_data[0].utterances))]

    if args.output_layers:
        output_layer_names, output_layer_ports = parse_outputs_from_args(args)
        # If a name of output layer contains a port number then concatenate output_layer_names and output_layer_ports
        if ':' in compiled_model.outputs[0].any_name:
            output_layer_names = [
                f'{output_layer_names[i]}:{output_layer_ports[i]}'
                for i in range(len(output_layer_names))
            ]
    else:
        output_layer_names = [compiled_model.outputs[0].any_name]

    if args.output:
        output_file_names = re.split(', |,', args.output)

        if len(output_layer_names) != len(output_file_names):
            log.error(
                'The number of output files is not equal to the number of model outputs.'
            )
            sys.exit(-6)

    if args.reference:
        reference_file_names = re.split(', |,', args.reference)

        if len(output_layer_names) != len(reference_file_names):
            log.error(
                'The number of reference files is not equal to the number of model outputs.'
            )
            sys.exit(-5)

        reference_file_data = [
            read_utterance_file(file_name)
            for file_name in reference_file_names
        ]

        references = [{
            output_layer_names[j]: reference_file_data[j].utterances[i]
            for j in range(len(output_layer_names))
        } for i in range(len(input_file_data[0].utterances))]

    # --------------------------- Step 7. Create infer request ------------------------------------------------------
    infer_request = compiled_model.create_infer_request()

    # --------------------------- Step 8. Do inference --------------------------------------------------------------------
    log.info('Starting inference in synchronous mode')
    results = []
    total_infer_time = 0

    for i in range(len(infer_data)):
        start_infer_time = default_timer()

        # Reset states between utterance inferences to remove a memory impact
        for state in infer_request.query_state():
            state.reset()

        results.append(
            do_inference(
                infer_data[i],
                infer_request,
                args.context_window_left,
                args.context_window_right,
            ))

        infer_time = default_timer() - start_infer_time
        total_infer_time += infer_time
        num_of_frames = infer_data[i][input_layer_names[0]].shape[0]
        avg_infer_time_per_frame = infer_time / num_of_frames

        # --------------------------- Step 9. Process output ------------------------------------------------------------------
        log.info('')
        log.info(f'Utterance {i}:')
        log.info(f'Total time in Infer (HW and SW): {infer_time * 1000:.2f}ms')
        log.info(f'Frames in utterance: {num_of_frames}')
        log.info(
            f'Average Infer time per frame: {avg_infer_time_per_frame * 1000:.2f}ms'
        )

        for name in output_layer_names:
            log.info('')
            log.info(f'Output blob name: {name}')
            log.info(f'Number of scores per frame: {results[i][name].shape[1]}')

            if args.reference:
                log.info('')
                compare_with_reference(results[i][name], references[i][name])

        if args.performance_counter:
            if 'GNA' in args.device:
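                # profiling_info[0] and [1] are assumed to carry the total and
                # stall cycle counters of the GNA device in their real_time fields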
                total_cycles = infer_request.profiling_info[
                    0].real_time.total_seconds()
                stall_cycles = infer_request.profiling_info[
                    1].real_time.total_seconds()
                active_cycles = total_cycles - stall_cycles
                frequency = 10**6
                if args.arch == 'CORE':
                    frequency *= GNA_CORE_FREQUENCY
                else:
                    frequency *= GNA_ATOM_FREQUENCY
                total_inference_time = total_cycles / frequency
                active_time = active_cycles / frequency
                stall_time = stall_cycles / frequency
                log.info('')
                log.info('Performance Statistics of GNA Hardware')
                log.info(
                    f'   Total Inference Time: {(total_inference_time * 1000):.4f} ms'
                )
                log.info(f'   Active Time: {(active_time * 1000):.4f} ms')
                log.info(f'   Stall Time:  {(stall_time * 1000):.4f} ms')

    log.info('')
    log.info(f'Total sample time: {total_infer_time * 1000:.2f}ms')

    if args.output:
        for i, name in enumerate(output_layer_names):
            # i indexes model outputs, j indexes utterances
            data = [
                results[j][name]
                for j in range(len(input_file_data[0].utterances))
            ]
            write_utterance_file(output_file_names[i], input_file_data[0].keys,
                                 data)
            log.info(f'File {output_file_names[i]} was created!')


    # ----------------------------------------------------------------------------------------------------------------------
    log.info(
        'This sample is an API example, '
        'for any performance measurements please use the dedicated benchmark_app tool\n'
    )
    return 0
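A minimal invocation sketch for the sample above; the script name and exact flag spellings are hypothetical, since the argument parser is not shown here:

    # hypothetical invocation, assuming conventional -m/-i/-d flags
    # python speech_sample.py -m model.xml -i input.ark -d GNA_AUTO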
示例#55
0
    log.warning('(x) Exit')
    arg = input().strip()[:1]
    print()
    
    model = ContentBasedRecommender()
    if arg == 't':
        # recs = model.recommend_batch(userids=data.get_target_playlists(), urm=data.get_urm_train())
        # model.evaluate(recommendations=recs, test_urm=data.get_urm_test())
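        # The hyperparameters passed below (k, alpha, beta, shrink, l, c) are
        # the values hard-coded by this snippet for the SIM_SPLUS similarity.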
        model.test(distance=model.SIM_SPLUS, k=500, alpha=0.75, beta=1, shrink=500, l=0.5, c=0.5)
    elif arg == 'r':
        log.info('Wanna save for evaluation (y/n)?')
        choice = input().strip()[:1] == 'y'
        model.fit(urm=data.get_urm_train_2(), icm=data.get_icm(), distance=model.SIM_SPLUS, k=500, shrink=500, alpha=0.75, beta=1, l=0.5, c=0.5)
        print('Saving the R^...')
        model.save_r_hat(evaluation=choice)
    elif arg == 's':
        model.fit(urm=data.get_urm_train_2(), icm=data.get_icm(), distance=model.SIM_SPLUS, k=500, shrink=500, alpha=0.75, beta=1, l=0.5, c=0.5)
        print('Saving the similarity matrix...')
        sps.save_npz('raw_data/saved_sim_matrix_evaluation_2/{}'.format(model.name), model.get_sim_matrix())
    # elif arg == 'v':
    #     model.validate(....)
    elif arg == 'x':
        pass
    else:
        log.error('Wrong option!')

    # recs = model.recommend_batch(userids=data.get_target_playlists(), urm=data.get_urm_train())
    # recs_seq = model.recommend_batch(userids=data.get_sequential_target_playlists(), urm=data.get_urm_train())
    # model.evaluate(recommendations=recs, test_urm=data.get_urm_test())
    # model.evaluate(recommendations=recs_seq, test_urm=data.get_urm_test())
示例#56
0
def export_xls(raw):
    """
    Выгрузка структуры телефонной книги в excel

    :param raw: {string: {string: [[string]]}}, структура тел. книги {организация: {отдел: [[данные_сотрудника]]}}
    :return: bool, True - если тел. книга выгружена, False - если произошла ошибка выгрузки
    """

    wb = xlwt.Workbook()
    ws = wb.add_sheet('Phone book')

    # Header row
    ws.write_merge(0, 0, 0, 1, '', ts.sh0)
    ws.write(0, 2, 'Full name', ts.sh1)
    ws.write(0, 3, 'Position', ts.sh2)
    ws.write(0, 4, 'Internal phone', ts.sh2)
    ws.write(0, 5, 'City phone', ts.sh2)
    ws.write(0, 6, 'Email', ts.sh3)

    line = 1

    item_len = [0, 0, 0, 0, 0, 0]  # Used to compute column widths
    item_style = [ts.ss1, ts.ss2, ts.ss2, ts.ss2, ts.ss3]  # Styles for the employee fields

    for kc in sorted(raw):
        # Organization name
        ws.write_merge(line, line, 0, 6, kc, ts.so0)
        ws.row(line).level = 0
        line += 1
        ws.row(line).level = 1

        for kd in sorted(raw[kc]):
            # Department name
            ws.write(line, 0, '', ts.sd0)
            ws.write_merge(line, line, 1, 6, kd, ts.sd1)
            ws.row(line).level = 1
            line += 1
            ws.row(line).level = 2

            for item in sorted(raw[kc][kd]):
                ws.write_merge(line, line, 0, 1, '', ts.ss0)

                for k, field in enumerate(item):
                    # Employee fields: full name, position, internal phone, city phone, email
                    ws.write(line, k + 2, field, item_style[k])

                    # Track the maximum width of each column
                    item_len[k] = max(item_len[k], len(field))

                line += 1
                ws.row(line).level = 2

    # Underline the bottom of the table
    ws.write_merge(line, line, 0, 6, '', ts.sb0)

    ws.row(line).level = 0

    # Set column widths; the organization and department columns are fixed
    ws.col(0).width = int(36.5 * 17)
    ws.col(1).width = int(36.5 * 17)

    # The remaining column widths depend on the longest field in each column
    for k, v in enumerate(item_len):
        ws.col(k + 2).width = int(36.5 * 7.3 * v)

    # Freeze the header row
    ws.panes_frozen = True
    ws.horz_split_pos = 1

    path = get_options('main', 'xls_path', True)

    if not path:
        log.critical('Failed to read the configuration file, see errors above')
        return

    try:
        wb.save(path)
    except PermissionError as e:
        log.error('Insufficient permissions to save the file: %s' % e.filename)
        return
    except FileNotFoundError as e:
        log.error('Invalid path or file name: %s' % e.filename)
        return

    return True
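A minimal usage sketch for the exporter above; the sample data is hypothetical and mirrors the structure described in the docstring:

    raw = {
        'Acme Corp': {
            'Engineering': [
                ['Ivanov I. I.', 'Engineer', '1234', '+7 495 000-00-00', 'ivanov@acme.example'],
            ],
        },
    }
    if export_xls(raw):
        print('Phone book exported')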
示例#57
0
                feed.save_feed_history(e.code, e.msg, e.fp.read())
                fetched_feed = None
            except Feed.DoesNotExist:
                logging.debug('   ---> [%-30s] ~FRFeed is now gone...' %
                              (str(feed_id)[:30]))
                continue
            except TimeoutError:
                logging.debug('   ---> [%-30s] ~FRFeed fetch timed out...' %
                              (feed.title[:30]))
                feed.save_feed_history(505, 'Timeout', '')
                feed_code = 505
                fetched_feed = None
            except Exception as e:
                logging.debug('[%d] ! -------------------------' % (feed_id, ))
                tb = traceback.format_exc()
                logging.error(tb)
                logging.debug('[%d] ! -------------------------' % (feed_id, ))
                ret_feed = FEED_ERREXC
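                # Re-fetch a fresh Feed object before recording the error,
                # falling back to feed_id when the current object has no pk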
                feed = Feed.get_by_id(getattr(feed, 'pk', feed_id))
                feed.save_feed_history(500, "Error", tb)
                feed_code = 500
                fetched_feed = None
                mail_feed_error_to_admin(feed, e, local_vars=locals())
                if (not settings.DEBUG and hasattr(settings, 'RAVEN_CLIENT')
                        and settings.RAVEN_CLIENT):
                    settings.RAVEN_CLIENT.captureException()

            if not feed_code:
                if ret_feed == FEED_OK:
                    feed_code = 200
                elif ret_feed == FEED_SAME:
示例#58
0
            if i + len(w) <= 18:
                # The word still fits within the 18-character line budget
                i += len(w)
                final_sentence += ' ' + w
            else:
                # Start a new line, resetting the running width
                i = len(w)
                final_sentence += '\n ' + w
    return final_sentence


# -----------------------------------
#  I/O Procedures
# -----------------------------------


def read_file(path):
    with open(path, "r") as f:
        sentences = f.readlines()
    return sentences


def save_file(path, image, name_file):
    log.info(f'4/4 Saving image: {name_file}')
    image.save(path + '/' + name_file)


if __name__ == '__main__':
    if len(sys.argv) < 3:
        log.error('Bad arguments. Required: input text file path and output path')
        sys.exit(1)
    run(sys.argv[1], sys.argv[2])
示例#59
0
def main():
    lang = 'zh'
    mode = 'both'  # origin, translation, both

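    # Typical invocation (script name hypothetical): python make_str.py -l zh -m both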
    # getopt
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'l:m:h')
    except getopt.GetoptError as e:
        log.error(e)
        usage()
        sys.exit(2)
    for o, a in opts:
        if o == '-l':
            lang = a.lower()
        elif o == '-m':
            mode = a.lower()
            if mode not in ('origin', 'translation', 'both'):
                usage()
                sys.exit(2)
        elif o == '-h':
            usage()
            return

    cd = sys.path[0]
    translation_path = os.path.join(cd, '../translation')
    dest_path = os.path.join(cd, '../AddOns/EsoUI/lang')

    # load header
    header_file = os.path.join(translation_path, 'str_header.txt')
    with open(header_file, 'rt', encoding='utf-8') as fp:
        header = fp.readlines()

    ui_mgr_pregame = UiMgr()
    ui_mgr_client = UiMgr()

    # load lua
    pregame_src = os.path.join(translation_path, 'en_pregame.lua')
    ui_mgr_pregame.load_lua_file(pregame_src)

    client_src = os.path.join(translation_path, 'en_client.lua')
    ui_mgr_client.load_lua_file(client_src)

    # load translation
    translate_file = os.path.join(translation_path, '%s_translate.txt' % lang)
    with open(translate_file, 'rt', encoding='utf-8') as fp:
        lines = fp.readlines()
        ui_mgr_pregame.apply_translate_from_txt_lines(lines)
        ui_mgr_client.apply_translate_from_txt_lines(lines)

    log.info('mode: %s' % mode)

    # save str
    pregame_dest = os.path.join(dest_path, '%s_pregame.str' % lang)
    pregame_lines = ui_mgr_pregame.get_str_lines(mode)
    log.info('save to %s.' % pregame_dest)
    with open(pregame_dest, 'wt', encoding='utf-8') as fp:
        fp.writelines(header)
        fp.writelines(pregame_lines)

    client_dest = os.path.join(dest_path, '%s_client.str' % lang)
    client_lines = ui_mgr_client.get_str_lines(mode)
    log.info('save to %s.' % client_dest)
    with open(client_dest, 'wt', encoding='utf-8') as fp:
        fp.writelines(header)
        fp.writelines(client_lines)

    # save en str (fix fonts)
    if lang != 'en':
        pregame_dest = os.path.join(dest_path, 'en_pregame.str')
        pregame_lines = ui_mgr_pregame.get_str_lines(mode)
        log.info('save to %s.' % pregame_dest)
        with open(pregame_dest, 'wt', encoding='utf-8') as fp:
            fp.writelines(header)
            # keep version info
            for line in pregame_lines:
                if line.startswith('[SI_VERSION] ='):
                    fp.write(line + '\n')

        client_dest = os.path.join(dest_path, 'en_client.str')
        client_lines = ui_mgr_client.get_str_lines(mode)
        log.info('save to %s.' % client_dest)
        with open(client_dest, 'wt', encoding='utf-8') as fp:
            fp.writelines(header)
            # keep version info
            for line in client_lines:
                if line.startswith('[SI_VERSION] ='):
                    fp.write(line + '\n')
示例#60
0
    else:
        log.error('WRONG FOLDER')


def wizard_CF():
    print('Wizard soon...')


def wizard_CB():
    print('Wizard soon...')


def wizard_misc():
    print('Wizard soon...')


if __name__ == '__main__':
    print()
    log.error(
        '(¯`·.¸¸.·´¯`·.¸¸.->  GabDamPar ® - The AIO Recommender System  <-.¸¸.·´¯`·.¸¸.·´¯)'
    )
    print()

    menu.show(
        'Which model do you want to run?',
        {
            '1': ('Collaborative Filtering', wizard_CF),
            '2': ('Content Based', wizard_CB),
            '3': ('Miscellaneous', wizard_misc),
            '4': ('Hybrid', wizard_hybrid),
        },
        main_menu=True,
    )