Example #1
    def __init__(self, log_path, log_sample_size=500, gzip_decode=False):
        """
        :param log_path: The path to a log file
        :param log_sample_size: The number of most recent entries to include
        :param gzip_decode: If True, we'll decode the log before reading it in
        """

        self.log_path = log_path
        self.exists = False
        self.current_line = 0
        if gzip_decode and not log_path.endswith('.decoded'):
            decoded_log_path = log_path + '.decoded'
            if not os.path.exists(decoded_log_path):
                with open(decoded_log_path, 'w') as out:
                    with gzip.open(log_path, 'rb') as f:
                        # read until EOF; bail out on a corrupt or truncated
                        # stream rather than looping forever on the same line
                        while True:
                            try:
                                line = f.readline().decode('utf-8',
                                                           errors='ignore')
                            except (gzip.BadGzipFile, EOFError):
                                break
                            if not line:
                                break
                            out.write(line)

            self.log_path = decoded_log_path
        linecache.updatecache(self.log_path)
        self.last_line_num = self.find_latest_line_offset()
        if self.last_line_num < log_sample_size:
            self.entries = list(self.iter_cache(start=1))
        else:
            self.entries = list(
                self.iter_cache(start=self.last_line_num - log_sample_size + 1))
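A minimal usage sketch for this constructor, assuming it sits on a log-reader class (named LogReader here purely for illustration) that also provides the iter_cache and find_latest_line_offset helpers it calls:

# LogReader and the path are hypothetical, for illustration only
reader = LogReader('/var/log/app.log.gz', log_sample_size=100, gzip_decode=True)
print(reader.entries[-1])  # the most recent cached entry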
Example #2
def build_cache(verbose=False):
    global first_name_cache
    global name_index_cache
    global name_index_sorted_cache
    global field_cache
    count = 0

    first_name_cache[:] = []
    name_index_cache.clear()
    name_index_sorted_cache[:] = []
    field_cache.clear()

    try:
        with open('contacts.ab', 'r') as read_file_object:
            for line in read_file_object:
                count += 1
                temp_split_list = line.split('|')
                temp_split_field_list = temp_split_list[0].split('@')
                first_name_cache.append(temp_split_field_list[1])
                name_index_cache[temp_split_field_list[1]] = count
        sort_index_cache()
        field_cache = get_fields()
        linecache.updatecache('contacts.ab')
    except IOError:
        if verbose:
            print('Contact database not found')
Example #3
def _fix_linecache_record(obj):
  """Fixes potential corruption of linecache in the presence of functools.wraps.

  functools.wraps modifies the target object's __module__ field, which seems
  to confuse linecache in special instances, for example when the source is
  loaded from a .par file (see https://google.github.io/subpar/subpar.html).

  This function simply triggers a call to linecache.updatecache when a mismatch
  is detected between the object's __module__ property and the object's source
  file.

  Args:
    obj: Any
  """
  if hasattr(obj, '__module__'):
    obj_file = inspect.getfile(obj)
    obj_module = obj.__module__

    # A snapshot of the loaded modules helps avoid "dict changed size during
    # iteration" errors.
    loaded_modules = tuple(sys.modules.values())
    for m in loaded_modules:
      if hasattr(m, '__file__') and m.__file__ == obj_file:
        # __module__ is a string, so compare it against the module's name
        if obj_module != m.__name__:
          linecache.updatecache(obj_file, m.__dict__)
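The module_globals argument is what makes this repair work: if the source file cannot be read from disk, CPython's linecache falls back to the module's __loader__.get_source. A standalone sketch of that fallback, with a stand-in loader object (not a real API):

import linecache

fake_globals = {
    '__name__': 'mymod',
    '__loader__': type('FakeLoader', (), {
        'get_source': staticmethod(lambda name: 'x = 1\n'),
    })(),
}
# the path does not exist, so updatecache consults the loader instead
lines = linecache.updatecache('/nonexistent/mymod.py', fake_globals)
assert lines == ['x = 1\n']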
Example #4
    def remote_exec(self, source, **kwargs):
        """ return channel object and connect it to a remote
            execution thread where the given ``source`` executes.

            * ``source`` is a string: execute source string remotely
              with a ``channel`` put into the global namespace.
            * ``source`` is a pure function: serialize source and
              call function with ``**kwargs``, adding a
              ``channel`` object to the keyword arguments.
            * ``source`` is a pure module: execute source of module
              with a ``channel`` in its global namespace

            In all cases the binding ``__name__='__channelexec__'``
            will be available in the global namespace of the remotely
            executing code.
        """
        call_name = None
        if isinstance(source, types.ModuleType):
            linecache.updatecache(inspect.getsourcefile(source))
            source = inspect.getsource(source)
        elif isinstance(source, types.FunctionType):
            call_name = source.__name__
            source = _source_of_function(source)
        else:
            source = textwrap.dedent(str(source))

        if call_name is None and kwargs:
            raise TypeError("can't pass kwargs to non-function remote_exec")

        channel = self.newchannel()
        self._send(Message.CHANNEL_EXEC, channel.id,
                   gateway_base.dumps_internal((source, call_name, kwargs)))
        return channel
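This matches execnet's Gateway.remote_exec; a typical round trip with a string source looks like the sketch below (assuming execnet is installed and a local subprocess gateway is acceptable):

import execnet

gw = execnet.makegateway()                       # local Python subprocess
channel = gw.remote_exec("channel.send(6 * 7)")  # string source, per the docstring
print(channel.receive())                         # -> 42
gw.exit()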
Example #5
def test_field_overwrites_bases_method_in_item_in_script(
    tmp_path, source_code, lineno, offset, text, stack_frame_support
):
    tmp_file = tmp_path / "foo.py"
    tmp_file.write_text(source_code)
    tmp_file = str(tmp_file)
    linecache.updatecache(tmp_file)

    exc = None
    try:
        exec(compile(source_code, tmp_file, "exec"))
    except Exception as exc_:
        exc = exc_

    assert isinstance(exc, SyntaxError)
    if stack_frame_support:
        assert exc.filename == tmp_file
        assert exc.lineno == lineno
        assert exc.offset == offset
        assert exc.text == text
    else:
        assert exc.filename is None
        assert exc.lineno is None
        assert exc.offset is None
        assert exc.text == text.replace(" ", "").replace('"', "'")
Example #6
def test_field_overwrites_item_property_oneline_in_script(
        tmp_path, stack_frame_support, template, item_property):
    source_code = template % (item_property, )
    tmp_file = tmp_path / "foo.py"
    tmp_file.write_text(source_code)
    tmp_file = str(tmp_file)
    linecache.updatecache(tmp_file)

    with pytest.raises(SyntaxError) as catch:
        exec(compile(source_code, tmp_file, "exec"))

    exc = catch.value
    if stack_frame_support:
        assert exc.filename == tmp_file
        assert exc.lineno == 1
        assert exc.offset == 0
        assert exc.text == source_code
    else:
        assert exc.filename is None
        assert exc.lineno is None
        assert exc.offset is None
        assert (
            exc.text ==
            f"""{item_property}=Field(XPathExtractor("./span[@class='name']"))"""
        )
Example #7
def _fix_linecache_record(obj):
    """Fixes potential corruption of linecache in the presence of functools.wraps.

  functools.wraps modifies the target object's __module__ field, which seems
  to confuse linecache in special instances, for example when the source is
  loaded from a .par file (see https://google.github.io/subpar/subpar.html).

  This function simply triggers a call to linecache.updatecache when a mismatch
  is detected between the object's __module__ property and the object's source
  file.

  Args:
    obj: Any
  """
    if hasattr(obj, '__module__'):
        obj_file = inspect.getfile(obj)
        obj_module = obj.__module__

        # A snapshot of the loaded modules helps avoid "dict changed size during
        # iteration" errors.
        loaded_modules = tuple(sys.modules.values())
        for m in loaded_modules:
            if hasattr(m, '__file__') and m.__file__ == obj_file:
                # __module__ is a string, so compare it against the module's name
                if obj_module != m.__name__:
                    linecache.updatecache(obj_file, m.__dict__)
Example #8
def test_field_overwrites_item_property_common_in_script(
        tmp_path, stack_frame_support, item_property):
    source_code = f"""
class User(Item):
    uid = Field(JSONExtractor("id"))
    {item_property} = Field(JSONExtractor({item_property!r}))
    """.strip()

    tmp_file = tmp_path / "foo.py"
    tmp_file.write_text(source_code)
    tmp_file = str(tmp_file)
    linecache.updatecache(tmp_file)

    with pytest.raises(SyntaxError) as catch:
        exec(compile(source_code, tmp_file, "exec"))

    exc = catch.value
    if stack_frame_support:
        assert exc.filename == tmp_file
        assert exc.lineno == 3
        assert exc.offset == 4
        assert (exc.text ==
                f"{item_property} = Field(JSONExtractor({item_property!r}))")
    else:
        assert exc.filename is None
        assert exc.lineno is None
        assert exc.offset is None
        assert (exc.text ==
                f"{item_property}=Field(JSONExtractor({item_property!r}))")
Example #9
    def remote_exec(self, source, **kwargs):
        """ return channel object and connect it to a remote
            execution thread where the given ``source`` executes.

            * ``source`` is a string: execute source string remotely
              with a ``channel`` put into the global namespace.
            * ``source`` is a pure function: serialize source and
              call function with ``**kwargs``, adding a
              ``channel`` object to the keyword arguments.
            * ``source`` is a pure module: execute source of module
              with a ``channel`` in its global namespace

            In all cases the binding ``__name__='__channelexec__'``
            will be available in the global namespace of the remotely
            executing code.
        """
        call_name = None
        if isinstance(source, types.ModuleType):
            linecache.updatecache(inspect.getsourcefile(source))
            source = inspect.getsource(source)
        elif isinstance(source, types.FunctionType):
            call_name = source.__name__
            source = _source_of_function(source)
        else:
            source = textwrap.dedent(str(source))

        if call_name is None and kwargs:
            raise TypeError("can't pass kwargs to non-function remote_exec")

        channel = self.newchannel()
        self._send(Message.CHANNEL_EXEC,
                   channel.id,
                   gateway_base.dumps_internal((source, call_name, kwargs)))
        return channel
Example #10
 def test_locals(self):
     linecache.updatecache('/foo.py', globals())
     c = test_code('/foo.py', 'method')
     f = test_frame(c, globals(), {'something': 1})
     s = traceback.StackSummary.extract(iter([(f, 6)]),
                                        capture_locals=True)
     self.assertEqual(s[0].locals, {'something': '1'})
Example #11
def log_output():
    """
    Log output.
    :return:
    """
    last_line = int(request.args.get('line', 0))
    limit = int(request.args.get('limit', 10))
    max_old = 200  # when jumping to the newest entries, how many older lines to include
    file = Config().OUT_PUT_LOG_TO_FILE_PATH
    res = []

    if last_line == -1:
        total_line = get_file_total_line_num(file)
        last_line = total_line - max_old if total_line > max_old else 0
        ranges = range(max_old + limit)
    else:
        ranges = range(limit)

    if Config().OUT_PUT_LOG_TO_FILE_ENABLED:
        # with open(Config().OUT_PUT_LOG_TO_FILE_PATH, 'r', encoding='utf-8') as f:
        #     res = f.readlines()[last_line:limit]
        linecache.updatecache(file)
        for i in ranges:
            # getline is 1-indexed; start after the lines already delivered
            tmp = linecache.getline(file, last_line + i + 1)
            if tmp != '':
                res.append(tmp)
        last_line += len(res)
    else:
        res = CommonLog.MESSAGE_OUTPUT_TO_FILE_IS_UN_ENABLE
    return jsonify({'last_line': last_line, 'data': res})
Example #12
    def decode(self, object, filename):
        lines = []

        linecache.updatecache(filename)
        if filename in linecache.cache:
            linecache.cache[filename] = (
                linecache.cache[filename][0],
                linecache.cache[filename][1],
                lines,
                filename,
            )
        last, new, old = slice(0, 0), 0, 0
        for current, cell, source in super().decode(object):
            if cell:
                lines += ["\n"] * (
                    object[last.stop : current.start].splitlines().__len__()
                    - 1
                    + (old - new)
                )

                source = getattr(self, "transform_" + cell["cell_type"])(source)

                lines += list(map("{}\n".format, source.splitlines()))
                new, old = map(len, map(str.splitlines, (source, object[current])))
                if not lines[-1]:
                    lines.pop()
                last = current

        return "".join(lines)
Example #13
 def test_no_locals(self):
     linecache.updatecache('/foo.py', globals())
     e = Exception("uh oh")
     c = test_code('/foo.py', 'method')
     f = test_frame(c, globals(), {'something': 1})
     tb = test_tb(f, 6, None)
     exc = traceback.TracebackException(Exception, e, tb)
     self.assertEqual(exc.stack[0].locals, None)
Example #14
 def test_extract_stackup_deferred_lookup_lines(self):
     linecache.clearcache()
     c = test_code('/foo.py', 'method')
     f = test_frame(c, None, None)
     s = traceback.StackSummary.extract(iter([(f, 6)]), lookup_lines=False)
     self.assertEqual({}, linecache.cache)
     linecache.updatecache('/foo.py', globals())
     self.assertEqual(s[0].line, "import sys")
Example #15
 def addCache(self, name, code):
     moduleName = 'cache.' + name
     filename = os.path.join('cache', name + '.py')
     with open(filename, 'w') as f:
         f.write(code)
     linecache.updatecache(filename)
     return moduleName
Example #18
 def JDCouponMain1(self):
   noticpath = linecache.getline(r"jdgetc1cfg.set", 28).strip()
   noticsetpath = noticpath + "/notic.set"
   try:
     noticlines = len(open(noticsetpath, errors="ignore", encoding="UTF-8").readlines())
     if noticlines != 15:
       print("Error: notic.set has the wrong number of lines; push notifications will be disabled")
       noticsetpath = "false"
   except FileNotFoundError:
     print("No notic.set file in that directory; push notifications will be disabled")
     noticsetpath = "false"
   if noticsetpath != "false":
     linecache.updatecache(noticsetpath)
   print("\nRunning the JD coupon grabber\n")
   jdheaders1 = {
     "User-Agent": "Mozilla/5.0 (Linux;Android 10;GM1910) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.106 Mobile Safari/537.36",
     "Cookie": "%s" % (linecache.getline(r"jdgetc1cfg.set", 25).strip())}
   jdcpactid1 = linecache.getline(r"jdgetc1cfg.set", 5).strip()
   jdcpkeyid1 = linecache.getline(r"jdgetc1cfg.set", 7).strip()
   jdcproleid1 = linecache.getline(r"jdgetc1cfg.set", 9).strip()
   if jdcproleid1 == "0":
     print("Current coupon keyid: %s\n" % (jdcpkeyid1))
   else:
     print("Current coupon keyid: %s\nroleid: %s\n" % (jdcpkeyid1, jdcproleid1))
   for files in os.walk(os.getcwd()):
     if re.findall(r"JDCoupon1 coupon %s.*\.rushed" % (jdcpkeyid1), str(files), flags=re.I) != []:
       print("Coupon %s was already grabbed; to grab it again, delete the matching .rushed file in this directory first" % (jdcpkeyid1))
       AllinOneExit1()
   jdgetcoupons1 = self.JDGetCoupons1(jdcpactid1, jdcpkeyid1, jdcproleid1, jdheaders1)
   if re.findall(r"not login", str(jdgetcoupons1), flags=re.I) != []:
     print("Response: %s\nThe JD login expired; please fetch a new Cookie" % (jdgetcoupons1))
     AllinOneExit1()
   elif jdgetcoupons1 is None or re.findall(r"activityId invalid", str(jdgetcoupons1), flags=re.I) != []:
     print("Response: %s\nWrong activity page ID; please fetch it again" % (jdgetcoupons1))
     AllinOneExit1()
   # the next two patterns match the Chinese text returned by the JD API
   elif re.findall(r"已经参加过", str(jdgetcoupons1)) != []:
     print("Response: %s\nThis account already claimed the coupon; please check it yourself" % (jdgetcoupons1))
     AllinOneExit1()
   elif re.findall(r"来太晚了|结束", str(jdgetcoupons1)) != []:
     print("Response: %s\nToo late, the coupon has expired" % (jdgetcoupons1))
     AllinOneExit1()
   # 5-second countdown removed; re-enable if needed:
   """
   for i in range(5, 0, -1):
     print("Countdown: %s s" % (i), end="\r")
     time.sleep(1)
   """
   self.JDCGetting1(jdcpactid1, jdcpkeyid1, jdcproleid1, jdheaders1)
   message = "%s grabbed successfully; please check your account" % (jdcpkeyid1)
   print(message)
   with open("JDCoupon1 coupon " + jdcpkeyid1 + " " + \
             time.strftime("%H{}%M{}%S{}").format("h", "m", "s") + " grabbed.rushed", "w"):
     print("Recorded the grab time for coupon %s" % (jdcpkeyid1))
   if noticsetpath != "false":
     Notification().NoticMain(noticsetpath, message)
   AllinOneExit1()
Example #19
 def test_lookup_lines(self):
     linecache.clearcache()
     e = Exception("uh oh")
     c = test_code('/foo.py', 'method')
     f = test_frame(c, None, None)
     tb = test_tb(f, 6, None)
     exc = traceback.TracebackException(Exception, e, tb, lookup_lines=False)
     self.assertEqual({}, linecache.cache)
     linecache.updatecache('/foo.py', globals())
     self.assertEqual(exc.stack[0].line, "import sys")
Example #20
 def test_lookup_lines(self):
     linecache.clearcache()
     e = Exception("uh oh")
     c = test_code('/foo.py', 'method')
     f = test_frame(c, None, None)
     tb = test_tb(f, 6, None)
     exc = traceback.TracebackException(Exception, e, tb, lookup_lines=False)
     self.assertEqual(linecache.cache, {})
     linecache.updatecache('/foo.py', globals())
     self.assertEqual(exc.stack[0].line, "import sys")
Example #21
 def test_locals(self):
     linecache.updatecache('/foo.py', globals())
     e = Exception("uh oh")
     c = test_code('/foo.py', 'method')
     f = test_frame(c, globals(), {'something': 1, 'other': 'string'})
     tb = test_tb(f, 6, None)
     exc = traceback.TracebackException(
         Exception, e, tb, capture_locals=True)
     self.assertEqual(
         exc.stack[0].locals, {'something': '1', 'other': "'string'"})
Example #22
 def _get_line(self, key):
     if key in self.buffer:
         line = self.buffer[key]
     else:
         line = linecache.getline(self.filepath, self.keys.index(key) + 2)
         if line == '':
             linecache.updatecache(self.filepath)
             line = linecache.getline(self.filepath,
                                      self.keys.index(key) + 2)
     return line
Example #24
 def decode(self, object, filename):
     source = self.module_to_source(ast.parse(object))
     linecache.updatecache(filename)
     if filename in linecache.cache:
         linecache.cache[filename] = (
             linecache.cache[filename][0],
             linecache.cache[filename][1],
             source,
             filename,
         )
     return "\n".join(source)
Example #25
 def select(self, path, target):
     linecache.updatecache(path)
     filelines = linecache.getlines(path)
     for line in filelines:
         arr = line.split('=')
         if arr[0] == target:
             val = arr[1]
             val = val[:len(val) - 1]  # drop the trailing newline
             print(val)
             return val
Example #26
 def make_module(name, code):
     code = dedent(code)
     assert name not in sys.modules
     spec = importlib.util.spec_from_loader(name, loader=None)
     mod = sys.modules[name] = importlib.util.module_from_spec(spec)
     path = tmp_path / f"{name}_{str(uuid4()).replace('-', '_')}.py"
     path.write_text(code)
     mod.__file__ = str(path)
     exec(code, mod.__dict__)
     linecache.updatecache(str(path), mod.__dict__)
     added_modules.append(name)
     return mod
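A hedged usage sketch, valid only inside the same fixture scope since make_module closes over tmp_path and added_modules (the module name and body below are made up). Writing the file and registering it with linecache.updatecache lets inspect recover the module's source afterwards; function-level lookups would still point at '<string>', because exec compiled the code from a string:

import inspect

mod = make_module("fake_mod", """
def answer():
    return 42
""")
assert "return 42" in inspect.getsource(mod)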
Example #27
def micro_test(data_len, path):
    # replace DATA_LEN in zkPoD.zok
    with open("zkPoD_template.zok", "r") as f:
        data = f.read().replace("DATA_LEN", str(data_len))
        with open(path + "zkPoD.zok", "w+") as file:
            file.write(data)

    print("data length: %s" % data_len)

    # compile circuit
    os.system(
        "cd " + path +
        " && zokrates compile -i zkPoD.zok -o zkPoD --light > compile.out")
    linecache.updatecache(path + "compile.out")
    constraints = linecache.getline(path + "compile.out",
                                    4).split(":")[1].strip()
    print("# of constraints: %s" % constraints)

    # GenParam
    st = time.perf_counter()
    os.system("cd " + path + " && zokrates setup -i zkPoD --light > setup.out")
    ed = time.perf_counter()
    linecache.updatecache(path + "setup.out")
    points = linecache.getline(path + "setup.out", 3).split(" ")[2].strip()
    print("# of points: %s" % points)
    print("GenParam: %s ms" % ((ed - st) * 1000))
    genparam = (ed - st) * 1000

    # ProveData
    args = generate_inputs(data_len)
    st = time.perf_counter()
    os.system(
        "cd " + path + " && zokrates compute-witness -i zkPoD -a " + args +
        " --light > witness.out && zokrates generate-proof -i zkPoD > proof.out"
    )
    ed = time.perf_counter()
    print("ProveData: %s ms" % ((ed - st) * 1000))
    provedata = (ed - st) * 1000

    # VerifyProof
    st = time.perf_counter()
    os.system("cd " + path + " && zokrates verify > verify.out")
    # 512 bits -> 64 bytes; simulate hashing the last block:
    # '8e6245e107a0127f17e480ba65f27e20ac48d13f15eedc93b716eb2806701f7d'
    sha2 = hashlib.sha256(
        b'0000000100020003000400050006000700080009001000110012001300140015'
    ).hexdigest()
    ed = time.perf_counter()
    print("VerifyProof: %s ms\n" % ((ed - st) * 1000))
    verifyproof = (ed - st) * 1000

    logs = [data_len, constraints, points, genparam, provedata, verifyproof]
    return ",".join(map(str, logs))
Example #28
    def refresh(self) -> None:
        """
        Refresh linecache

        Returns:
            None

        """
        linecache.updatecache(self.log_path)
        if self.last_line_num < self.log_sample_size:
            self.entries = list(self.iter_cache(start=1))
        else:
            self.entries = list(
                self.iter_cache(start=self.last_line_num - self.log_sample_size + 1))
Example #29
    def sendpacket(self):

        files = os.listdir(self.file_path)
        for file in files:
            print(self.file_path + file)
            lines = linecache.getlines(self.file_path + file)
            linecache.updatecache(self.file_path + file)
            p1 = re.compile("Get request from sender", re.I)
            for num in range(len(lines)):
                if p1.search(lines[num]):
                    if self.start_time < '20%s' % lines[num][3:18] < self.end_time:
                        packet = lines[num + 1]
                        if 'ACB2012777' in packet and 'T3' in packet:
                            print(packet)
Example #30
def test_field_name_overwrite_item_parameter_oneline_in_script(
        source_code, tmp_path):
    tmp_file = tmp_path / "foo.py"
    tmp_file.write_text(source_code)
    tmp_file = str(tmp_file)
    linecache.updatecache(tmp_file)

    with pytest.raises(SyntaxError) as catch:
        exec(compile(source_code, tmp_file, "exec"))

    exc = catch.value
    assert exc.filename == tmp_file
    assert exc.lineno == 1
    assert exc.offset == 0
    assert exc.text == source_code.split("\n")[0].strip()
Example #31
 def test_lazycache_provide_after_failed_lookup(self):
     linecache.clearcache()
     lines = linecache.getlines(NONEXISTENT_FILENAME, globals())
     linecache.clearcache()
     linecache.getlines(NONEXISTENT_FILENAME)
     linecache.lazycache(NONEXISTENT_FILENAME, globals())
     self.assertEqual(lines, linecache.updatecache(NONEXISTENT_FILENAME))
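Sequencing matters in this test: lazycache only registers its deferred fetcher when the filename has no real cache entry yet, hence the clearcache calls between steps. In isolation, the registered fetcher pulls source from the supplied globals' __loader__, as in this sketch (the loader object is a stand-in, not a real API):

import linecache

linecache.clearcache()
linecache.lazycache('fake_name.py', {
    '__name__': 'fake_mod',
    '__loader__': type('FakeLoader', (), {
        'get_source': staticmethod(lambda name: 'print("hi")\n'),
    })(),
})
assert linecache.getline('fake_name.py', 1) == 'print("hi")\n'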
Example #32
 def end_mark_line(self, n, get_mark):
     test_line = get_mark + 1
     # block until the marker line appears in the refreshed cache
     while not self.check_line_exist(test_line):
         time.sleep(2)
         linecache.updatecache(self.filename)
         self.logger.info("waiting for the file to change")
     for i in reversed(range(1, n)):
         tmp_line = get_mark + i
         self.logger.debug("tmp_line:" + str(tmp_line))
         self.logger.debug(self.check_line_exist(tmp_line))
         if self.check_line_exist(tmp_line):
             break
     return tmp_line
Example #33
def test_field_name_overwrite_item_parameter_common_in_script(tmp_path):
    source_code = source_codes[-1]

    tmp_file = tmp_path / "foo.py"
    tmp_file.write_text(source_code)
    tmp_file = str(tmp_file)
    linecache.updatecache(tmp_file)

    with pytest.raises(SyntaxError) as catch:
        exec(compile(source_code, tmp_file, "exec"))

    exc = catch.value
    assert exc.filename == tmp_file
    assert exc.lineno == 4
    assert exc.offset == 4
    assert exc.text == 'name = Field(JSONExtractor("name"))'
Example #34
 def get_table(self, table_name: str) -> DBTable:
     meta_data_table = read_json_file(META_DATA)
     if table_name not in meta_data_table.keys():
         raise ValueError("table name does not exist")
      fields = linecache.updatecache(
          f'{db_api.DB_ROOT}\\{table_name}.csv')[0][:-1].split(",")
     table = DBTable(table_name, fields, meta_data_table[table_name])
     return table
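Since updatecache returns the refreshed list of lines, the [0] above is the CSV header row. A rough equivalent through the more familiar calls (a sketch in the same scope):

path = f'{db_api.DB_ROOT}\\{table_name}.csv'
linecache.checkcache(path)
fields = linecache.getline(path, 1).rstrip('\n').split(',')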
Example #35
 def end_mark_line(self, n, get_mark):
     test_line = get_mark + 1
     # block until the marker line appears in the refreshed cache
     while not self.check_line_exist(test_line):
         time.sleep(2)
         linecache.updatecache(self.filename)
         self.logger.info('waiting for the file to change')
     for i in reversed(range(1, n)):
         tmp_line = get_mark + i
         self.logger.debug('tmp_line:' + str(tmp_line))
         self.logger.debug(self.check_line_exist(tmp_line))
         if self.check_line_exist(tmp_line):
             break
     return tmp_line
Example #36
def getTimeRangeInFile(fname):
    """
    returns the time range in the suspicious file as a tuple (firstTimestamp,
    lastTimestamp)
    """
    from os.path import getsize

    def _getTimestamp(line):
        spl=line.split()
        return int(spl[0])

    linecache.updatecache(fname)
    # binary mode: the loop below seeks by byte offset and decodes by hand
    with open(fname, 'rb') as fh:
        firstLine = next(fh).decode()
        try:
            first=_getTimestamp(firstLine)
        except IndexError:
            return (None, None)

        last=None
        numBytesInFile = getsize(fname)
        seekTo=numBytesInFile
        while not last:

            # seek back 1024 bytes from the end of the file, hoping that we
            # would arrive somewhere before the start of the last line
            seekTo-=1024
            if seekTo < 0:
                # cannot seek over the start of the file
                seekTo = 0

            # seek relative to start of file
            fh.seek(seekTo)
            lines = fh.readlines()
            lastLine = lines[-1].decode()
            try:
                last=_getTimestamp(lastLine)
            except IndexError:
                if seekTo==0:
                    #nothing else we could do, give up
                    return (None, None)
        return (first, last)
Example #37
def requestMain():
    if os.path.isfile("request.set") and len(
            open("request.set", errors="ignore",
                 encoding="utf-8").readlines()) == 23:
        noticpath = linecache.getline("request.set", 23).strip()
        noticsetpath = noticpath + "/notic.set"
        try:
            noticlines = len(
                open(noticsetpath, errors="ignore",
                     encoding="UTF-8").readlines())
            if noticlines != 15:
                print("Error: notic.set has the wrong number of lines; push notifications will be disabled")
                noticpath = "false"
        except FileNotFoundError:
            print("No notic.set file in the given directory; push notifications will be disabled")
            noticpath = "false"
        if noticpath != "false":
            linecache.updatecache(noticsetpath)
        requesttimeout = linecache.getline("request.set", 8).strip()
        requestlooptime = linecache.getline("request.set", 11).strip()
    else:
        print("No request.set in the current directory (or it has the wrong number of lines); "
              "enter the connection timeout and refresh interval manually; push notifications will be disabled")
        # keep the path defined so the requestGo call below cannot fail
        noticpath = "false"
        requesttimeout = input("Connection timeout in seconds (digits or a decimal only): ")
        requestlooptime = input("Refresh interval in seconds (digits or a decimal only): ")
    try:
        requesttimeout = float(requesttimeout)
    except ValueError:
        input("The connection timeout is not a number; enter digits or a decimal only!\nPress Enter to continue...")
        requestMain()
        return
    try:
        requestlooptime = float(requestlooptime)
    except ValueError:
        input("The refresh interval is not a number; enter digits or a decimal only!\nPress Enter to continue...")
        requestMain()
        return
    requesturlp = requestUrlp()
    requestheaders = requestHeaders()
    requestdata = requestData()
    requestTiming()
    requestGo(requesturlp, requestheaders, requestdata, requesttimeout,
              requestlooptime, noticpath)
    requestExit()
Example #38
def set_ml():
    """
    Read the measurement log file into memory and save new entries to the MySQL database.
    :return: None
    """
    # read every row of the mlist table
    try:
        ret = MList.objects.all().values()
    except Exception as e:
        logger.error(e)
        return

    # five sets holding the existing values of each mlist column, used
    # below to decide whether the log file contains new entries
    pcr_set = set()
    templatehash_set = set()
    tmptype_set = set()
    filedata_set = set()
    filerouter_set = set()
    for r in ret:
        pcr_set.add(r['pcr'])
        templatehash_set.add(r['templatehash'])
        tmptype_set.add(r['tmptype'])
        filedata_set.add(r['filedata'])
        filerouter_set.add(r['filerouter'])

    # linecache re-reads the file on demand, so the latest contents are visible
    lines = linecache.updatecache(gc.TRUST_LOG_PATH)

    # walk the lines; for every record not seen before, build a new
    # object and collect it in the temporary list mlist
    mlist = []
    for line in lines:
        if (line is not None) and line[0] != 'None' and (line.find(gc.ML_TAG)
                                                         != -1):
            line = line.split()
            if (line[0] not in pcr_set) or \
                    (line[1] not in templatehash_set) or \
                    (line[2] not in tmptype_set) or \
                    (line[3] not in filedata_set) or \
                    (line[4] not in filerouter_set):
                mlist.append(
                    MList(pcr=line[0],
                          template_hash=line[1],
                          tmp_type=line[2],
                          file_data=line[3],
                          file_router=line[4]))
    # bulk-insert the new records into the database
    try:
        MList.objects.bulk_create(mlist)
    except Exception as e:
        logger.error(e)
Example #39
def parse_large_file_via_line_cache(file_path):
    file_lines = get_file_lines(file_path)
    # integer division: thread counts and line offsets must be ints
    number_of_threads = multiprocessing.cpu_count() // 2
    slice_lines = file_lines // number_of_threads

    cached_lines = linecache.updatecache(file_path)
    threads = []
    for i in range(number_of_threads):
        start_line = i * slice_lines
        # the last slice absorbs any remainder of the file
        stop_line = max(
            (i + 1) * slice_lines,
            file_lines) if i + 1 == number_of_threads else (i + 1) * slice_lines
        t_name = 'Line cache thread {}'.format(i)
        print('{} {} -> {}'.format(t_name, start_line, stop_line))
        t = threading.Thread(target=parse_line_range,
                             name=t_name,
                             args=(cached_lines, start_line, stop_line))
        threads.append(t)
        t.start()

    [t.join() for t in threads]
Example #40
 def test_no_locals(self):
     linecache.updatecache('/foo.py', globals())
     c = test_code('/foo.py', 'method')
     f = test_frame(c, globals(), {'something': 1})
     s = traceback.StackSummary.extract(iter([(f, 6)]))
     self.assertEqual(s[0].locals, None)
Example #41
 def __init__(self, co_filename, co_name):
     linecache.updatecache(co_filename, None)
     self.co_filename = co_filename
     self.co_name = co_name
Example #42
    def post(self):
        # NOTE: just return
        self.write_ret(ErrorCode.SUCCESS,
                       dict_=DotDict(res=[]))
        return

        try:
            data = json_decode(self.request.body)
            logging.info("[LOG] packet request body: %s", data)
            start_time = data.get("start_time")
            end_time = data.get("end_time")
            mobile = data.get("mobile")
            sn = data.get("sn")
            packet_type = data.get("packet_types")
            is_report = data.get("is_report")
            search_type = data.get("search_type")
        except Exception as e:
            logging.exception("[LOG] Wrong data format. Exception: %s",
                              e.args)
            status = ErrorCode.ILLEGAL_DATA_FORMAT
            self.write_ret(status)
            return

        try:
            if search_type == '0':
                result = self.acbdb.get("SELECT tid "
                                        "  FROM T_TERMINAL_INFO"
                                        "  WHERE mobile = %s",
                                        mobile)
                if result:
                    tid = result['tid']
                else:
                    logging.error("[LOG] Packet Inquiry: %s don't has terminal.", 
                                  mobile)
                    self.write_ret(ErrorCode.TERMINAL_NOT_EXISTED, 
                                   dict_=None)
                    return
            elif search_type == '1':
                tid = sn

            fc = FileConf()
            file_path = fc.getLogFile() + '/'
            files = os.listdir(file_path)

            # make the files ordered
            d = {}
            for f in files:
                if not f.startswith('error'):
                    continue
                file_time = os.path.getmtime(file_path + f)
                d[file_time] = f
            L = sorted(d.keys())
            ftime = []
            for file_time in L:
                format = '%Y%m%d %H:%M:%S'
                tmp = time.localtime(file_time)
                dt = time.strftime(format, tmp)
                if dt < start_time:
                    logging.info("[LOG] skip file time :%s, file name:%s", 
                                 file_time, d[file_time])
                else:
                    ftime.append(file_time)
            files = [d.get(file_time) for file_time in ftime]

            lst = []
            for file in files:
                logging.info("[LOG] handle file: %s", file)
                lines = linecache.getlines(file_path + file)
                linecache.updatecache(file_path + file)
                if len(lines) != 0:
                    first_num = 0
                    last_num = len(lines) - 1
                    if len(lines[first_num]) <= 1:
                        continue
                    while lines[first_num][0] != '[':
                        first_num = first_num + 1
                    first_time = '20%s' % lines[first_num][3:18]
                    while lines[last_num][0] != '[':
                        last_num = last_num - 1
                    last_time = '20%s' % lines[last_num][3:18]
                    if start_time > last_time or end_time < first_time:
                        logging.info(
                            "[LOG] Ignored file: %s, BeginTime: %s, EndTime: %s", file, first_time, last_time)
                    else:
                        p1 = re.compile(tid, re.I)
                        p2 = re.compile("recv:", re.I)
                        p3 = re.compile(packet_type)
                        for num in range(len(lines)):
                            if p1.search(lines[num]) and p2.search(lines[num]) and p3.search(lines[num]):
                                if '20%s' % lines[num][3:18] > start_time and '20%s' % lines[num][3:18] < end_time:
                                    ldata = lines[num].split(',')
                                    T_packet_type = ldata[5][0:3] + ','
                                    T_packet_time = lines[num][3:18]
                                    packet = lines[num]
                                    p = {'packet_time': T_packet_time,
                                         'packet_type': T_packet_type,
                                         'packet': packet}
                                    lst.append(p)
                                    if is_report == 1:
                                        #ip = lines[num].split('\'')[1]

                                        #ip_index = lines[num].find('from')+4
                                        #ip = lines[num][ip_index:][1:-1]
                                        #ip = lines[num].split('\'')[1]

                                        p_ip = re.compile(
                                            r"from \('.*?', .*?\)")
                                        ip = p_ip.findall(lines[num])[0][6:-1]

                                        match_type = 'S' + ldata[5][1:3] + ','
                                        next_num = num + 1
                                        p6 = re.compile("I ", re.I)
                                        # NOTE: never add : in pattern
                                        #p7 = re.compile("send:", re.I)
                                        p7 = re.compile("send", re.I)
                                        p8 = re.compile(ip, re.I)
                                        p9 = re.compile(match_type, re.I)
                                        while True:
                                            if len(lines) - 1 < next_num:
                                                logging.info("[LOG] next_num:%s may be invalid, break", 
                                                             next_num)
                                                break
                                            if p6.search(lines[next_num]) and p7.search(lines[next_num]) and p8.search(lines[next_num]) and p9.search(lines[next_num]):
                                                S_packet_time = lines[
                                                    next_num][3:18]
                                                packet = lines[next_num]
                                                p = {'packet_time': S_packet_time,
                                                     'packet_type': match_type,
                                                     'packet': packet}
                                                lst.append(p)
                                                break
                                            else:
                                                next_num = next_num + 1
                                                if next_num == num + 5000:
                                                    break
                                    elif is_report == 0:
                                        pass
                else:
                    pass
                linecache.clearcache()
            self.write_ret(ErrorCode.SUCCESS,
                           dict_=DotDict(res=lst))

        except Exception as e:
            logging.exception("[LOG] Mobile: %s 's packet inquiry is failed. Exception: %s",
                              mobile, e.args)
            linecache.clearcache()
            self.write_ret(ErrorCode.FAILED, dict_=None)
Example #44
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# python 2.7.x
# linecache_test.py
# author: orangleliu
'''
Per the official docs, linecache can fetch any line of a file,
and the lookup is optimized through an in-memory cache.
'''

import linecache
filename = './test.txt'

## get every line of the file
f = linecache.getlines(filename)
print(f)

## get any single line (1-indexed)
context = linecache.getline(filename, 1)
print(context)

# when the file's contents change on disk, checkcache or
# updatecache must run before the new contents become visible
linecache.checkcache(filename)
# or
linecache.updatecache(filename)

# clear the cache when finished
linecache.clearcache()
Example #45
def readSuspiciousFile(filename, lineNumStart=1, lineNumStop=0,
        omitNewIPs=False, filterExp=[], removeSingles=True):
    """
    expected format:
    timestamp fqdn IP None score <number of IPBlocks in which this fqdn
    appears> <number of fqdns in the IPBlock which contains this IP>
    """
    data=[]
    lineNum=lineNumStart

    if filterExp:
        filterHits=dict.fromkeys([regex.pattern for regex in filterExp], 0)
    else:
        filterHits=dict()

    print('reading', filename, 'from line', lineNumStart, 'to line', lineNumStop)
    linecache.updatecache(filename)

    while True:
        line=linecache.getline(filename, lineNum)

        if not line:
            # end of file
            break

        if lineNum>=lineNumStop:
            break

        lineNum+=1
        sl=line.split()
        try:
            if omitNewIPs and float(sl[4])==-1:
                continue

            dStr=sl[1]
            if dStr=='invalid_hostname':
                continue

#           if any(regex.match(dStr) for regex in filterExp):
#               #print 'whitelisted',dStr
#               filterHits+=1
#               continue

            for regex in filterExp:
                if regex.match(dStr):
                    filterHits[regex.pattern] += 1
                    break
            else:
                # keep only hostnames that are pure ASCII
                if dStr.isascii():
                    data.append((int(sl[0]), dStr, str(netaddr.IPAddress(sl[2])),
                                 sl[3], float(sl[4]), int(sl[5])))

        except (IndexError, ValueError):
            # may happen when reading incomplete files - ignore
            pass

    #print(filterHits, 'filtered')
    if filterHits:
        print('Filter hits:')
        for pattern, hits in filterHits.items():
            print(pattern, ':', hits)

    if removeSingles:
        cntPrevData=len(data)
        uniqueFqdns=set([fqdn for _,fqdn,_,_,_,_ in data])
        cntPrevUniqueFqdns=len(uniqueFqdns)
        #data=filterSuspiciousData(data, 1, 2)
        data=filterSingles(data)
        uniqueFqdns=set([fqdn for _,fqdn,_,_,_,_ in data])
        print('removed', cntPrevData - len(data), '/', cntPrevData, 'data records')
        print('removed', cntPrevUniqueFqdns - len(uniqueFqdns), '/',
              cntPrevUniqueFqdns, 'single FQDNs')

    return data