Example #1
def generateInfoFiles_new(dbimg='0,0-00038.jpg', dbdir='dummy_db', offset=180, 
                          camerafile='temp/cory/%s_aaron.txt', fov = math.degrees(math.atan(float(1024)/float(612)))*2):
  dbnum = int(dbimg[4:-4])
  oname, dbimg = linecache.getline(dbdir+'/namemap.txt', dbnum).rstrip().split(';')
  location = oname[:-4].split('_')[-1]
  number = int(oname[:-4].split('_')[-2][5:])
  if number - 9 < 1: return
  
  imagename = dbimg[:-4]

  print imagename
  print str(number) + location
  
  camerafile2 = camerafile % location
  camera = linecache.getline(camerafile2, number - 9).strip().split()
  roll,pitch,yaw = (math.degrees(float(camera[4])), math.degrees(float(camera[5])), math.degrees(float(camera[6])))
  x,y,z = (float(camera[1]), float(camera[2]), float(camera[3]))
  #this shift in yaw puts everything in Aaron's coordinate frame
  yaw = (yaw + offset) % 360
  print "yaw after: " + str(yaw)
    
  myString = "{'is-known-occluded': False, 'url': {'href': ''}, 'field-of-view': %s, 'image-size': {'width': 2048, 'height': 2448}, 'view-direction': {'yaw': %s, 'pitch': %s, 'roll': %s}, 'view-location': {'lat': %s, 'alt': %s, 'lon': %s}, 'location': {'x': %s, 'y': %s, 'z': %s}, 'id': 'cory_image'}"
  myString = myString % (fov, yaw, pitch, roll, '0.0', '0.0', '0.0', x,y,z)
  f = open(dbdir +'/'+ imagename + '.info', 'wb')
  f.write(myString)
  f.close()
Example #2
def calc_elapsed_time(params):
	f=params['inputfile']	
	o = '%s_elapsedTime.txt' %(params['inputfile'][:-4])
	o1 = open(o,'w')

	tot=len(open(f,'r').readlines())

	count=1

	while count <=tot-1:

		line = linecache.getline(f,count)
		linenext=linecache.getline(f,count+1)

		if params['debug'] is True:
			print line[:-2]
			print linenext[:-2]

		region1=line.split()[5]
		region2=linenext.split()[5]

		if region1 != region2:
			if params['debug'] is True:
				print '<--------------------------------->Next region..'
			count = count + 1
			continue
	
		t1=datetime.strptime(line.split()[2],'%Y-%m-%dT%H:%M:%S-0500')
		t2=datetime.strptime(linenext.split()[2],'%Y-%m-%dT%H:%M:%S-0500')
		price=line.split()[1]
		if params['debug'] is True:
			print (t1-t2).seconds
		o1.write('%s\t%s\t%f\n' %(region1,price,(t1-t2).seconds))

		count = count + 1

	o1.close()
Example #3
def reallyUnofficialTranscript():
    global semesterMarkerLine
    headingTitle = ['Call Number', 'Department', 'Class', 'Section', 'Credits', 'Title', 'Grade']
    print "########################################"
    print "Really Unofficial Transcript"
    print "########################################"
    
    print 'semesterMarkerLine',semesterMarkerLine
    print 'gradeLine', gradeLine
    print semesterMarkerLine[0]
    semesterMarkerLine.append(1000000)
    #todo fix this area to get grade line after semestermarkerline but before next semester marker line
    semesterMarkerLineR = []
    for reverse in sorted(semesterMarkerLine,reverse=True):
        semesterMarkerLineR.append(reverse)
    print 'semesterMarkerLineR',semesterMarkerLineR,semesterMarkerLineR[0],semesterMarkerLineR[1]
    
    gradeLine.reverse()
    print 'gradeLine', gradeLine
    print semesterMarkerLine[0]
    
    index = 0
    for seasonLine in semesterMarkerLine:
        semesterClassList = []
        line = linecache.getline('input.txt', seasonLine)
        print line #prints the semester information
        print "{0[0]:11} | {0[1]:10} | {0[2]:5} | {0[3]:7} | {0[4]:7} | {0[5]:25} | {0[6]:5}".format(headingTitle)
        
        for grade in gradeLine:
            line = linecache.getline('input.txt', grade)
            print '{0[0]:11} | {0[1]:10} | {0[2]:5} | {0[3]:7} | {0[4]:7} | {0[5]:25} | {0[6]:5}'.format(line.split('\t'))
            semesterClassList.append(grade)
            index += 1
        print semesterClassList
        print "########################################"
Example #4
    def report(self, scope, lines=0, level=1, is_tty=stdout.isatty()):
        if level >= len(utils.sev):
            level = len(utils.sev) - 1
        tmpstr = ""
        if self.count > 0:
            tmpstr += "%sFiles tested (%s):%s\n\t" % (utils.color['HEADER'], len(scope), utils.color['DEFAULT']) if is_tty else "File tested (%s):\n\t" % (len(scope))
            tmpstr += "%s\n" % "\n\t".join(scope)

            tmpstr += "%sFiles skipped (%s):%s" % (utils.color['HEADER'], len(self.skipped), utils.color['DEFAULT']) if is_tty else "File skipped (%s):\n\t" % (len(self.skipped))
            for (fname, reason) in self.skipped:
                tmpstr += "\n\t%s (%s)" % (fname, reason)

            tmpstr += "\n%sTest results:%s\n" % (utils.color['HEADER'], utils.color['DEFAULT']) if is_tty else "Test results:\n"

            for filename,issues in self.resstore.items():
                for lineno, issue_type, issue_text in issues:
                    if utils.sev.index(issue_type) >= level:
                        tmpstr += "%s>> %s\n - %s::%s%s\n" % (utils.color.get(issue_type, utils.color['DEFAULT']), issue_text, filename, lineno, utils.color['DEFAULT']) if is_tty else ">> %s\n - %s::%s\n" % (issue_text, filename, lineno)
                        for i in utils.mid_range(lineno, lines):
                            line = linecache.getline(filename, i)
                            #linecache returns '' if line does not exist
                            if line != '':
                                tmpstr += "\t%3d  %s" % (i, linecache.getline(filename, i))
            print(tmpstr)
        else:
            self.logger.error("no results to display - %s files scanned" % self.count)
Example #5
    def __re_def(self):
        r = re.compile(r'.*intent *\(in\)[^:]*::\s*([^!]*)\s*.*', re.IGNORECASE)
        r_cont = re.compile(r'.*intent *\(in\)[^:]*::\s*([^!]*)\s*.*&', re.IGNORECASE)

        # Line contains intent with continuation
        m_cont = r_cont.search(self.__line)
        m = r.search(self.__line)
        if m_cont:
            splitted = self.__line.split('::')
            splitted[1] = re.sub(r'!.*', '', splitted[1]) # Remove comments at end of the line
            if not self.__check_intent_in(splitted[1]):
                # look ahead to find the variable
                lookahead_index = self.__linenum
                # set to line after the intent declaration
                lookahead_index += 1
                # look ahead
                nextline = linecache.getline(self.infile, lookahead_index)
                while nextline:
                    self.__check_intent_in(nextline)
                    if(nextline.find('&')!=-1):
                        lookahead_index += 1
                        nextline = linecache.getline(self.infile, lookahead_index)
                    else:
                        nextline = None

        # Match a standard declaration with variable and intent on the same line
        elif m:
            splitted = self.__line.split('::')
            splitted[1] = re.sub(r'!.*', '', splitted[1]) # Remove comments at end of the line
            self.__check_intent_in(splitted[1])
        return m
Example #6
    def gro(self, pairs, inputGro):
        occupancies, donorResNumber, donorResName, hydrogenNumber, donorGroupName, acceptorResNumber, acceptorResName,\
            acceptorGroupNumber, acceptorGroupName, occupanciesThreshold = ([] for i in range(10))
        # Assign the three variables of pairs[] to separate variables
        for i in range(0, numberHbonds):
            lineDonorGroupNumber, lineHydrogenNumber, lineAcceptorGroupNumber = pairs[i]
            # Add +2 to compensate for the 2 header lines in the .gro file
            lineDonorGroupNumber = int(lineDonorGroupNumber) + 2
            lineHydrogenNumber = int(lineHydrogenNumber) + 2
            lineAcceptorGroupNumber = int(lineAcceptorGroupNumber) + 2
            # Extract the donor/acceptor residue/atom numbers and names from the .gro file
            # donorResName and hydrogenNumber belong to the same residue, so they share the residue number 'donorResNumber'
            line_donor = linecache.getline(inputGro, lineHydrogenNumber)
            line_acceptor = linecache.getline(inputGro, lineAcceptorGroupNumber)
            donorResNumber.append(line_donor[1:5])
            donorResName.append(line_donor[5:8])
            donorGroupName.append(line_donor[12:15])
            hydrogenNumber.append(line_donor[15:20])
            acceptorResNumber.append(line_acceptor[1:5])
            acceptorResName.append(line_acceptor[5:8])
            acceptorGroupName.append(line_acceptor[12:15])
            acceptorGroupNumber.append(line_acceptor[15:20])
            # Calculate occupancies as percentages
            occupancies.append(counter[i]/numberFrames*100)

        return occupancies, donorResNumber, donorResName, hydrogenNumber, donorGroupName, acceptorResNumber, acceptorResName,\
            acceptorGroupNumber, acceptorGroupName
Example #7
def find_tallies(meshtal) :
    """Searches the meshtal file to determine the number of tallies present and
    returns the tallies and lines where they are listed.
    
    Parameters
    ----------
    meshtal : string
        File path to meshtal file.

    Returns
    -------
    tally_numbers : list of strings
        List of tally numbers (as strings)
    tally_lines : list of integers
        List of the line numbers where tallies begin
    """
    tally_lines=[]
    tally_numbers = []
    count = 1
    line = linecache.getline(meshtal, count)
    while line != '':
        if line.split()[0:3] == ['Mesh', 'Tally', 'Number']:
            tally_numbers.append(line.split()[3])
            tally_lines.append(count)
        count += 1
        line = linecache.getline(meshtal, count)
    return tally_numbers, tally_lines
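
A minimal usage sketch (the meshtal path is hypothetical; assumes find_tallies is importable):

tally_numbers, tally_lines = find_tallies('runs/meshtal')
for num, start in zip(tally_numbers, tally_lines):
    print 'Tally %s starts at line %d' % (num, start)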
Example #8
    def setFromDir(self, dir_to_check, ndir):
      global current_pwd
      global name_filename
      global description_filename
      global run_filename
      global beta_filename
      global offlinedoc_filename

      
      name_tmp = linecache.getline(current_pwd + '/' + ndir + '/' + name_filename, 1)
      name_tmp = name_tmp.rstrip('\r\n')

      description_tmp = linecache.getline(current_pwd + '/' + ndir + '/' + description_filename, 1)
      description_tmp = description_tmp.rstrip('\r\n')
      
      
      print 'DEBUG: Directory (code) found: ' + ndir
      code_list.append(ndir)
      self.setCode(ndir)
      print 'DEBUG: Name found: ' + name_tmp
      self.setName(name_tmp)
      name_list.append(name_tmp)
      print 'DEBUG: Description found: ' + description_tmp
      self.setDescription(description_tmp)
      description_list.append(description_tmp)
      
      if (os.path.isfile(current_pwd + '/' + ndir + '/' + run_filename)):
        self.setAsOption()
        self.setExecutable(True)
      if (os.path.isfile(current_pwd + '/' + ndir + '/' + beta_filename)):
        self.setBeta(True)
      if (os.path.isfile(current_pwd + '/' + ndir + '/' + offlinedoc_filename)):
        self.setAsOption()
        self.setHasOfflineDoc(True)
Example #9
def get_file_metadata(fname):
    import linecache

    metadata = {}

    grid_metadata = linecache.getline(fname, 1).split()
    n_grids = int(grid_metadata[0])

    # Trajectory metadata present after grid metadata
    trajectory_metadata = linecache.getline(fname, n_grids+2).split()
    metadata['n_trajectories'] = int(trajectory_metadata[0])

    # Get starting lat/lon/alt of each trajectory
    metadata['trajectories'] = {}
    for t in range(metadata['n_trajectories']):
        tstart = linecache.getline(fname, n_grids+3+t).split()
        # Save trajectories according to numbering in file
        metadata['trajectories'][t+1] = (tstart[-3], tstart[-2], tstart[-1])

    metadata['data_start'] = n_grids + metadata['n_trajectories'] + 3

    # Get custom variable names
    variable_names = linecache.getline(fname, metadata['data_start']).split()[2:]
    metadata['labels'] = hysplit_default_var + variable_names
    metadata['custom_labels'] = variable_names

    linecache.clearcache()
    return metadata
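
A hedged usage sketch (the tdump path is hypothetical; the function also relies on a module-level hysplit_default_var list of standard column names):

meta = get_file_metadata('trajectory.tdump')
print meta['n_trajectories'], 'trajectories; data starts at line', meta['data_start']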
Example #10
    def forwardon(self):
        import linecache
        import Global
        print "actionforward"
        if Global.NAV_P == Global.NAV_NO:
            print "no method forward!"
        else:
            linecache.clearcache()

            Global.NAV_P += 1
            i = 2*Global.NAV_P
            pathindex = linecache.getline('1.txt',i-1)
            pathindex = pathindex.strip()
            classname = linecache.getline('1.txt',i)
            classname = classname[:-1]
    
            print "get from 1.txt"
            print pathindex
            print classname
            method = self.path2method[pathindex]
            print "the type of method is %s, the type of classname is %s" %(type(method),type(classname))
            self.displayMethod(method,classname)
            Global.currentclass = classname
            QMessageBox.information(self ,'Current Class', classname)
            Global.currentmethod = method
            QMessageBox.information(self ,'Current method', method)
            print method
            print classname
Example #11
def map_follows(follows_dir, client_ip):
    '''
    Given a directory of all follow files, created by tshark, does the following:
        - Returns follows_dir[c_s_pair] = corresponding follow file
        - Makes another copy of the follow file with the c_s_pair in the name, just for fun!
        - If a follow file doesn't start with the client, prints 'Whaaaaat????'
    '''
    l     = client_ip.split('.')
    l[:4] = map(lambda x : x.zfill(3), l[:4])
    client_ip = '.'.join(l)
    
    follows_dir = os.path.abspath(follows_dir)
    file_list = python_lib.dir_list(follows_dir, True)
    follow_files = {}
    for file in file_list:
        if ('follow-stream-' not in file):
            continue
        if linecache.getline(file, 7)[0] == '=':
            print 'empty file:', file
            continue
        node0 = convert_ip(((linecache.getline(file, 5)).split()[2]).replace(':', '.'))
        node1 = convert_ip(((linecache.getline(file, 6)).split()[2]).replace(':', '.'))
        c_s_pair = '-'.join([node0, node1])
        if node0.rpartition('.')[0] != client_ip:
            print 'Whaaaaat????', file
        follow_files[c_s_pair] = file 
        outfile = file.rpartition('/')[0] + '/' + c_s_pair + '.' + file.partition('.')[2]
        if os.path.isfile(outfile) is False:
            os.system(('cp ' + file + ' ' + outfile))
    print 'map_follows Done:', len(follow_files)  
    return follow_files
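
A usage sketch (hypothetical paths; relies on the convert_ip and python_lib helpers from the surrounding project):

follow_files = map_follows('capture/follows', '10.11.3.2')
for c_s_pair, path in follow_files.items():
    print c_s_pair, '->', path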
Example #12
	def startServer(self):
		print("Starting server")
		serverPort = None
		interface = None

		#Obtains the necessary info from config files
		try:
			serverPort = linecache.getline(os.path.dirname(__file__)+'/../etc/whisper.cfg', 3)
			serverPort = serverPort.split('=',1)[1]
			serverPort = serverPort[:-1]

			interface = linecache.getline(os.path.dirname(__file__)+'/../etc/whisper.cfg', 4)
			interface = interface.split('=',1)[1]
			interface = interface[:-1]

		except Exception as e:
			print("Error in config file!"+str(e))

		#Creates the sockets and waits for connections to show up

		print("Listening on port "+serverPort) 

		self.host = netifaces.ifaddresses(interface)[2][0]['addr']

		self.serverSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
		self.serverSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
		self.serverSocket.bind((self.host, int(serverPort)))

		while self.keepServerRunning:
			data, server = self.serverSocket.recvfrom(1024)
			print(data)
Example #13
    def backon(self):
        import linecache
        import Global
        print "actionbackactionback" 
        if Global.NAV_P == 0 or Global.NAV_P == 1:
            print "no history!"
            QMessageBox.warning(self ,'warning', 'no history!')
        else:
            linecache.clearcache()
            Global.NAV_P -= 1
            i = 2*Global.NAV_P
            print "NAV_P="
            print Global.NAV_P
            print "NAV_NO="
            print Global.NAV_NO
#            method = self.path2method[1]
            pathindex = linecache.getline('1.txt',i-1)
            pathindex = pathindex.strip()
            classname = linecache.getline('1.txt',i)
            classname = classname[:-1]
            print "get from 1.txt"
            print pathindex
            print classname
            method = self.path2method[pathindex]
            print "the type of method is %s, the type of classname is %s" %(type(method),type(classname))
            self.displayMethod(method,classname)
            Global.currentclass = classname
            QMessageBox.information(self ,'Current Class', classname)
            Global.currentmethod = method
            QMessageBox.information(self ,'Current method', method)
            print method
            print classname
Example #14
def _read_tomoModel_header(tomofile):
    """
    Read metadata from a tomo file and return an instance
    """
    header = { }
    line = linecache.getline(tomofile, 1)
    header['title'] = line.rstrip()
    line = linecache.getline(tomofile, 2)
    ifanis, tref, ifdeck = line.split()
    anisotropic = int(ifanis) != 0  # True if model is anisotropic
    carddeck = int(ifdeck) != 0     # True if model is a carddeck
    tref = float(tref)              # TODO: what is tref?

    if carddeck:
        onedmod = RPoints(anisotropic=anisotropic)
        line = linecache.getline(tomofile, 3)
        ntotal, nic, noc = line.split()
        onedmod.NPoints = int(ntotal)       # total number of points
        onedmod.NInnerCore = int(nic)       # inner core number of points
        onedmod.NOuterCore = int(noc)       # outer core number of points
    else:
        # polynomial model
        # TODO: check e.g. rayleigh for more on how to finish this
        raise NotImplementedError, "Polynomial models not implemented yet."
        #line = linecache.getline(self.filename, 3)
        #nreg, nic, noc, rx = line.split()
    return onedmod
Example #15
def fetchfromlistfile():

    numofcomics= int(linecache.getline("RSSDATA/list.txt", 1))

    print(numofcomics,"Items in list file\n\n")


    x=2
    while x<=numofcomics+1:

        print(x-1,". ",linecache.getline("RSSDATA/list.txt", x),sep='')

        itemname=linecache.getline("RSSDATA/list.txt", x)
        itemname=itemname[:-1]

        lastfile=itemname+".txt"

        lastfile="RSSDATA/"+lastfile
        #print("read",lastfile)
        last=linecache.getline(lastfile, 1)
        if last!="":
            print ("Last Download::",last)


        x += 1

    return numofcomics
Example #16
	def generate_tokenizer(self,linenoList=None):
		""" 定義されたソースコードへの参照ファイル名から、トークンジェネレーターを生成する """
		# Generate
		if linenoList is not None :
			max_lineno = file_utils.count_lineno(self.filename)
			if not all( (isinstance(num,int) and 1 <= num <= max_lineno) for num in linenoList ) :
				raise Exception("行数定義が不正です: %s " % linenoList)
			elif linecache.getline(self.filename,linenoList[-1]).strip().endswith('\\') :
				return self.generate_tokenizer(linenoList + [linenoList[-1]+1])

			gen = ( linecache.getline(self.filename,lineno) for lineno in linenoList )
			def readline():
				try :
					line = gen.next()
				except StopIteration :
					return ""

				return line
			tokenizer = tokenize.generate_tokens(readline)
		else :
			# Generate tokenizer
			f = open(self.filename)
			tokenizer = tokenize.generate_tokens(f.readline)

		return tokenizer
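
The core trick above, driving tokenize with a readline callable backed by linecache, can be sketched standalone (file name and line numbers are arbitrary; the selected lines must form complete logical lines for tokenize to accept them):

import linecache
import tokenize

def linecache_readline(filename, linenos):
    gen = (linecache.getline(filename, n) for n in linenos)
    def readline():
        try:
            return next(gen)
        except StopIteration:
            return ''  # tokenize treats '' as end of input
    return readline

for tok in tokenize.generate_tokens(linecache_readline('some_module.py', [1, 2, 3])):
    print tok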
Example #17
def identity_similarity(alignments):
    identity, similarity, aln_out = [], [], []
    for alignment in sorted(glob.glob(alignments)):
        iden, siml = linecache.getline(alignment,24), linecache.getline(alignment,25)
        aln_out.append([float(iden.split("(")[1].split("%")[0]),float(siml.split("(")[1].split("%")[0])])
    df = pd.DataFrame(aln_out, columns=('identity(%)','similarity(%)'))
    return df
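
Usage sketch (assumes EMBOSS-style alignment reports, e.g. needle output, where lines 24 and 25 carry the Identity and Similarity summaries; the glob pattern is hypothetical):

df = identity_similarity('alignments/*.needle')
print df.describe()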
Example #18
def Merger(dir,oldname,newname):
    linecache.clearcache()
#    num_lines = file_len(dir+oldname)
#    print num_lines    
    lineNo = 1
    text=[]
    while(lineNo < 265000):
#        print lineNo   
        line1 = linecache.getline(dir+oldname, lineNo)        
        line2 = linecache.getline(dir+oldname, lineNo+1)
        
#        if len(line1)<2 and len(line2)<2 and len(line3)<2 and len(line4)<2:
#            break
    #        print line2        
        if len(line1.split(','))>3:            
            if len(line2.split(','))<3:            
                line1=line1.strip()+line2
                text.append(line1)
                lineNo=lineNo+2            
            else:
                text.append(line1)
    #            text.append(line2)
                lineNo=lineNo+1
        else:
#            print "1"+text[-1]
            text[-1]=(text[-1].strip())
#            print "2"+text[-1]          
            text[-1]=text[-1]+line1
#            print "3"+text[-1]
            lineNo=lineNo+1
    new_file = open(newname, 'a+')
    for item in text:
        new_file.write(item)
    new_file.close()
Example #19
def createMatrix(bedFileName, uniqNames, startLine, endLine):
	countMatrix = {row:{col:0 for col in uniqNames} for row in uniqNames}
	
	for n in range(startLine, endLine+1):
		baseLineList = re.split(re.compile(r'\s+'), linecache.getline(bedFileName, n).strip())
		baseDict = {'chrom':baseLineList[0], 'start':int(baseLineList[1]), 'end':int(baseLineList[2]), 'name':baseLineList[3]}
		
		ahead = n
		keepGoing = True
		
		while keepGoing:
			ahead += 1
			if ahead <= endLine:
				newLineList = re.split(re.compile(r'\s+'), linecache.getline(bedFileName, ahead).strip())
				newDict = {'chrom':newLineList[0], 'start':int(newLineList[1]), 'end':int(newLineList[2]), 'name':newLineList[3]}
			
				if newDict['chrom'] != baseDict['chrom']:
					keepGoing = False
				else:
					if newDict['start'] > baseDict['end'] + 100:
						keepGoing = False
					else:
						countMatrix[baseDict['name']][newDict['name']] += 1
			else:
				keepGoing = False
			
	return countMatrix
Example #20
    def load(self, amount):
        content = []

        from_begin = self._cursor == 0
        rewind = False
        s = 0
        while s < amount:
            line = linecache.getline(self.file_name, self._cursor + 1)
            if line:
                line = line.rstrip().split(',')
                content.append([float(i) for i in line])
                self._cursor += 1
                s += 1
            else:
                if from_begin:
                    self._wane = True
                self._cursor = 0
                rewind = True
                if self._wane:
                    break

        self._has_more = False
        if not rewind:
            line = linecache.getline(self.file_name, self._cursor + 1)
            if line:
                self._has_more = True

        content = np.array(content)
        np.random.shuffle(content)
        return content, self._has_more
Example #21
def get_n_best_list_sbleu_score_list_and_total_base_score_list(source_sentence_index, start_line_n_best_list_list, sbleu_score_list_file_name, n_best_list_file_name):
    """
    Returns a list with n best translations of a source_sentence, a sentence-level BLEU score list for these translations and a list with the total feature value of these translations.

    @source_sentence_index - index for a source sentence
    @start_line_n_best_list_list - list of lines in the n best list that should be fetched 
    @sbleu_score_list_file_name - path to file of the pre-computed sbleu score for all n best list sentences.
    @n_best_list_file_name - path to n_best_list file
    return - a list with n best translations of a source_sentence, a sentence-level BLEU score list for these translations and a list with the total feature value of these translations
    """    

    start_line_index = start_line_n_best_list_list[source_sentence_index]
    stop_line_index = start_line_n_best_list_list[source_sentence_index+1]

    n_best_list = []
    sbleu_score_list = []
    total_base_score_list = []

    for line_index in xrange(start_line_index,stop_line_index):
        target_sentence = linecache.getline(n_best_list_file_name, line_index).strip().lower()

        n_best_list.append(target_sentence)

        total_base_score = get_base_total_score(target_sentence)
        total_base_score_list.append(total_base_score)

        sbleu_score = float(linecache.getline(sbleu_score_list_file_name, line_index).strip())
        sbleu_score_list.append(sbleu_score)

    return n_best_list, total_base_score_list, sbleu_score_list
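
A hedged usage sketch (paths and offsets are hypothetical; get_base_total_score comes from the surrounding project):

start_line_n_best_list_list = [1, 101, 201]  # assumed: 100 hypotheses per source sentence
n_best, base_scores, sbleu = get_n_best_list_sbleu_score_list_and_total_base_score_list(
    0, start_line_n_best_list_list, 'scores/sbleu.txt', 'nbest/nbest.txt')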
Example #22
def split_csv_texts(input_fpath, output1_fpath, output2_fpath, split_percent):
    # Check the parameters 
    if (not os.path.exists(input_fpath)) or (split_percent < 0) or (split_percent > 1):
        print "Error: wrong input arguments."
        return
	
    # Open the files
    input_file = open(input_fpath, "r")
    output1_file = open(output1_fpath,"w")
    output2_file = open(output2_fpath,"w")
    
    # Get number of lines 
    input_number = len(input_file.readlines())
    output1_number = int(input_number * split_percent)
    print input_fpath, ":", input_number, "texts"
    print output1_fpath, ":", output1_number, "texts"
    print output2_fpath, ":", input_number - output1_number, "texts"    

    # Get a random sample of line numbers
    input_lines = range(1, input_number + 1)
    output1_lines = random.sample(input_lines, output1_number)
    
    # Save the lines in two separate files
    for line in input_lines:
        if(line in output1_lines):
            output1_file.write(linecache.getline(input_fpath, line))
        else:
            output2_file.write(linecache.getline(input_fpath, line))
    
    linecache.clearcache()    
    input_file.close()
    output1_file.close()
    output2_file.close()
Example #23
    def __init__(self, filename):
      self.epicsfilen=filename
      try:
#         tt1=time.clock()
          self.epicsfile=open(self.epicsfilen,"rb")
          self.totline = 0
          while True:
              fbuffer = self.epicsfile.read(8192*1024)
              if not fbuffer:
                  break
              self.totline += fbuffer.count('\n')
#          self.totline=len(self.epicsfile.readlines())
#          print self.totline
#          tt2=time.clock()
      except Exception as e:
          print e
          raise Exception("file not existed or not available!!")
      tmptime = linecache.getline(self.epicsfilen, 1)[0:10]
      if tmptime.isdigit():
          self.valsplit = 10
          self.timestart = int(tmptime)
          # timeend must be set in this branch too; rawtime2value relies on it
          self.timeend = int(linecache.getline(self.epicsfilen, self.totline)[:self.valsplit])
      else:
          self.valsplit = 19
          self.timestart = times2r(linecache.getline(self.epicsfilen, 1)[:self.valsplit])
          self.timeend = times2r(linecache.getline(self.epicsfilen, self.totline)[:self.valsplit])
Example #24
def sys_exc_info( level, infos, main=None ):
    try:
        filename = infos[ 2 ].tb_frame.f_code.co_filename
        write_line = "%s" % ( os.path.basename( os.path.splitext( filename )[ 0 ] ), )

        if main:
            write_line += "::%s" % ( main[ 0 ].__class__.__name__, )

        lineno = infos[ 2 ].tb_lineno
        write_line += "::%s (%d)" % ( infos[ 2 ].tb_frame.f_code.co_name, lineno, )

        next = infos[ 2 ].tb_next
        if next is not None:
            write_line += " in %s" % ( next.tb_frame.f_code.co_name, )

        linecache.checkcache( filename )
        try:# python 2.5
            strline = linecache.getline( filename, lineno, infos[ 2 ].tb_frame.f_globals )
        except:
            strline = linecache.getline( filename, lineno )
        if strline:
            write_line += ", %s" % ( repr( strline.strip() ), )

        write_line += " - %s" % ( infos[ 1 ], )
        xbmc_log( level, write_line )
    except:
        print_exc()
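
A usage sketch for the logger above (the level value is a placeholder; xbmc_log and print_exc come from the surrounding add-on code):

import sys
try:
    1 / 0
except Exception:
    sys_exc_info(1, sys.exc_info())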
Example #25
def runAllDefs(flies, center, distanceThreshold, inFile, outFile, window):
        # This definition runs all other definitions to identify haplotypes, their clusters, and summary statistics. This is run for every analysis window.

        # Count the haplotypes
        haps = countHaps(flies)

        # clump haplotypes that differ by some min threshold (including haps that differ by only an N:
        [haps_clumped, haps_clumped_count] = clusterDiffs(haps, distanceThreshold)

        # find all clusters with at least three haplotypes
        clusters = findClusters(haps_clumped)

        sizeVector = []
        keyVector = []
        if (len(clusters.keys()) == 0):
            for key in haps_clumped.iterkeys():        
                sizeVector.append(1)
                keyVector.append(key)
        else:
            [keyVector, sizeVector] = sortClusters(clusters,haps_clumped)

        centerCoord = linecache.getline(inFile,center).split(',')[0]
        edgeCoord1 = linecache.getline(inFile,center-window).split(',')[0]
        edgeCoord2 = linecache.getline(inFile,center+window).split(',')[0]
        absLengthWin = int(edgeCoord2)-int(edgeCoord1)
        

        printClusters(inFile, outFile, centerCoord, clusters, haps_clumped,  keyVector, sizeVector, absLengthWin, edgeCoord1, edgeCoord2)
Example #26
def test_get_ticket_info(out_file):
    """
    :param out_file: ticket pdf file
    :return:
    """
    #tdata = load_file(exec_pdf2txt(out_file))
    #tsoup = BeautifulSoup(tdata)
    #todo: use re to get [date, departure-destination, nt$, train number, ticket type]
    import linecache

    txt_file = out_file.replace('.pdf', '.txt')

    LINE_OF_DATE = 23
    LINE_OF_TRAIN_NO = 25
    LINE_OF_TRIP = 27
    LINE_OF_AMOUNT = 29

    tDate = linecache.getline(txt_file, LINE_OF_DATE)
    tTrainNo = linecache.getline(txt_file, LINE_OF_TRAIN_NO)
    tTrip = linecache.getline(txt_file, LINE_OF_TRIP)
    tAmount = linecache.getline(txt_file, LINE_OF_AMOUNT)

    #txt_file = exec_pdf2txt(out_file)
    with open(txt_file) as f:  # , encoding='iso-8859-1'
        lines = f.readlines()
    #print (lines)
    return (lines[LINE_OF_DATE-1], lines[LINE_OF_TRAIN_NO-1], (lines[LINE_OF_TRIP-1]).decode('utf-8') , lines[LINE_OF_AMOUNT-1])
Example #27
def GetNGP_L():
    path = []
    str1 = linecache.getline('path.txt', 2).strip('\n')
    str2 = linecache.getline('path.txt', 3).strip('\n')
    path.append(str1)
    path.append(str2)
    return path
Example #28
    def rawtime2value(self,t):
        t=int(t)
#      print t,self.timestart,self.timeend
        if t>self.timeend or t<self.timestart:
            print "time out of range, please use range %s to %s"%(self.timestart,self.timeend)
            #sys.exit()
            return None
        found=0
        line1=1
        line2=int(self.totline/2)
        line3=self.totline
        time1=times2r(linecache.getline(self.epicsfilen,line2)[:self.valsplit])
        while found==0:
            if t>time1:
                line1=line2
            elif t<time1:
                line3=line2
            else:
                found=1
                continue
            line2=int((line3+line1)/2)
 #           print line1,line2,line3
            if line2-line1<1:
                found=1
                continue
            elif line3-line2<2:
                found=1
                continue
            time1=times2r(linecache.getline(self.epicsfilen,line2)[:self.valsplit])
        value=linecache.getline(self.epicsfilen,line2)[self.valsplit+1:].replace("\n", "")
        if value.isdigit():
            return float(value)
        else:
            return value
Example #29
def main():
    pre = 0
    after = 2
    key = None
    if len(sys.argv) > 1:
        pre = int(sys.argv[1])
    if len(sys.argv) > 2:
        after = int(sys.argv[2])
    if len(sys.argv) > 3:
        key = sys.argv[3]
    count = 0
    while True:
        pipline = sys.stdin.readline()
        if pipline is None or len(pipline) == 0:
            break
        print
        # print pipline
        if not ":" in pipline:
            continue
        count += 1
        s = pipline.split(":")
        filename = s[0]
        line = int(s[1])
        print "%d. \033[1;32m%s\033[1;0m:%s%s\033[1;0m" % (count, filename, COLOR_RED, line)

        for i in range(1, pre + 1):
            sys.stdout.write(linecache.getline(filename, line - i))

        keyline = linecache.getline(filename, line)
        keyline = hlkey(keyline, key)
        sys.stdout.write(keyline)

        for i in range(1, after + 1):
            sys.stdout.write(linecache.getline(filename, line + i))
Example #30
    def __init__(self, store, key):
        self._store = store
        self._key = key
        filePath = self._store.getFilePath()
        # header line 1 holds "ncols", line 2 holds "nrows"
        self._ncols = int(linecache.getline(filePath, 1).lstrip("ncols"))
        self._nrows = int(linecache.getline(filePath, 2).lstrip("nrows"))
        self._data = None
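
Note that str.lstrip takes a set of characters rather than a prefix; lstrip("ncols") happens to stop at the first digit here, but a token-based parse of the same header line is more explicit (a sketch):

header = linecache.getline(filePath, 1)  # e.g. "ncols 4320"
ncols = int(header.split()[1])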
Example #31
def calc_other_word(mutation, position, strand):
    if (mutation.find('>') > -1):
        return 0
    else:
        words = list(mutation)
        if (mutation.find('ins') > -1):
            number = mutation.find('s')
            head = 'ins'
        elif (mutation.find('del') > -1):
            number = mutation.find('l')
            head = 'del'
        else:
            print('error')
            return 0

    if (words[number + 1] in ['1', '2', '3', '4', '5', '6', '7', '8', '9']):
        leng = ''
        for i in range(number + 1, len(words)):
            leng += words[i]
        length = int(leng)
    elif (words[number + 1] in ['A', 'C', 'G', 'T']):
        vocab = list()
        for i in range(number + 1, len(words)):
            vocab += words[i]
        length = len(vocab)
    else:
        return 0

    position_list = re.split(r'[:-]', position)
    if (int(position_list[0]) == 23):
        chromosome = 'X'
    elif (int(position_list[0]) == 24):
        chromosome = 'Y'
    elif (int(position_list[0]) == 25):
        chromosome = 'M'
    else:
        chromosome = int(position_list[0])
    start = int(position_list[1])
    end = int(position_list[2])
    if (head == 'ins'):
        num = 0
    elif (head == 'del'):
        num = length
    GRCh_file = 'raw_data/chr' + str(chromosome) + '.fa'
    quotient = start // 50
    surplus = start % 50
    if (head == 'ins'):
        if (surplus != 0):
            forward_index = int(surplus) - 1
        else:
            quotient -= 1
            forward_index = 49
    else:
        if (surplus not in [0, 1]):
            forward_index = int(surplus) - 2
        else:
            quotient -= 1
            forward_index = int(surplus) + 48
    targetline = linecache.getline(GRCh_file, int(quotient) + 1)
    forward = targetline[forward_index]
    if (head == 'ins'):
        if (forward_index != 49):
            backward_index = forward_index + 1
        else:
            quotient += 1
            backward_index = 0
        targetline = linecache.getline(GRCh_file, int(quotient) + 1)
        backward = targetline[backward_index]
    else:
        end_quotient = end // 50
        end_surplus = end % 50
        if (end_surplus != 0):
            backward_index = end_surplus
        else:
            backward_index = end_surplus
            end_quotient += 1
        endline = linecache.getline(GRCh_file, int(end_quotient) + 1)
        backward = endline[backward_index]
    vocab = forward + backward
    return make_vocab(vocab, length)
Example #32
    def __getitem__(self, idx):
        line = linecache.getline(self._filename, idx + 1)
        return line.strip().split(",")
Example #33
def set_static(CIDR):
    length = CIDR.split('/')[1]

    bits = 0
    for i in xrange(32 - int(length), 32):
        bits |= (1 << i)
    netmask = "%d.%d.%d.%d" % ((bits & 0xff000000) >> 24,
                               (bits & 0xff0000) >> 16, (bits & 0xff00) >> 8,
                               (bits & 0xff))

    print "\nARP Scanning based on targetted CIDR\n"
    subprocess.call(
        "sudo sort /home/pi/WarBerry/Results/CIDR | uniq > /home/pi/WarBerry/Results/unique_CIDR",
        shell=True)
    subprocess.call("sudo rm /home/pi/WarBerry/Results/CIDR", shell=True)
    subprocess.call(
        "sudo netdiscover -i eth0 -P -l /home/pi/WarBerry/resources/discover | grep -P -o \'([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+).*? ' | grep -P -o \'[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+' > /home/pi/WarBerry/Results/used_ips",
        shell=True)

    with open('/home/pi/WarBerry/Results/avail_ips', 'w') as avail:
        with open('/home/pi/WarBerry/Results/unique_subnets', 'r') as subs:
            for sub in subs:
                for i in range(1, 255):
                    avail.write(sub.strip() + str(i) + "\n")

    with open('/home/pi/WarBerry/Results/used_ips', 'r') as used:
        used_ips = used.readlines()
        with open('/home/pi/WarBerry/Results/statics', 'w') as statics:
            with open('/home/pi/WarBerry/Results/avail_ips', 'r') as avail_ips:
                for available in avail_ips:
                    isUsed = False
                    for used in used_ips:
                        if ((available.strip() == used.strip())
                                and (isUsed == False)):
                            print bcolors.FAIL + "[-] IP %s is in use, excluding from static list" % used.strip(
                            ) + bcolors.ENDC
                            isUsed = True
                    if (isUsed == False):
                        statics.write(available)

    with open('/home/pi/WarBerry/Results/statics') as static:
        total_frees = sum(1 for _ in static)
        if total_frees > 0:
            print bcolors.TITLE + '\n%s Available IPs to choose from.' % total_frees + bcolors.ENDC
        else:
            print bcolors.FAIL + "No free IPs Found\n" + bcolors.ENDC

    with open('/home/pi/WarBerry/Results/statics', 'r') as statics:
        line_count = (sum(1 for _ in statics))
        for i in range(0, line_count):
            newline = randint(1, line_count)  # linecache line numbers start at 1

            static = linecache.getline('/home/pi/WarBerry/Results/statics',
                                       newline)
            print bcolors.WARNING + "[*] Attempting to set random static ip %s" % static.strip(
            ) + bcolors.ENDC
            subprocess.call([
                "ifconfig", "eth0",
                static.strip(), "netmask",
                netmask.strip()
            ])

            for used in reversed(
                    open('/home/pi/WarBerry/Results/used_ips').readlines()):
                print "[*] Pinging %s to ensure that we are live..." % used.strip(
                )
                ping_response = subprocess.call(
                    ['ping', '-c', '5', '-W', '3',
                     used.strip()],
                    stdout=open(os.devnull, 'w'),
                    stderr=open(os.devnull, 'w'))
                if ping_response == 0:
                    print bcolors.OKGREEN + "[+] Success. IP %s is valid and %s is reachable" % (
                        static.strip(), used.strip()) + bcolors.ENDC
                    return static.strip()
                else:
                    print bcolors.WARNING + "[-] Failed. IP %s is not valid" % static.strip(
                    ) + bcolors.ENDC
            print "Attempting to bypass MAC Filtering\n"
            macbypass(unique_CIDR)
Example #34
    def line(self):
        if self._line is None:
            self._line = linecache.getline(self.filename, self.lineno).strip()
        return self._line
Example #35
i = 2
flag = True
while flag:
    print("\n")
    print("-----------------------------------------------------------------")
    print("i = " + str(i))
    achn_excel_ID = str(achn_sheet.cell(row=i, column=1).value)
    print("achn_excel_ID is: " + achn_excel_ID)
    with open(r'F:\TR_3\Achn_gene.fa', 'r') as achn_db:
        for (achn_db_num, achn_db_ID) in enumerate(achn_db):
            achn_db_ID = re.findall(r'A\w*\d*', achn_db_ID)
            achn_db_ID = achn_db_ID[0]
            # print("compare with " + str(achn_db_ID[0]))
            if str(achn_db_ID) == str(achn_excel_ID):
                print("match!")
                seq = linecache.getline(r'F:\TR_3\Achn_gene.fa', achn_db_num + 2)
                seq = re.findall(r'[ATGC]*', seq)[0]
                print("seq is: " + seq)
                with open(r'F:\TR_3\seq.fa', 'a') as seqfa:
                    seqfa.write(">" + achn_db_ID)
                    seqfa.write("\n")
                    seqfa.write(seq)
                    seqfa.write("\n")
                with open(r'F:\TR_3\new_Acc_Gene.fa', 'r') as acc_db:
                    for (acc_db_num, acc_db_seq) in enumerate(acc_db):
                        acc_db_seq = re.findall(r'[ATGCatgc]*', acc_db_seq)[0]
                        acc_db_seq = acc_db_seq.upper()
                        # print("acc_db_seq is: " + str(acc_db_seq))
                        if str(acc_db_seq) == str(seq):
                            acc_db_ID = linecache.getline(r'F:\TR_3\new_Acc_Gene.fa', acc_db_num)
                            acc_db_ID = re.findall(r'A\w*\d*', acc_db_ID)[0]
Example #36
def better_exchook(etype, value, tb, debugshell=False, autodebugshell=True):
    output("EXCEPTION")
    output('Traceback (most recent call last):')
    allLocals, allGlobals = {}, {}
    try:
        import linecache
        limit = None
        if hasattr(sys, 'tracebacklimit'):
            limit = sys.tracebacklimit
        n = 0
        _tb = tb

        def _resolveIdentifier(namespace, id):
            obj = namespace[id[0]]
            for part in id[1:]:
                obj = getattr(obj, part)
            return obj

        def _trySet(old, prefix, func):
            if old is not None: return old
            try:
                return prefix + func()
            except KeyError:
                return old
            except Exception, e:
                return prefix + "!" + e.__class__.__name__ + ": " + str(e)

        while _tb is not None and (limit is None or n < limit):
            f = _tb.tb_frame
            allLocals.update(f.f_locals)
            allGlobals.update(f.f_globals)
            lineno = _tb.tb_lineno
            co = f.f_code
            filename = co.co_filename
            name = co.co_name
            output('  File "%s", line %d, in %s' % (filename, lineno, name))
            if not os.path.isfile(filename):
                altfn = fallback_findfile(filename)
                if altfn:
                    output("    -- couldn't find file, trying this instead: " +
                           altfn)
                    filename = altfn
            linecache.checkcache(filename)
            line = linecache.getline(filename, lineno, f.f_globals)
            if line:
                line = line.strip()
                output('    line: ' + line)
                output('    locals:')
                alreadyPrintedLocals = set()
                for tokenstr in grep_full_py_identifiers(
                        parse_py_statement(line)):
                    splittedtoken = tuple(tokenstr.split("."))
                    for token in map(lambda i: splittedtoken[0:i],
                                     range(1,
                                           len(splittedtoken) + 1)):
                        if token in alreadyPrintedLocals: continue
                        tokenvalue = None
                        tokenvalue = _trySet(
                            tokenvalue, "<local> ", lambda: pretty_print(
                                _resolveIdentifier(f.f_locals, token)))
                        tokenvalue = _trySet(
                            tokenvalue, "<global> ", lambda: pretty_print(
                                _resolveIdentifier(f.f_globals, token)))
                        tokenvalue = _trySet(
                            tokenvalue, "<builtin> ", lambda: pretty_print(
                                _resolveIdentifier(f.f_builtins, token)))
                        tokenvalue = tokenvalue or "<not found>"
                        output('      ' + ".".join(token) + " = " + tokenvalue)
                        alreadyPrintedLocals.add(token)
                if len(alreadyPrintedLocals) == 0: output("       no locals")
            else:
                output('    -- code not available --')
            _tb = _tb.tb_next
            n += 1
Example #37
def GetPath(filename, num):
    path = linecache.getline(filename, num)
    path = path.rstrip("\n")
    return path
Example #38
def _get_line(filename, line_number):
    line = linecache.getline(filename,line_number)
    if line:
        m = re.match(guff, line)
        if m:
            return (strftime("%d %b %Y", strptime(m.group(1), "%Y%m%d")), re.sub(guff,"",line))
Example #39
from scipy import sparse
import numpy as np
from sklearn import preprocessing
import linecache
import random

count = 0
n = 1000
row = []
col = []
data = []
idf = np.loadtxt(open("idf.txt", "r"), delimiter=",")
for i in range(n):
    a = random.randrange(1, 50000)  # linecache line numbers start at 1; line 0 always misses
    line = linecache.getline('data', a)
    line = line.strip().split(' ')
    comment = line[1:]
    for i in range(len(comment)):
        index_value = comment[i].split(":")
        index = int(index_value[0])
        value = int(index_value[1])
        col.append(index)
        data.append(value * idf[index])
        row.append(count)
    count = count + 1
testdata = sparse.coo_matrix((data, (row, col)), shape=(n, 89527))
testdata = preprocessing.normalize(testdata, norm='l1')
testdata = np.array(testdata.todense())
result = 0
for i in range(n):
    for j in range(n):
Example #40
def calc_word(mutation, position, strand):
    before = mutation[len(mutation) - 3]
    after = mutation[len(mutation) - 1]
    position_list = re.split(r'[:-]', position)
    if (len(position_list) != 3):
        print('position error')
        return -1
    if (int(position_list[0]) == 23):
        chromosome = 'X'
    elif (int(position_list[0]) == 24):
        chromosome = 'Y'
    elif (int(position_list[0]) == 25):
        chromosome = 'M'
    else:
        chromosome = int(position_list[0])
    start = int(position_list[1])
    num = int(position_list[2]) - int(position_list[1]) + 1
    GRCh_file = 'raw_data/chr' + str(chromosome) + '.fa'
    quotient = start // 50
    surplus = start % 50

    if (surplus != 0):
        target_index = int(surplus) - 1
    else:
        quotient -= 1
        target_index = 49
    target_line = linecache.getline(GRCh_file, int(quotient) + 1)

    if (((target_line[target_index] != before) and (strand == '+')) or
        ((target_line[target_index] != swap(before)) and (strand == '-'))):
        print('error: ' + mutation)
        print('target: ' + target_line[target_index])
        print('strand: ' + strand)
        strand = swap(strand)
        if (((target_line[target_index] != before) and (strand == '+')) or
            ((target_line[target_index] != swap(before)) and (strand == '-'))):
            print('still error')
            return -1

    if ((target_index >= 2) and (target_index <= 47)):
        pattern = 1
    elif (target_index == 0):
        pattern = 2
    elif (target_index == 1):
        pattern = 3
    elif (target_index == 48):
        pattern = 4
    else:
        pattern = 5

    if (pattern == 1):
        forward = target_line[target_index - 1]
        for_forward = target_line[target_index - 2]
        backward = target_line[target_index + 1]
        back_backward = target_line[target_index + 2]
    elif (pattern == 2):
        pre_line = linecache.getline(GRCh_file, int(quotient))
        forward = pre_line[49]
        for_forward = pre_line[48]
        backward = target_line[target_index + 1]
        back_backward = target_line[target_index + 2]
    elif (pattern == 3):
        pre_line = linecache.getline(GRCh_file, int(quotient))
        for_forward = pre_line[49]
        forward = target_line[target_index - 1]
        backward = target_line[target_index + 1]
        back_backward = target_line[target_index + 2]
    elif (pattern == 4):
        post_line = linecache.getline(GRCh_file, int(quotient) + 2)
        back_backward = post_line[0]
        forward = target_line[target_index - 1]
        for_forward = target_line[target_index - 2]
        backward = target_line[target_index + 1]
    elif (pattern == 5):
        post_line = linecache.getline(GRCh_file, int(quotient) + 2)
        backward = post_line[0]
        back_backward = post_line[1]
        forward = target_line[target_index - 1]
        for_forward = target_line[target_index - 2]

    if (((strand == '+') and (before in ['A', 'G']))
            or ((strand == '-') and (before in ['C', 'T']))):
        buf_f = swap(forward)
        buf_ff = swap(for_forward)
        forward = swap(backward)
        for_forward = swap(back_backward)
        backward = buf_f
        back_backward = buf_ff
    if (before in ['A', 'G']):
        before = swap(before)
        after = swap(after)

    if (for_forward == 'A'):
        first = 0
    elif (for_forward == 'C'):
        first = 1
    elif (for_forward == 'G'):
        first = 2
    else:
        first = 3

    if (forward == 'A'):
        second = 0
    elif (forward == 'C'):
        second = 1
    elif (forward == 'G'):
        second = 2
    else:
        second = 3

    if (before == 'C'):
        if (after == 'A'):
            third = 0
        elif (after == 'G'):
            third = 1
        else:
            third = 2
    elif (before == 'T'):
        if (after == 'A'):
            third = 3
        elif (after == 'C'):
            third = 4
        else:
            third = 5
    elif (before == 'G'):
        if (after == 'T'):
            third = 0
        elif (after == 'C'):
            third = 1
        else:
            third = 2
    else:
        if (after == 'T'):
            third = 3
        elif (after == 'G'):
            third = 4
        else:
            third = 5

    if (back_backward == 'A'):
        fourth = 0
    elif (back_backward == 'C'):
        fourth = 1
    elif (back_backward == 'G'):
        fourth = 2
    else:
        fourth = 3

    if (backward == 'A'):
        fifth = 0
    elif (backward == 'C'):
        fifth = 1
    elif (backward == 'G'):
        fifth = 2
    else:
        fifth = 3
    answer = 384 * first + 96 * second + 16 * third + 4 * fourth + fifth
    return (answer)
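
The return value packs the five context codes into a mixed-radix index, 384*first + 96*second + 16*third + 4*fourth + fifth, with first, second, fourth and fifth in 0..3 and third in 0..5, giving 4*4*6*4*4 = 1536 distinct words. A quick arithmetic check:

# for_forward='C' (first=1), forward='A' (second=0), a C>G substitution (third=1),
# back_backward='G' (fourth=2), backward='T' (fifth=3):
# 384*1 + 96*0 + 16*1 + 4*2 + 3 = 384 + 16 + 8 + 3 = 411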
Example #41
    def traceit(frame, event, args):
        """ Tracing method executed before running each line in a module or sub-module
            Record memory allocated in a list with debugging information
        """
        global _is_memory_tracing_enabled

        if not _is_memory_tracing_enabled:
            return traceit

        # Filter events
        if events_to_trace is not None:
            if isinstance(events_to_trace, str) and event != events_to_trace:
                return traceit
            elif isinstance(events_to_trace,
                            (list, tuple)) and event not in events_to_trace:
                return traceit

        # Filter modules
        name = frame.f_globals["__name__"]
        if not isinstance(name, str):
            return traceit
        else:
            # Filter whitelist of modules to trace
            if modules_to_trace is not None:
                if isinstance(modules_to_trace,
                              str) and modules_to_trace not in name:
                    return traceit
                elif isinstance(modules_to_trace, (list, tuple)) and all(
                        m not in name for m in modules_to_trace):
                    return traceit

            # Filter blacklist of modules not to trace
            if modules_not_to_trace is not None:
                if isinstance(modules_not_to_trace,
                              str) and modules_not_to_trace in name:
                    return traceit
                elif isinstance(modules_not_to_trace, (list, tuple)) and any(
                        m in name for m in modules_not_to_trace):
                    return traceit

        # Record current tracing state (file, location in file...)
        lineno = frame.f_lineno
        filename = frame.f_globals["__file__"]
        if filename.endswith(".pyc") or filename.endswith(".pyo"):
            filename = filename[:-1]
        line = linecache.getline(filename, lineno).rstrip()
        traced_state = Frame(filename, name, lineno, event, line)

        # Record current memory state (rss memory) and compute difference with previous memory state
        cpu_mem = 0
        if process is not None:
            mem = process.memory_info()
            cpu_mem = mem.rss

        gpu_mem = 0
        if log_gpu:
            # Clear GPU caches
            if is_torch_available():
                torch_empty_cache()
            if is_tf_available():
                tf_context.context()._clear_caches(
                )  # See https://github.com/tensorflow/tensorflow/issues/20218#issuecomment-416771802

            # Sum used memory for all GPUs
            nvml.nvmlInit()

            for i in devices:
                handle = nvml.nvmlDeviceGetHandleByIndex(i)
                meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                gpu_mem += meminfo.used

            nvml.nvmlShutdown()

        mem_state = UsedMemoryState(traced_state, cpu_mem, gpu_mem)
        memory_trace.append(mem_state)

        return traceit
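
traceit is meant to be installed as a trace function; registration would look roughly like this (a sketch, omitting the start/stop plumbing of the surrounding module; run_workload is a hypothetical function under measurement):

import sys

_is_memory_tracing_enabled = True
sys.settrace(traceit)   # trace line/call/return events from here on
run_workload()
sys.settrace(None)
_is_memory_tracing_enabled = False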
Example #42
        GetAndSee.getHB(url, wxcookie2)
    elif whoGet == 3:
        GetAndSee.getHB(url, wxcookie3)
    else:
        print("无法识别")
    # 透视
    GetAndSee.seeHB(url, True, wxcookie1)
elif number == 3:
    filePath = 'starUrls.txt'
    lineNumber = ReadFile.getFileLineNumber(filePath)
    littleUrl = []  # urls whose biggest packet was not claimed; used when writing to the file
    # count is the current line number
    for count in range(1, lineNumber + 1):
        urlInfo = []  # bundles the lucky number, the friend count and the url together
        # line: the content of the current line
        urlLine = linecache.getline(filePath, count)
        # Filter out malformed urls such as bare newlines; only accept urls longer than 5 characters
        if len(urlLine) > 5:
            # peek; the content of the current line is the url
            results = GetAndSee.seeHB(urlLine, False, wxcookie1)
            if results != None:
                luckNumber = results['luckNumber']
                friend_info = results['friends_info']
                friendsNumber = len(friend_info)
                # keep only urls whose biggest packet has not been claimed
                if friendsNumber < luckNumber:
                    # littleUrl.append(urlLine)
                    # bundle the info
                    urlInfo.append(luckNumber)
                    urlInfo.append(friendsNumber)
                    urlInfo.append(urlLine)
Example #43
                        if all(seg <= 0):
                            print '\n less than 0.5 gaps {}\n'.format(
                                fn_acceptableGapProp(seg, init_maxGapProp))

                            print 'are all positions > {}? {}\n'.format(
                                gt, fn_profVal(seg, gt))

                            print 'is longer than {} positions? {}\n'.format(
                                init_minSegLen,
                                fn_checkLen(seg, init_minSegLen))

                            print 'are all gaps less than {} in length? {}\n'.format(
                                init_maxGapLen,
                                fn_acceptableGapLen(seg, init_maxGapLen))

                        encodedSeq = linecache.getline('encodingTemp',
                                                       lineNum).strip()
                        testSeg = encodedSeq[segStart:segEnd + 1]

                        if len(encodedSeq) == 0:

                            print 'ERROR: LENGTH OF SEQUENCE IS 0'
                        else:
                            cgContent = get_CG_proportion(encodedSeq)
                            consProp = get_cons_prop(encodedSeq)

                        bedChromStart = chromStart + segStart
                        bedChromEnd = chromStart + segEnd

                        # The order of stuff in bed file justChrom bedChromStart bedChromEnd cgContent consProp
                        writeString = '{}\t{}\t{}\t{}\t{}\n'.format(
                            justChrom, bedChromStart, bedChromEnd, cgContent,
Example #44
#6.3 linecache: Read Text Files Efficiently
#Purpose: retrieve lines of text from files or imported Python modules, holding a cache of the results so that reading many lines from the same file is more efficient
#The linecache module is used within other parts of the Python standard library when dealing with Python source files. The cache implementation holds the contents of a file, parsed into separate lines, in memory. The API returns the requested line(s) by indexing into a list, which saves time compared with repeatedly reading the file and parsing the text to find the desired line. This is especially useful when looking up several lines from the same file, such as when producing a traceback for an error report
#6.3.1 Test Data
#6.3.2 Reading Specific Lines
#Line numbers of files read by the linecache module start with 1, whereas list indexes normally start with 0
import linecache
from linecache_data import *
filename = make_tempfile()
#Pick out the same line from source and cache.
#(Notice that linecache counts from 1)
print 'SOURCE:'
print '%r' % lorem.split('\n')[4]
print
print 'CACHE'
print '%r' % linecache.getline(filename, 5)
cleanup(filename)
print
#6.3.3 Handling Blank Lines
# The return value always includes the newline at the end of the line, so if the line is blank, the return value is just the newline.
filename = make_tempfile()
#Blank lines include the newline
print 'BLANK : %r' % linecache.getline(filename, 8)
cleanup(filename)
#6.3.4 Error Handling
# If the requested line number falls outside the range of valid lines in the file, getline() returns an empty string.
filename = make_tempfile()
#The cache always returns a string, and uses
#an empty string to indicate a line which does
#not exist
not_there = linecache.getline(filename, 500)
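A self-contained check of that behavior (a sketch; any existing file with fewer lines than the requested number works):

import linecache
# an out-of-range request returns '' rather than raising
missing = linecache.getline(__file__, 10 ** 6)
assert missing == ''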
Example #45
0
def _read_output(path):
    """Read CmdStan output.csv.

    Parameters
    ----------
    path : str

    Returns
    -------
    List[DataFrame, DataFrame, List[str], List[str], List[str]]
        pandas.DataFrame
            Sample data
        pandas.DataFrame
            Sample stats
        List[str]
            Configuration information
        List[str]
            Adaptation information
        List[str]
            Timing info
    """
    chains = []
    configuration_info = []
    adaptation_info = []
    timing_info = []
    i = 0
    # Read (first) configuration and adaptation
    with open(path, "r") as f_obj:
        column_names = False
        for i, line in enumerate(f_obj):
            line = line.strip()
            if line.startswith("#"):
                if column_names:
                    adaptation_info.append(line.strip())
                else:
                    configuration_info.append(line.strip())
            elif not column_names:
                column_names = True
                pconf = _process_configuration(configuration_info)
                if pconf["save_warmup"]:
                    warmup_range = range(pconf["num_warmup"] // pconf["thin"])
                    for _, _ in zip(warmup_range, f_obj):
                        continue
            else:
                break

    # Read data
    with open(path, "r") as f_obj:
        df = pd.read_csv(f_obj, comment="#")

    # split dataframe if header found multiple times
    if df.iloc[:, 0].dtype.kind == "O":
        first_col = df.columns[0]
        col_locations = first_col == df.loc[:, first_col]
        col_locations = list(col_locations.loc[col_locations].index)
        dfs = []
        for idx, last_idx in zip(col_locations,
                                 [-1] + list(col_locations[:-1])):
            df_ = deepcopy(df.loc[last_idx + 1:idx - 1, :])
            for col in df_.columns:
                df_.loc[:, col] = pd.to_numeric(df_.loc[:, col])
            if len(df_):
                dfs.append(df_.reset_index(drop=True))
            df = df.loc[idx + 1:, :]
        for col in df.columns:
            df.loc[:, col] = pd.to_numeric(df.loc[:, col])
        dfs.append(df)
    else:
        dfs = [df]

    for j, df in enumerate(dfs):
        if j == 0:
            # Read timing info (first) from the end of the file
            line_num = i + df.shape[0] + 1
            for k in range(5):
                line = linecache.getline(path, line_num + k).strip()
                if len(line):
                    timing_info.append(line)
            configuration_info_len = len(configuration_info)
            adaptation_info_len = len(adaptation_info)
            timing_info_len = len(timing_info)
            num_of_samples = df.shape[0]
            header_count = 1
            last_line_num = (configuration_info_len + adaptation_info_len +
                             timing_info_len + num_of_samples + header_count)
        else:
            # header location found in the dataframe (not first)
            configuration_info = []
            adaptation_info = []
            timing_info = []

            # line number for the next dataframe in csv
            line_num = last_line_num + 1

            # row ranges
            config_start = line_num
            config_end = config_start + configuration_info_len

            # read configuration_info
            for reading_line in range(config_start, config_end):
                line = linecache.getline(path, reading_line)
                if line.startswith("#"):
                    configuration_info.append(line)
                else:
                    msg = ("Invalid input file. "
                           "Header information missing from combined csv. "
                           "Configuration: {}".format(path))
                    raise ValueError(msg)

            pconf = _process_configuration(configuration_info)
            warmup_rows = pconf["save_warmup"] * pconf["num_warmup"] // pconf[
                "thin"]
            adaption_start = config_end + 1 + warmup_rows
            adaption_end = adaption_start + adaptation_info_len
            # read adaptation_info
            for reading_line in range(adaption_start, adaption_end):
                line = linecache.getline(path, reading_line)
                if line.startswith("#"):
                    adaptation_info.append(line)
                else:
                    msg = ("Invalid input file. "
                           "Header information missing from combined csv. "
                           "Adaptation: {}".format(path))
                    raise ValueError(msg)

            timing_start = adaption_end + len(df) - warmup_rows
            timing_end = timing_start + timing_info_len
            # read timing_info
            raise_timing_error = False
            for reading_line in range(timing_start, timing_end):
                line = linecache.getline(path, reading_line)
                if line.startswith("#"):
                    timing_info.append(line)
                else:
                    raise_timing_error = True
                    break
            no_elapsed_time = not any("elapsed time" in row.lower()
                                      for row in timing_info)
            if raise_timing_error or no_elapsed_time:
                msg = ("Invalid input file. "
                       "Header information missing from combined csv. "
                       "Timing: {}".format(path))
                raise ValueError(msg)

            last_line_num = reading_line

        # Remove warmup
        if pconf["save_warmup"]:
            saved_samples = pconf["num_samples"] // pconf["thin"]
            df = df.iloc[-saved_samples:, :]

        # Split data to sample_stats and sample
        sample_stats_columns = [
            col for col in df.columns if col.endswith("__")
        ]
        sample_columns = [
            col for col in df.columns if col not in sample_stats_columns
        ]

        sample_stats = df.loc[:, sample_stats_columns]
        sample_df = df.loc[:, sample_columns]

        chains.append((sample_df, sample_stats, configuration_info,
                       adaptation_info, timing_info))

    return chains
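A hedged usage sketch of _read_output (the CSV path is hypothetical; it assumes a CmdStan output file produced by a sampling run):

chains = _read_output("output.csv")  # hypothetical path
for sample_df, sample_stats, config, adaptation, timing in chains:
    print(sample_df.shape, sample_stats.shape, len(config), len(adaptation), len(timing))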
Example #46
0
def readSourceLine(source_ref):
    import linecache
    return linecache.getline(filename=source_ref.getFilename(),
                             lineno=source_ref.getLineNumber())
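Here source_ref is only assumed to expose getFilename() and getLineNumber(); a minimal hypothetical stand-in shows the call shape:

class FakeSourceRef:  # hypothetical stand-in for the real source_ref object
    def __init__(self, filename, lineno):
        self.filename, self.lineno = filename, lineno
    def getFilename(self):
        return self.filename
    def getLineNumber(self):
        return self.lineno

print(readSourceLine(FakeSourceRef(__file__, 1)))  # first line of this file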
Example #47
0
#!/usr/bin/env python3
import os
import sys
import linecache
from time import sleep

if os.path.isfile(".con"):
    # splitlines() drops trailing newlines so the exact-match membership checks below can succeed
    checker = open(".con", "r").read().splitlines()
    line_number = 3
    file = ".con"
    line = linecache.getline(file, line_number)

    if 'sys:auto-boot' in checker:
        import mainNS
        mainNS.mainNS()
    if 'sys:boot-NoDefualt' in checker:
        import main
        main.main()

    if 'sys:session-ONLY, sys:auto-boot' in checker:
        open("etc/4532/45.txt", "w+").write(" ")
        import mainNS
        mainNS.mainNS()

    if 'sys:session-ONLY, sys:boot=NON-DEFUALT' in checker:
        open("etc/4532/45.txt", "w+").write(" ")
        import main
        main.main()

    if 'config:stay-updated' in line:
        remember = open("etc/4532/32.txt", "r").read()
Example #48
0
    if rowctr == 0:
        pats = row[1:]

    if rowctr > 1:
        genedict[row[0].split('|')[1]] = rowctr

    rowctr += 1

# Now for each gene in genes, go to the appropriate line and read in the data
intgenes = [item for item in genes if item in genedict.keys()]
parseddata = np.zeros((len(intgenes), len(pats)))
itemctr = 0
for item in intgenes:

    line = linecache.getline(currfile, genedict[item] + 1)
    splitline = line.split('\t')

    # Check that the gene ID of the line we got matches
    if splitline[0].split('|')[1] == item:

        parseddata[itemctr, :] = [float(val) for val in splitline[1:]]  # plain float() (np.float is deprecated); val avoids shadowing the loop's item

    else:
        print 'Entrez ID does not match!'
        pdb.set_trace()

    itemctr += 1

p = pd.DataFrame(parseddata, index=intgenes, columns=pats)
p.to_csv('../data/rna/' + study + '_RNA_Norm.csv')
Example #49
0
            head = False
        if len(line.split(' ')) <= 3:
            r += 1
        count += 1

    a = range(x + 1, count - (r - x) + 1)
    b = np.random.permutation(a)  # random order of a
    d = dict(zip(a, b))
    num = int((count - r) / n)
    print('number of sub-datasets: ', num)  # number of subsets

    print('start splitting particles...')
    for i in range(1, num + 1):
        f = open("batch" + str(i) + ".star", 'a')
        for j in range(1, x + 1):
            line = linecache.getline(filename, j)
            f.write(line)
        for j in range(x + 1, x + n + 1):
            l = d[(i - 1) * n + j]
            line = linecache.getline(filename, l)
            f.write(line)
        f.close()

    if num * n != count - r:
        f = open("batch" + str(num + 1) + "_extra.star", 'a')
        for j in range(1, x + 1):
            line = linecache.getline(filename, j)
            f.write(line)
        for k in range(x + num * n + 1, count):
            line = linecache.getline(filename, d[k])
            f.write(line)
Example #50
0
 def readline(lno=[lineno], *args):
     if args: print(args)
     try:
         return linecache.getline(fname, lno[0])
     finally:
         lno[0] += 1
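The lno=[lineno] default is a deliberate trick: a mutable default argument is created once, so the list survives between calls and acts as a line cursor. A self-contained sketch of the same pattern:

import linecache

def make_reader(fname, lineno=1):
    def readline(lno=[lineno]):  # the list persists across calls, acting as a cursor
        try:
            return linecache.getline(fname, lno[0])
        finally:
            lno[0] += 1
    return readline

read = make_reader(__file__)
print(read())  # line 1 of this file
print(read())  # line 2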
Example #51
0
    def handle_exception(self, frame, event, arg):
        try:
            # print 'handle_exception', frame.f_lineno, frame.f_code.co_name

            # We have 3 things in arg: exception type, description, traceback object
            trace_obj = arg[2]
            main_debugger = self._args[0]

            if not hasattr(trace_obj, 'tb_next'):
                return  #Not always there on Jython...

            initial_trace_obj = trace_obj
            if trace_obj.tb_next is None and trace_obj.tb_frame is frame:
                #I.e.: tb_next should be only None in the context it was thrown (trace_obj.tb_frame is frame is just a double check).

                if main_debugger.break_on_exceptions_thrown_in_same_context:
                    #Option: Don't break if an exception is caught in the same function from which it is thrown
                    return
            else:
                #Get the trace_obj from where the exception was raised...
                while trace_obj.tb_next is not None:
                    trace_obj = trace_obj.tb_next

            if main_debugger.ignore_exceptions_thrown_in_lines_with_ignore_exception:
                for check_trace_obj in (initial_trace_obj, trace_obj):
                    filename = get_abs_path_real_path_and_base_from_frame(
                        check_trace_obj.tb_frame)[1]

                    filename_to_lines_where_exceptions_are_ignored = self.filename_to_lines_where_exceptions_are_ignored

                    lines_ignored = filename_to_lines_where_exceptions_are_ignored.get(
                        filename)
                    if lines_ignored is None:
                        lines_ignored = filename_to_lines_where_exceptions_are_ignored[
                            filename] = {}

                    try:
                        curr_stat = os.stat(filename)
                        curr_stat = (curr_stat.st_size, curr_stat.st_mtime)
                    except:
                        curr_stat = None

                    last_stat = self.filename_to_stat_info.get(filename)
                    if last_stat != curr_stat:
                        self.filename_to_stat_info[filename] = curr_stat
                        lines_ignored.clear()
                        try:
                            linecache.checkcache(filename)
                        except:
                            #Jython 2.1
                            linecache.checkcache()

                    from_user_input = main_debugger.filename_to_lines_where_exceptions_are_ignored.get(
                        filename)
                    if from_user_input:
                        merged = {}
                        merged.update(lines_ignored)
                        #Override what we have with the related entries that the user entered
                        merged.update(from_user_input)
                    else:
                        merged = lines_ignored

                    exc_lineno = check_trace_obj.tb_lineno

                    # print ('lines ignored', lines_ignored)
                    # print ('user input', from_user_input)
                    # print ('merged', merged, 'curr', exc_lineno)

                    if not dict_contains(
                            merged, exc_lineno
                    ):  #Note: check on merged but update lines_ignored.
                        try:
                            line = linecache.getline(
                                filename, exc_lineno,
                                check_trace_obj.tb_frame.f_globals)
                        except:
                            #Jython 2.1
                            line = linecache.getline(filename, exc_lineno)

                        if IGNORE_EXCEPTION_TAG.match(line) is not None:
                            lines_ignored[exc_lineno] = 1
                            return
                        else:
                            #Put in the cache saying not to ignore
                            lines_ignored[exc_lineno] = 0
                    else:
                        #Ok, dict has it already cached, so, let's check it...
                        if merged.get(exc_lineno, 0):
                            return

            thread = self._args[3]

            try:
                frame_id_to_frame = {}
                frame_id_to_frame[id(frame)] = frame
                f = trace_obj.tb_frame
                while f is not None:
                    frame_id_to_frame[id(f)] = f
                    f = f.f_back
                f = None

                thread_id = get_thread_id(thread)
                pydevd_vars.add_additional_frame_by_id(thread_id,
                                                       frame_id_to_frame)
                try:
                    main_debugger.send_caught_exception_stack(
                        thread, arg, id(frame))
                    self.set_suspend(thread, CMD_STEP_CAUGHT_EXCEPTION)
                    self.do_wait_suspend(thread, frame, event, arg)
                    main_debugger.send_caught_exception_stack_proceeded(thread)

                finally:
                    pydevd_vars.remove_additional_frame_by_id(thread_id)
            except:
                traceback.print_exc()

            main_debugger.set_trace_for_frame_and_parents(frame)
        finally:
            #Clear some local variables...
            trace_obj = None
            initial_trace_obj = None
            check_trace_obj = None
            f = None
            frame_id_to_frame = None
            main_debugger = None
            thread = None
Example #52
0
 root_path = CVAR_dataset_path+"\\"+dir
 #ex: C:\OpenARK_test\CVAR\P1\all_fingertips.txt
 cvar_fingertips_file_lines = sum(1 for line in open(root_path+"\\"+"all_fingertips.txt"))
 #ex. CVAR_folders\openark_P1.txt
 if os.path.exists("CVAR_folders\\openark_"+dir+".txt"):
     with open("CVAR_folders\\openark_"+dir+".txt", 'r') as openark_cvar_handle:
         openark_line_count = 0
         for line in openark_cvar_handle:
             openark_line_count += 1
             line_split = line.rstrip().split(' ')  # equivalent to the original split/join/rstrip/split sequence
             CVAR_depth_image = line_split[0]
             CVAR_dir = os.path.dirname(CVAR_depth_image)
             try:
                 cvar_line = linecache.getline(root_path+"\\"+"all_fingertips.txt", int(os.path.basename(CVAR_depth_image)[:6])+1)
                 cvar_line_split = cvar_line.split(' ')
                 cvar_depth_image = cvar_line_split[0]
                 cvar_line_split = ' '.join(cvar_line_split[1:]).rstrip()
                 cvar_line_split = cvar_line_split.split(' ')
                 iterable = iter(cvar_line_split)
                 cvar_sliced = list(iter(lambda: list(itertools.islice(iterable, 2)), []))
                 cvar_sliced_reversed = cvar_sliced[::-1]
                 line_split = line_split[1:]
                 list_of_list = []
                 iterable = iter(line_split)
                 openark_sliced = list(iter(lambda: list(itertools.islice(iterable, 3)), []))
                 cvar_flag_line = linecache.getline(root_path+"\\"+"visible_fingertips.txt", openark_line_count)
                 cvar_flag_line_split = cvar_flag_line.split(' ')
                 cvar_flag_line_image = cvar_flag_line_split[0]
                 cvar_flag_line_split = ' '.join(cvar_flag_line_split[1:]).rstrip()
Example #53
0
    def interaction_print(self):
        filename = self.curframe.f_code.co_filename
        globs = self.curframe.f_globals if hasattr(self, 'curframe') else None
        lno = self.stack[self.curindex][1] # line number
        line = linecache.getline(filename, lno, globs) # current line

        # self.print_next_step(lno, line) # show the next step that will be executed

        # add variables to displaying when they were added to display
        self.usedVars = []
        while self.to_display:
            var = self.to_display.pop()
            # simple variable to print
            if 'simple' == var['v_type']:
                self._display_simple_variable(var)

            elif 'array' == var['v_type']:
                self._display_array_variable(var)

            elif 'dict' == var['v_type']:
                self._display_dict_variable(var)

            elif 'index_of_array' in var['v_type']:
                self._display_array_index(var, var['index_of'])

        # if a wrong line number was given or variable doesn't exist yet, try again later
        while self.failed:
            failed = self.failed.pop()
            self.to_display.append(failed)

        # determine if you have to display a variable next step
        self.check_variables(lno)

        # determine if we have to show a loop next step
        self.check_loops(lno)

        # display loops
        if (self.show_loops):
            # self.message("---------LOOPS--------")
            for loop in self.show_loops:
                if loop['iter_flag']:
                    # self.message('loop ' + loop['v_name'] + '; iteration ' + str(loop['iter']))
                    msg = json.dumps({'type': 'LOOP', 'loop_name': loop['v_name'],
                    'loop_ln': loop['ln'],'iteration': str(loop['iter']) })
                    self.socket.send_string(msg)
                else:
                    # self.message('loop ' + loop['v_name'])
                    msg = json.dumps({'type': 'LOOP', 'loop_name': loop['v_name'],
                    'loop_ln': loop['loop_ln'] })
                    self.socket.send_string(msg)
                if lno == int(float(loop['ln_end']) - 1) and loop['cur_frame'] == self.curframe:
                    loop['iter'] += 1
                self.usedVars.append(loop['v_name'] + str(loop['ln']))

        # display all variables that are in displaying at the current step
        displaying = self.displaying.get(self.curframe)
        if displaying:
            for expr, (oldvalue, arg) in displaying.items():
                lno = arg['ln']
                self.usedVars.append(expr + str(lno))

                newvalue = self._getval_except(expr)
                if not isinstance(newvalue, (list,)):
                    # self.message('%s: %r\n' %
                    #              (expr, newvalue))
                    self.usedVars.append(expr + str(lno))
                else:
                    msg = json.dumps({'type': 'ARRAY', 'v_name': expr,
                    'v_ln': lno, 'v_value': newvalue, 'orientation': arg['orientation'] })
                    self.socket.send_string(msg)

                    if expr in self.array_indices:
                        for var in self.array_indices[expr]:
                            msg = json.dumps({'type': 'ARRAY_INDEX', 'v_name': var['v_name'],
                            'v_ln': var['ln'], 'v_value': self._getval_except(var['v_name']),
                            'index_of': var['index_of'], 'array_ln': var['array_ln'],
                            'color': var['color'], 'shape': var['shape'],
                            'position': var['position'] })
                            self.socket.send_string(msg)

                # detect changes
                if newvalue is not oldvalue and newvalue != oldvalue:
                    displaying[expr] = newvalue
        else:
            pass

        msg = json.dumps(self.usedVars)
        self.socket.send_string(msg)
Example #54
0
def extract_string_from_file(file_name, line):
    return "".join(linecache.getline(file_name, (line)).split("\n"))
Example #55
0
def get_author(patch):
    """Reads the author name and email from the .patch file"""
    author = linecache.getline(patch, 2)
    return author.replace('From: ', '').rstrip()
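A usage sketch, assuming a patch generated by git format-patch, whose second line carries the From: header (the filename is hypothetical):

author = get_author("0001-fix-typo.patch")  # hypothetical patch file
print(author)  # e.g. 'Jane Doe <jane@example.com>'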
Example #56
0
def formatTraceback(ex_type=None, ex_value=None, ex_tb=None, detailed=False):
    """Formats an exception traceback. If you ask for detailed formatting,
    the result will contain info on the variables in each stack frame.
    You don't have to provide the exception info objects, if you omit them,
    this function will obtain them itself using ``sys.exc_info()``."""
    if ex_type is not None and ex_value is None and ex_tb is None:
        # possible old (3.x) call syntax where caller is only providing exception object
        if type(ex_type) is not type:
            raise TypeError(
                "invalid argument: ex_type should be an exception type, or just supply no arguments at all"
            )
    if ex_type is None and ex_tb is None:
        ex_type, ex_value, ex_tb = sys.exc_info()
    if detailed and sys.platform != "cli":  # detailed tracebacks don't work in ironpython (most of the local vars are omitted)

        def makeStrValue(value):
            try:
                return repr(value)
            except:
                try:
                    return str(value)
                except:
                    return "<ERROR>"

        try:
            result = ["-" * 52 + "\n"]
            result.append(" EXCEPTION %s: %s\n" % (ex_type, ex_value))
            result.append(
                " Extended stacktrace follows (most recent call last)\n")
            skipLocals = True  # don't print the locals of the very first stackframe
            while ex_tb:
                frame = ex_tb.tb_frame
                sourceFileName = frame.f_code.co_filename
                if "self" in frame.f_locals:
                    location = "%s.%s" % (frame.f_locals["self"].__class__.
                                          __name__, frame.f_code.co_name)
                else:
                    location = frame.f_code.co_name
                result.append("-" * 52 + "\n")
                result.append("File \"%s\", line %d, in %s\n" %
                              (sourceFileName, ex_tb.tb_lineno, location))
                result.append("Source code:\n")
                result.append("    " + linecache.getline(
                    sourceFileName, ex_tb.tb_lineno).strip() + "\n")
                if not skipLocals:
                    names = set()
                    names.update(getattr(frame.f_code, "co_varnames", ()))
                    names.update(getattr(frame.f_code, "co_names", ()))
                    names.update(getattr(frame.f_code, "co_cellvars", ()))
                    names.update(getattr(frame.f_code, "co_freevars", ()))
                    result.append("Local values:\n")
                    for name in sorted(names):
                        if name in frame.f_locals:
                            value = frame.f_locals[name]
                            result.append("    %s = %s\n" %
                                          (name, makeStrValue(value)))
                            if name == "self":
                                # print the local variables of the class instance
                                for name, value in vars(value).items():
                                    result.append("        self.%s = %s\n" %
                                                  (name, makeStrValue(value)))
                skipLocals = False
                ex_tb = ex_tb.tb_next
            result.append("-" * 52 + "\n")
            result.append(" EXCEPTION %s: %s\n" % (ex_type, ex_value))
            result.append("-" * 52 + "\n")
            return result
        except Exception:
            return [
                "-" * 52 + "\nError building extended traceback!!! :\n",
                "".join(traceback.format_exception(*sys.exc_info())) +
                '-' * 52 + '\n', "Original Exception follows:\n",
                "".join(traceback.format_exception(ex_type, ex_value, ex_tb))
            ]
    else:
        # default traceback format.
        return traceback.format_exception(ex_type, ex_value, ex_tb)
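A usage sketch: called inside an except block with no arguments, the function picks up the active exception via sys.exc_info():

try:
    1 / 0
except ZeroDivisionError:
    print("".join(formatTraceback(detailed=True)))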
Example #57
0
def main():
    """
    Add binary flags (0/1) to a differential expression dataset depending on p-value thresholds.

    Arguments:
        :param deaDataset: Matrix with Differential Expression Analysis information
        :type deaDataset: file

        :param pvalue: Name of the column with the p-value information
        :type pvalue: string

        :param uniqid: Name of the column with the unique identifier
        :type uniqid: string

        :param thresholds: Desired flag thresholds. Must be separated with ",", no spaces allowed.
        :type thresholds: string

    Returns:
        :return output: Table with the input plus the corresponding flag columns
        :rtype output: file

        :return flags: Table with only the corresponding flag columns
        :rtype flags: file
    """
    args = getOptions()
    logger = logging.getLogger()
    sl.setLogger(logger)
    logger.info(
        u"""Importing data with following parameters: \
        \n\tDEA Dataset: {0}\
        \n\tUnique ID: {1}\
        \n\tPvalues: {2}\
        \n\tThresholds: {3}""".format(
            args.deaDataset, args.uniqID, args.pvalue, args.thresholds
        )
    )

    modules.checkForDuplicates(args.deaDataset, args.uniqID)

    output = open(args.output, "w")
    flags = open(args.flags, "w")

    with open(args.deaDataset, "r") as data:
        header = data.readline().strip().split("\t")

    thresholds = args.thresholds.split(",")

    header_list = []
    for word in header:
        if word == "":
            output.write("NA")
            header_list.append("NA")
        elif header.index(word) == len(header) - 1:
            word = word.replace('"', "")
            output.write(word)
            header_list.append(word)
        else:
            word = word.replace('"', "")
            output.write(word + "\t")
            header_list.append(word)

    flags.write(str(args.uniqID))
    for threshold in thresholds:
        flags.write("\tFlag_" + threshold)
        output.write("\tFlag_" + threshold)
        header_list.append("\tFlag_" + threshold)

    flags.write("\n")
    output.write("\n")
    # Get P value column from a DEA dataset
    deaTable = genfromtxt(
        args.deaDataset,
        delimiter="\t",
        usecols=header_list.index(args.pvalue),
        dtype=None,
    )
    deaTable = np.delete(deaTable, 0)

    # Add 1/0 if smaller/greater than threshold
    i = 2
    for pvalue in deaTable:
        line = linecache.getline(args.deaDataset, i).strip()
        pvalue = float(pvalue.strip())
        flags.write(line.split("\t")[header_list.index(args.uniqID)])
        output.write(line)
        for threshold in thresholds:
            if pvalue <= float(threshold):
                flags.write("\t1")
                output.write("\t1")
            else:
                flags.write("\t0")
                output.write("\t0")
        flags.write("\n")
        output.write("\n")
        i += 1

    return args
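For intuition, the flag rule in isolation (a sketch): with thresholds "0.01,0.05", a row whose p-value is 0.03 gets Flag_0.01 = 0 and Flag_0.05 = 1.

pvalue = 0.03
for threshold in ("0.01", "0.05"):
    print(1 if pvalue <= float(threshold) else 0)  # -> 0, then 1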
Example #58
0
 for i, fname in enumerate([
         os.path.join(root, file) for file in files
         if not any(exclusion in root for exclusion in exclusions)
 ]):
     try:
         if fname.endswith(('.py', '.sh')):
             # find all consecutive comment lines that include a flag string (exclude shebang lines that come before)
             headerlines = []
             flagstring = "License Agreement"
             with open(fname, 'r') as f:
                 for j, line in enumerate(f, 1):
                     if line.strip().startswith(
                             '#') and not line.strip().startswith('#!'):
                         if j == 1:
                             linestart = 1
                         if not linecache.getline(fname, j-1).strip().startswith('#') or \
                                 linecache.getline(fname, j-1).strip().startswith('#!'):
                             linestart = j
                         if not linecache.getline(
                                 fname, j + 1).strip().startswith('#'):
                             lineend = j
                             tmp = {}
                             for k in range(linestart, lineend + 1):
                                 tmp[k] = linecache.getline(fname, k)
                             for e in tmp:
                                 if flagstring in tmp[e]:
                                     headerlines.append(tmp)
                                     break
             print('File {} has {} contiguous headers'.format(
                 fname, len(headerlines)))
             for n, h in enumerate(headerlines):
Example #59
0
#%% Load libraries
from __future__ import division
import pandas as pd
from linecache import getline
import numpy as np
import glob

#%% Path of input & output files
pathI="...\\input\\"
pathO="...\\results\\"
pathOv="...\\overland\\"

#%% load spatial input data

#read Header
hdr=[getline(pathI+'twi.txt',i) for i in range(1,7)]
values=[float(h.split(" ")[-1].strip()) \
    for h in hdr]
cols,rows,lx,ly,cell,nd=values
xres=cell
yres=cell*-1

#read topographic index
topo=np.loadtxt(pathI+'twi.txt',skiprows=6)
hru=np.loadtxt(pathI+'hru.txt',skiprows=6)
topo[topo==-9999]='NaN'
hru[hru==-9999]='NaN'

#lamWell
lam1=topo[hru==3]
lam1[np.isnan(lam1)]=np.nanmean(lam1)
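For reference, the six header lines read with getline above follow the ESRI ASCII grid convention, something like the following (values hypothetical; the parser keeps the last whitespace-separated token of each line):

ncols        100
nrows        100
xllcorner    500000.0
yllcorner    4100000.0
cellsize     50.0
NODATA_value -9999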
Example #60
0
def my_tracer(frame, event, arg=None):
    global currline
    global current_variables
    global stack
    global execTrace
    global visited
    whitespace = "&nbsp;"*4
    code = frame.f_code
    func_name = code.co_name
    currInd = 0
    cellNo = 0
    if func_name == 'encode' or func_name == 'main' or func_name[0] == "<":
        return
    line_no = frame.f_lineno - 124  # fixed offset: the traced user code apparently starts 124 lines into combine.py
    if line_no in searchLinoNo[0]:
        currInd = searchLinoNo[0].index(line_no)
        # print(currInd)
        line = linecache.getline("combine.py", line_no+124)
        # print("line %d: %s" % (line_no, line))
        execTrace.append(currInd)
        # currInd = searchLinoNo[0].index(line_no)
        # print("currind ",currInd)
    else:
        return
    

    if event == 'call':
        # print(currInd)
        # print("call lineno",line_no)
        call_entry = "Enter a function " + func_name + " with arguments"
        for j, k in frame.f_locals.items():
            call_entry += " " + str(j) + " -> " + str(k)
        cellNo = (currInd)*2
        # print(cellNo)
        # print(nb['cells'][cellNo]['source'])
        nb['cells'][cellNo]['source'] += call_entry + "\n\n"

    if event == 'line':
        new_variables = inspect.stack()[1][0].f_locals
        for var in new_variables:
            if var not in current_variables:
                text = "Introduce a variable :- " + \
                    var + " = " + str(new_variables[var])
                # if currLine == len(output):
                #     output.append([line_no, text])
                # else:
                #     output[currLine][1] = output[currLine][1] + \
                #         " -> " + str(new_variables[var])
                # print(currLine,output)
                cellNo = (execTrace[-2])*2
                # print("cellNo " ,cellNo)
                # if cellNo not in visited:
                nb['cells'][cellNo]['source'] += whitespace*lineNo[execTrace[-2]][1] + text + "\n\n"
                    # visited.append(cellNo)


                # nb['cells'].append(nbf.v4.new_markdown_cell(text))
                # nb['cells'].append(nbf.v4.new_code_cell())
                # currLine += 1
                # print("<div style=\"display:inline-block;width:50px;\"></div>", "<div style=\"display:inline-block;\">%s</div>" % (var + " = " + str(new_variables[var]) + " is introduced."),"<br>")

            else:
                if new_variables[var] != current_variables[var]:
                    # print("var ", var)
                    text = var + " = " + \
                        str(current_variables[var]) + \
                        " -> " + str(new_variables[var])
                    cellNo = (execTrace[-2])*2
                    # print("hello else", currInd)
                    # print("cellNo " ,cellNo)
                    # if cellNo not in visited:
                    nb['cells'][cellNo]['source'] += whitespace*lineNo[execTrace[-2]][1] + text + "\n\n"
                        # visited.append(cellNo)
                    # if currLine == len(output):
                    #     output.append([line_no, text])
                    # else:
                    #     output[currLine][1] = output[currLine][1] + \
                    #         " -> " + str(new_variables[var])
                    # nb['cells'].append(nbf.v4.new_markdown_cell(text))
                    # nb['cells'].append(nbf.v4.new_code_cell())
                    # currLine += 1
                    # print("<div style=\"display:inline-block;width:50px;\"></div>", "<div style=\"display:inline-block;\">%s</div>" % (var + " = " + str(current_variables[var]) + " -> " + str(new_variables[var])),"<br>")

        # curr_indent = 0
        # for c in curr_code:
        #     if c == " ":
        #         curr_indent += 1
        #     else:
        #         break

        current_variables = copy.deepcopy(new_variables)
        stack.append({copy.deepcopy((execTrace[-2])*2):copy.deepcopy(current_variables)})

    return my_tracer
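A usage sketch: a tracer like this is installed with sys.settrace before running the code under study (the globals it depends on, such as nb, searchLinoNo, lineNo, and execTrace, would need to be initialized first; traced_function is hypothetical):

import sys

sys.settrace(my_tracer)  # install the tracer
traced_function()        # hypothetical call whose execution gets logged
sys.settrace(None)       # uninstall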