Example #1
 def test_state_is_None(self):
     """Tests fileinput.isfirstline() when fileinput._state is None.
        Ensure that it raises RuntimeError with a meaningful error message
        and does not modify fileinput._state"""
     fileinput._state = None
     with self.assertRaises(RuntimeError) as cm:
         fileinput.isfirstline()
     self.assertEqual(("no active input()",), cm.exception.args)
     self.assertIsNone(fileinput._state)
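The module-level behavior this test pins down is easy to reproduce: every fileinput convenience function delegates to the shared fileinput._state instance, so calling isfirstline() outside an active input() session raises RuntimeError. A minimal sketch:

import fileinput

try:
    fileinput.isfirstline()  # no fileinput.input() session is active
except RuntimeError as exc:
    print(exc)  # -> no active input()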
Example #2
def add_header(header, intFile):
    for line in fileinput.input(intFile, inplace=True):
        if fileinput.isfirstline():
            print "\n",
        print line,
    for line in fileinput.input(intFile, inplace=True):
        if fileinput.isfirstline():
            print header,
        print line,
Example #3
def add_header( header, intFile ):
    print 'adding headers'
    for line in fileinput.input(intFile, inplace=True):
        if fileinput.isfirstline():
            print '\n',
        print line,
    for line in fileinput.input(intFile, inplace=True):
        if fileinput.isfirstline():
            print header,
        print line,
Example #4
def check_files(files, verbose):
    in_multiline = False
    logical_line = ""
    token = False
    for line in fileinput.input(files):
        if verbose and fileinput.isfirstline():
            print "Running bash8 on %s" % fileinput.filename()
        # NOTE(sdague): multiline processing of heredocs is interesting
        if not in_multiline:
            logical_line = line
            token = starts_multiline(line)
            if token:
                in_multiline = True
                continue
        else:
            logical_line = logical_line + line
            if not end_of_multiline(line, token):
                continue
            else:
                in_multiline = False

        check_no_trailing_whitespace(logical_line)
        check_indents(logical_line)
        check_for_do(logical_line)
        check_if_then(logical_line)
Example #5
	def __write(self, files, path):
		try:
			dummy = tempfile.NamedTemporaryFile(mode='w+t',
				delete=False)

			files = self.__normalize(files)

			for line in fileinput.input(files):
				dummy.write('%s%s' %
					(fileinput.isfirstline() and '\n' or '', line))

			# close (and flush) the temp file before moving it into place
			dummy.close()

			try:
				shutil.move(dummy.name, path)

			except IOError as error:
				self.log(error, True)

		except FileNotFoundError:
			self.log('the file %s was not found' %
				fileinput.filename(), True)

		else:
			self.log('the file %s was built' % path)
Example #6
def get_newdata_heading(filename):
	for line in fileinput.input(filename, inplace=1):
		if fileinput.isfirstline():
			print (get_newhead(line))[:-1]
		else:
			print (line)[:-1]
	fileinput.close()
Example #7
def clean_clutter_in(files, tabsize=8):
  if not files: return
  n_empty = 0
  for fname in files:
    if not os.path.isdir(fname):
      for line in input(fname, inplace=1):
        if (isfirstline()):
          if (not isstdin()):
            print >> sys.__stdout__, filename() + ':'
          n_empty = 0
        clean_line = line.expandtabs(tabsize).rstrip()
        if (len(clean_line) == 0):
          n_empty += 1
        else:
          for i in xrange(n_empty): sys.stdout.write("\n")
          n_empty = 0
          sys.stdout.write(clean_line)
          sys.stdout.write("\n")
    # explicitly convert Windows linebreaks into Unix linebreaks
    wfile = open(fname,"r")
    wstr=wfile.read()
    wfile.close()
    ustr = wstr.replace("\r\n", "\n")
    ufile=open(fname,'wb')
    ufile.write(ustr)
    ufile.close()
Example #8
def main(args):
    ap = argparse.ArgumentParser()
    ap.add_argument('files', nargs='*', help='files to unique (must be sorted first)')
    ns = ap.parse_args(args)

    def _print(lines):
        if lines is not None:
            print ''.join(lines)

    fileinput.close()  # in case it is not closed
    try:
        prev_line = None
        lines = None
        for line in fileinput.input(ns.files):
            if fileinput.isfirstline():
                _print(lines)
                lines = []
                prev_line = None
            if prev_line is None or line != prev_line:
                lines.append(line)
            prev_line = line

        _print(lines)

    finally:
        fileinput.close()
Example #9
def main():
    global decorators
    global decorator_dictionary
    global line_number
    global non_blank_line_number
    global last_blank
    
    for index,value in decorator_dictionary.items():
        if args[index]:
            decorators.append(value)
        else:
            pass
    #print decorators
    
    for line in fileinput.input(args['FILE']): # Create FileInput instance to handle files.
        line_number = fileinput.lineno()
        
        if fileinput.isfirstline() == True: # reset count of non_blank_line_number for a new file.
           non_blank_line_number = 1
        elif line.isspace() == False: # if the line is not blank.
           non_blank_line_number += 1
     
        
        output_line = line
        for d in decorators: # loop to apply decorators
            output_line = d(output_line)
            
        if line.isspace()==True: # update last_blank to ensure we know if a blank just passed
            last_blank = True
        else:
            last_blank = False
        
        if output_line is not None: # if the line isn't None, print it.
            print output_line,
Example #10
 def _write_post(self, post):
     upost = unicode(post)
     self._print(upost)
     date = post.get_date()
     rx = re.compile(r'^([0-9]{4}\/[0-9]{2}\/[0-9]{2}).*')
     written = False
     # write the post chronologically in output file
     for line in fileinput.input(self._output, inplace=1, backup=".tmp"):
         if fileinput.isfirstline():
             if LEDGER_MODE_DIRECTIVE not in line:
                 print(LEDGER_MODE_DIRECTIVE)
         m = rx.match(line)
         if not written and m is not None and m.group(1) > date:
             written = True
             if self._encoding:
                 print(upost.encode(self._encoding))
             else:
                 print(upost)
         print line,
     if not written:
         with open(self._output, 'a') as o:
             if self._encoding:
                 o.write('\n')
                 o.write(upost.encode(self._encoding))
             else:
                 o.write('\n')
                 o.write(upost)
     if self._interactive:
         self._resources.write()
     return post
Example #11
def add_header(input_fpath, header):
    import fileinput

    for line in fileinput.input(files=[input_fpath], inplace=True):
        if fileinput.isfirstline():
            print(header)
        print(line, end="")
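Several of these examples hinge on the side effect of inplace=True: while the loop runs, fileinput moves the original file aside and redirects sys.stdout into a replacement file, so whatever the loop prints becomes the file's new contents. A minimal sketch (the file name is hypothetical):

import fileinput

# Assumes notes.txt exists; prints inside the loop go to the
# replacement file, not to the console.
for line in fileinput.input("notes.txt", inplace=True):
    if fileinput.isfirstline():
        print("# header added above the first line")
    print(line, end="")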
Example #12
File: sort.py Project: BBOOXX/stash
def main(args):
    ap = argparse.ArgumentParser()
    ap.add_argument('files', nargs='*', help='files to sort')
    ap.add_argument('-r', '--reverse', action='store_true', default=False,
                    help='reverse the result of comparisons')
    ns = ap.parse_args(args)

    def _print(lines):
        if lines is not None:
            lines = sorted(lines)
            if ns.reverse:
                lines = lines[::-1]
            print(''.join(lines))

    fileinput.close()  # in case it is not closed
    try:
        lines = None
        for line in fileinput.input(ns.files):
            if fileinput.isfirstline():
                _print(lines)
                lines = []
            lines.append(line)

        _print(lines)

    finally:
        fileinput.close()
Example #13
def strip_header(input_fpath):
    import fileinput

    for line in fileinput.input(files=[input_fpath], inplace=True):
        if fileinput.isfirstline():
            continue
        print(line, end="")
Example #14
def main():
    """
    A function which creates a PPM image based on a set of commands.
    """
    image = {}
    # Iterate over the commands in standard input
    for line in fileinput.input():
        # Get the size from the first line
        if fileinput.isfirstline():
            columns = line.split()[0]
            rows = line.split()[1]
            # Set the size of the image
            image["columns"] = int(columns)
            image["rows"] = int(rows)

        # Handle the commands in the lines after the first
        else:
            command = line.split()
            if command[0] == "point":
                draw_point(image, command)
            elif command[0] == "line":
                draw_line(image, command)
            elif command[0] == "rect":
                draw_rect(image, command)

    # Display the finished image
    display_image(image)
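The input format implied by main(): the first line carries the image size as columns then rows, and every later line is a command dispatched on its first token to the draw_point, draw_line, and draw_rect helpers (not shown). A hypothetical input stream, with guessed argument counts since main() only inspects command[0]:

8 8
point 2 3
line 0 0 7 7
rect 1 1 4 4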
Example #15
	def __init__(self, graph_file):
		""" Read graph_file and store its topology.

		self.header : stores the header of the graph.
		self.neighbors : stores the neighbors of each node.
		self.remaining_nodes : stores a list of nodes which are not yet at the center of any pack.
		self.remaining_neighbors : neighbors that are in remaining_nodes. Only useful for, and updated on, remaining_nodes.
		"""
		
		self.nb2name = list()
		self.name2nb = dict()
		self.neighbors = list()
		cpt = 0
		for line in fileinput.input(graph_file):
			if fileinput.isfirstline():
				self.header = line
				continue
			n1, n2, tmp = line.split('\t', 2)
			self.add_node(n1)
			self.add_node(n2)
			n1 = self.name2nb[n1]
			n2 = self.name2nb[n2]
			
			self.neighbors[n1].append(n2)
			self.neighbors[n2].append(n1)

			cpt += 1
			if cpt % 1000000 == 0 :
				print(str(cpt))


		self.remaining_nodes = list(range(len(self.nb2name)))
		self.remaining_neighbors = list(self.neighbors)
Example #16
def rewrite_pix4d_csv(base_dir, data_store):
    meta_file = os.path.join(base_dir, 'image-metadata.txt')
    pix4d_file = os.path.join(base_dir, 'pix4d-ekf.csv')

    f_out = open(pix4d_file, 'w')
    f_out.write('File Name,Lat (decimal degrees),Lon (decimal degrees),Alt (meters MSL),Roll (decimal degrees),Pitch (decimal degrees),Yaw (decimal degrees)\n')

    i = 0
    for line in fileinput.input(meta_file):
        if fileinput.isfirstline():
            continue
        tokens = line.split(',')
        image = tokens[0]
        (lat, lon, alt, psi, the, phi, time) = map(float, tokens[1:])
        time /= 1000000.0       # convert to seconds
        while data_store.time[i] < time:
            i += 1
        line = "%s,%.8f,%.8f,%.4f,%.4f,%.4f,%.4f" % \
               (image,
                data_store.nav_lat[i]*180.0/math.pi,
                data_store.nav_lon[i]*180.0/math.pi,
                data_store.nav_alt[i],
                data_store.phi[i]*180.0/math.pi,
                data_store.the[i]*180.0/math.pi,
                data_store.psi[i]*180.0/math.pi)
        f_out.write(line + '\n')
    f_out.close()
Example #17
def rewrite_image_metadata_txt(base_dir, data_store):
    meta_file = os.path.join(base_dir, 'image-metadata.txt')
    new_file = os.path.join(base_dir, 'image-metadata-ekf.txt')

    f_out = open(new_file, 'w')
    f_out.write('File Name,Lat (decimal degrees),Lon (decimal degrees),Alt (meters MSL),Yaw (decimal degrees),Pitch (decimal degrees),Roll (decimal degrees),GPS Time (us since epoch)\n')

    i = 0
    for line in fileinput.input(meta_file):
        if fileinput.isfirstline():
            continue
        tokens = line.split(',')
        image = tokens[0]
        (lat, lon, alt, psi, the, phi, time_orig) = map(float, tokens[1:])
        time_sec = time_orig / 1000000.0       # convert to seconds
        while data_store.time[i] < time_sec:
            i += 1
        line = "%s,%.8f,%.8f,%.4f,%.4f,%.4f,%.4f,%.0f" % \
               (image,
                data_store.nav_lat[i]*180.0/math.pi,
                data_store.nav_lon[i]*180.0/math.pi,
                data_store.nav_alt[i],
                data_store.psi[i]*180.0/math.pi,
                data_store.the[i]*180.0/math.pi,
                data_store.phi[i]*180.0/math.pi,
                time_orig)
        f_out.write(line + '\n')
    f_out.close()
Example #18
def prim():
    graph = {}
    for line in fileinput.input():
        if not fileinput.isfirstline():
            #print line
            line = line.split()
            try:
                graph[line[0]].append([line[1], line[2]])
            except:
                graph[line[0]] = [[line[1], line[2]]]
    #print graph

    mst = {}

    for u in graph:
        mst[u] = [sys.maxint,sys.maxint]
        for v in xrange(0, len(graph[u])):
            #print graph[u][v][1]
            #print mst[u][1]
            print type(graph[u][v][1])
            print type(mst[u][1])
            #print int(graph[u][v][1]) < int(mst[u][1]) 
            if int(graph[u][v][1]) < int(mst[u][1]):
                #print "herpaderp"
                mst[u] = [graph[u][v][0], graph[u][v][1]]

    print mst
Example #19
    def temp_config(self, save):  # self.conf, self.conf_temp
        import re
        import fileinput

        # The save parameter controls whether the hack is removed or added:
        # If save is False, it adds    the hack
        # If save is True,  it removes the hack
        # 		The hack is prepending [GAME] to the temp file so ConfigParser can be used on it
        # If save is anything else, it just copies src(file) -> dest(file)
        if save == False:
            src = self.conf_file
            dest = self.conf_temp_file
        else:
            dest = self.conf_file
            src = self.conf_temp_file

        dest_fd = open(dest, "w+")
        remove_whitespace = re.compile("( = )")

        empty = True

        if os.path.isfile(src) == True:
            for line in fileinput.input(src):
                empty = False
                line_fixed = remove_whitespace.sub("=", line)
                if fileinput.isfirstline() == True:
                    if save == False:
                        if line != "[GAME]\n":
                            dest_fd.write("[GAME]\n")
                        dest_fd.write(line_fixed)
                else:
                    dest_fd.write(line_fixed)
        if empty == True:
            dest_fd.write("[GAME]\n")
        dest_fd.close()
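The [GAME] hack exists because ConfigParser rejects files that do not begin with a section header. A minimal sketch of the failure and the workaround, using the Python 3 module name (the snippet above targets the older API):

import configparser

parser = configparser.ConfigParser()
try:
    parser.read_string("width=640\nheight=480\n")
except configparser.MissingSectionHeaderError as exc:
    print(exc)  # file contains no section headers

# Prepending a section header makes the same data parseable.
parser.read_string("[GAME]\nwidth=640\nheight=480\n")
print(parser["GAME"]["width"])  # -> 640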
Example #20
def movementCheck(args):
    if len(args) != 4:
        stderr.write("\n\033[1;31mError\033[0m: bad arguments passed in.\n")
        exit(1)

    currentXPosition = int(args[2])
    currentYPosition = int(args[1])
    desiredDirection = args[3]
    desiredPosition = 2*[1] # desiredPosition[xpos, ypos]

    for line in fileinput.input('map'): # for each line in the input file...
        if fileinput.isfirstline(): #get the size of the map
            xwidth = line[:line.find('x')] # grab the first part up to the x
            ywidth = line[line.find('x') + 1:] # grab the part after the x
            xwidth = int(xwidth) # convert the xwidth to an integer
            ywidth = int(ywidth) # convert the ywidth to an integer
            stderr.write("%d x %d\n" % (xwidth, ywidth))
            stderr.write("\t0123456789abcdef\n")
            currentMap = (xwidth)*[ywidth]
        else:
            if fileinput.lineno() > 1:
                currentMap[fileinput.lineno()-2] = list(line)

    for x in range(int(xwidth)):
        stderr.write("%d\t" %(x))
        for y in range(ywidth):
            #stderr.write("%s" %(currentMap[x][y]))
            if x == currentXPosition and y == currentYPosition:
                stderr.write("\033[1;31m%s\033[0m"%(currentMap[x][y]))
            elif currentMap[x][y] =='W':
                stderr.write("\033[1;34m%s\033[0m"%(currentMap[x][y]))
            elif currentMap[x][y] =='B': # check for bridges
                stderr.write("\033[1;43m%s\033[0m"%(currentMap[x][y]))
            else:
                stderr.write("\033[1;32m%s\033[0m"%(currentMap[x][y]))
        stderr.write("\n")
    #ignore variable names, they are backwards
    if desiredDirection == "left" and currentXPosition > 0:
        desiredPosition[0] = currentXPosition
        desiredPosition[1] = currentYPosition - 1
    elif desiredDirection == "right" and currentXPosition < xwidth:
        desiredPosition[0] = currentXPosition
        desiredPosition[1] = currentYPosition + 1
    elif desiredDirection == "up" and currentYPosition > 0:
        desiredPosition[0] = currentXPosition - 1
        desiredPosition[1] = currentYPosition
    elif desiredDirection == "down" and currentYPosition < ywidth:
        desiredPosition[0] = currentXPosition + 1
        desiredPosition[1] = currentYPosition

    # CHANGED  THE desiredPosition[ ]...IF INPUT  WAS 4 0 right...the desired position was returning as 0 5, instead of 5 0
    # When trying to move past the upper boundary and left boundary, the desired position returns -1 instead of an error message
    # ORIGINAL INDEX VALUES                                             0                   1                              0                   1
    stderr.write("\nDesired position: %d,%d is: %s\n" %(desiredPosition[1], desiredPosition[0], currentMap[desiredPosition[1]][desiredPosition[0]]))

    if currentMap[desiredPosition[0]][desiredPosition[1]] == "E" or currentMap[desiredPosition[0]][desiredPosition[1]] == "B":
        acceptable = True
    else:
        acceptable = False
    return(acceptable)
Example #21
def readLGF_Network(source):
    mInFile = open(source, mode='r')
    fileString = mInFile.read()
    mInFile.close()
    if '@arcs' in fileString:
        initKey = '@arcs'
        initPos = 6
    elif '@edges' in fileString:
        initKey = '@edges'
        initPos = 7
    else:
        print('No keyword \'@arcs\' or \'@edges\' found\n Wrong file format')
        return
    strDat = fileString[fileString.find(initKey)+initPos:]
    mOutFile = open('tmp.txt',mode='w')
    mOutFile.write(strDat)
    mOutFile.close()
    mOutFile = open('tmp2.txt',mode='w')
    for line in fileinput.input('tmp.txt'):
        if not fileinput.isfirstline():
            mOutFile.write(line)
    mOutFile.close()
    os.remove('tmp.txt')
    g = nx.read_edgelist('tmp2.txt', nodetype=int, edgetype=int, data=False )
    os.remove('tmp2.txt')
    return g
Example #22
    def test_missing_debug_statements(self):
        # Exclude explicit debug statements written in the code
        exclude = {
            'regex.py': [240, 241],
        }

        message = "\nFound a missing debug statement at line %d of file %r: %r"
        filename = None
        file_excluded = []
        files = (
            glob.glob(os.path.join(self.source_dir, '*.py')) +
            glob.glob(os.path.join(self.source_dir, 'validators/*.py'))
        )

        for line in fileinput.input(files):
            if fileinput.isfirstline():
                filename = fileinput.filename()
                file_excluded = exclude.get(os.path.basename(filename), [])
            lineno = fileinput.filelineno()

            if lineno in file_excluded:
                continue

            match = self.missing_debug.search(line)
            self.assertIsNone(match, message % (lineno, filename, match.group(0) if match else None))
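Note the pairing of fileinput.filelineno() with per-file exclusion lists: unlike fileinput.lineno(), which counts cumulatively across all the input files, filelineno() restarts at 1 in each file. A minimal sketch (hypothetical file names):

import fileinput

# With two 2-line files, lineno() yields 1,2,3,4 while
# filelineno() yields 1,2,1,2.
for line in fileinput.input(["a.py", "b.py"]):
    print(fileinput.lineno(), fileinput.filelineno(), line, end="")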
Example #23
def add_uses(fich, list_use):
    for line in fileinput.input(fich, inplace=1):
        if fileinput.isfirstline():
            print(line),
            for use in list_use:
                print("  use " + use)
        else:
            print(line),
Example #24
def read_input(f):
    if not f:
        raise Exception("invalid file")
    numbers = []
    for line in fileinput.input(f):
        if not fileinput.isfirstline():
            numbers.append(int(line.strip()))
    return numbers
Example #25
def read_input(f):
    if not f:
        raise Exception("invalid file")
    strings_list = []
    for line in fileinput.input(f):
        if not fileinput.isfirstline():
            strings_list.append(str(line.strip()))
    return strings_list
Example #26
def replace_tags(filenames, tagdict={}, dry_run=False):
    """
    Update known tags in a list of files by modifying
    them in place.
    Always updates the ##COPYRIGHT## tag with the
    contents of the COPYRIGHT file.
    @param tagdict: a dictionary of tags to search for
    and the value that the tag should be replaced with.

    Only one tag should be used per line as this function
    is quite stupid and looks for a line starting with the
    tag, ignoring the rest of the line and replacing the
    whole line with tag = tag_value.
    """
    copyright_file = 'COPYRIGHT'
    copydata = open(copyright_file).read()

    for line in fileinput.input(filenames, inplace=True):
        matched = False

        # replace python #! line
        if fileinput.isfirstline():
            match = first_line_re.match(line)
            if match:
                matched = True
                post_interp = match.group(1) or ''
                if not dry_run:
                    sys.stdout.write("#!%s%s\n" % (os.path.join(
                        sysconfig.get_config_var("BINDIR"),
                        "python" + sysconfig.get_config_var("EXE")),
                                                   post_interp))
                    pass
                pass
            else:
                if not dry_run:
                    sys.stdout.write(line)
                    pass
                continue
            pass
        
        
        if line.startswith('##COPYRIGHT##'):
            if not dry_run:
                sys.stdout.write(copydata)
            matched = True
            continue

        for tag in tagdict:
            if line.startswith(tag):
                if not dry_run:
                    sys.stdout.write("%s = '%s'\n" % (tag, tagdict[tag]))
                matched = True
                break

        # this only happens if nothing matches
        if not matched:
            if not dry_run:
                sys.stdout.write(line)
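A hypothetical invocation of replace_tags; first_line_re (defined elsewhere) is assumed to match a #! interpreter line, and each tag is matched at the start of a line, with the whole line rewritten as tag = 'value':

# Hypothetical file and tag values, shown only to illustrate the call.
replace_tags(
    ["mypackage/version.py"],
    tagdict={"__version__": "1.2.3", "__author__": "Jane Doe"},
)
# A line starting with __version__ becomes: __version__ = '1.2.3'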
Example #27
def check_tidy(src_dir):
    count_lines = 0
    count_empty_lines = 0

    for (dirpath, dirnames, filenames) in os.walk(src_dir):
        if any(d in dirpath for d in skip_dirs):
            continue

        files = [os.path.join(dirpath, name) for name in filenames
                 if is_interesting(name)]

        if not files:
            continue

        contents = ""
        license_checked = False

        for line in fileinput.input(files):

            if '\t' in line:
                report_error('TAB character')
            if '\r' in line:
                report_error('CR character')
            if line.endswith(' \n') or line.endswith('\t\n'):
                report_error('trailing whitespace')
            if not line.endswith('\n'):
                report_error('line ends without NEW LINE character')

            if len(line) - 1 > column_limit:
                report_error('line exceeds %d characters' % column_limit)

            if fileinput.isfirstline():
                contents = ""
                license_checked = False
                license_checked = False

            count_lines += 1
            if not line.strip():
                count_empty_lines += 1

            if len(contents) < 700:
                contents += line
            elif not license_checked:
                if not check_license(contents):
                    report_error_name_line(fileinput.filename(),
                                           1,
                                           'incorrect license')
                license_checked = True

    print
    print "* total line of code: %d" % count_lines
    print ("* total non-blank line of code: %d"
           % (count_lines - count_empty_lines))
    print "%s* total errors: %d%s" % (TERM_RED if count_err > 0 else TERM_GREEN,
                                      count_err,
                                      TERM_EMPTY)
    print

    return count_err == 0
Example #28
def fileinput_with_firstline(file_path):
    """ Using the Python standard library `fileinput` to read a CSV file.
        I still need to use split to break the string into a list.
    """
    line_bits = None
    for line in fileinput.input(file_path):
        if fileinput.isfirstline():
            header = line
        line_bits = line.rstrip().split(",")
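As the docstring concedes, fileinput hands back raw strings, so the caller still splits every row by hand; the standard csv module does that tokenizing (including quoted fields that contain commas) for you. A hedged alternative sketch (hypothetical file name):

import csv

# Reads the header row, then each data row already split into fields.
with open("data.csv", newline="") as f:
    reader = csv.reader(f)
    header = next(reader)
    for line_bits in reader:
        pass  # line_bits is a list of column values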
Example #29
def RemoveOverlap(bedfile,loc,eventstxt):
    ''' The eventstxt file must contain a header. It is better if the bedfile is sorted, but that is not required.'''
    path = os.getcwd()
    filename = FormatConversion.GetFilename(bedfile)
    base = FormatConversion.StripExtension(filename)
    fileout = open(path+"/"+loc+"/"+base+"_NoOverlapp.bed","w")
    p = subprocess.Popen(["bedtools","intersect","-wo","-a",bedfile,"-b",bedfile],stdout=subprocess.PIPE)
    overlaps = {}
    ''' The following for loop constructs a dictionary with each event as the key and the
    value being a list of all overlapping events'''
    for line in p.stdout.readlines():
        line=line.strip()
        tabs = line.split("\t")
        if int(tabs[12]) < 200:
            overlaps.setdefault(tabs[3],[]).append(tabs[9])
    ipcount = {}
    ''' The following for loop constructs a dict with events as keys and the IP counts as values'''
    for row in fileinput.input(eventstxt):
        if not fileinput.isfirstline():
            row=row.strip()
            tabs = row.split("\t")
            ipcount[tabs[0]] = float(tabs[1])
    ''' The following for loop removes all the overlapping intervals'''
    for it in overlaps.keys():
        ''' iterate over all the overlapping events '''
        if overlaps[it][-1] != 0 and overlaps[it][-1] != 1:
            ''' Making sure the current event was not visited in a direct or an indirect way '''
            for i in overlaps[it]:
                ''' iterate over all the events that 'it' overlaps with'''
                ''' check is 1 if 'it' has the max IP count and 0 if it does not (initialized to 1)'''
                check=1
                ''' making sure we iterate only over events, not over the 1 and 0 markers'''
                if i != 0 and i != 1:
                    if ipcount[it] < ipcount[i]:
                        check=0
            
            if check == 0:
                ''' 'it' does not have the max IP count'''
                overlaps[it].append(int(0))
            else:
                ''' If it has the maximum IP count then mark it 1 and mark all the other events in its value
                list 0 irrespective of what they had earlier. Note: these events will never be visited again'''
                overlaps[it].append(int(1))
                for j in overlaps[it]:
                    if j !=0 and j != 1:
                        overlaps[j].append(int(0))
    ''' For loop that prints all the events that have the max IP count among overlapping events '''
    for line in fileinput.input(bedfile):
        line.strip()
        tabs = line.split("\t")
        if tabs[3] in overlaps.keys():
            if overlaps[tabs[3]][-1] == 1:
                fileout.write(line)
        else:
            fileout.write(line)
    return None
Example #30
def process(line, flags):
    if 'include_header' in flags:
        if fileinput.isfirstline():
            sys.stdout.write(C_HEADER)

    if 'strip_whitespace' in flags:
        line = "{}\n".format(line.rstrip())

    sys.stdout.write(line)
Example #31
def query_data_XT(arg1):
    try:
        date = datetime.now().strftime("%Y%m")
        url = 'http://ec2-54-175-179-28.compute-1.amazonaws.com/get_csv_xitou.php?device_id=' + str(
            arg1) + '&year_month=' + str(date)
        # print("url:" + url)
        # Get the URL of the csv file
        csv_LINK_PATTERN = 'href="(.*)">Download'
        req = urllib.request.Request(url)
        html = urllib.request.urlopen(req)
        doc = html.read().decode('utf8')

        string1 = "'>Download ID" + str(arg1) + str(date) + " Data</a><br>"
        get_rul_patten = doc.strip(string1).strip("<a href='")
        file_name = str(arg1) + "_csvFid"
        server_path = "http://ec2-54-175-179-28.compute-1.amazonaws.com/" + get_rul_patten

        # Create the folder to save the csv file
        if not os.path.exists('./' + file_name):
            os.makedirs('./' + file_name)
        urllib.request.urlretrieve(server_path,
                                   './' + file_name + '/' + file_name + '.csv')

        # Create a dataframe from the URL by data crawling
        local_csv_pos = './' + file_name + '/' + file_name + '.csv'

        # perform a preprocessing of csv file
        with open(local_csv_pos, 'r') as f:
            global data_fir_line
            data_fir_line = len(f.readline())
            # print(data_fir_line)
        if data_fir_line < 230:
            for line in fileinput.input(local_csv_pos, inplace=1):
                if not fileinput.isfirstline():
                    print(line.replace('\n', ''))

        del_col = [0, 2, 4, 6, 8]
        csv_data = pd.read_csv(local_csv_pos,
                               sep=", |,, | = |= ",
                               header=None,
                               index_col=False,
                               engine='python')
        csv_data.drop(del_col, axis=1, inplace=True)

        colName = ['id', 'time', 'weather', 'air', 'acceleration']
        csv_data.columns = colName  # weather column (溫度、大氣壓力、濕度、風速、風向、雨量)

        last_uploadTime = csv_data.time[len(csv_data.time) - 1]
        last_uploadTime = pd.to_datetime(last_uploadTime,
                                         format="%Y%m%d%H%M%S")
        localTimeStamp = pd.to_datetime(strftime("%Y%m%d%H%M%S"),
                                        format="%Y%m%d%H%M%S")

        deltaT = localTimeStamp - last_uploadTime
        alrTimeIntv = timedelta(minutes=15)

        if deltaT > alrTimeIntv:
            deltaDay = deltaT.days
            deltaHr = deltaT.seconds // 3600
            deltaMin = (deltaT.seconds % 3600) // 60
            deltaSec = deltaT.seconds % 60
            outputStr = "Offline time: {} day, {} hr, {} min".format(
                deltaDay, deltaHr, deltaMin)
        else:
            outputStr = "Online"
    except:
        outputStr = "No data received this month"

    return outputStr
Example #32
def main():
    for line in fileinput.input():
        if fileinput.isfirstline():
            print(f'<beginning of file {fileinput.filename()}', end="")
            print(line, end="")
Example #33
#!/usr/bin/env python3                     # standard comment: lets the .py file run directly on Unix systems
# -*- coding: utf-8  -*-                   # standard comment: declares that the .py file uses UTF-8 encoding
#  'a test module'                            # module docstring: the first string literal in a module is treated as its documentation
__author__ = 'zxw'                     # author name
import sys
import os                                 # import modules
import fileinput

for line in fileinput.input():
    meta = [fileinput.filename(), fileinput.fileno(), fileinput.isfirstline()]
    print(*meta, end=" ")
    print(line, end=" ")
Example #34
def main():

    parser = argparse.ArgumentParser(description='VirTect: A pipeline for Virus detection')

    parser.add_argument('-t', '--n_thread', required = False, metavar = 'Number of threads, default: 8', default = '8', type = str, help ='Number of threads') 
    parser.add_argument('-1', '--fq1',  required = True, metavar = 'read1.fastq', type = str, help ='The read 1 of the paired end RNA-seq')
 
    parser.add_argument('-2', '--fq2',  required = True, metavar = 'read2.fastq', type = str, help ='The read 2 of the paired end RNA-seq')

    parser.add_argument('-o', '--out',  required = True, metavar = 'The output name for alignment', type = str, help ='Define the output directory where the alignment results are stored')

    parser.add_argument('-ucsc_gene', '--gtf',  required = True, metavar = 'gtf', type = str, help ='The input gtf file')

    parser.add_argument('-index', '--index_dir',  required = True, metavar = 'index files', type = str, help ='The directory of index files with hg38 prefix of the fasta file i.e,. index_files_directory/hg38')

    parser.add_argument('-index_vir', '--index_vir',  required = True, metavar = 'virus fasta', type = str, help ='The fasta file of the virus genomes')
   
    parser.add_argument('-d', '--distance', required = True, metavar = 'continuous_distance', type = int, help ='Define the continuous mapping distance of mapping reads to virus genome')


    args = parser.parse_args()
    
    fq1 = os.path.abspath(args.fq1)


    try:
        #f1=open(fq1,'r')
        os.path.isfile(fq1)
        f1=open(fq1,'r')
    except IOError:
        print('Error: There was no Read 1 FASTQ file!')
        sys.exit()


    fq2 = os.path.abspath(args.fq2)


    try:
        os.path.isfile(fq2)
        f2=open(fq2,'r')
    
    except IOError:
        print('Error: There was no Read 2 FASTQ file!')
        sys.exit()



    out = os.path.abspath(args.out)
    
 


    gtf = os.path.abspath(args.gtf)
   
    try:
        os.path.isfile(gtf)
        f2=open(gtf,'r')

    except IOError:
        print('Error: There was no GTF file!')
        sys.exit()   

    
    index_dir = os.path.abspath(args.index_dir)

    try:
        os.path.isfile(index_dir)
       # f4=open('hg38'+'."fa"','r')
    except IOError:
        print('Error: There was no fasta index directory!')
        sys.exit()
    
    


    index_vir = os.path.abspath(args.index_vir)

    #try:
     #   os.path.isfile(index_vir)
      #  f4=open(index_vir/viruses_757.fasta,'r')
    #except IOError:
     #   print('Error: There was no virus fasta index directory!')
      #  sys.exit()
    
    n_thread = args.n_thread

    distance = args.distance
    
    
    print ("Aligning by tophat")
    def alignment():
        cmd1='tophat -o '+out+' -p '+n_thread+' -G '+gtf+' '+index_dir+' '+fq1+'  '+fq2+''
        print 'Running ', cmd1
        os.system(cmd1)
    alignment()
        
    def bam2fastq():
        cmd2 ='samtools sort -n  '+out+'/unmapped.bam  -o '+out+'_sorted.bam' 
        print 'Running ', cmd2
        os.system(cmd2)    
        cmd3='bedtools bamtofastq -i  '+out+'_sorted.bam -fq  '+out+'_sorted_1.fq -fq2  '+out+'_sorted_2.fq'    
        print 'Running ', cmd3
        os.system(cmd3)
    bam2fastq()
 
    def bwa_alignment():
        cmd4= 'bwa mem '+index_vir+'  '+out+'_sorted_1.fq '+out+'_sorted_2.fq > '+out+'_aln.sam'
        print 'Running ', cmd4
        os.system(cmd4)
    bwa_alignment()
    
    def virus_detection():
        cmd5= 'samtools view -Sb -h '+out+'_aln.sam > '+out+'_aln.bam'
        print 'Running ', cmd5
        os.system(cmd5)

        cmd6= '''samtools view '''+out+"_aln.bam"+''' | cut -f3 | sort | uniq -c | awk '{if ($1>=400) print $0}' > '''+out+"_viruses_count.txt"+''' '''
        print 'Running ', cmd6
        os.system(cmd6)
    virus_detection() 
        
    def sort():
        cmd7= '''samtools sort '''+out+"_aln.bam"+'''  -o '''+out+"_aln_sorted.bam"+''' '''
        os.system(cmd7)
    sort()
    
    subprocess.call("./continuous_region.sh", shell=True)

    
    print ("The continuous length")
    file =open("continuous_region.txt", "r")

    out_put =open("Final_continous_region.txt", "w")
    
    if (os.fstat(file.fileno()).st_size) >0:
            for i in file.readlines():
                i1=i.split()[0]
                i2=i.split()[1]
                j1=i2.split("-")
                j2=int(j1[1])-int(j1[0])


                if j2 >= distance:
                    j3=i1 + "\t" +  str(j1[0]) + '\t' +  str(j1[1])
                    out_put.write('%s\n' % j3)
                   
                else:
                    pass
    else:
        pass 
    out_put.close()
        

    final_output=open("Final_continous_region.txt",'r')
    if (os.fstat(final_output.fileno()).st_size) >0:
        print ("----------------------------------------Note: The sample may have some real virus :(-----------------------------------------------------")
        headers = 'virus transcript_start transcript_end'.split()
        for line in fileinput.input(['Final_continous_region.txt'], inplace=True):
            if fileinput.isfirstline():
                print '\t'.join(headers)
            print line.strip()
    else:
        print ("----------------------------------------Note: There is no real virus in the sample :)-----------------------------------------------------")
Example #35
def main(args, fout=sys.stdout):
    if args.debug:
        logger.setLevel(logging.DEBUG)

    conn = pd.read_table(args.db)
    conn = conn.loc[:, ['Transcript stable ID', 'Gene type',
                        'Transcript type']].drop_duplicates()
    conn = conn.set_index(['Transcript stable ID'])

    max_warnings = 10
    w = 0
    c = 0
    n = 0
    no_header = True
    bad_chroms = set()
    for row in fileinput.input(args.annotation_file[0],
                               openhook=fileinput.hook_compressed):
        n = n + 1

        try:
            row = row.decode()
        except AttributeError:
            pass

        if row.startswith("#"):
            if fileinput.isfirstline():
                logger.debug("Header detected in genePred file.")
                no_header = False
            continue

        rowobj = Row(row, no_header)

        if not args.no_skip_random_chromosomes and \
            rowobj.is_on_random_chromosome():
            c = c + 1
            continue

        if rowobj.chromosome_contains_underscore():
            w = w + 1

            if rowobj.chrom not in bad_chroms:
                logger.warning("Skipping chromosome %s because it contains"
                               " underscores" % rowobj.chrom)
                bad_chroms.add(rowobj.chrom)
            continue

        # filter for only protein-coding genes
        try:
            result = conn.loc[get_stripped_name(rowobj.name)]
            if isinstance(result, pd.DataFrame):
                result = result.iloc[0, ]
            if not (result['Gene type'] == "protein_coding" and
                    result['Transcript type'] == "protein_coding"):
                c = c + 1
                continue
        except KeyError:
            c = c + 1
            continue

        bed = rowobj.extract_last_exon()

        if bed is not None:
            fout.write("\t".join([str(x) for x in bed]) + "\n")
        else:
            c = c + 1

    fileinput.close()
    if float(c) / float(n) > 0.75:
        logger.warning("%d/%d (%0.2f%%) were skipped. Are you using the "
              "correct database?" % (c, n, float(c)/float(n)))
Example #36
    def _save_diff_expression(self, result_directory, params):
        """
        _save_diff_expression: save DifferentialExpression object to workspace
        """

        logging.info(
            'start saving KBaseFeatureValues.DifferentialExpressionMatrix object'
        )

        workspace_name = params.get('workspace_name')
        diff_expression_obj_name = params.get('diff_expression_obj_name')

        destination_ref = workspace_name + '/' + diff_expression_obj_name

        diff_expr_files = list()

        for res_file in os.listdir(result_directory):
            if 'deseq_results.csv' not in res_file:
                continue
            condition_labels = res_file.replace('_deseq_results.csv',
                                                '').split('_vs_', 2)[:2]

            genes_results_filepath = os.path.join(result_directory, res_file)

            with open(genes_results_filepath, "r") as f:
                reader = csv.reader(f)
                columns = next(reader)[1:]

            columns[columns.index('log2FoldChange')] = 'log2_fold_change'
            columns[columns.index('pvalue')] = 'p_value'
            columns[columns.index('padj')] = 'q_value'
            for line in fileinput.input(genes_results_filepath, inplace=True):
                if fileinput.isfirstline():
                    print('gene_id,' + ','.join(columns))
                else:
                    # end='' avoids doubling the newline each row already carries
                    print(line, end='')

            reader = csv.DictReader(open(genes_results_filepath))

            diffexpr_filepath = genes_results_filepath.replace(
                'deseq_results.csv', 'differential_expression_result.csv')

            with open(diffexpr_filepath, 'w') as csvfile:
                fieldnames = [
                    'gene_id', 'log2_fold_change', 'p_value', 'q_value'
                ]
                writer = csv.DictWriter(csvfile, fieldnames=fieldnames)

                writer.writeheader()

                for row in reader:
                    writer.writerow({
                        'gene_id':
                        row.get('gene_id'),
                        'log2_fold_change':
                        row.get('log2_fold_change'),
                        'p_value':
                        row.get('p_value'),
                        'q_value':
                        row.get('q_value')
                    })

            diff_expr_files.append({
                'condition_mapping': {
                    condition_labels[0]: condition_labels[1]
                },
                'diffexpr_filepath': diffexpr_filepath
            })

        upload_diff_expr_params = {
            'destination_ref': destination_ref,
            'diffexpr_data': diff_expr_files,
            'tool_used': 'deseq',
            'tool_version': '1.16.1',
            'genome_ref': params['genome_ref']
        }

        deu_upload_return = self.deu.save_differential_expression_matrix_set(
            upload_diff_expr_params)

        diff_expression_obj_ref = deu_upload_return['diffExprMatrixSet_ref']

        return diff_expression_obj_ref
Example #37
    for i in range(len(matrix)):
        print(' '.join(map(str, matrix[i])))


countLine = 0
matrix1 = False
matrix2 = False
isCalculatedFirstMatrix = False
isCalculatedSecondMatrix = False
array1 = []
array2 = []

for line in fileinput.input():

    # Set first matrix dimensions
    if fileinput.isfirstline():
        matrix1 = True
        rows1 = int(line.split(' ')[0])
        cols1 = int(line.split(' ')[1])
        countLine = 1

    elif not fileinput.isfirstline() and countLine <= rows1 and not isCalculatedFirstMatrix:

        # Lock this condition for further lines
        if countLine == rows1:
            isCalculatedFirstMatrix = True

        # Remove special characters
        line = line.strip('\n')
        line = line.strip('\t')
Example #38
def run():
    parser = optparse.OptionParser()
    parser.add_option('--inputfile', action="store", dest="inputfile")
    parser.add_option('--ngrid', action="store", dest="ngrid")
    parser.add_option('--exeTime', action="store", dest="exeTime")
    parser.add_option('--exeQueue', action="store", dest="exeQueue")
    parser.add_option('--outfile_2d', action="store", dest="outfile_2d")
    parser.add_option('--outfile_multipole',
                      action="store",
                      dest="outfile_multipole")
    #    parser.add_option('--boxsize', action="store", dest="boxsize")
    parser.add_option('--line', action="store", dest="line")
    #    parser.add_option('--w', action="store", dest="w")
    #    parser.add_option('--omega_m', action="store", dest="omega_m")
    parser.add_option('--numNodes', action="store", dest="numNodes")
    parser.add_option('--numProcs', action="store", dest="numProcs")

    (options, args) = parser.parse_args()

    nodes = options.numNodes
    procs = options.numProcs

    numnodes = int(nodes)
    numprocs = int(procs)
    np = numnodes * numprocs

    toolsDir = "/global/project/projectdirs/hacc/PDACS/JK_Tools/"
    workingDir = "./"

    # inputArg = options.inputfile
    # split = os.path.splitext(options.inputfile)

    #get the name of the file - used to make the temp output files.
    workingName = options.outfile_2d.split("/")
    workingName = workingName[len(workingName) - 1]
    #outFile = "rs_pow.out"
    #workingDir + workingName
    #set the path to create files
    #if not os.path.exists(workingDir):
    #    os.makedirs(workingDir)

    outFile = workingDir + workingName + ".out"
    #write the pbs file to execute on Carver
    #os.chmod(outFile,S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP|S_IROTH|S_IWOTH)

    con = lite.connect(options.inputfile)
    cur = con.cursor()
    cur.execute("select value from metadata where name='box_size [Mpc/h]'")
    row = cur.fetchone()
    boxsize = float(row[0])
    cur.execute("select value from metadata where name='numFiles'")
    row = cur.fetchone()
    nfiles = float(row[0])
    cur.execute("select value from metadata where name='Omega_m'")
    row = cur.fetchone()
    omega_m = float(row[0])
    cur.execute("select value from metadata where name='w_de'")
    row = cur.fetchone()
    w = float(row[0])
    cur.execute("select value from metadata where name='Snapshot'")
    con.commit()
    row = cur.fetchone()
    snapshotname = row[0]

    #    print options.outfile
    infile = "/global/project/projectdirs/hacc/PDACS/Coyote/Grid/" + snapshotname
    #    print infile

    #pbsPath = create_pbs_file(nodes, procs, options.exeQueue, options.exeTime, toolsDir, workingDir, workingName, options.ngrid, infile, nfiles, outFile, boxsize, options.line, w, omega_m)
    pbsCmd = "module unload pgi; module load gcc; module load fftw; module load gsl; module load sqlite; srun -n %s %s/powerspec.out %s %s %s %s %s %s %s %s" % (
        numnodes, toolsDir, options.ngrid, infile, nfiles, outFile, boxsize,
        options.line, w, omega_m)
    print "CMD: " + pbsCmd
    print "CWD: " + os.getcwd()
    os.system(pbsCmd)

    #shutil.copyfile(outFile, options.outputfile)
    cmd = "sed -e 's/^[ \t]*//;s/[ \t]*$//' %s | tr -s ' ' '\t' > %s" % (
        outFile + ".2D", options.outfile_2d)
    os.system(cmd)
    headers = '#k mu P(k,mu)'.split()
    for line in fileinput.input([options.outfile_2d], inplace=True):
        if fileinput.isfirstline():
            print '\t'.join(headers)
        if len(line.strip()) > 0:
            print line,
    cmd = "sed -e 's/^[ \t]*//;s/[ \t]*$//' %s | tr -s ' ' '\t' > %s" % (
        outFile + ".multipole", options.outfile_multipole)
    os.system(cmd)
    headers = '#k monopole quadrupole'.split()
    for line in fileinput.input([options.outfile_multipole], inplace=True):
        if fileinput.isfirstline():
            print '\t'.join(headers)
        if len(line.strip()) > 0:
            print line,
    #os.remove(outFile+".2D")
    os.remove(outFile + ".multipole")
Example #39
    def check_files(self, files, verbose, max_line_length=79):
        logical_line = ""
        token = False

        # NOTE(mrodden): magic; replace with proper
        # report class when necessary
        report = self

        for fname in files:

            # reset world
            in_heredoc = False
            in_continuation = False

            # simple syntax checking, as files can pass style but still cause
            # syntax errors when you try to run them.
            check_syntax(fname, report)

            for line in fileinput.input(fname):
                if fileinput.isfirstline():

                    check_hashbang(line, fileinput.filename(), report)

                    if verbose:
                        print("Running vylint on %s" % fileinput.filename())

                # Don't run any tests on comment lines (but remember
                # inside a heredoc this might be part of the syntax of
                # an embedded script, just ignore that)
                if line.lstrip().startswith("#") and not in_heredoc:
                    continue

                # Strip trailing comments. From bash:
                #
                #   a word beginning with # causes that word and all
                #   remaining characters on that line to be ignored.
                #   ...
                #   A character that, when unquoted, separates
                #   words. One of the following: | & ; ( ) < > space
                #   tab
                #
                # for simplicity, we strip inline comments by
                # matching just '<space>#'.
                if not in_heredoc:
                    ll_split = line.split(" #", 1)
                    if len(ll_split) > 1:
                        line = ll_split[0].rstrip()

                # see if this starts a heredoc
                if not in_heredoc:
                    token = starts_heredoc(line)
                    if token:
                        in_heredoc = True
                        logical_line = [line]
                        continue

                # see if this starts a continuation
                if not in_continuation:
                    if is_continuation(line):
                        in_continuation = True
                        logical_line = [line]
                        continue

                # if we are in a heredoc or continuation, just loop
                # back and keep buffering the lines into
                # "logical_line" until the end of the
                # heredoc/continuation.
                if in_heredoc:
                    logical_line.append(line)
                    if not end_of_heredoc(line, token):
                        continue
                    else:
                        in_heredoc = False
                        # FIXME: if we want to do something with
                        # heredocs in the future, then the whole thing
                        # is now stored in logical_line.  for now,
                        # skip
                        continue
                elif in_continuation:
                    logical_line.append(line)
                    if is_continuation(line):
                        continue
                    else:
                        in_continuation = False
                else:
                    logical_line = [line]

                check_indents(logical_line, report)

                # at this point, logical_line is an array that holds
                # the whole continuation.  XXX : historically, we've
                # just handled every line in a continuation
                # separately.  Stick with what works...
                for line in logical_line:
                    check_no_trailing_whitespace(line, report)
                    check_no_long_lines(line, report, max_line_length)
                    check_for_do(line, report)
                    check_if_then(line, report)
                    check_function_decl(line, report)
                    check_arithmetic(line, report)
                    check_local_subshell(line, report)
                    check_bare_arithmetic(line, report)
                    check_conditional_expression(line, report)

        # finished processing the file

        # last line should always end with a newline
        if not line.endswith("\n"):
            report.print_error(MESSAGES["E004"].msg, line)
Example #40
# Copyright (C) 2020 ISIS Rutherford Appleton Laboratory UKRI
# SPDX - License - Identifier: GPL-3.0-or-later

import fileinput
import os
import sys

for root, dirs, files in os.walk('.'):
    for line in fileinput.input(
        (os.path.join(root, name) for name in files if name.endswith('.py')),
            inplace=True,
            # backup='.bak' # uncomment this if you want backups
    ):
        if fileinput.isfirstline(
        ) and line != "# Copyright (C) 2020 ISIS Rutherford Appleton Laboratory UKRI\n":
            sys.stdout.write(
                '# Copyright (C) 2020 ISIS Rutherford Appleton Laboratory UKRI\n# SPDX - License - '
                'Identifier: GPL-3.0-or-later\n\n')
            sys.stdout.write(line)
        else:
            sys.stdout.write(line)
Example #41
    while strr.find(substr) != -1:
        index = strr.find(substr)
        strr = strr[0:index] + strr[index + length:]
    return strr


# ******* BEGIN *******

if __name__ == "__main__":
    bow = {}

    expendable_words = stopwords.words('english')

    print("Creating entity hash table...")
    for line in fileinput.input(sys.argv[1]):
        if (not fileinput.isfirstline()) or (HEADER == 0):
            line = line.rstrip()
            t, r, e, A, B, C = line.split(",")
            r, A, B, C = int(r), int(A), int(B), int(C)
            if r == 1:
                e = remove_all("B-", e)
                e = remove_all("I-", e)
                words = extract_words(t)
                entities = extract_words(e)
                for word in words:
                    i = words.index(word)
                    aux = entities[i]
                    if (aux != 'o' and word not in expendable_words):
                        if word not in bow:
                            bow[word] = [0, 0, 0, 0, 0, r, 0, A, B, C]
                        else:
Example #42
        log_filename = "combine-" + str(nowTime) + ".log"
        fp_log = open(log_filename, "w")
    except Exception as e:
        print("create file {0} error\n".format(output_filename))

    file_linenum = []
    file_procecelineno = []
    # process lineno
    total_lineno = []
    valid_lineno = []
    start_addr = []
    length = []
    try:
        for line in fileinput.input(combine_filelist):
            #record each file's info
            if (fileinput.isfirstline()):
                print("process the file: {0}".format(fileinput.filename()))
                file_linenum.append(fileinput.lineno() - 1)
                file_procecelineno.append(process_lineno)
                start_addr.append(hex(process_lineno * 4))
            #process each line
            ProcessLine(fp, line)
        file_linenum.append(fileinput.lineno())
        file_procecelineno.append(process_lineno)

        for i in range(len(file_linenum) - 1):
            total_lineno.append(file_linenum[i + 1] - file_linenum[i])
            valid_lineno.append(file_procecelineno[i + 1] -
                                file_procecelineno[i])
            length.append(hex(valid_lineno[i] * 4))
Example #43
        dict_out['fname'] = first
    if middle and not dict_out['mname']:    # If middle name list is non-empty and ..
        dict_out['mname'] = middle[0]

    g_names = dict_data['g_name'].split()
    if g_names and len(g_names) > 1:
        g_last, g_first, g_middle = hack_names(*g_names)
        if not dict_out['g_lname']:
            dict_out['g_lname'] = g_last
        if not dict_out['g_fname']:
            dict_out['g_fname'] = g_first
        if g_middle and not dict_out['g_mname']:    # If middle name list is non-empty and ..
            dict_out['g_mname'] = g_middle[0]
    elif g_names and len(g_names) == 1:
        dict_out['g_lname'] = g_names[0]

    return dict_out

if __name__ == '__main__':
    for line in fileinput.input(openhook=fileinput.hook_encoded("latin-1")):
        if not fileinput.isfirstline():    # First line is header in the case of 245
            dic_line = dict(zip(INPUT_LAYOUT, line.strip('\n').strip('\r').split('|')))
            row = project_data(dic_line)
            try:
                engine.execute(
                    meta.tables['ins00'].insert().values(data=row)
                )
            except (psycopg2.DataError, sqlalchemy.exc.DataError, sqlalchemy.exc.StatementError, UnicodeDecodeError):
                print('Unable to insert data:')
                print(row)
                pass
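fileinput.hook_encoded, used here as the openhook, opens every input file with a fixed text encoding, so each line arrives already decoded to str. A minimal sketch (hypothetical file name):

import fileinput

# Lines are decoded from Latin-1 before they reach the loop body.
for line in fileinput.input(["records.txt"],
                            openhook=fileinput.hook_encoded("latin-1")):
    fields = line.strip('\n').strip('\r').split('|')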
Example #44
def generate_line_INDEL(fd_vcf):

    for line_vcf in fd_vcf:

        if fileinput.isfirstline():
            print(fileinput.filename())
            sys.stdout.flush()
        if line_vcf[0] == '#':
            continue
        l_vcf = line_vcf.split()
        ## skip SNPs
        if (l_vcf[3] in [
                'A',
                'C',
                'G',
                'T',
        ] and l_vcf[4] in [
                ## UnifiedGenotyper
                'A',
                'C',
                'G',
                'T',
                'A,C',
                'A,G',
                'A,T',
                'C,G',
                'C,T',
                'G,T',
                'A,C,G',
                'A,C,T',
                'A,G,T',
                'C,G,T',
                ## CombineVariants
                '.',
                'C,A',
                'G,A',
                'T,A',
                'G,C',
                'T,C',
                'T,G',
                'A,T,C',
                'T,C,A',
                'C,T,G',
                'A,T,G',
                'T,A,G',
                'C,T,A',
                'T,G,C',
                'G,C,T',
                ## UG GENOTYPE_GIVEN_ALLELES
                'C,G,A',
                'C,A,T',
                'G,A,C',
                'G,T,A',
                'G,T,C',
                'G,C,A',
                'A,G,C',
                'T,A,C',
                'T,G,A',
        ]):
            continue
        ## CombineVariants
        if l_vcf[4] == '.': continue
        ## CombineVariants
        if len(l_vcf[3]) == 2 and len(l_vcf[4]) == 2:
            continue
        ## CombineVariants
        if len(l_vcf[3]) == len(
                l_vcf[4]) and ',' not in l_vcf[3] and ',' not in l_vcf[4]:
            print(2, l_vcf[3], l_vcf[4])
            continue
        ## CombineVariants
        bool_continue = True
        if ',' not in l_vcf[3]:
            for s in l_vcf[4].split(','):
                if len(s) != len(l_vcf[3]):
                    bool_continue = False
                    break
        if bool_continue:
            print(3, l_vcf[3], l_vcf[4])
            continue

        yield line_vcf, l_vcf
Ejemplo n.º 45
0
def AddHeaders(header, intFile):
    for line in fileinput.input(intFile, inplace=True):
        if fileinput.isfirstline():
            print header,
        print line,
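
The same in-place header insertion, as a sketch in Python 3 syntax (assuming header already ends with a newline):

import fileinput

def add_headers_py3(header, int_file):
    for line in fileinput.input(int_file, inplace=True):
        if fileinput.isfirstline():
            print(header, end='')   # header assumed to carry its own newline
        print(line, end='')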
Ejemplo n.º 46
0
def main(dataset_path):
    global inside_code_block, last_valid_fence

    with open(dataset_path) as file:
        number_of_lines = sum(1 for _ in file)
        file.seek(0)

        for (repo_name, ) in tqdm.tqdm(csv.reader(file),
                                       total=number_of_lines):
            with tempfile.TemporaryDirectory() as temp_dir, util.pushd(
                    temp_dir):
                # noinspection PyBroadException
                try:
                    repo = git.Repo.clone_from(
                        f'git@github.com:{repo_name}.git',
                        '.',
                        depth=1,
                        origin='upstream')
                    markdown_paths = set(insensitive_glob('**/*.md', recursive=True)) \
                        | set(insensitive_glob('**/*.mkdn?', recursive=True)) \
                        | set(insensitive_glob('**/*.mdown', recursive=True)) \
                        | set(insensitive_glob('**/*.markdown', recursive=True))
                    markdown_paths = {
                        path
                        for path in markdown_paths if os.path.isfile(path)
                    }
                    if markdown_paths:  # Gets stuck otherwise
                        paths_with_crlf = crlf_paths(markdown_paths)
                        with fileinput.input(markdown_paths,
                                             inplace=True) as markdown_file:
                            use_crlf = False
                            for line in markdown_file:
                                if fileinput.isfirstline():
                                    inside_code_block = False
                                    last_valid_fence = None
                                    use_crlf = markdown_file.filename(
                                    ) in paths_with_crlf
                                if use_crlf and line and line[-1] == '\n':
                                    line = line[:-1] + '\r\n'
                                CODE_BLOCK_FENCE_BACK_TICKS_RE.sub(
                                    detect_code_block_back_ticks_fence, line)
                                CODE_BLOCK_FENCE_TILDES_RE.sub(
                                    detect_code_block_tildes_fence, line)
                                print(HEADING_WITHOUT_SPACE_RE.sub(
                                    heading_fix, line),
                                      end='')

                        if repo.index.diff(None):
                            repo.git.add('.')
                            repo.git.commit(m="Fix broken Markdown headings")

                            response = requests.post(
                                f'https://api.github.com/repos/{repo_name}/forks',
                                params=AUTH_PARAMS)
                            response_dict = json.loads(response.text)
                            if response.status_code == 202:
                                repo.create_remote(
                                    'origin', response_dict['ssh_url']).push()
                                create_pr(
                                    repo_name, response_dict["default_branch"],
                                    f'{response_dict["owner"]["login"]}:{response_dict["default_branch"]}'
                                )
                            else:
                                print(
                                    f"There was an error forking {repo_name}: {response_dict}"
                                )
                except Exception:
                    print(traceback.format_exc())
Ejemplo n.º 47
0
def erase_first_line(file_name):
    for line in fileinput.input(file_name, inplace=1):
        if not fileinput.isfirstline():
            print(line.replace('\n', ''))
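
Because inplace=1 redirects stdout into the file being read, skipping the print for the first line deletes it. A hypothetical round trip:

with open('data.csv', 'w') as f:
    f.write('header\nrow1\nrow2\n')

erase_first_line('data.csv')

with open('data.csv') as f:
    print(f.read())   # -> 'row1\nrow2\n', the header is gone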
Ejemplo n.º 48
0
#!/usr/bin/python
# coding:utf-8
# Read from files or standard input
"""
filename: name of the file currently being read;
fileno: file descriptor of the current file;
filelineno: line number of the current line within the current file;
isfirstline: whether the current line is the first line of the current file;
isstdin: whether fileinput is reading from a file or directly from standard input
"""

from __future__ import print_function
import fileinput
for line in fileinput.input():
    meta = [
        fileinput.filename(),
        fileinput.fileno(),
        fileinput.filelineno(),
        fileinput.isfirstline(),
        fileinput.isstdin()
    ]
    print(*meta, end="")
    print(line, end="")
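
A self-contained variant that creates its own inputs (hypothetical file names), so the meta calls can be observed without relying on command-line arguments:

import fileinput

for name, text in [('one.txt', 'a\nb\n'), ('two.txt', 'c\n')]:
    with open(name, 'w') as f:
        f.write(text)

for line in fileinput.input(['one.txt', 'two.txt']):
    print(fileinput.filename(), fileinput.filelineno(),
          fileinput.isfirstline(), fileinput.isstdin(), line, end='')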
Ejemplo n.º 49
0
def merge_DTI(folder_input, folder_output, name, fixed_dti_list=False):
    """
    Merge all the DTI files of a given subject.

    Only DTI folders containing all of the .nii.gz, .bval and .bvec files are
    considered for the merge; any other folder is ignored.

    Args:
        folder_input (str) : the folder containing all the DTI to merge.
        folder_output (str) : the folder where the merged file is stored.
        name (str): name to give to the merged file.

    Returns:
        -1 if the input folder doesn't contain any DTI folder.
        The list of incomplete DTI folders if some folders are missing bvec/bval/nii files.
    """

    from os import path
    from glob import glob
    import fileinput
    import os

    img = []
    bval = []
    bvec = []
    dti_list = []
    # The bvec output file has three rows that are the concatenation of all the merged bvec files
    lines_out_bvec = ['', '', '']
    # The bval output file has only one row that is the concatenation of all the merged bval files
    lines_out_bval = ['']

    if fixed_dti_list is not False:
        for dti in fixed_dti_list:
            dti_list.append(path.join(folder_input, dti))
    else:
        dti_list = remove_rescan(glob(path.join(folder_input, '*DTI*')))
    incomp_folders = []
    nr_dti = len(dti_list)
    if nr_dti == 0:
        return -1
    else:
        if not os.path.exists(folder_output):
            os.mkdir(folder_output)
        for folder in dti_list:
            if len(glob(path.join(folder, '*.bval'))) != 0 and len(
                    glob(path.join(folder, '*.bvec'))) != 0:
                img.append(glob(path.join(folder, '*.nii*'))[0])
                bval.append(glob(path.join(folder, '*.bval'))[0])
                bvec.append(glob(path.join(folder, '*.bvec'))[0])
            else:
                incomp_folders.append(folder)

        # if at least one complete DTI folder (with bvec, bval and nii.gz) has been found
        if len(img) > 0:
            file_suff = get_bids_suff('dwi')
            fin = fileinput.input(bval)
            # merge all the .nii.gz file with fslmerge
            os.system('fslmerge -t ' +
                      path.join(folder_output, name + file_suff + '.nii.gz') +
                      ' ' + " ".join(img))
            # merge all the .bval files
            fout = open(path.join(folder_output, name + file_suff + '.bval'),
                        'w')
            # Concatenate bval files
            for line in fin:
                if fileinput.isfirstline():
                    line_no = 0
                lines_out_bval[
                    line_no] = lines_out_bval[line_no] + " " + line.rstrip()
                line_no += 1
            for i in range(0, len(lines_out_bval)):
                lines_out_bval[i] = lines_out_bval[i].lstrip()

                fout.write(lines_out_bval[i] + "\n")

            # Concatenate bvec files
            fin = fileinput.input(bvec)
            fout = open(path.join(folder_output, name + file_suff + '.bvec'),
                        'w')
            for line in fin:
                if fileinput.isfirstline():
                    line_no = 0

                lines_out_bvec[
                    line_no] = lines_out_bvec[line_no] + " " + line.rstrip()
                line_no += 1
            for i in range(0, len(lines_out_bvec)):
                lines_out_bvec[i] = lines_out_bvec[i].lstrip()
                fout.write(lines_out_bvec[i] + "\n")

        if len(incomp_folders) > 0:
            return incomp_folders
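
The concatenation trick at the heart of this function is easier to see in isolation: isfirstline() resets the row counter, so row i of every input file lands on output row i. A sketch, with hypothetical file names:

import fileinput

def concat_rows(paths, n_rows):
    rows = [''] * n_rows
    line_no = 0
    for line in fileinput.input(paths):
        if fileinput.isfirstline():
            line_no = 0
        rows[line_no] = (rows[line_no] + ' ' + line.rstrip()).lstrip()
        line_no += 1
    return rows

# e.g. concat_rows(['a.bvec', 'b.bvec'], 3) for the three bvec rows,
#      concat_rows(['a.bval', 'b.bval'], 1) for the single bval row.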
Ejemplo n.º 50
0
'''
You can also use the fileinput module to obtain meta information about the
current line, including isfirstline, filename and lineno, as shown below.
'''

import fileinput
import glob
import sys

for line in fileinput.input(glob.glob("samples/sample.txt")):
    if fileinput.isfirstline():  # first in a file?
        sys.stderr.write("-- reading %s --\n" % fileinput.filename())
    sys.stdout.write(str(fileinput.lineno()) + " " + line.upper())
Ejemplo n.º 51
0
def talker():
    '''GNSS Publisher'''
    pub = rospy.Publisher('/position_real', Float32MultiArray, queue_size=10)
    rospy.init_node('gnss', anonymous=True)
    rate = rospy.Rate(10)  # 10hz
    while not rospy.is_shutdown():
        try:
            ser = serial.Serial('/dev/ttyUSB0', 9600)
        except Exception:
            print('open serial failed.')
            exit(1)
        while True:
            s = ser.readline()
            am = str(s).strip().split(",")
            if len(am) < 15:
                continue
            else:
                lon = float(am[11])
                lat = float(am[12])

                lon_save = 'lon.json'
                with open(lon_save, 'a') as lon_obj:
                    lon_obj.write('\n' + str(lon))
                count = len(open(lon_save, 'r').readlines())
                if count < 200:
                    pass
                else:
                    for line in fileinput.input('lon.json', inplace=1):
                        if not fileinput.isfirstline():
                            print(line.replace('\n', ''))
                    for line in fileinput.input('lon.json', inplace=1):
                        if not fileinput.isfirstline():
                            print(line.replace('\n', ''))
                lon_read = []
                with open(lon_save) as f:
                    for line in f:
                        if line.count('\n') == len(line):
                            pass
                        else:
                            lon_read.append(line.strip('\n'))
                lon_read = list(map(float, lon_read))
                if len(lon_read) < 20:
                    lon_publish = lon
                else:
                    lon_read.reverse()
                    lon_filter = lon_read[0:19]
                    lon_filter.remove(max(lon_filter))
                    lon_filter.remove(min(lon_filter))
                    lon_publish = sum(lon_filter) / len(lon_filter)

                lat_save = 'lat.json'
                with open(lat_save, 'a') as lat_obj:
                    lat_obj.write('\n' + str(lat))
                count = len(open(lat_save, 'r').readlines())
                if count < 200:
                    pass
                else:
                    for line in fileinput.input('lat.json', inplace=1):
                        if not fileinput.isfirstline():
                            print(line.replace('\n', ''))
                    for line in fileinput.input('lat.json', inplace=1):
                        if not fileinput.isfirstline():
                            print(line.replace('\n', ''))

                lat_read = []
                with open(lat_save) as f:
                    for line in f:
                        if line.count('\n') == len(line):
                            pass
                        else:
                            lat_read.append(line.strip('\n'))
                lat_read = list(map(float, lat_read))
                if len(lat_read) < 20:
                    lat_publish = lat
                else:
                    lat_read.reverse()
                    lat_filter = lat_read[0:19]
                    lat_filter.remove(max(lat_filter))
                    lat_filter.remove(min(lat_filter))
                    lat_publish = sum(lat_filter) / len(lat_filter)

                pos_1 = [lon_publish, lat_publish]  # publish the filtered values
                pos = Float32MultiArray(data=pos_1)
                pub.publish(pos)
                rospy.loginfo(pos.data)
                rate.sleep()
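
The smoothing applied to lon and lat above (keep the most recent samples, drop one max and one min, average the rest) can be factored into a small helper; a sketch under the same assumptions:

def trimmed_mean(samples, window=19, min_samples=20):
    """Average the last `window` samples after dropping one max and one min.

    Falls back to the latest raw sample while history is short."""
    if len(samples) < min_samples:
        return samples[-1]
    recent = sorted(samples[-window:])
    return sum(recent[1:-1]) / len(recent[1:-1])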
Ejemplo n.º 52
0
def get_alerts():
    """Get the set of VizAlerts from Tableau Server to check during this execution"""
    # package up the data from the source viz

    source_viz = vizalert.VizAlert(
        config.configs['vizalerts.source.viz'],
        config.configs['vizalerts.source.site'],
        config.configs['server.user'],
        config.configs['server.user.domain'])
    source_viz.view_name = 'VizAlerts Source Viz'
    source_viz.timeout_s = 30
    source_viz.force_refresh = True
    source_viz.data_retrieval_tries = 3

    log.logger.debug('Pulling source viz data down')

    try:
        source_viz.download_trigger_data()
        if len(source_viz.error_list) > 0:
            raise UserWarning(''.join(source_viz.error_list))

        results = source_viz.read_trigger_data()
        if len(source_viz.error_list) > 0:
            raise UserWarning(''.join(source_viz.error_list))

    except Exception as e:
        quit_script('Could not process source viz data from {} for the following reasons:<br/><br/>{}'.format(
            config.configs['vizalerts.source.viz'],
            e.message))

    # test for regex invalidity
    try:
        fieldlist = ('allowed_from_address','allowed_recipient_addresses','allowed_recipient_numbers')
        currentfield = ''
        currentfieldvalue = ''

        for line in results:
            for field in fieldlist:
                currentfield = field
                currentfieldvalue = line[field]
                re.compile('{}'.format(currentfieldvalue))
    except Exception as e:
        quit_script('Could not process source viz data from {} for the following reason:<br/><br/>'
                    'Invalid regular expression found. Could not evaluate expression \'{}\' in the field {}. Raw error:<br/><br/>{}'.format(
                        config.configs['vizalerts.source.viz'],
                        currentfieldvalue,
                        currentfield,
                        e.message))

    # retrieve schedule data from the last run and compare to current
    statefile = config.configs['schedule.state.dir'] + SCHEDULE_STATE_FILENAME

    # list of all alerts we've retrieved from the server that may need to be run
    alerts = []

    # list of alerts to write to the state file again
    persistalerts = []

    # final list of views to execute alerts for
    execalerts = []
    try:
        if not os.path.exists(statefile):
            f = codecs.open(statefile, encoding='utf-8', mode='w+')
            f.close()
    except IOError as e:
        errormessage = u'Invalid schedule state file: {}'.format(e.message)
        log.logger.error(errormessage)
        quit_script(errormessage)

    # Create VizAlert instances for all the alerts we've retrieved
    try:
        results = source_viz.read_trigger_data() # get the results again to start at the beginning
        for line in results:
            # build an alert instance for each line
            alert = vizalert.VizAlert(line['view_url_suffix'],
                                      line['site_name'],
                                      line['subscriber_sysname'],
                                      line['subscriber_domain'],
                                      line['subscriber_email'],
                                      line['view_name'])

            # Email actions
            alert.action_enabled_email = int(line['action_enabled_email'])
            alert.allowed_from_address = line['allowed_from_address']
            alert.allowed_recipient_addresses = line['allowed_recipient_addresses']

            # SMS actions
            alert.action_enabled_sms = int(line['action_enabled_sms'])
            alert.allowed_recipient_numbers = line['allowed_recipient_numbers']
            alert.from_number = line['from_number']
            alert.phone_country_code = line['phone_country_code']

            alert.data_retrieval_tries = int(line['data_retrieval_tries'])

            if line['force_refresh'].lower() == 'true':
                alert.force_refresh = True
            else:
                alert.force_refresh = False

            alert.alert_type = line['alert_type']

            if line['notify_subscriber_on_failure'].lower() == 'true':
                alert.notify_subscriber_on_failure = True
            else:
                alert.notify_subscriber_on_failure = False

            alert.viz_data_maxrows = int(line['viz_data_maxrows'])
            alert.viz_png_height = int(line['viz_png_height'])
            alert.viz_png_width = int(line['viz_png_width'])
            alert.timeout_s = int(line['timeout_s'])
            alert.task_thread_count = int(line['task_threads'])

            # alert
            alert.alert_type = line['alert_type']
            if line['is_test'].lower() == 'true':
                alert.is_test = True
            else:
                alert.is_test = False

            if line['is_triggered_by_refresh'].lower() == 'true':
                alert.is_triggered_by_refresh = True
            else:
                alert.is_triggered_by_refresh = False

            # subscription
            if line['customized_view_id'] == '':
                alert.customized_view_id = None
            else:
                alert.customized_view_id = line['customized_view_id']

            alert.owner_email = line['owner_email']
            alert.owner_friendly_name = line['owner_friendly_name']
            alert.owner_sysname = line['owner_sysname']
            alert.project_id = int(line['project_id'])
            alert.project_name = line['project_name']
            alert.ran_last_at = line['ran_last_at']
            alert.run_next_at = line['run_next_at']
            alert.schedule_frequency = line['schedule_frequency']

            if line['schedule_id'] == '':
                alert.schedule_id = -1
            else:
                alert.schedule_id = int(line['schedule_id'])

            alert.schedule_name = line['schedule_name']

            if line['priority'] == '':
                alert.priority = -1
            else:
                alert.priority = int(line['priority'])

            if line['schedule_type'] == '':
                alert.schedule_type = -1
            else:
                alert.schedule_type = int(line['schedule_type'])

            alert.site_id = int(line['site_id'])
            alert.subscriber_license = line['subscriber_license']
            alert.subscriber_email = line['subscriber_email']
            alert.subscriber_user_id = int(line['subscriber_user_id'])
            alert.subscription_id = int(line['subscription_id'])
            alert.view_id = int(line['view_id'])
            alert.view_name = line['view_name']
            alert.view_owner_id = int(line['view_owner_id'])
            alert.workbook_id = int(line['workbook_id'])
            alert.workbook_repository_url = line['workbook_repository_url']

            # all done, now add it to the master list
            alerts.append(alert)
    except Exception as e:
        errormessage = u'Error instantiating alerts from list obtained from server: {}'.format(e)
        log.logger.error(errormessage)
        quit_script(errormessage)

    # now determine which actually need to be run now
    try:
        for line in fileinput.input([statefile]):
            if not fileinput.isfirstline():
                linedict = {}
                linedict['site_name'] = line.split('\t')[0]
                linedict['subscription_id'] = line.split('\t')[1]
                linedict['view_id'] = line.split('\t')[2]
                linedict['customized_view_id'] = line.split('\t')[3]
                linedict['ran_last_at'] = line.split('\t')[4]
                linedict['run_next_at'] = line.split('\t')[5]
                linedict['schedule_id'] = line.split('\t')[6].rstrip()  # remove trailing line break
                for alert in alerts:
                    # subscription_id is our unique identifier
                    if str(alert.subscription_id) == str(linedict['subscription_id']):

                        # preserve the last time the alert was scheduled to run
                        alert.ran_last_at = str(linedict['ran_last_at'])

                        # if the run_next_at date is greater for this alert since last we checked, mark it to run now
                        # the schedule condition ensures the alert doesn't run simply due to a schedule switch
                        # (note that CHANGING a schedule will still trigger the alert check...to be fixed later)
                        if (
                                (datetime.datetime.strptime(str(alert.run_next_at), "%Y-%m-%d %H:%M:%S") \
                                         > datetime.datetime.strptime(str(linedict['run_next_at']),
                                                                       "%Y-%m-%d %H:%M:%S") \
                                         and str(alert.schedule_id) == str(linedict['schedule_id']))
                                # test alerts are handled differently
                                and not alert.is_test
                        ):

                                # For a test, run_next_at is anchored to the most recent comment, so use it as last run time
                                if alert.is_test:
                                    alert.ran_last_at = alert.run_next_at
                                else:
                                    alert.ran_last_at = datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")

                                seconds_since_last_run = \
                                    abs((
                                            datetime.datetime.strptime(str(linedict['ran_last_at']),
                                                                       "%Y-%m-%d %H:%M:%S") -
                                            datetime.datetime.utcnow()
                                        ).total_seconds())

                                execalerts.append(alert)

                        # else use the ran_last_at field, and write it to the state file? I dunno.

                        # add the alert to the list to write back to our state file
                        persistalerts.append(alert)

        # add NEW subscriptions that weren't in our state file
        # this is ugly, I know...sorry. someday I'll be better at Python.
        persist_sub_ids = []
        for alert in persistalerts:
            persist_sub_ids.append(alert.subscription_id)
        for alert in alerts:
            if alert.subscription_id not in persist_sub_ids:
                # if this is a test alert, and we haven't seen it before, run that puppy now!
                if alert.is_test:
                    execalerts.append(alert)
                persistalerts.append(alert)

        # write the next run times to file
        with codecs.open(statefile, encoding='utf-8', mode='w') as fw:
            fw.write('{}\t{}\t{}\t{}\t{}\t{}\t{}\n'.format("site_name", "subscription_id", "view_id",
                                                           "customized_view_id", "ran_last_at", "run_next_at",
                                                           "schedule_id"))
            for alert in persistalerts:
                fw.write('{}\t{}\t{}\t{}\t{}\t{}\t{}\n'.format(alert.site_name, alert.subscription_id,
                                                               alert.view_id, alert.customized_view_id,
                                                               alert.ran_last_at, alert.run_next_at,
                                                               alert.schedule_id))
    except IOError as e:
        errormessage = u'IOError accessing {} while getting views to process: {}'.format(e.filename, e.message)
        log.logger.error(errormessage)
        quit_script(errormessage)
    except Exception as e:
        errormessage = u'Error accessing {} while getting views to process: {}'.format(statefile, e)
        log.logger.error(errormessage)
        quit_script(errormessage)

    return execalerts
Ejemplo n.º 53
0
def add_headers(files, header):
    for line in fileinput.input(files, inplace=True):
        if fileinput.isfirstline():
            for h in header.splitlines():
                print(h)
        print(line, end="")
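
Hypothetical usage, prepending a two-line header to each listed file:

add_headers(['a.tsv', 'b.tsv'], 'generated by pipeline\nK\tPK')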
Ejemplo n.º 54
0
Archivo: fp.py Proyecto: wuguang00/py
#!/usr/bin/python
import fileinput
for line in fileinput.input():
    print(fileinput.isfirstline())
    print(line)  # line keeps its trailing newline, so print() adds a blank line
    print(fileinput.filename())
    print(fileinput.filelineno())
#grep "Firm" reads.sintax > firmReads.sintax
#cat firmReads.sintax |tr '\t' ' '| cut -d' ' -f1 > otuIDX.txt
#python randomOTUSeq.py otuIDX.txt OTU.fasta > subsetOTU
import sys
import random
import fileinput

firmOTUs = []
with open(sys.argv[1], "r") as firm:
    for line in firm:
        line = line.replace('\n', '')
        firmOTUs.append(line)
    rndSampFirmOTUs = random.sample(firmOTUs, 10)

for line in fileinput.input(sys.argv[2], inplace=True):
    if '>' not in line and not fileinput.isfirstline():
        line = line.replace('\n', '')
    elif not fileinput.isfirstline():
        line = '\n' + line
    print(f'{line}', end="")

with open(sys.argv[2], "r") as OTU:
    rndOTUIDX = []
    for i, line in enumerate(OTU, 1):
        line = line.replace('\n', '')
        if '>' in line and line[1:] in rndSampFirmOTUs:
            rndOTUIDX.append(i)
            print(line)
        if i - 1 in rndOTUIDX:
            j = 0
            print(line)
Ejemplo n.º 56
0
import networkx as nx
import matplotlib.pyplot as plt
import fileinput

G=nx.Graph()

for line in fileinput.input():
    if fileinput.isfirstline():
        data = line.split()
        m = int(data[1])
    if not fileinput.isfirstline():
        numbers = line.split()
        u = numbers[0]
        v = numbers[1]
        G.add_edge(u, v)
        m = m - 1
        if m == 0:
            break

print(list(G.nodes()))
print(list(G.edges()))
nx.draw(G, pos=nx.spring_layout(G))
plt.show()
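
The loop assumes an edge-list format where the first line carries the node and edge counts and each following line names one edge; a hypothetical input and invocation:

sample = '4 3\n1 2\n2 3\n3 4\n'   # 4 nodes, 3 edges (assumed format)
with open('graph.txt', 'w') as f:
    f.write(sample)
# python thisscript.py graph.txt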
Ejemplo n.º 57
0
def main_work():

    #################################################

    # root is one level below this file in directory structure, ie. below the 'scripts' folder
    ROOT = os.path.split(
        os.path.realpath(
            os.path.abspath(
                os.path.dirname(inspect.getfile(
                    inspect.currentframe())))))[0] + '/'

    dirs = {
        'ROOT': ROOT,
        'CONFIG': ROOT + "configs/",
        'VOICES': ROOT + "voices/",
        'TRAIN': ROOT + "train/",
        'RULES': ROOT + "rules/",
        'CORPUS': ROOT + "corpus/",
        'BIN': ROOT + "/tools/bin/"
    }

    # ======== Get stuff from command line ==========

    a = ArgumentParser()
    a.add_argument('-s', dest='speaker', required=True, \
                    help= "the name of the speaker: <ROOT>/corpus/<LANG>/<SPEAKER>")
    a.add_argument('-l', dest='lang', required=True, \
                    help= "the language of the speaker: <ROOT>/corpus/<LANG>")
    a.add_argument('-o', dest='output', required=False, default=False, \
                    help= "output audio here")
    a.add_argument('-t', dest='stage', required=False, default="runtime", \
                    help=""" defines the current usage stage 
                            (definitions of stages should by found in <config>/recipe.cfg""")
    a.add_argument('-play', dest='play', action="store_true", required=False, default=False, \
                    help=" play audio after synthesis")
    a.add_argument('-lab', dest='make_label', action="store_true", default=False, \
                    help= "make label file as well as wave in output location")
    a.add_argument('config',
                   help="""configuration to use: naive, semi-naive, gold, 
                                    as defined in <ROOT>/recipes/<config> -directory"""
                   )
    a.add_argument('-bin', dest='custom_bindir')
    a.add_argument('files',
                   nargs='*',
                   help="text files to speak, reading from stdin by default")
    opts = a.parse_args()

    if opts.custom_bindir is not None:
        dirs['BIN'] = opts.custom_bindir

    voice_location = os.path.join(dirs['VOICES'], opts.lang, opts.speaker,
                                  opts.config)
    train_location = os.path.join(dirs['TRAIN'], opts.lang, "speakers",
                                  opts.speaker, opts.config)
    config_path = os.path.join(dirs['CONFIG'], opts.config)
    voice_config = os.path.join(config_path, fname.RECIPE)

    ## Make Voice object to contain voice elements trained on this corpus:
    voice = Voice(opts.speaker, opts.lang, opts.config, opts.stage, dirs)

    if not opts.output:
        output_wavefile = os.path.join(voice_location, 'output', 'wav',
                                       'temp.wav')
    else:
        output_wavefile = opts.output

    if not opts.output:
        output_labfile = None
    else:
        output_labfile = output_wavefile.replace('.wav', '.lab')

    prevspace = False
    para = []
    # Go through the files a paragraph at a time, unless it's SSML in which case we parse it
    # An empty line marks the change of paragraphs in plain text files
    for line in fileinput.input(opts.files):
        line = line.decode('utf-8').rstrip()
        t = start_clock('Synthesise sentence')
        print line
        if fileinput.isfirstline():
            if para != []:
                voice.synth_utterance(''.join(para), output_wavefile=output_wavefile, \
                             output_labfile=output_labfile)
                if opts.play:
                    os.system('play ' + output_wavefile)
                para = []
            line = line.lstrip()
            if line.startswith('<speak') or line.startswith('<xml'):
                tree = etree.parse(fileinput.filename())
                parseSSML(tree, voice)
                fileinput.nextfile()
            else:
                para.append(line)
        elif not line:  # blank line (rstripped to '') marks a paragraph break
            prevspace = True
        elif prevspace and para != []:
            voice.synth_utterance(''.join(para), output_wavefile=output_wavefile, \
                             output_labfile=output_labfile)
            prevspace = False
            para = [line]
        else:
            para.append(line)

    if para != []:
        voice.synth_utterance(''.join(para), output_wavefile=output_wavefile, \
                             output_labfile=output_labfile)
        if opts.play:
            os.system('play ' + output_wavefile)
    stop_clock(t)
Ejemplo n.º 58
0
def add_header(input_fpath, header):
    for line in fileinput.input(files=[input_fpath], inplace=True):
        if fileinput.isfirstline():
            print header
        print line,
Ejemplo n.º 59
0
    def check_files(self, files, verbose):
        in_multiline = False
        multiline_start = 0
        multiline_line = ""
        logical_line = ""
        token = False
        prev_file = None
        prev_line = ""
        prev_lineno = 0

        # NOTE(mrodden): magic; replace with proper
        # report class when necessary
        report = self

        for fname in files:
            for line in fileinput.input(fname):
                if fileinput.isfirstline():
                    # if in_multiline when the new file starts then we didn't
                    # find the end of a heredoc in the last file.
                    if in_multiline:
                        report.print_error(messages.E012,
                                           multiline_line,
                                           filename=prev_file,
                                           filelineno=multiline_start)
                        in_multiline = False

                    # last line of a previous file should always end with a
                    # newline
                    if prev_file and not prev_line.endswith('\n'):
                        report.print_error(messages.E004,
                                           prev_line,
                                           filename=prev_file,
                                           filelineno=prev_lineno)

                    prev_file = fileinput.filename()

                    if verbose:
                        print("Running bashate on %s" % fileinput.filename())

                # NOTE(sdague): multiline processing of heredocs is interesting
                if not in_multiline:
                    logical_line = line
                    token = starts_multiline(line)
                    if token:
                        in_multiline = True
                        multiline_start = fileinput.filelineno()
                        multiline_line = line
                        continue
                else:
                    logical_line = logical_line + line
                    if not end_of_multiline(line, token):
                        continue
                    else:
                        in_multiline = False

                # Don't run any tests on comment lines
                if logical_line.lstrip().startswith('#'):
                    prev_line = logical_line
                    prev_lineno = fileinput.filelineno()
                    continue

                # Strip trailing comments. From bash:
                #
                #   a word beginning with # causes that word and all
                #   remaining characters on that line to be ignored.
                #   ...
                #   A character that, when unquoted, separates
                #   words. One of the following: | & ; ( ) < > space
                #   tab
                #
                # for simplicity, we strip inline comments by
                # matching just '<space>#'.
                ll_split = logical_line.split(' #', 1)
                if len(ll_split) > 1:
                    logical_line = ll_split[0].rstrip()

                check_no_trailing_whitespace(logical_line, report)
                check_indents(logical_line, report)
                check_for_do(logical_line, report)
                check_if_then(logical_line, report)
                check_function_decl(logical_line, report)
                check_arithmetic(logical_line, report)

                prev_line = logical_line
                prev_lineno = fileinput.filelineno()
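
starts_multiline and end_of_multiline are defined elsewhere in bashate; the sketch below is only a guess at the heredoc tracking they perform, not the project's actual code:

import re

HEREDOC_RE = re.compile(r'<<-?\s*[\'"]?(\w+)[\'"]?')

def starts_multiline(line):
    """Return the heredoc terminator token if the line opens one, else False."""
    match = HEREDOC_RE.search(line)
    return match.group(1) if match else False

def end_of_multiline(line, token):
    return line.strip() == token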
Ejemplo n.º 60
0
def run():
    parser = optparse.OptionParser()
    parser.add_option('--inputfile', action="store", dest="inputfile")
    parser.add_option('--ngrid', action="store", dest="ngrid")
    parser.add_option('--outfile', action="store", dest="outfile")
    parser.add_option('--numNodes', action="store", dest="numNodes")
    parser.add_option('--numProcs', action="store", dest="numProcs")
    parser.add_option('--exeTime', action="store", dest="exeTime")
    parser.add_option('--exeQueue', action="store", dest="exeQueue")

    (options, args) = parser.parse_args()

    nodes = options.numNodes
    procs = options.numProcs

    ngrid = options.ngrid
    numnodes = int(nodes)
    numprocs = int(procs)

    toolsDir = "/global/project/projectdirs/hacc/PDACS/JK_Tools"

    #get the name of the file - used to make the temp output files.
    workingName = options.outfile.split("/")
    workingName = workingName[len(workingName) - 1]

    outFile = "./" + workingName + ".out"

    con = lite.connect(options.inputfile)
    cur = con.cursor()
    cur.execute("select value from metadata where name='box_size [Mpc/h]'")
    row = cur.fetchone()
    boxsize = float(row[0])
    cur.execute("select value from metadata where name='numFiles'")
    row = cur.fetchone()
    nfiles = float(row[0])
    cur.execute("select value from metadata where name='Snapshot'")
    con.commit()
    row = cur.fetchone()
    snapshotname = row[0]

    #print options.outfile
    inFile = "/global/project/projectdirs/hacc/PDACS/Coyote/Grid/" + snapshotname
    # print inFile

    #write the pbs file to execute on Carver
    #pbsCmd = "module unload pgi; module load gcc; module load fftw; module load gsl; module load sqlite; mpirun -n %d %s/powerspec.out %s %s %f %s %f" % (numnodes*numprocs, toolsDir, ngrid, options.inputfile, nfiles, outFile, boxsize)
    pbsCmd = "module load fftw; module load gsl; module load sqlite;srun -n %d  %s/powerspec.out %s %s %f %s %f" % (
        numnodes, toolsDir, ngrid, inFile, nfiles, outFile, boxsize)
    os.system(pbsCmd)
    print "CMD: " + pbsCmd
    print "CWD: " + os.getcwd()

    cmd = "sed 's/^[ \t]*//' %s | tr -s ' ' '\t' > %s" % (outFile,
                                                          options.outfile)
    os.system(cmd)
    headers = '#K PK'.split()
    for line in fileinput.input([options.outfile], inplace=True):
        if fileinput.isfirstline():
            print '\t'.join(headers)
        if len(line.strip()) > 0:
            print line,  # line already ends with a newline

    os.remove(outFile)