def draw_metro_OD_gridmap():
	lnglat_dict = {}
	for line in fileinput.input("../../data/data_exp/metroSpatial.csv"):
		station, lng, lat = line.strip().split(',')
		lnglat_dict[station] = (float(lng), float(lat))
	fileinput.close()
	produce = [{} for h in xrange(24)]
	consume = [{} for h in xrange(24)]
	for line in fileinput.input("../../data/data_toy/2015_04_01_metro.txt"):
		card_id, time_start, loc_start, time_finish, loc_finish = line.strip().split(',')
		# drop the line-name prefix (e.g. "1号线" = "Line 1"), keeping just the station name
		loc_start, loc_finish = loc_start.split("号线")[1].strip(), loc_finish.split("号线")[1].strip()
		hour_start, hour_finish = int(time_start.split(' ')[1].split(':')[0]), int(time_finish.split(' ')[1].split(':')[0])
		produce[hour_start][loc_start] = produce[hour_start].get(loc_start,0)+1
		consume[hour_finish][loc_finish] = consume[hour_finish].get(loc_finish,0)+1
	fileinput.close()
	for (_name, _matrix) in [('O', produce),('D', consume)]:
		for _h in xrange(5,24):
			fig, ax = plt.subplots(1,subplot_kw={'xticks': [], 'yticks': []})
			image = plt.imread('../../result/_map_bounds/shanghai_nokia.png')
			ax.imshow(image)
			_min, _max = min(_matrix[_h].values()), max(_matrix[_h].values())
			for _location, _count in _matrix[_h].iteritems():
				_lng, _lat = lnglat_dict[_location]
				if inbound(_lng, _lat):
					circle = plt.Circle(cord2XY(_lng,_lat), 1.*(_count-_min)/(_max-_min)*15+2, fc=_color_orange_list[10]['color'], alpha=0.5, linewidth=0)
					ax.add_patch(circle)
			plt.savefig("../../result/distribution_OD/metro_{}/shanghai_metro_{}_{}h_nokia.png".format(_name,_name,_h))
Example #2
def mafConvert(opts, args):
    format = args[0].lower()
    if isFormat(format, "sam"):
        if opts.dictionary: d = readSequenceLengths(fileinput.input(args[1:]))
        else: d = {}
        if opts.readgroup:
            readGroupItems = opts.readgroup.split()
            rg = "RG:Z:" + readGroupId(readGroupItems)
        else:
            readGroupItems = []
            rg = ""
        if not opts.noheader: writeSamHeader(d, opts.dictfile, readGroupItems)
    inputLines = fileinput.input(args[1:])
    if isFormat(format, "html") and not opts.noheader: writeHtmlHeader()
    isKeepCommentLines = isFormat(format, "tabular") and not opts.noheader
    oldQueryName = ""
    for maf in mafInput(inputLines, isKeepCommentLines):
        if   isFormat(format, "axt"): writeAxt(maf)
        elif isFormat(format, "blast"):
            writeBlast(maf, opts.linesize, oldQueryName)
            oldQueryName = maf.seqNames[1]
        elif isFormat(format, "html"): writeHtml(maf, opts.linesize)
        elif isFormat(format, "psl"): writePsl(maf, opts.protein)
        elif isFormat(format, "sam"): writeSam(maf, rg)
        elif isFormat(format, "tabular"): writeTab(maf)
        else: raise Exception("unknown format: " + format)
    if isFormat(format, "html") and not opts.noheader: print("</body></html>")
Example #3
    def parse(self, file_location):
        """Loads Kv data into memory
        Args:
            file_location (str): Path of file
        Returns:
            dict: name : numpy array of events
        """

        # This loop both sets first_line and finds the number of lines in the file
        for file_length, line in enumerate(fileinput.input(file_location)):
            if file_length == 0:
                first_line = line
        fileinput.close()


        parsed = {}

        # allocate one array per column, keyed by the name left of "="
        for field in first_line.split(","):
            parsed[field.split("=")[0]] = numpy.zeros(shape=file_length+1, dtype="float64")

        for index, line in enumerate(fileinput.input(file_location)):
            for pair in line.strip("\n").split(","):
                name, value = pair.split("=")[0], pair.split("=")[1]
                parsed[name][index] = numpy.float64(value)
        fileinput.close()

        return parsed
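For reference, a minimal sketch of the input this parser assumes: one event per line, as comma-separated name=value pairs (the file name and field names below are hypothetical).

# Hypothetical events.csv:
#   px=1.0,py=2.0,pz=3.0
#   px=4.0,py=5.0,pz=6.0
# parse("events.csv") then returns
#   {"px": array([1., 4.]), "py": array([2., 5.]), "pz": array([3., 6.])}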
def draw_taxi_gravity_model_coef_gridmap():
	lnglat_dict = {}
	for line in fileinput.input("../../data/data_exp/taxiSpatial.csv"):
		station, lng, lat = line.strip().split(',')
		lnglat_dict[station] = (float(lng), float(lat))
	fileinput.close()
	_matrix = [[0 for y in range(_grid_height)] for x in range(_grid_width)]
	for line in fileinput.input("../../data/statistics/intermediate/gravity_model_coef_taxi.txt"):
		_station, _lng, _lat, _coef1, _coef2 = line.strip().split(',')
		_gx, _gy = [int(_cord) for _cord in _station.split('_')]
		_matrix[_gx][_gy] = float(_coef1)
		# _matrix[_gx][_gy] = float(_coef2)
	fileinput.close()
	fig, ax = plt.subplots(1,subplot_kw={'xticks': [], 'yticks': []})
	image = plt.imread('../../result/_map_bounds/shanghai_nokia.png')
	ax.imshow(image)
	_slice = np.array(_matrix)
	_min, _max = _slice.min(), _slice.max()
	for x in range(_grid_width):
		for y in range(_grid_height):
			cube_color = get_color(_color_orange_list, _min, _max, _matrix[x][y])
			if cube_color != '#ffffff':
				cube = plt.Rectangle(gridXY2mapXY(x,y), _cube_width, _cube_height, fc=cube_color, alpha=0.6, linewidth=0)
				ax.add_patch(cube)
	plt.show()
def run(target_dir, inplaceFlag=0):
    global showLogoCount, alreadyChanged, showLogoChanged
    for root, dirs, files in os.walk(target_dir):
        for file in files:
            if file.endswith('.jsp') and (file.lower() in teoconstants.uipgms):
                if inplaceFlag == 0:   #improve performance
                    f = fileinput.input(os.path.join(root, file), inplace=inplaceFlag, openhook=fileinput.hook_encoded('utf-8'))
                elif inplaceFlag == 1:
                    f = fileinput.input(os.path.join(root, file), inplace=inplaceFlag)

                for i, line in enumerate(f):
                    if(re.search('posui:showLogo', line, re.IGNORECASE)):
                        showLogoCount += 1
                        if(re.search('isMultiLang',line)):
                           alreadyChanged += 1
                        else:
                            showLogoChanged += 1
                            line = line.replace('posui:showLogo','posui:showLogo isMultiLang="true"')

                            if inplaceFlag == 0:
                                sys.stdout.write(file+' : '+line)
                    if inplaceFlag == 1:
                        sys.stdout.write(line)

                f.close()
def gae(dev=False):
    setup()
    shutil.copy(root/'demo'/'routes.py', web2py_location)

    # Add demo passwords
    src = open(root/'demo'/'user.html', 'r')
    dest = web2py_demo_app/'views'/'plugin_instant_admin'/'user.html'
    for line in fileinput.input(dest, inplace=1):
        print line,
        if line.startswith('{{block auth}}'):
            print src.read()
    src.close()

    # Add welcome message
    src = open(root/'demo'/'sidebar.html', 'r')
    dest = web2py_demo_app/'views'/'plugin_instant_admin'/'layout.html'
    for line in fileinput.input(dest, inplace=1):
        print line,
        if line.strip() == '{{#extra-sidebar}}':
            print src.read()
    src.close()

    # Add feedback widget
    src = open(root/'demo'/'layout.html', 'r')
    dest = web2py_demo_app/'views'/'plugin_instant_admin'/'layout.html'
    for line in fileinput.input(dest, inplace=1):
        if line.startswith('</body>'):
            print src.read()
        print line,
    src.close()

    if dev:
        local('dev_appserver.py .')
    else:
        local('appcfg.py update .')
 def __init__(self):
   """
   Constructor.
   Put your initialization code here.
   """
   self.debug = True
   self.debug2 = True
   self.banned_ports = set()
   for line in fileinput.input('/root/pox/ext/banned-ports.txt'):
       portNumber = int(line.rstrip())
       log.debug(portNumber)
       self.banned_ports.add(portNumber)
   self.banned_domains = set()
   for line in fileinput.input('/root/pox/ext/banned-domains.txt'):
       domain = str(line.rstrip())  # strip the trailing newline
       log.debug(domain)
       self.banned_domains.add(domain)
   self.monitered_strings = set() # (address, search_string)
   for line in fileinput.input('/root/pox/ext/monitored-strings.txt'):
       temp = str(line.rstrip())
       # split on the first colon only, in case the search string contains ':'
       address, search_string = temp.split(':', 1)
       log.debug(address + ':' + search_string)
       self.monitered_strings.add((address, search_string))
   self.counts = dict() # key: (address, search_string, srcport, dstport), value: number of times the search_string appears
   self.countsIncomingbuffer = dict() # key: (address, srcport, dstport), value: string (initialized to empty string) 
   self.countsOutgoingbuffer = dict() # key: (address, srcport, dstport), value: string (initialized to empty string)
   self.countsBuffetSize = dict() # key: address, value: len(LongestString) -1
   self.timers = dict() # a timer for each TCP connection. key: (address, srcport, dstport) Value: Timers (initialized to 30.0 seconds)
   self.timersStatus = dict() # a status of timer for each TCP connection key: same as above Value: True when timer is on, false otherwise
   log.debug("Firewall initialized.")
   self.timerInitiated = 0
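From the parsing above, the three config files are expected to be line-oriented, with monitored-strings.txt holding one address:search_string pair per line (all values below are hypothetical).

# banned-ports.txt      banned-domains.txt       monitored-strings.txt
#   4444                  evil.example.com         10.0.0.1:password
#   6667                  tracker.example.org      10.0.0.2:login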
Example #8
 def set_version_in_app_yaml(branch="master", force=False):
     try:
         for line in fileinput.input('app.yaml', inplace=True):
             if line.startswith("version: "):
                 if branch == 'master':
                     if force:
                         print "version: master"
                     else:
                         print "version: master" + datetime.datetime.utcnow().strftime('%Y%m%d%H%M')
                 else:
                     print "version: " + branch
             else:
                 print line,
         print 'Set version in app.yaml'
      except:  # if the in-place edit fails, retry it once
         for line in fileinput.input('app.yaml', inplace=True):
             if line.startswith("version: "):
                 if branch == 'master':
                     if force:
                         print "version: master"
                     else:
                         print "version: master" + datetime.datetime.utcnow().strftime('%Y%m%d%H%M')
                 else:
                     print "version: " + branch
             else:
                 print line,
         print 'Set version in app.yaml'
Example #9
def swap_lines(line_search_1, line_search_2, filepath):
    """
    Swap lines in file, if line_search1 before line_search2
    :param filepath:
    :param line_search_2:
    :param line_search_1:
    """
    count = 0
    count1 = 0
    count2 = 0
    for line in fileinput.input(filepath):
        count += 1
        if line_search_1.strip() in line.strip():
            count1 = count
        elif line_search_2.strip() in line.strip():
            count2 = count
    fileinput.close()

    if 0 < count1 < count2:
        for line in fileinput.input(filepath, inplace=True):
            if line_search_1.strip() in line.strip():
                print(line_search_2)
            elif line_search_2.strip() in line.strip():
                print(line_search_1)
            else:
                print(line.rstrip("\n"))
        fileinput.close()
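A usage sketch (the file and its contents are hypothetical): if script.py contains "import b" on an earlier line than "import a", this reorders the two lines in place.

swap_lines("import b", "import a", "script.py")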
Example #10
def main():

  FullVersionStr = ""
  for line in fileinput.input(scriptpath + "/JuceLibraryCode/AppConfig.h",inplace=0):
    if "#define JucePlugin_Version " in line:
      # note: str.lstrip() strips a *set* of characters, so take the suffix instead
      FullVersionStr = line.split("#define JucePlugin_Version ", 1)[1].strip()
    
  today = datetime.date.today()
  CFBundleGetInfoString = FullVersionStr + ", Copyright MatthieuBrucher, " + str(today.year)
  CFBundleVersion = FullVersionStr
  
  print("update_version.py - setting version to " + FullVersionStr)
  print("Updating plist version info...")
  
  import glob
  for plistpath in glob.glob(scriptpath + "/Builds/MacOSX/*.plist"):
    update_plist(plistpath, CFBundleGetInfoString, CFBundleVersion)

  print("Updating Mac Installer version info...")
  
  plistpath = scriptpath + "/installer/ATKColoredExpander.pkgproj"
  installer = plistlib.readPlist(plistpath)
  
  for x in installer['PACKAGES']:
    x['PACKAGE_SETTINGS']['VERSION'] = FullVersionStr
  
  plistlib.writePlist(installer, plistpath)
  replacestrs(plistpath, "//Apple//", "//Apple Computer//")
  
  print("Updating Windows Installer version info...")
  
  for line in fileinput.input(scriptpath + "/installer/ATKColoredExpander.iss",inplace=1):
    if "AppVersion" in line:
      line="AppVersion=" + FullVersionStr + "\n"
    sys.stdout.write(line)
Example #11
def file_filter(fname="-", filterfiles=None, delim="\t", header=False):
    """
    Processes the named file
    @param fname the name of the file to read from.  Defaults to stdin
    @param filterfiles list of filenames to open and filter by.  Each name
           may optionally be prepended with '_' to invert mapping values.
    @param delim the field separator.  Defaults to tab character
    @param header first line of file contains header information to be passed
           through as-is.  Defaults to False
    @return nothing (results are printed to stdout)
    @throws EmptyStdinError if nothing is waiting at stdin and no other files
            are specified.
    @throws NoFiltersError if no filter files have been specified
    """
    f = fileinput.input(fname, openhook=fileinput.hook_compressed)
    if fname == "-" and os.isatty(0):
        raise EmptyStdinError("stdin empty")
    if not filterfiles:  # covers both None and an empty list
        raise NoFiltersError("no filter files specified")
    else:
        ffiles = [(fileinput.input(x.lstrip('_'), 
                                   openhook=fileinput.hook_compressed),
                   True if x.startswith('_') else False) for x in filterfiles]
    for ln in f:
        ln = ln.rstrip("\n").rstrip("\r")
        keep_line = read_next(ffiles)
        if keep_line or (f.isfirstline() and header):
            print ln
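A usage sketch of the convention described in the docstring (file names hypothetical): filter data.tsv.gz by mask.txt, and by the inverse of exclude.txt via the leading underscore.

file_filter("data.tsv.gz", filterfiles=["mask.txt", "_exclude.txt"])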
def read_x(x1,y1,x2,y2,x3,y3,x4,y4):
	fn_constant_00001='/Users/yawei/Documents/papers/asynchronsou and distributed stochastic gradient descent with variance reduction /figures/data/accelerated/constant00001.txt'
	fn_accelerated_00001='/Users/yawei/Documents/papers/asynchronsou and distributed stochastic gradient descent with variance reduction /figures/data/accelerated/accelerated00001.txt'
	fn_constant_000001='/Users/yawei/Documents/papers/asynchronsou and distributed stochastic gradient descent with variance reduction /figures/data/accelerated/constant000001.txt'
	fn_accelerated_000001='/Users/yawei/Documents/papers/asynchronsou and distributed stochastic gradient descent with variance reduction /figures/data/accelerated/accelerated000001.txt'
	
	for line1 in fileinput.input(fn_constant_00001):
		_1arr=line1.split('\r')[0].split('\t')
		x1.append(float(_1arr[0]))
		y1.append(float(_1arr[1]))
	for line2 in fileinput.input(fn_accelerated_00001):
		_2arr=line2.split('\r')[0].split('\t')
		x2.append(float(_2arr[0]))
		y2.append(float(_2arr[1]))
		#print _2arr
	for line3 in fileinput.input(fn_constant_000001):
		#print line3
		_3arr=line3.split('\r')[0].split('\t')
		x3.append(float(_3arr[0]))
		y3.append(float(_3arr[1]))
	for line4 in fileinput.input(fn_accelerated_000001):
		_4arr=line4.split('\r')[0].split('\t')
		x4.append(float(_4arr[0]))
		y4.append(float(_4arr[1]))

Example #13
    def configure_step(self):
        """Set extra configure options."""
        self.cfg.update('configopts', "--with-threads --enable-shared")

        modules_setup_dist = os.path.join(self.cfg['start_dir'], 'Modules', 'Setup.dist')

        libreadline = get_software_root('libreadline')
        if libreadline:
            ncurses = get_software_root('ncurses')
            if ncurses:
                readline_libdir = get_software_libdir('libreadline')
                ncurses_libdir = get_software_libdir('ncurses')
                readline_static_lib = os.path.join(libreadline, readline_libdir, 'libreadline.a')
                ncurses_static_lib = os.path.join(ncurses, ncurses_libdir, 'libncurses.a')
                readline = "readline readline.c %s %s" % (readline_static_lib, ncurses_static_lib)
                for line in fileinput.input(modules_setup_dist, inplace=1, backup='.readline'):
                    line = re.sub(r"^#readline readline.c.*", readline, line)
                    sys.stdout.write(line)
            else:
                self.log.error("Both libreadline and ncurses are required to ensure readline support")

        openssl = get_software_root('OpenSSL')
        if openssl:
            for line in fileinput.input(modules_setup_dist, inplace=1, backup='.ssl'):
                line = re.sub(r"^#SSL=.*", "SSL=%s" % openssl, line)
                line = re.sub(r"^#(\s*-DUSE_SSL -I)", r"\1", line)
                line = re.sub(r"^#(\s*-L\$\(SSL\)/lib )", r"\1 -L$(SSL)/lib64 ", line)
                sys.stdout.write(line)

        super(EB_Python, self).configure_step()
 def _makeFakeEbuild(self, fake_ebuild_path, fake_ebuild_content=''):
   self.mox.StubOutWithMock(fileinput, 'input')
   fileinput.input(fake_ebuild_path).AndReturn(fake_ebuild_content)
   self.mox.ReplayAll()
   fake_ebuild = portage_utilities.EBuild(fake_ebuild_path)
   self.mox.VerifyAll()
   return fake_ebuild
Example #15
def main(argv):
  import getopt, fileinput
  def usage():
    print 'usage: %s {cmake,cget,cdump,cmerge} [options] cdbname [args ...]' % argv[0]
    print 'usage: %s {tmake,tget,tdump,tmerge} [options] tcdbname [args ...]' % argv[0]
    return 100
  args = argv[1:]
  if not args: return usage()
  cmd = args.pop(0)
  try:
    (opts, args) = getopt.getopt(args, 'kv2')
  except getopt.GetoptError:
    return usage()
  if not args: return usage()
  dbname = args.pop(0)
  
  # cdb
  if cmd == 'cmake':
    CDBMaker(dbname, dbname+'.tmp').txt2cdb(fileinput.input(args)).finish()
  elif cmd == 'cget':
    print repr(CDBReader(dbname).get(args[0]))
  elif cmd == 'cdump':
    f = (lambda k,v: '+%d,%d:%s->%s' % (len(k), len(v), k, v))
    for (k, v) in opts:
      if k == '-k': f = (lambda k,_: k)
      elif k == '-v': f = (lambda _,v: v)
      elif k == '-2': f = (lambda k,v: k+'\t'+v)
    for (k,v) in cdbdump(dbname):
      print f(k,v)
    print
  elif cmd == 'cmerge':
    dbs = [ cdbdump(fname) for fname in args ]
    m = CDBMaker(dbname, dbname+'.tmp')
    for (k,vs) in tcdbmerge(dbs):
      m.add(k, ' '.join(vs))
    m.finish()
  # tcdb
  elif cmd == 'tmake':
    TCDBMaker(dbname, dbname+'.tmp').txt2tcdb(fileinput.input(args)).finish()
  elif cmd == 'tget':
    print repr(TCDBReader(dbname).lookup(args))
  elif cmd == 'tdump':
    f = (lambda k,v: '%s%d,%d:%s->%s' % ('+'*len(k), len(k[-1]), len(v), k[-1], v))
    for (k, v) in opts:
      if k == '-k': f = (lambda k,_: '/'.join(k))
      elif k == '-v': f = (lambda _,v: v)
      elif k == '-2': f = (lambda k,v: '/'.join(k)+'\t'+v)
    for (k,v) in tcdbdump(dbname):
      print f(k,v)
    print
  elif cmd == 'tmerge':
    dbs = [ tcdbdump(fname) for fname in args ]
    m = TCDBMaker(dbname, dbname+'.tmp')
    for (k,vs) in tcdbmerge(dbs):
      m.put(len(k), k[-1], ' '.join(vs))
    m.finish()
    
  else:
    return usage()
  return
def create_more_container_files(sourceDir, suffix, maxElements, containers, containers2):
    """Creates additional files for the individual MPL-containers."""

    # Create files for each MPL-container with 20 to 'maxElements' elements
    # which will be used during generation.
    for container in containers:
        for i in range(20, maxElements, 10):
            # Create copy of "template"-file.
            newFile = os.path.join( sourceDir, container, container + str(i+10) + suffix )
            shutil.copyfile( os.path.join( sourceDir, container, container + "20" + suffix ), newFile ) 
            # Adjust copy of "template"-file accordingly.
            for line in fileinput.input( newFile, inplace=1, mode="rU" ):
                line = re.sub(r'20', re.escape(str(i+10)), line.rstrip())
                line = re.sub(r'11', re.escape(str(i + 1)), line.rstrip())
                line = re.sub(r'10(?![0-9])', re.escape(str(i)), line.rstrip())
                print(line)
    for container in containers2:
        for i in range(20, maxElements, 10):
            # Create copy of "template"-file.
            newFile = os.path.join( sourceDir, container, container + str(i+10) + "_c" + suffix )
            shutil.copyfile( os.path.join( sourceDir, container, container + "20_c" + suffix ), newFile ) 
            # Adjust copy of "template"-file accordingly.
            for line in fileinput.input( newFile, inplace=1, mode="rU" ):
                line = re.sub(r'20', re.escape(str(i+10)), line.rstrip())
                line = re.sub(r'11', re.escape(str(i + 1)), line.rstrip())
                line = re.sub(r'10(?![0-9])', re.escape(str(i)), line.rstrip())
                print(line)
def test_graphs_SAP():
    """ test the ShortestAncestralPath class """
    print "SAP\n"
    G = DirectedGraph.read_from_file(fileinput.input("digraph1.txt"))
    sap = ShortestAncestralPath(G)
    print "3 ", "1 ", "length = ", sap.length(3, 1)
    print "3 ", "1 ", "ancestor = ", sap.ancestor(3, 1)
    print "3 ", "11 ", "length = ", sap.length(3, 11)
    print "3 ", "11 ", "ancestor = ", sap.ancestor(3, 11)
    print "9 ", "12 ", "length = ", sap.length(9, 12)
    print "9 ", "12 ", "ancestor = ", sap.ancestor(9, 12)
    print "7 ", "2 ", "length = ", sap.length(7, 2)
    print "7 ", "2 ", "ancestor = ", sap.ancestor(7, 2)
    print "1 ", "6 ", "length = ", sap.length(1, 6)
    print "1 ", "6 ", "ancestor = ", sap.ancestor(1, 6)
    v1 = Stack()
    v1.push(3)
    v1.push(7)
    v1.push(8)
    v2 = Stack()
    v2.push(10)
    v2.push(9)
    v2.push(11)
    v2.push(12)
    v3 = Bag()
    v3.add(4)
    v3.add(1)
    print "v1 :3,7,8 ", "v2 : 10,9,11,12 ", "length = ", sap.length(v1, v2)
    print "v1 :3,7,8 ", "v2 : 10,9,11,12 ", "ancestor = ", sap.ancestor(v1, v2)
    print "v1 :3,7,8 ", "v3 : 4, 1 ", "length = ", sap.length(v1, v3)
    print "v1 :3,7,8 ", "v2 : 4, 1 ", "ancestor = ", sap.ancestor(v1, v3)
    G_2 = DirectedGraph.read_from_file(fileinput.input("digraph2.txt"))
    sap2 = ShortestAncestralPath(G_2)
    print "1 ", "5 ", "length = ", sap2.length(2, 1)
    print "1 ", "5 ", "ancestor = ", sap2.ancestor(2, 1)
def plot_chord_window_transfer():
	window_map = {}
	for line in fileinput.input("data_processed/chord_window_transfer.txt"):
		pair, v = line.strip().split("\t")
		window_1, window_2 = pair.split(",")
		if window_1 not in window_map:
			window_map[window_1] = {"canteen":"","total":0,"stay":0,"price":0}
		if window_2 == window_1:
			window_map[window_1]["stay"] = int(v)
		window_map[window_1]["total"] += int(v)
	fileinput.close()

	for line in fileinput.input("data_processed/tree_window.txt"):
		(canteen, window, v1, v2, v3) = line.strip().split("\t")
		window_map[window]["canteen"] = canteen
		window_map[window]["price"] = float(v3)
	fileinput.close()

	with open("data2js.txt","w") as f:
		# pick one canteen to plot; the alternatives were toggled by editing this value
		target_canteen = "一餐"  # others: "二餐", "三餐", "四餐", "五餐", "六餐", "哈乐"
		for window, values in window_map.iteritems():
			if values["canteen"] == target_canteen:
				f.write("["+str(round(values["price"],2))+","+str(round(100.0*values["stay"]/values["total"],2))+"],\n")
def plot_tree_canteen():
	with open("data2js.txt","w") as f:
		canteen_map, window_map, _id = {}, {}, 0
		for line in fileinput.input("data_processed/tree_canteen.txt"):
			_id += 1
			(name, v1, v2, v3) = line.strip().split("\t")
			f.write("{name:'"+name+"	日均客流"+v1+"',value:"+str(int(v1)/3)+",id:"+str(_id)+",depth:1,category:1},\n")
			canteen_map[name] = _id
		fileinput.close()

		for line in fileinput.input("data_processed/tree_window.txt"):
			_id += 1
			(canteen, name, v1, v2, v3) = line.strip().split("\t")
			f.write("{name:'"+name+"	日均客流"+v1+"',value:"+v1+",id:"+str(_id)+",depth:2,category:0},\n")
			window_map[name] = _id
		fileinput.close()

		for line in fileinput.input("data_processed/tree_canteen.txt"):
			(name, v1, v2, v3) = line.strip().split("\t")
			f.write("{source:"+str(0)+",target:"+str(canteen_map[name])+",weight:1},\n")
		fileinput.close()

		for line in fileinput.input("data_processed/tree_window.txt"):
			(canteen, name, v1, v2, v3) = line.strip().split("\t")
			f.write("{source:"+str(canteen_map[canteen])+",target:"+str(window_map[name])+",weight:1},\n")
		fileinput.close()
def cat_to_name(infile,PathToTaxonomy,suffix):
	namesdict={}
	for line in fileinput.input([PathToTaxonomy+"/names.dmp"]):
		DictValues=line.split('\t')
		if DictValues[6]=="scientific name":
			namesdict[DictValues[0]]=DictValues[2]
	outfile=open(suffix,'w')
	for line in fileinput.input([infile]):
		HitValues=line.strip().split('\t')
		IDValues=[HitValues[0],HitValues[1]]
		for n in range(2,9):
			try:
				IDValues.append(namesdict[HitValues[n]])
			except KeyError:
				try:
					# the field may hold a comma-separated list of tax ids
					newlist=[namesdict[each] for each in HitValues[n].split(',')]
					IDValues.append(",".join(newlist))
				except KeyError:
					IDValues.append(HitValues[n])
		for value in IDValues:
			outfile.write(value+'\t')
		outfile.write('\n')
	outfile.close()
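For context, NCBI taxonomy names.dmp rows are "\t|\t"-delimited, which is why a plain tab split puts the tax id at index 0, the name at index 2, and the name class at index 6 (example row below):

# 9606\t|\tHomo sapiens\t|\t\t|\tscientific name\t|
# line.split('\t') -> ['9606', '|', 'Homo sapiens', '|', '', '|', 'scientific name', '|']
#                       [0]          [2]                          [6]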
    def cleanup(self):
        """
        Called only if the PackStack setup is successful. Returns the modified system environment to its defaults.
        It performs the following activities:
        1. Re-enable repositories; during installation the default repos are disabled (for offline install)
        (NOTE: the EPEL repo is not changed back to default, it still points to /root/cloud/)
        2. Remove rabbitmq entry from /etc/hosts file
        3. Disable initial-setup-text service

        """
        count = 3  # only the first three "enabled" lines get switched back on
        for line in fileinput.input(ROOT_PATH + "/etc/yum.repos.d/CentOS-Base.repo", inplace=True):
            if line.startswith("enabled"):
                if count > 0:
                    count -= 1
                    print(line.replace("enabled=0", "enabled=1").rstrip("\n"))
                else:
                    print line,
            else:
                print line,
        fileinput.close()

        for line in fileinput.input(ROOT_PATH + '/etc/hosts', inplace=True):
            print(line.replace("127.0.0.1 www.rabbitmq.com", "").rstrip("\n"))
        fileinput.close()

        ret = iutil._run_systemctl("disable", "initial-setup-text")
        if ret:
            print ("Failed to Disable INITIAL-SETUP-UTILITY\n")
            return False
        os.remove(os.path.normcase(ROOT_PATH + "/var/www/html/rabbitmq-signing-key-public.asc"))
        return True
def tax_to_cat(infile,PathToTaxonomy):
	catdict={}
	for line in fileinput.input([PathToTaxonomy+"/nodes.dmp"]):
		DictValues=line.split('\t')
		catdict[DictValues[0]]=DictValues[2]
	outfile=open(infile+'.cat','w')
	dmpfile=open(infile+'.dmp','w')
	for line in fileinput.input([infile]):
		HitValues=line.strip().split('\t')
		TaxID=HitValues[-1]
		try:
			TaxCategories=[]
			flag=TaxID
			# walk up the taxonomy until a node is its own parent (the root),
			# keeping the original 49-step safety cap
			for _ in range(49):
				if flag==catdict[flag]:
					break
				TaxCategories.append(catdict[flag])
				flag=catdict[flag]
			outfile.write(line.strip())
			for cat in TaxCategories:
				outfile.write('\t'+cat)
			outfile.write('\n')
		except KeyError:
			dmpfile.write(line)
	outfile.close()
	dmpfile.close()
Example #23
def parser(N):

    # read everything once: if the input comes from stdin, a second
    # fileinput.input() pass would find the stream already exhausted
    lines = [line for line in fileinput.input()]

    num_vert = 0
    for line in lines:
        if len(line) > 0:
            num_vert = num_vert + 1

    # creates a square matrix of num_vert x num_vert
    for i in range(num_vert):
        temp = []
        for j in range(num_vert):
            temp.append(float('INF'))
        N.append(temp)

    line_count =0
    for line in lines:
        
        if (len(line)>0):
            edges =  line.split(' ')
            if (len(edges) > 1):
                temp = []
                for i in range(1,len(edges)):
                    temp = edges[i].split(',')
                    if(len(temp) > 1):
                        #print "Line count: " + str(line_count)
                        #print "temp[1]:    " + str(temp[1])
                        #print "temp[0]:    " + str(temp[0])
                        #print "N[line_count] = " + str(N[line_count])
                        N[line_count][int(temp[0])] = int(temp[1])
                        #print "Done. " + str(line_count)
            line_count = line_count + 1
def build_erddap_catalog(data_root, catalog_root, erddap_name, template_dir):
    """
    Concatenates the head, all fragments, and the foot of a datasets.xml.
    """
    head_path = os.path.join(template_dir, 'datasets.head.xml')
    tail_path = os.path.join(template_dir, 'datasets.tail.xml')

    # discover all deployments in data directory
    udeployments    = defaultdict(list)
    pattern         = os.path.join(data_root, '*', '*')
    deployment_dirs = glob.glob(pattern)

    for dd in deployment_dirs:
        user, deployment = os.path.split(os.path.relpath(dd, data_root))
        udeployments[user].append(deployment)

    ds_path = os.path.join(catalog_root, erddap_name, 'datasets.xml')
    with open(ds_path, 'w') as f:
        for line in fileinput.input([head_path]):
            f.write(line)

        # for each user deployment, create a dataset fragment
        for user in udeployments:
            for deployment in udeployments[user]:
                f.write(build_erddap_catalog_fragment(data_root, user, deployment, template_dir))

        # if we have an "agg" file in our templates, fill one out per user
        if os.path.exists(os.path.join(template_dir, 'dataset.agg.xml')):
            for user in udeployments:
                f.write(build_erddap_agg_fragment(data_root, user, template_dir))

        for line in fileinput.input([tail_path]):
            f.write(line)

    logger.info("Wrote %s from %d deployments", ds_path, len(deployment_dirs))
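The head/tail fileinput loops above only copy files verbatim; a sketch of those two copies inside the with-block, using shutil.copyfileobj instead (same paths, and a module-level shutil import, assumed):

    with open(head_path) as head:
        shutil.copyfileobj(head, f)   # verbatim copy, no line loop needed
    # ... per-deployment fragments written as before ...
    with open(tail_path) as tail:
        shutil.copyfileobj(tail, f)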
def read_x(x1,y1,x2,y2,x3,y3,x4,y4):
	fn_without_tricks_disSVRG='/Users/yawei/Documents/papers/asynchronsou and distributed stochastic gradient descent with variance reduction /figures/data/evaluation1/dna/disSVRG-without-tricks.txt'
	fn_tricks_disSVRG='/Users/yawei/Documents/papers/asynchronsou and distributed stochastic gradient descent with variance reduction /figures/data/evaluation1/dna/disSVRG-with-tricks.txt'
	fn_petuumSGD='/Users/yawei/Documents/papers/asynchronsou and distributed stochastic gradient descent with variance reduction /figures/data/evaluation1/dna/petuumSGD.txt'
	fn_SSGD='/Users/yawei/Documents/papers/asynchronsou and distributed stochastic gradient descent with variance reduction /figures/data/evaluation1/dna/SSGD.txt'

	f_without_tricks_disSVRG=open(fn_without_tricks_disSVRG,'r+')
	for line1 in f_without_tricks_disSVRG:
		_1arr=line1.split('\r')[0].split('\t')
		x1.append(float(_1arr[0]))
		y1.append(float(_1arr[1]))
	for line2 in fileinput.input(fn_petuumSGD):
		_2arr=line2.split('\r')[0].split('\t')
		x2.append(float(_2arr[0]))
		y2.append(float(_2arr[1]))
	for line3 in fileinput.input(fn_tricks_disSVRG):
		_3arr=line3.split('\r')[0].split('\t')
		x3.append(float(_3arr[0]))
		y3.append(float(_3arr[1]))
	for line4 in fileinput.input(fn_SSGD):
		_4arr=line4.split('\r')[0].split('\t')
		x4.append(float(_4arr[0]))
		y4.append(float(_4arr[1]))
	

	f_without_tricks_disSVRG.close()
def process():

	bed_ht = {}
	
	if (options.reference != ""):
		for line in fileinput.input([options.reference]):
			cols = line.strip().split("\t")
			bed_ht[cols[3]] = [cols[0],int(cols[1]), int(cols[2]), cols[3], int(cols[4]), cols[5]]
		if (options.verbose):
			print >> sys.stderr, "read %d entries from reference %s" % (len(bed_ht), options.reference)
			
	counter = 0
	for line in fileinput.input([args[0]]):
		cols = line.strip().split("\t")
		if (len(cols)<4 or line.startswith("#")):
			continue
		
		if (len(bed_ht) > 0):
			if (cols[0] not in bed_ht):
				print >> sys.stderr, "sequence not known: %s" % cols[0]
			entry = bed_ht[cols[0]]
		else:
			entry = [cols[0], 0, 0, counter, 1 ,"+"]
			counter += 1 
			
		# discard too long features
		if (options.max_length>0 and int(cols[2])-int(cols[1]) > options.max_length):
			continue
			
		print "%s\t%d\t%d\t%s\t%d\t%s" % (entry[0], entry[1]+int(cols[1]), entry[1]+int(cols[2]), entry[3], entry[4], entry[5])
Example #27
 def special_download_work(self, fileNameUrls, baseDir, overallLength):
     """
     The actual worker for special downloads.  Must be called by
     get_overall_length() in order to work! Calls get_special_download() on
     each line of the file provided in order to download the desired files.
     """
     if not baseDir.endswith('/') and baseDir != '':
         baseDir += '/'
     fi = fileinput.input(fileNameUrls)
     nl = 0
     for line in fi:
         nl += 1
     fi = fileinput.input(fileNameUrls)  # reopen; the counting pass consumed fi
     cl = 0
     widgets = self.widgets
     pbar = ProgressBar(widgets=widgets, maxval=overallLength)
     pbar.start()
     for line in fi:
         urlToGetFile = line[:-1]
         fileNameToSave = os.path.join(baseDir, urlToGetFile[urlToGetFile.rfind('/') + 1:])
         # pass the callable and args separately; self.download(...) would run
         # synchronously here and hand its return value to Process as target
         proc = multiprocessing.Process(target=self.download,
                                        args=(urlToGetFile, fileNameToSave))
         proc.start()
         proc.join()
         cl += 1
         pbar.update(overallLength / nl * cl)
         note_set_and_send('Pounce: ', '%s download complete!' % fileNameToSave)
     pbar.finish()
     note_set_and_send('Pounce: ', 'All jobs completed!')
     return clean_exit()
def build_thredds_catalog(data_root, catalog_root, template_dir):
    """
    Creates the THREDDS catalog file covering all discovered user deployments.
    """
    head_path = os.path.join(template_dir, 'catalog.head.xml')
    tail_path = os.path.join(template_dir, 'catalog.tail.xml')

    # discover all deployments in data directory
    udeployments    = defaultdict(list)
    pattern         = os.path.join(data_root, '*', '*')
    deployments = glob.glob(pattern)

    for dd in deployments:
        user, deployment = os.path.split(os.path.relpath(dd, data_root))
        udeployments[user].append(deployment)

    catalog_path = os.path.join(catalog_root, 'thredds', 'catalog.xml')
    with open(catalog_path, 'w') as f:
        for line in fileinput.input([head_path]):
            f.write(line)

        # for each deployment file create a fragment
        for user in udeployments:
            for deployment in udeployments[user]:
                f.write(build_thredds_catalog_fragment(data_root, user, deployment, template_dir))

        # @TODO: aggregations?

        for line in fileinput.input([tail_path]):
            f.write(line)

    logger.info("Wrote %s from %d deployments", catalog_path, len(deployments))
Example #29
def shop():
    print "\n\033[;32mWelcome to coral mall, only you think, no you can't buy\033[0m"
    for line in fileinput.input("shops"):
        print "\t%s" % line.strip()
    while True:
        for f in fileinput.input("shops"):
            list1 = f.split()[0]
            list2 = f.split()[1]
            products[list1] = list2

        choices = raw_input("Please enter the purchase of products or back (b): ").strip()
        if len(choices) == 0:continue
        elif choices == 'b' or choices == 'back':list()
        
        if products.has_key(choices):
            balance = pickle.load(open("userinfo","rb"))  # match the "wb" dump below
            if int(products[choices]) > balance[accountAuth][2]:
                print "\033[;31mYour balance is insufficient\033[0m"
            else:
                while True:
                    shopList.append(choices)
                    new_balance = int(balance[accountAuth][2]) - int(products[choices])
                    userInfo[accountAuth][2] = int(new_balance)
                    pickle.dump(userInfo,open("userinfo","wb"))

                    Bill(accountAuth,time.strftime("%Y-%m-%d %H:%M:%S"),choices,"-%d" % int(products[choices]))

                    print "\033[;32mConsumption is %r Money is %r\033[0m" % (products[choices],new_balance)
                    print "\033[;33mThe shopping list %s \033[0m" % shopList
                    break
        else:
            print "\033[;31mYou choose %r not in the list\033[0m" % choices
            shop()
def consist(infile,PathToTaxonomy):
	TargetCategories={}
	outfile=open(infile+".con",'w')
	catdict={}
	for line in fileinput.input([PathToTaxonomy+"/nodes.dmp"]):
		DictValues=line.split('\t')
		catdict[DictValues[0]]=DictValues[4]
	fileinput.close()
	for line in fileinput.input([infile]):
		HitValues=line.strip().split('\t')
		TargetCategories[HitValues[0]]={'perc':HitValues[2],'species':[],'genus':[],'family':[],'order':[],'phylum':[],'class':[],'kingdom':[]}
	fileinput.close()
	for line in fileinput.input([infile]):
		HitValues=line.strip().split('\t')
		n=12
		while n<len(HitValues):
			for cat in TargetCategories[HitValues[0]].keys():
				if catdict[HitValues[n]]==cat:
					if HitValues[n] not in TargetCategories[HitValues[0]][cat]:
						TargetCategories[HitValues[0]][cat].append(HitValues[n])
			n=n+1
	print "Number of ids with taxonomic information:" + str(len(TargetCategories))
	for ID in TargetCategories.keys():
		LCA_per_ID={}
		for cat in TargetCategories[ID].keys():
			if len(TargetCategories[ID][cat])>1:
				LCA_per_ID[cat]=str(','.join(TargetCategories[ID][cat]))
			elif len(TargetCategories[ID][cat])==0:
				LCA_per_ID[cat]='n'+str(len(TargetCategories[ID][cat]))
			else:
				LCA_per_ID[cat]=TargetCategories[ID][cat][0]
		outfile.write(ID+'\t'+TargetCategories[ID]['perc']+'\t'+LCA_per_ID['species']+'\t'+LCA_per_ID['genus']+'\t'+LCA_per_ID['family']+'\t'+LCA_per_ID['order']+'\t'+LCA_per_ID['class']+'\t'+LCA_per_ID['phylum']+'\t'+LCA_per_ID['kingdom']+'\n')
	outfile.close()
Example #31
        if segments[1] != '':
            end = 2*int(segments[1])
            if end < 0:
                end += 1

        return (start, end)

    raise Exception('Malformed arg: %s' % arg)

cols = [parse_col(c) for c in unknown[0].split(',')]
sys.argv = unknown[0:]

from signal import signal, SIGPIPE, SIG_DFL
signal(SIGPIPE, SIG_DFL)

for line in fileinput.input():
    l = re.split('(%s)' % args.input_delimiter, line.strip())

    offset = 0

    first = True
    for col in cols:

        if args.keep_original_range_spacing:
            slice = l[
                    (None if col[0] == None else col[0] + offset):(None if col[1] == None else col[1] + offset)
            ]
            joiner = ''
        else:
            slice = l[
                    (None if col[0] == None else col[0]):(None if col[1] == None else col[1]):2
Example #32
import sys
import fileinput

## replace strings in file

#with fileinput.FileInput(fileToSearch, inplace=True, backup='.bak') as file:
#    for line in file:
#        print(line.replace(textToSearch, textToReplace), end='')

## replace entire line if line starts with specified pattern

for line in fileinput.input('input.txt', inplace=True):
    if line.strip().startswith('old'):
        line = 'new\n'
    sys.stdout.write(line)

## Run Database changes

#
#

## tar and backup code directory

#     with tarfile.open(output_filename, "w:gz") as tar:
#        tar.add(source_dir, arcname=os.path.basename(source_dir))
Example #33
import os, glob
import sys, re, fileinput

Argument = sys.argv[1:]

if len(Argument) < 2:
    print "Usage: Input_directory Chromosome_size(fai file)"
    sys.exit()

chr_order = ""

for line in fileinput.input([Argument[1]]):
    if line.startswith("#") or not line.strip():
        continue

    line = line.rstrip("\n")
    array = line.split("\t")

    chr_order = chr_order + "  " + Argument[0] + "/" + array[0] + ".var.raw.bcf"

print("/home/apandey/bio/samtools-0.1.18/bcftools/bcftools cat " +
      str(chr_order) + " > " + str(Argument[0]) + "/Whole_genome_variants.bcf")

os.system("/home/apandey/bio/samtools-0.1.18/bcftools/bcftools cat " +
          str(chr_order) + " > " + str(Argument[0]) +
          "/Whole_genome_variants.bcf")
Example #34
import argparse
import fileinput
import os
import subprocess

parser = argparse.ArgumentParser(description = "Use GATK ASEReadCounter to count allele-specific expression.", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--indir", help = "Directory of the input BAM files.")
parser.add_argument("--outdir", help = "Directory of the output BAM files.")
parser.add_argument("--insuffix", help = "Suffix of the input bam file.", default = ".bam")
parser.add_argument("--outsuffix", help = "Suffix of the output ASE count file.", default = ".ASEcounts")
parser.add_argument("--reference", help = "Path to indexed reference FASTA file.")
parser.add_argument("--sites", help = "Path to the VCF file containing SNP coordinates.")
parser.add_argument("--execute", help = "Execute the script", default = "False")
parser.add_argument("--Xmx", help = "Memory allocated to the Java process.", default = "900m")
parser.add_argument("--java_path", help = "Path to the Java executable.", default = "/software/java/bin/java")
parser.add_argument("--gatk_path", help = "Path to the GATK executable.", default = "~/software/GenomeAnalysisTK.jar")
args = parser.parse_args()

#Construct file names
for line in fileinput.input("-"):
		sample_name = line.rstrip()
		path_in = os.path.join(args.indir, sample_name, sample_name + args.insuffix)
		path_out = os.path.join(args.outdir, sample_name, sample_name + args.outsuffix)
		gatk_path = " ".join([args.java_path, "-jar", "-Xmx"+args.Xmx, args.gatk_path, "-T ASEReadCounter"])
		#The -dt NONE flag disables downsampling in GATK
		#flags = "-U ALLOW_N_CIGAR_READS -dt NONE --minMappingQuality 10 -rf MateSameStrand"
		flags = "-U ALLOW_N_CIGAR_READS -dt NONE --minMappingQuality 10" #ChIP-seq analysis
		command = " ".join([gatk_path, "-R", args.reference, "-I", path_in, "-o", path_out, "-sites", args.sites, flags])
		print(command)
		if args.execute == "True":
			subprocess.call(['bash','-c',command])
def main(args):
    filename = args[0]
    R = int(args[1])  # number of rows to generate
    C = int(args[2])  # number of columns to generate
    Smn = int(args[3])  # min distribution columns span
    Smx = int(args[4])  # max distribution columns span
    B = 50  # print every B lines generated
    F = None  # fairness of binomial coin

    if len(args) > 5:
        input_str = " ".join(args[5:])
        valid_B = False
        try:
            B = int(args[5])
            valid_B = True
        except ValueError:
            pass

        try:
            if not valid_B:
                input_str = args[5]
                F = float(args[5])
            elif len(args) > 6:
                input_str = args[6]
                F = float(args[6])

            if F is not None and (F < 0 or F > 1):
                raise ValueError()

        except ValueError:
            sys.stderr.write("Error: invalid input '" + input_str + "'\n")
            sys.exit(1)

    if Smn > Smx or Smn < 1 or Smx < 1:
        sys.stderr.write("Error: invalid min/max spans.\n")
        sys.exit(1)

    # create C columns to store means
    means = [0] * C

    # assign a normally distributed mean for sections of columns
    section_widths = distribute(means, Smn, Smx, DistributionType.BINOMIAL, F)
    sys.stderr.write("Levels:  " + str(section_widths) + "\n")

    # print column names
    sys.stderr.write("Columns: [")
    v = 1
    section_col_names = []
    for width in section_widths:
        for c in xrange(width):
            section_col_names.append("col_{var}.{col}".format(var=v,
                                                              col=c + 1))
        v += 1
    sys.stderr.write(", ".join(section_col_names) + "]\n")

    # print the distributed means
    sys.stderr.write("Means:   \n")
    m = 0
    for section, width in enumerate(section_widths):
        sys.stderr.write("      (" + str(section + 1) + ") [ ")
        for mean in range(width):
            sys.stderr.write(str(means[m]) + " ")
            m += 1
        sys.stderr.write("]\n")

    # generate table
    threads = 40
    num_rows = R // threads
    remaining_rows = R % threads
    processes = []
    files = []
    line_count = Value('i', 0)
    lock = Lock()
    for t in range(threads - 1):
        processes.append(
            Process(target=create_part,
                    args=(filename + "_" + str(t), num_rows, means,
                          section_widths, B, line_count, lock)))
        files.append(filename + "_" + str(t))

    processes.append(
        Process(target=create_part,
                args=(filename + "_" + str(threads - 1),
                      num_rows + remaining_rows, means, section_widths, B,
                      line_count, lock)))

    tracker = Process(target=track_status, args=(4, R, line_count, lock))

    files.append(filename + "_" + str(threads - 1))

    start = time.time()

    for t in processes:
        t.start()

    tracker.start()
    for t in processes:
        t.join()

    sys.stderr.write(
        "\rMerging..                                           \r")
    tracker.terminate()

    # merge created parts
    outfile = files[0]
    files.pop(0)

    with open(outfile, 'a') as fout:
        fin = fileinput.input(files)
        for line in fin:
            fout.write(line)
        fin.close()

    for f in files:
        os.remove(f)

    os.rename(outfile, filename)

    end = time.time()

    sys.stderr.write("Generated a {} x {} table in {}\n".format(
        R, C, datetime.timedelta(seconds=end - start)))
Example #36
        raise Exception("You must specify a dataset to filter for.")

    lines = [[sym.fromstring(w) for w in line.split()]
             for line in file(opts.filter_file)]

    if opts.sentence_grammars:
        lines = list(enumerate(lines))

    if opts.parallel is not None:
        i, j = opts.parallel
        lines = lines[i::j]

    if opts.sentence_grammars:
        ffilters = [(Filter([line], maxlen),
                     file(
                         os.path.join(output_dir,
                                      "grammar.line%d" % (i + opts.begin)),
                         "w")) for (i, line) in lines]
    else:
        ffilters = [(Filter(lines, maxlen), output_file)]

    progress = 0
    for line in fileinput.input(args):
        r = rule.rule_from_line(line)
        for (ffilter, output_file) in ffilters:
            if ffilter.match(r.f):
                output_file.write(line)
        progress += 1
        if progress % 100000 == 0:
            sys.stderr.write("%d rules in\n" % progress)
Example #37
Usage:
- Change the values of `old_versions` and `new_versions`
- Run the script: `./bumper.py`
- That's all
"""

os.chdir(os.path.join(os.path.dirname(__file__), '..'))

old_versions = {
    6: '6.8.3',
    7: '7.4.0',
}

new_versions = {
    6: '6.8.4',
    7: '7.4.1',
}

files = [
    'README.md',
    'defaults/main.yml',
    'test/integration/standard-6x.yml',
    '.kitchen.yml',
]

for major, version in old_versions.items():
    for file in files:
        print(file)
        for line in fileinput.input([file], inplace=True):
            print(line.replace(version, new_versions[major]), end='')
Example #38
def _edit_file(file_path, line_pattern, replace_regex, replace_string):
    with fileinput.input(file_path, inplace=True) as f:
        for line in f:
            if line_pattern in line:
                line = re.sub(replace_regex, replace_string, line)
            sys.stdout.write(line)
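A usage sketch (path and patterns hypothetical): rewrite only the lines that mention "version", bumping the pinned number in place.

_edit_file("setup.cfg", "version", r"\d+\.\d+\.\d+", "2.0.0")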
Example #39
import sys, fileinput, itertools
from collections import defaultdict
import nltk
import spacy

nlp = spacy.load('en')

ngram_sentence = defaultdict(list)
high_freq_words = set([line.strip() for line in fileinput.input('high_freq_words.txt')])
prons = set([line.strip() for line in fileinput.input('prons.txt')])
collocation = set([(tk[0], tk[2]) for line in fileinput.input('collocation.txt') for tk in [line.strip().split('\t')]])

def lemmatize_sent(sentence):
    return [tk.lemma_ for tk in nlp(sentence)]

def calculate_score(ngram, sentence):
    ##### YOUR CODE HERE #####
    ngram = lemmatize_sent(ngram)
    sentence = list(filter(('\xa0 ').__ne__, lemmatize_sent(sentence)))
    sent_perm = itertools.permutations(sentence, 2)
    
    try:
        locat = list(zip(*[sentence[i:] for i in range(len(ngram))])).index(tuple(ngram))
    except ValueError:  # the n-gram does not occur in the sentence
        return -999
        
    not_in_high_freq = len([word for word in sentence if word not in high_freq_words])
    is_prons = len([word for word in sentence if word in prons])
    is_collocation = len(list(set([words for words in sent_perm if words in collocation])))
    
    score = locat - not_in_high_freq - is_prons + is_collocation
Example #40
import fileinput

data = [line.strip() for line in fileinput.input()]


def part1():
    total = 0
    for line in data:
        total += (int(line) // 3) - 2
    return total


def part2():
    total = 0
    for line in data:
        fuel = int(line)
        while fuel > 0:
            new = (fuel // 3) - 2
            if new > 0:
                total += new
                fuel = new
            else:
                break

    return total


print(part1())
print(part2())
Example #41

def test_answer_with_op2():
    assert answer(['EE00D40C823060']) == 14


def test_answer_with_op3():
    assert answer(['8A004A801A8002F478']) == 16


def test_answer_with_op4():
    assert answer(['620080001611562C8802118E34']) == 12


def test_answer_with_op5():
    assert answer(['C0015000016115A2E0802F182340']) == 23


def test_answer_with_op6():
    assert answer(['A0016C880162017C3686B18A3D4780']) == 31


if __name__ == '__main__':
    import timeit
    start = timeit.default_timer()
    filename = fileinput.input(F_NAME + '.input')
    ans = answer(filename)
    print('Answer:', ans)
    duration = timeit.default_timer() - start
    print(f'Execution time: {duration:.3f} s')
Example #42
args = parser.parse_args()


def cargo_cmd(command):
    return ['cargo', command] + (['--verbose'] if args.verbose else [])


if len(args.version) > 0:
    crates_and_doc = ['doc']
    crates_and_doc.extend(crate_list)

    for crate in crates_and_doc:
        cargo_toml = crate + '/Cargo.toml'
        print 'Processing', cargo_toml

        for line in fileinput.input(cargo_toml, inplace=1):
            line = re.sub('version = "(=?).*" #auto',
                          'version = "\g<1>' + args.version + '" #auto', line)
            print line,

if args.publish:
    for crate in crate_list:
        print 'Publishing', crate
        check_call(cargo_cmd('publish'), cwd=crate)

if args.build:
    check_call(cargo_cmd('build'), cwd='doc')
    check_call(cargo_cmd('build'), cwd='examples')

if args.format:
    for crate in crate_list_wrapper:
Example #43
""")

# Make avatar directory
if not os.path.isdir(AVATAR_DIR):
    os.makedirs(AVATAR_DIR)

# Parse command-line args
reverse = False
# If args include --reverse, remove it first,
# leaving file name(s) (if any) in args
if len(sys.argv) > 1:
    if sys.argv[1] == "--reverse" or sys.argv[1] == "-r":
        reverse = True
        # fileinput reads sys.argv[1:], so dropping argv[0] leaves only the file names
        del sys.argv[0]

lines = fileinput.input()
if reverse:
    buffered_lines = []
    for line in lines:
        buffered_lines.append(line)
    # Reverse list using slice
    lines = buffered_lines[::-1]

for line in lines:
    tweet = json.loads(line)

    # Download avatar
    url = tweet["user"]["profile_image_url"]
    filename = download_file(url)

    t = {
Example #44
def filter_file(regex, repl, *filenames, **kwargs):
    r"""Like sed, but uses python regular expressions.

    Filters every line of each file through regex and replaces the file
    with a filtered version.  Preserves mode of filtered files.

    As with re.sub, ``repl`` can be either a string or a callable.
    If it is a callable, it is passed the match object and should
    return a suitable replacement string.  If it is a string, it
    can contain ``\1``, ``\2``, etc. to represent back-substitution
    as sed would allow.

    Parameters:
        regex (str): The regular expression to search for
        repl (str): The string to replace matches with
        *filenames: One or more files to search and replace

    Keyword Arguments:
        string (bool): Treat regex as a plain string. Default is False
        backup (bool): Make backup file(s) suffixed with ``~``. Default is True
        ignore_absent (bool): Ignore any files that don't exist.
            Default is False
    """
    string = kwargs.get('string', False)
    backup = kwargs.get('backup', True)
    ignore_absent = kwargs.get('ignore_absent', False)

    # Allow strings to use \1, \2, etc. for replacement, like sed
    if not callable(repl):
        unescaped = repl.replace(r'\\', '\\')

        def replace_groups_with_groupid(m):
            def groupid_to_group(x):
                return m.group(int(x.group(1)))

            return re.sub(r'\\([1-9])', groupid_to_group, unescaped)

        repl = replace_groups_with_groupid

    if string:
        regex = re.escape(regex)

    for filename in filenames:

        msg = 'FILTER FILE: {0} [replacing "{1}"]'
        tty.debug(msg.format(filename, regex))

        backup_filename = filename + "~"

        if ignore_absent and not os.path.exists(filename):
            msg = 'FILTER FILE: file "{0}" not found. Skipping to next file.'
            tty.debug(msg.format(filename))
            continue

        # Create backup file. Don't overwrite an existing backup
        # file in case this file is being filtered multiple times.
        if not os.path.exists(backup_filename):
            shutil.copy(filename, backup_filename)

        try:
            for line in fileinput.input(filename, inplace=True):
                print(re.sub(regex, repl, line.rstrip('\n')))
        except BaseException:
            # clean up the original file on failure.
            shutil.move(backup_filename, filename)
            raise

        finally:
            if not backup and os.path.exists(backup_filename):
                os.remove(backup_filename)
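A usage sketch based on the docstring (file name hypothetical): sed-style back-substitution via \1, keeping the original flags and leaving a "~" backup.

filter_file(r'^CFLAGS = (.*)$', r'CFLAGS = \1 -O2', 'Makefile')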
Example #45
def main():
    for i, p in enumerate(read_problems(fileinput.input()), 1):
        s = solve(p)
        print 'Case #%r: %s' % (i, s)
def make_ovpn_template():
    """Create OpenVPN template file."""
    pull_server_data()

    with open(SERVER_INFO_FILE, "r") as f:
        server_data = json.load(f)

    # Get the ID of the first server from the API
    server_id = server_data["LogicalServers"][0]["ID"]

    config_file_response = call_api(
        "/vpn/config?Platform=linux&LogicalID={0}&Protocol=tcp".format(
            server_id),  # noqa
        json_format=False)

    with open(TEMPLATE_FILE, "wb") as f:
        for chunk in config_file_response.iter_content(100000):
            f.write(chunk)
            logger.debug("OpenVPN config file downloaded")

    # Write split tunneling config to OpenVPN Template
    try:
        if get_config_value("USER", "split_tunnel") == "1":
            split = True
        else:
            split = False
    except KeyError:
        split = False
    if split:
        logger.debug("Writing Split Tunnel config")
        with open(SPLIT_TUNNEL_FILE, "r") as f:
            content = f.readlines()

        with open(TEMPLATE_FILE, "a") as f:
            for line in content:
                line = line.rstrip("\n")
                netmask = None

                if not is_valid_ip(line):
                    logger.debug("[!] '{0}' is invalid. Skipped.".format(line))
                    continue

                if "/" in line:
                    ip, cidr = line.split("/")
                    netmask = cidr_to_netmask(int(cidr))
                else:
                    ip = line

                if netmask is None:
                    netmask = "255.255.255.255"

                if is_valid_ip(ip):
                    f.write("\nroute {0} {1} net_gateway".format(ip, netmask))

                else:
                    logger.debug("[!] '{0}' is invalid. Skipped.".format(line))

        logger.debug("Split Tunneling Written")

    # Remove all remote, proto, up, down and script-security lines
    # from template file
    remove_regex = re.compile(r"^(remote|proto|up|down|script-security) .*$")

    for line in fileinput.input(TEMPLATE_FILE, inplace=True):
        if not remove_regex.search(line):
            print(line, end="")

    logger.debug("remote and proto lines removed")

    change_file_owner(TEMPLATE_FILE)
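
is_valid_ip and cidr_to_netmask are defined elsewhere in the original project;
a minimal sketch of what they might do, using the standard ipaddress module
(an assumption, not the project's actual implementation):

import ipaddress

def is_valid_ip(value):
    # Accept a bare IPv4 address or an "address/CIDR" entry.
    try:
        ipaddress.IPv4Network(value, strict=False)
        return True
    except ValueError:
        return False

def cidr_to_netmask(cidr):
    # e.g. 24 -> "255.255.255.0"
    return str(ipaddress.IPv4Network((0, cidr)).netmask)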
Example #47
0
# -*- coding: utf-8 -*-
"""
Created on Thu Dec  3 17:36:15 2020

@author: jakas
"""

import fileinput

instruction = []

for line in fileinput.input('input8.txt'):
    instruction.append(line.strip())

p1 = []
p2 = []

for k in range(len(instruction)):
    ins = []

    for line in fileinput.input('input8.txt'):
        ins.append(line.strip())
        
    first_iteration = True
    I = []
    i = 0
    accumulator = 0
    
    change = 0
    
    if 'acc' in ins[k]:
Example #48
0
#!/usr/bin/python
# Author : Michel Benites Nascimento
# Date   : 02/16/2018
# Descr. : Mapping URL

import sys
import fileinput
import platform

# Sets the output file.
#out_file = open("map_topurl_out.txt", "w")

# Checks which platform is in use.
if platform.system() == "Windows":
    inputdata = fileinput.input()
else:
    inputdata = sys.stdin

# Loop in the log file
for line in inputdata:

    # Split the line to get the URL
    sDummy, sURL, sDummy = line.strip().split('\t')
    print("%s\t%d" % (sURL, 1))
#   out_file.write("%s\t%d\n" % (sURL, 1))

#out_file.close()
#print ("Mapper is done")
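
The mapper above emits one "URL<TAB>1" record per request; a matching
Hadoop-streaming reducer is not part of this example, but could sum the
counts like this (a sketch, not the original project's code):

import sys
from collections import Counter

counts = Counter()
for line in sys.stdin:
    url, n = line.rstrip("\n").split("\t")
    counts[url] += int(n)

for url, total in counts.most_common():
    print("%s\t%d" % (url, total))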
Example #49
0
import fileinput
import re
from itertools import chain

lines = [line for line in fileinput.input()]

# PART 1:

mem = dict()

for line in lines:
    if "mask" in line:
        mask = line.split()[-1]
    if "mem" in line:
        addr, value = re.findall(r"\d+", line)
        str_bin = [c for c in f"{int(value):036b}"]
        for i, m in enumerate(mask):
            if m != "X":
                str_bin[i] = m
        mem[addr] = int("".join(str_bin), base=2)

print("Part 1:", sum(mem.values()))

# PART 2

mem = dict()


def find_addresses(addr_bin):
    if "X" not in addr_bin:
        return [addr_bin]
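    # The excerpt cuts off here; a plausible completion (an assumption,
    # following the usual floating-bit expansion and reusing the `chain`
    # import above) recurses on the first "X":
    i = addr_bin.index("X")
    return list(chain.from_iterable(
        find_addresses(addr_bin[:i] + bit + addr_bin[i + 1:])
        for bit in "01"))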
Example #50
0
    exit(1)

pctype  = str(sys.argv[1])   # PC type
pcord   = int(sys.argv[2])   # PC order of the chain samples 
methods = sys.argv[3:]

nsam = 100                   # Number of samples for NISP_MC


nmeth=len(methods)
print methods
methods_str=' '.join(methods)

## Prepare the xml file
shutil.copyfile('forUQ_surf_rxn.in.xml.templ','surf_rxn.in.xml')
for line in fileinput.input("surf_rxn.in.xml", inplace = 1):
    print line.replace('PCTYPE', pctype),
for line in fileinput.input("surf_rxn.in.xml", inplace = 1):
    print line.replace('PCORDER', str(pcord)),

for im in range(nmeth):
    str_param=''
    if methods[im]=='NISP_MC':
        str_param=str(nsam)
    cmd = './SurfRxn'+methods[im]+'.x '+str_param
    print "Running",cmd
    os.system(cmd)
    cmd = 'mv solution.dat solution_'+methods[im]+'.dat'
    print "Running",cmd
    os.system(cmd)
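
The two inplace passes over surf_rxn.in.xml earlier in this example could be
folded into a single pass (a sketch, keeping the original's Python 2 style):

for line in fileinput.input("surf_rxn.in.xml", inplace=1):
    print line.replace('PCTYPE', pctype).replace('PCORDER', str(pcord)),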
  
Example #51
0
"""
Advent of code 2016 - Day 10, Part 2

Balance Bots
===
What do you get if you multiply together the values of one chip in each of
outputs 0, 1, and 2?
"""
import fileinput
from part1 import parse_lines


def balance_bots(lines):
    bots, output = parse_lines(lines)

    one, two, three = [output[i]() for i in ['0', '1', '2']]
    print "The product of 0, 1 and 2 output bins is {} * {} * {} = {}".format(
        one, two, three, one * two * three)


if __name__ == '__main__':
    balance_bots(x.strip() for x in fileinput.input())
Example #52
0
def read_input() -> list[str]:
    return [str(line.strip()) for line in fileinput.input("input.txt")]
Example #53
0
def part2():
    # Stub: the first pass reads all input; calling fileinput.input() again
    # re-opens any argv files from the start (stdin, however, is exhausted).
    [line.strip() for line in fileinput.input()]
    for line in fileinput.input():
        pass
    return
Example #54
0
import fileinput
import sys

def replaceAll(file, searchExp, replaceExp):
    # inplace=1 redirects stdout into the file, rewriting it line by line
    for line in fileinput.input(file, inplace=1):
        if searchExp in line:
            line = line.replace(searchExp, replaceExp)
        sys.stdout.write(line)
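
A hypothetical invocation (the file name and strings are made up for
illustration):

replaceAll("settings.ini", "DEBUG = 0", "DEBUG = 1")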
Example #55
0
bin_list = [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 30]
#bin_list = [10,20]

#an_type = 'VV'
an_type = 'VH'

metric_list = []

for bin in bin_list:
    break  # NOTE: this break disables the rest of the loop body (likely a debugging leftover)
    print '\n\n\t Looping Over Bin: ', bin

    # Go into datacard.config
    new_dir = "BDTrange: " + str(bin) + ",-1,1\n"
    for line in fileinput.input('13TeVconfig/datacard', inplace=True):
        if 'BDTrange:' in line:
            print line.replace(line, new_dir),
        else:
            print line,
    # end file modification

    print '\n\n\t Setting DC Directory...'

    # Set datacard output directory to current var name
    new_dir = "dir= 'bdt_param_optimize/BDT_bins" + str(bin) + "'\n"
    for line in fileinput.input('../myMacros/classification/dataCard_loop.py',
                                inplace=True):
        if 'dir=' in line:
            print line.replace(line, new_dir),
        else:
Example #56
0
                elif ttype == 6:
                    subs.append(1 if subpackets[0] < subpackets[1] else 0)
                elif ttype == 7:
                    subs.append(1 if subpackets[0] == subpackets[1] else 0)

            # Check exit conditions for loop.
            if num_subpackets is not None and len(subs) == num_subpackets:
                # IMPORTANT: return i and not len(i); it is not an invariant
                # that we read the full `bits`, since there may have been
                # some trailing data that we ignore since it falls out of
                # the range of `num_subpackets` subpackets.
                return subs, i, version
            elif bit_length is not None and i > bit_length:
                return subs, len(bits), version
            elif i >= len(bits):
                return subs, len(bits), version

    except Exception as e:
        print "Unexpected parsing error:", e
        return subs, i, version


# Read problem input.
BITS = ''.join(HEX_MAPPING[byte] for byte in fileinput.input()[0].strip())

# Problem guarantees the input consists of precisely 1 outer packet.
packets, _, total_version = parse_packets(BITS, num_subpackets=1, quiet=True)

print "Part 1:", total_version
print "Part 2:", packets[0]
Example #57
0
def replace_method(file1, searchexp, replaceexp):
    for line in fileinput.input(file1, inplace=1):
        if searchexp in line:
            line = line.replace(searchexp, replaceexp)
        sys.stdout.write(line)
Example #58
0
File: 25.py Project: soufuru/NLP100
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
25. Extracting templates

Extract the field names and values of the "基礎情報" (basic information) template
contained in the article, and store them as a dictionary object.
"""

import re
import fileinput

if __name__ == '__main__':
    result = {}
    # fileinput already yields one line at a time, so one loop suffices
    for line in fileinput.input("-"):
        m = re.match(r"(?:.*\|)?(.+)\s=\s(.+)", line)
        if m:
            result.update({m.group(1): m.group(2)})

    for key, obj in result.items():
        print("{0}, {1}".format(key, obj))
Example #59
0
def main():
  details_re = re.compile(r"^Details:\scpu=\d+u\+\d+s\s*(\((?P<appCpu>.*)\))?")
  app_cpu_usage_re = re.compile(
      r"(?P<uid>\S+)=(?P<userTime>\d+)u\+(?P<sysTime>\d+)s")
  proc_stat_re = re.compile((r"^/proc/stat=(?P<usrTime>-?\d+)\s+usr,\s+"
                             r"(?P<sysTime>-?\d+)\s+sys,\s+"
                             r"(?P<ioTime>-?\d+)\s+io,\s+"
                             r"(?P<irqTime>-?\d+)\s+irq,\s+"
                             r"(?P<sirqTime>-?\d+)\s+sirq,\s+"
                             r"(?P<idleTime>-?\d+)\s+idle.*")
                           )

  data_start_time = 0.0
  data_stop_time = 0
  data_stop_timestr = ""

  on_mode = False
  time_offset = 0.0
  overflowed = False
  reboot = False
  prev_battery_level = -1
  bhemitter = BHEmitter()
  emit_dict = {}              # maps event categories to events
  time_dict = {}              # total event time held per second
  highlight_dict = {}         # search result for -n option
  is_first_data_line = True
  is_dumpsys_format = False
  argv_remainder = parse_argv()
  input_file = argv_remainder[0]
  legacy_mode = is_file_legacy_mode(input_file)
  proc_stat_summary = {
      "usr": 0,
      "sys": 0,
      "io": 0,
      "irq": 0,
      "sirq": 0,
      "idle": 0,
      }

  if legacy_mode:
    input_string = LegacyFormatConverter().convert(input_file)
    input_file = StringIO.StringIO(input_string)
  else:
    input_file = open(input_file, "r")

  while True:
    line = input_file.readline()
    if not line: break

    if not on_mode and line.startswith("Battery History"):
      on_mode = True
      continue
    elif not on_mode:
      continue

    if line.isspace(): break

    line = line.strip()

    if "RESET:TIME: " in line:
      data_start_time = parse_reset_time(line)
      continue
    if "OVERFLOW" in line:
      overflowed = True
      break
    if "START" in line:
      reboot = True
      continue
    if "TIME: " in line:
      continue

    # escape spaces within quoted regions
    p = re.compile('"[^"]+"')
    line = p.sub(space_escape, line)

    if details_re.match(line):
      match = details_re.search(line)
      try:
        d = match.groupdict()
        if d["appCpu"]:
          for app in d["appCpu"].split(", "):
            app_match = app_cpu_usage_re.search(app)
            try:
              a = app_match.groupdict()
              save_app_cpu_usage(a["uid"],
                                 int(a["userTime"]) + int(a["sysTime"]))
            except IndexError:
              sys.stderr.write("App CPU usage line didn't match properly")
      except IndexError:
        sys.stderr.write("Details line didn't match properly")

      continue
    elif proc_stat_re.match(line):
      match = proc_stat_re.search(line)
      try:
        d = match.groupdict()
        if d["usrTime"]:
          proc_stat_summary["usr"] += int(d["usrTime"])
        if d["sysTime"]:
          proc_stat_summary["sys"] += int(d["sysTime"])
        if d["ioTime"]:
          proc_stat_summary["io"] += int(d["ioTime"])
        if d["irqTime"]:
          proc_stat_summary["irq"] += int(d["irqTime"])
        if d["sirqTime"]:
          proc_stat_summary["sirq"] += int(d["sirqTime"])
        if d["idleTime"]:
          proc_stat_summary["idle"] += int(d["idleTime"])
      except IndexError:
        sys.stderr.write("proc/stat line didn't match properly")
      continue

    # pull apart input line by spaces
    split_line = line.split()
    if len(split_line) < 4: continue
    (line_time, _, line_battery_level, fourth_field) = split_line[:4]

    # "bugreport" output has an extra hex field vs "dumpsys", detect here.
    if is_first_data_line:
      is_first_data_line = False
      try:
        int(fourth_field, 16)
      except ValueError:
        is_dumpsys_format = True

    if is_dumpsys_format:
      line_events = split_line[3:]
    else:
      line_events = split_line[4:]

    fmt = (r"\+((?P<day>\d+)d)?((?P<hrs>\d+)h)?((?P<min>\d+)m)?"
           r"((?P<sec>\d+)s)?((?P<ms>\d+)ms)?$")
    time_delta_s = parse_time(line_time, fmt) + time_offset
    if time_delta_s < 0:
      print "Warning: time went backwards: %s" % line
      continue

    event_time = data_start_time + time_delta_s
    if reboot and "TIME:" in line:
      # adjust offset using wall time
      offset, event_time = adjust_reboot_time(line, event_time)
      if offset < 0:
        print "Warning: time went backwards: %s" % line
        continue
      time_offset += offset
      time_delta_s = event_time - data_start_time
      reboot = False
      line_events = {"reboot"}

    if line_battery_level != prev_battery_level:
      # battery_level is not an actual event, it's on every line
      if line_battery_level.isdigit():
        bhemitter.handle_event(event_time, format_time(time_delta_s),
                               "battery_level=" + line_battery_level,
                               emit_dict, time_dict, highlight_dict)

    for event in line_events:
      # conn events need to be parsed in order to be useful
      if event.startswith("conn"):
        num, ev = get_after_equal(event).split(":")
        if ev == "\"CONNECTED\"":
          event = "+conn="
        else:
          event = "-conn="

        if num in conn_constants:
          event += conn_constants[num]
        else:
          event += "UNKNOWN"

      bhemitter.handle_event(event_time, format_time(time_delta_s), event,
                             emit_dict, time_dict, highlight_dict)

    prev_battery_level = line_battery_level
    data_stop_time = event_time
    data_stop_timestr = format_time(time_delta_s)

  input_file.close()
  if not on_mode:
    print "Battery history not present in bugreport."
    return

  bhemitter.emit_remaining_events(data_stop_time, data_stop_timestr,
                                  emit_dict, time_dict, highlight_dict)

  bhemitter.generate_summary_rows(emit_dict, data_start_time,
                                  data_stop_time)

  power_emitter = PowerEmitter(bhemitter.cat_list)
  if getopt_power_data_file:
    for line in fileinput.input(getopt_power_data_file):

      data = line.split(" ")
      secs = float(data[0]) + POWER_DATA_FILE_TIME_OFFSET
      amps = float(data[1])

      power_emitter.handle_line(secs, amps, emit_dict)

  power_emitter.bill(time_dict)

  printer = Printer()

  if not getopt_generate_chart_only:
    print "<!DOCTYPE html>\n<html><head>\n"
  report_filename = argv_remainder[0]
  if getopt_report_filename:
    report_filename = getopt_report_filename
  header = "Battery Historian analysis for %s" % report_filename
  print "<title>" + header + "</title>"
  if overflowed:
    print ('<font size="5" color="red">Warning: History overflowed at %s, '
           'many events may be missing.</font>' %
           time_float_to_human(data_stop_time, True))
  print "<p>" + header + "</p>"

  if legacy_mode:
    print("<p><b>WARNING:</b> legacy format detected; "
          "history information is limited</p>\n")

  if not getopt_generate_chart_only:
    print """
      <script src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.1/jquery.min.js"></script>
      <script type="text/javascript" src="https://www.google.com/jsapi?autoload={'modules':[{'name':'visualization','version':'1','packages':['timeline']}]}"></script>
    """

  print "<script type=\"text/javascript\">"

  if not getopt_disable_chart_drawing:
    print "google.setOnLoadCallback(drawChart);\n"

  print """
    var dataTable;
    var chart;
    var options;
    var default_width = 3000
function drawChart() {

    container = document.getElementById('chart');
    chart = new google.visualization.Timeline(container);

    dataTable = new google.visualization.DataTable();
    dataTable.addColumn({ type: 'string', id: 'Position' });
    dataTable.addColumn({ type: 'string', id: 'Name' });
    dataTable.addColumn({ type: 'date', id: 'Start' });
    dataTable.addColumn({ type: 'date', id: 'End' });
    dataTable.addRows([
"""
  printer.print_events(emit_dict, highlight_dict)
  print "]);"

  width = 3000      # default width
  height = 3000     # initial height
  printer.print_chart_options(emit_dict, highlight_dict, width, height)
  print """

  //make sure we allocate enough vertical space
  options['height'] = dataTable.getNumberOfRows() * 40;
  chart.draw(dataTable, options);

  //get vertical coordinate of scale bar
  var svg = document.getElementById('chart').getElementsByTagName('svg')[0];
  var label = svg.children[2].children[0];
  var y = label.getAttribute('y');
  //plus height of scale bar
  var chart_div_height = parseInt(y) + 50;
  var chart_height = chart_div_height;

  //set chart height to exact height
  options['height'] = chart_height;
  $('#chart').css('height', chart_div_height);
  svg.setAttribute('height', chart_height);
  var content = $('#chart').children()[0];
  $(content).css('height', chart_height);
  var inner = $(content).children()[0];
  $(inner).css('height', chart_height);
}


function redrawChart() {
    var scale = document.getElementById("scale").value;
    scale = scale.replace('%', '') / 100
    options['width'] = scale * default_width;
    chart.draw(dataTable, options);
}

</script>
<style>
#redrawButton{
width:100px;
}
</style>
"""
  if not getopt_generate_chart_only:
    print "</head>\n<body>\n"

  show_complete_time = False
  if data_stop_time - data_start_time > 24 * 60 * 60:
    show_complete_time = True
  start_localtime = time_float_to_human(data_start_time, show_complete_time)
  stop_localtime = time_float_to_human(data_stop_time, show_complete_time)

  print "<div id=\"chart\">"
  if not getopt_generate_chart_only:
    print ("<b>WARNING: Visualizer disabled. "
           "If you see this message, download the HTML then open it.</b>")
  print "</div>"
  print("<p><b>WARNING:</b>\n"
        "<br>*: wake_lock field only shows the first/last wakelock held \n"
        "when the system is awake. For more detail, use wake_lock_in."
        "<br>To enable full wakelock reporting (post-KitKat only) : \n"
        "<br>adb shell dumpsys batterystats "
        "--enable full-wake-history</p>")

  if getopt_proc_name:
    if len(bhemitter.match_list) > 1:
      print("<p><b>WARNING:</b>\n"
            "<br>Multiple match found on -n option <b>%s</b>"
            "<ul>" % getopt_proc_name)
      for match in bhemitter.match_list:
        print "<li>%s</li>" % match
      print ("</ul>Showing search result for %s</p>"
             % bhemitter.match_list[0].split(":", 1)[0])
    elif not bhemitter.match_list:
      print("<p><b>WARNING:</b>\n"
            "<br>No match on -n option <b>%s</b></p>" % getopt_proc_name)

    if not highlight_dict:
      print ("Search - <b>%s</b> in <b>%s</b> - did not match any event"
             % (getopt_proc_name, getopt_highlight_category))

  print ("<pre>(Local time %s - %s, %dm elapsed)</pre>"
         % (start_localtime, stop_localtime,
            (data_stop_time-data_start_time) / 60))

  print ("<p>\n"
         "Zoom: <input id=\"scale\" type=\"text\" value=\"100%\"></input>"
         "<button type=\"button\" id=\"redrawButton\""
         "onclick=\"redrawChart()\">redraw</button></p>\n"
         "</p>\n")

  power_emitter.report()

  if app_cpu_usage:
    print "<b>App CPU usage:</b>"
    print "<table border=\"1\"><tr><td>UID</td><td>Duration</td></tr>"
    for (uid, use) in sorted(app_cpu_usage.items(), key=lambda x: -x[1]):
      print "<tr><td>%s</td>" % uid
      print "<td>%s</td></tr>" % format_duration(use)
    print "</table>"

  print "<br /><b>Proc/stat summary</b><ul>"
  print "<li>Total User Time: %s</li>" % format_duration(
      proc_stat_summary["usr"])
  print "<li>Total System Time: %s</li>" % format_duration(
      proc_stat_summary["sys"])
  print "<li>Total IO Time: %s</li>" % format_duration(
      proc_stat_summary["io"])
  print "<li>Total Irq Time: %s</li>" % format_duration(
      proc_stat_summary["irq"])
  print "<li>Total Soft Irq Time: %s</li>" % format_duration(
      proc_stat_summary["sirq"])
  print "<li>Total Idle Time: %s</li>" % format_duration(
      proc_stat_summary["idle"])
  print "</ul>"

  print "<pre>Process table:"
  print bhemitter.procs_to_str()
  print "</pre>\n"

  if not getopt_generate_chart_only:
    print "</body>\n</html>"
Example #60
0
# Current frequency  0, change of +1; resulting frequency  1.
# Current frequency  1, change of -2; resulting frequency -1.
# Current frequency -1, change of +3; resulting frequency  2.
# Current frequency  2, change of +1; resulting frequency  3.
# In this example, the resulting frequency is 3.

# Here are other example situations:

# +1, +1, +1 results in  3
# +1, +1, -2 results in  0
# -1, -2, -3 results in -6

import fileinput

frequencies = list(fileinput.input())


def part1():
    # Treat each entry as integer and add them together
    return sum(map(int, frequencies))


def part2():

    # Initial frequency
    frequency_value = 0

    # Set to store witnessed frequency values
    seen = {frequency_value}