Example no. 1
def main_style(args):
    if args.ci:
        # When the command is run in CI mode all the other parameters are ignored
        selected_modules = automation_path.filter_user_selected_modules(None)

        # Run pylint on all modules
        return_code_sum = run_pylint(selected_modules)

        # Run flake8 on modules
        return_code_sum += run_pep8(selected_modules)

        sys.exit(return_code_sum)

    selected_modules = automation_path.filter_user_selected_modules(args.modules)
    if not selected_modules:
        sys.stderr.write('No module is selected.\n')
        sys.exit(1)

    if not args.suites:
        return_code_sum = run_pylint(selected_modules) + run_pep8(selected_modules)
    else:
        return_code_sum = 0
        if 'pep8' in args.suites:
            return_code_sum += run_pep8(selected_modules)

        if 'pylint' in args.suites:
            return_code_sum += run_pylint(selected_modules)

    sys.exit(return_code_sum)
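For reference, sys.stderr is a file object, not a callable, so diagnostics go through sys.stderr.write() or, on Python 3, print(..., file=sys.stderr). A minimal sketch of the write-and-exit pattern used throughout these examples; the helper name fail is only illustrative:

import sys

def fail(message, status=1):
    # Write the diagnostic to standard error and terminate with a non-zero status.
    sys.stderr.write(message + "\n")
    sys.exit(status)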
Example no. 2
 def check_user_config(self):
     if not os.path.isdir(self.user_config_dir):
         try:
             os.mkdir(self.user_config_dir, 0777)
         except (IOError, os.error), value:
             sys.stderr.write("Cannot write preferences into %s.\n" % self.user_config_dir)
             sys.exit(1)
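On Python 3 the same directory check can be done with os.makedirs and exist_ok; a minimal sketch, assuming user_config_dir holds the target path:

import os
import sys

def ensure_config_dir(user_config_dir):
    try:
        # Create the directory (and any missing parents) if it does not already exist.
        os.makedirs(user_config_dir, mode=0o777, exist_ok=True)
    except OSError:
        sys.stderr.write("Cannot write preferences into %s.\n" % user_config_dir)
        sys.exit(1)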
Example no. 3
def pepperoni(source, d, backup, write, rules):
    """
    Pepperoni will read the python file SOURCE and modify it to match PEP008 standards
    """

    if source:
        with open(source, "rb") as fh:
            data = fh.readlines()

        if data:
            corrected = utils.parse_file(data, rwd=rules)
            if d:
                dest = d
            else:
                if backup:
                    dest = utils.bak_file(source)
                else:
                    dest = source

            if write:

                with open(source, "wb") as fh:
                    fh.writelines(data)
                with open(dest, "wb") as fh:
                    fh.writelines(corrected)
            else:
                sys.stderr.writelines(corrected)
    else:
        print "Warning: No python file passed. Nothing was changed."
Example no. 4
File: steps.py Project: bcmd/BCMD
def makeNormalSequence ( bc, time=0, outfields=['*'], detfields=['*'], outhead=True, dethead=True ):
    seq = []
    params = bc['header'].get('chosen_param', None)
    timestep = bc['header'].get('time_step', None)
    
    if not params:
        return seq
    
    if ( timestep ):
        timestep = float(timestep[0])
        for row in bc['data']:
            if len(row) != len(params):
                print >> sys.stderr, 'Data row length does not match declared parameters'
            seq.append({'type':'=', 'n':1, 'start':time, 'end':time + timestep,
                        'duration':timestep, 'setfields':params, 'setvalues':row,
                        'outfields':outfields, 'detfields':detfields,
                        'outhead':outhead, 'dethead':dethead})
            time = time + timestep
            outhead = False
            dethead = False
    else:
        for row in bc['data']:
            duration = float(row[0])
            if len(row) != len(params) + 1:
                print >> sys.stderr, 'Data row length does not match declared parameters'
            seq.append({'type':'=', 'n':1, 'start':time, 'end':time + duration,
                        'duration':duration, 'setfields':params, 'setvalues':row[1:],
                        'outfields':outfields, 'detfields':detfields,
                        'outhead':outhead, 'dethead':dethead})
            time = time + duration
            outhead = False
            dethead = False
    
    return seq
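A short usage sketch for makeNormalSequence, assuming bc follows the structure implied by the lookups above (a 'header' dict with 'chosen_param' and optional 'time_step' lists, and a 'data' list of rows):

bc = {
    'header': {'chosen_param': ['a', 'b'], 'time_step': ['0.5']},
    'data': [[1.0, 2.0], [3.0, 4.0]],
}
seq = makeNormalSequence(bc)
# seq[0] covers t=0.0..0.5 with a=1.0, b=2.0; seq[1] covers t=0.5..1.0 with a=3.0, b=4.0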
Example no. 5
def make_cells(args):

  if len(args) < 2:
    print >> sys.stderr, 'expected 2 arguments'
    sys.exit(1)

  size = args[0]
  seed_size = args[1]

#cell  cell0 (clk, op, 0, rule, state[4:3], state[1:0], state[2]);

  for i in xrange(size - 1, -1, -1):

    r = ((i - 2 + size) % size, (i - 1 + size) % size)
    l = ((i + 1) % size, (i + 2) % size)

    if l[0] + 1 == l[1]:
      left = 'state[%d:%d]' % (l[1], l[0])
    else:
      left = '{state[%d], state[%d]}' % (l[1], l[0])

    if r[0] == r[1] - 1:
      right = 'state[%d:%d]' % (r[1], r[0])
    else:
      right = '{state[%d], state[%d]}' % (r[1], r[0])

    cell = 'state[%d]' % (i)

    if i < seed_size:
      seed = 'seed[%d]' % (i)
    else:
      seed = '1\'b0';

    print 'cell_state cell' + repr(i) + '(clk, op, ' + seed + ', rule, ' + \
          left + ', ' + cell + ', ' + right + ');'
Example no. 6
 def add_color(self, html_color, name):
     """Add color to used color, if there is no name for color, method will generate some name"""
     self.add_package("color")
     #hex->dec
     if len(html_color) == 4:  #triple color code
         color = (int(html_color[1], 16), int(html_color[2], 16), int(html_color[3], 16))
     else:
         color = (int(html_color[1:3], 16), int(html_color[3:5], 16), int(html_color[5:7], 16))
     #get name
     if name:
         if name in self._defined_colors and self._defined_colors[name] == color:
             return name         #we have already defined this color
         if name in self._defined_colors and not self._defined_colors[name] == color:
             #we have same name but different color codes, so we create new name by adding number to it
             i = 1
             while name + str(i) in self._defined_colors:
                 i += 1
             self._defined_colors[name + str(i)] = color
             self._other.append("\\definecolor{" + name + str(i) + "}{RGB}{" + ",".join((str(x) for x in color)) + "}")
             return name + str(i)
         #we have unique name so we just add it
         self._defined_colors[name] = color
         self._other.append("\\definecolor{" + name + "}{RGB}{" + ",".join((str(x) for x in color)) + "}")
         return name
     else:
         sys.stderr.write("Invalid name for color\n")
Example no. 7
def get_environ():
        environ = {}
        environ["KONIX_PLATFORM"] = sys.platform
        logging.info("Installing for platform "+environ["KONIX_PLATFORM"])
        KONIX_PWD = os.getcwd().replace("\\","/")
        environ["KONIX_DEVEL_DIR"] = KONIX_PWD
        environ["KONIX_BIN_DIR"] = environ["KONIX_DEVEL_DIR"]+"/"+"bin"
        environ["KONIX_LIB_DIR"] = environ["KONIX_DEVEL_DIR"]+"/"+"lib"
        environ["KONIX_SHARE_DIR"] = environ["KONIX_DEVEL_DIR"]+"/"+"share"
        environ["KONIX_SRC_DIR"] = environ["KONIX_DEVEL_DIR"]+"/"+"src"
        # add the lib dir to sys path in order to use the which lib so that I
        # can find python executable. sys.executable won't work with cygwin
        sys.path.insert(0, environ["KONIX_LIB_DIR"])
        import which
        python_bin = which.which("python").replace("\\", "/")
        if python_bin == "":
            sys.stderr.write("Python must be in the path for that install to work\n")
            exit(1)
        environ["PYTHON_BIN"] = python_bin
        logging.info("Python bin is : "+python_bin)
        environ["PYTHON_PATH"] = os.path.dirname(python_bin)
        environ["KONIX_PWD"] = KONIX_PWD
        environ["KONIX_CONFIG_DIR"] = KONIX_PWD+"/"+"config"
        environ["KONIX_TUNING_DIR"] = KONIX_PWD+"/"+"tuning"
        environ["HOME"] = os.path.expanduser("~").replace("\\","/")
        environ["KONIX_PERSO_DIRS"] = os.path.join(environ["HOME"], "perso")
        environ["KONIX_PERSO_DIR"] = os.path.join(environ["KONIX_PERSO_DIRS"], "perso")
        environ["PATH_SEPARATOR"] = os.pathsep
        environ["HOSTNAME"] = os.environ["HOSTNAME"]
        environ["KONIX_SH_CUSTOM_FILE"] = environ["HOME"]+"/"+".shrc_custo"
        environ["KONIX_EMACS_CUSTOM_FILE"] = environ["HOME"]+"/"+".emacs_custo"
        return environ
Example no. 8
def main():
  if len(sys.argv) < 3:
    print >> sys.stderr, ('Usage: %s <JSON files...> <output C++ file>' %
                          sys.argv[0])
    print >> sys.stderr, sys.modules[__name__].__doc__
    return 1

  cpp_code = CC_HEADER
  for json_file in sys.argv[1:-1]:
    with open(json_file, 'r') as f:
      json_text = f.read()
    config = json.loads(json_text)
    if 'monitored_domain' not in config:
      print >> sys.stderr, '%s: no monitored_domain found' % json_file
      return 1
    domain = config['monitored_domain']
    if not domain_is_whitelisted(domain):
      print >> sys.stderr, ('%s: monitored_domain "%s" not in whitelist' %
                            (json_file, domain))
      return 1
    cpp_code += "  // " + json_file + ":\n"
    cpp_code += quote_and_wrap_text(json_text) + ",\n"
    cpp_code += "\n"
  cpp_code += CC_FOOTER

  with open(sys.argv[-1], 'wb') as f:
    f.write(cpp_code)

  return 0
Example no. 9
def select_from_x_pictures(num_to_get, subreddit):

    try:
        reddit = praw.Reddit(user_agent='wallpaper_grabber')
        subs = reddit.get_subreddit(subreddit).get_hot(limit=num_to_get)
    except praw.errors.InvalidSubreddit:
        sys.stderr.write("Subreddit doesn't exist\n")
        sys.exit()

    filter_name = "^(.*[\\\/])"  # This removes everything but after the last /

    image = None
    submissions = []
    attempts = 0
    for img in subs:
        if ".png" in img.url.lower() or ".jpg" in img.url.lower():
            submissions.append(img.url)

    while attempts < num_to_get:
        try:
            check_file_exits()
            image = random.choice(submissions)
            filename = "wallpapers/" + re.sub(filter_name, '', image)
            file = requests.get(image)
            with open(filename, 'wb') as img:
                img.write(file.content)
                image = filename
        except:
            sys.stderr.write("Problem with downloading image")
            attempts += 1
            continue
        return image
Example no. 10
def eq_(a, b, msg=''):
    '''
    An assert with extra error messaging to make it easy to compare the objects    
    '''
    if a != b:
        sys.stderr.write("Assert Equality Error:\nA: %s\nB: %s\n%s\n" % (a, b, msg))
    assert a == b, msg
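A short usage sketch for eq_ as defined above:

eq_(2 + 2, 4)                       # passes silently
eq_('foo', 'bar', 'values differ')  # writes the mismatch to stderr, then the assert fails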
Example no. 11
def belongs_to_category(fasta_header, label_to_keep,  seq_label ):
    #Verify if label corresponds to the read header:
    if fasta_header.find(seq_label[0]) > -1:
        return label_to_keep == seq_label[1]
    else:
        sys.stderr.write("ERROR! Read label does not correspond to fasta line\n")
        raise Exception("Fail :(")
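A short usage sketch for belongs_to_category, assuming seq_label is a (read id, label) pair as implied by the indexing above:

belongs_to_category(">read42 length=150", "virus", ("read42", "virus"))  # True
belongs_to_category(">read42 length=150", "host", ("read42", "virus"))   # False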
Example no. 12
 def whoops(func, path, exc_info):
     message = 'Could not remove %s' % path
     if logger:
         logger.error(message)
     else:
         sys.stderr.write(message + '\n')
     sys.exit(1)
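The (func, path, exc_info) signature above matches the onerror callback of shutil.rmtree, so a plausible usage sketch (with an illustrative path) is:

import shutil

# Remove the tree; whoops logs (or prints) the failure and exits on the first error.
shutil.rmtree('/tmp/build-artifacts', onerror=whoops)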
Example no. 13
def get_emit_index(input_val, alphabet):
    
    for i in range(0, len(alphabet)):
        if alphabet[i] == input_val:
            return i

    sys.stderr.write("Could not find character " + input_val + "\n")
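A short usage sketch for get_emit_index as defined above:

get_emit_index('G', ['A', 'C', 'G', 'T'])  # returns 2
get_emit_index('N', ['A', 'C', 'G', 'T'])  # no match: writes to stderr and returns None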
Example no. 14
def communityMining(G, minCommSize=10):
  """
  Find communities in the graph 'G' with more than 'minCommSize' nodes.
  """
  count = 0
  dendrogram = community.generate_dendrogram(G)
  firstPartition = community.partition_at_level(dendrogram,0)
  
  sys.stderr.write("Prune sparse clusters. ")
  #remove early small communities 
  sparseComm = set([k for k,v in Counter(firstPartition.values()).iteritems() if v<minCommSize])
  nodes = [node for node in G.nodes() if firstPartition[node] in sparseComm]
  G.remove_nodes_from(nodes)

  sys.stderr.write("Find communities. ")
  # Partition again the graph and report big communities:
  dendrogram = community.generate_dendrogram(G)
  partition = community.partition_at_level(dendrogram,len(dendrogram)-2)
  allfqdns =  set(n for n,d in G.nodes(data=True) if d['bipartite']==1)
  allHosts = set(n for n,d in G.nodes(data=True) if d['bipartite']==0)
  size = float(len(set(partition.values())))
  communities = []
  
  bigComm = [k for k,v in Counter(partition.values()).iteritems() if v>minCommSize]
  for com in bigComm :
    comfqdns = [nodes for nodes in allfqdns if partition[nodes] == com]
    comHosts = [nodes for nodes in allHosts if partition[nodes] == com]
    comm = G.subgraph(comfqdns+comHosts) 
    if comm.order() < minCommSize :
        sys.stderr.write("Remove small community (This shouldn't happen here?)\n")
        continue

    communities.append(comm)
    
  return communities 
Example no. 15
 def daemon(self):
     try:
         if os.fork():
             sys.exit(0)
     except OSError, e:
         sys.stderr.write('%s, %d, %s\n' % ('Fork #1 failed', e.errno, e.strerror))
         sys.exit(1)
Example no. 16
def MakeConfig(opts):
    """
    MakeConfigurationParameters

    Create Configuration list from command options
    """
    prof_list = []
    cnt = 0
    for opt, arg in opts:
        if opt == "--config":
            try:
                # For C++ scope resolution operator
                arg = re.sub("::", "@@", arg)
                name, type, default = arg.split(":")
                name    = re.sub("@@", "::", name)
                type    = re.sub("@@", "::", type)
                default = re.sub("@@", "::", default)
            except:
                sys.stderr.write("Invalid option: " + opt + "=" + arg + "\n")
            prof = Struct()
            prof.name = name
            prof.l_name = name.lower()
            prof.u_name = name.upper()
            prof.type = type
            prof.default = default
            prof_list.append(prof)
            cnt += 1
    return prof_list
Example no. 17
def parse_GDS_columns(lines, subsets):
    """Parse list of line with columns description from SOFT file
    of GDS (GEO Dataset)

    :param lines: iterable -- iterator over lines
    :returns: pandas.DataFrame -- columns description

    """
    data = []
    index = []
    for line in lines:
        line = line.rstrip()
        if line.startswith("#"):
            tmp = __parse_entry(line)
            data.append(tmp[1])
            index.append(tmp[0])

    df = DataFrame(data, index=index, columns=['description'])
    subset_ids = {"disease_state": {}, "individual": {}}
    for subsetname, subset in subsets.iteritems():
        for expid in subset.metadata["sample_id"][0].split(","):
            if subset.get_type() == "disease state":
                subset_ids["disease_state"][expid] = subset.metadata["description"][0]
            elif subset.get_type() == "individual":
                subset_ids["individual"][expid] = subset.metadata["description"][0]
            else:
                stderr.write("Unknown subset type: %s for subset %s\n" % (subset.get_type(), subsetname))

    return df.join(DataFrame(subset_ids))
Example no. 18
def build_file(infile, outfile):

    try:
        f = open(infile)
    except IOError, msg:
        sys.stderr.write("Could not open input file '%s' : %s\n" % (infile, msg))
        sys.exit(1)
Example no. 19
def make_h10giao(mol, dm0, with_gaunt=False, verbose=logger.WARN):
    if isinstance(verbose, logger.Logger):
        log = verbose
    else:
        log = logger.Logger(mol.stdout, verbose)
    log.debug('first order Fock matrix / GIAOs')
    n4c = dm0.shape[0]
    n2c = n4c // 2
    c = mol.light_speed

    tg = mol.intor('cint1e_spgsp', 3)
    vg = mol.intor('cint1e_gnuc', 3)
    wg = mol.intor('cint1e_spgnucsp', 3)

    vj, vk = _call_giao_vhf1(mol, dm0)
    h1 = vj - vk
    if with_gaunt:
        sys.stderr.write('NMR gaunt part not implemented\n')
#TODO:        import pyscf.lib.pycint as pycint
#TODO:        vj, vk = scf.hf.get_vj_vk(pycint.rkb_giao_vhf_gaunt, mol, dm0)
#TODO:        h1 += vj - vk

    for i in range(3):
        h1[i,:n2c,:n2c] += vg[i]
        h1[i,n2c:,:n2c] += tg[i] * .5
        h1[i,:n2c,n2c:] += tg[i].conj().T * .5
        h1[i,n2c:,n2c:] += wg[i]*(.25/c**2) - tg[i]*.5
    return h1
Example no. 20
def writeFile(myQifData, fileName):
  try:
    saveData = open(fileName, "w")
    saveData.write(myQifData)
    saveData.close()

    print "File %s written successfully!" % (fileName)
  except:
    sys.stderr.write("Error: could not write the file\n")
Example no. 21
    def getSample(readId, samples):
        for i in samples:
            s = readId.find(i)
            if s == 0:
                e = len(i)
                return i[s:e]
        sys.stderr.write("Read name does not contain a valid dataset name from config file\n")
        exit(1)
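A short usage sketch for getSample as defined above:

samples = ['sampleA', 'sampleB']
getSample('sampleA_read_0001', samples)  # returns 'sampleA'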
Example no. 22
def _get_manage_dot_py(host):
    sys.stderr.write('{path}/virtualenv/bin/python {path}/source/manage.py\n'.format(
        path=_get_base_folder(host)
    ))
    return '{path}/virtualenv/bin/python {path}/source/manage.py'.format(
        path = _get_base_folder(host)
    )
Example no. 23
 def _ReadProcfs(procfs_entry_pathname):
   try:
     with open(procfs_entry_pathname, 'r') as procfs_entry:
       val = procfs_entry.read().strip()
   except IOError:
     val = None
     sys.stderr.write('procfs entry not found: %s\n' % procfs_entry_pathname)
   return val
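A short usage sketch for _ReadProcfs, assuming a Linux-style /proc filesystem:

kernel_release = _ReadProcfs('/proc/sys/kernel/osrelease')
max_pid = _ReadProcfs('/proc/sys/kernel/pid_max')
# Either value is None (with a message on stderr) if the entry does not exist.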
Example no. 24
def main():
    if len(sys.argv) < 4:
        sys.stderr.write('concatfiles.py resulting_file file_name_1 file_name_2 (file_name_3) ... (file_name_n)\n')
        sys.exit(1)

    resulting_file_path = sys.argv[1]
    files_to_concatenate = sys.argv[2:]
    concatenate_files(files_to_concatenate, resulting_file_path)
Example no. 25
def get_inner_product(list1, list2):
    if len(list1) != len(list2):
        sys.stderr.write('len(list1) != len(list2)\n')
        sys.exit(-1)
    _sum = 0.0
    for _i in range(len(list1)):
        _sum += list1[_i] * list2[_i]
    return _sum
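A short usage sketch for get_inner_product as defined above:

get_inner_product([1.0, 2.0, 3.0], [4.0, 5.0, 6.0])  # returns 32.0
get_inner_product([1.0], [1.0, 2.0])                 # length mismatch: writes to stderr and exits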
Example no. 26
def mutate2(m2, m1_pdb, trash, filename, keep_files, obs):
    """
    mutate residue 2
    """

    print "mutate2: " + m2
    file = open(filename, "r")
    temp = open("temp.txt", "w")
    for line in file:
        if line[0:6:] == "<PDBS>":
            temp.write(line[0:6:] + m1_pdb + ";\n")
        elif line[0:14:] == "<PositionScan>":
            temp.write(line[0:14:] + "to_delete.txt")
            temp.write("," + m2)
            temp.write(";\n")
        else:
            temp.write(line)
    temp.close()
    file.close()
    os.rename("temp.txt", filename)
    os.system(foldx + filename + " > /dev/null")

    m2_mut_new_name = "".join((m1_pdb[:-4], "_", m2, ".pdb"))

    nrg = "".join(("energies_", m2[2:-1], "_", m1_pdb[:-4], ".txt"))
    nrg_new_name = "".join((m2_mut_new_name[:-4], ".txt"))
    nrg_path = "".join((run_dir, "/", nrg))
    if keep_files or obs:
        nrg_copy = "".join((nrg_dir, "/", nrg_new_name))
        shutil.copyfile(nrg_path, nrg_copy)
    nrg_rename = "".join((run_dir, "/", nrg_new_name))
    while True:
        try:
            os.rename(nrg_path, nrg_rename)
        except OSError:
            os.system(foldx + filename + " > /dev/null")
        else:
            break

    trash.append(nrg_rename)

    if os.path.isfile("to_delete.txt"):
        os.remove("to_delete.txt")
    if os.path.isfile("".join((run_dir, "/", aa_table()[m2[0]], m2[2:-1], "_", m1_pdb))):
        os.remove("".join((run_dir, "/", aa_table()[m2[0]], m2[2:-1], "_", m1_pdb)))

    m2_mut = "".join((aa_table()[m2[-1]], m2[2:-1], "_", m1_pdb))

    m2_path = "".join((run_dir, "/", m2_mut))
    if keep_files or obs:
        m2_copy = "".join((pdb_dir, "/", m2_mut_new_name))
        shutil.copyfile(m2_path, m2_copy)
    m2_rename = "".join((run_dir, "/", m2_mut_new_name))
    try:
        os.rename(m2_path, m2_rename)
    except OSError:
        sys.stderr.write(m2_path + " not found\n")
    trash.append(m2_rename)
Example no. 27
def node_insert_after (parent_node, old_node, new_node):
    #this is not working:
    #parent_node.insert (parent_node.getchildren().index(old_node) + 1, new_node)
    for i, node in enumerate (parent_node.getchildren()):
        if node.attrib[IDTAG] == old_node.attrib[IDTAG]:
            break
    else:
        sys.stderr.write("Error placing new tag - putting at the end of parent tag\n")
    parent_node.insert (i + 1, new_node)
Example no. 28
    def raise_error(self, type="UnknownErrorType", message="No message"):
        """Raises an error

        Logs an error using logging, and then exits
        """

        logging.error(type + ": " + message)
        sys.stderr.write(type + ": " + message + "\n")
        exit(self.ERR_NO)
Example no. 29
def get_map_template():
    global map_template
    if map_template is None:
        with open("map-template.html", 'r') as infile:
            map_template = infile.read()    
    if map_template is None:
        stderr.write("ERROR: cannot find HTML template: map-template.html\n")
        sysexit(1)
    return map_template
Example no. 30
def createDatabase(db_file, force = True):
    """Create BLAST db out of elements file"""
    if force or not os.path.isfile(db_file + ".nhr"):
        cmd = "makeblastdb -in %s -dbtype nucl" % (db_file)
        p = subprocess.Popen(re.split("\s+", cmd), stderr=subprocess.PIPE, stdout=subprocess.PIPE)
        output, err = p.communicate()
        if err:
            sys.stderr.write("makeblastdb error:\n" + err.decode() + "\n")
            sys.stderr.write('-------------------------\n')
Example no. 31
def main():
    """Build and package Python2."""
    parser = argparse.ArgumentParser(prog='Python setup',
                                     description='This script will compile '
                                     'Python 1.0.1+ and optionally create '
                                     'a native macOS package.')
    parser.add_argument('-b',
                        '--build',
                        action='store_true',
                        help='Compile the Python binary')
    parser.add_argument('-s',
                        '--skip',
                        action='store_true',
                        help='Skip recompiling if possible. Only recommended '
                        'for development purposes.')
    parser.add_argument('-p',
                        '--pkg',
                        action='store_true',
                        help='Package the Python output directory.')
    parser.add_argument('-v',
                        '--verbose',
                        action='count',
                        default=1,
                        help="Increase verbosity level. Repeatable up to "
                        "2 times (-vv)")
    parser.add_argument('--py',
                        default='2',
                        help='Python version to build. Accepts 2 or 3.')
    if len(sys.argv) < 2:
        parser.print_help()
        sys.exit(1)
    args = parser.parse_args()

    # set argument variables
    log.verbose = args.verbose
    skip = args.skip

    root.root_check()

    # Check for OpenSSL. If it isn't on disk in the proper location
    # we can't link against it.
    if not os.path.isdir(OPENSSL_INSTALL_PATH):
        log.warn("OpenSSL must be installed to '{}' prior to compiling "
                 "Python.".format(OPENSSL_INSTALL_PATH))
        sys.exit(1)

    if str(args.py) == '2':
        dist_url = CONFIG['python2_dist']
        dist_hash = CONFIG['python2_dist_hash']
        py_install_path = PYTHON2_INSTALL
        py_version = PYTHON2_VERSION
    elif str(args.py) == '3':
        dist_url = CONFIG['python3_dist']
        dist_hash = CONFIG['python3_dist_hash']
        py_install_path = PYTHON3_INSTALL
        py_version = PYTHON3_VERSION
    else:
        sys.stderr.write('Unsupported python version\n')
        sys.exit(1)

    if args.build:
        log.info("Building Python...")

        # When the skip option is passed and the build directory exists, skip
        # download and compiling of Python. Note we still do linking.
        if skip:
            log.debug("Skip flag was provided. We will not compile Python "
                      "on this run.")
        else:
            dl_and_extract_python(dist_url, dist_hash)
            # reset trigger flag as we needed to download Python
            skip = False

        build(py_version, py_install_path, skip=skip)

    if args.pkg:
        log.info("Building a package for Python...")
        # Change back into our local directory so we can output our package
        # via relative paths
        os.chdir(CURRENT_DIR)
        rc = package.pkg(
            root=py_install_path,
            version=py_version,
            identifier="{}.python".format(CONFIG['pkgid']),
            install_location=py_install_path,
            output='python-{}.pkg'.format(py_version),
        )
        if rc == 0:
            log.info("Python packaged properly")
        else:
            log.error("Looks like package creation failed")
Example no. 32
emu=False
clownr=False
for o,a in opts:
    if o in ("-h", "--help"):
        usage()
        sys.exit(0)
    elif o=="-r":
        reduced=True
    elif o=="--emu":
        emu=True
    elif o=="--clownr":
        clownr=True
    else:
        assert False, "unhandled option"
if not args:
    sys.stderr.write("Expecting ftbl file name\n")
    usage()
fullsys=not reduced
C13_ftbl.clownr=clownr
fftbl=args[0]
if fftbl and fftbl[-5:] != ".ftbl":
    fftbl+=".ftbl"
if fftbl and not os.path.exists(fftbl):
    sys.stderr.write(me+": file '"+fftbl+"' does not exist.\n")
    sys.exit(1)

# what kind of output we have?
mode=os.fstat(1).st_mode
f=sys.stdout if stat.S_ISFIFO(mode) or stat.S_ISREG(mode) else  open(fftbl[:-4]+"sys", "w")

# parse ftbl
Example no. 33
    odargs, _ = odparser.parse_known_args()

    jobmon_parser = construct_parser_jobmon()
    jobmon_args, _ = jobmon_parser.parse_known_args()

    sj = get_SGEJobInstance(odargs.out_dir, create_connection_config_from_args(jobmon_args))
    sj.log_started()

    parser = get_input_args.construct_parser_burdenator()
    args = get_input_args.get_args_and_create_dirs(parser)

    location_id = args.location_id
    year_id = args.year_id

    logger = logging.getLogger(__name__)
    logger.info(
        "Arguments passed to remote_run_pipeline_burdenator {}".format(args))
    pipeline.run_pipeline_burdenator(args)
    sj.log_completed()
except Exception as e:
    # Add the exception to the log file, and also report it centrally to the job monitor.
    # Do not send to stderr
    message = "remote_run_pipeline_burdenator {l}, {y} failed with an uncaught exception: {ex}" \
        .format(l=location_id, y=year_id, ex=e)
    if logger:
        logger.error(message)
    else:
        stderr.write(message + "\n")
    sj.log_error(message)
    sj.log_failed()
Example no. 34
    sys.exit(1)

q, numProcs, account, title, jobFile = pArgs

if q in qs:
    queue, group = qs[q]
else:
    if ':' in q:
        queue, group = q.split(':', 1)
    else:
        queue, group = q, q

if queue == 'general':
    print >>sys.stderr, '''\
Keep in mind when submitting to queue "general" that jobs running on
this queue can be pre-empted.  An effort is made to clean up, but it
would be wise to check for run-away processes if this submission does
not run to completion.'''

if group: group = '-W group_list=%s'%group

mppn = '4'
if '.' in numProcs:
    numProcs, mppn = numProcs.split('.', 1)

# We assume that related script lives in the same directory as this script.
myDir = os.path.dirname(os.path.realpath(__file__))+os.path.sep
sqScript = myDir + 'SQDedDriver.py'
PBSScript = open(myDir + 'SQDedPBSScriptTemplate.py').read()%locals()

print PBSScript
Example no. 35
import subprocess
import sys

HOST="localhost"

#ports are handled in ~/.ssh_config
COMMAND="uname -a"

ssh=subprocess.Popen(["ssh", "%s" % HOST, COMMAND],
                      shell=False,
                      stdout=subprocess.PIPE,
                      stderr=subprocess.PIPE,)
result=ssh.stdout.readlines()

if result==[]:
    error=ssh.stderr.readlines()
    sys.stderr.write("ERROR: %s\n" % error)
else:
    print(result)
Example no. 36
def main():
    if len(sys.argv) != 3:
        sys.stderr.write('makejson.py (input file name) [output js file]\n')
        sys.exit(1)

    makejsresource(sys.argv[1], sys.argv[2])
Example no. 37
    provided encoding to read and write.
    """
    with io.open(fname, "r", encoding=encoding) as f, \
         io.open(fname+".tmp", "w", encoding=encoding) as outf:
        have_changed = update_version_in(f, outf, regex, versionline)

    replace_on_change(fname, have_changed)


# Find out our version
with open("configure.ac") as f:
    version = find_version(f)

# If we have no version, we can't proceed.
if version is None:
    print("No version found in configure.ac", file=sys.stderr)
    sys.exit(1)

print("The version is {}".format(version))

today = time.strftime("%Y-%m-%d", time.gmtime())


# In configure.ac, we replace the definition of APPROX_RELEASE_DATE
# with "{today} for {version}", but only if the version does not match
# what is already there.
def replace_fn(m):
    if m.group(1) != version:
        # The version changed -- we change the date.
        return u'AC_DEFINE(APPROX_RELEASE_DATE, ["{}"], # for {}'.format(
            today, version)
Example no. 38
def main(argv=None):
    CMD_CREATE_BASE = "create-base"
    CMD_EXPORT_BASE = "export-base"
    CMD_IMPORT_BASE = "import-base"
    CMD_CREATE_OVERLAY = "create-overlay"
    CMD_DOWNLOAD = "download"
    CMD_SYNTHESIS = "synthesis"
    CMD_HANDOFF = "handoff"
    CMD_HANDOFF_RECV = "handoff-recv"
    CMD_EXT_LIST = "ext-list"
    commands = {
        CMD_CREATE_BASE: "create base vm from the running instance",
        CMD_CREATE_OVERLAY: "create VM overlay from the customized VM",
        CMD_DOWNLOAD: "Download VM overlay",
        CMD_SYNTHESIS: "VM Synthesis (Need downloadable URLs for VM overlay)",
        CMD_HANDOFF: "Perform VM handoff to destination URL",
        CMD_HANDOFF_RECV: "Send handoff recv message to the dest OpenStack",
        CMD_EXT_LIST: "List available extensions",
        CMD_EXPORT_BASE: "Export Base VM",
        CMD_IMPORT_BASE: "Import Base VM",
    }

    settings, args = process_command_line(sys.argv[1:], commands)
    token, endpoint, glance_endpoint = \
        get_token(settings.server_address, settings.user_name,
                  settings.password, settings.tenant_name)
    sys.stdout.write("Success to log in to %s for tenant %s..\n" % \
        (settings.server_address, settings.tenant_name))

    if len(args) < 1:
        sys.stderr.write("Need command")
        sys.exit(1)
    if args[0] == CMD_CREATE_BASE:
        if len(args) != 3:
            msg = "Error: creating Base VM needs [VM UUID] and [new name]\n"
            msg += " 1) VM UUID: UUID of a running instance that you want to use for base VM\n"
            msg += " 2) new name: name for base VM\n"
            sys.stderr.write(msg)
            sys.exit(1)
        instance_uuid = args[1]
        snapshot_name = args[2]
        request_cloudlet_base(settings.server_address, token,
                              urlparse(endpoint), instance_uuid, snapshot_name)
    elif args[0] == CMD_CREATE_OVERLAY:
        if len(args) != 3:
            msg = "Error: creating VM overlay needs [VM UUID] and [new name]\n"
            msg += " 1) VM UUID: UUID of a running instance that you want to create VM overlay\n"
            msg += " 2) new name: name for VM overlay\n"
            sys.stderr.write(msg)
            sys.exit(1)
        instance_uuid = args[1]
        snapshot_name = args[2]
        ret = request_create_overlay(settings.server_address, token,
                                     urlparse(endpoint), instance_uuid,
                                     snapshot_name)
        pprint(ret)
    elif args[0] == CMD_DOWNLOAD:
        if len(args) != 2:
            msg = "Error: downloading VM overlay needs [Image UUID]\n"
            msg += " 1) Image UUID: UUID of a VM overlay\n"
            sys.stderr.write(msg)
            sys.exit(1)
        image_name = args[1]
        output_name = image_name + ".zip"
        sys.stdout.write("Download %s to %s...\n" % (image_name, output_name))
        overlay_download(settings.server_address, token,
                         urlparse(glance_endpoint), image_name, output_name)
    elif args[0] == CMD_EXPORT_BASE:
        if len(args) != 2:
            msg = "Error: Exporting Base VM needs [Image UUID]\n"
            msg += " 1) Image UUID: UUID of a Base VM (base disk)\n"
            sys.stderr.write(msg)
            sys.exit(1)
        basedisk_uuid = args[1]
        output_path = os.path.join(os.curdir, "base-%s.zip" % basedisk_uuid)
        sys.stdout.write("Export %s to %s...\n" % (basedisk_uuid, output_path))
        if os.path.exists(output_path):
            is_overwrite = raw_input("%s exists. Overwrite it? (y/N) " %
                                     output_path)
            if is_overwrite != 'y':
                sys.exit(1)
        request_export_basevm(settings.server_address, token,
                              urlparse(endpoint), basedisk_uuid, output_path)
    elif args[0] == CMD_IMPORT_BASE:
        if len(args) != 3:
            msg = "Error: Importing Base VM needs [Path to Base VM file] [Name for Base VM]\n"
            msg += " 1) Path to Base VM file: Absolute path to base VM package\n"
            msg += " 2) Name for Base VM: new name for Base VM\n"
            sys.stderr.write(msg)
            sys.exit(1)
        import_filepath = args[1]
        basevm_name = args[2]
        if not os.access(import_filepath, os.R_OK):
            sys.stderr.write("Cannot access the file at %s\n" % import_filepath)
            sys.exit(1)
        try:
            request_import_basevm(settings.server_address, token,
                                  urlparse(endpoint),
                                  urlparse(glance_endpoint), import_filepath,
                                  basevm_name)
            sys.stdout.write("SUCCESS\n")
        except CloudletClientError as e:
            sys.stderr.write("Error: %s\n" % str(e))
    elif args[0] == CMD_SYNTHESIS:
        if len(args) != 3:
            msg = "Error: synthesis cmd needs [overlay url] and [name of VM]\n"
            sys.stderr.write(msg)
            sys.exit(1)
        overlay_url = str(args[1])
        new_instance_name = str(args[2])
        try:
            ret = request_synthesis(settings.server_address,
                                    token,
                                    urlparse(endpoint),
                                    key_name=None,
                                    server_name=new_instance_name,
                                    overlay_url=overlay_url)
            pprint(ret)
        except CloudletClientError as e:
            sys.stderr.write("Error: %s\n" % str(e))
    elif args[0] == CMD_HANDOFF:
        if len(args) != 3:
            msg = "Error: VM handoff needs [Instance UUID] []\n"
            msg += " 1) Instance UUID: Absolute path to base VM package\n"
            msg += " 2) Destination Credential : File path of a credential file for destination OpenStacknew\n"
            sys.stderr.write(msg)
            sys.exit(1)
        instance_uuid = str(args[1])
        handoff_dest_credential_file = str(args[2])

        try:
            # get token for the handoff destination
            dest_cred = _parse_credential_file(handoff_dest_credential_file)
            dest_account, dest_passwd, dest_tenant, dest_addr = dest_cred
            dest_token, dest_endpoint, dest_glance_endpoint = \
                get_token(dest_addr, dest_account, dest_passwd,
                            dest_tenant)
            handoff_url = dest_endpoint

            request_handoff(settings.server_address, token, urlparse(endpoint),
                            instance_uuid, handoff_url, dest_token)
        except CloudletClientError as e:
            sys.stderr.write(str(e))
            sys.exit(1)
    elif args[0] == CMD_HANDOFF_RECV:
        if not len(args) == 3:
            msg = "Need overlay_url and name of the instance"
            raise CloudletClientError(msg)

        overlay_url = str(args[1])
        new_instance_name = str(args[2])
        try:
            _request_handoff_recv(settings.server_address,
                                  token,
                                  urlparse(endpoint),
                                  server_name=new_instance_name,
                                  overlay_url=overlay_url)
        except CloudletClientError as e:
            sys.stderr.write("Error: %s\n" % str(e))
    elif args[0] == CMD_EXT_LIST:
        filter_name = None
        if len(args) == 2:
            filter_name = args[1]
        ext_info = get_extension(settings.server_address, token,
                                 urlparse(endpoint), filter_name)
        sys.stdout.write(json.dumps(ext_info, indent=2) + "\n")
    else:
        sys.stderr.write("No such command")
        sys.exit(1)
Example no. 39
    return sources


def currentdir():
    return os.path.dirname(__file__)


# configure, if not already done so
configname = os.path.join('include', 'config.h')
configname_in = os.path.join(currentdir(), 'src', 'include', 'config.h.in')

# run cmake if appropriate
if not os.path.exists(configname) or os.path.getmtime(
        configname) < os.path.getmtime(configname_in):
    if os.system('cmake %s' % currentdir()) or not os.path.exists(configname):
        sys.stderr.write('Configuring with cmake failed\n')
        sys.exit(1)


# inspect config.h, to find libraries to link to
def hasconfig(filename, symbol):
    f = open(filename, 'r')
    define = '#define HAVE_%s\n' % symbol
    for line in f:
        if line == define:
            f.close()
            return True
    f.close()
    return False

Example no. 40
    # calculate the window-sum for each element and associate the result with the rank of the element
    ranks_with_window = partial_elements.map(
        elements_summer(m, l, partition_weights)).flatMap(lambda x: x)

    # output the result in the form: rank, window-sum
    ranks_with_window = ranks_with_window.filter(lambda x: x[0] < n).map(
        lambda x: str(x[0]) + ' ' + str(x[1]))

    return ranks_with_window


def main(path_to_input, window_size, path_to_output, number_of_partitions):

    spark.conf.set("spark.sql.shuffle.partitions", number_of_partitions)
    spark.conf.set("spark.default.parallelism", number_of_partitions)

    data = sc.textFile(path_to_input)
    ranks_with_window = calculate_window_sum(data, window_size,
                                             number_of_partitions)
    ranks_with_window.saveAsTextFile(path_to_output)


if __name__ == '__main__':
    if len(sys.argv) != 5:
        sys.stderr.write("Incorrect number of arguments\n")
        sys.exit(1)
    path_to_input = sys.argv[1]
    window_size = int(sys.argv[2])
    path_to_output = sys.argv[3]
    number_of_partitions = int(sys.argv[4])
    main(path_to_input, window_size, path_to_output, number_of_partitions)
Example no. 41
 def analyze(self, verbose=None, **kwargs):
     sys.stderr.write('khf.analyze method not available\n')
Example no. 42
def color_sketches_net(height,
                       width,
                       iterations,
                       batch_size,
                       content_weight,
                       tv_weight,
                       learning_rate,
                       generator_network='unet',
                       use_adversarial_net=False,
                       use_hint=False,
                       adv_net_weight=1.0,
                       weight_decay_lambda=1e-5,
                       sketch_reconstruct_weight=10.0 / 255.0,
                       print_iterations=None,
                       checkpoint_iterations=None,
                       save_dir="model/",
                       do_restore_and_generate=False,
                       do_restore_and_train=False,
                       restore_from_noadv_to_adv=False,
                       preprocessed_folder=None,
                       preprocessed_file_path_list=None,
                       content_preprocessed_folder=None,
                       color_rebalancing_folder=None,
                       from_screenshot=False,
                       from_webcam=False,
                       test_img_dir=None,
                       test_img_hint=None,
                       input_mode='sketch',
                       output_mode='rgb',
                       use_cpu=False):
    """
    Stylize images.
    TODO: modify the description.

    This function yields tuples (iteration, image); `iteration` is None
    if this is the final image (the last iteration).  Other tuples are yielded
    every `checkpoint_iterations` iterations.

    :param: lr_decay_steps: learning rate decays by lr_decay_rate after lr_decay steps.
    Default per https://arxiv.org/abs/1603.03417
    :param: min_lr: The minimum learning rate. Default per https://arxiv.org/abs/1603.03417
    :param: lr_decay_rate: learning rate decays by lr_decay_rate after lr_decay steps.
    Default per https://arxiv.org/abs/1603.03417
    :param: use_semantic_masks: If it is true, the input to the generator network will be the semantic masks instead
    of the content image. The content image will serve as ground truth for loss (I haven't decided whether to use content
    or style loss).
    :rtype: iterator[tuple[int|None,image]]
    """

    # Before training, make sure everything is set correctly.
    if use_hint:
        assert test_img_hint is not None
    height, width = get_compatible_shape(height, width)
    input_shape = (1, height, width, 3)
    print(
        'The input shape is: %s. Input mode is: %s. Output mode is: %s. Using %s generator network'
        % (str(input_shape), input_mode, output_mode, generator_network))

    content_img_preprocessed = None
    sketches_preprocessed = None
    prev_content_preprocessed_file_i = 0

    # Define tensorflow placeholders and variables.
    with tf.Graph().as_default():
        input_images = tf.placeholder(
            tf.float32,
            shape=[
                batch_size, input_shape[1], input_shape[2],
                1 if generator_network != 'lnet' else 3
            ],
            name='input_sketches' if input_mode == 'sketch' else 'input_bw')

        if use_hint:
            input_hint = tf.placeholder(
                tf.float32,
                shape=[batch_size, input_shape[1], input_shape[2], 3],
                name='input_hint')
            input_concatenated = tf.concat(3, (input_images, input_hint))
            if generator_network == 'unet_color':
                assert input_mode == 'sketch' or (input_mode == 'raw_sketch'
                                                  and do_restore_and_generate)
                color_output = unet_color_util.net(input_concatenated)
                sketch_output = lnet_util.net(
                    (color_output - 128) / 128
                ) * 255  # This is the reconstructed sketch from the color output.
            elif generator_network == 'lnet':
                assert input_mode == 'color' and not use_adversarial_net and not use_hint
                # This step is not necessary but kept to be in sync with chainer repo.
                input_concatenated = (input_concatenated - 128) / 128
                color_output = lnet_util.net(input_concatenated,
                                             trainable=True) * 255
            elif generator_network == 'backprop':
                assert input_mode == 'sketch'
                color_output = tf.get_variable(
                    'backprop_input_var',
                    shape=[batch_size, input_shape[1], input_shape[2], 3],
                    initializer=tf.random_normal_initializer(
                        mean=128, stddev=10.0)) + 0 * input_images
                sketch_output = lnet_util.net(
                    (color_output - 128) / 128
                ) * 255  # This is the reconstructed sketch from the color output.
            else:
                # TODO: change the error message.
                raise AssertionError(
                    "Please input a valid generator network name. Possible options are: TODO. Got: %s"
                    % (generator_network))

        else:
            if generator_network == 'unet_color':
                assert input_mode == 'sketch' or (input_mode == 'raw_sketch'
                                                  and do_restore_and_generate)
                color_output = unet_color_util.net(input_images)
                sketch_output = lnet_util.net(
                    (color_output - 128) / 128
                ) * 255  # This is the reconstructed sketch from the color output.
            elif generator_network == 'lnet':
                assert input_mode == 'color' and not use_adversarial_net and not use_hint
                # This step is not necessary but kept to be in sync with chainer repo.
                input_images = (input_images - 128) / 128
                color_output = lnet_util.net(input_images,
                                             trainable=True) * 255
            elif generator_network == 'backprop':
                assert input_mode == 'sketch'
                color_output = tf.get_variable(
                    'backprop_input_var',
                    shape=[batch_size, input_shape[1], input_shape[2], 3],
                    initializer=tf.random_normal_initializer(
                    )) + 0 * input_images
                sketch_output = lnet_util.net(
                    (color_output - 128) / 128
                ) * 255  # This is the reconstructed sketch from the color output.
            else:
                raise AssertionError(
                    "Please input a valid generator network name. Possible options are: TODO. Got: %s"
                    % (generator_network))

        generator_all_var = unet_util.get_net_all_variables()
        sketch_reconstruct_all_var = lnet_util.get_net_all_variables()

        if not do_restore_and_generate:
            assert preprocessed_folder is not None and preprocessed_file_path_list is not None and \
                   preprocessed_folder[-1] == '/'
            learning_rate_init = tf.constant(learning_rate)
            learning_rate_var = tf.get_variable(name='learning_rate_var',
                                                trainable=False,
                                                initializer=learning_rate_init)
            color_expected_output = tf.placeholder(
                tf.float32,
                shape=[
                    batch_size, input_shape[1], input_shape[2],
                    3 if generator_network != 'lnet' else 1
                ],
                name='color_expected_output')
            # Use the mean difference loss. Used to use tf.nn.l2_loss. Don't know how big of a difference that makes.
            # color_loss_non_adv =tf.nn.l2_loss(color_output - color_expected_output) / batch_size
            color_loss_non_adv = tf.reduce_mean(
                tf.abs(color_output - color_expected_output))
            weight_decay_loss_non_adv = conv_util.weight_decay_loss(
                scope='unet') * weight_decay_lambda
            # This is only for unet_color, not for training the lnet,
            sketch_expected_output = lnet_util.net(
                (color_expected_output - 128) / 128, reuse=True) * 255
            sketch_reconstruct_loss_non_adv = tf.reduce_mean(
                tf.abs(sketch_output -
                       sketch_expected_output)) * sketch_reconstruct_weight

            generator_loss_non_adv = color_loss_non_adv + weight_decay_loss_non_adv + sketch_reconstruct_loss_non_adv
            # tv_loss = tv_weight * total_variation(image)

            if use_adversarial_net:
                adv_net_input = tf.placeholder(
                    tf.float32,
                    shape=[batch_size, input_shape[1], input_shape[2], 3],
                    name='adv_net_input')
                adv_net_prediction_image_input = adv_net_util.net(
                    adv_net_input)
                adv_net_prediction_generator_input = adv_net_util.net(
                    color_output, reuse=True)
                adv_net_all_var = adv_net_util.get_net_all_variables()

                weight_decay_loss_adv = conv_util.weight_decay_loss(
                    scope='adv_net') * weight_decay_lambda

                logits_from_i = adv_net_prediction_image_input
                logits_from_g = adv_net_prediction_generator_input

                # One represent labeling the image as coming from real image. Zero represent labeling it as generated.
                adv_loss_from_i = tf.reduce_mean(
                    tf.nn.sparse_softmax_cross_entropy_with_logits(
                        logits_from_i, tf.ones(
                            [batch_size], dtype=tf.int64))) * adv_net_weight
                adv_loss_from_g = tf.reduce_mean(
                    tf.nn.sparse_softmax_cross_entropy_with_logits(
                        logits_from_g, tf.zeros(
                            [batch_size], dtype=tf.int64))) * adv_net_weight

                adv_loss = adv_loss_from_i + adv_loss_from_g + weight_decay_loss_adv
                generator_loss_through_adv = tf.reduce_mean(
                    tf.nn.sparse_softmax_cross_entropy_with_logits(
                        logits_from_g, tf.ones(
                            [batch_size], dtype=tf.int64))) * adv_net_weight
                # Beta1 = 0.5 according to dcgan paper
                adv_train_step = tf.train.AdamOptimizer(
                    learning_rate_var, beta1=0.5,
                    beta2=0.999).minimize(adv_loss, var_list=adv_net_all_var)
                # adv_train_step_i = tf.train.AdamOptimizer(learning_rate_var, beta1=0.5,
                #                        beta2=0.999).minimize(adv_loss_from_i, var_list=adv_net_all_var)
                # adv_train_step_g = tf.train.AdamOptimizer(learning_rate_var, beta1=0.5,
                #                        beta2=0.999).minimize(adv_loss_from_g, var_list=adv_net_all_var)
                generator_train_step_through_adv = tf.train.AdamOptimizer(
                    learning_rate_var, beta1=0.5,
                    beta2=0.999).minimize(generator_loss_through_adv,
                                          var_list=generator_all_var)
                generator_train_step = tf.train.AdamOptimizer(
                    learning_rate_var, beta1=0.9,
                    beta2=0.999).minimize(generator_loss_non_adv)

                with tf.control_dependencies(
                    [generator_train_step_through_adv, generator_train_step]):
                    generator_both_train = tf.no_op(
                        name='generator_both_train')

                adv_loss_real_sum = scalar_summary("adv_loss_real",
                                                   adv_loss_from_i)
                adv_loss_fake_sum = scalar_summary("adv_loss_fake",
                                                   adv_loss_from_g)
                adv_loss_weight_decay_sum = scalar_summary(
                    "adv_loss_weight_decay", weight_decay_loss_adv)

                generator_loss_through_adv_sum = scalar_summary(
                    "g_loss_through_adv", generator_loss_through_adv)
                adv_loss_sum = scalar_summary("adv_loss", adv_loss)
                generator_loss_l2_sum = scalar_summary(
                    "generator_loss_non_adv", generator_loss_non_adv)
                generator_loss_weight_decay_sum = scalar_summary(
                    "generator_loss_weight_decay", weight_decay_loss_non_adv)
                sketch_reconstruct_loss_non_adv_sum = scalar_summary(
                    "sketch_reconstruct_loss_non_adv",
                    sketch_reconstruct_loss_non_adv)

                g_sum = merge_summary([
                    generator_loss_through_adv_sum, generator_loss_l2_sum,
                    generator_loss_weight_decay_sum,
                    sketch_reconstruct_loss_non_adv_sum
                ])
                adv_sum = merge_summary([
                    adv_loss_fake_sum, adv_loss_real_sum,
                    adv_loss_weight_decay_sum, adv_loss_sum
                ])
            else:
                # optimizer setup
                # Training using adam optimizer. Setting comes from https://arxiv.org/abs/1610.07629.
                generator_train_step = tf.train.AdamOptimizer(
                    learning_rate_var, beta1=0.9,
                    beta2=0.999).minimize(generator_loss_non_adv)
                generator_loss_l2_sum = scalar_summary("color_loss_non_adv",
                                                       generator_loss_non_adv)
                generator_loss_weight_decay_sum = scalar_summary(
                    "generator_loss_weight_decay", weight_decay_loss_non_adv)
                sketch_reconstruct_loss_non_adv_sum = scalar_summary(
                    "sketch_reconstruct_loss_non_adv",
                    sketch_reconstruct_loss_non_adv)
                g_sum = merge_summary([
                    generator_loss_l2_sum, generator_loss_weight_decay_sum,
                    sketch_reconstruct_loss_non_adv_sum
                ])

            def print_progress(i,
                               feed_dict,
                               adv_feed_dict,
                               start_time,
                               total_iterations,
                               last=False):
                stderr.write('Iteration %d/%d\n' % (i + 1, iterations))
                if last or (print_iterations and i % print_iterations == 0):
                    current_time = time.time()
                    if i > 0:
                        seconds_passed = current_time - start_time
                        seconds_remaining = float(total_iterations -
                                                  i) / i * seconds_passed
                        m, s = divmod(seconds_remaining, 60)
                        h, m = divmod(m, 60)
                        stderr.write(
                            'Estimated time remaining: "%d:%02d:%02d"' %
                            (h, m, s))
                    stderr.write('Learning rate %f\n' %
                                 (learning_rate_var.eval()))
                    # TODO: change this
                    stderr.write(
                        ' generator l2 loss: %g\n' %
                        generator_loss_non_adv.eval(feed_dict=feed_dict))
                    stderr.write('       sketch loss: %g\n' %
                                 sketch_reconstruct_loss_non_adv.eval(
                                     feed_dict=feed_dict))
                    if not generator_network == 'backprop':
                        stderr.write('  w decay gen loss: %g\n' %
                                     weight_decay_loss_non_adv.eval(
                                         feed_dict=feed_dict))
                    # if generator_network == 'unet_both' or generator_network == 'colorful_img_both':
                    #     stderr.write('           bw loss: %g\n' % color_loss_non_adv.eval(feed_dict=feed_dict))
                    # stderr.write('           ab loss: %g\n' % ab_loss_non_adv.eval(feed_dict=feed_dict))
                    if use_adversarial_net:
                        stderr.write(
                            '   adv_from_i loss: %g\n' %
                            adv_loss_from_i.eval(feed_dict=adv_feed_dict))
                        stderr.write(
                            '   adv_from_g loss: %g\n' %
                            adv_loss_from_g.eval(feed_dict=adv_feed_dict))
                        stderr.write('generator adv loss: %g\n' %
                                     generator_loss_through_adv.eval(
                                         feed_dict=adv_feed_dict))
                        stderr.write('  w decay adv loss: %g\n' %
                                     weight_decay_loss_adv.eval(
                                         feed_dict=adv_feed_dict))

        # Optimization
        # It used to track and record only the best one with lowest loss. This is no longer necessary and I think
        # just recording the one generated at each round will make it easier to debug.
        best_image = None
        start_time = time.time()
        if restore_from_noadv_to_adv and use_adversarial_net:
            saver = tf.train.Saver(generator_all_var + [learning_rate_var])
        else:
            saver = tf.train.Saver()

        if use_cpu:
            config = tf.ConfigProto(device_count={'GPU': 0})
        else:
            config = None
        with tf.Session(config=config) as sess:
            if do_restore_and_generate:
                assert batch_size == 1
                ckpt = tf.train.get_checkpoint_state(save_dir)
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)
                else:
                    stderr.write("No checkpoint found. Exiting program\n")
                    return

                if from_screenshot:
                    # This is the x and y offset, the coordinate where we start capturing screen shot.
                    kScreenX = 300
                    kScreenY = 300
                elif from_webcam:
                    cap = cv2.VideoCapture(0)
                    # Set width and height.
                    ret = cap.set(3, 1280)
                    ret = cap.set(4, 960)
                    ret, frame = cap.read()
                    print('The dimension of this camera is : %d x %d' %
                          (frame.shape[1], frame.shape[0]))
                else:
                    assert test_img_dir is not None
                iterator = 0

                while from_screenshot or from_webcam or (iterator == 0):
                    if from_screenshot:
                        pass
                        # w = gtk.gdk.get_default_root_window()
                        # sz = w.get_size()
                        # print "The size of the window is %d x %d" % sz
                        # pb = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, False, 8, input_shape[1], input_shape[2])
                        # pb = pb.get_from_drawable(w, w.get_colormap(), kScreenX, kScreenY, 0, 0, input_shape[1],
                        #                           input_shape[2])
                        # content_image = pb.pixel_array
                    elif from_webcam:
                        ret, frame = cap.read()
                        content_image = scipy.misc.imresize(
                            frame, (input_shape[1], input_shape[2]))
                    else:
                        content_image = imread(
                            test_img_dir, (input_shape[1], input_shape[2]))
                    content_image = np.array([content_image])
                    if input_mode == 'sketch':
                        color_expected_output = tf.placeholder(
                            tf.float32,
                            shape=[
                                batch_size, input_shape[1], input_shape[2],
                                3 if generator_network != 'lnet' else 1
                            ],
                            name='color_expected_output')
                        sketch_expected_output = lnet_util.net(
                            (color_expected_output - 128) / 128,
                            reuse=True) * 255
                        content_image_yuv = cv2.cvtColor(
                            np.asarray(content_image[0, ...], dtype=np.uint8),
                            cv2.COLOR_RGB2YUV)
                        image_sketches = sketch_expected_output.eval(
                            feed_dict={
                                color_expected_output:
                                np.array([content_image_yuv])
                            })

                        # image_sketches = sketches_util.image_to_sketch(content_image)
                        # image_sketches = np.expand_dims(image_sketches, axis=3)
                    elif input_mode == 'bw':
                        content_image_lab = colorful_img_network_util.rgb_to_lab(
                            content_image)
                        image_sketches = content_image_lab[..., 0:1]
                        # image_sketches = np.expand_dims(rgb2gray(content_image), axis=3)
                    elif input_mode == 'color':
                        image_sketches = np.zeros(content_image.shape)
                        # image_sketches = np.expand_dims(rgb2gray(content_image), axis=3)
                    elif input_mode == 'raw_sketch':
                        image_sketches = rgb2gray(content_image, keep_dim=True)
                    else:
                        raise AssertionError('Input mode error.')

                    # Do some processing...
                    image_sketches, content_image = sketches_util.generate_training_batch(
                        image_sketches, content_image, train=False)

                    # Now generate an image using the style_blend_weights given.
                    if input_mode == 'color':
                        feed_dict = {input_images: content_image}
                    else:
                        feed_dict = {input_images: image_sketches[..., :1]}

                    if use_hint:
                        image_hint = hint_imread(
                            test_img_hint, (input_shape[1], input_shape[2]))
                        feed_dict[input_hint] = np.array([image_hint])

                    generated_bw = color_output.eval(feed_dict=feed_dict)
                    iterator += 1

                    if generator_network != 'lnet':
                        # Whenever using cv2.cvtColor, be careful not to use float values... It gives out weird answers.
                        print(generated_bw[0, 0, 0:5, :])
                        print(content_image[0, 0, 0:5, :])
                        generated_image = np.array([
                            cv2.cvtColor(
                                np.asarray(generated_bw[0, ...],
                                           dtype=np.uint8), cv2.COLOR_YUV2RGB)
                        ])
                        # generated_image = image_sketches[...,:1]
                    else:
                        generated_image = generated_bw
                    yield (iterator, generated_image)

            else:
                # Initialize log writer
                summary_writer = SummaryWriter("./logs", sess.graph)

                # Initialize preprocessed numpy arrays
                if content_preprocessed_folder is not None:
                    if not os.path.isfile(content_preprocessed_folder +
                                          'record.txt'):
                        raise AssertionError(
                            'No preprocessed content images found in %s. To use this feature, first call '
                            'read_resize_and_save_all_imgs_in_dir from another script to generate them.'
                            % (content_preprocessed_folder))
                    content_preprocessed_record = sketches_util.read_preprocessed_sketches_npy_record(
                        content_preprocessed_folder)
                    if content_preprocessed_record[0][
                            3] != height or content_preprocessed_record[0][
                                4] != width:
                        raise AssertionError(
                            'The height and/or width of the preprocessed numpy files does not '
                            'match those of the current setting.')
                    # Read the first file
                    print('Reading preprocessed content images.')
                    content_img_preprocessed = np.load(
                        content_preprocessed_record[
                            prev_content_preprocessed_file_i][0])
                    sketches_preprocessed = np.load(
                        content_preprocessed_record[
                            prev_content_preprocessed_file_i][1])

                # Do Training.
                iter_start = 0
                if do_restore_and_train:
                    ckpt = tf.train.get_checkpoint_state(save_dir)
                    if ckpt and ckpt.model_checkpoint_path:
                        saver.restore(sess, ckpt.model_checkpoint_path)
                        iter_start = get_global_step_from_save_dir(
                            ckpt.model_checkpoint_path)
                    else:
                        raise AssertionError(
                            "No checkpoint found. Exiting program.")
                    if restore_from_noadv_to_adv and use_adversarial_net:
                        # Simply running this doesn't seem to work.
                        # sess.run(tf.initialize_variables(adv_net_all_var))

                        # Get all variables except the generator net and the learning rate
                        if '0.12.0' in tf.__version__:
                            all_vars = tf.get_collection(
                                tf.GraphKeys.GLOBAL_VARIABLES)
                        else:
                            all_vars = tf.get_collection(
                                tf.GraphKeys.VARIABLES)
                        var_not_saved = [
                            item for item in all_vars
                            if item not in (generator_all_var +
                                            [learning_rate_var])
                        ]
                        sess.run(tf.initialize_variables(var_not_saved))
                        # Now change the saver back to normal
                        saver = tf.train.Saver()
                        raise NotImplementedError
                else:
                    # # In the past I ran this. Now I have lnet which is a pretrained network.
                    # sess.run(tf.initialize_all_variables())

                    saver = tf.train.Saver(sketch_reconstruct_all_var)
                    ckpt = tf.train.get_checkpoint_state(
                        'model/chainer_converted/')
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    # Get variables not in lnet and initialize them
                    # Get all variables except the generator net and the learning rate
                    if '0.12.0' in tf.__version__:
                        all_vars = tf.get_collection(
                            tf.GraphKeys.GLOBAL_VARIABLES)
                    else:
                        all_vars = tf.get_collection(tf.GraphKeys.VARIABLES)
                    var_not_saved = [
                        item for item in all_vars
                        if item not in sketch_reconstruct_all_var
                    ]
                    sess.run(tf.initialize_variables(var_not_saved))
                    # Now change the saver back to normal
                    saver = tf.train.Saver()

                # Get path to all content images.

                image_subpaths = read_preprocessed_file_path_list(
                    preprocessed_file_path_list)

                # Ignore the ones at the end.
                if batch_size != 1 and len(image_subpaths) % batch_size != 0:
                    image_subpaths = image_subpaths[:-(len(image_subpaths) %
                                                       batch_size)]
                print('The size of training dataset is %d images.' %
                      len(image_subpaths))
                preprocessed_colored_folder = preprocessed_folder + 'color/'
                preprocessed_sketch_folder = preprocessed_folder + 'line/'

                content_dirs = [preprocessed_colored_folder + p
                                for p in image_subpaths]
                sketch_dirs = [preprocessed_sketch_folder + p
                               for p in image_subpaths]

                # # Test training GAN differently***
                # generators_turn = True
                # # END TEST***

                current_lr = learning_rate_var.eval()
                if abs(current_lr - learning_rate) > 0.00000001:
                    print(
                        "Given learning rate is different from the learning rate stored. Changing lr %f -> %f"
                        % (current_lr, learning_rate))
                    sess.run(learning_rate_var.assign(learning_rate))

                for i in range(iter_start, iterations):
                    if content_preprocessed_folder is not None:
                        current_content_preprocessed_file_i, index_within_preprocessed =  \
                            sketches_util.find_corresponding_sketches_npy_from_record(
                            content_preprocessed_record, i * batch_size, batch_size)
                        if prev_content_preprocessed_file_i != current_content_preprocessed_file_i:
                            prev_content_preprocessed_file_i = current_content_preprocessed_file_i
                            content_img_preprocessed = np.load(
                                content_preprocessed_record[
                                    current_content_preprocessed_file_i][0])
                            sketches_preprocessed = np.load(
                                content_preprocessed_record[
                                    current_content_preprocessed_file_i][1])
                        content_pre_list = content_img_preprocessed[
                            index_within_preprocessed:
                            index_within_preprocessed + batch_size,
                            ...].astype(np.float32)

                        if input_mode == 'sketch':
                            image_sketches = sketches_preprocessed[
                                index_within_preprocessed:
                                index_within_preprocessed + batch_size,
                                ...].astype(np.float32)
                            image_sketches = np.expand_dims(image_sketches,
                                                            axis=3)
                        elif input_mode == 'bw':
                            content_image_lab = colorful_img_network_util.rgb_to_lab(
                                content_pre_list)
                            image_sketches = content_image_lab[..., 0:1]
                            # image_sketches = np.expand_dims(rgb2gray(content_pre_list), axis=3)
                        elif input_mode == 'color':
                            image_sketches = content_pre_list
                        elif input_mode == 'raw_sketch':
                            raise AssertionError(
                                'Input mode raw_sketch should not be trained.')
                        else:
                            raise AssertionError('Input mode error.')
                    else:

                        current_content_dirs = get_batch_paths(
                            content_dirs, i * batch_size, batch_size)
                        current_sketch_dirs = get_batch_paths(
                            sketch_dirs, i * batch_size, batch_size)
                        content_pre_list = read_and_resize_batch_images(
                            current_content_dirs, None, None)
                        image_sketches = read_and_resize_bw_mask_images(
                            current_sketch_dirs, None, None,
                            len(current_sketch_dirs), 1)

                        # if input_mode == 'sketch':
                        #     image_sketches = sketches_util.image_to_sketch(content_pre_list)
                        #     image_sketches = np.expand_dims(image_sketches, axis=3)
                        # elif input_mode == 'bw':
                        #     content_image_lab = colorful_img_network_util.rgb_to_lab(content_pre_list)
                        #     image_sketches = content_image_lab[...,0:1]
                        #     # image_sketches = np.expand_dims(rgb2gray(content_pre_list), axis=3)
                        # else:
                        #     raise AssertionError('Input mode error.')

                    # Do some processing...
                    image_sketches, content_pre_list = sketches_util.generate_training_batch(
                        image_sketches, content_pre_list, train=True)
                    if generator_network == 'lnet':
                        feed_dict = {
                            color_expected_output: image_sketches[..., :1]
                        }
                    else:
                        feed_dict = {color_expected_output: content_pre_list}

                    if use_hint:
                        # image_hint = sketches_util.generate_hint_from_image(content_pre_list)
                        # feed_dict[input_hint] = image_hint
                        image_hint = image_sketches[..., 1:]
                        feed_dict[input_hint] = image_hint

                    image_sketches = image_sketches[..., :1]

                    if input_mode == 'color':
                        feed_dict[input_images] = content_pre_list
                    else:
                        feed_dict[input_images] = image_sketches

                    last_step = (i == iterations - 1)

                    if use_adversarial_net:
                        # adv_feed_dict = {input_images:image_sketches, adv_net_input: content_pre_list}
                        # if use_hint:
                        #     adv_feed_dict[input_hint] = image_hint
                        adv_feed_dict = copy.copy(feed_dict)
                        adv_feed_dict[adv_net_input] = content_pre_list

                        # TEST printing before training
                        print_progress(i,
                                       feed_dict=feed_dict,
                                       adv_feed_dict=adv_feed_dict,
                                       start_time=start_time,
                                       total_iterations=iterations,
                                       last=last_step)

                        # Update D network
                        _, summary_str = sess.run([adv_train_step, adv_sum],
                                                  feed_dict=adv_feed_dict)
                        summary_writer.add_summary(summary_str, i)

                        # Update G network
                        _, summary_str = sess.run(
                            [generator_both_train, g_sum],
                            feed_dict=adv_feed_dict)
                        summary_writer.add_summary(summary_str, i)

                    else:
                        adv_feed_dict = None
                        print_progress(i,
                                       feed_dict=feed_dict,
                                       adv_feed_dict=adv_feed_dict,
                                       start_time=start_time,
                                       total_iterations=iterations,
                                       last=last_step)

                        _, summary_str = sess.run(
                            [generator_train_step, g_sum], feed_dict=feed_dict)
                        summary_writer.add_summary(summary_str, i)

                    # TEST printing after training
                    print_progress(i,
                                   feed_dict=feed_dict,
                                   adv_feed_dict=adv_feed_dict,
                                   start_time=start_time,
                                   total_iterations=iterations,
                                   last=last_step)

                    if (checkpoint_iterations
                            and i % checkpoint_iterations == 0) or last_step:
                        saver.save(sess,
                                   save_dir + 'model.ckpt',
                                   global_step=i)
                        print('Checkpoint saved.')

                        if test_img_dir is not None:
                            test_image = imread(test_img_dir)
                            test_image_shape = test_image.shape

                        # The for loop will run once and terminate. Can't use return and yield in the same function so this is a hacky way to do it.
                        # Set use_cpu = true to save graphical memory
                        for _, generated_image in color_sketches_net(
                                test_image_shape[0],
                                test_image_shape[1],
                                iterations,
                                1,
                                content_weight,
                                tv_weight,
                                learning_rate,
                                generator_network=generator_network,
                                # use_adversarial_net=use_adversarial_net,
                                use_adversarial_net=False,
                                use_hint=use_hint,
                                save_dir=save_dir,
                                do_restore_and_generate=True,
                                do_restore_and_train=False,
                                from_screenshot=False,
                                from_webcam=False,
                                test_img_dir=test_img_dir,
                                test_img_hint=test_img_hint,
                                input_mode=input_mode,
                                output_mode=output_mode,
                                use_cpu=use_cpu):
                            pass
                        best_image = generated_image

                        # Because we now have batch, choose the first one in the batch as our sample image.
                        yield ((None if last_step else i),
                               None if test_img_dir is None else best_image)
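
For orientation, here is a hedged sketch of how the generator above might be driven from a calling script. The argument order simply mirrors the recursive call made at checkpoint time; every literal value (sizes, paths, the network name) is a placeholder rather than a value taken from the original project.

# Hypothetical training driver; all literals below are placeholders and the
# argument order mirrors the recursive call above.
import scipy.misc

for i, sample in color_sketches_net(
        256, 256, 100000, 4,            # height, width, iterations, batch_size
        5.0, 0.0002, 0.001,             # content_weight, tv_weight, learning_rate
        generator_network='unet_both',  # placeholder network name
        use_adversarial_net=True,
        use_hint=True,
        save_dir='model/',
        do_restore_and_generate=False,
        do_restore_and_train=False,
        test_img_dir='test.png',        # image sampled at every checkpoint
        test_img_hint='test_hint.png',
        input_mode='sketch',
        output_mode='rgb'):             # placeholder output mode
    # `i is None` marks the final yield; `sample` is a batch-first array.
    if sample is not None:
        name = 'final' if i is None else str(i)
        scipy.misc.imsave('sample_%s.png' % name, sample[0])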
postsurvey_filename = sys.argv[4]
postsurvey2_filename = sys.argv[5]

###
#Start with only files from the Aug pilot (other files will not match
#in columns because of different survey questions)

######

#make a list of the filenames in the folder
file_language_tuples = []
for current_root, dirnames_d, filenames_d in os.walk(folderpath):
    csv_filenames = fnmatch.filter(filenames_d, "*.csv")
    if current_root == folderpath:
        if len(csv_filenames) > 0:
            print >> sys.stderr, \
                    "ERROR: need at least one level of nesting " \
                    + "in data folder '" + folderpath + "'"
            sys.exit()
        continue
    full_pathnames = [os.path.join(current_root, f) \
            for f in csv_filenames]
    subfolder = folderpath.join(current_root.split(folderpath)[1:])
    first_subfolder = None
    for folder in subfolder.split('/'):
        if len(folder) > 0:
            first_subfolder = folder
            break
    curr_tuples = [(f, first_subfolder) for f in full_pathnames]
    file_language_tuples += curr_tuples
Esempio n. 44
0
import json
import sys
import urllib2

WIDTH = 80
URBAN_URL = 'http://api.urbandictionary.com/v0/define?term=%s'

if len(sys.argv) < 2:
    sys.stderr.write("usage: urban <term>\n")
    sys.exit(1)

term = '+'.join(sys.argv[1:])

try:
    resp = urllib2.urlopen(URBAN_URL % (term,))
    data = json.loads(resp.read())
except Exception:
    sys.stderr("error contacting urban dictionary.\n")
    sys.exit(1)

if data['result_type'] != 'exact':
    sys.stderr("no definitions.\n")
    sys.exit(1)

definitions = data['list']
definitions.sort(key=lambda x: -1.0 * x['thumbs_up'] / max(x['thumbs_down'], 1))  # avoid dividing by zero
definitions = definitions[:3]

def render_def(n, d):
    res = ''
    line = "%d)" % (n + 1,)
    for word in d.split():
        if len(line) + 1 + len(word) > WIDTH:
Esempio n. 45
0
from src import BayesNet
import sys

if __name__ == '__main__':
    if len(sys.argv) != 4:
        sys.stderr.write('usage: trainFile testFile learningMethod\n')
        sys.exit()

    trainFilename, testFilename, method = sys.argv[1:]

    model = BayesNet.BayesNet()
    model.loadTrain(trainFilename)
    model.loadTest(testFilename)

    if method == 'n':
        model.buildNaiveBayes()

    if method == 't':
        model.buildTAN()

    model.printResults()
Esempio n. 46
0
 def _sigHandler(self, signum, frame):
     "signal handler to stop logging and terminate process"
     self.finishUp()
     sys.stderr("Warning: profiler exiting on signal\n")
     sys.exit(1)
Esempio n. 47
0
def eprint(msg):
    sys.stderr.write(msg)
Esempio n. 48
0
    argparser = ArgumentParser(usage=usage)
    argparser.add_argument('-j',
                           '--json',
                           type=str,
                           default='-',
                           help='input json file')
    args = argparser.parse_args()
    return args


if __name__ == "__main__":
    argresult = parser()
    filename = argresult.json
    if filename is None:
        filename = "-"

    if filename == "-":
        dat = sys.stdin.read()
    else:
        if not os.path.exists(filename):
            sys.stderr("No such file [%s]" % filename)
            sys.exit(1)
        with open(argresult.json, "r") as f:
            dat = f.read()

    try:
        print(digest(dat))
    except Exception as e:
        print(e)
        print("Invalid input", file=sys.stderr)
def getRttDataAthena(configFile=None):

	print("Find RTT differentiel for each traceroute")

	if configFile is None:
		configFile = "conf/getRttDataAthena.conf"

	if os.path.exists(configFile):
		expParam = json.load(open(configFile, "r"), object_hook=json_util.object_hook)


	else:
		sys.stderr.write("No config file found!\nPlease copy %s.default to %s\n" % (configFile, configFile))
		sys.exit(1)

	pool = Pool(expParam["nbProcesses"], initializer=rttAnalysis.processInit)  # , maxtasksperchild=binMult)

	if not expParam["prefixes"] is None:
		expParam["prefixes"] = re.compile(expParam["prefixes"])
	client = pymongo.MongoClient("mongodb-iijlab")
	db = client.atlas
	detectionExperiments = db.rttExperiments
	alarmsCollection = db.rttChanges
	# expId = detectionExperiments.insert_one(expParam).inserted_id

	sampleMediandiff = {}
	ip2asn = {}
	gi = pygeoip.GeoIP("../lib/GeoIPASNum.dat")

	start = int(calendar.timegm(expParam["start"].timetuple()))
	end = int(calendar.timegm(expParam["end"].timetuple()))

	rawDiffRtt = defaultdict(list)
	rawNbProbes = defaultdict(list)
	rawDates = defaultdict(list)



	for currDate in range(start, end, expParam["timeWindow"]):

		sys.stderr.write("Rtt analysis %s" % datetime.datetime.utcfromtimestamp(currDate))
		tsS = time.time()

		diffRtt = defaultdict(dict)
		nbRow = 0

		currDatetime= datetime.datetime.utcfromtimestamp(int(currDate))
		endDatetime= datetime.datetime.utcfromtimestamp(int(currDate+int(expParam["timeWindow"])))
		msmIds= expParam["msmIds"]
		msmType=  expParam["msmType"]


		# Prepare the SQL request for the given period
		sqlRequest = generateSQLAthenaRequest(currDatetime, endDatetime, currDate, currDate+expParam["timeWindow"], msmIds)

		print ("The created SQL request is : \n %s"%sqlRequest)


		# Find Results
		result = get_traceroutes_by_sql_request(sqlRequest)
		if len(result) > 0:
			IRtt, row = computeRtt_athena(result)

			diffRtt, nbRow = mergeRttResults_athena(IRtt, row)

			for k, v in diffRtt.iteritems():
				rawDiffRtt[k].extend(v["rtt"])
				rawDates[k].extend([currDatetime] * len(v["rtt"]))

			timeSpent = (time.time() - tsS)
			sys.stderr.write(", %s sec/bin,  %s row/sec\r" % (timeSpent, float(nbRow) / timeSpent))


	pool.close()
	pool.join()
	return (rawDiffRtt, rawNbProbes, rawDates)
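
The function reads all of its parameters from a JSON config file. Below is a hedged sketch of the expected fields, written as the Python dict that json.load (with bson's json_util object_hook) would return; the field names are inferred from the expParam[...] accesses above and every value is illustrative only.

import datetime

# Hypothetical contents of conf/getRttDataAthena.conf; all values are placeholders.
expParam_example = {
    "nbProcesses": 8,                        # size of the multiprocessing Pool
    "prefixes": None,                        # optional regex string, compiled if present
    "start": datetime.datetime(2017, 1, 1),  # decoded from an extended-JSON $date
    "end": datetime.datetime(2017, 1, 2),
    "timeWindow": 3600,                      # seconds per analysis bin
    "msmIds": [5001, 5002],                  # measurement ids injected into the SQL request
    "msmType": "traceroute",
}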
Esempio n. 50
0
    hangulcode = jamotosyllable(choseong, jungseong, jongseong)

    return hangulcode


# start main procedure
data_file_name = "Unihan.txt"

if len(sys.argv) == 2:
    data_file_name = sys.argv[1]

try:
    data_file = open(data_file_name, 'r')

except:
    sys.stderr("Cant open file: %s\n" % data_file_name)
    help()
    sys.exit(1)

gather_copyright = 0
table = {}
for line in data_file.readlines():
    # check for comment, jump over comments
    if line[0] == '#':
        if gather_copyright == 0:
            if string.find(line, "Format information:") != -1:
                gather_copyright = 1
                continue
            copyright += ' * ' + string.strip(line[1:]) + '\n'
            continue
        else:
def color_sketches_net(height, width, iterations, batch_size, content_weight, tv_weight,
                       learning_rate,
                       lr_decay_steps=50000,
                       min_lr=0.00001, lr_decay_rate=0.7, print_iterations=None,
                       checkpoint_iterations=None, save_dir="model/", do_restore_and_generate=False,
                       do_restore_and_train=False, real_folder=None, fake_folder=None):
    """
    Stylize images.

    This function yields tuples (iteration, image); `iteration` is None
    if this is the final image (the last iteration).  Other tuples are yielded
    every `checkpoint_iterations` iterations.

    :param: lr_decay_steps: learning rate decays by lr_decay_rate after lr_decay steps.
    Default per https://arxiv.org/abs/1603.03417
    :param: min_lr: The minimum learning rate. Default per https://arxiv.org/abs/1603.03417
    :param: lr_decay_rate: learning rate decays by lr_decay_rate after lr_decay steps.
    Default per https://arxiv.org/abs/1603.03417
    :param: use_semantic_masks: If it is true, the input to the generator network will be the semantic masks instead
    of the content image. The content image will serve as ground truth for loss (I haven't decided whether to use content
    or style loss).
    :rtype: iterator[tuple[int|None,image]]
    """

    # Before training, make sure everything is set correctly.

    input_shape = (1, height, width, 3)
    print('The input shape is: %s' % (str(input_shape)))

    # Define tensorflow placeholders and variables.
    with tf.Graph().as_default():

        learning_rate_decayed_init = tf.constant(learning_rate)
        learning_rate_decayed = tf.get_variable(name='learning_rate_decayed', trainable=False,
                                                initializer=learning_rate_decayed_init)

        adv_net_input_real = tf.placeholder(
            tf.float32,
            shape=[batch_size, input_shape[1], input_shape[2], 3],
            name='adv_net_input_real')
        adv_net_prediction_image_input = adv_net_util.net(adv_net_input_real)

        adv_net_input_fake = tf.placeholder(
            tf.float32,
            shape=[batch_size, input_shape[1], input_shape[2], 3],
            name='adv_net_input_fake')
        adv_net_prediction_generator_input = adv_net_util.net(
            adv_net_input_fake, reuse=True)
        adv_net_all_var = adv_net_util.get_net_all_variables()

        logits_from_i = adv_net_prediction_image_input
        logits_from_g = adv_net_prediction_generator_input

        # A label of one means the image comes from the real dataset; a label
        # of zero means it was generated.
        adv_loss_from_i = tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits_from_i, tf.ones([batch_size], dtype=tf.int64)))
        adv_loss_from_g = tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits_from_g, tf.zeros([batch_size], dtype=tf.int64)))

        adv_loss = adv_loss_from_i + adv_loss_from_g
        adv_train_step = tf.train.AdamOptimizer(
            learning_rate_decayed, beta1=0.5,
            beta2=0.999).minimize(adv_loss, var_list=adv_net_all_var)
        adv_train_step_i = tf.train.AdamOptimizer(
            learning_rate_decayed, beta1=0.5,
            beta2=0.999).minimize(adv_loss_from_i, var_list=adv_net_all_var)
        adv_train_step_g = tf.train.AdamOptimizer(
            learning_rate_decayed, beta1=0.5,
            beta2=0.999).minimize(adv_loss_from_g, var_list=adv_net_all_var)

        # with tf.control_dependencies([generator_train_step_through_adv, adv_train_step]):
        with tf.control_dependencies([adv_train_step_i, adv_train_step_g]):
            adv_generator_both_train = tf.no_op(name='adv_generator_both_train')

        def print_progress(i, adv_feed_dict, last=False):
            stderr.write(
                'Iteration %d/%d\n' % (i + 1, iterations))
            if last or (print_iterations and i % print_iterations == 0):
                stderr.write('Learning rate %f\n' % (learning_rate_decayed.eval()))
                stderr.write('   adv_from_i loss: %g\n' % adv_loss_from_i.eval(feed_dict=adv_feed_dict))
                stderr.write('   adv_from_g loss: %g\n' % adv_loss_from_g.eval(feed_dict=adv_feed_dict))


        # Optimization
        # It used to track and record only the best one with lowest loss. This is no longer necessary and I think
        # just recording the one generated at each round will make it easier to debug.
        best_real_image = None
        best_fake_image = None

        best_real_loss = 100.0
        best_fake_loss = 100.0

        saver = tf.train.Saver()
        with tf.Session() as sess:
            with open(save_dir + 'adv_loss.tsv', 'w') as loss_record_file:
                loss_record_file.write('i\tcurrent_adv_loss_i\tcurrent_adv_loss_g\n')
            # Do Training.
            iter_start = 0
            if do_restore_and_train:
                ckpt = tf.train.get_checkpoint_state(save_dir)
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    iter_start = get_global_step_from_save_dir(ckpt.model_checkpoint_path)
                else:
                    stderr("No checkpoint found. Exiting program")
                    return
            else:
                sess.run(tf.initialize_all_variables())

            # Get path to all content images.
            real_dirs = get_all_image_paths_in_dir(real_folder)
            # Ignore the ones at the end.
            if batch_size != 1 and len(real_dirs) % batch_size != 0:
                real_dirs = real_dirs[:-(len(real_dirs) % batch_size)]
            print('The size of real dataset is %d images.' % len(real_dirs))


            fake_dirs = get_all_image_paths_in_dir(fake_folder)
            # Ignore the ones at the end.
            if batch_size != 1 and len(fake_dirs) % batch_size != 0:
                fake_dirs = fake_dirs[:-(len(fake_dirs) % batch_size)]
            print('The size of fake dataset is %d images.' % len(fake_dirs))


            # # Test training GAN differently***
            # generators_turn = True
            # # END TEST***


            for i in range(iter_start, iterations):
                # First decay the learning rate if we need to
                if i % lr_decay_steps == 0 and i != iter_start:
                    current_lr = learning_rate_decayed.eval()
                    sess.run(learning_rate_decayed.assign(max(min_lr, current_lr * lr_decay_rate)))

                current_content_dirs = get_batch_paths(real_dirs, i * batch_size, batch_size)
                content_pre_list = read_and_resize_batch_images(current_content_dirs, input_shape[1],
                                                                input_shape[2])


                current_fake_dirs = get_batch_paths(fake_dirs, i * batch_size, batch_size)
                fake_pre_list = read_and_resize_batch_images(current_fake_dirs, input_shape[1],
                                                                input_shape[2])

                last_step = (i == iterations - 1)


                adv_feed_dict = {adv_net_input_real: content_pre_list, adv_net_input_fake: fake_pre_list}
                # TEST printing before training
                print_progress(i, adv_feed_dict=adv_feed_dict, last=last_step)

                # if generators_turn:
                #     # generator_train_step.run(feed_dict=feed_dict)
                #     generator_train_step_through_adv.run(feed_dict=adv_feed_dict)
                #     adv_train_step.run(feed_dict=adv_feed_dict)

                # generator_train_step_through_adv.run(feed_dict=adv_feed_dict)
                # adv_train_step.run(feed_dict=adv_feed_dict)


                adv_generator_both_train.run(feed_dict=adv_feed_dict)
                # if i < 10000:
                #     generator_train_step.run(feed_dict=feed_dict)

                print_progress(i, adv_feed_dict=adv_feed_dict, last=last_step)
                # TODO:
                if i % 10 == 0:
                    with open(save_dir + 'adv_loss.tsv', 'a') as loss_record_file:
                        current_adv_loss_i = adv_loss_from_i.eval(feed_dict=adv_feed_dict)
                        current_adv_loss_g = adv_loss_from_g.eval(feed_dict=adv_feed_dict)
                        loss_record_file.write('%d\t%g\t%g\n' % (i, current_adv_loss_i, current_adv_loss_g))


                        if current_adv_loss_i < best_real_loss:
                            best_real_loss = current_adv_loss_i
                            best_real_image = content_pre_list
                        if current_adv_loss_g < best_fake_loss:
                            best_fake_loss = current_adv_loss_g
                            best_fake_image = fake_pre_list


                if (checkpoint_iterations and i % checkpoint_iterations == 0) or last_step:
                    # saver.save(sess, save_dir + 'model.ckpt', global_step=i)

                    # Because we now have batch, choose the first one in the batch as our sample image.
                    yield (
                        (None if last_step else i),
                        best_real_image, best_fake_image
                    )
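
A hedged sketch of a driver for the discriminator-only trainer above; folder paths and hyperparameter values are placeholders, and the tuple unpacking follows the yield at the end of the training loop.

# Hypothetical driver; all literals are placeholders.
import scipy.misc

for i, best_real, best_fake in color_sketches_net(
        128, 128, 20000, 4,      # height, width, iterations, batch_size
        5.0, 0.0002, 0.0002,     # content_weight, tv_weight, learning_rate
        checkpoint_iterations=500,
        save_dir='model/',
        real_folder='data/real/',
        fake_folder='data/fake/'):
    # `i is None` marks the final yield; the batches are the ones with the
    # lowest discriminator losses seen so far and may still be None early on.
    if i is None and best_real is not None:
        scipy.misc.imsave('best_real.png', best_real[0])
        scipy.misc.imsave('best_fake.png', best_fake[0])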
Esempio n. 52
0
def distance_query_generator(filename):
    """
    Creates the following dicts with the values of distances:
        - query_intra_distances: distances within the same domain of the same chain.
        - query_inter_distances: distances between different domains, on the same
          or a different chain.

    """
    # Creation of variables to work with new dicts
    trans_dom_query = {}
    query_inter_distances = {}
    query_intra_distances = {}

    file_id = (filename.split('/')[-1])[:-3]

    if '.pdb' in filename:
        try:
            trans_dom_query = phobius_runner(filename[:-3] + 'fasta')
        except:
            sys.stderr.write('*** It is recommended to add the fasta seq as '
                             'a separate file in the current directory ***\n')
            fasta_fil = open('fasta_to_generate.fasta', 'w')
            seq_fasta = get_seq_PDB_CIF(filename)
            fasta_fil.write(seq_fasta)
            fasta_fil.close()
            trans_dom_query = phobius_runner('fasta_to_generate.fasta')

        entity = PDBParser()

    elif '.cif' in filename:
        try:
            trans_dom_query = phobius_runner(filename[:-3] + 'fasta')
        except:
            raise ValueError(
                'To use .cif files introduce a fasta with the following name: '
                + filename[:-3] + 'fasta')

        entity = MMCIFParser()

    structure = entity.get_structure('X', filename)
    model = structure[0]

    for key, value in trans_dom_query.items():
        logging.info('Chain processed: {}\n Transdom: {}'.format(key, value))
        if '.pdb' in filename:
            for chain in model:
                chain1 = chain
        else:
            chain1 = model[key]
        for val in value:
            beg_helix1, end_helix1 = int(val[0]), int(val[1])
            # ################INTRA DISTANCES ####################################
            select_and_save_distances_query(beg_helix1, end_helix1, beg_helix1,
                                            end_helix1, chain1, chain1,
                                            query_intra_distances)

            # ################INTER DISTANCES SAME CHAIN  ##############################
            for val2 in value:
                if val == val2:
                    continue
                beg_helix2, end_helix2 = int(val2[0]), int(val2[1])
                select_and_save_distances_query(beg_helix1, end_helix1,
                                                beg_helix2, end_helix2, chain1,
                                                chain1, query_inter_distances)

            # ################INTER DISTANCES BTWN CHAIN  ########################
            for key2, value2 in trans_dom_query.items():
                if '.pdb' in filename:
                    for chain in model:
                        chain2 = chain
                else:
                    chain2 = model[key2]
                if chain1 == chain2:
                    continue
                for val3 in value2:
                    beg_helix3, end_helix3 = int(val3[0]), int(val3[1])
                    select_and_save_distances_query(beg_helix1, end_helix1,
                                                    beg_helix3, end_helix3,
                                                    chain1, chain2,
                                                    query_inter_distances)

    return (query_intra_distances, query_inter_distances)
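
A hedged usage sketch for the function above; the structure file name is a placeholder, and only the two returned dicts described in the docstring are used.

# Hypothetical usage; '1abc.pdb' is a placeholder and must sit next to a
# matching '1abc.fasta' (otherwise one is generated, per the code above).
intra, inter = distance_query_generator('1abc.pdb')
print('intra-domain distance entries: %d' % len(intra))
print('inter-domain distance entries: %d' % len(inter))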
Esempio n. 53
0
"""
from pymodbus.compat import IS_PYTHON3, PYTHON_VERSION
if IS_PYTHON3 and PYTHON_VERSION >= (3, 4):
    import asyncio
    import logging
    # ----------------------------------------------------------------------- #
    # Import the required asynchronous client
    # ----------------------------------------------------------------------- #
    from pymodbus.client.asynchronous.tcp import AsyncModbusTCPClient as ModbusClient
    # The UDP client is imported under the same alias below, which would shadow
    # the TCP client; keep it commented out unless UDP is actually wanted.
    # from pymodbus.client.asynchronous.udp import (AsyncModbusUDPClient as
    #                                               ModbusClient)
    from pymodbus.client.asynchronous import schedulers

else:
    import sys
    sys.stderr("This example needs to be run only on python 3.4 and above")
    sys.exit(1)

from threading import Thread
import time
# --------------------------------------------------------------------------- #
# configure the client logging
# --------------------------------------------------------------------------- #

logging.basicConfig()
log = logging.getLogger()
log.setLevel(logging.DEBUG)

# --------------------------------------------------------------------------- #
# specify slave to query
# --------------------------------------------------------------------------- #
Esempio n. 54
0
import sys
sys.path.append("./helpers/")

from producers import MyKafkaProducer



### main method that produces messages into Kafka topic ###

if __name__ == "__main__":

    if len(sys.argv) != 4:
        sys.stderr("Usage: main_produce.py <kafkaconfigfile> <schemafile> <s3configfile> \n")
        sys.exit(-1)

    kafka_configfile, schema_file, s3_configfile = sys.argv[1:4]

    prod = MyKafkaProducer(kafka_configfile, schema_file, s3_configfile)
    prod.produce_msgs()
def generate_scripts(args):

    # supported languages
    langId = {
        'portuguese': 'pt',
        'french': 'fr',
        'serbo-croatian': 'sh',
        'polish': 'pl',
        'czech': 'cs',
        'modern-greek': 'el',
        'catalan': 'ca',
        'bulgarian': 'bg',
        'danish': 'da',
        'estonian': 'et',
        'quechua': 'qu',
        'swedish': 'sv',
        'armenian': 'hy',
        'macedonian': 'mk',
        'arabic': 'ar',
        'dutch': 'nl',
        'hungarian': 'hu',
        'italian': 'it',
        'romanian': 'ro',
        'ukranian': 'uk',
        'german': 'de',
        'finnish': 'fi',
        'russian': 'ru',
        'turkish': 'tr',
        'spanish': 'es'
    }
    mode = args.mode
    embeddings = args.embedding_types.split(',')
    data_dir = args.data_dir

    # NOTE:
    # embeddings dir should have the following directory structure
    # embeddings_dir/language_id/embedding_type/final_embeds.vec
    # example: embeddings/de/word2vec/final_embeds.vec
    embedding_dir = args.embedding_dir

    # directory of output files
    json_config_dir = args.json_config_dir
    bash_dir = args.bash_dir
    model_dir = args.model_dir

    languages = {}
    if args.languages != 'all':
        language_set = args.languages.split(',')
    else:
        language_set = langId.keys()
    for lang in language_set:
        languages[lang] = langId[lang]

    # load feature template
    json_template = os.path.join("scripts", "subword" + mode + ".json")

    # this file lists number of possible values for each feature
    feature_set = os.path.join("scripts", mode + "_features.txt")

    # load feature dicts, currently only supported for five languages
    # German, Turkish, Finnish, Spanish, Russian
    # see feature_set file for examples
    feat_dict = defaultdict(lambda: defaultdict(int))
    with open(feature_set) as f:
        for line in f:
            line = line.strip()
            feature, lang, count, _ = line.split('\t')
            if lang in languages:
                feature = feature.replace(' ', '_')
                feat_dict[feature][lang] = count
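    # For reference, a hypothetical line of the <mode>_features.txt file parsed
    # above, with tab-separated columns (feature name, language, number of
    # possible values, and an unused trailing column):
    #     Case<TAB>german<TAB>4<TAB>_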

    # generate JSON config files
    for emb_type in embeddings:
        for task in feat_dict.keys():
            for lang in feat_dict[task]:

                output_file = os.path.join(json_config_dir, emb_type,
                                           task + '-' + lang + '.json')
                os.makedirs(os.path.dirname(output_file), exist_ok=True)
                fout = open(output_file, 'w')

                embedding_file = os.path.join(embedding_dir, langId[lang],
                                              emb_type, 'final_embeds.vec')
                if not os.path.isfile(embedding_file):
                    print(embedding_file)
                    sys.stderr("Embedding is not available for ", lang,
                               ". This language will be skipped.")
                    continue
                with open(json_template) as f:
                    for line in f:
                        if '_data_path' in line:
                            path = data_dir + '/' + task + '/' + lang
                            newline = line.replace('[data_dir]/[feat]/[lang]',
                                                   path)
                            fout.write(newline)
                        elif emb_type == 'elmo' and 'embedding_dim' in line:
                            newline = line.replace('300', '1024')
                            fout.write(newline)
                        elif emb_type == 'elmo' and 'input_dim' in line:
                            if mode == 'classification':
                                newline = line.replace('300', '1024')
                            elif mode == 'contrastive':
                                newline = line.replace('600', '2048')
                            fout.write(newline)
                        elif 'hidden_dims' in line:
                            newline = line.replace('x',
                                                   str(feat_dict[task][lang]))
                            fout.write(newline)
                        elif 'pretrained_embedding_file' in line:
                            newline = line.replace('pretrained_embedding_file',
                                                   embedding_file)
                            fout.write(newline)
                        else:
                            fout.write(line)
                fout.close()

    # generate bash scripts
    for emb_type in embeddings:

        # train scripts
        bash_file = os.path.join(bash_dir,
                                 'run_' + mode + '_' + emb_type + '.sh')
        os.makedirs(os.path.dirname(bash_file), exist_ok=True)

        with open(bash_file, 'w', encoding='utf-8') as f:

            # write headings
            f.write('#!/bin/bash\n\n')
            f.write(
                'export PYTHON_PATH=$PATH\nexport PYTHONIOENCODING=utf-8\n\n')

            for task in feat_dict.keys():
                for lang in feat_dict[task]:
                    json_file = os.path.join(json_config_dir, emb_type,
                                             task + '-' + lang + '.json')
                    model_path = os.path.join(model_dir, lang, emb_type, task)
                    f.write('allennlp train ' + json_file + ' -s ' +
                            model_path + ' --include-package classifiers\n')

        make_executable(bash_file)
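
For completeness, a hedged sketch of calling generate_scripts directly; the Namespace fields mirror the attributes read from args above, and every path and choice is a placeholder.

# Hypothetical invocation; field names mirror the args.<attr> accesses above.
from argparse import Namespace

generate_scripts(Namespace(
    mode='classification',             # picks scripts/subwordclassification.json and classification_features.txt
    embedding_types='word2vec,elmo',   # comma-separated embedding types
    data_dir='data',
    embedding_dir='embeddings',        # expects embeddings/<lang_id>/<emb_type>/final_embeds.vec
    json_config_dir='json_configs',
    bash_dir='bash_scripts',
    model_dir='models',
    languages='german,turkish',        # or 'all'
))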
Esempio n. 56
0
def win_ipconfig():
    """
    Calls the windows command line tool ipconfig with the "/all" argument. Parses the printed output into a sequence of dictionaries like:
    [ { "name": "entry_name", "properties": {"entry_property": "...", ...,}}, ...]
    Discovered entry properties include:
        subnet_mask, 
        primary_dns_suffix,
        media_state,
        description,
        link_local_ipv6_address,
        ipv6_address,
        dhcpv6_iaid,
        autoconfiguration_enabled,
        host_name,
        ipv4_address,
        node_type,
        wins_proxy_enabled,
        dhcpv6_client_duid,
        connection_specific_dns_suffix,
        dhcp_enabled,
        dns_suffix_search_list,
        dns_servers,
        default_gateway,
        lease_obtained,
        lease_expires,
        netbios_over_tcpip,
        ip_routing_enabled,
        physical_address,
        dhcp_server
    """
    ipconfig_output = ""
    entries = []
    property_key_set = set()
    # call ipconfig
    try:
        result = subprocess.run(
            #["ipconfig", "/all"],
            "ipconfig /all",
            check=True,
            shell=True,
            stdout=subprocess.PIPE)
        ipconfig_output = result.stdout.decode("utf-8")
    except subprocess.CalledProcessError as cpe:
        sys.stderr("call to ipconfig failed")
        return entries

    # process the ipconfig output
    lines = ipconfig_output.split("\r\n")
    a = ipconfig_output.replace('\r\n', '\t')
    a = a.strip('\t')
    a = re.sub(r' ?(\. )+', '', a)
    a = re.sub(r' :', ':', a)
    a = re.sub(r'\t\t(?=[A-Za-z])', '\r\n', a)
    a = re.sub(r': \t', ': EMPTY\t', a)
    a = re.sub(r'\t {4,}', ',', a)
    a = re.sub(r'\t +', '\t', a)
    a = re.sub(r'(?<=[A-Za-z])\t\t', ':\t\t', a)
    b = a.split('\r\n')

    # convert the ipconfig output into a dict

    for c in b:
        name, properties = c.split('\t\t')
        properties = properties.split('\t')
        props_dict = {}
        for p in properties:
            if ':' in p:
                key, value = re.split(':', p, maxsplit=1)
                key = key.strip().lower()
                key = re.sub(r'[^a-zA-Z0-9]', '_', key)
                property_key_set.add(key)
                v = value.strip()
                if "," in v:
                    v = v.split(',')
                props_dict[key] = v
        entry = dict(name=name.strip(':'), properties=props_dict)
        entries.append(entry)

    g_logger.debug("unique properties: {}".format(", ".join(property_key_set)))

    return entries
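
A hedged usage sketch for win_ipconfig, relying only on the entry structure described in the docstring; the property keys queried are among the "discovered" keys listed there.

# Hypothetical usage: print the IPv4 address and DNS servers per adapter.
for entry in win_ipconfig():
    props = entry["properties"]
    print("%s: ipv4=%s dns=%s" % (entry["name"],
                                  props.get("ipv4_address", "n/a"),
                                  props.get("dns_servers", "n/a")))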
Esempio n. 57
0
        chip_x += [float(base1_x)]
        chip_y += [float(base1_y)]
    elif 'base2' in chip_name:
        chip_x += [float(base2_x)]
        chip_y += [float(base2_y)]
    elif 'base3' in chip_name:
        chip_x += [float(base2_x)]
        chip_y += [float(base2_y)]
    elif 'spreader' in chip_name:
        chip_x += [float(spreader_x)]
        chip_y += [float(spreader_y)]
    elif 'null' == chip_name:
        chip_x += [0.00001]
        chip_y += [0.00001]
    else:
        sys.stderr.write('invalid chip name in input file ' + input_file + '\n')
        sys.exit()

    lay += [int(data[1])]
    x += [float(data[2])]
    y += [float(data[3])]
    if int(data[1]) == layer_tmp:
        count_tmp += 1
        layer_tmp = int(data[1])
    else:
        count_tmp = 1
        layer_tmp = int(data[1])
    count += [count_tmp]

num = len(rotate)
system_size = -10.0
Esempio n. 58
0
def main():
    train_test = sys.argv[1]
    if train_test not in ["train", "predict"]:
        sys.stderr("train or predict")
        exit(1)
    config_name = sys.argv[2]
    forced_decode_data = "data/brae.train.data"
    phrase_data_path = "data/phrase.list"
    src_count_path = "data/src.trans.data"
    tar_count_path = "data/tar.trans.data"
    brae_config = BRAEISOMAPConfig(config_name)
    train_name = "dim%d_lrec%f_lsem%f_ll2%f_alpha%f_beta%f_num%d_seed%d_batch%d_lr%f" % (brae_config.dim,
                                                                                         brae_config.weight_rec,
                                                                                         brae_config.weight_sem,
                                                                                         brae_config.weight_l2,
                                                                                         brae_config.alpha,
                                                                                         brae_config.beta,
                                                                                         brae_config.trans_num,
                                                                                         brae_config.random_seed,
                                                                                         brae_config.batch_size,
                                                                                         brae_config.optimizer.param["lr"])
    model_name = "model/%s" % train_name
    temp_model = model_name + ".temp"
    if train_test == "train":
        start_iter = int(sys.argv[3]) if len(sys.argv) > 3 else 0
        end_iter = int(sys.argv[4]) if len(sys.argv) > 4 else 25
        pre_logger("braeisomap_" + train_name)
        np.random.seed(brae_config.random_seed)
        if start_iter == 0:
            src_word_dict, tar_word_dict = read_phrase_pair_vocab(forced_decode_data)
            src_word_dict, tar_word_dict = add_trans_word_vocab(src_count_path, src_word_dict, tar_word_dict)
            tar_word_dict, src_word_dict = add_trans_word_vocab(tar_count_path, src_word_dict, tar_word_dict)
            src_word_dict = filter_vocab(src_word_dict, min_count=0)
            tar_word_dict = filter_vocab(tar_word_dict, min_count=0)
            src_phrases, tar_phrases, src_tar_pair = read_phrase_list(forced_decode_data, src_word_dict, tar_word_dict)

            src_phrases, tar_phrases = read_trans_list(src_count_path, src_phrases, tar_phrases,
                                                       src_word_dict, tar_word_dict)
            tar_phrases, src_phrases = read_trans_list(tar_count_path, tar_phrases, src_phrases,
                                                       tar_word_dict, src_word_dict)
            src_phrases = clean_text(src_phrases)
            tar_phrases = clean_text(tar_phrases)
            brae = pre_model(src_word_dict, tar_word_dict, brae_config, verbose=True)
            with open(temp_model, 'wb') as fout:
                pickle.dump(src_phrases, fout)
                pickle.dump(tar_phrases, fout)
                pickle.dump(src_tar_pair, fout)
                pickle.dump(brae, fout)
                pickle.dump(np.random.get_state(), fout)
            if end_iter == 1:
                exit(1)
        else:
            with open(temp_model, 'rb') as fin:
                src_phrases = pickle.load(fin)
                tar_phrases = pickle.load(fin)
                src_tar_pair = pickle.load(fin)
                brae = pickle.load(fin)
                np.random.set_state(pickle.load(fin))
        brae.train(src_phrases, tar_phrases, src_tar_pair, brae_config, model_name, start_iter, end_iter)
        brae.save_model("%s.model" % model_name)
    elif train_test == "predict":
        num_process = int(sys.argv[3]) if len(sys.argv) > 3 else 0
        brae_predict(phrase_data_path, train_name + ".pred", model_file="%s.model" % model_name,
                     bilinear=True, num_process=num_process)
    else:
        sys.stderr("train or predict")
        exit(1)
def get_emit_index(input_val, alphabet):
    for i in range(len(alphabet)):
        if alphabet[i] == input_val:
            return i
    
    sys.stderr("Could not find character " + input_val)
Esempio n. 60
0
        resulting_plist_path = os.path.join(build_dir, SAFARI_MANIFEST)
        plistlib.writePlist(plist, resulting_plist_path)

        background_dom = parseString('<html><head></head></html>')
        head = background_dom.getElementsByTagName('head')[0]

        for script in scripts['background_scripts']:
            script_element = background_dom.createElement('script')
            script_element.attributes['src'] = script
            # This is a hack to avoid generating self closing script tag
            script_element.appendChild(background_dom.createTextNode(''))
            head.appendChild(script_element)

        # Another hack to remove xml starting tag
        xml = '\n'.join(
            node.toxml('utf-8') for node in background_dom.childNodes)
        background.write(xml)


if __name__ == '__main__':
    if len(sys.argv) != 4:
        sys.stderr.write('usage: assign_scripts.py browser_name manifest_path build_dir\n')
        sys.exit(1)

    [browser_name, manifest_path, build_dir] = sys.argv[1:]

    if browser_name == 'chrome':
        chrome(manifest_path, build_dir)
    elif browser_name == 'safari':
        safari(manifest_path, build_dir)
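
The empty text node appended to each script element above works around minidom's serializer, which renders childless elements as self-closing tags; here is a minimal standalone sketch of that behaviour (file names are placeholders).

# Minimal sketch of the self-closing-tag workaround used above.
from xml.dom.minidom import parseString

dom = parseString('<html><head></head></html>')
head = dom.getElementsByTagName('head')[0]
script = dom.createElement('script')
script.attributes['src'] = 'background.js'
head.appendChild(script)
print(dom.toxml())   # ...<script src="background.js"/>... (self-closing)

script.appendChild(dom.createTextNode(''))
print(dom.toxml())   # ...<script src="background.js"></script>...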