Example #1
def _download_reference_files(conn):
    print('Downloading reference files')
    if not exists(reference_base_dir):
        mkdir(reference_base_dir)

    files = {'tree': (get_reference_fp('gg_13_8-97_otus.tree'),
                      'ftp://ftp.microbio.me/greengenes_release/'
                      'gg_13_8_otus/trees/97_otus.tree'),
             'taxonomy': (get_reference_fp('gg_13_8-97_otu_taxonomy.txt'),
                          'ftp://ftp.microbio.me/greengenes_release/'
                          'gg_13_8_otus/taxonomy/97_otu_taxonomy.txt'),
             'sequence': (get_reference_fp('gg_13_8-97_otus.fasta'),
                          'ftp://ftp.microbio.me/greengenes_release/'
                          'gg_13_8_otus/rep_set/97_otus.fasta')}

    for file_type, (local_fp, url) in viewitems(files):
        # Do not download the file if it exists already
        if exists(local_fp):
            print("SKIPPING %s: file already exists at %s. To "
                  "download the file again, erase the existing file first" %
                  (file_type, local_fp))
        else:
            try:
                urlretrieve(url, local_fp)
            except:
                raise IOError("Error: Could not fetch %s file from %s" %
                              (file_type, url))

    ref = Reference.create('Greengenes', '13_8', files['sequence'][0],
                           files['taxonomy'][0], files['tree'][0])

    _insert_processed_params(conn, ref)
Example #2
    def GetUserCfgDir(self):
        """
        Creates (if required) and returns a filesystem directory for storing
        user config files.

        """
        cfgDir = '.idlerc'
        userDir = os.path.expanduser('~')
        if userDir != '~': # expanduser() found user home dir
            if not os.path.exists(userDir):
                warn = ('\n Warning: os.path.expanduser("~") points to\n '+
                        userDir+',\n but the path does not exist.\n')
                sys.stderr.write(warn)
                userDir = '~'
        if userDir == "~": # still no path to home!
            # traditionally IDLE has defaulted to os.getcwd(), is this adequate?
            userDir = os.getcwd()
        userDir = os.path.join(userDir, cfgDir)
        if not os.path.exists(userDir):
            try:
                os.mkdir(userDir)
            except (OSError, IOError):
                warn = ('\n Warning: unable to create user config directory\n'+
                        userDir+'\n Check path and permissions.\n Exiting!\n\n')
                sys.stderr.write(warn)
                raise SystemExit
        return userDir
Example #3
def buildComponent(component, componentDir=None):
    """ Build the component. Return a pair of paths
        (pathToBinary, pathToXPTFile)"""
    logger = build_util.getLogger('build_components')
    # Save current working directory to set it back later
    prevDir = os.getcwd()
    # Component Directory
    if componentDir is None:
        componentDir = os.path.join(Settings.prefs.src_dir, "components", component)

    os.chdir(componentDir)
    logger.info("Making build and bin dirs for component %s"%component)
    buildDir = os.path.join(componentDir, "build")
    binDir = os.path.join(componentDir, "bin")
    
    for dir in [buildDir, binDir]:
        try:
            os.mkdir(dir)
        except OSError, err:
            if err.errno == errno.EEXIST:
                logger.warning("Couldn't make %s because it exists."%dir)
                logger.warning("Deleting %s"%dir)
                shutil.rmtree(dir)
                logger.warning("Trying to make %s again"%dir)
                os.mkdir(dir)
            else:
                raise
Example #4
def parseblocks(blocklycode):
    # this is where blockly code is parsed into a python file with the command list
    # saved in memory for transmission.
    global webotcommands
    panya = Panya()
    t = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    savedir = os.path.join(sdir, g.user.nickname, "sketches")
    if not os.path.exists(savedir):
        os.mkdir(savedir)
    filename = os.path.join(savedir, t + ".py")
    print "Saving python code to ", filename
    target = open(filename, "w")
    target.write(blocklycode)
    target.close()
    # We now compile the generated python strings in the blocklycode
    # into bytecode and execute the resultant .pyc through the exec function
    # in our current namespace (I can't figure out a better way to have the
    # webot class instance variables accessible)
    # Read about caveats here - http://lucumr.pocoo.org/2011/2/1/exec-in-python/
    compiledcode = compile(blocklycode, "<string>", "exec")
    exec compiledcode
    print webotcommands
    sessionresponse = portsetup(webotcommands)
    webotcommands = ""
    return sessionresponse
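A minimal Python 3 sketch of the same compile-and-exec step, assuming a stand-in string for the generated Blockly code and an explicit namespace dict instead of exec'ing into the caller's globals (the 'commands' name is illustrative):

blocklycode = "commands = ['forward', 'turn_left']"   # stand-in for generated code
namespace = {}                                        # explicit namespace for the generated code
compiledcode = compile(blocklycode, "<string>", "exec")
exec(compiledcode, namespace)                         # exec is a function in Python 3
print(namespace["commands"])                          # ['forward', 'turn_left']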
Example #5
	def copy_to_dist(self):
		print("Copy files of "+self.build_script+" to",self.build_path+self.build_dist)
		with open(self.build_path+self.build_script) as script:
			line_count = 0
			in_lib = False
			for line in script:
				print(line_count,line)
				line_count += 1
				if(("sys.path.append('"+self.build_script_lib+"')") in line):
					print("##### Inside lib")
					in_lib = True
				elif("#end "+self.build_script_lib+" imports" in line):
					print("##### Outside lib")
					in_lib = False
					break
				elif(in_lib):
					if(line.startswith("from")):
						print("class lib")
						lineSplit = line.split(" ")
						src = self.build_path+self.build_script_lib+"\\"+lineSplit[1]+".py"
						dest = self.build_path+self.build_dist+"\\"+lineSplit[1]+".py"
						dest_path = self.build_path+self.build_dist
						print(src,"to",dest)
						if(os.path.exists(dest_path)):
							copyfile(src,dest)
						else:
							print("Making dest: ", dest_path)
							os.mkdir(dest_path)
							copyfile(src,dest)
					elif(line.startswith("import")):
						print("is ordinary lib")

		src = self.build_path + self.build_script
		dest = self.build_path + self.build_dist + "\\"+ self.build_script
		copyfile(src,dest)
Example #6
    def testMercurialWithShareAndBundle(self):
        # First create the bundle
        bundle = os.path.join(self.tmpdir, 'bundle')
        run_cmd(['hg', 'bundle', '-a', bundle], cwd=self.repodir)

        # Create a commit
        open(os.path.join(self.repodir, 'test.txt'), 'w').write('hello!')
        run_cmd(['hg', 'add', 'test.txt'], cwd=self.repodir)
        run_cmd(['hg', 'commit', '-m', 'adding changeset'], cwd=self.repodir)

        # Wrap unbundle so we can tell if it got called
        orig_unbundle = unbundle
        try:
            called = []

            def new_unbundle(*args, **kwargs):
                called.append(True)
                return orig_unbundle(*args, **kwargs)
            hg.unbundle = new_unbundle

            shareBase = os.path.join(self.tmpdir, 'share')
            sharerepo = os.path.join(shareBase, self.repodir.lstrip("/"))
            os.mkdir(shareBase)
            mercurial(
                self.repodir, self.wc, shareBase=shareBase, bundles=[bundle])

            self.assertEquals(called, [True])
            self.assertEquals(
                getRevisions(self.repodir), getRevisions(self.wc))
            self.assertEquals(
                getRevisions(self.repodir), getRevisions(sharerepo))
        finally:
            hg.unbundle = orig_unbundle
Example #7
 def run(self):
     print('generating usage docs')
     # allows us to build docs without the C modules fully loaded during help generation
     from borg.archiver import Archiver
     parser = Archiver().build_parser(prog='borg')
     choices = {}
     for action in parser._actions:
         if action.choices is not None:
             choices.update(action.choices)
     print('found commands: %s' % list(choices.keys()))
     if not os.path.exists('docs/usage'):
         os.mkdir('docs/usage')
     for command, parser in choices.items():
         print('generating help for %s' % command)
         with open('docs/usage/%s.rst.inc' % command, 'w') as doc:
             if command == 'help':
                 for topic in Archiver.helptext:
                     params = {"topic": topic,
                               "underline": '~' * len('borg help ' + topic)}
                     doc.write(".. _borg_{topic}:\n\n".format(**params))
                     doc.write("borg help {topic}\n{underline}\n::\n\n".format(**params))
                     doc.write(Archiver.helptext[topic])
             else:
                 params = {"command": command,
                           "underline": '-' * len('borg ' + command)}
                 doc.write(".. _borg_{command}:\n\n".format(**params))
                 doc.write("borg {command}\n{underline}\n::\n\n".format(**params))
                 epilog = parser.epilog
                 parser.epilog = None
                 doc.write(re.sub("^", "    ", parser.format_help(), flags=re.M))
                 doc.write("\nDescription\n~~~~~~~~~~~\n")
                 doc.write(epilog)
Example #8
def mkdir( folder ):
  """create a folder, ignore if it exists"""
  try:
    folder = os.path.join(os.getcwd(),folder)
    os.mkdir( folder )
  except OSError as e:
    print "MakeDoc: Exception %s when creating folder" %repr(e), folder
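A hedged Python 3 variant of the same helper that ignores only the "already exists" case and re-raises anything else (the function name is illustrative):

import errno
import os

def mkdir_if_missing(folder):
    """Create a folder inside the current directory, ignoring only EEXIST."""
    folder = os.path.join(os.getcwd(), folder)
    try:
        os.mkdir(folder)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

# On Python 3.2+, os.makedirs(folder, exist_ok=True) achieves the same effect.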
Example #9
 def testMercurialWithNewShare(self):
     shareBase = os.path.join(self.tmpdir, 'share')
     sharerepo = os.path.join(shareBase, self.repodir.lstrip("/"))
     os.mkdir(shareBase)
     mercurial(self.repodir, self.wc, shareBase=shareBase)
     self.assertEquals(getRevisions(self.repodir), getRevisions(self.wc))
     self.assertEquals(getRevisions(self.repodir), getRevisions(sharerepo))
Example #10
 def setUp(self):
     # Create a temporary directory.
     f = tempfile.mktemp()
     system_tmp_dir = os.path.dirname(f)
     my_dir = 'testpath_tempdir_' + str(random.random())[2:]
     self.tempdir = os.path.join(system_tmp_dir, my_dir)
     os.mkdir(self.tempdir)
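A hedged sketch of the same setUp using tempfile.mkdtemp, which picks a unique name and creates the directory atomically instead of deriving a path from mktemp plus random (the class name is illustrative):

import shutil
import tempfile
import unittest

class TempDirTestCase(unittest.TestCase):
    def setUp(self):
        # mkdtemp creates the directory and returns its path in one step.
        self.tempdir = tempfile.mkdtemp(prefix='testpath_tempdir_')

    def tearDown(self):
        shutil.rmtree(self.tempdir, ignore_errors=True)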
Example #11
def newprovision(names, session, smbconf, provdir, logger):
    """Create a new provision.

    This provision will be the reference for knowing what has changed
    since the latest upgrade in the current provision.

    :param names: List of provision parameters
    :param creds: Credentials for the authentication
    :param session: Session object
    :param smbconf: Path to the smb.conf file
    :param provdir: Directory where the provision will be stored
    :param logger: A Logger
    """
    if os.path.isdir(provdir):
        shutil.rmtree(provdir)
    os.mkdir(provdir)
    logger.info("Provision stored in %s", provdir)
    return provision(logger, session, smbconf=smbconf,
            targetdir=provdir, samdb_fill=FILL_FULL, realm=names.realm,
            domain=names.domain, domainguid=names.domainguid,
            domainsid=names.domainsid, ntdsguid=names.ntdsguid,
            policyguid=names.policyid, policyguid_dc=names.policyid_dc,
            hostname=names.netbiosname.lower(), hostip=None, hostip6=None,
            invocationid=names.invocation, adminpass=names.adminpass,
            krbtgtpass=None, machinepass=None, dnspass=None, root=None,
            nobody=None, users=None,
            serverrole="domain controller",
            backend_type=None, ldapadminpass=None, ol_mmr_urls=None,
            slapd_path=None,
            dom_for_fun_level=names.domainlevel, dns_backend=names.dns_backend,
            useeadb=True, use_ntvfs=True)
Example #12
    def testMetaDataHandleForSavingModel(self):
        """Test the handling of SaveModel for Data with Meta Attributes
        """

        # Test saving a model created from train data with meta attributes
        self.assert_(len(self.WMetaTest.domain.getmetas())>=1,"The dataset WMetaTest should have Meta Attributes")
        plsM = AZorngPLS.PLSLearner(self.WMetaTest)
        AccNoMetaBefore = evalUtilities.getClassificationAccuracy(self.NoMetaTrain,plsM) 
        AccWMetaBefore = evalUtilities.getClassificationAccuracy(self.WMetaTest,plsM)


        # Save the model 
        scratchdir = os.path.join(AZOC.SCRATCHDIR, "scratchdir"+str(time.time()))
        os.mkdir(scratchdir)
        modelPath = os.path.join(scratchdir,"PLSModel")
        plsM.write(modelPath)

        # Read in the model
        plsR = AZorngPLS.PLSread(modelPath)
        self.assert_(len(plsR.imputer.defaults.domain.getmetas())==0,"There shouldn't be any Meta data now!")

        # Calculate classification accuracy 
        AccNoMetaAfter = evalUtilities.getClassificationAccuracy(self.NoMetaTrain, plsR)
        AccWMetaAfter = evalUtilities.getClassificationAccuracy(self.WMetaTest, plsR)

        # Test that the accuracy of the model is the same before and after saving
        self.assertEqual(AccNoMetaBefore, AccNoMetaAfter,"NoMeta: Predictions after loading saved model were different")
        self.assertEqual(AccWMetaBefore, AccWMetaAfter, "WMeta: Predictions after loading saved model were different")
        self.assertEqual(round(AccWMetaAfter,9), round(0.888888888889,9),"Accuracy was not the expected value!")
        self.assertEqual(round(AccNoMetaAfter,9), round(0.605769230769,9),"Accuracy was not the expected value!")
 
        # Remove the scratch directory
        os.system("/bin/rm -rf "+scratchdir)
Example #13
 def __init__(self):
     self.slider_dic = {"slider": ""}
     self.settings_dic = {"label1": "", "label2": "", "label3": "", "label4": "",
                         "label5": "", "label6": "", "label7": "", "label8": "",
                         "is_autorun": "", "iconpacks": "", "switchtree": "", "dialogName": "",
                         "column0": "", "column1": "", "tabspos": ""}
     self.icon_dic = {"about_comments": "", "about_site_label": "", "about_author": "",
                     "restoreItem": "", "mixerItem": "", "muteItem": "", "icon_tooltip_mute": "", "icon_tooltip": ""}
     self.CP = ConfigParser()
     self.loader = loadResFile()
     curr_locale = locale.getlocale()[0][0:2]
     self.localepath = self.loader.get("pyalsavolume", "lang/"+"%s.lng"%curr_locale)
     if not os.path.exists(self.localepath):
         if os.path.exists(self.loader.get("pyalsavolume", "lang/en.lng")):
            self.localepath =  "/usr/share/pyalsavolume/lang/en.lng"
         else:
            os.sys.stderr.write("Path %s does not exist" % self.localepath)
     homepath = os.environ["HOME"] + "/.local/share/pyalsavolume"
     if not os.path.exists(homepath):
         os.mkdir(homepath, 0o775)
     langpath = os.environ["HOME"] + "/.local/share/pyalsavolume/lang"
     if not os.path.exists(langpath):
         os.mkdir(langpath, 0o775)
     if self.localepath:
         self.CP.read(self.localepath)
     else:
         self.setDefault()
     self.getLocale()
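A hedged sketch of the same directory setup using os.makedirs, which creates the whole chain in one call and tolerates directories that already exist (paths taken from the snippet above):

import os

langpath = os.path.join(os.path.expanduser("~"), ".local", "share", "pyalsavolume", "lang")
os.makedirs(langpath, mode=0o775, exist_ok=True)  # creates pyalsavolume/ and lang/ as needed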
Example #14
def createProject(i):	
	# Rome
	lat = 41.893 + i*0.01
	lng = 12.483 + i*0.02

	# astronomical years (0 = 1BC)
	firstYear = -1500 + random.randint(0, 3000)
	lastYear = firstYear + 100 + random.randint(0, 50)
	projectName = "timetest" + str(i)
	
	projectFolderName = projectName
	# project folder
	pPath = os.path.join(outputDirName, projectFolderName)
	os.mkdir( pPath, 0755 )
	# metadata file
	metadataFile = open (os.path.join(pPath,"metadata.xml"), 'a')
	metadataFile.write(constructMetadata())
	metadataFile.close()
	# data folder
	dPath = os.path.join(pPath, "data")
	os.mkdir( dPath, 0755 )
	# tridas file
	tridasFile = open (os.path.join(dPath,"tridas.xml"), 'a')
	tridasFile.write(constructTridas(projectName, lat, lng, firstYear, lastYear))
	tridasFile.close()
	# associated and values when needed, but not yet!
	print "Created project in folder: " + projectFolderName
	#
	# create the zip file
	zipFilename = os.path.join(outputDirName, projectName+".zip")
	make_zipfile(zipFilename, pPath)
Example #15
    def testSavedModel(self):
        """Test PLS model saving
        Test to assure that a saved pls model gives the same predictions as before saving."""

        # Create a pls model
        pls = AZorngPLS.PLSLearner(self.train_data)

        # Calculate classification accuracy 
        Acc = evalUtilities.getClassificationAccuracy(self.test_data, pls)

        # Save the model 
        scratchdir = os.path.join(AZOC.SCRATCHDIR, "scratchdir"+str(time.time()))
        os.mkdir(scratchdir)
        modelPath = os.path.join(scratchdir,"PLSModel")
        pls.write(modelPath)
        
        # Read in the model
        plsM = AZorngPLS.PLSread(modelPath)

        # Calculate classification accuracy 
        savedAcc = evalUtilities.getClassificationAccuracy(self.test_data, plsM)

        # Test that the accuracy of the two classifiers is the exact same
        self.assertEqual(Acc, savedAcc)

        # Remove the scratch directory
        os.system("/bin/rm -rf "+scratchdir)
Example #16
    def build_dist(self):
        for sdir in self.staging_dirs:
            if os.path.exists(sdir):
                shutil.rmtree(sdir)
        main_stage, ninja_stage = self.staging_dirs
        modules = [os.path.splitext(os.path.split(x)[1])[0] for x in glob(os.path.join('mesonbuild/modules/*'))]
        modules = ['mesonbuild.modules.' + x for x in modules if not x.startswith('_')]
        modules += ['distutils.version']
        modulestr = ','.join(modules)
        python = shutil.which('python')
        cxfreeze = os.path.join(os.path.dirname(python), "Scripts", "cxfreeze")
        if not os.path.isfile(cxfreeze):
            print("ERROR: This script requires cx_freeze module")
            sys.exit(1)

        subprocess.check_call([python,
                               cxfreeze,
                               '--target-dir',
                               main_stage,
                               '--include-modules',
                               modulestr,
                               'meson.py'])
        if not os.path.exists(os.path.join(main_stage, 'meson.exe')):
            sys.exit('Meson exe missing from staging dir.')
        os.mkdir(ninja_stage)
        shutil.copy(shutil.which('ninja'), ninja_stage)
        if not os.path.exists(os.path.join(ninja_stage, 'ninja.exe')):
            sys.exit('Ninja exe missing from staging dir.')
Example #17
 def make_nodes(self):
     self.nodes = []
     for i in range(self.numnodes):
         nodedir = os.path.join(self.testdir, "node%d" % i)
         os.mkdir(nodedir)
         f = open(os.path.join(nodedir, "tahoe.cfg"), "w")
         f.write("[client]\n"
                 "introducer.furl = %s\n"
                 "shares.happy = 1\n"
                 "[storage]\n"
                 % (self.introducer_furl,))
         # the only tests for which we want the internal nodes to actually
         # retain shares are the ones where somebody's going to download
         # them.
         if self.mode in ("download", "download-GET", "download-GET-slow"):
             # retain shares
             pass
         else:
             # for these tests, we tell the storage servers to pretend to
             # accept shares, but really just throw them out, since we're
             # only testing upload and not download.
             f.write("debug_discard = true\n")
         if self.mode in ("receive",):
             # for this mode, the client-under-test gets all the shares,
             # so our internal nodes can refuse requests
             f.write("readonly = true\n")
         f.close()
         c = self.add_service(client.Client(basedir=nodedir))
         self.nodes.append(c)
Example #18
    def extract(self, file, dir):
        if not dir.endswith(':') and not os.path.exists(dir):
            os.mkdir(dir)

        zf = zipfile.ZipFile(file)

        # create directory structure to house files
        self._createstructure(file, dir)

        num_files = len(zf.namelist())
        percent = self.percent
        divisions = 100 / percent
        perc = int(num_files / divisions)

        # extract files to directory structure
        for i, name in enumerate(zf.namelist()):

            if self.verbose == True:
                print "Extracting %s" % name
            elif perc > 0 and (i % perc) == 0 and i > 0:
                complete = int (i / perc) * percent


            if not name.endswith('/'):
                try:
                    (path,namet) = os.path.split(os.path.join(dir, name))
                    os.makedirs( path)
                except:
                    pass
                outfile = open(os.path.join(path, namet), 'wb')
                outfile.write(zf.read(name))
                outfile.flush()
                outfile.close()
Example #19
	def animate(self, event):
		
		import os
		if os.path.exists("angle_rot_anim"):
			os.system("rm -rf angle_rot_anim")
		os.mkdir("angle_rot_anim")
		frame = 0
		fname = "angle_rot_anim/frame%05i.png" % frame
		self.start(None)
		self.g.screenShot(fname=fname)
		roll = 	float(self.roll_angle.GetValue())
		roll = 	-1*roll*pi/180
		for r in linspace(0, roll, 50):
			self.show_transform([r])
			time.sleep(.1)
			frame += 1 
			fname = "angle_rot_anim/frame%05i.png" % frame
			self.g.screenShot(fname=fname)
		for r in linspace(0, self.pitch_angle, 20):
			self.show_transform([roll, r])
			time.sleep(.1)			
			frame += 1 
			fname = "angle_rot_anim/frame%05i.png" % frame
			self.g.screenShot(fname=fname)
		for r in linspace(0, self.yaw_angle, 50):
			self.show_transform([roll, self.pitch_angle, r])
			time.sleep(.1)
			frame += 1 
			fname = "angle_rot_anim/frame%05i.png" % frame
			self.g.screenShot(fname=fname)
Example #20
def download_artifact(url, dest, uid, token):
    print('url is %s dest is %s uid is %s token is %s' % (url,dest,uid,token))

    # create dest if it does not exist
    if dest:
        if os.path.exists(dest):
            print('dest exists: ', dest)
        else:
            print('dest does not exist, creating now : ', dest)
            os.mkdir(dest)
    else:
        dest = str(Path.home())

    splittedurl = url.rsplit('/', 1).pop()
    dest = dest + '/' + splittedurl

    # https security handler
    if (not os.environ.get('PYTHONHTTPSVERIFY', '') and getattr(ssl, '_create_unverified_context', None)):
        ssl._create_default_https_context = ssl._create_unverified_context

    request = urllib.request.Request(url)

    if uid and token:

        credentials = ('%s:%s' % (uid, token))
        encoded_credentials = base64.b64encode(credentials.encode('ascii'))
        request.add_header('Authorization', 'Basic %s' % encoded_credentials.decode("ascii"))

    try:
        with urllib.request.urlopen(request, timeout=2) as response, open(dest, 'wb') as out_file:
            data = response.read()
            out_file.write(data)
            print("Success!")
    except urllib.error.URLError:
        print("Artifactory connection timed out, please check URL, UID and Token.")
Example #21
def monthly_card_import(db):
    data = request.files.data
    error = ''
    all_sqls = IMPORT_SQLS 

    if data and data.file:
        tmp_root = './tmp/'
        if not isdir(tmp_root):  # create tmp_root if it does not exist
            os.mkdir(tmp_root)
        tmp_filename = os.path.join(tmp_root, current_time('tmp_monthly_card%Y%m%d%H%M%S.xls'))
        tmp_file = open(tmp_filename, 'w')  # create a new .xls file and write the uploaded excel content into it
        rows = data.file.readlines()

        if not rows:  # empty file
            error = 'Data format error [2]'
            return template('error', error=error)
        for row in rows:
            tmp_file.write(row)
        tmp_file.close()

        # Before importing new data, export the existing database contents to the tmp directory as a backup, so the data can be restored if the import fails
        export_sqls = EXPORT_SQLS
        try:
            # If the backup file already exists, delete it and write it again
            if os.path.exists(os.path.join(tmp_root, BACK_FILE)):
                os.remove(os.path.join(tmp_root, BACK_FILE))
            excel_export(export_sqls, tmp_root, BACK_FILE, db)
        except Exception, e:
            print 'Data backup error: %s' % e

        error = excel_import(all_sqls, tmp_filename, db)
        os.remove(tmp_filename)  # delete the uploaded temporary file
Example #22
 def test_download_file_to_existing_folder(self):
   self.client.write('dl', 'hello')
   with temppath() as tpath:
     os.mkdir(tpath)
     self.client.download('dl', tpath)
     with open(osp.join(tpath, 'dl')) as reader:
       eq_(reader.read(), 'hello')
Example #23
  def test_upload_cleanup(self):
    dpath = mkdtemp()
    _write = self.client.write

    def write(hdfs_path, *args, **kwargs):
      if 'bar' in hdfs_path:
        raise RuntimeError()
      return _write(hdfs_path, *args, **kwargs)

    try:
      self.client.write = write
      npath = osp.join(dpath, 'hi')
      os.mkdir(npath)
      with open(osp.join(npath, 'foo'), 'w') as writer:
        writer.write('hello!')
      os.mkdir(osp.join(npath, 'bar'))
      with open(osp.join(npath, 'bar', 'baz'), 'w') as writer:
        writer.write('world!')
      try:
        self.client.upload('foo', dpath)
      except RuntimeError:
        ok_(not self._exists('foo'))
      else:
        ok_(False) # This shouldn't happen.
    finally:
      rmtree(dpath)
      self.client.write = _write
Example #24
def copy_wpt_tree(tree, dest):
    """Copy the working copy of a Tree to a destination directory.

    :param tree: The Tree to copy.
    :param dest: The destination directory"""
    if os.path.exists(dest):
        assert os.path.isdir(dest)

    shutil.rmtree(dest)
    os.mkdir(dest)

    for tree_path in tree.paths():
        source_path = os.path.join(tree.root, tree_path)
        dest_path = os.path.join(dest, tree_path)

        dest_dir = os.path.split(dest_path)[0]
        if not os.path.isdir(source_path):
            if not os.path.exists(dest_dir):
                os.makedirs(dest_dir)
            shutil.copy2(source_path, dest_path)

    for source, destination in [("testharness_runner.html", ""),
                                ("testharnessreport.js", "resources/")]:
        source_path = os.path.join(here, os.pardir, source)
        dest_path = os.path.join(dest, destination, os.path.split(source)[1])
        shutil.copy2(source_path, dest_path)

    add_license(dest)
Example #25
 def test_init_manifest_packageid(self):
     comm.setUp()
     os.chdir(comm.XwalkPath)
     comm.clear("org.xwalk.test")
     os.mkdir("org.xwalk.test")
     cmd = (
         comm.HOST_PREFIX
         + comm.PackTools
         + "crosswalk-app manifest "
         + comm.XwalkPath
         + "org.xwalk.test --platforms=android --package-id=org.xwalk.test"
     )
     os.system(cmd)
     with open(comm.ConstPath + "/../tools/org.xwalk.test/manifest.json") as json_file:
         data = json.load(json_file)
     updatecmd = (
         comm.HOST_PREFIX
         + comm.PackTools
         + "crosswalk-app manifest "
         + comm.XwalkPath
         + "org.xwalk.test --platforms=android --package-id=org.test.foo"
     )
     os.system(updatecmd)
     with open(comm.ConstPath + "/../tools/org.xwalk.test/manifest.json") as json_file_update:
         updatedata = json.load(json_file_update)
     comm.clear("org.xwalk.test")
     self.assertEquals(data["xwalk_package_id"].strip(os.linesep), "org.xwalk.test")
     self.assertEquals(updatedata["xwalk_package_id"].strip(os.linesep), "org.test.foo")
Example #26
   def testConfigurationFileOperations(self):
      """
         Does the config file exist, and can we create it? 
      """
      self.removeConfDir(self.confDir_)
      os.mkdir(self.c2_.getConfigurationDirectory())

      self.assert_(self.c2_.configurationFileExists() == False)
      self.c2_.createSampleConfigurationFile(self.c2_.getConfigurationFileFullPath())

      self.assert_(self.c2_.configurationFileExists() == True)

      os.remove(self.c2_.getConfigurationFileFullPath())
      self.removeConfDir(self.c2_.getConfigurationDirectory())

      # have it make its own config file.
      c3 = Configuration(self.confDir_, True)
      self.assert_(self.c2_.configurationFileExists() == True)

      # Now find out what happens when this is true, but the file already exists.
      c4 = Configuration(self.confDir_, True)

      # And when it's false
      c5 = Configuration(self.confDir_, False)
   
      os.remove(self.c2_.getConfigurationFileFullPath())
      self.removeConfDir(self.c2_.getConfigurationDirectory())
Example #27
def zip_path(src, dst, volume_size, exe7z, isIgnoreCache=False):
    '''
    zip a specified directory into several volumes; if the output directory already exists, the
    zip process is skipped

    #>>> zip_volumes('f:/build', 'f:/7zip', exe7z='D:/Project/PersontalTools/fast-sync-by-ftp/sync-service/bin/7z.exe')
    #'f:/7zip'
    #>>> os.path.isfile('f:/7zip/archives.zip.001')
    #True
    '''
    if os.path.isdir(dst):
        if isIgnoreCache:
            shutil.rmtree(dst)
        else:
            return

    os.mkdir(dst)

    archive_path = os.path.join(dst, file_pattern)
    cmd = '"{exe7z}" a {output} {source} -v{volume_size} '.format(exe7z=exe7z,
                                                                    output=archive_path,
                                                                    source=src,
                                                                    volume_size=volume_size)
    logging.info('Execute zip command: %s' % cmd)
    p = subprocess.Popen([exe7z, 'a', archive_path, src, '-v%s' % volume_size], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    logging.info(p.communicate())
Example #28
  def test_upload_with_progress(self):

    def callback(path, nbytes, history=defaultdict(list)):
      history[path].append(nbytes)
      return history

    dpath = mkdtemp()
    try:
      path1 = osp.join(dpath, 'foo')
      with open(path1, 'w') as writer:
        writer.write('hello!')
      os.mkdir(osp.join(dpath, 'bar'))
      path2 = osp.join(dpath, 'bar', 'baz')
      with open(path2, 'w') as writer:
        writer.write('the world!')
      self.client.upload(
        'up',
        dpath,
        chunk_size=4,
        n_threads=1, # Callback isn't thread-safe.
        progress=callback
      )
      eq_(self._read('up/foo'), b'hello!')
      eq_(self._read('up/bar/baz'), b'the world!')
      eq_(
        callback('', 0),
        {path1: [4, 6, -1], path2: [4, 8, 10, -1], '': [0]}
      )
    finally:
      rmtree(dpath)
Example #29
def UnzipFilenameToDir(filename, directory):
  """Unzip |filename| to |directory|."""
  cwd = os.getcwd()
  if not os.path.isabs(filename):
    filename = os.path.join(cwd, filename)
  zf = zipfile.ZipFile(filename)
  # Make base.
  if not os.path.isdir(directory):
    os.mkdir(directory)
  os.chdir(directory)
  # Extract files.
  for info in zf.infolist():
    name = info.filename
    if name.endswith('/'):  # dir
      if not os.path.isdir(name):
        os.makedirs(name)
    else:  # file
      directory = os.path.dirname(name)
      if not os.path.isdir(directory):
        os.makedirs(directory)
      out = open(name, 'wb')
      out.write(zf.read(name))
      out.close()
    # Set permissions. Permission info in external_attr is shifted 16 bits.
    os.chmod(name, info.external_attr >> 16L)
  os.chdir(cwd)
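For comparison, a hedged Python 3 sketch that leans on ZipFile.extractall; note that it does not restore the Unix permission bits the loop above recovers from info.external_attr (the function name is illustrative):

import os
import zipfile

def unzip_to_dir(filename, directory):
    """Extract |filename| into |directory|, creating the directory if needed."""
    os.makedirs(directory, exist_ok=True)
    with zipfile.ZipFile(filename) as zf:
        zf.extractall(directory)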
Example #30
 def test_download_file_to_existing_folder_with_matching_file(self):
   self.client.write('dl', 'hello')
   with temppath() as tpath:
     os.mkdir(tpath)
     with open(osp.join(tpath, 'dl'), 'w') as writer:
       writer.write('hey')
     self.client.download('dl', tpath)
Example #31
    # Google Drive Module
    G_DRIVE_CLIENT_ID = os.environ.get("G_DRIVE_CLIENT_ID", None)
    G_DRIVE_CLIENT_SECRET = os.environ.get("G_DRIVE_CLIENT_SECRET", None)
    G_DRIVE_AUTH_TOKEN_DATA = os.environ.get("G_DRIVE_AUTH_TOKEN_DATA", None)
    GDRIVE_FOLDER_ID = os.environ.get("GDRIVE_FOLDER_ID", None)
    TEMP_DOWNLOAD_DIRECTORY = os.environ.get("TEMP_DOWNLOAD_DIRECTORY",
                                             "./downloads")
else:
    # Put your ppe vars here if you are using local hosting
    PLACEHOLDER = None

# Setting Up CloudMail.ru and MEGA.nz extractor binaries,
# and giving them correct perms to work properly.
if not os.path.exists('bin'):
    os.mkdir('bin')

binaries = {
    "https://raw.githubusercontent.com/yshalsager/megadown/master/megadown":
    "bin/megadown",
    "https://raw.githubusercontent.com/yshalsager/cmrudl.py/master/cmrudl.py":
    "bin/cmrudl"
}

for binary, path in binaries.items():
    downloader = SmartDL(binary, path, progress_bar=False)
    downloader.start()
    os.chmod(path, 0o755)

# Global Variables
COUNT_MSG = 0
Example #32
 def createDirectory(self,directorypath):
     
     if not os.path.exists(directorypath):
         os.mkdir(directorypath)
         print "output path = "+directorypath+'\n'
Example #33
File: kast.py Project: pypeit/spit
def generate_pngs(category,
                  clobber=False,
                  seed=12345,
                  debug=False,
                  regular=True):
    """
    Parameters
    ----------
    category : str
    clobber : bool, optional
    debug : bool, optional

    Returns
    -------

    """
    bidx = [0, -8]
    # Pre-processing dict
    pdict = preprocess.original_preproc_dict()

    #
    rstate = np.random.RandomState(seed)
    outroot = spit_path + '/Kast/PNG/{:s}/'.format(category)

    # Flats first (they are the most common)
    flat_files = glob.glob(spit_path +
                           '/Kast/FITS/{:s}/flat/*fits.gz'.format(category))
    nflats = len(flat_files)
    # Output dir
    outdir = outroot + 'flat/'
    if not os.path.isdir(outdir):
        os.mkdir(outdir)
    # Loop me
    for flat_file in flat_files:
        spit_png.make_standard(flat_file,
                               outdir,
                               bidx,
                               0,
                               pdict,
                               clobber=clobber)

    # Other image types (regularizing to the number of flats)
    for itype in ['arc', 'bias', 'standard', 'science']:
        files = glob.glob(
            spit_path +
            '/Kast/FITS/{:s}/{:s}/*fits.gz'.format(category, itype))
        nfiles = len(files)
        # Output dir
        outdir = outroot + '{:s}/'.format(itype)
        if not os.path.isdir(outdir):
            os.mkdir(outdir)
        # Start looping
        ntot = 0  # Number of FITS files used
        step = 0  # Index looping through the image for normalization
        # Loop me
        while ntot < nflats:
            npull = min(nflats - ntot, nfiles)
            # Randomize, but use seeded to avoid new ones appearing!
            rand = rstate.rand(npull)
            srt = np.argsort(rand)
            #if len(np.unique(srt)) != npull:
            #    pdb.set_trace()
            #if npull < nfiles:
            #    pdb.set_trace()
            # Loop
            #save_files = []
            for kk in srt:
                filen = files[kk]
                #if step == 5:
                #    print(kk, filen)
                #save_files.append(filen)
                spit_png.make_standard(filen,
                                       outdir,
                                       bidx,
                                       step,
                                       pdict,
                                       clobber=clobber)
            # Check (Debugging)
            #for ifile in save_files:
            #    if 'may19_2015_r1' in ifile:
            #        print(ifile)
            #if step == 5:
            #    pdb.set_trace()
            # Increment
            step += 1
            ntot += npull

    # Sanity check
    if regular:
        for itype in ['flat', 'arc', 'bias', 'standard', 'science']:
            outroot = spit_path + '/Kast/PNG/{:s}/{:s}'.format(category, itype)
            files = glob.glob(outroot + '/*.png')
            try:
                assert len(files) == 4 * nflats
            except AssertionError:
                pdb.set_trace()
Example #34
	help="path to input image folder")
args = vars(ap.parse_args())

counter=0

ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) 

images_path=os.path.sep.join([ROOT_DIR, args['image_folder']])

list_images=os.listdir(images_path)

yolo_obj=YoloV3(images_path)

net=yolo_obj.load_weights()

output_directory = os.path.join(ROOT_DIR, "xmls")

if not os.path.isdir(output_directory):
	os.mkdir(output_directory)

for image in list_images:
	yolo_obj.process_images(image,net)
	yolo_obj.generate_result(image,counter, args['image_folder'])
	counter+=1
Example #35
"""
import requests
import os
from datetime import date
import browser_cookie3
import sys

# Get cookies from the browser
cj = browser_cookie3.firefox()
if not ("advent" in str(cj)):
    cj = browser_cookie3.chrome()

# Get today's day number
day_today = date.today().strftime("%d").lstrip("0")

# If we provide an argument, use it as the desired day. Ex: ./startDay.py 5. Otherwise use day_today
if len(sys.argv) > 1:
    day = int(sys.argv[1])
    if day < 0 or day > 31 or day > int(day_today):
        exit("Day is not valid")
else:
    day = day_today

print(f"Initializing day {day}")

if not os.path.exists(f"day{day}"):
    os.mkdir(f"day{day}")
    os.chdir(f"day{day}")
    r = requests.get(f"https://adventofcode.com/2019/day/{day}/input", cookies=cj)
    with open(f"input{day}", "w") as f:
        f.write(r.text)
Example #36
    def run_test_uboot_compile(self, bitbake_variables):
        # No need to test this on non-vexpress-qemu. It is a very resource
        # consuming test, and it is identical on all boards, since it internally
        # tests all boards.
        machine = bitbake_variables["MACHINE"]
        if not machine.startswith("vexpress-qemu"):
            pytest.skip("Skipping test on non-vexpress-qemu platforms")

        # This is a slow running test. Skip if appropriate.
        self.check_if_should_run()

        for task in ["do_provide_mender_defines", "prepare_recipe_sysroot"]:
            subprocess.check_call("cd %s && bitbake -c %s u-boot" %
                                  (os.environ['BUILDDIR'], task),
                                  shell=True)
        bitbake_variables = get_bitbake_variables("u-boot")

        shutil.rmtree("/dev/shm/test_uboot_compile", ignore_errors=True)

        env = copy.copy(os.environ)
        env['UBOOT_SRC'] = bitbake_variables['S']
        env['TESTS_DIR'] = os.getcwd()
        env['LOGS'] = os.path.join(os.getcwd(), "test_uboot_compile-logs")
        if os.path.exists(env['LOGS']):
            print(
                "WARNING: %s already exists. Will use cached logs from there. Recreate to reset."
                % env['LOGS'])
        else:
            os.mkdir(env['LOGS'])

        configs_to_test = self.collect_and_prepare_boards_to_test(
            bitbake_variables, env)

        env['BOARD_LOGS'] = " ".join(configs_to_test)

        try:
            sanitized_makeflags = bitbake_variables['EXTRA_OEMAKE']
            sanitized_makeflags = sanitized_makeflags.replace("\\\"", "\"")
            sanitized_makeflags = re.sub(" +", " ", sanitized_makeflags)
            env['MAYBE_UBI'] = "--ubi" if machine == "vexpress-qemu-flash" else ""
            # Compile all boards. The reason for using a makefile is to get easy
            # parallelization.
            subprocess.check_call(
                "make -j %d -f %s SUBJOBCOUNT=-j%d TMP=/dev/shm/test_uboot_compile %s"
                % (self.parallel_job_count(),
                   os.path.join(env['TESTS_DIR'],
                                "files/Makefile.test_uboot_automation"),
                   self.parallel_subjob_count(), sanitized_makeflags),
                shell=True,
                env=env,
                stderr=subprocess.STDOUT)

            # Now check that the ratio of compiled boards is as expected. This
            # number may change over time as U-Boot changes, but big discrepancies
            # should be checked out.
            failed = 0.0
            total = 0.0
            for file in os.listdir(env['LOGS']):
                if not file.endswith("_defconfig"):
                    continue

                total += 1
                with open(os.path.join(env['LOGS'], file)) as fd:
                    if "AutoPatchFailed\n" in fd.readlines():
                        failed += 1

            assert total == len(
                configs_to_test
            ), "Number of logs do not match the number of boards we tested? Should not happen"

            if machine == "vexpress-qemu":
                # PLEASE UPDATE the version you used to find this number if you update it.
                # From version: v2018.05
                measured_failed_ratio = 198.0 / 664.0
            elif machine == "vexpress-qemu-flash":
                # PLEASE UPDATE the version you used to find this number if you update it.
                # From version: v2018.05
                measured_failed_ratio = 36.0 / 159.0

            # We tolerate a certain percentage discrepancy in either direction.
            tolerated_discrepancy = 0.1

            lower_bound = measured_failed_ratio * (1.0 - tolerated_discrepancy)
            upper_bound = measured_failed_ratio * (1.0 + tolerated_discrepancy)
            try:
                assert failed / total >= lower_bound, "Less boards failed than expected. Good? Or a mistake somewhere? Failed: %d, Total: %d" % (
                    failed, total)
                assert failed / total <= upper_bound, "More boards failed than expected. Failed: %d, Total: %d" % (
                    failed, total)
            except AssertionError:
                for file in os.listdir(env['LOGS']):
                    with open(os.path.join(env['LOGS'], file)) as fd:
                        log = fd.readlines()
                        if "AutoPatchFailed\n" in log:
                            print(
                                "Last 50 lines of output from failed board: " +
                                file)
                            print("".join(log[-50:]))
                raise

            shutil.rmtree(env['LOGS'])

        finally:
            shutil.rmtree("/dev/shm/test_uboot_compile", ignore_errors=True)
Example #37
def _handle_child(child_socket, root_dir, in_dir, out_dir, *, fork_twice=True, mount_proc=True):
    host_euid = geteuid()
    host_egid = getegid()
    unshare(CLONE_NEWNS | CLONE_NEWUTS | CLONE_NEWIPC |
            CLONE_NEWUSER | CLONE_NEWPID | CLONE_NEWNET)
    write_text_file('/proc/self/uid_map', '1000 {} 1'.format(host_euid))
    try:
        write_text_file('/proc/self/setgroups', 'deny')
    except FileNotFoundError:
        pass
    write_text_file('/proc/self/gid_map', '1000 {} 1'.format(host_egid))
    setresuid(1000, 1000, 1000)
    setresgid(1000, 1000, 1000)
    sethostname('icebox')
    if fork_twice:
        pid = fork()
        if pid != 0:
            child_socket.close()
            waitpid(pid, 0)
            exit()

    # Prepare sandbox filesystem.
    mount('tmpfs', root_dir, 'tmpfs', MS_NOSUID)
    if mount_proc:
        proc_dir = path.join(root_dir, 'proc')
        mkdir(proc_dir)
        mount('proc', proc_dir, 'proc', MS_NOSUID)
    bind_or_link('/bin', path.join(root_dir, 'bin'))
    bind_or_link('/etc/alternatives', path.join(root_dir, 'etc/alternatives'))
    bind_or_link('/lib', path.join(root_dir, 'lib'))
    bind_or_link('/lib64', path.join(root_dir, 'lib64'))
    bind_or_link('/usr/bin', path.join(root_dir, 'usr/bin'))
    bind_or_link('/usr/include', path.join(root_dir, 'usr/include'))
    bind_or_link('/usr/lib', path.join(root_dir, 'usr/lib'))
    bind_or_link('/usr/lib64', path.join(root_dir, 'usr/lib64'))
    bind_or_link('/usr/libexec', path.join(root_dir, 'usr/libexec'))
    bind_mount(in_dir, path.join(root_dir, 'in'))
    bind_mount(out_dir, path.join(root_dir, 'out'), rdonly=False)
    chdir(root_dir)
    mkdir('old_root')
    pivot_root('.', 'old_root')
    umount('old_root', MNT_DETACH)
    rmdir('old_root')
    write_text_file('/etc/passwd', 'icebox:x:1000:1000:icebox:/:/bin/bash\n')
    mount('/', '/', '', MS_BIND | MS_REMOUNT | MS_RDONLY | MS_NOSUID)

    # Execute pickles.
    socket_file = child_socket.makefile('rwb')
    while True:
        try:
            func = cloudpickle.load(socket_file)
        except EOFError:
            exit()
        try:
            ret, err = func(), None
        except Exception as e:
            ret, err = None, e
        data = cloudpickle.dumps((ret, err))
        socket_file.write(pack('I', len(data)))
        socket_file.write(data)
        socket_file.flush()
Example #38
def _unittest_dsdl_definition_constructor() -> None:
    import tempfile
    from .dsdl_definition import FileNameFormatError

    directory = tempfile.TemporaryDirectory()
    root_ns_dir = os.path.join(directory.name, 'foo')

    os.mkdir(root_ns_dir)
    os.mkdir(os.path.join(root_ns_dir, 'nested'))

    def touchy(relative_path: str) -> None:
        p = os.path.join(root_ns_dir, relative_path)
        os.makedirs(os.path.dirname(p), exist_ok=True)
        with open(p, 'w') as f:
            f.write('# TEST TEXT')

    def discard(relative_path: str) -> None:
        os.unlink(os.path.join(root_ns_dir, relative_path))

    touchy('123.Qwerty.123.234.uavcan')
    touchy('nested/2.Asd.21.32.uavcan')
    touchy('nested/Foo.32.43.uavcan')

    dsdl_defs = _construct_dsdl_definitions_from_namespace(root_ns_dir)
    print(dsdl_defs)
    lut = {x.full_name: x for x in dsdl_defs}    # type: typing.Dict[str, DSDLDefinition]
    assert len(lut) == 3

    assert str(lut['foo.Qwerty']) == repr(lut['foo.Qwerty'])
    assert str(lut['foo.Qwerty']) == \
        "DSDLDefinition(name='foo.Qwerty', version=Version(major=123, minor=234), fixed_port_id=123, " \
        "file_path='%s')" % lut['foo.Qwerty'].file_path

    assert str(lut['foo.nested.Foo']) == \
        "DSDLDefinition(name='foo.nested.Foo', version=Version(major=32, minor=43), fixed_port_id=None, " \
        "file_path='%s')" % lut['foo.nested.Foo'].file_path

    t = lut['foo.Qwerty']
    assert t.file_path == os.path.join(root_ns_dir, '123.Qwerty.123.234.uavcan')
    assert t.has_fixed_port_id
    assert t.fixed_port_id == 123
    assert t.text == '# TEST TEXT'
    assert t.version.major == 123
    assert t.version.minor == 234
    assert t.name_components == ['foo', 'Qwerty']
    assert t.short_name == 'Qwerty'
    assert t.root_namespace == 'foo'
    assert t.full_namespace == 'foo'

    t = lut['foo.nested.Asd']
    assert t.file_path == os.path.join(root_ns_dir, 'nested', '2.Asd.21.32.uavcan')
    assert t.has_fixed_port_id
    assert t.fixed_port_id == 2
    assert t.text == '# TEST TEXT'
    assert t.version.major == 21
    assert t.version.minor == 32
    assert t.name_components == ['foo', 'nested', 'Asd']
    assert t.short_name == 'Asd'
    assert t.root_namespace == 'foo'
    assert t.full_namespace == 'foo.nested'

    t = lut['foo.nested.Foo']
    assert t.file_path == os.path.join(root_ns_dir, 'nested', 'Foo.32.43.uavcan')
    assert not t.has_fixed_port_id
    assert t.fixed_port_id is None
    assert t.text == '# TEST TEXT'
    assert t.version.major == 32
    assert t.version.minor == 43
    assert t.name_components == ['foo', 'nested', 'Foo']
    assert t.short_name == 'Foo'
    assert t.root_namespace == 'foo'
    assert t.full_namespace == 'foo.nested'

    touchy('nested/Malformed.MAJOR.MINOR.uavcan')
    try:
        _construct_dsdl_definitions_from_namespace(root_ns_dir)
    except FileNameFormatError as ex:
        print(ex)
        discard('nested/Malformed.MAJOR.MINOR.uavcan')
    else:       # pragma: no cover
        assert False

    touchy('nested/NOT_A_NUMBER.Malformed.1.0.uavcan')
    try:
        _construct_dsdl_definitions_from_namespace(root_ns_dir)
    except FileNameFormatError as ex:
        print(ex)
        discard('nested/NOT_A_NUMBER.Malformed.1.0.uavcan')
    else:       # pragma: no cover
        assert False

    touchy('nested/Malformed.uavcan')
    try:
        _construct_dsdl_definitions_from_namespace(root_ns_dir)
    except FileNameFormatError as ex:
        print(ex)
        discard('nested/Malformed.uavcan')
    else:       # pragma: no cover
        assert False

    _construct_dsdl_definitions_from_namespace(root_ns_dir)  # making sure all errors are cleared

    touchy('nested/super.bad/Unreachable.1.0.uavcan')
    try:
        _construct_dsdl_definitions_from_namespace(root_ns_dir)
    except FileNameFormatError as ex:
        print(ex)
    else:       # pragma: no cover
        assert False

    try:
        _construct_dsdl_definitions_from_namespace(root_ns_dir + '/nested/super.bad')
    except FileNameFormatError as ex:
        print(ex)
    else:       # pragma: no cover
        assert False

    discard('nested/super.bad/Unreachable.1.0.uavcan')
Example #39
import imageio
from matplotlib import pyplot as plt
from functools import partial
import torch

from unbalancedot.functional import regularized_ot, hausdorff_divergence, \
    sinkhorn_divergence
from unbalancedot.sinkhorn import BatchVanillaSinkhorn
from unbalancedot.entropy import KullbackLeibler, Balanced, TotalVariation, \
    Range
from unbalancedot.utils import euclidean_cost

# Build path to save plots
path = os.getcwd() + "/output"
if not os.path.isdir(path):
    os.mkdir(path)
path = path + "/unbalanced_gradient_flow_waffle"
if not os.path.isdir(path):
    os.mkdir(path)

# Check GPU availability for computational speedup
use_cuda = torch.cuda.is_available()
dtype = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
torch.set_default_tensor_type(dtype)


###############################################
# Display routine
# ~~~~~~~~~~~~~~~~~
def load_image(fname):
    img = imageio.imread(fname, as_gray=True)  # Grayscale
Example #40
from keras import backend as K
from keras.optimizers import RMSprop
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
from sklearn.metrics import roc_curve, auc, roc_auc_score

isFast = True # If True, then it runs on a very small dataset (and results won't be that great)

dataset_type = 'process'
#dataset_type = 'domain'

OUTPUT_DIR = 'output'

if not os.path.isdir(OUTPUT_DIR):
    os.mkdir(OUTPUT_DIR)

if dataset_type == 'domain':
    OUTPUT_FILE = os.path.join(OUTPUT_DIR, 'domain_results.pkl')
    INPUT_FILE = os.path.join('data', 'domains_spoof.pkl')
    IMAGE_FILE = os.path.join(OUTPUT_DIR, 'domains_roc_curve.png')
    OUTPUT_NAME = 'Domain Spoofing'
elif dataset_type == 'process':
    OUTPUT_FILE = os.path.join(OUTPUT_DIR, 'process_results.pkl')
    INPUT_FILE = os.path.join('data', 'process_spoof.pkl')
    IMAGE_FILE = os.path.join(OUTPUT_DIR, 'process_roc_curve.png')
    OUTPUT_NAME = 'Process Spoofing'
else:
    raise Exception('Unknown dataset type: %s' % (dataset_type,))

def generate_imgs(strings, font_location, font_size, image_size, text_location):
Example #41
import os
from pathlib import Path
import pandas as pd

smpl_dstr = Path('MeGlass/test')
output_path = Path('MeGlass_120x120')
train_gl = pd.read_csv(smpl_dstr / 'gallery_black_glass.txt',header=None)
train_wth_gl = pd.read_csv(smpl_dstr / 'gallery_no_glass.txt',header=None)
val_gl = pd.read_csv(smpl_dstr / 'probe_black_glass.txt',header=None)
val_wth_gl = pd.read_csv(smpl_dstr / 'probe_no_glass.txt',header=None)

os.mkdir('MeGlass_120x120/train')
os.mkdir('MeGlass_120x120/val')

os.mkdir('MeGlass_120x120/train/glass')
os.mkdir('MeGlass_120x120/train/noglass')
os.mkdir('MeGlass_120x120/val/glass')
os.mkdir('MeGlass_120x120/val/noglass')

itrt = [(train_gl, 'train', 'glass'), (train_wth_gl, 'train', 'noglass'), 
        (val_gl, 'val', 'glass'), (val_wth_gl, 'val', 'noglass')]

for pth in itrt:
    for i in range(len(pth[0])):
        file = pth[0].iloc[i][0] 
        os.replace(output_path / file, output_path / pth[1] / pth[2] / file)
    
        
print('Size of glasses validation')
print(len(os.listdir(output_path / 'val' / 'glass')))

Example #42
for key in tqdm(models):

    #key = "mutex_nsep_cov"
    #threshold_start = 0.00007  # for 1000ncomb
    #threshold_start = 0.00009 #for 1000ncomb
    #threshold_start = 0.00029 #for 100ncomb

    #input file path
    filepath = "../" + network_name + "/out/connected_components_original/" + key + "/"
    our_path = "../" + network_name + "/out/tmp/connected_components_isolarge/" + key + "/"
    _path_ = "../" + network_name + "/out/tmp/connected_components/" + key + "/"

    if not os.path.exists(_path_):
        os.mkdir(_path_)
    try:
        # Get the starting threshold from the file name at n =1000
        threshold_start = float(glob.glob(filepath+'cc_n1000_k3_*')[0].split('/')[-1].split('d')[1][:-4])-7e-5
        print('Start Threshold {:10f}'.format(threshold_start))
    except:
        print(glob.glob(filepath+'cc_n1000_*'))

    path = our_path
    print (path)

    if not os.path.exists(our_path):
        os.mkdir(our_path)

    LARGE_NUM = 100000
    k = 12
Example #43
    if decay_count >= decay_threshold:
        print('\t Reducing learning rate')
        decay_count = 0
        session.run(inc_gstep)


if __name__ == "__main__":

    num_files = 3
    dir_name = os.path.join(FLAGS.data_path, FLAGS.dataset)
    filenames = ['train_clean_{}.txt'.format(FLAGS.data_size), 'valid_clean.txt', 'test_clean.txt']
    documents = []

    model_path = os.path.join(dir_name, FLAGS.model_name)
    if not os.path.exists(model_path):
        os.mkdir(model_path)

    # read training data
    print('\nProcessing file %s' % os.path.join(dir_name, filenames[0]))
    train_words, train_mask = read_train_data(os.path.join(dir_name, filenames[0]))
    documents.append(train_words)
    print('Data size (Tokens) (Document %d) %d' % (0, len(train_words)))
    print('Sample string (Document %d) %s' % (0, train_words[:50]))

    # read valid data
    print('\nProcessing file %s' % os.path.join(dir_name, filenames[1]))
    valid_words = read_data(os.path.join(dir_name, filenames[1]))
    documents.append(valid_words)
    print('Data size (Tokens) (Document %d) %d' % (1, len(valid_words)))
    print('Sample string (Document %d) %s' % (1, valid_words[:50]))
Example #44
## Developed by Deyvid Amgarten

# Libraries
import os
import subprocess



# Greeting message
print('\nYou only need to run this script once!\n')

# Create model directory
try:
    os.stat('models/')
except:
    os.mkdir('models/')

# Verify and download DBs
if not os.path.isfile('models/all_vogs_hmm_profiles_feb2018.hmm.h3m'):
    print('Downloading flat file database. Do not worry, that will just take a few minutes and is executed only in the first time... \n')
    os.system('wget http://projetos.lbi.iq.usp.br/phaghost/vHULK/models/AllvogHMMprofiles.tar.gz')
    print('Extracting database file...\n')
    if subprocess.call('tar -xzf AllvogHMMprofiles.tar.gz', shell=True) == 1:
        print('Error extracting database\n')
        quit()
    subprocess.run('cat AllvogHMMprofiles/* > models/all_vogs_hmm_profiles_feb2018.hmm', shell=True)
    subprocess.run('rm -r AllvogHMMprofiles/ AllvogHMMprofiles.tar.gz', shell=True)
    print('Compressing hmm database...')
    if subprocess.call('hmmpress models/all_vogs_hmm_profiles_feb2018.hmm', shell=True) == 1:
        print('Error using hmmer tools (hmmpress). Verify if it is installed!\n')
        quit()
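A hedged sketch of the extraction step above using subprocess.run with check=True, which raises CalledProcessError on any non-zero exit code instead of comparing the return value to 1:

import subprocess

try:
    subprocess.run(['tar', '-xzf', 'AllvogHMMprofiles.tar.gz'], check=True)
except subprocess.CalledProcessError:
    print('Error extracting database\n')
    quit()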
Example #45
SEED = 1  # Seed for the random split, used in random.shuffle() below
random.seed(SEED)

categories = os.listdir(sourcedir)
nfiles = [len(os.listdir(sourcedir + cc)) for cc in categories]

if verbose:
    print("List of categories and number of files in them:")
    for name, num in zip(categories, nfiles):
        print("{}: {} images".format(name, num))
    print("Total: {} images".format(sum(nfiles)))

    print("Splitting with ratio {}...".format(rvalid1))

with suppress(FileExistsError):
    os.mkdir(traindir)
with suppress(FileExistsError):
    os.mkdir(vdir1)
for cc in categories:
    with suppress(FileExistsError):
        os.mkdir(traindir + cc)
    with suppress(FileExistsError):
        os.mkdir(vdir1 + cc)

for cc in enumerate(categories):
    if os.listdir(traindir + cc[1]):
        if emptydirs:
            rmtree(traindir + cc[1])
            os.mkdir(traindir + cc[1])
        else:
            raise OSError(traindir + cc[1] + ' is not empty')
Example #46
def _regenerate_and_sign_metadata_remote(r):

    # not required
    if not r.is_signed:
        return

    # fix up any remotes that are not dirty, but have firmware that is dirty
    # -- which shouldn't happen, but did...
    if not r.is_dirty:
        for fw in r.fws:
            if not fw.is_dirty:
                continue
            print('Marking remote %s as dirty due to %u' % (r.name, fw.firmware_id))
            r.is_dirty = True
            fw.is_dirty = False

    # not needed
    if not r.is_dirty:
        return

    # set destination path from app config
    download_dir = app.config['DOWNLOAD_DIR']
    if not os.path.exists(download_dir):
        os.mkdir(download_dir)

    invalid_fns = []
    print('Updating: %s' % r.name)

    # create metadata for each remote
    fws_filtered = []
    for fw in db.session.query(Firmware):
        if fw.remote.name in ['private', 'deleted']:
            continue
        if not fw.signed_timestamp:
            continue
        if r.check_fw(fw):
            fws_filtered.append(fw)
    settings = _get_settings()
    blob_xmlgz = _generate_metadata_kind(fws_filtered,
                                         firmware_baseuri=settings['firmware_baseuri'])

    # write metadata-?????.xml.gz
    fn_xmlgz = os.path.join(download_dir, r.filename)
    with open(fn_xmlgz, 'wb') as f:
        f.write(blob_xmlgz)
    invalid_fns.append(fn_xmlgz)

    # write metadata.xml.gz
    fn_xmlgz = os.path.join(download_dir, r.filename_newest)
    with open(fn_xmlgz, 'wb') as f:
        f.write(blob_xmlgz)
    invalid_fns.append(fn_xmlgz)

    # create Jcat item with SHA1 and SHA256 checksum blobs
    jcatfile = JcatFile()
    jcatitem = jcatfile.get_item(r.filename)
    jcatitem.add_alias_id(r.filename_newest)
    jcatitem.add_blob(JcatBlobSha1(blob_xmlgz))
    jcatitem.add_blob(JcatBlobSha256(blob_xmlgz))

    # write each signed file
    for blob in ploader.metadata_sign(blob_xmlgz):

        # add GPG only to archive for backwards compat with older fwupd
        if blob.kind == JcatBlobKind.GPG:
            fn_xmlgz_asc = fn_xmlgz + '.' + blob.filename_ext
            with open(fn_xmlgz_asc, 'wb') as f:
                f.write(blob.data)
            invalid_fns.append(fn_xmlgz_asc)

        # add to Jcat file too
        jcatitem.add_blob(blob)

    # write jcat file
    fn_xmlgz_jcat = fn_xmlgz + '.jcat'
    with open(fn_xmlgz_jcat, 'wb') as f:
        f.write(jcatfile.save())
    invalid_fns.append(fn_xmlgz_jcat)

    # update PULP
    if r.name == 'stable':
        _metadata_update_pulp(download_dir)

    # do this all at once right at the end of all the I/O
    for fn in invalid_fns:
        print('Invalidating {}'.format(fn))
        ploader.file_modified(fn)

    # mark as no longer dirty
    if not r.build_cnt:
        r.build_cnt = 0
    r.build_cnt += 1
    r.is_dirty = False

    # log what we did
    _event_log('Signed metadata {} build {}'.format(r.name, r.build_cnt))

    # only keep the last 6 metadata builds (24h / stable refresh every 4h)
    suffix = r.filename.split('-')[2]
    fns = glob.glob(os.path.join(download_dir, 'firmware-*-{}'.format(suffix)))
    for fn in sorted(fns):
        build_cnt = int(fn.split('-')[1])
        if build_cnt + 6 > r.build_cnt:
            continue
        os.remove(fn)
        _event_log('Deleted metadata {} build {}'.format(r.name, build_cnt))

    # all firmwares are contained in the correct metadata now
    for fw in fws_filtered:
        fw.is_dirty = False
    db.session.commit()
Ejemplo n.º 47
0
        c=b[0]
        d=c.split('/')

        if int(d[1])<10:
            d[1]="0"+d[1]
        if int(d[2])<10:
            d[2]='0'+d[2]
        e=d[0]+d[1]+d[2]

        urlpdf1=a[2]
        comp=a[4]
        name=a[9]
        hy=a[10]
        pdfsource =  r"http://data.eastmoney.com/report/"+e+"/hy,"+urlpdf1+".html"
        print(pdfsource)
        pdfpage = requests.get(pdfsource)
        pdfpage.encoding = 'gb2312'
        pdfpagetext = pdfpage.text
        fp2=r"(?<=http://pdf.dfcfw.com/pdf/).+?(?=pdf)"
        pattern2 = re.compile(fp2)
        results2 = pattern2.findall(pdfpagetext)
        # use %s (not %r) so the directory and file names do not include quotes
        if not os.path.exists("./%s" % hy):
            os.mkdir("./%s" % hy)
        if results2:
            pdfurl = r"http://pdf.dfcfw.com/pdf/" + results2[0] + "pdf"
            print(pdfurl)
            urlretrieve(pdfurl, "./%s/%s.pdf" % (hy, name))
      
            print(name)
Ejemplo n.º 48
0
def create_downloads_folder():
    if not os.path.exists(os.path.join(os.getcwd(), 'Downloads')):
        os.mkdir(os.path.join(os.getcwd(), 'Downloads'))
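
Note that the check-then-create pattern above has a small race window between the existence check and the mkdir call. A sketch of an equivalent, race-free variant using os.makedirs with exist_ok (Python 3.2+):

import os

def create_downloads_folder():
    # Same effect as above, but safe if the folder is created concurrently.
    os.makedirs(os.path.join(os.getcwd(), 'Downloads'), exist_ok=True)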
Ejemplo n.º 49
0
def main():
    fic_ids, fandom, headers, restart, idlist_is_csv, only_first_chap, output_dirpath = get_args(
    )
    os.chdir(os.getcwd())
    storycolumns = [
        'fic_id', 'title', 'author', 'author_key', 'rating', 'category',
        'fandom', 'relationship', 'character', 'additional tags', 'language',
        'published', 'status', 'status date', 'words', 'comments', 'kudos',
        'bookmarks', 'hits', 'chapter_count', 'series', 'seriespart',
        'seriesid', 'summary', 'preface_notes', 'afterword_notes'
    ]
    chaptercolumns = [
        'fic_id', 'title', 'summary', 'preface_notes', 'afterword_notes',
        'chapter_num', 'chapter_title', 'paragraph_count'
    ]
    textcolumns = ['fic_id', 'chapter_id', 'para_id', 'text']
    if not os.path.exists(workdir(output_dirpath, fandom)):
        os.mkdir(workdir(output_dirpath, fandom))
    if not os.path.exists(contentdir(output_dirpath, fandom)):
        os.mkdir(contentdir(output_dirpath, fandom))
    with open(storiescsv(output_dirpath, fandom), 'a') as f_out:
        storywriter = csv.writer(f_out)
        with open(chapterscsv(output_dirpath, fandom), 'a') as ch_out:
            chapterwriter = csv.writer(ch_out)
            with open(errorscsv(output_dirpath, fandom), 'a') as e_out:
                errorwriter = csv.writer(e_out)
                #does the csv already exist? if not, let's write a header row.
                if os.stat(storiescsv(output_dirpath, fandom)).st_size == 0:
                    print('Writing a header row for the csv.')
                    storywriter.writerow(storycolumns)
                if os.stat(chapterscsv(output_dirpath, fandom)).st_size == 0:
                    print('Writing a header row for the csv.')
                    chapterwriter.writerow(chaptercolumns)
                if idlist_is_csv:
                    csv_fname = fic_ids[0]
                    total_lines = 0

                    # Count fics remaining
                    with open(csv_fname, 'r') as f_in:
                        reader = csv.reader(f_in)
                        for row in reader:
                            if not row:
                                continue
                            total_lines += 1

                    # Scrape fics
                    with open(csv_fname, 'r+') as f_in:
                        reader = csv.reader(f_in)
                        if restart == '':
                            for row in tqdm(reader,
                                            total=total_lines,
                                            ncols=70):
                                if not row:
                                    continue
                                write_fic_to_csv(fandom,
                                                 row[0],
                                                 only_first_chap,
                                                 storywriter,
                                                 chapterwriter,
                                                 errorwriter,
                                                 storycolumns,
                                                 chaptercolumns,
                                                 headers,
                                                 output_dirpath,
                                                 write_whole_fics=True)
                        else:
                            found_restart = False
                            for row in tqdm(reader,
                                            total=total_lines,
                                            ncols=70):
                                if not row:
                                    continue
                                found_restart = process_id(
                                    row[0], restart, found_restart)
                                if found_restart:
                                    write_fic_to_csv(
                                        fandom,
                                        row[0],
                                        only_first_chap,
                                        storywriter,
                                        chapterwriter,
                                        errorwriter,
                                        storycolumns,
                                        chaptercolumns,
                                        headers,
                                        output_dirpath=output_dirpath,
                                        write_whole_fics=True)
                                else:
                                    print('Skipping already processed fic')

                else:
                    for fic_id in fic_ids:
                        write_fic_to_csv(fandom,
                                         fic_id,
                                         only_first_chap,
                                         storywriter,
                                         chapterwriter,
                                         errorwriter,
                                         storycolumns,
                                         chaptercolumns,
                                         headers,
                                         output_dirpath=output_dirpath,
                                         write_whole_fics=True)
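
workdir, contentdir, storiescsv, chapterscsv and errorscsv are helpers defined elsewhere in the original scraper. A hypothetical sketch of what such path helpers might look like, assuming an <output_dirpath>/<fandom>/ layout (the actual filenames in the project may differ):

import os

def workdir(output_dirpath, fandom):
    return os.path.join(output_dirpath, fandom)

def contentdir(output_dirpath, fandom):
    return os.path.join(output_dirpath, fandom, 'content')

def storiescsv(output_dirpath, fandom):
    return os.path.join(workdir(output_dirpath, fandom), 'stories.csv')

def chapterscsv(output_dirpath, fandom):
    return os.path.join(workdir(output_dirpath, fandom), 'chapters.csv')

def errorscsv(output_dirpath, fandom):
    return os.path.join(workdir(output_dirpath, fandom), 'errors.csv')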
Ejemplo n.º 50
0
  global DRY_RUN
  namespace = argv[1]
  services = argv[2].split(",")
  regions_and_contexts = argv[3].split(",")
  for region_and_context in regions_and_contexts:
    region, context = region_and_context.split(":")
    contexts[region] = context
  if n == 5:
      print("--- Running the script in dry-run mode ---\n")
      DRY_RUN = True

# Set the path to the directory where the generated yaml files will be stored
generated_files_dir = '../generated'

try:
    os.mkdir(generated_files_dir)
except OSError:
    pass

# Create a load balancer for the DNS pods in each k8s cluster
def create_dns_lb():
    for region, context in contexts.items():
        call(['kubectl', 'apply', '-f', 'dns-lb.yaml', '--context', context])

# Set up each load balancer to forward DNS requests for zone-scoped namespaces to the
# relevant cluster's DNS server, using the external IP of the internal load balancers
def retrieve_dns_lb_ip_per_region():
    dns_ips = dict()
    for region, context in contexts.items():
        external_ip = ''
        while True:
Ejemplo n.º 51
0
def check_path(p):
    if not os.path.exists(p):
        os.mkdir(p)
Ejemplo n.º 52
0
from MyGenerator import AudioGenerator
from model_ops import ModelMGPU
import os
import scipy.io.wavfile as wavfile
import numpy as np
import utils
# hyperparameters
people_num = 2
NUM_GPU = 1

# PATH
# model_path = './saved_models_AO_with_norm/AOmodel-2p-015-0.02258.h5'
model_path = './saved_models_AO/AOmodel-2p-001-0.55518.h5'
dir_path = './pred/'
if not os.path.isdir(dir_path):
    os.mkdir(dir_path)

# database_path = '../../data/audio/audio_database/mix/'
database_path = '../../data/audio/AV_model_database/mix/'

# load data
testfiles = []
# with open('../../data/audio/audio_database/dataset_train.txt', 'r') as f:
with open('../../data/audio/AV_model_database/dataset_train.txt', 'r') as f:
    testfiles = f.readlines()


def parse_X_data(line, num_people=people_num, database_path=database_path):
    parts = line.split()  # get each name of file for one testset
    mix_str = parts[0]
    name_list = mix_str.replace('.npy', '')
Ejemplo n.º 53
0
import os
from activepapers import url

#
# The ACTIVEPAPERS_LIBRARY environment variable follows the
# same conventions as PATH under Unix.
#
library = os.environ.get('ACTIVEPAPERS_LIBRARY', None)
if library is None:
    # This is Unix-only, needs a Windows equivalent
    home = os.environ.get('HOME', None)
    if home is not None:
        library = os.path.join(home, '.activepapers')
        if not os.path.exists(library):
            try:
                os.mkdir(library)
            except OSError:
                library = None

library = [] if library is None else library.split(':')

def split_paper_ref(paper_ref):
    index = paper_ref.find(':')
    if index == -1:
        raise ValueError("invalid paper reference %s" % paper_ref)
    return paper_ref[:index].lower(), paper_ref[index+1:]


#
# Return the local filename for a paper reference,
# after downloading the file if required.
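
The function announced by the comment above is cut off in this excerpt. A hedged sketch of what it might do, reusing split_paper_ref and the library list; the base_url, file layout and use of urllib here are assumptions for illustration (the original uses its own activepapers.url wrapper):

from urllib.request import urlretrieve  # assumption: the original wraps this in activepapers.url

def find_local_file(paper_ref, base_url='https://example.org'):
    # Hypothetical: resolve a "label:name" reference to a local file, downloading on a cache miss.
    label, name = split_paper_ref(paper_ref)
    for directory in library:
        candidate = os.path.join(directory, label, name)
        if os.path.exists(candidate):
            return candidate
    target_dir = os.path.join(library[0], label)
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    target = os.path.join(target_dir, name)
    urlretrieve('%s/%s/%s' % (base_url, label, name), target)
    return target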
Ejemplo n.º 54
0
def ck_preprocess(i):
    print('\n--------------------------------')

    def my_env(var):
        return i['env'].get(var)

    def dep_env(dep, var):
        return i['deps'][dep]['dict']['env'].get(var)

    def has_dep_env(dep, var):
        return var in i['deps'][dep]['dict']['env']

    def set_in_my_env(var):
        return my_env(var) and my_env(var).lower() in [
            'yes', 'true', 'on', '1'
        ]

    def set_in_dep_env(dep, var):
        return dep_env(dep, var) and dep_env(
            dep, var).lower() in ['yes', 'true', 'on', '1']

    for dep_name, dep in i.get('deps', {}).items():
        dep_tags = dep.get('tags', '')
        if 'python-package' in dep_tags:
            pp = dep_env(dep_name, 'PYTHONPATH')
            for p in pp.split(':'):
                if p not in ["${PYTHONPATH}", "$PYTHONPATH", ""]:
                    sys.path.append(p)

    print(sys.path)

    # Init variables from environment

    # TF-model specific value
    if has_dep_env('weights', 'CK_ENV_TENSORFLOW_MODEL_CONVERT_TO_BGR'):
        MODEL_CONVERT_TO_BGR = set_in_dep_env(
            'weights', 'CK_ENV_TENSORFLOW_MODEL_CONVERT_TO_BGR')
    else:
        MODEL_CONVERT_TO_BGR = False

    # TODO: all weights packages should provide common vars to reveal its
    # input image size: https://github.com/ctuning/ck-tensorflow/issues/67
    if has_dep_env('weights', 'CK_ENV_TENSORFLOW_MODEL_IMAGE_WIDTH'):
        IMAGE_SIZE = int(
            dep_env('weights', 'CK_ENV_TENSORFLOW_MODEL_IMAGE_WIDTH'))
    elif has_dep_env('weights', 'CK_ENV_ONNX_MODEL_IMAGE_WIDTH'):
        IMAGE_SIZE = int(dep_env('weights', 'CK_ENV_ONNX_MODEL_IMAGE_WIDTH'))
    else:
        if has_dep_env('weights', 'CK_ENV_MOBILENET_RESOLUTION'):
            IMAGE_SIZE = int(dep_env('weights', 'CK_ENV_MOBILENET_RESOLUTION'))
        else:
            return {
                'return': 1,
                'error':
                'Only TensorFlow model packages are currently supported.'
            }

    IMAGE_COUNT = int(my_env('CK_BATCH_COUNT')) * int(my_env('CK_BATCH_SIZE'))
    SKIP_IMAGES = int(my_env('CK_SKIP_IMAGES'))
    IMAGE_DIR = dep_env('images', 'CK_ENV_DATASET_IMAGENET_VAL')
    IMAGE_FILE = my_env('CK_IMAGE_FILE')
    RESULTS_DIR = 'predictions'
    IMAGE_LIST_FILE = 'image_list.txt'
    TMP_IMAGE_SIZE = int(my_env('CK_TMP_IMAGE_SIZE'))
    CROP_PERCENT = float(my_env('CK_CROP_PERCENT'))
    SUBTRACT_MEAN = set_in_my_env('CK_SUBTRACT_MEAN')

    # Full path of dir for caching prepared images.
    # Store preprocessed images in sources directory, not in `tmp`, as
    # `tmp` directory can be cleaned between runs and caches will be lost.
    CACHE_DIR_ROOT = my_env('CK_IMG_CACHE_DIR')
    if not CACHE_DIR_ROOT:
        CACHE_DIR_ROOT = os.path.join('..', 'preprocessed')

    # Single file mode
    if IMAGE_FILE:
        image_dir, IMAGE_FILE = os.path.split(IMAGE_FILE)
        # If only filename is set, assume that file is in images package
        if not image_dir:
            image_dir = IMAGE_DIR
        else:
            IMAGE_DIR = image_dir
        assert os.path.isfile(os.path.join(
            IMAGE_DIR, IMAGE_FILE)), "Input file does not exist"
        IMAGE_COUNT = 1
        SKIP_IMAGES = 1
        RECREATE_CACHE = True
        CACHE_DIR = os.path.join(CACHE_DIR_ROOT, 'single-image')
        print('Single file mode')
        print('Input image file: {}'.format(IMAGE_FILE))
    else:
        RECREATE_CACHE = set_in_my_env("CK_RECREATE_CACHE")
        CACHE_DIR = os.path.join(
            CACHE_DIR_ROOT, '{}-{}-{}'.format(IMAGE_SIZE, TMP_IMAGE_SIZE,
                                              CROP_PERCENT))

    print('Input images dir: {}'.format(IMAGE_DIR))
    print('Preprocessed images dir: {}'.format(CACHE_DIR))
    print('Results dir: {}'.format(RESULTS_DIR))
    print('Image size: {}'.format(IMAGE_SIZE))
    print('Image count: {}'.format(IMAGE_COUNT))
    print('Skip images: {}'.format(SKIP_IMAGES))

    # Prepare cache dir
    if not os.path.isdir(CACHE_DIR_ROOT):
        os.mkdir(CACHE_DIR_ROOT)
    if RECREATE_CACHE:
        if os.path.isdir(CACHE_DIR):
            shutil.rmtree(CACHE_DIR)
    if not os.path.isdir(CACHE_DIR):
        os.mkdir(CACHE_DIR)

    # Prepare results directory
    if os.path.isdir(RESULTS_DIR):
        shutil.rmtree(RESULTS_DIR)
    os.mkdir(RESULTS_DIR)

    # Load processing images filenames
    if IMAGE_FILE:
        image_list = [IMAGE_FILE]
    else:
        image_list = load_image_list(IMAGE_DIR, IMAGE_COUNT, SKIP_IMAGES)

    # Preprocess images which are not cached yet
    print('Preprocess images...')
    preprocessed_count = 0
    for image_file in image_list:
        cached_path = os.path.join(CACHE_DIR, image_file)
        if not os.path.isfile(cached_path):
            original_path = os.path.join(IMAGE_DIR, image_file)
            image_data = load_image(image_path=original_path,
                                    target_size=IMAGE_SIZE,
                                    intermediate_size=TMP_IMAGE_SIZE,
                                    crop_percentage=CROP_PERCENT,
                                    convert_to_bgr=MODEL_CONVERT_TO_BGR)
            image_data.tofile(cached_path)
            preprocessed_count += 1
            if preprocessed_count % 10 == 0:
                print('  Done {} of {}'.format(preprocessed_count,
                                               len(image_list)))
    print('  Done {} of {}'.format(len(image_list), len(image_list)))

    # Save list of images to be classified
    with open(IMAGE_LIST_FILE, 'w') as f:
        for image_file in image_list:
            f.write(image_file + '\n')

    # Setup parameters for program
    new_env = {}
    files_to_push = []
    files_to_pull = []

    # Some special preparation to run program on Android device
    if i.get('target_os_dict', {}).get('ck_name2', '') == 'android':
        # When files are pushed to the Android device, the current path will be the
        # sources path, not `tmp` as during preprocessing, so set `files_to_push` accordingly.
        if CACHE_DIR.startswith('..'):
            CACHE_DIR = CACHE_DIR[3:]

        for image_file in image_list:
            files_to_push.append(os.path.join(CACHE_DIR, image_file))
            files_to_pull.append(
                os.path.join(RESULTS_DIR, image_file) + '.txt')

        # Set list of additional files to be copied to Android device.
        # We have to set these files via env variables with full paths
        # so that they are copied into the remote program dir without sub-paths.
        new_env['RUN_OPT_IMAGE_LIST_PATH'] = os.path.join(
            os.getcwd(), IMAGE_LIST_FILE)
        files_to_push.append('$<<RUN_OPT_IMAGE_LIST_PATH>>$')

    def to_flag(val):
        return 1 if val and (str(val).lower() in ['yes', 'true', 'on', '1'
                                                  ]) else 0

    # model-specific variable
    normalize = dep_env('weights',
                        "CK_ENV_TENSORFLOW_MODEL_NORMALIZE_DATA") or dep_env(
                            'weights', "CK_ENV_ONNX_MODEL_NORMALIZE_DATA")

    new_env['RUN_OPT_IMAGE_LIST'] = IMAGE_LIST_FILE
    new_env['RUN_OPT_RESULT_DIR'] = RESULTS_DIR
    new_env['RUN_OPT_IMAGE_DIR'] = CACHE_DIR
    new_env['RUN_OPT_IMAGE_SIZE'] = IMAGE_SIZE
    new_env['RUN_OPT_NORMALIZE_DATA'] = to_flag(
        my_env("CK_NORMALIZE_DATA") or normalize)
    new_env['RUN_OPT_SUBTRACT_MEAN'] = to_flag(my_env("CK_SUBTRACT_MEAN"))
    new_env['RUN_OPT_BATCH_COUNT'] = my_env('CK_BATCH_COUNT')
    new_env['RUN_OPT_BATCH_SIZE'] = my_env('CK_BATCH_SIZE')
    new_env['RUN_OPT_SILENT_MODE'] = to_flag(my_env('CK_SILENT_MODE'))
    print(new_env)

    # Run program specific preprocess script
    preprocess_script = os.path.join(os.getcwd(), '..', 'preprocess-next.py')
    if os.path.isfile(preprocess_script):
        print('--------------------------------')
        print('Running program specific preprocessing script ...')
        module = imp.load_source('preprocess', preprocess_script)
        if hasattr(module, 'ck_preprocess'):
            res = module.ck_preprocess(i)
            if res['return'] > 0: return res
            new_env.update(res.get('new_env', {}))
            files_to_push.extend(res.get('run_input_files', []))
            files_to_pull.extend(res.get('run_output_files', []))

            # Preprocessing can return list of additional files to be copied to Android device.
            # These files are given as full paths, and will be copied near the executable.
            files_to_push_by_path = res.get('files_to_push_by_path', {})
            for key in files_to_push_by_path:
                new_env[key] = files_to_push_by_path[key]
                files_to_push.append('$<<' + key + '>>$')

    print('--------------------------------\n')
    return {
        'return': 0,
        'new_env': new_env,
        'run_input_files': files_to_push,
        'run_output_files': files_to_pull,
    }
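
load_image_list and load_image come from the shared preprocessing module of the original repository and are not shown here. A minimal, hypothetical sketch of what load_image_list is assumed to do:

import os

def load_image_list(images_dir, images_count, skip_images):
    # Hypothetical helper: take a sorted directory listing, skip the first
    # `skip_images` entries and return the next `images_count` filenames.
    all_images = sorted(f for f in os.listdir(images_dir)
                        if f.lower().endswith(('.jpg', '.jpeg', '.png')))
    return all_images[skip_images:skip_images + images_count]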
Ejemplo n.º 55
0
rects.append([[2.5, -2.5], [2.5+w, -2.5-h]])

angles = []
angles.append([180, 270]*5)
angles.append([180, 270, 0, 90]*2+[180, 270])
angles.append([i*65.5 for i in range(0, 10)])
angles.append([i*36 for i in range(0, 10)])

base_path = r'./rotating_rects_layers'
#write blank layer
save_path = abspath(base_path + '/layer' + '{0:03d}'.format(0) + '.xml')
directory = os.path.dirname(save_path)

#create directory if needed
if not os.path.exists(directory):
    os.mkdir(directory)

#save the blank layer
with open(abspath(save_path), 'w') as fillFile:
    EC.write_laser_power(fillFile, 45, 0)


for i in range(len(angles[0])):
    save_path = abspath(base_path + '/layer' + '{0:03d}'.format(i + 1) + '.xml')
    with open(abspath(save_path), 'w') as f:
        EC.write_laser_power(f, 75)
        for j in range(len(rects)):
            EC.write_rotating_square(f, angles[j][i], rects[j][0], rects[j][1], hs=0.2794)
            EC.write_comment(f, "Finished Rectangle")
            # if j == 2:
            #     print(angles[j][i])
Ejemplo n.º 56
0
import os
import sys
import numpy as np
import soundfile as sf
import torch
import librosa

HOME = os.path.expanduser("~")
GIT_FOLDER = HOME + "/git"
CHECKPOINTS_FOLDER = GIT_FOLDER + "/_checkpoints"

if not os.path.isdir(CHECKPOINTS_FOLDER):
    os.mkdir(CHECKPOINTS_FOLDER)

TACOTRON_FOLDER = GIT_FOLDER + "/Cherokee-TTS"
CHR_FOLDER = TACOTRON_FOLDER+"/data/tests/see-say-write"

wavernn_chpt = "wavernn_weight.pyt"
WAVERNN_FOLDER = GIT_FOLDER + "/WaveRNN"
WAVERNN_WEIGHTS = CHECKPOINTS_FOLDER + "/" + wavernn_chpt

if not os.path.exists(CHECKPOINTS_FOLDER + "/" + wavernn_chpt):
    os.chdir(CHECKPOINTS_FOLDER)
    os.system("curl -O -L 'https://github.com/Tomiinek/Multilingual_Text_to_Speech/releases/download/v1.0/" +wavernn_chpt+"'")


print("Cur Dir", os.getcwd())

if "utils" in sys.modules:
    del sys.modules["utils"]
Ejemplo n.º 57
0
    def __init__(self, args):
        QtArgs = [args[0], '-style', 'fusion'
                  ] + args[1:]  # force Fusion style by default

        parser = argparse.ArgumentParser(prog=args[0],
                                         description='Launch Meshroom UI.',
                                         add_help=True)

        parser.add_argument(
            'project',
            metavar='PROJECT',
            type=str,
            nargs='?',
            help=
            'Meshroom project file (e.g. myProject.mg) or folder with images to reconstruct.'
        )
        parser.add_argument(
            '-i',
            '--import',
            metavar='IMAGES/FOLDERS',
            type=str,
            nargs='*',
            help='Import images or folder with images to reconstruct.')
        parser.add_argument(
            '-I',
            '--importRecursive',
            metavar='FOLDERS',
            type=str,
            nargs='*',
            help=
            'Import images to reconstruct from specified folder and sub-folders.'
        )
        parser.add_argument('-s',
                            '--save',
                            metavar='PROJECT.mg',
                            type=str,
                            default='',
                            help='Save the created scene.')
        parser.add_argument(
            '-p',
            '--pipeline',
            metavar=
            'MESHROOM_FILE/photogrammetry/panoramaHdr/panoramaFisheyeHdr',
            type=str,
            default=os.environ.get("MESHROOM_DEFAULT_PIPELINE",
                                   "photogrammetry"),
            help=
            'Override the default Meshroom pipeline with this external graph.')
        parser.add_argument(
            "--verbose",
            help="Verbosity level",
            default='warning',
            choices=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
        )

        args = parser.parse_args(args[1:])

        logStringToPython = {
            'fatal': logging.FATAL,
            'error': logging.ERROR,
            'warning': logging.WARNING,
            'info': logging.INFO,
            'debug': logging.DEBUG,
            'trace': logging.DEBUG,
        }
        logging.getLogger().setLevel(logStringToPython[args.verbose])

        QApplication.setAttribute(Qt.AA_EnableHighDpiScaling)

        super(MeshroomApp, self).__init__(QtArgs)

        self.setOrganizationName('AliceVision')
        self.setApplicationName('Meshroom')
        self.setApplicationVersion(meshroom.__version_name__)

        font = self.font()
        font.setPointSize(9)
        self.setFont(font)

        pwd = os.path.dirname(__file__)
        self.setWindowIcon(QIcon(os.path.join(pwd, "img/meshroom.svg")))

        # QML engine setup
        qmlDir = os.path.join(pwd, "qml")
        url = os.path.join(qmlDir, "main.qml")
        self.engine = QmlInstantEngine()
        self.engine.addFilesFromDirectory(qmlDir, recursive=True)
        self.engine.setWatching(
            os.environ.get("MESHROOM_INSTANT_CODING", False))
        # whether to output qml warnings to stderr (disable by default)
        self.engine.setOutputWarningsToStandardError(
            MessageHandler.outputQmlWarnings)
        qInstallMessageHandler(MessageHandler.handler)

        self.engine.addImportPath(qmlDir)
        components.registerTypes()

        # expose available node types that can be instantiated
        self.engine.rootContext().setContextProperty(
            "_nodeTypes", {
                n: {
                    "category": nodesDesc[n].category
                }
                for n in sorted(nodesDesc.keys())
            })

        # instantiate Reconstruction object
        self._undoStack = commands.UndoStack(self)
        self._taskManager = TaskManager(self)
        r = Reconstruction(undoStack=self._undoStack,
                           taskManager=self._taskManager,
                           defaultPipeline=args.pipeline,
                           parent=self)
        self.engine.rootContext().setContextProperty("_reconstruction", r)

        # those helpers should be available from QML Utils module as singletons, but:
        #  - qmlRegisterUncreatableType is not yet available in PySide2
        #  - declaring them as singleton in qmldir file causes random crash at exit
        # => expose them as context properties instead
        self.engine.rootContext().setContextProperty(
            "Filepath", FilepathHelper(parent=self))
        self.engine.rootContext().setContextProperty(
            "Scene3DHelper", Scene3DHelper(parent=self))
        self.engine.rootContext().setContextProperty(
            "Transformations3DHelper", Transformations3DHelper(parent=self))
        self.engine.rootContext().setContextProperty(
            "Clipboard", ClipboardHelper(parent=self))

        # additional context properties
        self.engine.rootContext().setContextProperty(
            "_PaletteManager", PaletteManager(self.engine, parent=self))
        self.engine.rootContext().setContextProperty("MeshroomApp", self)

        # request any potential computation to stop on exit
        self.aboutToQuit.connect(r.stopChildThreads)

        if args.project and not os.path.isfile(args.project):
            raise RuntimeError(
                "Meshroom Command Line Error: 'PROJECT' argument should be a Meshroom project file (.mg).\n"
                "Invalid value: '{}'".format(args.project))

        if args.project:
            r.load(args.project)
            self.addRecentProjectFile(args.project)
        else:
            r.new()

        # import is a python keyword, so we have to access the attribute by a string
        if getattr(args, "import", None):
            r.importImagesFromFolder(getattr(args, "import"), recursive=False)

        if args.importRecursive:
            r.importImagesFromFolder(args.importRecursive, recursive=True)

        if args.save:
            if os.path.isfile(args.save):
                raise RuntimeError(
                    "Meshroom Command Line Error: Cannot save the new Meshroom project as the file (.mg) already exists.\n"
                    "Invalid value: '{}'".format(args.save))
            projectFolder = os.path.dirname(args.save)
            if not os.path.isdir(projectFolder):
                if not os.path.isdir(os.path.dirname(projectFolder)):
                    raise RuntimeError(
                        "Meshroom Command Line Error: Cannot save the new Meshroom project file (.mg) as the parent of the folder does not exists.\n"
                        "Invalid value: '{}'".format(args.save))
                os.mkdir(projectFolder)
            r.saveAs(args.save)
            self.addRecentProjectFile(args.save)

        self.engine.load(os.path.normpath(url))
Ejemplo n.º 58
0
def test_net(sess,
             net,
             imdb,
             weights_filename,
             experiment_setup=None,
             max_per_image=100,
             thresh=0.05):
    """Test a Fast R-CNN network on an image database."""
    np.random.seed(cfg.RNG_SEED)
    num_images = len(imdb.image_index)
    # num_images = 2

    # all detections are collected into:
    #  all_boxes[cls][image] = N x 5 array of detections in
    #  (x1, y1, x2, y2, score)
    all_boxes = [[[] for _ in range(num_images)]
                 for _ in range(imdb.num_classes)]

    output_dir = get_output_dir(imdb, weights_filename)
    print('using output_dir: ', output_dir)
    # timers
    _t = {'im_detect': Timer(), 'misc': Timer()}

    # define a writer to write the histogram of summaries
    # test_tbdir = '/home/shuang/projects/tf-faster-rcnn/tensorboard/'
    # if not os.path.exists(test_tbdir):
    # print('making directory for test tensorboard result')
    #   os.mkdir(test_tbdir)
    # writer = tf.summary.FileWriter(test_tbdir,sess.graph)

    # define a folder for activation results
    test_actdir = '../activations_%s' % experiment_setup
    if not os.path.exists(test_actdir):
        os.mkdir(test_actdir)
    # define a folder for zero fractions
    test_zerodir = './zero_fractions'
    if not os.path.exists(test_zerodir):
        os.mkdir(test_zerodir)

    for i in range(num_images):
        im = imread(imdb.image_path_at(i))

        _t['im_detect'].tic()
        scores, boxes, acts = im_detect(sess, net, im)
        _t['im_detect'].toc()

        # write act summaries to tensorboard
        # writer.add_summary(act_summaries)

        # record the zero fraction -> only for vgg16
        # zero_frac = []
        # for layer_ind in range(13):
        #   batch_num,row,col,filter_num = acts[layer_ind].shape
        #   zero_frac.append([])
        #   for j in range(filter_num):
        #     # print(acts[0][:,:,:,i].shape)
        #     fraction = 1-np.count_nonzero(acts[layer_ind][:,:,:,j])/(batch_num*row*col)
        #     zero_frac[layer_ind].append(fraction)

        _t['misc'].tic()

        # skip j = 0, because it's the background class
        chosen_classes = []
        for j in range(1, imdb.num_classes):
            # for j, clas in enumerate(imdb._classes[1:]):
            inds = np.where(scores[:, j] > thresh)[0]
            cls_scores = scores[inds, j]
            cls_boxes = boxes[inds, j * 4:(j + 1) * 4]
            cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
              .astype(np.float32, copy=False)
            keep = nms(cls_dets, cfg.TEST.NMS)
            cls_dets = cls_dets[keep, :]
            all_boxes[j][i] = cls_dets
            if len(cls_dets) != 0:  # only for recording activations_res
                chosen_classes.append(imdb._classes[j])

        # Limit to max_per_image detections *over all classes*
        if max_per_image > 0:
            image_scores = np.hstack(
                [all_boxes[j][i][:, -1] for j in range(1, imdb.num_classes)])
            if len(image_scores) > max_per_image:
                image_thresh = np.sort(image_scores)[-max_per_image]
                for j in range(1, imdb.num_classes):
                    keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
                    all_boxes[j][i] = all_boxes[j][i][keep, :]
        _t['misc'].toc()

        # write acts to a separate text file for each separate image file -> only vgg
        f_name = '{}/{}.txt'.format(test_actdir, i)
        act_file = open(f_name, 'w')
        act_file.write('\n'.join(chosen_classes))
        act_file.write('\n')
        sum_act = []
        for arr in acts:
            temp = np.sum(arr, axis=(0, 1, 2))
            sum_act.append(temp)
        for item in sum_act:
            act_file.write('{}\n'.format(str(item)))
        act_file.close()
        chosen_classes = []

        # write zero fractions to text files -> only vgg
        # file_name = '{}/{}.txt'.format(test_zerodir,i)
        # zero_file = open(file_name,'w')
        # zero_file.write('\n'.join(chosen_classes))
        # zero_file.write('\n')
        # for arr in zero_frac:
        #   zero_file.write('{}\n'.format(str(arr)))
        # zero_file.close()
        # chosen_classes = []

        if i % 1000 == 0:
            print('im_detect: {:d}/{:d} {:.3f}s {:.3f}s' \
                .format(i + 1, num_images, _t['im_detect'].average_time,
                    _t['misc'].average_time))

    # writer.close()
    det_file = os.path.join(output_dir, 'detections.pkl')
    with open(det_file, 'wb') as f:
        pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)

    print('Evaluating detections')
    imdb.evaluate_detections(all_boxes, output_dir, experiment_setup)
Ejemplo n.º 59
0
def init_path(paths):
    for path in paths:
        if os.path.exists(path):
            shutil.rmtree(path)
        os.mkdir(path)
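
If nested directories may be needed, os.makedirs is the drop-in alternative; a small variant of init_path under that assumption:

import os
import shutil

def init_path(paths):
    # Recreate each path from scratch, creating intermediate directories as well.
    for path in paths:
        if os.path.exists(path):
            shutil.rmtree(path)
        os.makedirs(path)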
Ejemplo n.º 60
0
# _*_coding: utf-8 _*_
"""
    Date: December 6, 2018
    Author: Zhang Peng
    File: config.py
    Purpose: configuration file
"""

import os

# Dataset path
dataset_path = './data'

# Training set path
train_data_file = os.path.join(dataset_path, 'fashion-mnist_train.csv')
# Test set path
test_data_file = os.path.join(dataset_path, 'fashion-mnist_test.csv')

# Results (output) save path
output_path = './output'
if not os.path.exists(output_path):
    os.mkdir(output_path)

# Image size
img_rows, img_cols = 28, 28
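
As a usage sketch, the CSV files referenced above hold one label column followed by 784 pixel columns; a hedged example of reading them into img_rows x img_cols arrays (assumes pandas and numpy, which the original project presumably imports elsewhere, and assumes the standard fashion-MNIST CSV layout):

import numpy as np
import pandas as pd

def load_fashion_mnist_csv(csv_path, img_rows=28, img_cols=28):
    # First column is the label, the remaining 784 columns are pixel values.
    data = pd.read_csv(csv_path)
    labels = data.iloc[:, 0].values
    images = data.iloc[:, 1:].values.reshape(-1, img_rows, img_cols).astype('float32') / 255.0
    return images, labels

# Example:
# X_train, y_train = load_fashion_mnist_csv(train_data_file)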