Example #1
def process_and_save(filename):
	"""
	Read the data, select the right subset, get it into the right
	structure, and pickle it. This is supposed to make reading a lot
	faster when the light cones are to be made.
	"""
	
	picklename = filename + '.pickled'

	# First check whether the pickled version already exists
	if '/' in filename:
		dirlist = os.listdir(filename.rsplit('/', 1)[0])
	else:
		dirlist = os.listdir('.')

	if picklename.rsplit('/', 1)[-1] in dirlist:
		print "Pickled version already exists for", filename
		return False
	
	data = read_bolshoi(filename, nopickle=True)
	if not data: return None
	
	with open(picklename, 'wb') as picklefile:  # pickle data is binary
		cPickle.dump(data, picklefile)
	
	return True
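
For completeness, the cached file written above can be read back the same way. A minimal sketch, assuming the same Python 2 environment and the read_bolshoi/cPickle names used above; load_bolshoi_cached is a hypothetical helper, not part of the original script:

import os
import cPickle

def load_bolshoi_cached(filename):
    # Hypothetical counterpart to process_and_save(): prefer the pickled
    # cache when it exists, fall back to the slow raw read otherwise.
    picklename = filename + '.pickled'
    if os.path.exists(picklename):
        with open(picklename, 'rb') as picklefile:
            return cPickle.load(picklefile)
    return read_bolshoi(filename, nopickle=True)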
Example #2
def zip_android(zf, basepath):
    android_dist_dir = os.path.join(top_dir, 'dist', 'android')
    zip_dir(zf, os.path.join(cur_dir, 'simplejson'),
            os.path.join(basepath, 'android', 'simplejson'))
    android_jar = os.path.join(android_dist_dir, 'titanium.jar')
    zf.write(android_jar, '%s/android/titanium.jar' % basepath)

    android_depends = os.path.join(top_dir, 'android', 'dependency.json')
    zf.write(android_depends, '%s/android/dependency.json' % basepath)

    titanium_lib_dir = os.path.join(top_dir, 'android', 'titanium', 'lib')
    for thirdparty_jar in os.listdir(titanium_lib_dir):
        if thirdparty_jar == "smalljs.jar": continue
        elif thirdparty_jar == "commons-logging-1.1.1.jar": continue
        jar_path = os.path.join(top_dir, 'android', 'titanium', 'lib',
                                thirdparty_jar)
        zf.write(jar_path, '%s/android/%s' % (basepath, thirdparty_jar))

    # include all module lib dependencies
    modules_dir = os.path.join(top_dir, 'android', 'modules')
    for module_dir in os.listdir(modules_dir):
        module_lib_dir = os.path.join(modules_dir, module_dir, 'lib')
        if os.path.exists(module_lib_dir):
            for thirdparty_jar in os.listdir(module_lib_dir):
                if thirdparty_jar.endswith('.jar'):
                    jar_path = os.path.join(module_lib_dir, thirdparty_jar)
                    zf.write(jar_path,
                             '%s/android/%s' % (basepath, thirdparty_jar))

    android_module_jars = glob.glob(
        os.path.join(android_dist_dir, 'titanium-*.jar'))
    for android_module_jar in android_module_jars:
        jarname = os.path.split(android_module_jar)[1]
        zf.write(android_module_jar,
                 '%s/android/modules/%s' % (basepath, jarname))
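
For context, zf is a zipfile.ZipFile the caller opened for writing, and zip_dir is a helper from the same build script, which also supplies the top_dir/cur_dir globals. A hedged sketch of how zip_android might be driven (the archive name and basepath are made up):

import zipfile

with zipfile.ZipFile('mobilesdk-android.zip', 'w', zipfile.ZIP_DEFLATED) as zf:
    zip_android(zf, 'mobilesdk/android/1.0.0')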
Example #3
  def _fetch_pkg(self, gopath, pkg, rev):
    """Fetch the package and setup symlinks."""
    fetcher = self._get_fetcher(pkg)
    root = fetcher.root()
    root_dir = os.path.join(self.workdir, 'fetches', root, rev)

    # Only fetch each remote root once.
    if not os.path.exists(root_dir):
      with temporary_dir() as tmp_fetch_root:
        fetcher.fetch(dest=tmp_fetch_root, rev=rev)
        safe_mkdir(root_dir)
        for path in os.listdir(tmp_fetch_root):
          shutil.move(os.path.join(tmp_fetch_root, path), os.path.join(root_dir, path))

    # TODO(John Sirois): Circle back and get rid of this symlink tree.
    # GoWorkspaceTask will further symlink a single package from the tree below into a
    # target's workspace when it could just be linking from the fetch_dir.  The only thing
    # standing in the way is a determination of what we want to artifact cache.  If we don't
    # want to cache fetched zips, linking straight from the fetch_dir works simply.  Otherwise
    # thought needs to be applied to using the artifact cache directly or synthesizing a
    # canonical owner target for the fetched files that 'child' targets (subpackages) can
    # depend on and share the fetch from.
    dest_dir = os.path.join(gopath, 'src', root)
    # We may have been `invalidate`d and not `clean-all`ed so we need a new empty symlink
    # chroot to avoid collision; thus `clean=True`.
    safe_mkdir(dest_dir, clean=True)
    for path in os.listdir(root_dir):
      os.symlink(os.path.join(root_dir, path), os.path.join(dest_dir, path))
Example #4
    def make_target_directory(self, path):
        path = os.path.abspath(path)
        try:
            os.makedirs(path)
        except OSError as e:
            self.abort('Could not create target folder: %s' % e)

        if os.path.isdir(path):
            try:
                if len(os.listdir(path)) != 0:
                    raise OSError('Directory not empty')
            except OSError as e:
                self.abort('Bad target folder: %s' % e)

        scratch = os.path.join(tempfile.gettempdir(), uuid.uuid4().hex)
        os.makedirs(scratch)
        try:
            yield scratch
        except:
            shutil.rmtree(scratch)
            raise
        else:
            # Use shutil.move here in case we move across a file system
            # boundary.
            for filename in os.listdir(scratch):
                if isinstance(path, unicode):
                    filename = filename.decode(fs_enc)
                shutil.move(os.path.join(scratch, filename),
                            os.path.join(path, filename))
            os.rmdir(scratch)
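
Because the body yields, this reads like a contextlib.contextmanager-style helper: callers populate a scratch directory, and the results are only moved into path when the block finishes cleanly. A hedged usage sketch (the decorator and file names are assumptions, not shown in the snippet):

# Assuming make_target_directory is wrapped with @contextlib.contextmanager:
with self.make_target_directory('build/out') as scratch:
    with open(os.path.join(scratch, 'index.html'), 'w') as f:
        f.write('<h1>hello</h1>')
# On success the scratch contents are moved into build/out;
# on an exception the scratch directory is deleted instead.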
Example #5
def test_sequence_output():
    directory = tempfile.mkdtemp()
    assert 0 == len(os.listdir(directory))
    cli.main(['--seq', '-v=1', '-e=m', '-o=' + os.path.join(directory, 'test.svg'), 'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'])
    number_of_files = len(os.listdir(directory))
    shutil.rmtree(directory)
    assert 4 == number_of_files
Example #6
def check_for_node_modules(node_modules):
	if os.path.isdir(node_modules):
		for dirname in os.listdir(node_modules):
			path = os.path.join(node_modules, dirname)
			for filename in os.listdir(path):
				if filename.endswith('.js'):
					PATHS['modules'][filename] = os.path.join(path, filename)
Example #7
def valid_dir(d):
    # type: (Dict) -> bool
    dir = d['path']
    if not path.exists(dir):
        return True
    if not path.isdir(dir):
        return False

    if set(['Makefile', 'make.bat']) & set(os.listdir(dir)):  # type: ignore
        return False

    if d['sep']:
        dir = os.path.join('source', dir)
        if not path.exists(dir):
            return True
        if not path.isdir(dir):
            return False

    reserved_names = [
        'conf.py',
        d['dot'] + 'static',
        d['dot'] + 'templates',
        d['master'] + d['suffix'],
    ]
    if set(reserved_names) & set(os.listdir(dir)):  # type: ignore
        return False

    return True
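
The d argument bundles the quickstart answers the checks read: path, sep, dot, master, and suffix. A hedged usage sketch with made-up values:

d = {
    'path': '/tmp/docs',  # target directory to validate
    'sep': False,         # separate source/ and build/ directories?
    'dot': '_',           # prefix for the static/templates directories
    'master': 'index',    # master document name
    'suffix': '.rst',     # source file suffix
}
if valid_dir(d):
    print('safe to generate into %s' % d['path'])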
Example #8
    def handle(self, **options):

        for party_slug in sorted(os.listdir(ppc_data_directory)):
            json_directory = join(
                ppc_data_directory,
                party_slug
            )
            for leafname in sorted(os.listdir(json_directory)):
                if not leafname.endswith('.json'):
                    continue
                filename = join(json_directory, leafname)
                image = re.sub(r'\.json$', '-cropped.png', filename)
                if not exists(image):
                    image = None
                print '==============================================================='
                print "filename:", filename
                with open(filename) as f:
                    ppc_data = json.load(f)
                ppc_data['party_slug'] = party_slug
                ppc_data['party_object'] = party_slug_to_popit_party[party_slug]
                ppc_data['constituency_object'] = get_constituency_from_name(
                    ppc_data['constituency']
                )
                if options['check']:
                    continue
                self.handle_person(ppc_data, image)
Example #9
def env_status():
    """
    Print information about the test bed to check tests aren't
    failing to clean up.

    To see, ensure you use: py.test -s
    """
    folders = [folder for folder
               in os.listdir(os.path.dirname(CONF.path_to('link')))
               if folder.find('cmd') == -1]
    try:
        link_d = os.listdir(CONF.path_to('link'))
    except OSError:
        link_d = 'DOES NOT EXIST'
    try:
        prefix_d = os.listdir(CONF.path_to('prefix'))
    except OSError:
        prefix_d = 'DOES NOT EXIST'
    print('\n')
    print('Environment Summary BEFORE Cleanup')
    print('Paths: ', CONF.get('pakit.paths'))
    print('IDB Entries: ', sorted([key for key, _ in pakit.conf.IDB]))
    print('Contents Root Dir: ', sorted(folders))
    print('Contents Link Dir: ', sorted(link_d))
    print('Contents Prefix Dir: ', sorted(prefix_d))
Example #10
def build_gfortran(ctx, target):
    binpath = os.path.join(ctx.out_dir, "bin")
    manpath = os.path.join(ctx.out_dir, "share", "man", "man1")
    includepath = os.path.join(ctx.out_dir, "include")

    binfiles = os.listdir(binpath)
    manfiles = os.listdir(manpath)
    srcpath = ctx.env.SRCPATH

    ctx.venv_exec("""
        base="gcc-42-5666.3-darwin11"
        pushd %(srcpath)s/3rdparty
        rm -fr "$base"
        mkdir -p "$base"
        pushd "$base"
        xar -xf "../$base.pkg"
        mv *.pkg/Payload Payload.gz
        pax --insecure -rz -f Payload.gz -s ",./usr,$VIRTUAL_ENV,"
        ln -sf "$VIRTUAL_ENV/bin/gfortran-4.2" "$VIRTUAL_ENV/bin/gfortran"
        popd
        rm -fr "$base"
        popd
    """ % locals())

    # Delete other files installed
    shutil.rmtree(os.path.join(includepath, "gcc"))

    for f in os.listdir(binpath):
        if f not in binfiles and "gfortran" not in f:
            os.unlink(os.path.join(binpath, f))

    for f in os.listdir(manpath):
        if f not in manfiles and "gfortran" not in f:
            os.unlink(os.path.join(manpath, f))
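
The binfiles/manfiles snapshots taken before the shell step drive this cleanup: anything that appeared during the install and is not gfortran-related is removed. The same snapshot-and-prune pattern as a self-contained sketch (the directory name is made up):

import os

def prune_new_files(dirpath, before, keep_marker):
    # Remove files created since the `before` snapshot, keeping any
    # whose name contains keep_marker.
    for name in os.listdir(dirpath):
        if name not in before and keep_marker not in name:
            os.unlink(os.path.join(dirpath, name))

snapshot = set(os.listdir('/tmp/demo-bin'))  # snapshot before installing
# ... run an installer that drops files into /tmp/demo-bin ...
prune_new_files('/tmp/demo-bin', snapshot, 'gfortran')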
Example #11
 def test_basic(self):
     """grab_driver_files: copy drivers into place, return module list"""
     # create a bunch of fake extracted files
     outdir = self.tmpdir + '/extract-outdir'
     moddir = outdir + "/lib/modules/%s/kernel/" % os.uname()[2]
     fwdir = outdir + "/lib/firmware/"
     modules = makefiles(moddir+"net/funk.ko", moddir+"fs/lolfs.ko.xz")
     firmware = makefiles(fwdir+"funk.fw")
     makefiles(outdir+"/usr/bin/monkey", outdir+"/other/dir/blah.ko")
     mod_upd_dir = self.tmpdir+'/module-updates'
     fw_upd_dir = self.tmpdir+'/fw-updates'
     # use our updates dirs instead of the default updates dirs
     with mock.patch.multiple("driver_updates",
                              MODULE_UPDATES_DIR=mod_upd_dir,
                              FIRMWARE_UPDATES_DIR=fw_upd_dir):
         modnames = grab_driver_files(outdir)
     self.assertEqual(set(modnames), set(["funk", "lolfs"]))
     modfiles = set(['funk.ko', 'lolfs.ko.xz'])
     fwfiles = set(['funk.fw'])
     # modules/firmware are *not* in their old locations
     self.assertEqual([f for f in modules+firmware if os.path.exists(f)], [])
     # modules are in the system's updates dir
     self.assertEqual(set(os.listdir(mod_upd_dir)), modfiles)
     # modules are also in outdir's updates dir
     self.assertEqual(set(os.listdir(outdir+'/'+mod_upd_dir)), modfiles)
     # repeat for firmware
     self.assertEqual(set(os.listdir(fw_upd_dir)), fwfiles)
     self.assertEqual(set(os.listdir(outdir+'/'+fw_upd_dir)), fwfiles)
Example #12
def getDownloadedMusicList ():
	musicList = []
	dirs = os.listdir(__resource__)
	for dir in dirs:
		mp3Path = __resource__ + '/' + dir + '/mp3/'
		picPath = __resource__ + '/' + dir + '/pic/'

		filename, file_extension = os.path.splitext(dir)

		print 'file_extension ', file_extension
		print 'filename ', filename
		# filter js files
		if isExcludeFiles(file_extension):
			continue
		if mp3Path == 'static/css/mp3/':
			continue

		mp3 = os.listdir(mp3Path)
		mp3 = map(lambda x: mp3Path + x, mp3)

		pic = os.listdir(picPath)
		pic = map(lambda x: picPath + x, pic)

		# collect the results instead of discarding them on each pass
		musicList.append({'mp3': mp3, 'pic': pic})
	return musicList
Example #13
    def load(self):
        """ Try to import the requested modules; put the imported modules
        in self.imported_modules. The previously imported modules, if any,
        are cleaned out first. """
        now = int(time.time())
        # We get all module files ending with .py
        modules_files = [fname[:-3] for fname in os.listdir(self.modules_path)
                         if fname.endswith(".py")]

        # And directories
        modules_files.extend([fname for fname in os.listdir(self.modules_path)
                              if os.path.isdir(os.path.join(self.modules_path, fname))])

        # Now we try to load them,
        # so first we add their dir to sys.path
        if self.modules_path not in sys.path:
            sys.path.append(self.modules_path)

        # We try to import them, but we keep only the ones of
        # our type
        del self.imported_modules[:]
        for fname in modules_files:
            #print "Try to load", fname
            try:
                m = __import__(fname)
                if not hasattr(m, 'properties'):
                    continue

                # We want to keep only the modules of our type
                if self.modules_type in m.properties['daemons']:
                    self.imported_modules.append(m)
            except Exception, exp:
                logger.log("Warning in importing module: %s" % exp)
Example #14
 def download(self, cameras, path):
     left_dir = os.path.join(path, 'left')
     right_dir = os.path.join(path, 'right')
     target_dir = os.path.join(path, 'raw')
     if not os.path.exists(target_dir):
         os.mkdir(target_dir)
     left_pages = [os.path.join(left_dir, x)
                   for x in sorted(os.listdir(left_dir))]
     right_pages = [os.path.join(right_dir, x)
                    for x in sorted(os.listdir(right_dir))]
     # Write the orientation as a JPEG comment to the end of the file
     if len(left_pages) != len(right_pages):
         logger.warn("The left and right camera produced an inequal"
                     " amount of images, please fix the problem!")
         logger.warn("Will not combine images")
         return
     if (self.config['first_page']
             and not self.config['first_page'].get(str) == 'left'):
         combined_pages = reduce(operator.add, zip(right_pages, left_pages))
     else:
         combined_pages = reduce(operator.add, zip(left_pages, right_pages))
     logger.info("Combining images.")
     for idx, fname in enumerate(combined_pages):
         fext = os.path.splitext(os.path.split(fname)[1])[1]
         target_file = os.path.join(target_dir, "{0:04d}{1}"
                                    .format(idx, fext))
         shutil.copyfile(fname, target_file)
     shutil.rmtree(right_dir)
     shutil.rmtree(left_dir)
Example #15
def clear_project(exitIfError=True):
	"""
	This function will erease the tables and views that were created
	The name of the table that is going to be deleted, are all in the config
	file in the constant forler.

	This function is dropping/deleting all views and the single table
	"""
	tables_to_delete = db.get_table_names()
	db.clear_qc_data(tables_to_delete)

	db.drop_table(QC_TABLE_NAME)
	if os.path.exists(WEB_APP_PATH+"assets/img/"):
		files = os.listdir(WEB_APP_PATH+"assets/img/")
	else:
		files = []
	for img_folder in files:
		print "removing : ",WEB_APP_PATH+"assets/img/"+img_folder
		rmtree(WEB_APP_PATH+"assets/img/"+img_folder)

	if os.path.exists(WEB_APP_PATH+"assets/reports/"):
		files = os.listdir(WEB_APP_PATH+"assets/reports/")
	else:
		files = [] 
	for report in files:
		print "removing : ",WEB_APP_PATH+"assets/reports/"+report
		os.remove(WEB_APP_PATH+"assets/reports/"+report)
	
	# recreate the database tables and views
	create_database(exitIfError)
Example #16
    def __locateRequest(self, requestName, assigned=False):
        """ Locate the sub requests associated with a requestName

        :param self: self reference
        :param str requestName: request name
        :param bool assigned: flag to include/exclude Assigned requests
        """
        self.log.info("__locateRequest: Attempting to locate %s." % requestName)
        requestTypes = os.listdir(self.root)
        subRequests = []
        try:
            for requestType in requestTypes:
                reqDir = "%s/%s" % (self.root, requestType)
                if os.path.isdir(reqDir):
                    statusList = os.listdir(reqDir)
                    if not assigned and "Assigned" in statusList:
                        statusList.remove("Assigned")
                    for status in statusList:
                        statusDir = os.path.join(reqDir, status)
                        if os.path.isdir(statusDir):
                            requestNames = os.listdir(statusDir)
                            if requestName in requestNames:
                                requestPath = os.path.join(statusDir, requestName)
                                subRequests.append(requestPath)
            self.log.info("__locateRequest: Successfully located %s." % requestName)
            return S_OK(subRequests)
        except Exception, error:
            errStr = "__locateRequest: Exception while locating request."
            self.log.exception(errStr, requestName, lException=error)
            return S_ERROR(errStr)
Example #17
    def getDBSummary(self):
        """ Obtain a summary of the contents of the requestDB

        :param self: self reference
        :return: S_OK with dict[requestType][status] => nb of files
        """
        self.log.info("getDBSummary: Attempting to get database summary.")
        requestTypes = os.listdir(self.root)
        try:
            summaryDict = {}
            for requestType in requestTypes:
                summaryDict[requestType] = {}
                reqTypeDir = os.path.join(self.root, requestType)
                if os.path.isdir(reqTypeDir):
                    statusList = os.listdir(reqTypeDir)
                    for status in statusList:
                        reqTypeStatusDir = os.path.join(reqTypeDir, status)
                        requests = os.listdir(reqTypeStatusDir)
                        summaryDict[requestType][status] = len(requests)
            self.log.info("getDBSummary: Successfully obtained database summary.")
            return S_OK(summaryDict)
        except Exception, x:
            errStr = "getDBSummary: Exception while getting DB summary."
            self.log.exception(errStr, lException=x)
            return S_ERROR(errStr)
Example #18
def main(max_stations=0, folder='.'):
    try:
        makedirs(output_folder+'/'+folder)
    except OSError:
        pass

    all_files = [ f for f in listdir(data_folder) if isfile(join(data_folder,f)) and f.endswith('.gz') ]
    
    for ndf in all_files:
        string = '_%dstations' % max_stations
        new_name=ndf[:-7]+string+ndf[-7:]
        rename(data_folder+'/'+ndf, data_folder+'/'+new_name)
        
    all_files = [ f for f in listdir(data_folder) if isfile(join(data_folder,f)) and f.endswith('.gz') ]
    
    for a_f in all_files:
        move(data_folder+'/'+a_f, output_folder+'/'+folder+'/'+a_f)
        print "Moved:", a_f[0:-3]
        
    data_files = [ f for f in listdir(output_folder+'/'+folder) if isfile(join(output_folder+'/'+folder,f)) and f.endswith('.dat.gz') ]

    print "\n"

    for d_f in data_files:
        fin = gzip.open(output_folder+'/'+folder+'/'+d_f, 'rb')
        data = fin.read()
        fin.close()

        with open(output_folder+'/'+folder+'/'+d_f[0:-3],'w') as fout:
            fout.write(data)

        print "Unzipped:", d_f[0:-3]
Example #19
 def cleanup(self, action):
     if not self.steps_filename:
         return
     if not self.question_yes_no("All unused PPM files will be moved to a"
                                 " backup directory. Are you sure?",
                                 "Clean up data directory?"):
         return
     # Remember the current step index
     current_step_index = self.current_step_index
     # Get the backup dir
     backup_dir = os.path.join(self.steps_data_dir, "backup")
     # Create it if it doesn't exist
     if not os.path.exists(backup_dir):
         os.makedirs(backup_dir)
     # Move all files to the backup dir
     for filename in glob.glob(os.path.join(self.steps_data_dir,
                                            "*.[Pp][Pp][Mm]")):
         shutil.move(filename, backup_dir)
     # Get the used files back
     for step in self.steps:
         self.set_state_from_step_lines(step, backup_dir, warn=False)
         self.get_step_lines(self.steps_data_dir)
     # Remove the used files from the backup dir
     used_files = os.listdir(self.steps_data_dir)
     for filename in os.listdir(backup_dir):
         if filename in used_files:
             os.unlink(os.path.join(backup_dir, filename))
     # Restore step index
     self.set_step(current_step_index)
     # Inform the user
     self.message("All unused PPM files may be found at %s." %
                  os.path.abspath(backup_dir),
                  "Clean up data directory")
Example #20
def get_data_old(data_dir, data_x, data_y):
    # if we end up needing the not-data-wrapper version
    label_num = 0
    labels = []  # string labels, so we know what each index value represents
    for f in os.listdir(os.path.abspath(data_dir)):
        f = os.path.abspath(os.path.join(data_dir, f))

        label = f.split("/")[-1]
        labels.append(label)

        if os.path.isdir(f):
            for sample_index, sample in enumerate(os.listdir(f)):
                #get our spectrogram for the sample
                samplerate, audio_raw = wav.read(f + "/" + sample)

                #start with zeros so we have our blank trailing space
                audio = np.zeros(max_audio_len)

                #convert our signed integer to an unsigned one by adding 32768, then divide by max of unsigned integer to get percentage and use that as our float value
                #fill as far as we've got
                for a_index, a in enumerate(audio_raw):
                    audio[a_index] = float((a+32768)/65536.0)
                data_x[sample_index] = audio
                data_y[sample_index][label_num] = 1.0
                #audio = [float((a+32768)/65536.0) for a in audio]
        label_num += 1
    return zip(data_x, data_y)
Example #21
 def run(self):
     dst = self.config.get_dst_folder()
     cdv_dst = self.config.get_cordova_dst_folder(self.key)
     if os.path.exists(cdv_dst):
         names = os.listdir(cdv_dst)
         for name in names:
             if not name.startswith('.'):
                 name = os.path.join(cdv_dst, name)
                 if os.path.isfile(name):
                     os.remove(name)
                 else:
                     shutil.rmtree(name)
     names = os.listdir(dst)
     for name in names:
         if not name.startswith('.'):
             src = os.path.join(dst, name)
             copy = os.path.join(cdv_dst, name)
             if os.path.isfile(src):
                 shutil.copy(src, copy)
             else:
                 shutil.copytree(src, copy, ignore=shutil.ignore_patterns('.*'))
     for r, d, f in os.walk(cdv_dst):
         for files in filter(lambda x: x.endswith('.html'), f):
             p = os.path.join(r, files)
             self.replace_cordova_tag(p)
     self.copy_icons(dst)
     self.copy_splash(dst)
Example #22
def recreate_images(result_dir, noisy_image_dir):
    # Read noisy images first
    test_images = {}
    for image_name in os.listdir(noisy_image_dir):
        if image_name.endswith('.png'):
            image_path = os.path.join(noisy_image_dir, image_name)
            image = util.img_as_float(io.imread(image_path))
            image_name_noext = os.path.splitext(image_name)[0]
            test_images[image_name_noext] = image
    # Enumerate results - image directories
    for image_name in sorted(os.listdir(result_dir)):
        image_dir = os.path.join(result_dir, image_name)
        if os.path.isdir(image_dir):
            print image_name
            for result_file in sorted(os.listdir(image_dir)):
                if result_file.endswith('.net'):
                    # Instantiate trained ANN from .net file
                    net_path = os.path.join(image_dir, result_file)
                    ann = libfann.neural_net()
                    ann.create_from_file(net_path)
                    # Filter the same image which it was trained with
                    filtered_image = filter_fann(
                        test_images[image_name], ann)
                    param_set_name = os.path.splitext(result_file)[0]
                    io.imsave(
                        os.path.join(image_dir, param_set_name + '.png'),
                        filtered_image)
Example #23
def run_ocr(result_dir, output_file):
    DEFAULT_TEXT = 'Lorem ipsum\ndolor sit amet,\nconsectetur\n\nadipiscing elit.\n\nDonec vel\naliquet velit,\nid congue\nposuere.'
    csv_file = open(output_file, 'w')
    fieldnames = ['image', 'param_set', 'ocr']
    writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
    writer.writeheader()
    # Enumerate images
    for image_name in sorted(os.listdir(result_dir)):
        image_dir = os.path.join(result_dir, image_name)
        if os.path.isdir(image_dir):
            print image_name
            # Enumerate parameter sets
            for result_file in sorted(os.listdir(image_dir)):
                if result_file.endswith('.png'):
                    image_path = os.path.join(image_dir, result_file)
                    image = util.img_as_float(io.imread(image_path))
                    ocr = ocr_accuracy(image, DEFAULT_TEXT)

                    result_ps_name = os.path.splitext(result_file)[0]
                    # # Write into csv file
                    result_row = {
                        'image': image_name,
                        'param_set': result_ps_name,
                        'ocr': ocr,
                    }
                    writer.writerow(result_row)
    csv_file.close()
    return None
Example #24
def load_designs(db, root='designs'):
    for design in os.listdir(root):
        views = dict()
        path = os.path.join(root, design)
        if not os.path.isdir(path): continue
        path = os.path.join(root, design, 'views')
        for filename in os.listdir(path):
            name, ext = os.path.splitext(filename)
            if ext != '.js': continue
            with open(os.path.join(path, filename)) as codefile:
                code = codefile.read()
            if name.startswith('map_'):
                name = name[len('map_'):]
                key = 'map'
            elif name.startswith('reduce_'):
                name = name[len('reduce_'):]
                key = 'reduce'
            else:
                key = 'map'
            views.setdefault(name, dict())[key] = code
        id = "_design/%s" % design
        try:
            doc = db[id]
        except couchdb.http.ResourceNotFound:
            logging.debug("loading %s", id)
            db.save(dict(_id=id, views=views))
        else:
            if doc['views'] != views:
                doc['views'] = views
                logging.debug("updating %s", id)
                db.save(doc)
            else:
                logging.debug("no change %s", id)
Example #25
    def __init__(self):
        self.options = config.YAML_CONFIG.get('options')

        # where graphite lives
        self.graphite_base = config.YAML_CONFIG.get('graphite')

        # where the graphite renderer is
        self.graphite_render = "%s/render/" % self.graphite_base

        # where to find graph, dash etc templates
        self.graph_templates = config.YAML_CONFIG.get('templatedir')

        # the dash site might have a prefix for its css etc
        self.prefix = self.options.get('prefix', "")

        # the page refresh rate
        self.refresh_rate = self.options.get('refresh_rate', 60)

        # how many columns of graphs do you want on a page
        self.graph_columns = self.options.get('graph_columns', 2)

        # how wide each graph should be
        self.graph_width = self.options.get('graph_width')

        # how high each graph should be
        self.graph_height = self.options.get('graph_height')

        # Dashboard title
        self.dash_title = self.options.get('title', 'Graphite Dashboard')

        # Dashboard logo
        self.dash_logo = self.options.get('logo')

        # Time filters in interface
        self.interval_filters = self.options.get('interval_filters', [])

        self.intervals = self.options.get('intervals', [])

        self.top_level = {}

        for category in [ name for name in os.listdir(self.graph_templates)
                                        if not name.startswith('.') and os.path.isdir(os.path.join(self.graph_templates, name)) ]:

            if os.listdir( os.path.join(self.graph_templates,category) ) != []:

                self.top_level[category] = leonardo.Leonardo( self.graphite_base,
                                                              "/render/",
                                                              self.graph_templates,
                                                              category,
                                                              { "width" : self.graph_width,
                                                                "height" : self.graph_height
                                                              }
                                                            )

        self.search_elements = [ "%s/%s" % (d['category'], d['name'])   for dash in self.top_level  for d in self.top_level[dash].dashboards() ]

        elements_string = ""
        for item in self.search_elements:
            elements_string += '"%s",' % item
        self.search_elements = "[%s]" % elements_string[:-1]
Example #26
def diff(options, a, b):
    def print_meta(s):
        codec_print(simple_colorize(s, "magenta"), options)

    if os.path.isfile(a) and os.path.isfile(b):
        if not filecmp.cmp(a, b, shallow=False):
            diff_files(options, a, b)

    elif os.path.isdir(a) and os.path.isdir(b):
        a_contents = set(os.listdir(a))
        b_contents = set(os.listdir(b))

        for child in sorted(a_contents.union(b_contents)):
            if child not in b_contents:
                print_meta("Only in %s: %s" % (a, child))
            elif child not in a_contents:
                print_meta("Only in %s: %s" % (b, child))
            elif options.recursive:
                diff(options,
                     os.path.join(a, child),
                     os.path.join(b, child))
    elif os.path.isdir(a) and os.path.isfile(b):
        print_meta("File %s is a directory while %s is a file" % (a, b))

    elif os.path.isfile(a) and os.path.isdir(b):
        print_meta("File %s is a file while %s is a directory" % (a, b))
Example #27
    def getFiles(self, subject):
        """Get a dictionary with a list of all candidate filenames for associated data, such as roi overlays, flatmap caches, and ctm caches.
        """
        surfparse = re.compile(r'(.*)/([\w-]+)_([\w-]+)_(\w+).*')
        surfpath = os.path.join(filestore, subject, "surfaces")

        if self.subjects[subject]._warning is not None:
            warnings.warn(self.subjects[subject]._warning)

        surfs = dict()
        for surf in os.listdir(surfpath):
            ssurf = os.path.splitext(surf)[0].split('_')
            name = '_'.join(ssurf[:-1])
            hemi = ssurf[-1]

            if name not in surfs:
                surfs[name] = dict()
            surfs[name][hemi] = os.path.abspath(os.path.join(surfpath,surf))

        filenames = dict(
            surfs=surfs,
            xfms=os.listdir(os.path.join(filestore, subject, "transforms")),
            xfmdir=os.path.join(filestore, subject, "transforms", "{xfmname}", "matrices.xfm"),
            anats=os.path.join(filestore, subject, "anatomicals", '{type}{opts}.{ext}'), 
            surfinfo=os.path.join(filestore, subject, "surface-info", '{type}{opts}.npz'),
            masks=os.path.join(filestore, subject, 'transforms', '{xfmname}', 'mask_{type}.nii.gz'),
            cachedir=os.path.join(filestore, subject, "cache"),
            rois=os.path.join(filestore, subject, "rois.svg").format(subj=subject),
        )

        return filenames
Example #28
def run_q(result_dir, output_file):
    csv_file = open(output_file, 'w')
    # fieldnames = ['image', 'param_set', 'final_error', 'training_time']
    fieldnames = ['image', 'param_set', 'q']
    writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
    writer.writeheader()
    # Enumerate images
    for image_name in sorted(os.listdir(result_dir)):
        image_dir = os.path.join(result_dir, image_name)
        if os.path.isdir(image_dir):
            print image_name
            # Enumerate parameter sets
            for result_file in sorted(os.listdir(image_dir)):
                if result_file.endswith('.png'):
                    image_path = os.path.join(image_dir, result_file)
                    image = util.img_as_float(io.imread(image_path))
                    q = q_py(image)

                    # result_json_path = os.path.join(image_dir, result_json)
                    result_ps_name = os.path.splitext(result_file)[0]
                    # result_data = read_results(result_json_path)
                    # # last_epoch_error = float(parse_epochs(result_data)[-1]['error'])
                    # last_epoch_error = float(parse_epochs(result_data)[-1])
                    # # Write into csv file
                    result_row = {
                        'image': image_name,
                        'param_set': result_ps_name,
                        'q': q,
                    }
                    writer.writerow(result_row)
    csv_file.close()
    return None
Example #29
def action_uninstall(plat=None, x64=None, ext=None, **kwargs):
    plat, x64, ext, folder, location = fix_args(plat, x64, ext)

    print 'Uninstalling flow123d...'

    if plat == 'linux':
        # only remove install folder
        pass

    if plat == 'windows':
        uninstaller_location = os.path.abspath(os.path.join(folder, 'Uninstall.exe'))
        command = [uninstaller_location, '/S']
        process, stdout, stderr = run_command(command)
        check_error(process, stdout, stderr)
        if process.returncode != 0:
            return process.returncode

    # add sleep since windows spawns child which is not bound by parent
    # so exiting parent does not exit children as well
    time.sleep(5)

    shutil.rmtree(os.path.abspath(folder), True)
    shutil.rmtree(os.path.abspath('output'), True)
    if os.path.exists(folder):
        print 'Uninstallation not successful!'
        print os.listdir(folder)
        return 1

    print 'Uninstallation successful!'
    return 0
Example #30
def parse_results(result_dir, output_file):
    csv_file = open(output_file, 'w')
    # fieldnames = ['image', 'param_set', 'final_error', 'training_time']
    fieldnames = ['image', 'param_set', 'final_error']
    writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
    writer.writeheader()
    # Enumerate images
    for image_name in sorted(os.listdir(result_dir)):
        image_dir = os.path.join(result_dir, image_name)
        if os.path.isdir(image_dir):
            print image_name
            # Enumerate parameter sets
            for result_json in sorted(os.listdir(image_dir)):
                if result_json.endswith('.json'):
                    result_json_path = os.path.join(image_dir, result_json)
                    result_ps_name = os.path.splitext(result_json)[0]
                    result_data = read_results(result_json_path)
                    # last_epoch_error = float(parse_epochs(result_data)[-1]['error'])
                    last_epoch_error = float(parse_epochs(result_data)[-1])
                    # Write into csv file
                    result_row = {
                        'image': image_name,
                        'param_set': result_ps_name,
                        'final_error': last_epoch_error,
                        # 'training_time': result_data['training_time']
                    }
                    writer.writerow(result_row)
    csv_file.close()
    return None
Example #31
def get_ring(ring_name,
             required_replicas,
             required_devices,
             server=None,
             force_validate=None,
             ipport2server=None,
             config_paths=None):
    if not server:
        server = ring_name
    ring = Ring('/etc/swift', ring_name=ring_name)
    if ipport2server is None:
        ipport2server = {}  # used internally, even if not passed in
    if config_paths is None:
        config_paths = defaultdict(dict)
    store_config_paths(server, config_paths)

    repl_name = '%s-replicator' % server
    repl_configs = {
        i: readconf(c, section_name=repl_name)
        for i, c in config_paths[repl_name].items()
    }
    servers_per_port = any(
        int(c.get('servers_per_port', '0')) for c in repl_configs.values())

    add_ring_devs_to_ipport2server(ring,
                                   server,
                                   ipport2server,
                                   servers_per_port=servers_per_port)
    if not VALIDATE_RSYNC and not force_validate:
        return ring
    # easy sanity checks
    if ring.replica_count != required_replicas:
        raise unittest.SkipTest(
            '%s has %s replicas instead of %s' %
            (ring.serialized_path, ring.replica_count, required_replicas))

    devs = [dev for dev in ring.devs if dev is not None]
    if len(devs) != required_devices:
        raise unittest.SkipTest(
            '%s has %s devices instead of %s' %
            (ring.serialized_path, len(devs), required_devices))
    for dev in devs:
        # verify server is exposing mounted device
        ipport = (dev['ip'], dev['port'])
        _, server_number = get_server_number(ipport, ipport2server)
        conf = repl_configs[server_number]
        for device in os.listdir(conf['devices']):
            if device == dev['device']:
                dev_path = os.path.join(conf['devices'], device)
                full_path = os.path.realpath(dev_path)
                if not os.path.exists(full_path):
                    raise unittest.SkipTest(
                        'device %s in %s was not found (%s)' %
                        (device, conf['devices'], full_path))
                break
        else:
            raise unittest.SkipTest(
                "unable to find ring device %s under %s's devices (%s)" %
                (dev['device'], server, conf['devices']))
        # verify server is exposing rsync device
        rsync_export = conf.get('rsync_module', '').rstrip('/')
        if not rsync_export:
            rsync_export = '{replication_ip}::%s' % server
        cmd = "rsync %s" % rsync_module_interpolation(rsync_export, dev)
        p = Popen(cmd, shell=True, stdout=PIPE)
        stdout, _stderr = p.communicate()
        if p.returncode:
            raise unittest.SkipTest('unable to connect to rsync '
                                    'export %s (%s)' % (rsync_export, cmd))
        for line in stdout.decode().splitlines():
            if line.rsplit(None, 1)[-1] == dev['device']:
                break
        else:
            raise unittest.SkipTest("unable to find ring device %s under "
                                    "rsync's exported devices for %s (%s)" %
                                    (dev['device'], rsync_export, cmd))
    return ring
Example #32
# Split for validation
val = float(args.valfrac)

# Split for prediction (These files will not be trained nor validated on)
test = float(args.testfrac)

# Test if the inputs are somehow reasonable
assert(test>0 and val>0 and test+val<0.5)

# directory where the .root files live
#path = /scratch-cbe/users/benjamin.wilhelmy/DeepLepton/v2/step2/2016/muo/pt_3.5_-1/STopvsTop 
path = args.path

# read .root filenames from directory
filenames = os.listdir(path)
filenames = [f for f in filenames if '.root' in f] 

# Cut filenames to length
if len(filenames) < N:
    print('Not enough files in directory, will use the ones that are'
          ' available.')
    N_files = len(filenames)
elif N == 0:
    N_files = len(filenames)
else:
    N_files   = int(N)
    random.shuffle(filenames)
    filenames = filenames[:N_files]

# Split into train and predict files
Example #33
    def convert(self, output):
        """
        Convert the output from LaTeX into images

        Arguments:
        output -- output file object

        """
        if not self.command and self.executeConverter is Imager.executeConverter:
            log.warning('No imager command is configured.  ' +
                        'No images will be created.')
            return

        cwd = os.getcwd()

        # Make a temporary directory to work in
        tempdir = tempfile.mkdtemp()
        os.chdir(tempdir)

        # Execute converter
        rc, images = self.executeConverter(output)
        if rc:
            log.warning('Image converter did not exit properly.  ' +
                        'Images may be corrupted or missing.')

        # Get a list of all of the image files
        if images is None:
            images = [
                f for f in os.listdir('.') if re.match(r'^img\d+\.\w+$', f)
            ]
        if len(images) != len(self.images):
            log.warning(
                'The number of images generated (%d) and the number of images requested (%d) is not the same.'
                % (len(images), len(self.images)))

        # Sort by creation date
        #images.sort(lambda a,b: cmp(os.stat(a)[9], os.stat(b)[9]))

        images.sort(
            lambda a, b: cmp(int(re.search(r'(\d+)\.\w+$', a).group(1)),
                             int(re.search(r'(\d+)\.\w+$', b).group(1))))

        os.chdir(cwd)

        if PILImage is None:
            log.warning('PIL (Python Imaging Library) is not installed.  ' +
                        'Images will not be cropped.')

        # Move images to their final location
        for src, dest in zip(images, self.images.values()):
            # Move the image
            directory = os.path.dirname(dest.path)
            if directory and not os.path.isdir(directory):
                os.makedirs(directory)
            try:
                shutil.copy2(os.path.join(tempdir, src), dest.path)
            except OSError:
                shutil.copy(os.path.join(tempdir, src), dest.path)

            # Crop the image
            try:
                dest.crop()
                status.dot()
            except Exception, msg:
                import traceback
                traceback.print_exc()
                log.warning('failed to crop %s (%s)', dest.path, msg)
Example #34
def parse_extensions(base_dir, extensions_dir, public_dir, base_url,
                     stdnotes_ext_list_path, ghub_headers):
    """
    Build Standard Notes extensions repository using Github meta-data
    """
    extension_path = extensions_dir
    public_path = public_dir
    os.chdir(public_path)

    extensions = []
    std_ext_list = []
    std_ext_list = parse_stdnotes_extensions(stdnotes_ext_list_path)
    # Get all extensions; sort them alphabetically and by their type
    extfiles = [
        x for x in sorted(os.listdir(extension_path))
        if not x.endswith('theme.yaml') and x.endswith('.yaml')
    ]
    themefiles = [
        y for y in sorted(os.listdir(extension_path))
        if y.endswith('theme.yaml')
    ]
    extfiles.extend(themefiles)

    for extfile in extfiles:
        with open(os.path.join(extension_path, extfile)) as extyaml:
            ext_yaml = yaml.load(extyaml, Loader=yaml.FullLoader)
        ext_has_update = False
        repo_name = ext_yaml['github'].split('/')[-1]
        repo_dir = os.path.join(public_path, repo_name)
        # If we have valid github personal access token
        if ghub_headers:
            # Get extension's github release meta-data
            ext_git_info = json.loads(
                requests.get(
                    'https://api.github.com/repos/{github}/releases/latest'.
                    format(**ext_yaml),
                    headers=ghub_headers).text)
            try:
                ext_version = ext_git_info['tag_name']
            except KeyError:
                # No github releases found
                print('Skipping: {:38s}\t(github repository not found)'.format(
                    ext_yaml['name']))
                continue
            # Check if extension directory already exists
            if not os.path.exists(repo_dir):
                os.makedirs(repo_dir)
            # Check if extension with current release already exists
            if not os.path.exists(os.path.join(repo_dir, ext_version)):
                ext_has_update = True
                os.makedirs(os.path.join(repo_dir, ext_version))
                # Grab the release and then unpack it
                with requests.get(ext_git_info['zipball_url'],
                                  headers=ghub_headers,
                                  stream=True) as zipball_stream:
                    with open(
                            os.path.join(repo_dir, ext_version) + ".zip",
                            'wb') as zipball_file:
                        shutil.copyfileobj(zipball_stream.raw, zipball_file)
                # unpack the zipball
                process_zipball(repo_dir, ext_version)
        else:
            ext_version, ext_has_update = git_clone_method(
                ext_yaml, public_path, ext_has_update)

        if extfile in std_ext_list:
            ext_id = ext_yaml['id'].rsplit('.', 1)[1]
            ext_yaml['id'] = '%s.%s' % (LOCAL_HOSTNAME, ext_id)

        # Build extension info (stateless)
        # https://domain.com/sub-domain/my-extension/index.json
        extension = dict(
            identifier=ext_yaml['id'],
            name=ext_yaml['name'],
            content_type=ext_yaml['content_type'],
            area=ext_yaml.get('area', None),
            version=ext_version,
            description=ext_yaml.get('description', None),
            marketing_url=ext_yaml.get('marketing_url', None),
            thumbnail_url=ext_yaml.get('thumbnail_url', None),
            valid_until='2030-05-16T18:35:33.000Z',
            url='/'.join([base_url, repo_name, ext_version, ext_yaml['main']]),
            download_url='https://github.com/{}/archive/{}.zip'.format(
                ext_yaml['github'], ext_version),
            latest_url='/'.join([base_url, repo_name, 'index.json']),
            flags=ext_yaml.get('flags', []),
            dock_icon=ext_yaml.get('dock_icon', {}),
            layerable=ext_yaml.get('layerable', None),
            statusBar=ext_yaml.get('statusBar', None),
        )

        # Strip empty values
        extension = {k: v for k, v in extension.items() if v}

        # Check if extension is already up-to-date
        if ext_has_update:
            # Generate JSON file for each extension
            with open(os.path.join(public_path, repo_name, 'index.json'),
                      'w') as ext_json:
                json.dump(extension, ext_json, indent=4)
            if extfile.endswith("theme.yaml"):
                print('Theme: {:34s} {:6s}\t(updated)'.format(
                    ext_yaml['name'], ext_version.strip('v')))
            else:
                print('Extension: {:30s} {:6s}\t(updated)'.format(
                    ext_yaml['name'], ext_version.strip('v')))
        else:
            # ext already up-to-date
            if extfile.endswith("theme.yaml"):
                print('Theme: {:34s} {:6s}\t(already up-to-date)'.format(
                    ext_yaml['name'], ext_version.strip('v')))
            else:
                print('Extension: {:30s} {:6s}\t(already up-to-date)'.format(
                    ext_yaml['name'], ext_version.strip('v')))

        extensions.append(extension)
    os.chdir('..')

    # Generate the main repository index JSON
    # https://domain.com/sub-domain/my-index.json
    with open(os.path.join(public_path, 'index.json'), 'w') as ext_json:
        json.dump(
            dict(
                content_type='SN|Repo',
                valid_until='2030-05-16T18:35:33.000Z',
                packages=extensions,
            ),
            ext_json,
            indent=4,
        )
    print("\nProcessed: {:20s}{} extensions. (Components: {}, Themes: {})".
          format("", len(extfiles),
                 len(extfiles) - len(themefiles), len(themefiles)))
    print("Repository Endpoint URL: {:6s}{}/index.json".format("", base_url))
Example #35
        vertical=True,
        x_prefix="< ",
    )


if __name__ == "__main__":
    if len(sys.argv) < 3:
        print(
            "Must provide two arguments: 1) The directory that saves a list of "
            "directories which contain block cache trace analyzer result files "
            "2) the directory to save plotted graphs.")
        exit(1)
    csv_result_dir = sys.argv[1]
    output_result_dir = sys.argv[2]
    print("Processing directory {} and save graphs to {}.".format(
        csv_result_dir, output_result_dir))
    for csv_relative_dir in os.listdir(csv_result_dir):
        csv_abs_dir = csv_result_dir + "/" + csv_relative_dir
        result_dir = output_result_dir + "/" + csv_relative_dir
        if not os.path.isdir(csv_abs_dir):
            print("{} is not a directory".format(csv_abs_dir))
            continue
        print("Processing experiment dir: {}".format(csv_relative_dir))
        if not os.path.exists(result_dir):
            os.makedirs(result_dir)
        plot_miss_ratio_graphs(csv_abs_dir, result_dir)
        plot_access_timeline(csv_abs_dir, result_dir)
        plot_reuse_graphs(csv_abs_dir, result_dir)
        plot_percentage_access_summary(csv_abs_dir, result_dir)
        plot_access_count_summary(csv_abs_dir, result_dir)
Example #36
def choosetm():
    f = listdir("templates/background")
    print(f)
    return render_template("choosetm.html", fil=f)
Example #37
def process_json_file(json_file_path, src_dir, ori_dst_dir, binary_dst_dir, instance_dst_dir):
    """

    :param json_file_path:
    :param src_dir: origin clip file path
    :param ori_dst_dir:
    :param binary_dst_dir:
    :param instance_dst_dir:
    :return:
    """
    assert ops.exists(json_file_path), '{:s} not exist'.format(json_file_path)

    image_nums = len(os.listdir(ori_dst_dir))

    with open(json_file_path, 'r') as file:
        for line_index, line in enumerate(file):
            info_dict = json.loads(line)

            image_dir = ops.split(info_dict['raw_file'])[0]
            image_dir_split = image_dir.split('/')[1:]
            image_dir_split.append(ops.split(info_dict['raw_file'])[1])
            image_name = '_'.join(image_dir_split)
            image_path = ops.join(src_dir, info_dict['raw_file'])
            assert ops.exists(image_path), '{:s} not exist'.format(image_path)

            h_samples = info_dict['h_samples']
            lanes = info_dict['lanes']

            image_name_new = '{:s}.png'.format('{:d}'.format(line_index + image_nums).zfill(4))

            src_image = cv2.imread(image_path, cv2.IMREAD_COLOR)
            dst_binary_image = np.zeros([src_image.shape[0], src_image.shape[1]], np.uint8)
            dst_instance_image = np.zeros([src_image.shape[0], src_image.shape[1]], np.uint8)

            for lane_index, lane in enumerate(lanes):
                assert len(h_samples) == len(lane)
                lane_x = []
                lane_y = []
                for index in range(len(lane)):
                    if lane[index] == -2:
                        continue
                    else:
                        ptx = lane[index]
                        pty = h_samples[index]
                        lane_x.append(ptx)
                        lane_y.append(pty)
                if not lane_x:
                    continue
                lane_pts = np.vstack((lane_x, lane_y)).transpose()
                lane_pts = np.array([lane_pts], np.int64)

                cv2.polylines(dst_binary_image, lane_pts, isClosed=False,
                              color=255, thickness=5)
                cv2.polylines(dst_instance_image, lane_pts, isClosed=False,
                              color=lane_index * 50 + 20, thickness=5)

            dst_binary_image_path = ops.join(binary_dst_dir, image_name_new)
            dst_instance_image_path = ops.join(instance_dst_dir, image_name_new)
            dst_rgb_image_path = ops.join(ori_dst_dir, image_name_new)

            cv2.imwrite(dst_binary_image_path, dst_binary_image)
            cv2.imwrite(dst_instance_image_path, dst_instance_image)
            cv2.imwrite(dst_rgb_image_path, src_image)

            print('Process {:s} success'.format(image_name))
Example #38
warnings.filterwarnings("ignore")

#Using google colab
drive.mount('/content/gdrive')

# Using transfer learning on pre trained model from imagenet
from keras.applications import InceptionResNetV2
conv_base = InceptionResNetV2(weights='imagenet',
                              include_top=False,
                              input_shape=(150, 150, 3))

DIR = '/content/gdrive/My Drive/Household Image/'  #directory in my drive
X = []  #train data
Y = []  #Its label
files = [
    i + '/' + img_dir for i in os.listdir(DIR)
    for img_dir in os.listdir(DIR + i)
]
random.shuffle(files)  #shuffling the data
for i in files:
    print(i)
    img = cv2.imread(DIR + i, 1)
    img = cv2.resize(img, (150, 150))  #resizing optimum size
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    X.append(img)
    if 'Status A' in i:
        Y.append(1)
    else:
        Y.append(0)

x = np.array(X)  #all the image data after pre processing
Example #39
def remove_path_key_file(key, path):
    ls = os.listdir(path)
    print(ls)
    for i in ls:
        if key in i:
            os.remove(os.path.join(path, i))
Example #40
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License
#  along with this program; if not, write to the Free Software
#  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
#  MA 02110-1301, USA.
#
#

from setuptools import setup, find_packages
from os import listdir, system


langs = []
for l in listdir('languages'):
    if l.endswith('ts'):
        system('lrelease-qt5 languages/%s' % l)
        langs.append(('languages/%s' % l).replace('.ts', '.qm'))


system('pyrcc5 kaptan.qrc -o kaptan5/rc_kaptan.py')

datas = [('/usr/share/applications', ['data/kaptan.desktop']),
         # will be launched by the welcome application.
         #('/etc/skel/.config/autostart', ['data/kaptan.desktop']),
         ('/usr/share/icons/hicolor/scalable/apps', ['data/images/kaptan-icon.svg']),
         ('/usr/share/kaptan/languages', langs)]

setup(
    name = "kaptan",
Example #41
 '''
 json_file = open('model.json', 'r')
 loaded_model_json = json_file.read()
 json_file.close()
 loaded_model = model_from_json(loaded_model_json)
 # load weights into new model
 loaded_model.load_weights("model.h5")
 print("Loaded model from disk")
 
 model=loaded_model
 '''
 model=load_model("keras_model_test.h5")
 model.summary()
 #####################test##############################
 
 testPathDir=os.listdir(testFolder)
 allTestX=[]
 allTestY=[]
 allTestZ=[]
 
 allTestsent=[]
 TestBol=True
 
 
 #testPathDir=trainPathDir
 #testFolder=trainFolder
 
 extractAllTrain(allTestX,allTestY,allTestZ,testPathDir,testFolder, allTestsent, TestBol)
 
 
 
Example #42
q = 1

scale_path = 'scale100'

input_path = "E:\Faks\Zavrsni\Backups\FullDB3\Test Data\\" + scale_path + '\\'
data_path = "E:\Faks\Zavrsni\Backups\FullDB3\Data\\"

# input_path = "E:\Faks\Zavrsni\\FullDB\Test Data\\" + scale_path + '\\'
# data_path = "E:\Faks\Zavrsni\\FullDB\Data\\"

loaded = {}
possible = {}

# Picking up the saved data of all known planes

planes = listdir(data_path)

for plane in planes:

    cameras = listdir(data_path + plane)

    for camera in cameras:

        file = open(data_path + plane + '\\' + camera + '\\' + 'normal.txt', "r")

        name = ''

        for line in file:

            if line[0] == '#':
                name = line.strip()
Example #43
cv2.imshow("img", dist_transform)
cv2.waitKey(0)
cv2.destroyAllWindows()

# Now, mark the region of unknown with zero
markers[unknown==255] = 0

markers = cv2.watershed(img, markers)
"""


path = "C:/Users/Christian/Desktop/Fourth_CV/Complete_images/MitoSegNet"
path_gt = "C:/Users/Christian/Desktop/Fourth_CV/Complete_images/Ground_Truth"


for img in os.listdir(path):

    print(img)

    image = cv2.imread(path + os.sep + img, cv2.IMREAD_GRAYSCALE)

    image_copy = copy.copy(image)
    org_img = copy.copy(image)

    gt = cv2.imread(path_gt + os.sep + img, cv2.IMREAD_GRAYSCALE)

    # label image mask
    labelled = label(image)
    # Get region props of labelled images
    reg_props = regionprops(labelled)
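
The excerpt stops right after computing the region properties; a hedged sketch of how they are typically consumed (the area threshold is an assumption; property names follow scikit-image's regionprops API):

    # Hedged sketch: filter labelled objects by area (assumed continuation).
    for rp in reg_props:
        if rp.area < 20:  # drop tiny specks; the threshold is an assumption
            labelled[labelled == rp.label] = 0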
                        classes.add(c.strip())

    return sorted(classes)

if __name__ == '__main__':
    # Parse arguments.
    if len(sys.argv) != 4:
        raise Exception('Include the model, input, and output directories as arguments, e.g., python driver.py model input output.')

    model_input = sys.argv[1]
    input_directory = sys.argv[2]
    output_directory = sys.argv[3]

    # Find files.
    input_files = []
    for f in os.listdir(input_directory):
        if os.path.isfile(os.path.join(input_directory, f)) and not f.lower().startswith('.') and f.lower().endswith('.mat'):
            input_files.append(f)

    if not os.path.isdir(output_directory):
        os.mkdir(output_directory)

    classes = get_classes(input_directory, input_files)

    # Load model.
    print('Loading 12ECG model...')
    model = load_12ECG_model(model_input)

    # Iterate over files.
    print('Extracting 12ECG features...')
    num_files = len(input_files)
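
The excerpt ends just before the loop body; a hedged sketch of the typical per-file iteration, assuming helper names (load_challenge_data, run_12ECG_classifier, save_challenge_predictions) that such a driver would define elsewhere:

    # Hedged sketch of the missing loop body; the helper names are assumptions.
    for i, f in enumerate(input_files):
        print('    {}/{}...'.format(i + 1, num_files))
        data, header_data = load_challenge_data(os.path.join(input_directory, f))
        labels, scores, pred_classes = run_12ECG_classifier(data, header_data, model)
        save_challenge_predictions(output_directory, f, scores, labels, pred_classes)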
    if len(scales) < 2: scales += scales

    dataset_path = args.dataset_path
    cam_dir = os.path.join(dataset_path, 'Cameras')
    img_dir = os.path.join(dataset_path, 'Rectified')
    dep_dir = os.path.join(dataset_path, 'Depths')

    new_dataset_path = args.output_path or dataset_path.rstrip(
        '\\/') + '_resized'  # rstrip takes one string of characters, not two arguments
    new_cam_dir = os.path.join(new_dataset_path, 'Cameras')
    new_img_dir = os.path.join(new_dataset_path, 'Rectified')
    new_dep_dir = os.path.join(new_dataset_path, 'Depths')

    # Resize camera profiles
    os.makedirs(new_cam_dir, exist_ok=True)
    for camfile in os.listdir(cam_dir):
        if camfile.find('cam') == -1: continue
        with open(os.path.join(cam_dir, camfile), 'r') as f:
            lines = [line.strip() for line in f.readlines()]
        intrinsic = np.fromstring(' '.join(lines[7:10]), dtype=float,
                                  sep=' ').reshape((3, 3))
        intrinsic_scale = np.array([
            [scales[0], 1, scales[0]],
            [1, scales[1], scales[1]],
            [1, 1, 1],
        ])
        intrinsic = (intrinsic * intrinsic_scale).tolist()

        lines = [line + '\n' for line in lines]
        with open(os.path.join(new_cam_dir, camfile), 'w') as f:
            f.writelines(lines[0:7])
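
The excerpt cuts off after writing the header lines; a hedged guess at the remainder of the write (an assumption): replace original lines 7-9 with the scaled intrinsic, then copy the rest of the file unchanged.

        # Hedged sketch: write the scaled intrinsic, then the untouched tail.
        for row in intrinsic:
            f.write(' '.join(str(v) for v in row) + '\n')
        f.writelines(lines[10:])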
def get_directories(directory_path):
    return os.listdir(directory_path)
def get_immediate_subdirectories(a_dir):
    return [name for name in os.listdir(a_dir)
            if os.path.isdir(os.path.join(a_dir, name))]
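
os.path.isdir triggers an extra stat call per name; a hedged alternative sketch using os.scandir, which reads the entry type straight from the directory listing:

# Hedged sketch: same result as get_immediate_subdirectories, one pass with scandir.
def get_immediate_subdirectories_scandir(a_dir):
    with os.scandir(a_dir) as it:
        return [entry.name for entry in it if entry.is_dir()]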
Exemple #48
0
def predict(gr):
    """
    Weight the outputs of all the trained neural nets and average them.
    @param gr: gray pixel value to colorize
    @return: the averaged (r, g, b) values
    """
    global gamma
    r, g, b = 0, 0, 0
    d = r"C:\Users\Devansh\Desktop\Projects\img\TrainedData"
    trainers = list(os.listdir(d))
    types = len(trainers)
    x = [gr]
    pred = list()
    for i in range(types):
        t = d + "/" + trainers[i]
        clf = joblib.load(t)
        pred = clf.predict(x)[0]

        b += pred[2] / types
        g += pred[1] / types
        r += pred[0] / types
    # adjusting and scaling the values reduces the effect of overfitting
    gamma += ((0.2126 * r + 0.7152 * g + 0.0722 * b) / 255) * (random.randrange(-300000, 300000) / 30000)  # the scaled randrange yields a factor in (-10, 10)
    print(gamma)
    
    if gamma >= 0:
        r = random.randrange(int(r - gamma - 1), int(r + gamma) + 1)
        g = random.randrange(int(g - gamma - 1), int(g + gamma) + 1)
        b = random.randrange(int(b - gamma - 1), int(b + gamma) + 1)
    else:
        r = random.randrange(int(r + gamma - 1), int(r - gamma) + 1)
        g = random.randrange(int(g + gamma - 1), int(g - gamma) + 1)
        b = random.randrange(int(b + gamma - 1), int(b - gamma) + 1)
        """
    print("Created: ",r,g,b)
    correction=(.2126*random.randrange(0,int(r)+1)+.7152*random.randrange(0,int(g)+1)+0.722*random.randrange(0,int(b)+1))
    print(correction)
    
    #to use when I start testing and building my model with error calculations
    r=(random.randrange(abs(int(r-2*correction)),int(r+1+2*correction)))/(types)
    g=(random.randrange(abs(int(g-2*correction)),int(g+1+2*correction)))/(types)
    b=(random.randrange(abs(int(b-2*correction)),int(b+1+2*correction)))/(types)
    
    prediction= [r,g,b ]
    print("Weighed", r,g,b)
    """
    prediction = [r, g, b]

    for i in range(len(prediction)):
        if prediction[i] > 255:
            prediction[i] %= 255  # fold out-of-range channels back into [0, 255]
        elif prediction[i] < 0:
            prediction[i] *= -1
            if prediction[i] > 255:
                prediction[i] %= 255  # fold out-of-range channels back into [0, 255]
        gamma += (prediction[i] - pred[i]) ** 2
    gamma %= math.sqrt(abs(gamma))  # self-correction with gamma
    return prediction
Exemple #49
0
    print "Done."

    print "Inserting Types..."
    for label, template in types.items():
        inserter.insert_task_types(template, relation=False, task=task, label=label)
    for label, template in relation_types.items():
        inserter.insert_task_types(template, relation=True, task=task, label=label)
    print "Done."
else:
    print "Preparing Task..."
    task = inserter.get_task_id()
    if task is None:
        task = inserter.create_task(USERNAME)
    print "Done."

for filename in os.listdir(filepath):
    if ".xml" in filename:
        files.append(filepath + filename)

for filename in files:
    print filename

    documents = []
    user_documents = []
    entities = []
    pairs = []
    offsets = []

    tree = ET.parse(filename)
    root = tree.getroot()
    for document in root.iter('document'):
Exemple #50
0
def get_files_in_directory(a_dir):
    files = [f for f in os.listdir(a_dir) if os.path.isfile(os.path.join(a_dir, f))]
    return files
Exemple #51
0
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import os, sys

path1 = 'D:\\home\\zeewei\\20190319\\radar_data\\2_108_1'  # folder containing the files to rename
dirs = os.listdir(path1)

i = 0
for dir in dirs:
    if dir[0:5] == 'road1':
        os.rename(
            os.path.join(path1, str(dir)),
            os.path.join(path1, 'road1_' + str(dir[4:5]) + '_' + str(dir[5:])))
        print("重命名成功: , ", dir)
        i += 1
Exemple #52
0
            activity['position_x'] = None
            activity['position_y'] = None
            activity['entities'] = []
            activity['agents'] = []

            activity['properties'] = {}

        activities.append(activity)

    session['activities'] = activities

    description(document, file, filtered_data, agents, entities, show_a_e)
    export_json(session, file)


if __name__ == "__main__":

    # document = input('Database: ')
    # file = input('Extract to: ')
    # sheet = "sessao"
    # controlHarvestToMicelioSession(document, sheet, file)

    ODS_DIR = './Sessoes/'
    sheet = "sessao"
    files = os.listdir(ODS_DIR)

    for document in files:
        if document.endswith('.ods'):
            file = document.replace('.ods', '.json')
            controlHarvestToMicelioSession(ODS_DIR + document, sheet, file)
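
A hedged alternative for the selection step (an assumption, not the original code): let glob match the .ods files so non-matching entries are never visited at all.

# Hedged sketch: glob-based selection of the .ods session files.
import glob

for path in glob.glob(os.path.join(ODS_DIR, '*.ods')):
    document = os.path.basename(path)
    controlHarvestToMicelioSession(path, sheet, document.replace('.ods', '.json'))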
Exemple #53
0
def all_beautiful_memories_begin():
    import log
    files_list = os.listdir(BIZ_PATH)
    files_list = set(['biz.' + x[:x.rfind(".")] for x in files_list if x.endswith(".py")])
    for name in files_list:  # a bare map() would be lazy on Python 3 and import nothing
        __import__(name)
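
importlib.import_module is the documented front door for dynamic imports; a hedged sketch of the same auto-import, assuming the biz package layout the function implies:

# Hedged sketch: import every biz.<module> found on disk and return the modules.
import importlib
import os

def import_biz_modules(biz_path):
    names = {f[:-3] for f in os.listdir(biz_path)
             if f.endswith('.py') and f != '__init__.py'}
    return [importlib.import_module('biz.' + n) for n in sorted(names)]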
Exemple #54
0
entryNodes = root.findall('classpathentry')
linkedLibs = []
for node in entryNodes:
    entryKind = node.get('kind')
    entryPath = node.get('path')
    entryKind = entryKind.strip(' \n\r\t')
    entryPath = entryPath.strip(' \n\r\t')
    if entryKind == 'lib' and entryPath.endswith('.jar'):
        linkedLibs.append(entryPath)

plugins = pluginStr.split(':')
modified = False
for pluginName in plugins:
    pluginAndroidDir = pluginsDir + '/' + pluginName + '/android'

    for fileName in os.listdir(pluginAndroidDir):
        if os.path.splitext(fileName)[1] == '.jar':
            needAdd = True
            for linkedJar in linkedLibs:
                jarName = os.path.basename(linkedJar)
                if fileName == jarName:
                    needAdd = False
                    break

            if needAdd:
                modified = True
                pathAttr = 'plugin-x/' + pluginName + '/android/' + fileName
                root.append(getLibElement(pathAttr))
                linkedLibs.append(pathAttr)

if modified:
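The excerpt stops at the final check; a hedged sketch of the likely write-back, assuming the tree came from xml.etree.ElementTree and that classpathFile names the .classpath file (both are assumptions):

    # Hedged sketch: persist the updated classpath only when something changed.
    tree = ET.ElementTree(root)
    tree.write(classpathFile, encoding='utf-8', xml_declaration=True)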
Exemple #55
0
#!/usr/bin/python

## Author: Eric FONTANILLAS
## Date: 03.01.12
## Object: Recursively run the script "16_PATRON_stats.r", which computes stats and reports a graph (PDF) for each branch in the codeml output

import string, os

## 1 ## Open input/output
branch_folder = "12_2_Branches"
L1 = os.listdir(branch_folder)

file_PATRON = open("16_PATRON_stats.r", "r")
PATRON = file_PATRON.read()

out_graph = "18_statsOutput_perBranches_GRAPHS"
out_table = "18_statsOutput_perBranches_TABLES"

for file in L1:
    print "\n\tProcess branch '%s' ..." % file[:-4]
    XXX_IN = "%s/%s" % (branch_folder, file)

    name_fasta = file[:-4]

    XXX_OUT_GRAPH = "%s/%s.pdf" % (out_graph, name_fasta)
    XXX_OUT_TABLE = "%s/%s.csv" % (out_table, name_fasta)

    New_PATRON = string.replace(PATRON, "XXX_IN", XXX_IN)
    New_PATRON = string.replace(New_PATRON, "XXX_OUT_GRAPH", XXX_OUT_GRAPH)
    New_PATRON = string.replace(New_PATRON, "XXX_OUT_TABLE", XXX_OUT_TABLE)
Exemple #56
0
from FCS import Fcs
from FCS import StainFcs
from FCS import Write
from tkinter import filedialog
import random
import os

# choose the working directory
Fpath = filedialog.askdirectory()
for file in [
        file for file in os.listdir(Fpath)
        if os.path.splitext(file)[1] == ".fcs"
]:
    filename = Fpath + '/' + file

    fcs = Fcs(filename)

    # # export to Excel
    # fcs.fcs2excel()

    # staining channels
    # stain_fcs = StainFcs(filename)
    # stain_fcs.fcs2excel(folder_name="Stain_excel")

    # # write back out as fcs
    # save_dir = fcs.file_dir + "/WriteFcs"
    # # create the output folder when it does not exist
    # if not os.path.exists(save_dir):
    #     os.makedirs(save_dir)
    # filename = save_dir + '/' + file
    #
Exemple #57
0
import os
a = int(input("정수 입력 : "))

if (a % 3 == 0 and a % 4 == 0):
    print("3과 4의 공배수")
elif (a % 3 == 0):
    print("3의 배수")
elif (a % 4 == 0):
    print("4의 배수")
else:
    print("3의 배수도, 4의 배수도 아닙니다")

b = "c:\\temp\\201632023\\room1\\room2"
print(b)
a = b.count("\\")
l = b.split("\\", a)
del (l[a])
b = ""
for s in l:
    if (s == "c:"):
        b += s
    else:
        b += "\\" + s

print(b)
print(os.listdir("c:\\temp\\201632023\\"))
if len(os.listdir("c:\\temp\\201632023\\room1\\room2")) == 0:
    print("Empty!")
else:
    print("Not empty!")
Exemple #58
0
import pydensecrf.densecrf as dcrf
import numpy as np
import sys

from skimage.io import imread, imsave
from pydensecrf.utils import unary_from_labels, create_pairwise_bilateral, create_pairwise_gaussian, unary_from_softmax

from os import listdir, makedirs
from os.path import isfile, join
import os

davis_path = '/your path/datasets/davis16-test/'
setting = '/your path/results/davis/'
out_folder = '/your path/results/davis-crf/'
for d in listdir(setting):

    vidDir = join(davis_path, d)
    resDir = join(out_folder, d)
    if not os.path.exists(resDir):
        os.makedirs(resDir)
    for f in listdir(vidDir):       

        img = imread(join(vidDir, f))
        segDir = join(setting, d)
        frameName = str.split(f, '.')[0]
        anno_rgb = imread(segDir + '/' + frameName + '.png').astype(np.uint32)
        
        min_val = np.min(anno_rgb.ravel())
        max_val = np.max(anno_rgb.ravel())
        out = (anno_rgb.astype('float') - min_val) / (max_val - min_val)
        labels = np.zeros((2, img.shape[0], img.shape[1]))
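
The excerpt cuts off while the label volume is being filled; a hedged sketch of the inference step that usually follows (the pairwise parameters are assumptions, and labels is treated as per-class probabilities):

        # Hedged sketch: dense CRF refinement of the normalized mask (assumed continuation).
        labels[1] = out        # foreground probability (assumed)
        labels[0] = 1.0 - out  # background probability
        d = dcrf.DenseCRF2D(img.shape[1], img.shape[0], 2)
        d.setUnaryEnergy(unary_from_softmax(labels))
        d.addPairwiseGaussian(sxy=3, compat=3)
        d.addPairwiseBilateral(sxy=60, srgb=10, rgbim=np.ascontiguousarray(img), compat=10)
        Q = d.inference(5)
        refined = np.argmax(Q, axis=0).reshape(img.shape[0], img.shape[1])
        imsave(join(resDir, frameName + '.png'), (refined * 255).astype(np.uint8))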
Exemple #59
0
def waf_entry_point(current_directory,version,wafdir):
	Logs.init_log()
	if Context.WAFVERSION!=version:
		Logs.error('Waf script %r and library %r do not match (directory %r)'%(version,Context.WAFVERSION,wafdir))
		sys.exit(1)
	if'--version'in sys.argv:
		Context.run_dir=current_directory
		ctx=Context.create_context('options')
		ctx.curdir=current_directory
		ctx.parse_args()
		sys.exit(0)
	Context.waf_dir=wafdir
	Context.launch_dir=current_directory
	no_climb=os.environ.get('NOCLIMB',None)
	if not no_climb:
		for k in no_climb_commands:
			if k in sys.argv:
				no_climb=True
				break
	cur=current_directory
	while cur:
		lst=os.listdir(cur)
		if Options.lockfile in lst:
			env=ConfigSet.ConfigSet()
			try:
				env.load(os.path.join(cur,Options.lockfile))
				ino=os.stat(cur)[stat.ST_INO]
			except Exception:
				pass
			else:
				for x in[env.run_dir,env.top_dir,env.out_dir]:
					if Utils.is_win32:
						if cur==x:
							load=True
							break
					else:
						try:
							ino2=os.stat(x)[stat.ST_INO]
						except OSError:
							pass
						else:
							if ino==ino2:
								load=True
								break
				else:
					Logs.warn('invalid lock file in %s'%cur)
					load=False
				if load:
					Context.run_dir=env.run_dir
					Context.top_dir=env.top_dir
					Context.out_dir=env.out_dir
					break
		if not Context.run_dir:
			if Context.WSCRIPT_FILE in lst:
				Context.run_dir=cur
		next=os.path.dirname(cur)
		if next==cur:
			break
		cur=next
		if no_climb:
			break
	if not Context.run_dir:
		if'-h'in sys.argv or'--help'in sys.argv:
			Logs.warn('No wscript file found: the help message may be incomplete')
			Context.run_dir=current_directory
			ctx=Context.create_context('options')
			ctx.curdir=current_directory
			ctx.parse_args()
			sys.exit(0)
		Logs.error('Waf: Run from a directory containing a file named %r'%Context.WSCRIPT_FILE)
		sys.exit(1)
	try:
		os.chdir(Context.run_dir)
	except OSError:
		Logs.error('Waf: The folder %r is unreadable'%Context.run_dir)
		sys.exit(1)
	try:
		set_main_module(Context.run_dir+os.sep+Context.WSCRIPT_FILE)
	except Errors.WafError, e:
		Logs.pprint('RED',e.verbose_msg)
		Logs.error(str(e))
		sys.exit(1)
Exemple #60
0
#!/usr/bin/env python3

import os
import shutil

inputtie = '/home/hannelore/PROJECTHH/Data/cgMLSTschemes/MLST-548/wgMLST/schema-548/'
outputtie = '/home/hannelore/PROJECTHH/Data/cgMLSTschemes/MLST-548/cgMLST/scheme-548-cgMLST/'
fileList = os.listdir(inputtie)
print(fileList)

cfil = open('/home/hannelore/PROJECTHH/Data/cgMLSTschemes/MLST-548/cgMLST/cgMLSTschema.txt', 'r')
for line in cfil:
    # Had problems matching line to file: the character counts differed (one extra for line).
    # strip() and replace(" ", "") did not remove it; dropping the last character did, since it was a trailing '\n'.
    """
    line.split()
    line.replace(" ","")
    """
    line = line.rstrip('\n')
    for item in fileList:
        '''
        print("line is", line)
        print(type(line))
        print(len(line))
        print("item is", item)
        print(type(item))
        print(len(item))
        '''
        if line == item:
            inputpath = inputtie + line