Example #1
	def from_directory(cls, dirname):
		print "Loading from directory: ", dirname
		filenames = glob.glob(dirname+"/*.fits") + glob.glob(dirname+"/*.fits.gz")
		print 'got %d files' % len(filenames)
		cat_name=dirname.strip(os.path.sep).split(os.path.sep)[-1]
		cat = cls.from_multiple_fits(filenames, cat_name)
		return cat
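A minimal usage sketch (the Catalog class name and the directory are hypothetical), assuming the method above is a classmethod that delegates to a from_multiple_fits constructor:

cat = Catalog.from_directory("/data/catalogs/survey_a")  # loads every .fits / .fits.gz file in that directory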
Example #2
def cleanupFiles():
    # First get rid of modified files
    for l in ["l1", "l2", "l3"]:
        arcpy.Delete_management(l)

    for f in glob.glob("C:\\Arctmp\\*"):
        try:
            shutil.rmtree(f)
        except:
            print "UNABLE TO REMOVE:", f
    # Now remove the old directory
    for i in xrange(0, 1000000):
        new_workspace = "C:\\Arctmp\\workspace." + str(i)
        if not os.path.exists(new_workspace):
            break
    print "TESTING USING WORKSPACE", new_workspace
    # Now move in fresh copies
    shutil.copytree("C:\\Arcbase", new_workspace)
    print "CONTENTS:"
    arcpy.env.workspace = new_workspace
    for f in sorted(glob.glob(arcpy.env.workspace + "\\*.shp")):
        print f
    for f in sorted(glob.glob(arcpy.env.workspace + "\\*.lyr")):
        print f
    for f in sorted(glob.glob(arcpy.env.workspace + "\\*.gdb")):
        print f
Example #3
    def _load_imdb(self):
        dir_path = os.path.join(config.data_path, self.folder)
        data_path = os.path.join(dir_path, self.which_set)

        pos_path = os.path.join(data_path, 'pos')
        neg_path = os.path.join(data_path, 'neg')

        files = glob.glob(pos_path+'/*.txt')
        pos_strings = [open(f, 'r').read() for f in files]
        pos_labels = np.ones(len(files))

        files = glob.glob(neg_path+'/*.txt')
        neg_strings = [open(f, 'r').read() for f in files]
        neg_labels = np.zeros(len(files))

        targets = np.hstack((pos_labels, neg_labels))
        targets = np.array(targets, dtype='int32').reshape((-1, 1))
        features = np.array(pos_strings + neg_strings)

        #n = 25000 / 2
        #features = features[n-1000:n+1000]
        #targets = targets[n-1000:n+1000]

        self.num_examples = len(features)

        if self.sorted == True:
            index = np.vectorize(len)(features).argsort()
            features = features[index]
            targets = targets[index]

        return (features, targets)
Example #4
  def _InstrumentExecutables(self):
    build_dir = self._build_dir
    work_dir = self._work_dir
    _LOGGER.info('Build dir "%s".', build_dir)

    # Make a copy of all unittest executables, DLLs, PDBs and test_data in
    # the build directory.
    for pattern in ('*_unittests.exe', '*.dll', '*.pdb', 'test_data'):
      files = glob.glob(os.path.join(build_dir, pattern))
      for path in files:
        _LOGGER.info('Copying "%s" to "%s".', path, work_dir)
        if os.path.isdir(path):
          # If the source file is a directory, do a recursive copy.
          dst = os.path.join(work_dir, os.path.basename(path))
          shutil.copytree(path, dst)
        else:
          shutil.copy(path, work_dir)

    # Instrument all EXEs in the work dir.
    for exe in glob.glob(os.path.join(work_dir, '*.exe')):
      self._InstrumentOneFile(exe)

    # And the DLLs we've specified.
    for dll in _DLLS_TO_INSTRUMENT:
      self._InstrumentOneFile(os.path.join(work_dir, dll))
Example #5
def test_onset_functions():
    # Load in all files in the same order
    ref_files = sorted(glob.glob(REF_GLOB))
    est_files = sorted(glob.glob(EST_GLOB))
    sco_files = sorted(glob.glob(SCORES_GLOB))

    assert len(ref_files) == len(est_files) == len(sco_files) > 0

    # Unit tests
    for metric in [mir_eval.onset.f_measure]:
        yield (__unit_test_onset_function, metric)
    # Regression tests
    for ref_f, est_f, sco_f in zip(ref_files, est_files, sco_files):
        with open(sco_f, 'r') as f:
            expected_scores = json.load(f)
        # Load in an example onset annotation
        reference_onsets = mir_eval.io.load_events(ref_f)
        # Load in an example onset tracker output
        estimated_onsets = mir_eval.io.load_events(est_f)
        # Compute scores
        scores = mir_eval.onset.evaluate(reference_onsets, estimated_onsets)
        # Compare them
        for metric in scores:
            # This is a simple hack to make nosetest's messages more useful
            yield (__check_score, sco_f, metric, scores[metric],
                   expected_scores[metric])
Example #6
    def write_csv_files(self, overwrite=False):
        self.extract_images()
        for setn in ('train', 'val'):
            img_dir = os.path.join(self.out_dir, setn)
            csvfile = getattr(self, setn + '_file')
            neon_logger.display("Getting %s file list" % (setn))
            if os.path.exists(csvfile) and not overwrite:
                neon_logger.display("File %s exists, not overwriting" % (csvfile))
                continue
            flines = []

            subdirs = glob(os.path.join(img_dir, '*'))
            for subdir in subdirs:
                subdir_label = os.path.basename(subdir)  # This is the int label
                files = glob(os.path.join(subdir, self.file_pattern))
                flines += [(filename, subdir_label) for filename in files]

            if setn == 'train':
                np.random.seed(0)
                np.random.shuffle(flines)

            with gzip.open(csvfile, 'wb') as f:
                f.write('filename,l_id\n')
                for tup in flines:
                    f.write('{},{}\n'.format(*tup))
Example #7
    def get_cached_file(self, file_path, file_glob, required=False, use_first=False, **addl_args):
        if file_glob == None:
            return None
        
        if hasattr(file_glob, '__iter__'):
            found_files = []
            for curr_glob in file_glob:
                curr_files = glob.glob( os.path.join(file_path, curr_glob) )
                if len(curr_files) > 0:
                    found_files = curr_files
                    break
        else:
            found_files = glob.glob( os.path.join(file_path, file_glob) )

        if len(found_files) == 0:
            if required:
                raise OSError('Could not find at path: "%s" any files matching glob: "%s"' % (file_path, file_glob))
            else:
                return None
        elif len(found_files) > 1 and not use_first:
            raise OSError('Found too many files at path: "%s" with glob: "%s", found: %s' % (file_path, file_glob, found_files))

        if self.file_cache.has_key(found_files[0]):
            file_obj = self.file_cache[ found_files[0] ]
        else:
            file_obj = OCO_Matrix( found_files[0], **addl_args )
            self.file_cache[ found_files[0] ] = file_obj

        return file_obj
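A usage sketch (the cache object, path, and patterns are hypothetical); file_glob may be a single pattern or an iterable of fallback patterns tried in order:

sounding = cache.get_cached_file('/data/run42', 'atm_*.dat', required=True)
sounding = cache.get_cached_file('/data/run42', ['atm_*.dat', 'atmosphere_*.dat'], use_first=True)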
Example #8
 def test_devtool_reset_all(self):
     # Check preconditions
     workspacedir = os.path.join(self.builddir, 'workspace')
     self.assertTrue(not os.path.exists(workspacedir), 'This test cannot be run with a workspace directory under the build directory')
     tempdir = tempfile.mkdtemp(prefix='devtoolqa')
     self.track_for_cleanup(tempdir)
     self.track_for_cleanup(workspacedir)
     self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
     testrecipe1 = 'mdadm'
     testrecipe2 = 'cronie'
     result = runCmd('devtool modify -x %s %s' % (testrecipe1, os.path.join(tempdir, testrecipe1)))
     result = runCmd('devtool modify -x %s %s' % (testrecipe2, os.path.join(tempdir, testrecipe2)))
     result = runCmd('devtool build %s' % testrecipe1)
     result = runCmd('devtool build %s' % testrecipe2)
     stampprefix1 = get_bb_var('STAMP', testrecipe1)
     self.assertTrue(stampprefix1, 'Unable to get STAMP value for recipe %s' % testrecipe1)
     stampprefix2 = get_bb_var('STAMP', testrecipe2)
     self.assertTrue(stampprefix2, 'Unable to get STAMP value for recipe %s' % testrecipe2)
     result = runCmd('devtool reset -a')
     self.assertIn(testrecipe1, result.output)
     self.assertIn(testrecipe2, result.output)
     result = runCmd('devtool status')
     self.assertNotIn(testrecipe1, result.output)
     self.assertNotIn(testrecipe2, result.output)
     matches1 = glob.glob(stampprefix1 + '*')
     self.assertFalse(matches1, 'Stamp files exist for recipe %s that should have been cleaned' % testrecipe1)
     matches2 = glob.glob(stampprefix2 + '*')
     self.assertFalse(matches2, 'Stamp files exist for recipe %s that should have been cleaned' % testrecipe2)
Example #9
 def annotate(self, conf, subject):
     subject = dict(subject)
     lib_jars = list()
     for lib_dir in subject['lib_dirs']:
         lib_jars += glob.glob(os.path.join(lib_dir, '*.jar'))
         lib_jars += glob.glob(os.path.join(lib_dir, '*.aar'))
     subject['lib_jars'] = lib_jars
     subject['soot_classpath'] = ':'.join((
         ':'.join(subject['class_dirs']),
         ':'.join(subject['lib_jars']),
         subject['classpath'],
     ))
     targets = list()
     if 'target' in subject:
         targets.append(subject['target'])
     else:
         targets.extend(subject['targets'])
     subject['jpdg_cmd'] = [
         'java',
         '-Xmx8g',
         '-jar',
         self.jpdg_jar,
         '-c', subject['soot_classpath'],
         '-l', 'op',
     ]
     for t in targets:
         subject['jpdg_cmd'] += ['-d', t]
     for ex_dir in subject['exclude_pkgs']:
         subject['jpdg_cmd'] += ['-e', ex_dir]
     return subject
Example #10
def main():
    files = glob.glob("./scans/*.jpg")
    files += glob.glob("./scans/*.jpeg")
    for f in files:
        reset_stats()
        print "Processing: " + f.split("/")[len(f.split("/")) - 1]

        schedule = Schedule()
        schedule.load_data()
        if schedule.get_has_schedule():
            scan_image(f, schedule)

            print "Sheet ok? ",
            while True:
                cv2.imshow("image", cv2.resize(img, (446, 578)))
                cv2.moveWindow("image", 0, 0)
                # user_in = raw_input()
                key = cv2.waitKey(-1)
                if key == ord("y"):
                    print "Sheet ok... Dumping data"
                    dump_stats()
                    os.remove(f)
                    break
                elif key == ord("n"):
                    print "Marking to redo"
                    #os.rename(f, "./scans/redo/" + f.split("/")[len(f.split("/")) - 1])
                    break
                elif key == ord("q"):
                    exit(0)
                else:
                    continue
            cv2.destroyAllWindows()
        else:
            print "Unable to load schedule... Aborting"
Example #11
    def test_stage_package_gets_cached(self):
        self.run_snapcraft(["pull", "oneflatwithstagepackages"], "dump")

        # Verify the 'hello' deb package was cached.
        cache_dir = os.path.join(
            xdg.BaseDirectory.xdg_cache_home, "snapcraft", "stage-packages", "apt"
        )
        archive_dir = os.path.join("var", "cache", "apt", "archives")
        cached = glob.glob(os.path.join(cache_dir, "*", archive_dir, "hello*"))
        self.assertThat(cached, HasLength(1))
        cached_deb = cached[0]

        staged = glob.glob(
            os.path.join(
                "parts", "oneflatwithstagepackages", "ubuntu", "download", "hello*"
            )
        )
        self.assertThat(staged, HasLength(1))
        staged_deb = staged[0]

        # Verify that the staged and cached debs are the same file (hard
        # linked) by comparing inodes.
        cached_deb_inode = os.stat(cached_deb).st_ino
        self.assertThat(cached_deb_inode, Equals(os.stat(staged_deb).st_ino))

        # Now clean the part and pull again.
        self.run_snapcraft("clean", "dump")
        self.run_snapcraft(["pull", "oneflatwithstagepackages"], "dump")

        # Verify that the staged deb is _still_ the same one from the cache.
        self.assertThat(cached_deb_inode, Equals(os.stat(staged_deb).st_ino))
Example #12
def generate_rst():
    """generate chX.rst in current working directory"""
    cwd = os.getcwd()
    demo_dir = os.path.join(cwd, 'demos')
    chapters = os.listdir(demo_dir)
    for chapter in chapters:
        if not os.path.isdir(os.path.join(demo_dir, chapter)):
            continue
        reg_py = os.path.join(demo_dir, chapter, '*.py')
        scripts = glob.glob(reg_py)
        rst_file = chapter + '.rst'
        rst_file = os.path.join(demo_dir, chapter, rst_file)
        with open(rst_file, 'w') as f:
            f.write(chapter)
            f.write('\n========================================\n')
            for script in scripts:
                script_name = os.path.basename(script)
                f.write('\n' + script_name[:-3])
                f.write('\n----------------------------------------\n')
                reg_png = os.path.join(demo_dir,
                                       chapter,
                                       script_name[:-3] + '*.png')
                for img in glob.glob(reg_png):
                    img_name = os.path.basename(img)
                    f.write(".. image:: " + img_name + "\n")
                f.write(".. literalinclude:: " + script_name + "\n")
Example #13
def _file_configs_paths(osname, agentConfig):
    """ Retrieve all the file configs and return their paths
    """
    try:
        confd_path = get_confd_path(osname)
        all_file_configs = glob.glob(os.path.join(confd_path, '*.yaml'))
        all_default_configs = glob.glob(os.path.join(confd_path, '*.yaml.default'))
    except PathNotFound as e:
        log.error("No conf.d folder found at '%s' or in the directory where the Agent is currently deployed.\n" % e.args[0])
        sys.exit(3)

    if all_default_configs:
        current_configs = set([_conf_path_to_check_name(conf) for conf in all_file_configs])
        for default_config in all_default_configs:
            if not _conf_path_to_check_name(default_config) in current_configs:
                all_file_configs.append(default_config)

    # Compatibility code for the Nagios checks if it's still configured
    # in datadog.conf
    # FIXME: 6.x, should be removed
    if not any('nagios' in config for config in itertools.chain(*all_file_configs)):
        # check if it's configured in datadog.conf the old way
        if any([nagios_key in agentConfig for nagios_key in NAGIOS_OLD_CONF_KEYS]):
            all_file_configs.append('deprecated/nagios')

    return all_file_configs
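The helper _conf_path_to_check_name is not shown in this example; a plausible sketch of what it does (the real implementation may differ) is:

import os

def _conf_path_to_check_name(conf_path):
    """Map a path like conf.d/foo.yaml or conf.d/foo.yaml.default to the check name 'foo' (illustrative sketch)."""
    name = os.path.basename(conf_path)
    for ext in ('.yaml.default', '.yaml'):
        if name.endswith(ext):
            return name[:-len(ext)]
    return name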
Example #14
def strip_symbols():
    bin_dir = os.path.join(conf[CONF_BUILDDIR], 'pack', 'bin')
    ignored = []
    def do_strip(fn):
        run('strip "%s"' % fn)
        info('stripping: %s' % fn)

    def should_ignore(path):
        '''Do not strip python.dll and msvc*.dll '''
        name = os.path.basename(path).lower()
        return name.startswith('python') or name.startswith('msvc')

    for dll in glob.glob(os.path.join(bin_dir, '*.dll')):
        if should_ignore(dll):
            ignored.append(dll)
        else:
            do_strip(dll)

    for exe in glob.glob(os.path.join(bin_dir, '*.exe')):
        do_strip(exe)

    info('----------------------------')
    info('ignored:')
    for name in ignored:
        info('>> %s' % name)
Example #15
def main(args):
  # Default invocation will verify the golden files are unchanged.
  failed = 0
  if not args:
    args = ['--wnone', '--diff', '--test', '--dstroot=.']

  ParseOptions(args)

  idldir = os.path.split(sys.argv[0])[0]
  idldir = os.path.join(idldir, 'test_cgen', '*.idl')
  filenames = glob.glob(idldir)
  ast = ParseFiles(filenames)
  if hgen.GenerateRelease(ast, 'M14', {}):
    print "Golden file for M14 failed."
    failed = 1
  else:
    print "Golden file for M14 passed."


  idldir = os.path.split(sys.argv[0])[0]
  idldir = os.path.join(idldir, 'test_cgen_range', '*.idl')
  filenames = glob.glob(idldir)

  ast = ParseFiles(filenames)
  if hgen.GenerateRange(ast, ['M13', 'M14', 'M15', 'M16', 'M17'], {}):
    print "Golden file for M13-M17 failed."
    failed = 1
  else:
    print "Golden file for M13-M17 passed."

  return failed
Example #16
	def setRepositoryRevisions(self):
		# expand possible environment variables in paths
		if isinstance(self._args.repo_scan_base_dirs, basestring):
			self._args.repo_scan_base_dirs = [self._args.repo_scan_base_dirs]
		self._args.repo_scan_base_dirs = [os.path.expandvars(repoScanBaseDir) for repoScanBaseDir in self._args.repo_scan_base_dirs]
		
		# construct possible scan paths
		subDirWildcards = ["*/" * level for level in range(self._args.repo_scan_depth+1)]
		scanDirWildcards = [os.path.join(repoScanBaseDir, subDirWildcard) for repoScanBaseDir in self._args.repo_scan_base_dirs for subDirWildcard in subDirWildcards]
		
		# globbing and filter for directories
		scanDirs = tools.flattenList([glob.glob(scanDirWildcard) for scanDirWildcard in scanDirWildcards])
		scanDirs = [scanDir for scanDir in scanDirs if os.path.isdir(scanDir)]
		
		# key: directory to check type of repository
		# value: command to extract the revision
		repoVersionCommands = {
			".git" : "git rev-parse HEAD",
			".svn" : "svn info"# | grep Revision | awk '{print $2}'"
		}
		# loop over dirs and revision control systems and write revisions to the config dict
		for repoDir, currentRevisionCommand in repoVersionCommands.items():
			repoScanDirs = tools.flattenList([glob.glob(os.path.join(scanDir, repoDir)) for scanDir in scanDirs])
			repoScanDirs = [os.path.abspath(os.path.join(repoScanDir, "..")) for repoScanDir in repoScanDirs]
			
			for repoScanDir in repoScanDirs:
				popenCout, popenCerr = subprocess.Popen(currentRevisionCommand.split(), stdout=subprocess.PIPE, cwd=repoScanDir).communicate()
				self._config[repoScanDir] = popenCout.replace("\n", "")
Example #17
    def lookup_copyright_notice(self, ufo_folder):
        current_path = ufo_folder
        try:
            contents = open(os.path.join(ufo_folder, 'fontinfo.plist')).read()
            copyright = self.grep_copyright_notice(contents)
            if copyright:
                return copyright
        except (IOError, OSError):
            pass

        while os.path.realpath(self.operator.path) != current_path:
            # look for all text files inside folder
            # read contents from them and compare with copyright notice
            # pattern
            files = glob.glob(os.path.join(current_path, '*.txt'))
            files += glob.glob(os.path.join(current_path, '*.ttx'))
            for filename in files:
                with open(os.path.join(current_path, filename)) as fp:
                    match = COPYRIGHT_REGEX.search(fp.read())
                    if not match:
                        continue
                    return match.group(0).strip(',\r\n')
            current_path = os.path.join(current_path, '..')  # go up
            current_path = os.path.realpath(current_path)
        return
Example #18
def findPR650(ports=None):
    """DEPRECATED (as of v.1.60.01). Use :func:`psychopy.hardware.findPhotometer()` instead, which
    finds a wider range of devices
    """
    logging.error("DEPRECATED (as of v.1.60.01). Use psychopy.hardware.findPhotometer() instead, which "\
    +"finds a wider range of devices")

    if ports==None:
        if sys.platform=='darwin':
            ports=[]
            #try some known entries in /dev/tty. used by keyspan
            ports.extend(glob.glob('/dev/tty.USA*'))#keyspan twin adapter is usually USA28X13P1.1
            ports.extend(glob.glob('/dev/tty.Key*'))#some are Keyspan.1 or Keyserial.1
            ports.extend(glob.glob('/dev/tty.modem*'))#some are Keyspan.1 or Keyserial.1
            if len(ports)==0: logging.error("couldn't find likely serial port in /dev/tty.* Check for \
                serial port name manually, check drivers installed etc...")
        elif sys.platform=='win32':
            ports = range(11)
    elif type(ports) in [int,float]:
        ports=[ports] #so that we can iterate
    pr650=None
    logging.info('scanning serial ports...\n\t')
    logging.console.flush()
    for thisPort in ports:
        logging.info(str(thisPort)); logging.console.flush()
        pr650 = Photometer(port=thisPort, meterType="PR650", verbose=False)
        if pr650.OK:
            logging.info(' ...OK\n'); logging.console.flush()
            break
        else:
            pr650=None
            logging.info('...Nope!\n\t'); logging.console.flush()
    return pr650
Example #19
 def repackage_archive_zip_to_pmc_zip(self, doi_id):
     "repackage the zip file in the TMP_DIR to a PMC zip format"
     # unzip contents
     zip_input_dir = os.path.join(self.get_tmp_dir(), self.TMP_DIR)
     zip_extracted_dir = os.path.join(self.get_tmp_dir(), self.JUNK_DIR)
     zip_renamed_files_dir = os.path.join(self.get_tmp_dir(), self.RENAME_DIR)
     pmc_zip_output_dir = os.path.join(self.get_tmp_dir(), self.INPUT_DIR)
     archive_zip_name = glob.glob(zip_input_dir + "/*.zip")[0]
     with zipfile.ZipFile(archive_zip_name, 'r') as myzip:
         myzip.extractall(zip_extracted_dir)
     # rename the files and profile the files
     file_name_map = article_processing.rename_files_remove_version_number(
         files_dir = zip_extracted_dir,
         output_dir = zip_renamed_files_dir
     )
     if self.logger:
         self.logger.info("FTPArticle running %s workflow for article %s, file_name_map"
                          % (self.workflow, self.doi_id))
         self.logger.info(file_name_map)
     # convert the XML
     article_xml_file = glob.glob(zip_renamed_files_dir + "/*.xml")[0]
     article_processing.convert_xml(xml_file=article_xml_file,
                      file_name_map=file_name_map)
     # rezip the files into PMC zip format
     soup = parser.parse_document(article_xml_file)
     volume = parser.volume(soup)
     pmc_zip_file_name = article_processing.new_pmc_zip_filename(self.journal, volume, doi_id)
     with zipfile.ZipFile(os.path.join(pmc_zip_output_dir, pmc_zip_file_name), 'w',
                          zipfile.ZIP_DEFLATED, allowZip64=True) as new_zipfile:
         dirfiles = article_processing.file_list(zip_renamed_files_dir)
         for df in dirfiles:
             filename = df.split(os.sep)[-1]
             new_zipfile.write(df, filename)
     return True
Example #20
def install_new_policy(projdir, tmpdir):
    """
    Copies the polkit policy files.
    """
    files = glob.glob(projdir + '/data/*.policy') + \
            glob.glob(projdir + '/modules/*/data/*.policy')
    return _copy_files(files, '/usr/share/polkit-1/actions/', tmpdir)
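A minimal usage sketch (both paths are hypothetical); _copy_files is the module's own helper and is not shown in this example:

installed = install_new_policy('/home/user/projects/blivet-gui', '/tmp/policy-staging')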
Example #21
def getPurchaseReport():
    purchaseFiles = []
    for purchases in glob.glob('purchases/purchase*'):
        purchaseFiles.append(purchases)
    for purchases in glob.glob('purchases/Item*'):
        itemlist = purchases
    item_dict = {}
    with open(itemlist,'r') as itemfile:
        all_lines = itemfile.readlines()
        for i in range(2,len(all_lines)):
            item = all_lines[i].split()
            item_dict.update({item[0]:item[1]})
        #print item_dict
    report_dict = {}
    for i in range(0,len(purchaseFiles)):
        with open(purchaseFiles[i],'r') as purchasefile:
            item_total = 0
            purchase_list = purchasefile.readlines()
            #print purchaseFiles[i][19:22]
            for j in range(2,len(purchase_list)):
                purchase = purchase_list[j].split()
                #print float(item_dict[purchase[0]][1:])
                #print float(purchase[1])
                item_total = float(item_dict[purchase[0]][1:])*float(purchase[1])+item_total
                #print item_total
        report_dict.update({int(purchaseFiles[i][21]):"{0:.2f}".format(item_total)})
        #print purchaseFiles[i][19:22]
     

    print report_dict
    return report_dict
Example #22
def copy_build_directories_vs(dist_build, build_dir):
    """Copy the build/visual-studio directories to the distribution directory.
    """
    buildfiles = __astyle_dir + "/build/"
    # copy solution files
    vsdir = '/' + build_dir + '/'
    dist_astyle_vs20xx = dist_build + vsdir
    os.mkdir(dist_astyle_vs20xx)
    slnfiles = glob.glob(buildfiles + vsdir + "*.sln")
    for sln in slnfiles:
        shutil.copy(sln, dist_astyle_vs20xx)

    # build project directories
    for projdir in ("/AStyle/",
                    "/AStyle Dll/",
                    "/AStyle Java/",
                    "/AStyle Lib/"):
        dist_astyle_proj = dist_astyle_vs20xx[:-1] + projdir
        os.mkdir(dist_astyle_proj)

        # copy project files
        projfiles = glob.glob(buildfiles + vsdir[:-1] + projdir + "*.*proj")
        files_copied = 0
        for proj in projfiles:
            files_copied += 1
            shutil.copy(proj, dist_astyle_proj)
        if vsdir[1:-1] >= "vs2010":
            filtfiles = glob.glob(buildfiles + vsdir[:-1] + projdir + "*.*.filters")
            for filter_in in filtfiles:
                files_copied += 1
                shutil.copy(filter_in, dist_astyle_proj)
        # verify number of files copied
        if files_copied != 2:
            libastyle.system_exit("Error in number of build files copied: " + str(files_copied))
Example #23
    def get_batches_fn(batch_size):
        """
        Create batches of training data
        :param batch_size: Batch Size
        :return: Batches of training data
        """
        image_paths = glob(os.path.join(data_folder, 'image_2', '*.png'))
        label_paths = {
            re.sub(r'_(lane|road)_', '_', os.path.basename(path)): path
            for path in glob(os.path.join(data_folder, 'gt_image_2', '*_road_*.png'))}
        background_color = np.array([255, 0, 0])

        random.shuffle(image_paths)
        for batch_i in range(0, len(image_paths), batch_size):
            images = []
            gt_images = []
            for image_file in image_paths[batch_i:batch_i+batch_size]:
                gt_image_file = label_paths[os.path.basename(image_file)]

                image = scipy.misc.imresize(scipy.misc.imread(image_file), image_shape)
                gt_image = scipy.misc.imresize(scipy.misc.imread(gt_image_file), image_shape)

                gt_bg = np.all(gt_image == background_color, axis=2)
                gt_bg = gt_bg.reshape(*gt_bg.shape, 1)
                gt_image = np.concatenate((gt_bg, np.invert(gt_bg)), axis=2)

                images.append(image)
                gt_images.append(gt_image)

            yield np.array(images), np.array(gt_images)
Example #24
def get_tests_info(input_dir, msg_dir, prefix, suffix):
    """get python input examples and output messages

    We use following conventions for input files and messages:
    for different inputs:
        test for python  >= x.y    ->  input   =  <name>_pyxy.py
        test for python  <  x.y    ->  input   =  <name>_py_xy.py
    for one input and different messages:
        message for python >=  x.y ->  message =  <name>_pyxy.txt
        lower versions             ->  message with highest num
    """
    result = []
    for fname in glob(join(input_dir, prefix + '*' + suffix)):
        infile = basename(fname)
        fbase = splitext(infile)[0]
        # filter input files :
        pyrestr = fbase.rsplit('_py', 1)[-1] # like _26 or 26
        if pyrestr.isdigit(): # '24', '25'...
            if SYS_VERS_STR < pyrestr:
                continue
        if pyrestr.startswith('_') and  pyrestr[1:].isdigit():
            # skip test for higher python versions
            if SYS_VERS_STR >= pyrestr[1:]:
                continue
        messages = glob(join(msg_dir, fbase + '*.txt'))
        # the last one will be without ext, i.e. for all or upper versions:
        if messages:
            for outfile in sorted(messages, reverse=True):
                py_rest = outfile.rsplit('_py', 1)[-1][:-4]
                if py_rest.isdigit() and SYS_VERS_STR >= py_rest:
                    break
        else:
            outfile = None
        result.append((infile, outfile))
    return result
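A small self-contained sketch of the version-filtering convention described in the docstring; the construction of SYS_VERS_STR as a two-digit string and the function name are assumptions made here for illustration:

import sys

SYS_VERS_STR = '%d%d' % sys.version_info[:2]  # e.g. '37' for Python 3.7 (assumed convention)

def input_file_applies(fbase):
    """Return True if an input named like <name>_pyXY or <name>_py_XY applies to this interpreter."""
    pyrestr = fbase.rsplit('_py', 1)[-1]
    if pyrestr.isdigit():                      # <name>_pyXY: only for Python >= X.Y
        return SYS_VERS_STR >= pyrestr
    if pyrestr.startswith('_') and pyrestr[1:].isdigit():
        return SYS_VERS_STR < pyrestr[1:]      # <name>_py_XY: only for Python < X.Y
    return True                                # no version suffix: applies everywhere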
Example #25
 def deploy(self, file, contextroot=None, deploymentorder=100, libraries=[]):
     files = glob.glob(file)
     if len(files) != 1: abort("Exactly one file must match " + file)
     cmd = self.asadminCommand + " " + "deploy"
     if self.version >= 4:
         cmd = cmd + " --deploymentorder " + str(deploymentorder)
     if contextroot:
         cmd = cmd + " --contextroot " + contextroot
     if libraries:
         libstring = ""
         for library in libraries:
             path = os.path.join(self.lib_path, library)
             libs = glob.glob(path)
             if len(libs) != 1: abort("Exactly one library must match " + path)
             libadd = os.path.basename(libs[0])
             if libstring:
                 libstring += "," + libadd
             else:
                 libstring = "--libraries " + libadd
         cmd = cmd + " " + libstring
     cmd = cmd + " " + files[0]
     if self.verbosity: print "\nexecute: " + cmd 
     out, err, rc = self.execute(cmd)
     if self.verbosity > 1:
         if out: print out
     if err:
         for line in err.splitlines():
             line = line.strip()
             if line:
                 if line.startswith("PER01"): continue
                 print line   
Example #26
    def init_notebooks(self):
        """Construct the list of notebooks.
        If notebooks are passed on the command-line,
        they override notebooks specified in config files.
        Glob each notebook to replace notebook patterns with filenames.
        """

        # Specifying notebooks on the command-line overrides (rather than adds)
        # the notebook list
        if self.extra_args:
            patterns = self.extra_args
        else:
            patterns = self.notebooks

        # Use glob to replace all the notebook patterns with filenames.
        filenames = []
        for pattern in patterns:
            
            # Use glob to find matching filenames.  Allow the user to convert 
            # notebooks without having to type the extension.
            globbed_files = glob.glob(pattern)
            globbed_files.extend(glob.glob(pattern + '.ipynb'))
            if not globbed_files:
                self.log.warn("pattern %r matched no files", pattern)

            for filename in globbed_files:
                if not filename in filenames:
                    filenames.append(filename)
        self.notebooks = filenames
Example #27
def w2p_pack_plugin(filename, path, plugin_name):
    """Packs the given plugin into a w2p file.
    Will match files at::

        <path>/*/plugin_[name].*
        <path>/*/plugin_[name]/*

    """
    filename = abspath(filename)
    path = abspath(path)
    if not filename.endswith('web2py.plugin.%s.w2p' % plugin_name):
        raise Exception("Not a web2py plugin name")
    plugin_tarball = tarfile.open(filename, 'w:gz')
    try:
        app_dir = path
        while app_dir[-1] == '/':
            app_dir = app_dir[:-1]
        files1 = glob.glob(
            os.path.join(app_dir, '*/plugin_%s.*' % plugin_name))
        files2 = glob.glob(
            os.path.join(app_dir, '*/plugin_%s/*' % plugin_name))
        for file in files1 + files2:
            plugin_tarball.add(file, arcname=file[len(app_dir) + 1:])
    finally:
        plugin_tarball.close()
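A minimal usage sketch (paths and plugin name are hypothetical); the target filename must follow the web2py.plugin.<name>.w2p convention checked above:

w2p_pack_plugin('/tmp/web2py.plugin.wiki.w2p', '/srv/web2py/applications/myapp', 'wiki')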
Example #28
    def process_all_input(self, projectname, path, queue, \
            increment_preprocessed_func, increment_processed_func, cancelled_func) :
        dir_re   = re.compile(".*c(\d+)$")
        input_re = re.compile("^datain_(\d+)\..*")

        listing = filter(lambda x : os.path.isdir(x) and dir_re.match(x), glob.glob(path + os.sep + "*"))

        if len(listing) == 0 :
            raise PluginError("no chromosome directories to process in %s" % path)

        for dir in listing :
            chromo = dir_re.match(dir).group(1)
            inputfiles = glob.glob(dir + os.sep + 'datain_*')

            for f in inputfiles :
                if cancelled_func() :
                    return
                
                dirname,filename = os.path.split(f)
                m = input_re.match(filename)
                if not m :
                    continue
                fragid = m.group(1)

                if os.path.exists(dirname + os.sep + ("gh_%s.out" % fragid)) :
                    increment_processed_func()
                    continue

                input  = map(lambda x : dirname + os.sep + (x % (fragid,chromo)), \
                        ['datain_%s.%s','map_%s.%s','pedin_%s.%s','setup_%s.%s'])
                output = dirname + os.sep + ("gh_%s.out" % fragid)
                tmp = (input,output)

                queue.put( tmp )
                increment_preprocessed_func()
Example #29
def config():
	# spark-default.conf
	spark_defaults_tmp_location = os.path.join(tmp_dir,"spark-defaults.conf")
	spark_default_final_location = os.path.join(spark_home,"conf")
	with open(spark_defaults_tmp_location,'a') as spark_defaults:
		spark_defaults.write("spark.eventLog.enabled  true\n")
		spark_defaults.write("spark.eventLog.dir      {0}\n".format(spark_evlogs))
	subprocess.check_call(["/bin/mv",spark_defaults_tmp_location,spark_default_final_location])

	# bashrc file
	with open("/home/hadoop/.bashrc","a") as bashrc:
		bashrc.write("export SCALA_HOME={0}".format(scala_home))

	# spark-env.sh
	spark_env_tmp_location = os.path.join(tmp_dir,"spark-env.sh")
	spark_env_final_location = os.path.join(spark_home,"conf")

	files= glob.glob("{0}/{1}/share/*/*/*/hadoop-*lzo.jar".format(hadoop_apps,hadoop_version))
	if len(files) < 1:
		files=glob.glob("{0}/{1}/share/*/*/*/hadoop-*lzo-*.jar".format(hadoop_apps,hadoop_version))
	if len(files) < 1:
		print "lzo not found inside {0}/{1}/share/".format(hadoop_apps,hadoop_version)
	else:
		lzo_jar=files[0]

	#subprocess.check_call(["/bin/mkdir","-p",spark_log_dir])
	subprocess.call(["/bin/mkdir","-p",spark_log_dir])

	with open(spark_env_tmp_location,'a') as spark_env:
		spark_env.write("export SPARK_DAEMON_JAVA_OPTS=\"-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps\"\n")
		spark_env.write("export SPARK_LOCAL_DIRS={0}\n".format(local_dir))
		spark_env.write("export SPARK_LOG_DIR={0}\n".format(spark_log_dir))
		spark_env.write("export SPARK_CLASSPATH=\"{0}/emr/*:{1}/emrfs/*:{2}/share/hadoop/common/lib/*:{3}:/home/hadoop/hive/conf/*\"\n".format(spark_classpath,spark_classpath,hadoop_home,lzo_jar))

	subprocess.check_call(["mv",spark_env_tmp_location,spark_env_final_location])
Example #30
 def complete(self, txt):
     """
     Returns the next completion for txt, or None if there is no completion.
     """
     if not self.lookup:
         self.lookup = []
         if txt == "" or txt[0] not in "~/":
             txt = "~/" + txt
         path = os.path.expanduser(txt)
         if os.path.isdir(path):
             files = glob.glob(os.path.join(path, "*"))
             prefix = txt
         else:
             files = glob.glob(path + "*")
             prefix = os.path.dirname(txt)
             prefix = prefix.rstrip("/") or "/"
         for f in files:
             display = os.path.join(prefix, os.path.basename(f))
             if os.path.isdir(f):
                 display += "/"
             self.lookup.append((display, f))
             self.lookup.sort()
         self.offset = -1
         self.lookup.append((txt, txt))
     self.offset += 1
     if self.offset >= len(self.lookup):
         self.offset = 0
     ret = self.lookup[self.offset]
     self.thisfinal = ret[1]
     return ret[0]
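A self-contained sketch of the same glob-based completion idea, independent of the widget class above; this is an illustration, not the library's API:

import glob
import os

def path_matches(txt):
    """Return glob matches for a partial path, marking directories with a trailing '/'."""
    path = os.path.expanduser(txt or "~/")
    pattern = os.path.join(path, "*") if os.path.isdir(path) else path + "*"
    return sorted(f + "/" if os.path.isdir(f) else f for f in glob.glob(pattern))

print(path_matches("~/Do"))  # e.g. ['/home/user/Documents/', '/home/user/Downloads/']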
Example #31
import cv2
import glob
import time
import numpy as np

images = glob.glob("/home/pi/Desktop/cone/real/*.jpg")
#print images

images.sort()
tmp_list = images[0].split('/')
title = tmp_list[-1]

image = cv2.imread(images[0])

height, width, layers = image.shape

video = cv2.VideoWriter('/home/pi/Desktop/cone/video.avi',
                        cv2.cv.CV_FOURCC('M', 'J', 'P', 'G'), 1, (640, 240))

for name in images:
    tmp_list = name.split('/')
    title = tmp_list[-1]
    image_real = cv2.imread("/home/pi/Desktop/cone/real/" + title)
    image_filtered = cv2.imread("/home/pi/Desktop/cone/filtered/" + title)
    vis = np.concatenate((image_real, image_filtered), axis=1)
    time.sleep(0.1)

    cv2.imshow("vis", vis)
    key = cv2.waitKey(1) & 0xFF
    video.write(vis)
Example #32
def main_worker(rank, cfg):
  # Initialize the worker
  distributed = init_worker(rank, cfg)

  # Initialize the random seed
  if cfg.seed is not None:
    torch.manual_seed(cfg.seed)

  # Initialize the PyTorch device
  device_id = cfg.device_id + rank
  device = init_device(cfg, id=device_id)

  # Initialize the model
  model = get_model(cfg)
  model.to(device)
  if distributed:
    model = nn.parallel.DistributedDataParallel(model, device_ids=[device_id])

  # Initialize the loss function
  criterion = get_loss_function(cfg)
  criterion.to(device)

  # Initialize the optimizer
  optimizer = optim.Adam(model.parameters(), lr=1)

  # Check whether the result already exists
  result_dir = get_result_dir(cfg)
  resume = os.path.isdir(result_dir)

  # Sync the workers (required due to the previous isdir check)
  if distributed:
    dist.barrier()

  # Start or resume training
  if resume:
    if rank == 0:
      print('Resuming result:', cfg.result)

    # Load and verify the config
    result_cfg = load_config(result_dir)
    if set(result_cfg.features) != set(cfg.features):
      error('input feature set mismatch')

    # Restore the latest checkpoint
    last_epoch = get_latest_checkpoint_epoch(result_dir)
    checkpoint = load_checkpoint(result_dir, device, last_epoch, model, optimizer)
    step = checkpoint['step']
  else:
    if rank == 0:
      print('Result:', cfg.result)
      os.makedirs(result_dir)

      # Save the config
      save_config(result_dir, cfg)

      # Save the source code
      src_filenames = glob(os.path.join(os.path.dirname(sys.argv[0]), '*.py'))
      src_zip_filename = os.path.join(result_dir, 'src.zip')
      save_zip(src_zip_filename, src_filenames)

    last_epoch = 0
    step = 0

  # Make sure all workers have loaded the checkpoint
  if distributed:
    dist.barrier()

  start_epoch = last_epoch + 1
  if start_epoch > cfg.num_epochs:
    exit() # nothing to do

  # Reset the random seed if resuming result
  if cfg.seed is not None and start_epoch > 1:
    seed = cfg.seed + start_epoch - 1
    torch.manual_seed(seed)

  # Initialize the training dataset
  train_data = TrainingDataset(cfg, cfg.train_data)
  if len(train_data) > 0:
    if rank == 0:
      print('Training images:', train_data.num_images)
  else:
    error('no training images')
  train_loader, train_sampler = get_data_loader(rank, cfg, train_data, shuffle=True)
  train_steps_per_epoch = len(train_loader)

  # Initialize the validation dataset
  valid_data = ValidationDataset(cfg, cfg.valid_data)
  if len(valid_data) > 0:
    if rank == 0:
      print('Validation images:', valid_data.num_images)
    valid_loader, valid_sampler = get_data_loader(rank, cfg, valid_data, shuffle=False)
    valid_steps_per_epoch = len(valid_loader)

  # Initialize the learning rate scheduler
  lr_scheduler = optim.lr_scheduler.OneCycleLR(
    optimizer,
    max_lr=cfg.max_lr,
    total_steps=cfg.num_epochs,
    pct_start=cfg.lr_warmup,
    anneal_strategy='cos',
    div_factor=(25. if cfg.lr is None else cfg.max_lr / cfg.lr),
    final_div_factor=1e4,
    last_epoch=last_epoch-1)

  if lr_scheduler.last_epoch != last_epoch:
    error('failed to restore LR scheduler state')

  # Check whether AMP is enabled
  amp_enabled = cfg.precision == 'mixed'

  if amp_enabled:
    # Initialize the gradient scaler
    scaler = amp.GradScaler()

  # Initialize the summary writer
  log_dir = get_result_log_dir(result_dir)
  if rank == 0:
    summary_writer = SummaryWriter(log_dir)
    if step == 0:
      summary_writer.add_scalar('learning_rate', lr_scheduler.get_last_lr()[0], 0)

  # Training and evaluation loops
  if rank == 0:
    print()
    progress_format = '%-5s %' + str(len(str(cfg.num_epochs))) + 'd/%d:' % cfg.num_epochs
    total_start_time = time.time()

  for epoch in range(start_epoch, cfg.num_epochs+1):
    if rank == 0:
      start_time = time.time()
      progress = ProgressBar(train_steps_per_epoch, progress_format % ('Train', epoch))

    # Switch to training mode
    model.train()
    train_loss = 0.

    # Iterate over the batches
    if distributed:
      train_sampler.set_epoch(epoch)

    for i, batch in enumerate(train_loader, 0):
      # Get the batch
      input, target = batch
      input  = input.to(device,  non_blocking=True)
      target = target.to(device, non_blocking=True)
      if not amp_enabled:
        input  = input.float()
        target = target.float()

      # Run a training step
      optimizer.zero_grad()

      with amp.autocast(enabled=amp_enabled):
        output = model(input)
        loss = criterion(output, target)

      if amp_enabled:
        scaler.scale(loss).backward()
        scaler.step(optimizer)
        scaler.update()
      else:
        loss.backward()
        optimizer.step()

      # Next step
      step += 1
      train_loss += loss
      if rank == 0:
        progress.next()

    # Get and update the learning rate
    lr = lr_scheduler.get_last_lr()[0]
    lr_scheduler.step()

    # Compute the average training loss
    if distributed:
      dist.all_reduce(train_loss, op=dist.ReduceOp.SUM)
    train_loss = train_loss.item() / (train_steps_per_epoch * cfg.num_devices)

    # Write summary
    if rank == 0:
      summary_writer.add_scalar('learning_rate', lr, epoch)
      summary_writer.add_scalar('loss', train_loss, epoch)

    # Print stats
    if rank == 0:
      duration = time.time() - start_time
      total_duration = time.time() - total_start_time
      images_per_sec = len(train_data) / duration
      eta = ((cfg.num_epochs - epoch) * total_duration / (epoch + 1 - start_epoch))
      progress.finish('loss=%.6f, lr=%.6f (%.1f images/s, %s, eta %s)'
                      % (train_loss, lr, images_per_sec, format_time(duration), format_time(eta, precision=2)))

    if ((cfg.num_valid_epochs > 0 and epoch % cfg.num_valid_epochs == 0) or epoch == cfg.num_epochs) \
      and len(valid_data) > 0:
      # Validation
      if rank == 0:
        start_time = time.time()
        progress = ProgressBar(valid_steps_per_epoch, progress_format % ('Valid', epoch))

      # Switch to evaluation mode
      model.eval()
      valid_loss = 0.

      # Iterate over the batches
      with torch.no_grad():
        for _, batch in enumerate(valid_loader, 0):
          # Get the batch
          input, target = batch
          input  = input.to(device,  non_blocking=True).float()
          target = target.to(device, non_blocking=True).float()

          # Run a validation step
          loss = criterion(model(input), target)

          # Next step
          valid_loss += loss
          if rank == 0:
            progress.next()

      # Compute the average validation loss
      if distributed:
        dist.all_reduce(valid_loss, op=dist.ReduceOp.SUM)
      valid_loss = valid_loss.item() / (valid_steps_per_epoch * cfg.num_devices)

      # Write summary
      if rank == 0:
        summary_writer.add_scalar('valid_loss', valid_loss, epoch)

      # Print stats
      if rank == 0:
        duration = time.time() - start_time
        images_per_sec = len(valid_data) / duration
        progress.finish('valid_loss=%.6f (%.1f images/s, %.1fs)'
                        % (valid_loss, images_per_sec, duration))

    if (rank == 0) and ((cfg.num_save_epochs > 0 and epoch % cfg.num_save_epochs == 0) or epoch == cfg.num_epochs):
      # Save a checkpoint
      save_checkpoint(result_dir, epoch, step, model, optimizer)

  # Print final stats
  if rank == 0:
    total_duration = time.time() - total_start_time
    print('\nFinished (%s)' % format_time(total_duration))

  # Cleanup
  cleanup_worker(cfg)
Example #33
def delete_products(current_products, products_to_update):
    """
    Удаление продуктов, которых нет в новом xml или которые удалили из директории с дополнительными изображениями.
    Удаление индексов продуктов из модели с faiss индексами
    :param current_products: датафрейм из нового xml-файла
    :param products_to_update: текущий датафрейм
    :return: датафрейм products_to_update с удаленными продуктами
    """
    logging.info('Удаление продуктов')
    idx_to_remove = []

    # remove products that are not present in the new xml
    # determine which products to remove based on vendor codes of the current model that are missing from the new xml
    products_to_update_vendor_code = set(products_to_update['vendor_code'])
    current_products_vendor_code = set(current_products['vendor_code'])
    products_to_update_vendor_code.difference_update(
        current_products_vendor_code)
    if products_to_update_vendor_code:
        for index, row in products_to_update.iterrows():
            if row['vendor_code'] in products_to_update_vendor_code:
                idx_to_remove.append(index)
                products_to_update.drop(index, inplace=True)

    # remove rows whose images no longer exist in the directory
    current_product_files = set(
        glob.glob(config.PATH_TO_PRODUCT_FOLDER + '/*/*/*'))
    product_files_to_update = set(
        products_to_update[~products_to_update['picture'].str.
                           startswith('http')]['picture'].values)
    product_files_to_update.difference_update(current_product_files)
    if product_files_to_update:
        for file in product_files_to_update:
            index = products_to_update[products_to_update['picture'] ==
                                       file].index[0]
            products_to_update.drop(index, inplace=True)
            idx_to_remove.append(index)

    products_to_update.reset_index(inplace=True, drop=True)

    # if nothing was removed, return the dataframe unchanged
    if not idx_to_remove:
        logging.info('No products to remove')
        return products_to_update

    # remove the corresponding vectors from faiss
    logging.info(f'Removing indices from faiss [{len(idx_to_remove)} rows]')
    index = faiss.read_index(config.PATH_TO_FAISS_INDEX)
    vectors = [index.reconstruct(i) for i in range(index.ntotal)]
    vectors_without_removed = [
        vectors[i] for i in range(len(vectors)) if i not in idx_to_remove
    ]

    updated_index = faiss.IndexFlatL2(2048)
    updated_index = faiss.IndexIDMap2(updated_index)
    updated_index.add_with_ids(
        np.vstack(vectors_without_removed),
        np.hstack([i for i in range(len(vectors_without_removed))]))

    logging.info('Writing')
    faiss.write_index(updated_index, config.PATH_TO_FAISS_INDEX)
    products_to_update.to_pickle(config.PATH_TO_PRODUCT_DATASET)
    assert updated_index.ntotal == products_to_update.shape[0]

    logging.info(f'Removed {len(idx_to_remove)} rows')
    return products_to_update
Example #34
def add_additional_images(products):
    """
    Добавление в датафрейм изображений продуктов в директории config.PATH_TO_PRODUCT_FOLDER
    :param products: датафрейм с текущими продуктами
    :return: датафрейм products с добавленными строками с дополнительными изображениями продуктов,
    список idx_to_add с индексами добавленных строк для их дальнейшей обработки
    """
    logging.info(
        f'Добавление в датафрейм изображений из директории {config.PATH_TO_PRODUCT_FOLDER}'
    )
    product_files = glob.glob(config.PATH_TO_PRODUCT_FOLDER + '/*/*')
    # counters for tracking how many products have been processed
    count_all_products = 0
    count_absent_products = 0
    count_all_images = 0
    count_error_images = 0
    # the last index in the dataframe, after which new rows can be appended
    curr_index = products.index.max()
    # indices of the added rows
    idx_to_add = []

    # process each individual product folder separately
    for product_path in product_files:
        count_all_products += 1
        vendor_code = product_path.split('/')[3]
        # check whether the dataset has a product with this vendor code
        product_to_duplicate = products.loc[products['vendor_code'] == str(
            vendor_code)]
        if product_to_duplicate.empty:
            count_absent_products += 1
            logging.warning(f'No product with vendor code {vendor_code} in the database')
            continue

        # read all images of the current product
        pictures = glob.glob(product_path + '/*')
        for picture_full_path in pictures:
            picture_name = picture_full_path.replace(
                config.PATH_TO_PRODUCT_FOLDER, '')
            # if this image is already in the dataframe, move on to the next one
            if picture_name in products['picture'].values:
                continue
            # open the image according to its extension
            count_all_images += 1
            try:
                utils.open_local_image(picture_name)
                # duplicate the dataset row of the product found for the current vendor code,
                # changing only the image path
                curr_index += 1
                products.loc[curr_index] = product_to_duplicate.values[0]
                products.loc[curr_index]['picture'] = picture_name
                idx_to_add.append(curr_index)
            # if the image cannot be opened, log it
            except Exception as ex:
                count_error_images += 1
                logging.warning(
                    f'Failed to open image, error: {ex}')

        if count_all_products % 500 == 0:
            logging.debug(f'Rows processed: {count_all_products}')
    logging.info(
        f'Out of {count_all_products} products, {count_absent_products} were not found in the database'
    )
    logging.info(
        f'Out of {count_all_images} images of products found in the database, {count_error_images} could not be processed'
    )
    return products, idx_to_add
Example #35
              "Topic :: Multimedia :: Graphics :: Viewers",
              "Programming Language :: Python :: 2",
              "Programming Language :: Python :: 2.7",
              "Programming Language :: Python :: 3",
              "Programming Language :: Python :: 3.3",
              "Programming Language :: Python :: 3.4",
              "Programming Language :: Python :: 3.5",
              "Programming Language :: Python :: 3.6",
              'Programming Language :: Python :: Implementation :: CPython',
              'Programming Language :: Python :: Implementation :: PyPy',
          ],
          cmdclass={"build_ext": pil_build_ext},
          ext_modules=[Extension("PIL._imaging", ["_imaging.c"])],
          include_package_data=True,
          packages=find_packages(),
          scripts=glob.glob("Scripts/*.py"),
          install_requires=['olefile'],
          test_suite='nose.collector',
          keywords=["Imaging", ],
          license='Standard PIL License',
          zip_safe=not debug_build(), )
except RequiredDependencyException as err:
    msg = """

The headers or library files could not be found for %s,
a required dependency when compiling Pillow from source.

Please see the install instructions at:
   https://pillow.readthedocs.io/en/latest/installation.html

""" % (str(err))
Example #36
counter = 0

#noiseFunctions = funcGenerator()


source_paths = []   # Array of file paths to be reviewed
shape_vals = []     # Array containing the shape feature values of the candidates
skew_vals = []      # Array containing the skewness feature values of the candidates
kurt_vals = []      # Array containing the kurtosis feature values of the candidates
kstest_vals = []    # Array containing the ks-test feature values of the candidates
class_vals = []     # Array containing the classification labels of the candidates

timer1 = []
timer2 = []
# Loops through all .dat files to store them in the 'source_paths' array
for file in glob.glob(os.getcwd() + '\\idir\\' + "*.dat"):
    source_paths.append(file)

# Import dedispersion plan
df_ddp = pd.read_csv("dd_plan.txt")

# Setup array for step limits
dd_DM = np.array(df_ddp["DM_stop"])
dd_step = np.array(df_ddp["DM_step"])

# Constructing DM_poss array of possible DM values from ddp
DM_start = df_ddp["DM_start"]
DM_stop = df_ddp["DM_stop"]
DM_step = df_ddp["DM_step"]
DM_poss = [0.0] 
Example #37
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False

# -- Options for autosummary ----------------------------------------------
autosummary_generate = glob.glob('*.rst')

# -- Options for numpydoc -------------------------------------------------
# Generate plots for example sections
numpydoc_use_plots = True
# If we don't turn numpydoc's toctree generation off, Sphinx will warn about
# the toctree referencing missing document(s). This appears to be related to
# generating docs for classes with a __call__ method.
numpydoc_class_members_toctree = False

#------------------------------------------------------------------------------
# Plot
#------------------------------------------------------------------------------
plot_pre_code = """
import numpy as np
import scipy as sp
Example #38
# Dictionary with plot info: the info for all the parameterizations is read
ListPlotVar = al.get_modelConfig_lines(ListConfig, '-p', Calib_Storage='Plot',PlotType='Humedad_map')
DictStore = al.get_modelConfig_lines(ListConfig, '-s', 'Store')



# Build the lists for plotting in parallel
ListaEjec = []

for l in ListPlotVar:
	# Define the path from which the results to plot will be read
	ruta_in = ruta_sto + DictStore['-s '+l]['Nombre']
	# Create a folder that will hold the images of each assigned parameterization
	# Check the folder path and create it if it does not exist
	ruta_folder = ruta_Hsim+l+'/'
	Esta = glob.glob(ruta_folder)
	if len(Esta) == 0:
		os.system('mkdir '+ruta_folder)
	# Get the paths of the output files
	ruta_out_png = ruta_folder +'Humedad'+l+'_'+args.date+'.png'
	ruta_out_txt = ruta_folder +'Humedad'+l+'_'+args.date+'.txt'
	# Read the soil moisture binaries of the basin for each parameterization
	v,r = wmf.models.read_float_basin_ncol(ruta_in,args.record, cu.ncells, 5)
	# Assemble the list of parameters needed to plot the maps with the function below
	ListaEjec.append([ruta_in, ruta_out_png, ruta_out_txt, v, l])

#-------------------------------------------------------------------------------------------------------
# Generate the soil moisture plots for each parameterization
#-------------------------------------------------------------------------------------------------------

def Plot_Hsim(Lista):
Example #39
import glob
import os

import numpy as np
import pandas as pd

# Read test data from file
test_data = pd.read_excel('../resources/pozyxAPI_only_localization_dane_testowe_i_dystrybuanta.xlsx')
test_mes_x = test_data.pop('measurement x')
test_mes_y = test_data.pop('measurement y')
test_tar_x = test_data.pop('reference x')
test_tar_y = test_data.pop('reference y')
test_mes = pd.concat([test_mes_x, test_mes_y], axis=1)
test_tar = pd.concat([test_tar_x, test_tar_y], axis=1)

# Read training & target data from files
path = '../resources/'
column_names = ['0/timestamp', 't', 'no', 'measurement x', 'measurement y', 'reference x', 'reference y']

all_files = glob.glob(os.path.join(path, 'pozyxAPI_only_localization_measurement*.xlsx'))
df_from_each_file = (pd.read_excel(f, names=column_names) for f in all_files)
concatenated_df = pd.concat(df_from_each_file, ignore_index=True)

mes_x = concatenated_df.pop('measurement x')
mes_y = concatenated_df.pop('measurement y')
training_data = pd.concat([mes_x, mes_y], axis=1)

tar_x = concatenated_df.pop('reference x')
tar_y = concatenated_df.pop('reference y')
target_data = pd.concat([tar_x, tar_y], axis=1)

# Add offset to data for better readability
training_data = (training_data.astype('float32') + 2000) / 10000
target_data = (target_data.astype('float32') + 2000) / 10000
Example #40
def install_requirements_conda(ctx: Context):
    """Install all requirements, including tools used by Roberto."""
    # Collect all parameters determining the install commands (to good
    # approximation) and turn them into a hash.
    # Some conda requirements are included by default because they must be present:
    # - conda: to make sure it is always up to date.
    # - conda-build: to have conda-render for getting requirements from recipes.
    conda_reqs = set(["conda"])
    pip_reqs = set([])
    recipe_dirs = []
    # Add project as a tool because it also contains requirements.
    tools = [ctx.project]
    for package in ctx.project.packages:
        for toolname in package.tools:
            tools.append(ctx.tools[toolname])
        recipe_dir = os.path.join(package.path, "tools", "conda.recipe")
        if os.path.isdir(recipe_dir):
            recipe_dirs.append(recipe_dir)
        else:
            print("Skipping recipe {}. (directory does not exist)".format(recipe_dir))
    for tool in tools:
        for conda_req, pip_req in tool.get("requirements", []):
            if conda_req is None:
                pip_reqs.add(pip_req)
                conda_reqs.add("pip")
            else:
                conda_reqs.add(conda_req)
    req_hash = compute_req_hash(
        set("conda:" + conda_req for conda_req in conda_reqs) |
        set("pip:" + pip_req for pip_req in pip_reqs),
        sum([glob(os.path.join(recipe_dir, "*")) for recipe_dir in recipe_dirs], [])
    )

    fn_skip = os.path.join(ctx.testenv.path, ".skip_install")
    if check_install_requirements(fn_skip, req_hash):
        with ctx.prefix(ctx.conda.activate_base):
            # Update conda packages in the base env. Conda packages in the dev env
            # tend to be ignored.
            ctx.run("conda install --update-deps -y {}".format(
                " ".join("'{}'".format(conda_req) for conda_req
                         in conda_reqs if conda_req.startswith('conda'))))

        with ctx.prefix(ctx.testenv.activate):
            # Update packages already installed
            ctx.run("conda update --all -y")

            # Update and install other requirements for Roberto, in the dev env.
            ctx.run("conda install --update-deps -y {}".format(" ".join(
                "'{}'".format(conda_req) for conda_req in conda_reqs
                if not conda_req.startswith('conda'))))

            print("Rendering conda package, extracting requirements, which will be installed.")

            # Install dependencies from recipes, excluding own packages.
            own_conda_reqs = [package.dist_name for package in ctx.project.packages]
            for recipe_dir in recipe_dirs:
                # Send the output of conda render to a temporary directory.
                with tempfile.TemporaryDirectory() as tmpdir:
                    rendered_path = os.path.join(tmpdir, "rendered.yml")
                    ctx.run(
                        "conda render -f {} {} --variants {}".format(
                            rendered_path, recipe_dir, ctx.conda.variants))
                    with open(rendered_path) as f:
                        rendered = yaml.safe_load(f)
                # Build a (simplified) list of requirements and install.
                dep_conda_reqs = set([])
                req_sources = [
                    ("requirements", 'build'),
                    ("requirements", 'host'),
                    ("requirements", 'run'),
                    ("test", 'requires'),
                ]
                for req_section, req_type in req_sources:
                    for recipe_req in rendered.get(req_section, {}).get(req_type, []):
                        words = recipe_req.split()
                        if words[0] not in own_conda_reqs:
                            dep_conda_reqs.add(" ".join(words[:2]))
                ctx.run("conda install --update-deps -y {}".format(" ".join(
                    "'{}'".format(conda_req) for conda_req in dep_conda_reqs)))

            # Update and install requirements for Roberto from pip, if any.
            if pip_reqs:
                ctx.run("pip install --upgrade {}".format(" ".join(
                    "'{}'".format(pip_req) for pip_req in pip_reqs)))

        # Update the timestamp on the skip file.
        with open(fn_skip, 'w') as f:
            f.write(req_hash + '\n')
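compute_req_hash and check_install_requirements are defined elsewhere in Roberto and are not shown in this excerpt. Purely to illustrate the idea — a hash over the requirement strings and the globbed recipe files that gates re-installation — a sketch could look like this; the real helper may differ:

import hashlib
import os

def compute_req_hash_sketch(req_strings, recipe_files):
    # Illustrative only: hash the sorted requirement strings together with the
    # contents of the recipe files, so any change invalidates the skip marker.
    hasher = hashlib.sha256()
    for req in sorted(req_strings):
        hasher.update(req.encode("utf-8"))
    for fn in sorted(recipe_files):
        if os.path.isfile(fn):
            with open(fn, "rb") as f:
                hasher.update(f.read())
    return hasher.hexdigest()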
Ejemplo n.º 41
0
datasetfn1 = args.datasetfn1
datasetfn2 = args.datasetfn2

import utilities as util
import models as models
import data_generation as data_generation
import plotting as plotting

use_dir = False

if configfile:
    cfg_fns = [configfile]
elif configfile_dir:
    if configfile_dir[-1] != '/':
        configfile_dir += '/'
    cfg_fns = glob.glob(configfile_dir + '*')
    cfg_fns.sort()
    if endidx:
        assert startidx <= endidx, "Startidx is greater than endidx...not judging, just letting you know..."
        cfg_fns = cfg_fns[startidx:endidx + 1]
elif use_dir:
    # cfg_fns = "config_files/noise_model.ini"
    cfg_fns = glob.glob('exp_bm_gaussian_11am/*')
    cfg_fns.sort()
    if endidx:
        assert startidx <= endidx, "Startidx is greater than endidx...not judging, just letting you know..."
        cfg_fns = cfg_fns[startidx:endidx]
    #cfg_fns = glob.glob('test_batch/*')
else:
    #cfg_fns = ["config_files/noise_baseModel.ini"]
    #cfg_fns = ["config_files/broken_structured.ini"]
    cfg_fns = []  # assumed default; the original assignment is truncated in the source
Ejemplo n.º 42
0
if version_running < version_required:  # guard assumed; the opening of this check is truncated in the source
  print("version %s is required, but %s is running" %
        ('.'.join(str(v) for v in version_required),
         '.'.join(str(v) for v in version_running)), file=sys.stderr)
  sys.exit(1)


def resize_mask(filename, width, height, output_folder=""):
  if not os.path.exists(filename): # remove file extension and try again
    ext = filename.rsplit(".",1)
    if len(ext)>1:
      filename = ext[0]
    filename = filename + ".npy"
  tmp = np.load(filename)
  name = os.path.basename(filename).replace(".npy","")
  tmp = cv2.resize(tmp.reshape(tmp.shape[1], tmp.shape[2]), (width, height), interpolation = cv2.INTER_CUBIC)
  _, tmp = cv2.threshold(tmp,0.5,255,cv2.THRESH_BINARY)
  if output_folder:
    # print(os.path.join(output_folder, name + "_segmentation.png"))
    cv2.imwrite(os.path.join(output_folder, name + "_segmentation.png"), tmp)
  return tmp

size = int(sys.argv[1])
source = sys.argv[2]
dest = sys.argv[3]
print("Resizing images to %dx%d from %s to %s" % (size, size, source, dest))

width, height = size, size
output_folder = dest
for fname in tqdm(glob.glob(os.path.join(source, "*"))):
  resized_img = resize_mask(fname, width, height, output_folder=output_folder)
  assert resized_img.shape==(width, height)
Ejemplo n.º 43
0
    def __init__(self,
                 input_dim=(3, 32, 32),
                 num_filters=None,
                 filter_size=5,
                 hidden_dims=None,
                 num_classes=10,
                 weight_scale=1e-3,
                 reg=0.1,
                 dtype=np.float32,
                 use_batchnorm=False,
                 dropout=0,
                 seed=None,
                 loadData=None,
                 predict_fn=None,
                 augment_fn=None):
        """
    predic_fn, augment_fn: for data augmentation
    """
        self.params = {}
        self.reg = reg
        self.dtype = dtype

        self.num_filters = len(num_filters)
        self.filter_size = filter_size
        self.bn_batchnorm = use_batchnorm
        self.use_dropout = (dropout > 0)

        ############# the total number of layers including conv layer and affine layer#######
        self.num_layers = self.num_filters + len(hidden_dims) + 1
        #####################################################################################
        #    print "how many layers ? ",self.num_layers

        self.bn_params = []
        self.dropout_param = {}

        self.predict_fn = predict_fn
        self.augment_fn = augment_fn
        if augment_fn is not None:
            input_dim = (3, 28, 28)

        self.input_dim = input_dim
        if loadData is not None:
            print "Load Data is ", loadData
            for f in glob.glob("%s/convNet_params_*.npy" % loadData):
                name_lst = op.splitext(op.basename(f))[0].split("_")
                if len(name_lst) == 3:
                    param = name_lst[2]
                    if param == "dropout":
                        self.dropout_param = self.load_param(f)
                    elif param == "bn":
                        self.bn_params = self.load_param(f)
                    else:
                        self.params[param] = self.load_param(
                            f)  # W_i,b_i,beta_i, gamma_i
                        print self.params[param].shape,
                    print "load parameter %s successfully" % param
            return

        C, H, W = input_dim
        assert filter_size % 2 == 1, 'Filter size must be odd: got %d' % filter_size
        all_filters = np.array([C])
        all_filters = np.concatenate((all_filters, np.array(num_filters)),
                                     axis=0)
        for i in range(self.num_filters):
            t = i + 1
            self.params['W%d' % t] = weight_scale * np.random.randn(
                all_filters[t], all_filters[t - 1], filter_size, filter_size)
            self.params['b%d' % t] = np.zeros(all_filters[t])
            if self.bn_batchnorm is True:
                self.params['gamma%d' % t] = np.random.randn(all_filters[t])
                self.params['beta%d' % t] = np.random.randn(all_filters[t])

        all_hidden_layers = np.array(
            [H * W * all_filters[-1] / np.power(4, self.num_filters)])
        a = np.array(hidden_dims)
        all_hidden_layers = np.concatenate((all_hidden_layers, a), axis=0)
        b = np.array([num_classes])
        all_hidden_layers = np.concatenate((all_hidden_layers, b))
        length = len(all_hidden_layers) - 1
        for i in range(length):
            t = i + self.num_filters + 1
            self.params['W%d' % t] = weight_scale * np.random.randn(
                all_hidden_layers[i], all_hidden_layers[i + 1])
            self.params['b%d' % t] = np.zeros(all_hidden_layers[i + 1])
            if self.bn_batchnorm is True and i < length - 1:
                self.params['gamma%d' % t] = np.random.randn(
                    all_hidden_layers[i + 1])
                self.params['beta%d' % t] = np.random.randn(
                    all_hidden_layers[i + 1])

        if self.bn_batchnorm:
            self.bn_params = [{
                'mode': 'train'
            } for i in xrange(self.num_layers)]

        if self.use_dropout is True:
            self.dropout_param = {'mode': 'train', 'p': dropout}
            if seed is not None:
                self.dropout_param['seed'] = seed

        for k, v in self.params.iteritems():
            self.params[k] = v.astype(dtype)
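The loader above expects files named convNet_params_<param>.npy; the matching save side is not part of this snippet. A minimal sketch of what it could look like, with an illustrative function name:

import os
import numpy as np

def save_convnet_params_sketch(params, out_dir):
    # Illustrative counterpart to the loader above: write each parameter as
    # convNet_params_<name>.npy so glob("convNet_params_*.npy") picks it up.
    for name, value in params.items():
        np.save(os.path.join(out_dir, "convNet_params_%s.npy" % name), value)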
Ejemplo n.º 44
0
		self.hdu_list = fitsio.FITS(filepath) #, iter_row_buffer=25)
	
	@property
	def dataHDU(self):
		return self.hdu_list[1]
	
	@property
	def column_names(self):
		if self._column_names is None:
			row1 = self.dataHDU.read(rows=[0,0])
			self._column_names = [x.lower() for x in row1.dtype.names]
		return self._column_names
	
#column_names = None

for filepath in glob.glob(os.path.join(base_directory, "8162", "photoRun-*.fits")):

	# The file contains a list of Field records.

	session.begin()

	photoRunFile = PhotoRunFile(filepath)
	filename = os.path.basename(filepath)

	if args.verbose:
		print("Processing {0}...".format(filename))
	
	try:
		session.query(Run).filter(Run.filename == filename).one()
		#print ("found")
		continue # already in database, go to next file
	except Exception:
		# .one() raises when no matching Run exists (the original presumably
		# catches sqlalchemy's NoResultFound); the record is new, so fall through.
		pass
Ejemplo n.º 45
0
    def tearDown(self):

        files = glob.glob('test_lc0*')
        print files
        for name in files:
            os.remove(name)
Ejemplo n.º 46
0
def evaluation_with_statistics(model_dir, statistics_file , silver_all_seed_json, silver_sample_seed_json, to_file):

    statistics_list = read_dict_from_csv(statistics_file)

    dict_result = {}
    positive_count = 0
    negative_count = 0
    NULL_count = 0
    for x in statistics_list:
        P, positive, negative, null_ = x["P"], x["positive"], x["negative"], x["NULL"]
        P = unicode(P)
        dict_result[P] = {}
        dict_result[P]["positive"] = int(positive)
        dict_result[P]["negative"] = int(negative)
        dict_result[P]["NULL"] = int(null_)

        positive_count += int(positive)
        negative_count += int(negative)
        NULL_count += int(null_)


    dict_all_seed = json.load(open(silver_all_seed_json))
    dict_sample_seed = json.load(open(silver_sample_seed_json))

    # Numerator: the number of correct predictions
    precision_molecular = 0
    # Denominator: the number of predictions made
    precision_denominator = 0

    recall_molecular = 0
    recall_denominator = 0

    #
    N = 0
    macro_precision = 0.0
    macro_recall = 0.0

    for predict_file in glob.glob('%s/*/predict.json' % model_dir):

        dict_predict = json.load(open(predict_file))
        for P in dict_predict:
            predicts = set([(s, o) for s, o, prob in dict_predict[P]])
            silver_all = set(map(tuple, dict_all_seed[P]))
            silver_sample = set(map(tuple, dict_sample_seed[P]))

            precision_molecular += len(predicts & silver_all)
            precision_denominator += len(predicts)

            recall_molecular += len(predicts & silver_sample)
            recall_denominator += len(silver_sample)

            # Also compute the metrics for each individual P
            if len(predicts) == 0:
                precision = 0
                recall = 0
            else:
                precision = len(predicts & silver_all) / float(len(predicts)) * 100
                recall = len(predicts & silver_sample) / float(len(silver_sample)) * 100

            N += 1
            macro_precision += precision
            macro_recall += recall



            dict_result[P]["precision"] = "%d / %d = %.2f%%" %\
                                          (len(predicts & silver_all), len(predicts), precision)

            dict_result[P]["recall"] = "%d / %d = %.2f%%" % (len(predicts & silver_sample), len(silver_sample), recall)


            s = "%s\tprecision: %d / %d = %.2f%%\trecall: %d / %d = %.2f%%" % \
                        ( P,
                         len(predicts & silver_all), len(predicts), precision,
                         len(predicts & silver_sample), len(silver_sample), recall)

            print s



    micro_precision = precision_molecular / float(precision_denominator) * 100
    micro_recall  = recall_molecular / float(recall_denominator) * 100

    macro_precision = macro_precision / N
    macro_recall = macro_recall / N


    dict_result["Micro"] = {}
    dict_result["Micro"]["positive"] = positive_count
    dict_result["Micro"]["negative"] = negative_count
    dict_result["Micro"]["NULL"] = NULL_count
    dict_result["Micro"]["precision"] = "%d / %d = %.2f%%" % (precision_molecular, precision_denominator, micro_precision)
    dict_result["Micro"]["recall"] = "%d / %d = %.2f%%" % (recall_molecular, recall_denominator, micro_recall)


    dict_result["Macro"] = {}
    dict_result["Macro"]["positive"] = ""
    dict_result["Macro"]["negative"] = ""
    dict_result["Macro"]["NULL"] = ""
    dict_result["Macro"]["precision"] = "%.2f%%" % (macro_precision)
    dict_result["Macro"]["recall"] = "%.2f%%" % (macro_recall)


    s = "%s\tprecision: %d / %d = %.2f%%\trecall: %d / %d = %.2f%%" % \
                        ( "Micro",
                         precision_molecular, precision_denominator, micro_precision,
                         recall_molecular, recall_denominator, micro_recall)
    print s



    fout = codecs.open(to_file, "w", encoding="utf-8")
    fout.write("P,positive,negative,NULL,precision,recall\n")
    for P in sorted(dict_result.keys()):
        fout.write("%s,%s,%s,%s,%s,%s\n" % (P,
                                            dict_result[P]["positive"],
                                            dict_result[P]["negative"],
                                            dict_result[P]["NULL"],
                                            dict_result[P]["precision"],
                                            dict_result[P]["recall"],
        )
    )
    fout.close()
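For reference, the micro and macro averages computed above differ only in when the division happens: micro pools the counts across all P before dividing, while macro averages the per-P ratios. A minimal self-contained sketch of the precision side, with a hypothetical input shape:

def micro_macro_sketch(per_p_counts):
    # per_p_counts: list of (num_correct, num_predicted) pairs, one per P.
    # Micro-averaging pools the counts first; macro-averaging averages the
    # per-P ratios, so small and large P weigh equally.
    total_correct = sum(c for c, _ in per_p_counts)
    total_predicted = sum(p for _, p in per_p_counts)
    micro = total_correct / float(total_predicted) if total_predicted else 0.0
    ratios = [c / float(p) if p else 0.0 for c, p in per_p_counts]
    macro = sum(ratios) / len(ratios) if ratios else 0.0
    return micro, macro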
Ejemplo n.º 47
0
#     "櫻井愛菜",
#     "田中沙季",
#     "増崎セリナ",
#     "小野晴茄",
#     "澤田守杏",
#     "播磨ここね",
#     "田中希湖",
#     "石田美咲希",
# ]

input_file = 'output/measure/'
output_file = 'compare/'


# %%
file_paths = glob(input_file+'*.xlsx')
file_paths


# %%
def extract(file_path):
    df = pd.read_excel(file_path)
    column = df.iloc[0, 2]
    df = df.iloc[:, [1, 12]]
    df = df.rename(columns={'20本ジャンプ (秒)':column})
    return df


# %%
df = pd.DataFrame()
Ejemplo n.º 48
0
import glob
import os
import shutil
import sys
from typing import Any, Dict, List

from argoverse.utils.json_utils import read_json_file, save_json_dict

root_dir = sys.argv[1]

print("root dir = ", root_dir)
print("updating track_labels_amodal folders...")
list_log_folders = glob.glob(os.path.join(root_dir, "*"))

if len(list_log_folders) == 0:
    print("Not file founded.")
else:
    for ind_log, path_log in enumerate(list_log_folders):
        print("Processing %d/%d" % (ind_log + 1, len(list_log_folders)))
        list_path_label_persweep = glob.glob(
            os.path.join(path_log, "per_sweep_annotations_amodal", "*"))
        list_path_label_persweep.sort()
        dist_track_labels: Dict[str, List[Any]] = {}
        for path_label_persweep in list_path_label_persweep:
            data = read_json_file(path_label_persweep)
            for data_obj in data:
                id_obj = data_obj["track_label_uuid"]
                if id_obj not in dist_track_labels.keys():
                    dist_track_labels[id_obj] = []
                dist_track_labels[id_obj].append(data_obj)
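The per-UUID grouping above can also be written with collections.defaultdict, which removes the explicit membership test. A minimal equivalent sketch (the function name is illustrative):

from collections import defaultdict

def group_by_track_uuid(annotations):
    # Collect every annotation under its track_label_uuid, equivalent to the
    # explicit key check above.
    grouped = defaultdict(list)
    for data_obj in annotations:
        grouped[data_obj["track_label_uuid"]].append(data_obj)
    return dict(grouped)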
Ejemplo n.º 49
0
# DemoOS.py
from os.path import *

#print(dir(os.path))

print(abspath("python.exe"))
print(basename("c:\\python\\python.exe"))
print(exists("c:\\python\\python.exe"))
print(getsize("c:\\python\\python.exe"))

#Run operating-system commands (cmd --> cd \, cd work2)
from os import *
# c:\work2 --> c: --> c:\work2
print("Current working directory:", getcwd())
chdir("..")
chdir("c:\\work2")
print("Current directory:", getcwd())
#Run an external executable
#system("notepad.exe")

#List files and folders
import glob
print(glob.glob("*.py"))
print("="*20)
for item in glob.glob("*.*"):
    print(item)
Ejemplo n.º 50
0
        'lb-tp-approx2-weak-scaling'
    ]
}

if __name__ == '__main__':
    args = parser.parse_args()

    failed_plots = []
    not_ready_plots = []
    for plot, requirements in plot_scripts.items():
        isReady = True
        for case in args.testcases.split(','):
            for req in requirements:
                directory = os.path.join(args.indir, case, req)
                if os.path.isdir(directory) and len(
                        glob.glob(directory + '/.extracted')) > 0:
                    continue
                else:
                    isReady = False
                    break
        if isReady:
            cmd = [
                'python',
                os.path.join(this_directory, plot), '-i', args.indir, '-c',
                args.testcases
            ]
            output = subprocess.run(cmd,
                                    stdout=sys.stdout,
                                    stderr=subprocess.STDOUT,
                                    env=os.environ.copy())
            if output.returncode != 0:
                # assumed continuation; the original snippet is truncated here
                failed_plots.append(plot)
Ejemplo n.º 51
0
def documents(name, **kwargs):
    directory = kwargs['directory']
    for doc_path in glob.glob(directory+'*.md'):
        yield document_from_path(doc_path)
Ejemplo n.º 52
0
"""
2) Can we find any evidence against the null hypothesis that neural activation patterns
on the cortex remain the same across sessions?

* In other words, there is no evidence of cortical reorganization.
"""
#%%##############################################################################
# Read the EEG data
import glob
import os

from uhClass import MRCP

mainDir = '/home/hero/uhdata/freesurfer'
subject = 'S9023'

os.chdir(os.path.join(mainDir, subject))

eegfiles = glob.glob('*-epo.fif')

#%%
eeg = []
for eegfilename in eegfiles:
    if not ('_ses1_' in eegfilename or '_ses2_' in eegfilename):
        #    eegfilename = 'S9017_ses3_closeloop_block0000-epo.fif'
        ep = MRCP.readEEGepoch(eegfilename, mainDir)
        ep = ep.interpolate_bads(reset_bads='True', mode='accurate')
        ep.info['bads'] = []
        ep.resample(100)
        eeg.append(ep)

#%% Plot EEG
#from uhClass import MRCP
#filename = eegfilename.split('_s')[0]
Ejemplo n.º 53
0
def blosc_extension():
    info('setting up Blosc extension')

    # setup blosc extension
    blosc_sources = []
    extra_compile_args = []
    include_dirs = []
    define_macros = []

    # generic setup
    blosc_sources += [
        f for f in glob('c-blosc/blosc/*.c')
        if 'avx2' not in f and 'sse2' not in f
    ]
    blosc_sources += glob('c-blosc/internal-complibs/lz4*/*.c')
    blosc_sources += glob('c-blosc/internal-complibs/snappy*/*.cc')
    blosc_sources += glob('c-blosc/internal-complibs/zlib*/*.c')
    blosc_sources += glob('c-blosc/internal-complibs/zstd*/common/*.c')
    blosc_sources += glob('c-blosc/internal-complibs/zstd*/compress/*.c')
    blosc_sources += glob('c-blosc/internal-complibs/zstd*/decompress/*.c')
    blosc_sources += glob('c-blosc/internal-complibs/zstd*/dictBuilder/*.c')
    include_dirs += [os.path.join('c-blosc', 'blosc')]
    include_dirs += [
        d for d in glob('c-blosc/internal-complibs/*') if os.path.isdir(d)
    ]
    include_dirs += [
        d for d in glob('c-blosc/internal-complibs/*/*') if os.path.isdir(d)
    ]
    define_macros += [('HAVE_LZ4', 1), ('HAVE_SNAPPY', 1), ('HAVE_ZLIB', 1),
                      ('HAVE_ZSTD', 1)]
    # define_macros += [('CYTHON_TRACE', '1')]

    # determine CPU support for SSE2 and AVX2
    cpu_info = cpuinfo.get_cpu_info()

    # SSE2
    if 'sse2' in cpu_info['flags']:
        info('SSE2 detected')
        extra_compile_args.append('-DSHUFFLE_SSE2_ENABLED')
        blosc_sources += [f for f in glob('c-blosc/blosc/*.c') if 'sse2' in f]
        if os.name == 'posix':
            extra_compile_args.append('-msse2')
        elif os.name == 'nt':
            define_macros += [('__SSE2__', 1)]

    # AVX2
    if 'avx2' in cpu_info['flags']:
        info('AVX2 detected')
        extra_compile_args.append('-DSHUFFLE_AVX2_ENABLED')
        blosc_sources += [f for f in glob('c-blosc/blosc/*.c') if 'avx2' in f]
        if os.name == 'posix':
            extra_compile_args.append('-mavx2')
        elif os.name == 'nt':
            define_macros += [('__AVX2__', 1)]

    # workaround lack of support for "inline" in MSVC when building for Python
    # 2.7 64-bit
    if PY2 and os.name == 'nt':
        extra_compile_args.append('-Dinline=__inline')

    if have_cython:
        sources = ['numcodecs/blosc.pyx']
    else:
        sources = ['numcodecs/blosc.c']

    # define extension module
    extensions = [
        Extension(
            'numcodecs.blosc',
            sources=sources + blosc_sources,
            include_dirs=include_dirs,
            define_macros=define_macros,
            extra_compile_args=extra_compile_args,
        ),
    ]

    if have_cython:
        extensions = cythonize(extensions)

    return extensions
Ejemplo n.º 54
0

# ==============================================================================
# -- find carla module ---------------------------------------------------------
# ==============================================================================


import glob
import os
import sys
import pandas as pd 
import csv

try:
    sys.path.append(glob.glob('C:/Coursera/carla/PythonAPI/carla/dist/carla-*%d.%d-%s.egg' % (
        sys.version_info.major,
        sys.version_info.minor,
        'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
except IndexError:
    pass


# ==============================================================================
# -- imports -------------------------------------------------------------------
# ==============================================================================


import carla

from carla import ColorConverter as cc

import argparse
Ejemplo n.º 55
0
    def __init__(self):
        self.base_dir = '/sys/bus/w1/devices/'
        self.device_folder = glob.glob(self.base_dir + '28*')[0]
        self.device_file = self.device_folder + '/w1_slave'
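The w1_slave file located above is normally parsed for a temperature reading, but that part is not in this snippet. A minimal sketch of the conventional parsing for this kind of 1-Wire sensor file, offered as an assumption rather than as part of the original code:

def read_temp_c_sketch(device_file):
    # Typical 1-Wire (DS18B20) read: the first line ends with "YES" when the
    # CRC check passed, and the second line carries "t=<millidegrees C>".
    with open(device_file) as f:
        lines = f.readlines()
    if len(lines) >= 2 and lines[0].strip().endswith("YES"):
        pos = lines[1].find("t=")
        if pos != -1:
            return float(lines[1][pos + 2:]) / 1000.0
    return None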
Ejemplo n.º 56
0
import pandas as pd
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import sqlite3
import glob

sns.set(font_scale=1.5, style="white")
files = glob.glob('*_traj_merged.db')

dfs = []
for fi in files:
    s = fi.split("_")
    opt = s[3]
    if opt == '1':
        mu = float(s[5])
        print(fi,mu)
        c = sqlite3.connect(fi)
        df = pd.read_sql('select repid,freq,origin,esize from freqs where repid < 1 and generation == 50000',c) # group by repid,pos,origin',c)
        df.drop_duplicates(inplace=True)
        df['mu']=[mu]*len(df.index)
        dfs.append(df)
        c.close()

df = pd.concat(dfs)

gr = df.groupby(['mu'])
Ejemplo n.º 57
0
#!/usr/bin/env python
#coding:utf-8

import os, sys, re, csv, glob

wordlist = list()
for fname in glob.glob('../../../forth/primitives/*.asm'):
    wordlist = wordlist + re.findall('\n\s+[;/]+\s+word:\s+(\S+)', open(fname).read())

def texify(x):
    for word in x:
        for char in "\\$_&{}%#":
            word = word.replace(char, '\\' + char)
        yield word.replace('\\\\', '\\symbol{92}')

print '\n'.join(r'\fw{%s}' % x for x in texify(sorted(wordlist)))

Ejemplo n.º 58
0
args = VarParsing.VarParsing('analysis')
args.register('inputFile', '', args.multiplicity.list, args.varType.string,
              "Input file or template for glob")
args.outputFile = ''
args.parseArguments()
'''
#####################   Input    ###################
'''
process.maxEvents = cms.untracked.PSet(
    input=cms.untracked.int32(args.maxEvents))

from glob import glob
if args.inputFile:
    if len(args.inputFile) == 1 and '*' in args.inputFile[0]:
        flist = glob(args.inputFile[0])
    else:
        flist = args.inputFile
elif args.inputFiles:
    if len(args.inputFiles) == 1 and args.inputFiles[0].endswith('.txt'):
        with open(args.inputFiles[0]) as f:
            flist = [l[:-1] for l in f.readlines()]
    else:
        flist = args.inputFiles
else:
    fdefault = os.environ[
        'CMSSW_BASE'] + '/src/ntuplizer/BPH_RDntuplizer/production/'
    # fdefault += 'inputFiles_BP_Tag_B0_MuNuDmst_Hardbbbar_evtgen_ISGW2_PUc0_10-2-3.txt'
    fdefault += 'inputFiles_BP_Tag-Probe_B0_JpsiKst_Hardbbbar_evtgen_HELAMP_PUc0_10-2-3.txt'
    with open(fdefault) as f:
        flist = [l[:-1] for l in f.readlines()]
Ejemplo n.º 59
0
from glob import glob
from os.path import join

def contain_img(d):
    return len(glob(join(d, '*.jpg'))) > 0
Ejemplo n.º 60
0
# -*- coding: utf-8 -*-
from pkg_resources import get_distribution, DistributionNotFound

from os.path import dirname, basename, isfile
import glob
modules = glob.glob(dirname(__file__)+"/*.py")
__all__ = [basename(f)[:-3] for f in modules if isfile(f) and not f.endswith('__init__.py')]
name = "uncertainty_forest"

try:
    # Change here if project is renamed and does not equal the package name
    dist_name = 'uncertainty-forest'
    __version__ = get_distribution(dist_name).version
except DistributionNotFound:
    __version__ = 'unknown'
finally:
    del get_distribution, DistributionNotFound