Example #1
    def test_global_check_and_location_preprocessors_should_be_applied_after_global_custom_preprocessors(self):
        ui = self._create_ui()
        ui.add_library('lib')
        ui.enable_location_preprocessing()
        ui.enable_check_preprocessing()
        ui.add_preprocessor(TestPreprocessor())

        file_name = self.create_entity_file()
        ui.add_source_files(file_name, 'lib')

        pp_source = Template("""\
-- check_relation(a = b, line_num => 1, file_name => "$file", \
auto_msg => "Relation a = b failed! Left is " & to_string(a) & ". Right is " & to_string(b) & ".");

library vunit_lib;
context vunit_lib.vunit_context;

entity $entity is
end entity;

architecture arch of $entity is
begin
    log("Hello World", line_num => 11, file_name => "$file");
    check_relation(1 /= 2, line_num => 12, file_name => "$file", \
auto_msg => "Relation 1 /= 2 failed! Left is " & to_string(1) & ". Right is " & to_string(2) & ".");
    report "Here I am!";
end architecture;
""")
        with open(join(self._preprocessed_path, 'lib', basename(file_name))) as fread:
            self.assertEqual(fread.read(), pp_source.substitute(entity='ent0', file=basename(file_name)))
Example #2
    def _preprocess(self, library_name, file_name, preprocessors):
        """
        Preprocess file_name within library_name using explicit preprocessors.
        If preprocessors is None, the globally defined preprocessors are used.
        """
        # @TODO dependency checking etc...

        if preprocessors is None:
            preprocessors = [self._location_preprocessor, self._check_preprocessor]
            preprocessors = [p for p in preprocessors if p is not None]
            preprocessors = self._external_preprocessors + preprocessors

        if len(preprocessors) == 0:
            return file_name

        code = ostools.read_file(file_name)
        for preprocessor in preprocessors:
            code = preprocessor.run(code, basename(file_name))

        pp_file_name = join(self._preprocessed_path, library_name, basename(file_name))

        idx = 1
        while ostools.file_exists(pp_file_name):
            LOGGER.debug("Preprocessed file exists '%s', adding prefix", pp_file_name)
            pp_file_name = join(self._preprocessed_path,
                                library_name, "%i_%s" % (idx, basename(file_name)))
            idx += 1

        ostools.write_file(pp_file_name, code)
        return pp_file_name
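
A minimal sketch of the chaining contract _preprocess relies on (the preprocessor classes and file name below are made up): each preprocessor takes the full source text plus the file's basename and returns rewritten text, so the order of application matters.

from os.path import basename

class BannerPreprocessor:
    def run(self, code, file_name):
        # Prepend a marker comment; file_name is just the basename.
        return ("-- preprocessed from %s\n" % file_name) + code

class UpperCasePreprocessor:
    def run(self, code, file_name):
        return code.upper()

def chain(code, file_name, preprocessors):
    # Same loop as in _preprocess: each stage sees the previous stage's output.
    for preprocessor in preprocessors:
        code = preprocessor.run(code, basename(file_name))
    return code

print(chain("entity ent0 is\nend entity;", "/tmp/ent0.vhd",
            [BannerPreprocessor(), UpperCasePreprocessor()]))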
Example #3
def _copy_contents(dst_dir, contents):
    items = {"dirs": set(), "files": set()}

    for path in contents:
        if isdir(path):
            items['dirs'].add(path)
        elif isfile(path):
            items['files'].add(path)

    dst_dir_name = basename(dst_dir)

    if dst_dir_name == "src" and len(items['dirs']) == 1:
        copytree(list(items['dirs']).pop(), dst_dir, symlinks=True)
    else:
        makedirs(dst_dir)
        for d in items['dirs']:
            copytree(d, join(dst_dir, basename(d)), symlinks=True)

    if not items['files']:
        return

    if dst_dir_name == "lib":
        dst_dir = join(dst_dir, mkdtemp(dir=dst_dir))

    for f in items['files']:
        copyfile(f, join(dst_dir, basename(f)))
Example #4
    def _find_in_most_specific_archive(self, path):
        file_name = basename(path)
        dir_name = dirname(path)

        while len(dir_name) > 0:
            archive_name = join(self.data_root, dir_name + archive_extension)

            if access(archive_name, R_OK) and \
                archive_name not in self.open_archives:
                self.open_archives[archive_name] = ZipFile(archive_name, "r")

            if archive_name in self.open_archives:
#                print "expecting to find file %s in archive %s" % \
#                        (file_name, archive_name)
                return self.open_archives[archive_name].open(file_name)
                    # XXX open() method is for ZipFile; would be different for
                    # Tar if we want to change archive type.

            file_name = join(basename(dir_name), file_name)
            dir_name = dirname(dir_name)

        raise IOError(
            "File not found (%s), not even in archive; data_root is %s" % \
                    (path, self.data_root)
                    )
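
To make the walk-up concrete, here is a sketch that only enumerates the (archive, member) candidates the loop above probes, assuming archive_extension is ".zip" (the snippet does not pin it down); output shown for POSIX paths.

from os.path import basename, dirname, join

def candidates(path, archive_extension=".zip"):
    file_name = basename(path)
    dir_name = dirname(path)
    while dir_name:
        # Try the deepest directory as an archive first, then walk upward,
        # pushing each passed directory onto the member path.
        yield dir_name + archive_extension, file_name
        file_name = join(basename(dir_name), file_name)
        dir_name = dirname(dir_name)

print(list(candidates("a/b/c.txt")))
# [('a/b.zip', 'c.txt'), ('a.zip', 'b/c.txt')]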
Example #5
    def vars2png(self, wrfout_path, dom_id, ts_esmf, vars):
        """
        Postprocess a list of scalar fields into KMZ files.

        :param wrfout_path: WRF file to process
        :param dom_id: the domain identifier
        :param ts_esmf: time stamp in ESMF format
        :param vars: list of variables to process
        """
        # open the netCDF dataset
        d = nc4.Dataset(wrfout_path)

        # extract ESMF string times and identify timestamp of interest
        times = [''.join(x) for x in d.variables['Times'][:]]
        if ts_esmf not in times:
            raise PostprocError("Invalid timestamp %s" % ts_esmf)
        tndx = times.index(ts_esmf)

        # build one KMZ per variable
        for var in vars:
            try:
                outpath_base = os.path.join(self.output_path, self.product_name + ("-%02d-" % dom_id) + ts_esmf + "-" + var) 
                if var in ['WINDVEC']:
                    raster_path, coords = self._vector2png(d, var, tndx, outpath_base)
                    raster_name = osp.basename(raster_path)
                    self._update_manifest(dom_id, ts_esmf, var, { 'raster' : raster_name, 'coords' : coords})
                else:
                    raster_path, cb_path, coords = self._scalar2png(d, var, tndx, outpath_base)
                    mf_upd = { 'raster' : osp.basename(raster_path), 'coords' : coords}
                    if cb_path is not None:
                        mf_upd['colorbar'] = osp.basename(cb_path)
                    self._update_manifest(dom_id, ts_esmf, var, mf_upd)
            except Exception as e:
                logging.warning("Exception %s while postprocessing %s for time %s into PNG" % (e.message, var, ts_esmf))
                logging.warning(traceback.print_exc())
Example #6
    def submit(self,  root=None, force=False, repo=None): 
        """Create a dataset for the bundle, then add a resource for each of the
        extracts listed in the bundle.yaml file"""
        import databundles.util as du
        
        if repo:
            self.repo_name = repo
            self.set_api()
        
        
        from os.path import  basename
    
        ckb = self.remote.update_or_new_bundle_extract(self.bundle)
        
        sent = set()
    
        self.remote.put_package(ckb)
        
        for doc in self.bundle.config.group('about').get('documents',[]):
            self.store_document(ckb, doc)

        for extract_data in self.generate_extracts(root=root):

            file_ = self._do_extract(extract_data, force=force)
            if file_ not in sent:

                r = self._send(ckb, extract_data,file_)
                sent.add(file_)
                url = r['ckan_url']
                self.bundle.log("Submitted {} to {}".format(basename(file_), url))
            else:
                self.bundle.log("Already processed {}, not sending.".format(basename(file_)))
        
        return True
Example #7
def cpMCNPproject(directory):
    wkdir=getcwd()
    if checkifMCNPproject(directory,1)==1:
        return 1
    elif checkifMCNPproject(wkdir,2)==2:
        return 2
    else:
        cards = [   path.join(directory,"cards/parameters.part"),
                    path.join(directory,"cards/materials.part"),
                    path.join(directory,"cards/source.part"),
                    path.join(directory,"cards/tallies.part"),
                    path.join(directory,"cards/traslations.part")]
        geom  = [   path.join(directory,"geom/cells.part"),
                    path.join(directory,"geom/surfaces.part")]
        for card in cards:
            try:
                copyfile(card, path.join(wkdir, "cards/",path.basename(card)))
            except Exception as e:
                print "\n\033[1;34mMCNPmanager cp error:\033[1;32m %s \033[0m\n" % (e)

        for g in geom:
            try:
                copyfile(g, path.join(wkdir, "geom/",path.basename(g)))
            except Exception as e:
                print "\n\033[1;34mMCNPmanager cp error:\033[1;32m %s \033[0m\n" % (e)
        return 0
Example #8
def remote_copy(files, host, user, path, transfer_tool='ssh'):
    if transfer_tool == 'paramiko':
        import paramiko
        pyhrf.verbose(1, 'Copying files to remote destination %s@%s:%s ...' \
                          %(host,user,path))
        ssh = paramiko.SSHClient()
        known_hosts_file = os.path.join("~", ".ssh", "known_hosts")
        ssh.load_host_keys(os.path.expanduser(known_hosts_file))
        ssh.connect(host, username=user)
        sftp = ssh.open_sftp()
        for f in files:
            remotepath = op.join(path,op.basename(f))
            pyhrf.verbose(2, f + ' -> ' + remotepath + ' ...')
            flocal = open(f)
            remote_file = sftp.file(remotepath, "wb")
            remote_file.set_pipelined(True)
            remote_file.write(flocal.read())
            flocal.close()
            remote_file.close()
        sftp.close()
        ssh.close()
    else:
        sfiles = string.join(['"%s"'%f for f in files], ' ')

        scp_cmd = 'scp -C %s "%s@%s:%s"' %(sfiles, user, host, path)
        pyhrf.verbose(1, 'Data files transfer with scp ...')
        pyhrf.verbose(2, scp_cmd)
        if os.system(scp_cmd) != 0:
            raise Exception('Error while scp ...')

    pyhrf.verbose(1, 'Copy done!')

    return [op.join(path,op.basename(f)) for f in files]
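
As a side note, the manual read/write loop in the paramiko branch loads each file fully into memory; paramiko's SFTPClient.put streams the copy instead. A hedged alternative sketch (host, user and path are placeholders):

import os.path as op
import paramiko

def remote_copy_put(files, host, user, path):
    ssh = paramiko.SSHClient()
    ssh.load_system_host_keys()
    ssh.connect(host, username=user)
    sftp = ssh.open_sftp()
    try:
        for f in files:
            # put() streams the local file to the remote path.
            sftp.put(f, op.join(path, op.basename(f)))
    finally:
        sftp.close()
        ssh.close()
    return [op.join(path, op.basename(f)) for f in files]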
Example #9
def compile_timestamped_transcript_files(json_filenames):
    """
    `json_filenames` is a list of filepaths with this filename format:
      00900-01000.json
      where the left number represents the starting time offset and
        the right number represents the ending time, in seconds

    Each file in this list follows the Watson API standard JSON response

    Returns: a dictionary that is the result of concatenating all the json files
    into one, with "results" pointing to a list of all returned responses.

    To maintain compatibility with Watson's API response, a "result_index" key
      is included and set to 0, i.e. it is as if the resulting dictionary
      were the response returned when sending an entire unbroken sound stream to Watson
    """
    compiled_results = []
    compiled_dict = {'results': compiled_results, "result_index": 0}
    filenames = sorted(json_filenames, key=lambda x: int(basename(x).split('-')[0]))
    for fn in filenames:
        start_offset_sec = int(basename(fn).split('-')[0])
        with open(fn) as f:
            data = json.load(f)
            for result in data['results']:
                for x in result.get('word_alternatives'):
                    x['start_time'] += start_offset_sec
                    x['end_time'] += start_offset_sec
                for alt in result.get('alternatives'):
                    for ts in alt['timestamps']:
                        # each timestamp object is a list:
                        # ["hi", 9.93, 10.11]
                        ts[1] += start_offset_sec
                        ts[2] += start_offset_sec
                compiled_results.append(result)
    return compiled_dict
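
The numeric sort key matters when offsets are not zero-padded to a fixed width, where plain string sorting would misorder the chunks. A quick check with made-up file names:

from os.path import basename

json_filenames = ['chunks/900-1000.json', 'chunks/10000-10100.json',
                  'chunks/1000-1100.json']
print(sorted(json_filenames, key=lambda x: int(basename(x).split('-')[0])))
# ['chunks/900-1000.json', 'chunks/1000-1100.json', 'chunks/10000-10100.json']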
Example #10
def ZipIt(request, Qobject6):
    import zipfile
    # remove old
    import os
    try:
        pathtoold = PathMaker(request.user.username, 'results.zip')
        os.remove(pathtoold)
    except OSError:
        pass
    #from Results.models import dbResults
    # Make Results object
    # query = 'SELECT * FROM Inputs_dbresults WHERE username = "******" ORDER BY id DESC LIMIT 1'
    # Qobject6 = dbResults.objects.raw(query)[0]

    # select table columns to send; fibril.pdb, LayerLines.jpg and parameters.txt
    fibfile = str(Qobject6.fibrilPDB)
    LLout = str(Qobject6.LLoutputPic)
    inten = 'static_in_pro/media_root/' + str(Qobject6.intensity)
    parampath = PathMaker(request.user.username, 'parameters.txt')
    # give file a name and location in user's dir
    filename = 'results.zip'
    Path = PathMaker(request.user.username, filename)
    # remove paths
    from os.path import basename
    # buffer = StringIO()
    zipped = zipfile.ZipFile(Path, 'w')
    zipped.write(fibfile, basename(fibfile))
    zipped.write(LLout, basename(LLout))
    zipped.write(inten, basename(inten))
    zipped.write(parampath, basename(parampath))
    zipped.close()
Example #11
 def create_tag(self):
     current_stack = inspect.stack()
     stackid = 0
     while basename(current_stack[stackid][1]) in ('environment.py', 'nfs4client.py'):
         stackid += 1
     test_name = '%s:%s' % (basename(current_stack[stackid][1]), current_stack[stackid][3])
     return test_name
Example #12
def print_usage(actions):
    """Print the usage information.  (Help screen)"""
    actions = actions.items()
    actions.sort()
    print 'usage: %s <action> [<options>]' % basename(sys.argv[0])
    print '       %s --help' % basename(sys.argv[0])
    print
    print 'actions:'
    for name, (func, doc, arguments) in actions:
        print '  %s:' % name
        for line in doc.splitlines():
            print '    %s' % line
        if arguments:
            print
        for arg, shortcut, default, argtype in arguments:
            if isinstance(default, bool):
                print '    %s' % (
                    (shortcut and '-%s, ' % shortcut or '') + '--' + arg
                )
            else:
                print '    %-30s%-10s%s' % (
                    (shortcut and '-%s, ' % shortcut or '') + '--' + arg,
                    argtype, default
                )
        print
Example #13
    def __init__(self, current_file_path=''):
        """
        FileDialog constructor.

        Args:
            current_file_path: the current directory or path to the open flow graph
        """
        if not current_file_path: current_file_path = path.join(DEFAULT_FILE_PATH, NEW_FLOGRAPH_TITLE + Preferences.file_extension())
        if self.type == OPEN_FLOW_GRAPH:
            FileDialogHelper.__init__(self, gtk.FILE_CHOOSER_ACTION_OPEN, 'Open a Flow Graph from a File...')
            self.add_and_set_filter(get_flow_graph_files_filter())
            self.set_select_multiple(True)
        elif self.type == SAVE_FLOW_GRAPH:
            FileDialogHelper.__init__(self, gtk.FILE_CHOOSER_ACTION_SAVE, 'Save a Flow Graph to a File...')
            self.add_and_set_filter(get_flow_graph_files_filter())
            self.set_current_name(path.basename(current_file_path))
        elif self.type == SAVE_CONSOLE:
            FileDialogHelper.__init__(self, gtk.FILE_CHOOSER_ACTION_SAVE, 'Save Console to a File...')
            self.add_and_set_filter(get_text_files_filter())
            file_path = path.splitext(path.basename(current_file_path))[0]
            self.set_current_name(file_path) #show the current filename
        elif self.type == SAVE_IMAGE:
            FileDialogHelper.__init__(self, gtk.FILE_CHOOSER_ACTION_SAVE, 'Save a Flow Graph Screen Shot...')
            self.add_and_set_filter(get_image_files_filter())
            current_file_path = current_file_path + IMAGE_FILE_EXTENSION
            self.set_current_name(path.basename(current_file_path)) #show the current filename
        elif self.type == OPEN_QSS_THEME:
            FileDialogHelper.__init__(self, gtk.FILE_CHOOSER_ACTION_OPEN, 'Open a QSS theme...')
            self.add_and_set_filter(get_qss_themes_filter())
            self.set_select_multiple(False)
        self.set_current_folder(path.dirname(current_file_path)) #current directory
Example #14
	def to_xml(self):
		q = Element('dictionary')
		q.attrib["value"] = basename(dirname(self.dct))
		
		r = SubElement(q, "revision", 
					value=str(self._svn_revision(dirname(self.dct))),
					timestamp=datetime.utcnow().isoformat(),
					checksum=self._checksum(open(self.dct, 'rb').read()))
		
		s = SubElement(r, 'corpus')
		s.attrib["value"] = basename(self.fn)
		s.attrib["checksum"] = self._checksum(open(self.fn, 'rb').read())
		
		SubElement(r, 'percent').text = "%.2f" % self.get_coverage()
		SubElement(r, 'total').text = str(len(self.get_words()))
		SubElement(r, 'known').text = str(len(self.get_known_words()))
		SubElement(r, 'unknown').text = str(len(self.get_unknown_words()))
		
		wrx = re.compile(r"\^(.*)/")
		s = SubElement(r, 'top')
		for word, count in self.get_top_unknown_words():
			SubElement(s, 'word', count=str(count)).text = wrx.search(word).group(1)
		
		s = SubElement(r, 'system')
		SubElement(s, 'time').text = "%.4f" % self.timer
		
		return ("coverage", etree.tostring(q))
Example #15
def copy_static_entry(source, targetdir, builder, context={},
                      exclude_matchers=(), level=0):
    """Copy a HTML builder static_path entry from source to targetdir.

    Handles all possible cases of files, directories and subdirectories.
    """
    if exclude_matchers:
        relpath = relative_path(builder.srcdir, source)
        for matcher in exclude_matchers:
            if matcher(relpath):
                return
    if path.isfile(source):
        target = path.join(targetdir, path.basename(source))
        if source.lower().endswith('_t') and builder.templates:
            # templated!
            fsrc = open(source, 'r', encoding='utf-8')
            fdst = open(target[:-2], 'w', encoding='utf-8')
            fdst.write(builder.templates.render_string(fsrc.read(), context))
            fsrc.close()
            fdst.close()
        else:
            copyfile(source, target)
    elif path.isdir(source):
        if level == 0:
            for entry in os.listdir(source):
                if entry.startswith('.'):
                    continue
                copy_static_entry(path.join(source, entry), targetdir,
                                  builder, context, level=1,
                                  exclude_matchers=exclude_matchers)
        else:
            target = path.join(targetdir, path.basename(source))
            if path.exists(target):
                shutil.rmtree(target)
            shutil.copytree(source, target)
Example #16
def eMailCSV(fName):
#This allows the user to automatically send an e-mail containing the CSV file
    send_from = "From_Email"
    send_to = "To_Email"
    subject = "Email_Subject"
    header = "To: To_Email\n From: From_Email\n Subject: Email_Subject\n\n\n"
    text = "Email_Text"
    eServer = "email.server.ext"
    ePort = portNumber
    
    msg = MIMEMultipart()
    msg['From']=send_from
    msg['To'] = send_to
    msg['Date'] =formatdate(localtime=True)
    msg['Subject'] = subject
    msg.attach(MIMEText(text))
    
    with open(fName, "rb") as fil:
        msg.attach(MIMEApplication(
            fil.read(),
            Content_Disposition='attachment; filename="%s"' % basename(fName),
            Name=basename(fName)
        ))

    smtp = smtplib.SMTP(eServer,ePort)
    smtp.ehlo()
    #smtp.start_ssl()
    smtp.starttls()
    smtp.ehlo()
    smtp.login('Login_Name', 'Login_Password')
    smtp.sendmail(send_from, send_to, msg.as_string())
    smtp.close()
Example #17
def _get_matfile_data():

    # compare to Arielle's in the .mat file
    mat_file = "sub01_session_1_raw_ROI_timeseries.mat"
    mat_file = osp.join(osp.dirname(osp.realpath(__file__)), mat_file)
    # to_check.keys() == ['all_rois', 'time_series', 
    #                     '__globals__', 'Nvox', '__header__', '__version__']
    to_check = sio.loadmat(mat_file)
    nvox = to_check['Nvox'][0]
    nb_runs = to_check['time_series'].shape[2] # has shape (time, rois, nb_runs)
    assert nb_runs == 4

    # make a dict for nvox
    check_nvox = {}
    for idx, roi in enumerate(to_check['all_rois']):
        k, _ = osp.splitext(osp.basename(roi[0][0]))
        check_nvox[k] = nvox[idx]

    # make a dict for signals
    arielle_runs = []
    for run in range(nb_runs):
        check_signals = {}
        for idx, roi in enumerate(to_check['all_rois']):
            k  = osp.splitext(osp.basename(roi[0][0]))[0]
            check_signals[k] = to_check['time_series'][:,idx,run]
        arielle_runs.append(check_signals)

    return check_nvox, arielle_runs
Example #18
def main():
    DCT_H = 30
    DCT_W = 40
    a = 1
    while a < len(argv):
        if argv[a] == '-h':
            DCT_H = int(argv[a+1])
            a += 2
        elif argv[a] == '-w':
            DCT_W = int(argv[a+1])
            a += 2
        else:
            stderr.write('Unknown option: %s\n' % argv[a])
            return 1
            
    for fname in stdin:
        fname=fname.strip()
        if len(fname) == 0: continue
        f = basename(fname)[:3] # frame
        us = basename(dirname(fname)).split('_')
        u, s = us[0], us[1]
        dct = reduce(lambda x,y: x+y,
                     GetDCT(fname)[:DCT_H,:DCT_W].tolist(), [])
        odir = 'data/features/video/%s/%s' % (u, s)
        if not exists(odir):
            system('mkdir -p %s' % odir)
        fdct = open('%s/%s.dct' % (odir, f), 'w')
        for i in dct:
            fdct.write('%f\n' % i)
        fdct.close()
    return 0
Example #19
 def createChimeraScript(self, volume, pdb):
     """ Create a chimera script to visualize a pseudoatoms pdb
     obteined from a given EM 3d volume.
     A property will be set in the pdb object to 
     store the location of the script.
     """
     pseudoatoms = pdb.getFileName()
     scriptFile = pseudoatoms + '_chimera.cmd'
     pdb._chimeraScript = String(scriptFile)
     sampling = volume.getSamplingRate()
     radius = sampling * self.pseudoAtomRadius.get() 
     fnIn = volume.getFileName()
     localInputFn = self._getBasePath(fnIn)
     createLink(fnIn, localInputFn)
     fhCmd = open(scriptFile, 'w')
     fhCmd.write("open %s\n" % basename(pseudoatoms))
     fhCmd.write("rangecol bfactor,a 0 white 1 red\n")
     fhCmd.write("setattr a radius %f\n" % radius)
     fhCmd.write("represent sphere\n")
     fhCmd.write("open %s\n" % basename(localInputFn))
      
     threshold = 0.01
     if self.maskMode == NMA_MASK_THRE:
         threshold = self.maskThreshold.get()
     xdim = volume.getDim()[0]
     origin = xdim / 2
     fhCmd.write("volume #1 level %f transparency 0.5 voxelSize %f originIndex %d\n" % (threshold, sampling, origin))
     fhCmd.close()
Example #20
def get(url, output=None):
    if output is None:
        output = P.basename(urlparse(url).path)
        base = output
    else:
        base = P.basename(output)

    with file(output, 'wb') as f:
        try:
            r = urllib2.urlopen(url)
        except urllib2.HTTPError, e:
            raise HelperError('urlopen', None, '%s: %s' % (url, e))

        current = 0
        size = int(r.info().get('Content-Length', -1))

        while True:
            block = r.read(16384)
            if not block:
                break
            current += len(block)
            if size < 0:
                info('\rDownloading %s: %i kB' % (base, current/1024.), end='')
            else:
                info('\rDownloading %s: %i / %i kB (%0.2f%%)' % (base, current/1024., size/1024., current*100./size), end='')
            f.write(block)
        info('\nDone')
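
The default output name is just the basename of the URL's path component. A small illustration (URL made up; urllib.parse shown since it is the Python 3 home of the urlparse used above):

from os.path import basename
from urllib.parse import urlparse

print(basename(urlparse('http://example.com/pkg/tool-1.2.tar.gz').path))
# tool-1.2.tar.gz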
Example #21
    def _remove_res(self, res):
        LEN_PREFIX = self.db.LEN_PREFIX
        res_id, last_updated, name, res_data = self.res_index[res.area_path]
        # res_data: {lang -> blobname -> ilk -> toplevelnames}
        for lang, tfifb in res_data.items():
            dbfile_and_res_id_from_blobname = self.blob_index[lang]
            for blobname, toplevelnames_from_ilk in tfifb.items():
                # Update 'blob_index' for $lang.
                dbfile, res_id = dbfile_and_res_id_from_blobname[blobname]
                del dbfile_and_res_id_from_blobname[blobname]

                # Remove ".blob" file (and associated caches).
                pattern = join(self.base_dir, safe_lang_from_lang(lang),
                               dbfile+".*")
                try:
                    for path in glob(pattern):
                        log.debug("fs-write: remove catalog %s blob file '%s'",
                                  lang, basename(path))
                        os.remove(path)
                except EnvironmentError as ex:
                    #XXX If get lots of these, then try harder. Perhaps
                    #    creating a zombies area, or creating a list of
                    #    them: self.db.add_zombie(dbpath).
                    #XXX THis isn't a correct analysis: the dbfile may just
                    #    not have been there.
                    log.warn("could not remove dbfile '%s' (%s '%s'): "
                             "leaving zombie", dbpath, lang, blobname)

                # Update 'toplevel*_index' for $lang.
                # toplevelname_index:   {lang -> ilk -> toplevelname -> res_id -> blobnames}
                # toplevelprefix_index: {lang -> ilk -> prefix -> res_id -> toplevelnames}
                for ilk, toplevelnames in six.iteritems(toplevelnames_from_ilk):
                    try:
                        bfrft = self.toplevelname_index[lang][ilk]
                        for toplevelname in toplevelnames:
                            del bfrft[toplevelname][res_id]
                            if not bfrft[toplevelname]:
                                del bfrft[toplevelname]
                    except KeyError as ex:
                        self.db.corruption("CatalogsZone._remove_res",
                            "error removing top-level names of ilk '%s' for "
                                "'%s' resource from toplevelname_index: %s"
                                % (ilk, basename(res.path), ex),
                            "ignore")

                    try:
                        tfrfp = self.toplevelprefix_index[lang][ilk]
                        for toplevelname in toplevelnames:
                            prefix = toplevelname[:LEN_PREFIX]
                            del tfrfp[prefix][res_id]
                            if not tfrfp[prefix]:
                                del tfrfp[prefix]
                    except KeyError as ex:
                        self.db.corruption("CatalogsZone._remove_res",
                            "error removing top-level name of ilk '%s' for "
                                "'%s' resource from toplevelprefix_index: %s"
                                % (ilk, basename(res.path), ex),
                            "ignore")

        del self.res_index[res.area_path]
Example #22
def mainFunc():
    parser = argparse.ArgumentParser(description='Run Elastix registration protocol for all images in the directory')
    parser.add_argument('--refDir', '-r', dest='refDir', required = True, \
    help='The directory containing the reference images.')
    parser.add_argument('--floatFile', '-f', dest='floatFile', required = True, \
    help='Path to the floating image.')
    parser.add_argument('--outDir', '-o', dest='outDir', required = False, \
    help='Path to store the output images/parameters (default: current dir)', default=os.getcwd())
    parser.add_argument('--atlas', '-a', dest='atlas', required = False, \
    help='Path to the atlas segmentation file which will be resampled with the CPP file from the registration.', default=None)

    args = parser.parse_args()

    refImgs = [join(args.refDir, File) for File in listdir(args.refDir)]
    refImgs = [img for img in refImgs if isfile(img) and img.endswith('.nii')]

    if not refImgs:
        print('Couldn\'t find any reference images')
        return

    if not path.isfile(args.floatFile):
        print('Coudln\'t find the float image')

    refImgs.sort(key=str.lower)

    refFloatPairs = [[refImg, args.floatFile] for refImg in refImgs]

    f3dParStr = paramListToShortString(f3d_params)
    aladinParStr = paramListToShortString(aladin_params)
    for rfPair in refFloatPairs:
        baseName = basename(rfPair[0])[:-4]+'_'+basename(rfPair[1])[:-4]
        currOutDir = join(args.outDir,baseName)
        mkdir(currOutDir)
        elastixLogPath = join(currOutDir, baseName+'_LOG.txt')
        elastixCommand = elastixExec+' -f '+rfPair[0]+' -m '+rfPair[1]+''.join(' -p '+p for p in elastixParams)+' -o '+currOutDir
        elastixLog = ''
        try:
            elastixLog = ccall(elastixCommand, shell=True, stderr=STDOUT)
        except CalledProcessError as err:
            writeAndDie(err.output, elastixLogPath)   
        with open(elastixLogPath, 'w') as f:
            f.write(elastixLog)
        
        transformParameterFiles = ['TransformParameters.0.txt', 'TransformParameters.1.txt']
        transformParameterFiles = [join(currOutDir,tpFile) for tpFile in transformParameterFiles]
        for tpFilePath in transformParameterFiles:
            with open(tpFilePath, 'r') as tpFile:
                tpCont = tpFile.read()
            tpCont = tpCont.replace('(FinalBSplineInterpolationOrder 3)', '(FinalBSplineInterpolationOrder 1)')
            with open(tpFilePath, 'w') as tpFile:
                tpFile.write(tpCont)
        
        if args.atlas is not None:
            atlasOutDir = join(currOutDir, 'atlas')
            mkdir(atlasOutDir)
            trfxCmd = trfxExec+' -in '+args.atlas+' -out '+atlasOutDir+' -tp '+transformParameterFiles[-1]
            try:
                resampleLog = ccall(trfxCmd, shell=True, stderr=STDOUT)
            except CalledProcessError as err:
                writeAndDie(err.output, join(atlasOutDir, 'ERR.txt'))
Example #23
    def generate(self):
        self.resources.win_to_unix()
        source_files = []
        for r_type, n in CoIDE.FILE_TYPES.iteritems():
            for file in getattr(self.resources, r_type):
                source_files.append({"name": basename(file), "type": n, "path": file})
        header_files = []
        for r_type, n in CoIDE.FILE_TYPES2.iteritems():
            for file in getattr(self.resources, r_type):
                header_files.append({"name": basename(file), "type": n, "path": file})

        libraries = []
        for lib in self.resources.libraries:
            l, _ = splitext(basename(lib))
            libraries.append(l[3:])

        if self.resources.linker_script is None:
            self.resources.linker_script = ""

        ctx = {
            "name": self.program_name,
            "source_files": source_files,
            "header_files": header_files,
            "include_paths": self.resources.inc_dirs,
            "scatter_file": self.resources.linker_script,
            "library_paths": self.resources.lib_dirs,
            "object_files": self.resources.objects,
            "libraries": libraries,
            "symbols": self.get_symbols(),
        }
        target = self.target.lower()

        # Project file
        self.gen_file("coide_%s.coproj.tmpl" % target, ctx, "%s.coproj" % self.program_name)
 def test_validate_demux_file_infer(self):
     demux_fp, _, out_dir = self._generate_files({'s1': 'SKB2.640194',
                                                  's2': 'SKM4.640180'})
     prep_info = {"1.SKB2.640194": {"not_a_run_prefix": "s1"},
                  "1.SKM4.640180": {"not_a_run_prefix": "s2"},
                  "1.SKB3.640195": {"not_a_run_prefix": "s3"},
                  "1.SKB6.640176": {"not_a_run_prefix": "s4"}}
     files = {'preprocessed_demux': [demux_fp]}
     job_id = self._create_template_and_job(
         prep_info, files, "Demultiplexed")
     obs_success, obs_ainfo, obs_error = _validate_demux_file(
         self.qclient, job_id, prep_info, out_dir, demux_fp)
     self.assertTrue(obs_success)
     name = splitext(basename(demux_fp))[0]
     exp_fastq_fp = join(out_dir, "%s.fastq" % name)
     exp_fasta_fp = join(out_dir, "%s.fasta" % name)
     exp_demux_fp = join(out_dir, basename(demux_fp))
     filepaths = [
         (exp_fastq_fp, 'preprocessed_fastq'),
         (exp_fasta_fp, 'preprocessed_fasta'),
         (exp_demux_fp, 'preprocessed_demux')]
     exp = [ArtifactInfo(None, "Demultiplexed", filepaths)]
     self.assertEqual(obs_ainfo, exp)
     self.assertEqual(obs_error, "")
     with File(exp_demux_fp) as f:
         self.assertItemsEqual(f.keys(), ["1.SKB2.640194", "1.SKM4.640180"])
Example #25
 def rename_file(self, fname):
     """Rename file"""
     path, valid = QInputDialog.getText(self, _("Rename"), _("New name:"), QLineEdit.Normal, osp.basename(fname))
     if valid:
         path = osp.join(osp.dirname(fname), to_text_string(path))
         if path == fname:
             return
         if osp.exists(path):
             if (
                 QMessageBox.warning(
                     self,
                     _("Rename"),
                     _("Do you really want to rename <b>%s</b> and " "overwrite the existing file <b>%s</b>?")
                     % (osp.basename(fname), osp.basename(path)),
                     QMessageBox.Yes | QMessageBox.No,
                 )
                 == QMessageBox.No
             ):
                 return
         try:
             misc.rename_file(fname, path)
             self.parent_widget.renamed.emit(fname, path)
             return path
         except EnvironmentError as error:
             QMessageBox.critical(
                 self,
                 _("Rename"),
                 _("<b>Unable to rename file <i>%s</i></b>" "<br><br>Error message:<br>%s")
                 % (osp.basename(fname), to_text_string(error)),
             )
Example #26
def append_deps_rootpath(dep_modules_roots, search_depth=10):
    """
    Append all paths described in PackageInfo.dep_modules_roots to sys.path,
    so any module in the package can be run as a main entry point while still
    importing dependent modules of a dependent package in a simple scheme:
        'import <identifiable_package_root>.<sub>.<target_module>',
    which makes for a more intuitive use of module imports.

    Be sure this function is called from the main entry script with all outer
    dependent package names passed in the parameter 'dep_modules_roots'.
    """
    check_path = ospath.dirname(ospath.abspath(__file__))
    dep_dirs = []
    dep_remains = list(dep_modules_roots)
    for i in range(search_depth):
        check_path = ospath.dirname(check_path)
        check_name = ospath.basename(check_path)
        for dep_name in dep_remains:
            if dep_name == check_name:
                dep_dirs.append(check_path)
                dep_remains.remove(dep_name)
        if not dep_remains:
            break

    if dep_dirs:
        for dep_dir in dep_dirs:
            sys.path.append(dep_dir)
            print(BColors.BLUE
                  + "Append path of package:'{pkg}'".format(pkg=ospath.basename(dep_dir))
                  + " for package:'{name}' to sys.path as dependent modules root."
                    .format(name=PackageInfo.package_name)
                  + BColors.ENDC)
        return [name for name in dep_modules_roots if name not in dep_remains]
    else:
        return None
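
A hedged usage sketch (directory layout hypothetical): walking up from the defining file, the function appends each ancestor directory whose basename matches a requested dependency root.

# If the defining file lives at /work/dep_pkg/this_pkg/utils/bootstrap.py,
# the walk visits /work/dep_pkg/this_pkg and then /work/dep_pkg, so the
# ancestor named 'dep_pkg' is appended to sys.path.
appended = append_deps_rootpath(['dep_pkg'], search_depth=10)
print(appended)  # ['dep_pkg'] if found within 10 parent levels, else None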
Example #27
 def _load_rbo(self):
     """Load APC2015rbo dataset"""
     dataset_dir = osp.join(this_dir, 'dataset/APC2015rbo/berlin_samples')
     img_glob = osp.join(dataset_dir, '*_bin_[A-L].jpg')
     desc = 'rbo'
     for img_file in tqdm.tqdm(glob.glob(img_glob), ncols=80, desc=desc):
         basename = osp.splitext(osp.basename(img_file))[0]
         # apply mask, crop and save
         bin_mask_file = re.sub('.jpg$', '.pbm', img_file)
         bin_mask = imread(bin_mask_file, mode='L')
         where = np.argwhere(bin_mask)
         roi = where.min(0), where.max(0) + 1
         id_ = osp.join('rbo', basename)
         dataset_index = len(self.ids) - 1
         self.datasets['rbo'].append(dataset_index)
         mask_glob = re.sub('.jpg$', '_*.pbm', img_file)
         mask_files = [None] * self.n_class
         for mask_file in glob.glob(mask_glob):
             mask_basename = osp.splitext(osp.basename(mask_file))[0]
             label_name = re.sub(basename + '_', '', mask_basename)
             if label_name == 'shelf':
                 continue
             mask_files[self.target_names.index(label_name)] = mask_file
         self.ids.append(id_)
         self.rois.append(roi)
         self.img_files.append(img_file)
         self.mask_files.append(mask_files)
Example #28
def test_compare_triples():
    for mime, fext in MIME_TYPES.items():
        dump_path = path.join(DUMP_DIR, path.basename(mime))

        for url in URLs:
            if six.PY2:
                fname = '%s.%s' % (path.basename(urlparse.urlparse(url).path), fext)
            else:
                fname = '%s.%s' % (path.basename(urlparse(url).path), fext)

            fname = path.join(dump_path, fname)

            req = Request(url)
            req.add_header('Accept', mime)
            res = urlopen(req)

            g_fdp.parse(data=res.read(), format=mime)
            g_dump.parse(fname, format=mime)

            both, first, second = graph_diff(g_fdp, g_dump)
            n_first = len(first)
            # n_second = len(second)
            # n_both = len(both)

            assert_equals(
               n_first, 0, '{} triple(s) different from reference:\n\n{}===\n{}\n'.format(
                  n_first, first.serialize(format='turtle'), second.serialize(format='turtle')))
Example #29
    def __init__(self, images, delivery_types=None):
        """
        Parameters
        ----------
        images : iterable (list, tuple, etc)
            A sequence of paths to the image files.
        delivery_types : iterable, None
            If None (default), the image paths names must follow the `Naming Convention <http://pylinac.readthedocs.org/en/latest/vmat_docs.html#naming-convention>`_.
            If the image paths do not follow the naming convention, a 2-element string sequence for ``delivery_types`` must be passed in. E.g. ``['open', 'dmlc']``.
        """
        self.settings = Settings('', 1.5)

        # error checks
        if len(images) != 2:
            raise ValueError("Exactly 2 images (open, DMLC) must be passed")
        if delivery_types and len(delivery_types) != 2:
            raise ValueError("Delivery types must be 2 elements long")
        if delivery_types is None:
            delivery_types = []

        # walk over images and load the open and DMLC fields
        for img, deliv in zip_longest(images, delivery_types, fillvalue=''):
            if OPEN in img.lower() or OPEN == deliv.lower():
                self.image_open = image.load(img)
            elif DMLC in img.lower() or DMLC == deliv.lower():
                self.image_dmlc = image.load(img)
            else:
                raise ValueError("Image file names must follow the naming convention (e.g. 'DRGS_open.dcm'), or the delivery types must be passed explicitly")

        # try to determine test type
        if all(DRGS in osp.basename(img).lower() for img in images):
            self.settings.test_type = DRGS
        elif all(DRMLC in osp.basename(img).lower() for img in images):
            self.settings.test_type = DRMLC
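
A hedged usage sketch of the two accepted calling styles, assuming the enclosing class is pylinac's VMAT (file names below are made up):

# Names following the convention are classified by substring match:
vmat = VMAT(images=['DRGS_open.dcm', 'DRGS_dmlc.dcm'])
# Otherwise the delivery types are passed explicitly, in image order:
vmat = VMAT(images=['img1.dcm', 'img2.dcm'], delivery_types=['open', 'dmlc'])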
Example #30
def gmap_setup(gsnap_dir, out_dir, ref_fasta):
    ref_base = op.splitext(op.basename(ref_fasta))[0]
    ref_dir = op.dirname(ref_fasta)
    ref_name = op.basename(ref_base)
    # have to cd to the out_dir because gsnap writes to cwd.
    cmd = "set -e\n cd %(ref_dir)s && \n"
    cmd += "gmap_build"
    cmd += " -k 12 -D %(ref_dir)s -d %(ref_base)s %(ref_fasta)s > %(out_dir)s/gmap_build.log && "
    cmd += "\ncmetindex -d %(ref_base)s -F %(ref_dir)s > gmap_cmetindex.log 2> gmap_cmetindex.error.log"
    cmd %= locals()
    print >>sys.stderr, "[ command ] $", cmd
    cmd_last = op.join(out_dir, "ran_gsnap_setup.sh")
    rerun = False
    if not op.exists(cmd_last) or not is_up_to_date_b(ref_fasta, cmd_last) or not is_same_cmd(cmd, cmd_last):
        fh = open(cmd_last, "w")
        print >>fh, cmd
        fh.close()
        rerun = True
    elif is_up_to_date_b(ref_fasta, cmd_last) and not is_same_cmd(cmd, cmd_last):
        fh = open(cmd_last, "w")
        print >>fh, cmd
        fh.close()
        rerun = True
    # TODO: check time-stamp
    rerun = True
    if rerun:
        p = Popen(cmd.replace('\n', ' '), shell=True)
        print >>sys.stderr, "^ executing gmap/gsnap setup^"
        if p.wait() != 0:
            pass
    else:
        print >>sys.stderr, "gsnap setup stuff is up to date, re-using"
    return ref_base
Example #31
dir1= os.getcwd()


for root, dirs, files in os.walk(dir1):

    for file in files:

        if file.endswith('.csv'):

            #os.chdir(dir)

            with open(file) as f:
                csv_reader= csv.reader(f,delimiter=";")
                print dir1

                dir2 = dir1 + "\\" + basename(file)+"docs"
                print dir2

                os.mkdir(dir2)
                os.chdir(dir2)

                for row in csv_reader:
                       # urls=i
                       txt = []
                       #txt.append("url:")
                       #txt.append(str(row[0]))

                       #txt.append("\n")
                       try:

                           r = requests.get(row[0])
Example #32
#       Email:      [email protected]                                                           %
#       Homepage:   https://www.researchgate.net/profile/Thieu_Nguyen6                                  %
#       Github:     https://github.com/thieunguyen5991                                                  %
# -------------------------------------------------------------------------------------------------------%

from os.path import splitext, basename, realpath
from sklearn.model_selection import ParameterGrid
from models.main.traditional_rnn import Cnn1
from utils.IOUtil import _load_dataset__
from utils.Settings import *
from utils.Settings import cnn1_final as param_grid

if SPF_RUN_TIMES == 1:
    all_model_file_name = SPF_LOG_FILENAME
else:  # when run more than once (e.g. a stability test), name the log after this script, e.g. rnn1hl.csv
    all_model_file_name = str(splitext(basename(realpath(__file__)))[0])


def train_model(item):
    root_base_paras = {
        "data_original": dataset,
        "train_split": SPF_TRAIN_SPLIT,  # should use the same in all test
        "data_window": data_window,  # same
        "scaling": SPF_SCALING,  # minmax or std
        "feature_size": SPF_FEATURE_SIZE,  # same, usually : 1
        "network_type": SPF_3D_NETWORK,  # RNN-based: 3D, others: 2D
        "n_runs": SPF_RUN_TIMES,  # 1 or others
        "log_filename": all_model_file_name,
        "path_save_result": SPF_PATH_SAVE_BASE + SPF_DATA_FILENAME[loop] + "/",
        "draw": SPF_DRAW,
        "log": SPF_LOG
Example #33
 def get_object_id(self):
     return basename(self.__path), self.get_current_version()
Example #34
def main():
    warnings.warn("This script is aimed to demonstrate how to convert the\n"
                  "JSON file to a single image dataset, and not to handle\n"
                  "multiple JSON files to generate a real-use dataset.")

    parser = argparse.ArgumentParser()
    parser.add_argument('json_file')
    parser.add_argument('-o', '--out', default=None)
    args = parser.parse_args()

    json_file = args.json_file

    alist = os.listdir(json_file)

    for i in range(0, len(alist)):
        path = os.path.join(json_file, alist[i])
        data = json.load(open(path))

        out_dir = osp.basename(path).replace('.', '_')
        out_dir = osp.join(osp.dirname(path), out_dir)

        if not osp.exists(out_dir):
            os.mkdir(out_dir)

        if data['imageData']:
            imageData = data['imageData']
        else:
            imagePath = os.path.join(os.path.dirname(path), data['imagePath'])
            with open(imagePath, 'rb') as f:
                imageData = f.read()
                imageData = base64.b64encode(imageData).decode('utf-8')

        img = utils.img_b64_to_arr(imageData)

        label_name_to_value = {'_background_': 0}
        for shape in data['shapes']:
            label_name = shape['label']
            if label_name in label_name_to_value:
                label_value = label_name_to_value[label_name]
            else:
                label_value = len(label_name_to_value)
                label_name_to_value[label_name] = label_value

        # label_values must be dense
        label_values, label_names = [], []
        for ln, lv in sorted(label_name_to_value.items(), key=lambda x: x[1]):
            label_values.append(lv)
            label_names.append(ln)
        assert label_values == list(range(len(label_values)))

        lbl = utils.shapes_to_label(img.shape, data['shapes'], label_name_to_value)

        captions = ['{}: {}'.format(lv, ln)
                    for ln, lv in label_name_to_value.items()]
        lbl_viz = utils.draw_label(lbl, img, captions)

        PIL.Image.fromarray(img).save(osp.join(out_dir, 'img.png'))
        utils.lblsave(osp.join(out_dir, 'label.png'), lbl)
        PIL.Image.fromarray(lbl_viz).save(osp.join(out_dir, 'label_viz.png'))

        with open(osp.join(out_dir, 'label_names.txt'), 'w') as f:
            for lbl_name in label_names:
                f.write(lbl_name + '\n')

        warnings.warn('info.yaml is being replaced by label_names.txt')
        info = dict(label_names=label_names)
        with open(osp.join(out_dir, 'info.yaml'), 'w') as f:
            yaml.safe_dump(info, f, default_flow_style=False)

        print('Saved to: %s' % out_dir)
Example #35
    {
        'dbtag':'Vb',
        'unit':'V',
        'description':'Battery voltage',
        'lb':3.7,
        'ub':5.5,
        'interval':60*60,
    },
    {
        'dbtag':'SoC',
        'unit':'%',
        'description':'State of Charge',
        'lb':30,    # more like a warning than a valid range check
        'ub':100,
        'interval':60*60,
    },
]


if '__main__' == __name__:
    for c in conf:
        print('- - -')
        for k, v in c.items():
            print(k, ':' ,v)

    import sys
    sys.path.append('../..')
    from os.path import basename
    from storage.storage2 import create_table
    create_table(conf, basename(__file__).split('.')[0].replace('_', '-'))
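
The table name is derived from the configuration module's own file name. What that expression yields for a hypothetical file:

from os.path import basename

print(basename('/conf/node_3_power.py').split('.')[0].replace('_', '-'))
# node-3-power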
Example #36
from os import listdir
from os.path import basename, dirname

# Import all the game managers

__all__ = list()
for f in listdir(dirname(__file__)):
    if f[-3:] == '.py' and not f.endswith('__init__.py'):
        __all__.append(basename(f)[:-3])
Example #37
                resampler = param
            else:
                log("Invalid value for resampler, ignoring")
        elif argname in ("--out-sample-rate", "-out-sample-rate"):
            if (isint(param)):
                outsamplerate = int(param)
            else:
                log("Invalid value for output sample rate, ignoring")
        elif argname in ("--filter-append", "-filter-append"):
            filter_append = param
        elif isdir(argname):
            path = abspath(argname)
            break
        elif isfile(argname):
            concatfile = realpath(argname)
            convfile = "binaural_%s" % basename(concatfile)
            wdir = dirname(concatfile)
            doconcat = False
            domakecue = False
            dosplit = False
            log("Enabling single file mode for '{filename}' (output filename: {convfile})"
                .format(filename=argname, convfile=convfile))
        else:
            log("Unknown argument: %s" % arg)

    if path is None:
        path = abspath(".")

    if (doconcat or dovolgain or dobconv) and not which(ffmpeg):
        fatal("Wrong FFmpeg path: %s" % ffmpeg)
Example #38
def primaryName(fileName):
    return path.basename(fileName).split(".")[0]
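
Splitting at the first dot differs from os.path.splitext for multi-dot names; a quick comparison (path is illustrative):

from os import path

print(primaryName('/tmp/archive.tar.gz'))                      # archive
print(path.splitext(path.basename('/tmp/archive.tar.gz'))[0])  # archive.tar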
Example #39
    if not line:
        return None
    return tuple(map(str.strip, line.split(';')))


def parse_file(file):
    entries = [
        entry for entry in (parse_entry(line) for line in file)
        if entry is not None
    ]
    return entries


ualtest = sys.argv[1]
test_path = sys.argv[2]
test_name = path.basename(test_path)

with open(test_path, 'r', encoding='utf-8') as file:
    test_cases = parse_file(file)


def run_ualtest(text, *args):

    # Run command.
    command = [ualtest]
    command.extend(args)
    result = subprocess.run(command,
                            input=text,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
Example #40
def getNameWithoutExtension(MovieNameWithExtension):
    # splitext handles the no-extension case, where slicing off len(ext)
    # characters would return an empty string.
    return path.splitext(MovieNameWithExtension)[0]
Example #41
                uid = '%s_%s_%s@git2ical' % \
                      (md5(author)[0:8], md5(day)[0:8], repo_hash[0:8])
                add_event(summary, day, desc, uid)
    else:
        for day in daydata:
            summary = u'%s: %d' % \
                      (repo_name, sum(map(len, daydata[day].itervalues())))
            subjects = []
            authors = daydata[day].keys()
            authors.sort()
            for author in authors:
                subjects.extend(['', u'%s:' % author])
                commits = daydata[day][author]
                subjects.extend(map(lambda c: u'— %s' % c.subject, commits))
            desc = u'\r\n'.join(subjects)
            uid = '%s_%s@git2ical' % (md5(day)[0:8], repo_hash[0:8])
            add_event(summary, day, desc, uid)

    return cal


if __name__ == '__main__':
    split_authors, last_days, repo, outfile = parse_cmdline()
    repo_hash = md5(repo)
    repo_name = basename(outfile).rsplit('.', 1)[0]
    daydata = load_commits(repo, last_days)
    cal = make_cal(daydata, split_authors, repo_name, repo_hash)
    f = open(outfile, 'w')
    f.write(cal.as_string())
    f.close()
Example #42
def run(config, MED_DATA_list):
    bound_lick_counter = 0
    color_counter = 0
    plot_total_counter = []
    bout_counter = 0

    for name, info in config.experiments.items():
        dir_name = path.join(config.path, info['folder'])
        experiment_files = listdir(dir_name)

        MED_DATA = [data for data in MED_DATA_list
                    if path.basename(data.file) in experiment_files]
        if not MED_DATA:
            continue
        if 'raster' in info:
            signal_in_trial = []
            for data in MED_DATA:
                res = raster(data, **info['raster'])
                signal_in_trial.append(res / (len(data.Licks) if hasattr(data, 'Licks') else 1))
        if 'raster psth' in info:
            signal_in_trial = []
            for data in MED_DATA:
                res = raster(data, **info['raster psth'])
                signal_in_trial.append(res / (len(data.Licks) if hasattr(data, 'Licks') else 1))
        if 'ordered raster' in info:
            m = len(MED_DATA)
            n = max(len(info['ordered raster'].get('valve', 'Sucrose')) for data in MED_DATA)
            label_x = info['ordered raster']['labels']
            if isinstance(label_x, str):
                label_x = [label_x]
            assert len(label_x) == n, f"{len(label_x)} labels doesn't match the number of signals ({n})"

            signal_per_conc = np.zeros(n, dtype=float)

            for data in MED_DATA:
                res = ordered_raster_PSTH(data, **info['ordered raster'])
                # print(res)
                if len(res) < n:
                    tmp = np.zeros(n, dtype=float)
                    # tmp[:len(res)] = res
                    res = tmp
                signal_per_conc += res
            signal_per_conc /= m
            if info['ordered raster'].get('average_licks'):
                plt.figure('Average rewarded licks')
                plt.title('Average rewarded licks')
                plt.bar(range(n), signal_per_conc, tick_label=label_x, color=background_color)
            # plt.ylim([0, max(signal_per_conc)+5])

        if 'trials_per_day' in info:
            # sort MED_DATA by mouse in a dictionary
            mouse_data = group_by_mouse(MED_DATA)
            trials_per_day(mouse_data)

        if 'close loop' in info:
            mouse_data = group_by_mouse(MED_DATA)
            bound_licks = {}
            for mouse, data_list in mouse_data.items():
                bound_licks[mouse] = {}
                for data in data_list:
                    res = raster(data, **info['close loop'])
                    if hasattr(data, 'Licks'):
                        index = res / len(data.Licks)
                    else:
                        index = 0
                    bound_licks[mouse][data.start_date] = index

            plt.figure(f'Bound Licks')
            for mouse, licks_with_laser in bound_licks.items():
                licks = [licks for date, licks in sorted(licks_with_laser.items())]

                i = bound_lick_counter + 1
                days = len(licks)
                plt.plot(range(i, i + days), licks, marker='o', label=f'{mouse} {name}')
                bound_lick_counter += days

            plt.xlabel('Day')
            plt.ylabel('Licks in laser period / Total licks')
            plt.legend(loc=0)
            plt.title('Bound Licks')

        if 'open loop' in info:
            mouse_data = group_by_mouse(MED_DATA)
            bound_licks = {}
            for mouse, data_list in mouse_data.items():
                bound_licks[mouse] = {}
                for data in data_list:
                    res = open_loop(data, **info['open loop'])
                    if hasattr(data, 'Licks'):
                        index = res / len(data.Licks)
                    else:
                        index = 0
                    bound_licks[mouse][data.start_date] = index

            plt.figure(f'Bound Licks')
            for mouse, licks_with_laser in bound_licks.items():
                licks = [licks for date, licks in sorted(licks_with_laser.items())]
                i = bound_lick_counter + 1
                days = len(licks)
                plt.plot(range(i, i + days), licks, marker='o', label=f'{mouse} {name}')
                bound_lick_counter += days

            plt.xlabel('Day')
            plt.ylabel('Licks in laser period / Total licks')
            plt.legend(loc=0)
            plt.title('Bound Licks')

        if 'open loop color task' in info:
            mouse_data = group_by_mouse(MED_DATA)
            bound_licks = {}
            for mouse, data_list in mouse_data.items():
                bound_licks[mouse] = {}
                for data in data_list:
                    res = open_loop_color_task(data, **info['open loop color task'])
                    if hasattr(data, 'Licks'):
                        index = res / len(data.Licks)
                    else:
                        index = 0
                    bound_licks[mouse][data.start_date] = index

            plt.figure(f'Bound Licks')
            for mouse, licks_with_laser in bound_licks.items():
                licks = [licks for date, licks in sorted(licks_with_laser.items())]
                i = bound_lick_counter + 1
                days = len(licks)
                plt.plot(range(i, i + days), licks, marker='o', label=f'{mouse} {name}')
                bound_lick_counter += days

            plt.xlabel('Day')
            plt.ylabel('Licks in laser period / Total licks')
            plt.legend(loc=0)
            plt.title('Bound Licks')

        if 'spike raster psth' in info:
            for data in MED_DATA:
                spike_raster_PSTH(data, **info['spike raster psth'])

        if 'plot' in info:
            plot = info['plot']
            if isinstance(plot, str):
                plot = [plot]
            for n, signal in enumerate(plot):


                plt.figure(f'Total {signal}')
                plt.title(MED_DATA[-1].subject)
                label = name
                # color = plot_colors[color_counter]

                y = [len(getattr(data, signal)) if hasattr(data, signal) else 0
                     for data in MED_DATA]

                try:
                    i = plot_total_counter[n]
                except IndexError:
                    plot_total_counter.append(0)
                    i = 0
                i += 1
                days = len(y)
                x = range(i, i + days)
                plt.bar(x, y, label=label)
                plt.xlabel('Day')
                plt.ylabel(signal)
                plt.legend()

                # color_counter += 1
                plot_total_counter[n] += days

        if 'bouts' in info:
            signal = info['bouts']

            y = [get_bouts(getattr(data, signal)) if hasattr(data, signal) else 0
                 for data in MED_DATA]

            plt.figure('Bouts')
            plt.title(MED_DATA[-1].subject)

            i = bout_counter + 1
            days = len(y)
            x = range(i, i + days)
            plt.bar(x, y, label=name)
            plt.xlabel('Day')
            plt.ylabel(f'Bouts of {signal}')
            plt.legend()

            bout_counter += days

    plt.show()
    return
Example #43
0
from os import listdir
from os.path import dirname, basename


# Automatically make every module inside the commands package importable. Magic! :D
__all__ = [basename(f)[:-3] for f in listdir(dirname(__file__))
           if f.endswith(".py") and not f.endswith("__init__.py")]
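
A minimal sketch of how this pattern is typically consumed (the commands
package name and its module files are hypothetical, for illustration only):
with the line above in commands/__init__.py, a star-import pulls in every
sibling module, and a command registry can introspect them via __all__.

# commands/foo.py and commands/bar.py each define a command class.
from commands import *          # imports commands.foo, commands.bar, ...

import commands, importlib
mods = [importlib.import_module('commands.' + m) for m in commands.__all__]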
Example #44
0
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    if args.options is not None:
        cfg.merge_from_dict(args.options)

    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True

    # work_dir is determined in this priority: CLI > segment in file > filename
    if args.work_dir is not None:
        # update configs according to CLI args if args.work_dir is not None
        cfg.work_dir = args.work_dir
    elif cfg.get('work_dir', None) is None:
        # use config filename as default work_dir if cfg.work_dir is None
        cfg.work_dir = osp.join('./work_dirs',
                                osp.splitext(osp.basename(args.config))[0])
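        # e.g. (illustrative) args.config = 'configs/faster_rcnn_r50.py'
        # yields cfg.work_dir = './work_dirs/faster_rcnn_r50'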
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    if args.gpu_ids is not None:
        cfg.gpu_ids = args.gpu_ids
    else:
        cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)

    if args.autoscale_lr:
        # apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
        cfg.optimizer['lr'] = cfg.optimizer['lr'] * len(cfg.gpu_ids) / 8
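        # worked example (illustrative): a base lr of 0.02 tuned for 8 GPUs
        # becomes 0.02 * 4 / 8 = 0.01 when cfg.gpu_ids has 4 entries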

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # create work_dir
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    # init the logger before other steps
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)

    # add a logging filter; a LogRecord has no .find() method, so match on the
    # formatted message to avoid an AttributeError when the filter runs
    logging_filter = logging.Filter('mmdet')
    logging_filter.filter = lambda record: record.getMessage().find('mmdet') != -1

    # init the meta dict to record some important information such as
    # environment info and seed, which will be logged
    meta = dict()
    # log env info
    env_info_dict = collect_env()
    env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])
    dash_line = '-' * 60 + '\n'
    logger.info('Environment info:\n' + dash_line + env_info + '\n' +
                dash_line)
    meta['env_info'] = env_info

    # log some basic info
    logger.info(f'Distributed training: {distributed}')
    logger.info(f'Config:\n{cfg.pretty_text}')

    # set random seeds
    if args.seed is not None:
        logger.info(f'Set random seed to {args.seed}, '
                    f'deterministic: {args.deterministic}')
        set_random_seed(args.seed, deterministic=args.deterministic)
    cfg.seed = args.seed
    meta['seed'] = args.seed

    model = build_detector(cfg.model,
                           train_cfg=cfg.get('train_cfg'),
                           test_cfg=cfg.get('test_cfg'))

    logger.info(f'Model:\n{model}')
    datasets = [build_dataset(cfg.data.train)]
    if len(cfg.workflow) == 2:
        val_dataset = copy.deepcopy(cfg.data.val)
        val_dataset.pipeline = cfg.data.train.pipeline
        datasets.append(build_dataset(val_dataset))
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                          config=cfg.pretty_text,
                                          CLASSES=datasets[0].CLASSES)
    # add an attribute for visualization convenience
    model.CLASSES = datasets[0].CLASSES
    train_detector(model,
                   datasets,
                   cfg,
                   distributed=distributed,
                   validate=(not args.no_validate),
                   timestamp=timestamp,
                   meta=meta)
Example #45
0
def test_type1_raises(path, tmpdir):
    out = str(tmpdir / basename(path)) + ".out"
    options = Options(path, out)
    with pytest.raises(FontParseError):
        hintFiles(options)
Example #46
0
    def create_package(self, filename, debug=False):
        tmp_dir = temp_file.make_temp_dir(delete=not debug)
        if debug:
            print('tmp_dir: %s' % (tmp_dir))
        stage_dir = path.join(tmp_dir, 'stage')
        files_dir = path.join(stage_dir, 'files')
        env_files_dir = path.join(stage_dir, 'env')
        file_util.mkdir(files_dir)
        file_util.mkdir(env_files_dir)
        temp_file.write_temp_files(files_dir, self.files)
        temp_file.write_temp_files(env_files_dir, self.env_files)

        tmp_compiler_dir = path.join(tmp_dir, 'objects')

        cc = compiler(build_target.make_host_build_target())

        include_path = []
        lib_path = []

        static_c_libs = self.objects.get('static_c_libs', [])
        for static_c_lib in static_c_libs:
            sources, headers = static_c_lib.write_files(tmp_compiler_dir)
            include_dir = path.join(tmp_compiler_dir, static_c_lib.filename,
                                    'include')
            lib_dir = path.join(tmp_compiler_dir, static_c_lib.filename)
            include_path.append(include_dir)
            lib_path.append(lib_dir)
            cflags = ['-I%s' % (include_dir)]
            targets = cc.compile_c([source.path for source in sources],
                                   cflags=cflags)
            lib_filename = path.join(tmp_compiler_dir, static_c_lib.filename,
                                     path.basename(static_c_lib.filename))
            lib = cc.make_static_lib(lib_filename,
                                     [target.object for target in targets])
            file_util.copy(lib, path.join(files_dir, static_c_lib.filename))
            for header in headers:
                file_util.copy(header.path,
                               path.join(files_dir, header.filename))

        shared_c_libs = self.objects.get('shared_c_libs', [])
        for shared_c_lib in shared_c_libs:
            sources, headers = shared_c_lib.write_files(tmp_compiler_dir)
            include_dir = path.join(tmp_compiler_dir, shared_c_lib.filename,
                                    'include')
            lib_dir = path.join(tmp_compiler_dir, shared_c_lib.filename)
            include_path.append(include_dir)
            lib_path.append(lib_dir)
            cflags = ['-I%s' % (include_dir)]
            targets = cc.compile_c([source.path for source in sources],
                                   cflags=cflags)
            lib_filename = path.join(tmp_compiler_dir, shared_c_lib.filename,
                                     path.basename(shared_c_lib.filename))
            lib = cc.make_shared_lib(lib_filename,
                                     [target.object for target in targets])
            file_util.copy(lib, path.join(files_dir, shared_c_lib.filename))
            for header in headers:
                file_util.copy(header.path,
                               path.join(files_dir, header.filename))

        c_programs = self.objects.get('c_programs', [])
        for c_program in c_programs:
            sources, headers = c_program.write_files(tmp_compiler_dir)
            include_dir = path.join(tmp_compiler_dir, c_program.filename,
                                    'include')
            lib_dir = path.join(tmp_compiler_dir, c_program.filename)
            cflags = ['-I%s' % (include_dir)]
            cflags += ['-I%s' % (inc) for inc in include_path]
            ldflags = ['-L%s' % (lib_dir)]
            ldflags += ['-L%s' % (lib) for lib in lib_path]
            ldflags += c_program.ldflags or []
            targets = cc.compile_c([source.path for source in sources],
                                   cflags=cflags)
            exe_filename = path.join(tmp_compiler_dir, c_program.filename,
                                     path.basename(c_program.filename))
            exe = cc.link_exe(exe_filename,
                              [target.object for target in targets],
                              ldflags=ldflags)
            file_util.copy(exe, path.join(files_dir, c_program.filename))

        pkg_desc = package_descriptor(self.metadata.name,
                                      self.metadata.build_version,
                                      properties=self.properties,
                                      requirements=self.requirements)
        return package.create_package(filename, pkg_desc,
                                      self.metadata.build_target, stage_dir)
Example #47
0
def test_counter_glyphs(tmpdir):
    path = "%s/dummy/font.ufo" % DATA_DIR
    out = str(tmpdir / basename(path)) + ".out"
    options = Options(path, out)
    options.vCounterGlyphs = ["m", "M", "T"]
    hintFiles(options)
Example #48
0
if args.uninstall:
    call(["rm", "-f", join(ccls_install_dir, "bin", "ccls")])

chdir(homefolder)
# Install packages only if needed
for pac in packages_for_build:
    if not apt_cache[pac].is_installed:
        print("Needs to install packages for building CCLS")
        call(["sudo", "apt-get", "install", "-y", *packages_for_build])
        break

if args.clean:
    rmtree(ccls_dir)

if args.build:
    if not exists(ccls_dir):
        call([
            "git", "clone", "--depth=1", "--recursive",
            "https://github.com/MaskRay/ccls",
            basename(ccls_dir)
        ])
    else:
        call(["git", "-C", ccls_dir, "pull"])

    chdir(ccls_dir)
    call([
        "cmake", "-H.", "-BRelease", "-DCMAKE_BUILD_TYPE=Release",
        "-DCMAKE_INSTALL_PREFIX={}".format(ccls_install_dir)
    ])
    call(["cmake", "--build", "Release", "--target", "install", "--parallel"])
Example #49
0
from os.path import basename, dirname
from pybenzinaconcat.utils.jugutils import jug_main


def _main():
    from pybenzinaconcat.index_metadata.index_metadata import main
    main()


jug_main(_main, __file__, basename(dirname(__file__)))
Example #50
0
def test_ufo(ufo, tmpdir):
    out = str(tmpdir / basename(ufo))
    options = Options(ufo, out)
    hintFiles(options)

    assert differ([ufo, out])
Example #51
0
    def copy_resources(self, *resources, use_symlinks=False):
        for path in resources:
            resource_dest = op.join(self.resources, op.basename(path))
            action = symlink if use_symlinks else copy
            action(op.abspath(path), resource_dest)
Example #52
0
def test_round_trip(tmpdir):
    path_config = op.join(this_folder, '..', 'book_template', '_config.yml')
    path_out = op.join(tmpdir.dirpath(), 'tmp_test')

    # Custom CSS and JS code
    path_js = op.join(path_test_book, "my_js.js")
    path_css = op.join(path_test_book, "my_css.css")
    # Run the create command
    new_name = "test"
    new_book(path_out=op.join(path_out, new_name),
             config=path_config,
             toc=path_toc,
             content_folder=path_content,
             custom_js=path_js,
             custom_css=path_css,
             extra_files=[
                 op.join(path_test_book, 'foo', 'baz.txt'),
                 op.join(path_test_book, 'foo', 'you')
             ],
             license=path_license)

    # Table of contents
    old_toc = read(path_toc)
    new_toc = read(op.join(path_out, new_name, '_data', 'toc.yml'))
    assert old_toc == new_toc

    # Config files
    with open(path_config, 'r') as ff:
        old_config = yaml.safe_load(ff)
    with open(op.join(path_out, new_name, '_config.yml'), 'r') as ff:
        new_config = yaml.safe_load(ff)

    for ii in old_config.keys():
        if ii not in ["jupyter_book_version"]:
            assert old_config[ii] == new_config[ii]

    # License
    old_license = read(path_license)
    new_license = read(op.join(path_out, 'test', 'content', 'LICENSE.md'))
    assert old_license == new_license

    # Content
    for ifolder, _, ifiles in os.walk(path_content):
        for ifile in ifiles:
            basename = op.basename(ifile)
            # Only check the text files we care about since reading in other files is trickier
            if 'LICENSE.md' in basename or all(
                    ii not in basename for ii in ['.md', '.ipynb', '.html']):
                continue

            old_content = read(op.join(ifolder, ifile))
            new_content = read(
                op.join(path_out, 'test', 'content', ifolder, basename))
            assert old_content == new_content

    # CSS and JS
    assert file_contents_equal(
        path_js, op.join(path_out, "test", "assets", "custom", "custom.js"))
    assert file_contents_equal(
        path_css, op.join(path_out, "test", "assets", "custom", "custom.css"))

    # Extra files
    assert op.exists(op.join(path_out, "test", "baz.txt"))
    assert op.exists(op.join(path_out, "test", "you", "bar.txt"))

    # This should raise an error because the folder exists now
    with pytest.raises(CalledProcessError):
        cmd = [
            "jupyter-book", "create", new_name, "--config", path_config,
            "--toc", path_toc, "--content-folder", path_content, "--license",
            path_license, "--out-folder", path_out
        ]
        run(cmd, check=True)

    # If we succeed, remove the tmpdir
    tmpdir.remove()
Example #53
0
def open(init=None, extensions=None):
    """
    Creates a DataModel from a number of different types

    Parameters
    ----------

    init : shape tuple, file path, file object, astropy.io.fits.HDUList, numpy array, dict, None

        - None: A default data model with no shape

        - shape tuple: Initialize with empty data of the given shape

        - file path: Initialize from the given file (FITS, JSON or ASDF)

        - readable file object: Initialize from the given file object

        - astropy.io.fits.HDUList: Initialize from the given
          `~astropy.io.fits.HDUList`

        - A numpy array: A new model with the data array initialized
          to what was passed in.

        - dict: The object model tree for the data model

    extensions : list of AsdfExtension
        A list of extensions to the ASDF to support when reading
        and writing ASDF files.

    Returns
    -------

    model : DataModel instance
    """
    from astropy.io import fits

    if init is None:
        return DataModel(None)
    # Send _asn.json files to ModelContainer; avoid shape "cleverness" below
    elif (isinstance(init, six.string_types)
          and basename(init).split('.')[0].split('_')[-1] == 'asn'):
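        # e.g. (illustrative) 'jw0001_asn.json': split('.')[0] gives
        # 'jw0001_asn', whose split('_')[-1] is 'asn', so it is routed here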
        try:
            m = ModelContainer(init, extensions=extensions)
        except Exception:
            raise TypeError("init ASN not valid for ModelContainer")
        return m
    elif isinstance(init, DataModel):
        # Copy the object so it knows not to close here
        return init.__class__(init)
    elif isinstance(init, tuple):
        for item in init:
            if not isinstance(item, int):
                raise ValueError("shape must be a tuple of ints")
        shape = init
    elif isinstance(init, np.ndarray):
        shape = init.shape
    else:
        if isinstance(init, (six.text_type, bytes)) or hasattr(init, "read"):
            hdulist = fits.open(init)
        elif isinstance(init, fits.HDUList):
            hdulist = init
        else:
            raise TypeError("init must be None, shape tuple, file path, "
                            "readable file object, or astropy.io.fits.HDUList")

        shape = ()
        try:
            hdu = hdulist[(fits_header_name('SCI'), 1)]
        except KeyError:
            pass
        else:
            if hasattr(hdu, 'shape'):
                shape = hdu.shape

    # Here, we try to be clever about which type to
    # return, otherwise, just return a new instance of the
    # requested class
    if len(shape) == 0:
        new_class = DataModel
    elif len(shape) == 4:
        # It's a RampModel, MIRIRampModel, or QuadModel
        try:
            dqhdu = hdulist[fits_header_name('DQ')]
        except KeyError:
            # It's a RampModel or MIRIRampModel
            try:
                refouthdu = hdulist[fits_header_name('REFOUT')]
            except KeyError:
                # It's a RampModel
                from . import ramp
                new_class = ramp.RampModel
            else:
                # It's a MIRIRampModel
                from . import miri_ramp
                new_class = miri_ramp.MIRIRampModel
        else:
            # It's a QuadModel
            from . import quad
            new_class = quad.QuadModel
    elif len(shape) == 3:
        # It's a CubeModel
        from . import cube
        new_class = cube.CubeModel
    elif len(shape) == 2:
        try:
            hdu = hdulist[(fits_header_name('SCI'), 2)]
        except (KeyError, NameError):
            # It's an ImageModel
            from . import image
            new_class = image.ImageModel
        else:
            # It's a MultiSlitModel
            from . import multislit
            new_class = multislit.MultiSlitModel
    else:
        raise ValueError("Don't have a DataModel class to match the shape")

    return new_class(init, extensions=extensions)
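
A usage sketch of the dispatch above (file names are illustrative, not from
the source): each kind of init routes to a different model class.

# dm = open()                      # default DataModel with no shape
# dm = open((2048, 2048))          # empty data of the given shape
# dm = open('obs1_cal.fits')       # 2-D SCI extension -> ImageModel
# dm = open('obs1_asn.json')       # '*_asn.*' name -> ModelContainer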
Example #54
0
    def copy_headers(self, *headers, use_symlinks=False):
        for path in headers:
            header_dest = op.join(self.headers, op.basename(path))
            action = symlink if use_symlinks else copy
            action(op.abspath(path), header_dest)
Example #55
0
"""
Atomate GUI
"""

from os.path import dirname, basename, isfile, join
import glob
modules = glob.glob(join(dirname(__file__), "*.py"))
__all__ = [
    basename(f)[:-3] for f in modules
    if isfile(f) and not f.endswith('__init__.py')
]

__version__ = "0.1"
Example #56
0
    def copy_frameworks(self, *frameworks):
        for path in frameworks:
            framework_dest = op.join(self.frameworks, op.basename(path))
            copy(path, framework_dest)
Example #57
0
from os.path import dirname, basename, isfile
import glob
modules = glob.glob(dirname(__file__) + "/*.py")
__all__ = [basename(f)[:-3] for f in modules if isfile(f)
           and not f.endswith('__init__.py')]
Example #58
0
    def start(self, wdir=None, args=None, pythonpath=None):
        filename = to_text_string(self.filecombo.currentText())
        if wdir is None:
            wdir = self._last_wdir
            if wdir is None:
                wdir = osp.basename(filename)
        if args is None:
            args = self._last_args
            if args is None:
                args = []
        if pythonpath is None:
            pythonpath = self._last_pythonpath
        self._last_wdir = wdir
        self._last_args = args
        self._last_pythonpath = pythonpath

        self.datelabel.setText(_('Profiling, please wait...'))

        self.process = QProcess(self)
        self.process.setProcessChannelMode(QProcess.SeparateChannels)
        self.process.setWorkingDirectory(wdir)
        self.process.readyReadStandardOutput.connect(self.read_output)
        self.process.readyReadStandardError.connect(
            lambda: self.read_output(error=True))
        self.process.finished.connect(
            lambda ec, es=QProcess.ExitStatus: self.finished(ec, es))
        self.stop_button.clicked.connect(self.process.kill)

        if pythonpath is not None:
            env = [
                to_text_string(_pth)
                for _pth in self.process.systemEnvironment()
            ]
            add_pathlist_to_PYTHONPATH(env, pythonpath)
            processEnvironment = QProcessEnvironment()
            for envItem in env:
                envName, separator, envValue = envItem.partition('=')
                processEnvironment.insert(envName, envValue)
            self.process.setProcessEnvironment(processEnvironment)

        self.output = ''
        self.error_output = ''

        p_args = ['-m', 'cProfile', '-o', self.DATAPATH]
        if os.name == 'nt':
            # On Windows, one has to replace backslashes by slashes to avoid
            # confusion with escape characters (otherwise, for example, '\t'
            # will be interpreted as a tabulation):
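            # e.g. (illustrative) osp.normpath('C:/tmp/test.py') gives
            # 'C:\\tmp\\test.py' on Windows; the replace below maps it back
            # to 'C:/tmp/test.py', avoiding escapes like '\t'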
            p_args.append(osp.normpath(filename).replace(os.sep, '/'))
        else:
            p_args.append(filename)
        if args:
            p_args.extend(shell_split(args))
        executable = sys.executable
        if executable.endswith("spyder.exe"):
            # py2exe distribution
            executable = "python.exe"
        self.process.start(executable, p_args)

        running = self.process.waitForStarted()
        self.set_running_state(running)
        if not running:
            QMessageBox.critical(self, _("Error"),
                                 _("Process failed to start"))
Example #59
0
def load_datas(rots=None, add_rand=None, add_crop=None, eps=0.1, sample=None,
               norm=False, **kwargs):
    from glob import glob
    from PIL import Image
    if rots is None:
        rots = [0, 90, 180, 270]
    poss = list(glob(path.join(curpth, 'datas', 'pos', '*')))
    negs = list(glob(path.join(curpth, 'datas', 'neg', '*')))
    # label-smoothed targets: positives [eps, 1-eps], negatives [1-eps, eps]
    allimg = [(i, [eps, 1 - eps]) for i in poss] + [(j, [1 - eps, eps]) for j in negs]

    az = list(range(len(allimg)))
    # deterministic order: sort by a short hash of each file path
    allimg.sort(key=lambda x: base32(x[0], length=3))
    le = len(az)
    # hold out roughly 1/15 of the images as the test split
    data = az[le // 15:]
    test = az[:le // 15]
    if sample is not None:
        data = random.sample(data, sample)
    _xdata = []
    _ydata = []
    _xtest = []
    _ytest = []
    S = dict()  # dedup map: content key -> first path seen
    prog = 0
    tm = time.time()
    ims = kwargs.get('img_siz', img_siz)
    tot = len(data) * len(rots) + len(test)
    for pth, y in [allimg[i] for i in data]:
        try:
            prog += len(rots)
            print('loaded %.2f%%, %.1f sec remaining'
                  % (prog * 100 / tot, (time.time() - tm) / prog * (tot - prog)),
                  end='\r')

            im = None
            # dedup key: parent directory name plus a short hash of the path
            h = path.basename(path.dirname(pth)) + base32(pth, length=16)
            if h in S:
                # duplicate of an already-seen image: drop this copy
                print(pth, '==', S[h])
                try:
                    os.remove(pth)
                except Exception as e:
                    print(e)
                continue
            S[h] = pth
            for r in rots:
                svname = "file=%s,rot=%d,size=%d.npy" % (h, r, ims)
                svpth = path.join(curpth, 'npy', svname)
                if path.exists(svpth):
                    # reuse the cached preprocessed array
                    arr = np.load(svpth)
                else:
                    if im is None:
                        im = Image.open(pth)
                    im1 = im.rotate(r, expand=True)
                    arr = img2input(im1, **kwargs)
                    np.save(svpth, arr)
                _xdata.append(arr)
                _ydata.append(y)
        except Exception as e:
            print(pth, e)
            continue
    #for i in random.sample(list(negs),70):
    for pth, y in [allimg[i] for i in test]:
        try:
            prog += 1
            print('loaded %.2f%%, %.1f sec remaining'
                  % (prog * 100 / tot, (time.time() - tm) / prog * (tot - prog)),
                  end='\r')
            h = path.basename(path.dirname(pth)) + base32(pth, length=16)
            svname = "file=%s,rot=%d,size=%d.npy" % (h, 0, ims)
            svpth = path.join(curpth, 'npy', svname)
            if path.exists(svpth):
                arr = np.load(svpth)
            else:
                im = Image.open(pth)
                arr = img2input(im, **kwargs)
                np.save(svpth, arr)
            _xtest.append(arr)
            _ytest.append(y)
        except Exception as e:
            print(pth, e)
    if add_rand is not None:
        # augment with a uniformly jittered copy of the training set
        _xdata = np.array(_xdata, np.float32)
        az = _xdata + (np.random.rand(*_xdata.shape) - 0.5) * add_rand
        _xdata = np.concatenate((_xdata, az), axis=0)
        _ydata = np.array(_ydata)
        _ydata = np.concatenate((_ydata, _ydata), axis=0)
    if norm:
        # scale 8-bit pixel values into [-1, 1]
        _xdata = (np.array(_xdata, np.float16) - 127.5) / 127.5
        _xtest = (np.array(_xtest, np.float16) - 127.5) / 127.5
    if not isinstance(_xdata, np.ndarray):
        _xdata = np.array(_xdata, np.float16)
    return _xdata, np.array(_ydata), np.array(_xtest), np.array(_ytest)
Example #60
0
def header(path):
    test_name = osp.basename(path)
    print(
        "\n".join(("\n", "*" * 40, "Starting %s test in %s" % (test_name, path), "*" * 40)),
        flush=True,
    )