def _demo_mode(self, connection):
        """ Continuously broadcast a demo image """
        stream = io.open(os.path.join(os.path.dirname(os.path.abspath(__file__)),
            'test.bmp'), 'rb')
        stream.seek(0, 2)

        while self.mode == 'demo':
            connection.write(struct.pack('<L', stream.tell()))
            connection.flush()
            stream.seek(0)
            connection.write(stream.read())
            stream.seek(0, 2)
        # Write a length of zero to the stream to signal that we're done
        connection.write(struct.pack('<L', 0))
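
A minimal receiving loop for this length-prefixed framing might look like the sketch below (the receive_frames helper is hypothetical; connection is assumed to be the same binary file-like object, and a zero payload length marks the end of the stream):

import struct

def receive_frames(connection):
    # Read the 4-byte little-endian length header, then the payload.
    # A zero length is the sender's end-of-stream marker.
    while True:
        header = connection.read(struct.calcsize('<L'))
        (length,) = struct.unpack('<L', header)
        if length == 0:
            break
        yield connection.read(length)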
Example #2
    def __in_place_uncompress(self, filein):
        """Uncompress the file ``in-place''.
        - It would be preferable to use ``os.system'' or the ``popen''-family here,
        but unfortunately they do not work properly in a multithreaded environment.
        """
        _, ext = os.path.splitext(filein)
        if (not ext) or (ext not in self.extensions):
            logger.error('Unknown extension for %s' % filein)
        ##
        out, _ = os.path.split(filein)
        outdir = out if out else '.'
        assert os.access ( outdir , os.W_OK  ) ,\
               'The directory "%s" is not writeable!' % os.path.abspath ( outdir )

        ## uncompress the file
        tfiles = self.uncompress_file(filein)
        # remove the original
        os.remove(filein)
        ofiles = []
        for f in tfiles:
            _, ff = os.path.split(f)
            ff = os.path.join(out, ff)
            shutil.move(f, ff)
            ofiles.append(ff)

        return tuple(ofiles)
Example #3
    def backengine(self):
        """ This method drives the backengine code."""
        status = 0
        data_source = generateTestFilePath('pmi_out_0000001.h5')

        try:
            if self.parameters['number_of_trajectories'] == 1:
                data_target = os.path.abspath(self.output_path)
                command_string = 'cp %s %s' % (data_source, data_target)
                proc = subprocess.Popen(command_string, shell=True)
                proc.wait()
            else:

                # Check if output_path already exists as a file.
                if os.path.isfile(self.output_path):
                    raise IOError(
                        "Output path %s already exists but is not a directory. Cowardly refusing to overwrite existing file."
                        % (self.output_path))
                # Check if output_path already exists as a directory. Create if not.
                if not os.path.isdir(self.output_path):
                    os.mkdir(self.output_path)
                for i in range(self.parameters['number_of_trajectories']):
                    data_target = os.path.join(
                        os.path.abspath(self.output_path),
                        'pmi_out_%07d.h5' % (i))
                    command_string = 'cp %s %s' % (data_source, data_target)
                    proc = subprocess.Popen(command_string, shell=True)
                    proc.wait()
        except:
            status = 1

        return status
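
The shell cp calls above could be done without spawning a subprocess; a minimal sketch using shutil (the copy_trajectory helper name is hypothetical, and the 0/1 return only mirrors the status convention of backengine above):

import shutil

def copy_trajectory(data_source, data_target):
    # Copy one trajectory file; return 0 on success, 1 on failure.
    try:
        shutil.copy(data_source, data_target)
        return 0
    except OSError:
        return 1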
Example #4
    def list_profiles(self, req):
        """
        Displays all available profiles in list format.

        ``req``:
            :class:`webob.Request` containing the environment information from
            the request itself.

        Returns a WSGI application.
        """
        if 'enable' in req.params:
            try:
                if not os.path.exists(ENABLED_FLAG_FILE):
                    open(ENABLED_FLAG_FILE, 'w').close()
                self.profiling_enabled = True
            except IOError:
                log.error("Unable to create %s to enable profiling" % os.path.abspath(ENABLED_FLAG_FILE))
                raise
        elif 'disable' in req.params:
            try:
                if os.path.exists(ENABLED_FLAG_FILE):
                    os.remove(ENABLED_FLAG_FILE)
                self.profiling_enabled = False
            except IOError:
                log.error("Unable to delete %s to disable profiling" % os.path.abspath(ENABLED_FLAG_FILE))
                raise

        resp = Response(charset='utf8')
        session_history = self._backend.get_all()
        resp.unicode_body = self.get_template('list.tmpl').render_unicode(
            history=session_history,
            path=req.path,
            profiling_enabled=self.profiling_enabled)
        return resp
Example #5
File: pytis.py  Project: Methimpact/btcdm
	def fixdirs(self):
		if not os.path.isdir(self._path) or  not os.path.exists(self._path):
			try:
				os.makedirs(self._path)
			except (OSError, IOError):
				self._path = os.path.abspath(os.getcwd())
		return
Example #6
def find_fastq_read_pairs(file_list):
    """
    Given a list of file names, finds read pairs (based on _R1_/_R2_ file naming)
    and returns a dict of {base_name: [ file_read_one, file_read_two ]}
    Filters out files not ending with .fastq[.gz|.gzip|.bz2].
    E.g.
        P567_102_AAAAAA_L001_R1_001.fastq.gz
        P567_102_AAAAAA_L001_R2_001.fastq.gz
    becomes
        { "P567_102_AAAAAA_L001":
           ["P567_102_AAAAAA_L001_R1_001.fastq.gz",
            "P567_102_AAAAAA_L001_R2_001.fastq.gz"] }

    :param list file_list: A list of files in no particular order

    :returns: A dict of file_basename -> [file1, file2]
    :rtype: dict
    """
    # We only want fastq files
    pt = re.compile(r".*\.(fastq|fq)(\.gz|\.gzip|\.bz2)?$")
    file_list = list(filter(pt.match, file_list))
    if not file_list:
        # No files found
        LOG.warning("No fastq files found.")
        return {}
    # --> This is the SciLifeLab-Sthlm-specific format (obsolete as of August 1st, hopefully)
    #     Format: <lane>_<date>_<flowcell>_<project-sample>_<read>.fastq.gz
    #     Example: 1_140220_AH8AMJADXX_P673_101_1.fastq.gz
    # --> This is the standard Illumina/Uppsala format (and Sthlm -> August 1st 2014)
    #     Format: <sample_name>_<index>_<lane>_<read>_<group>.fastq.gz
    #     Example: NA10860_NR_TAAGGC_L005_R1_001.fastq.gz
    suffix_pattern = re.compile(r'(.*)fastq')
    # Cut off at the read group
    file_format_pattern = re.compile(r'(.*)_(?:R\d|\d\.).*')
    index_format_pattern = re.compile(r'(.*)_(?:I\d|\d\.).*')
    matches_dict = collections.defaultdict(list)
    for file_pathname in file_list:
        file_basename = os.path.basename(file_pathname)
        fc_id = os.path.dirname(file_pathname).split("_")[-1]
        try:
            # Check for a pair
            pair_base = file_format_pattern.match(file_basename).groups()[0]
            matches_dict["{}_{}".format(pair_base,
                                        fc_id)].append(file_pathname)
        except AttributeError:

            # Look for an index file (10X Genomics case)
            index_match = index_format_pattern.match(file_basename)
            if index_match:
                matches_dict["{}_{}".format(index_match.groups()[0],
                                            fc_id)].append(file_pathname)
            else:
                LOG.warning("Warning: file doesn't match expected file format, "
                            "cannot be paired: \"{}\"".format(file_pathname))
                # File could not be paired; keep it under its own key
                file_basename_stripsuffix = suffix_pattern.split(
                    file_basename)[0]
                matches_dict[file_basename_stripsuffix].append(
                    os.path.abspath(file_pathname))
    return dict(matches_dict)
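
A short usage sketch for the function above (the flowcell directory name is invented, following the Illumina-style layout described in the docstring):

if __name__ == '__main__':
    files = [
        "140220_AH8AMJADXX/P567_102_AAAAAA_L001_R1_001.fastq.gz",
        "140220_AH8AMJADXX/P567_102_AAAAAA_L001_R2_001.fastq.gz",
    ]
    # Expect one key per read pair, suffixed with the flowcell id taken
    # from the parent directory name.
    for base_name, pair in find_fastq_read_pairs(files).items():
        print(base_name, pair)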
Example #7
def organize_files():

	for entry in os.scandir():
		if entry.is_dir():
			continue
		file_path = Path(entry.path).resolve()
		print(file_path)
		file_ft = file_path.suffix.lower()
		if file_ft in FILES:
			directory_path = Path(FILES[file_ft])
			directory_path.mkdir(exist_ok=True)
			file_path.rename(directory_path.joinpath(file_path.name))

	try:
		os.mkdir("Others")
	except OSError:
		pass

	for entry in os.scandir():
		try:
			print(entry.name)
			if entry.is_dir():
				os.rmdir(entry.path)
			else:
				os.rename(entry.path, os.path.join(os.getcwd(), 'Others', entry.name))

		except Exception as err:
			print(str(err))
Example #8
 def get_structures(self, directory, extension='.cif'):
     """Utility function that provides a list with all structures in a directory"""
     return [
         os.path.abspath(os.path.join(directory, structure))
         for structure in os.listdir(directory)
         if structure.endswith(extension)
     ]
Example #9
 def _list_outputs(self):
     outputs = self.output_spec().get()
     outfilenames = _gen_output_filenames(self.inputs.in_tensors_file, self.inputs.in_bval_file)
     out_files = []
     for item in outfilenames:
         out_files.append(os.path.abspath(item))
     outputs['out_files'] = out_files
     return outputs
Example #10
 def setLogFile(self, filepath):
     parsed_path = os.path.abspath(filepath)
     # Checks if the provided log filename is in a real directory, and that
     # the filename itself is not a directory.
     if os.path.isdir(os.path.dirname(
             parsed_path)) and not os.path.isdir(parsed_path):
         self._log_file = parsed_path
     else:
         raise FileNotFoundError(filepath)
Example #11
def getSchemaOrgTurtlePath():
    if IS_LAMBDA:
        fn_schema = "/tmp/schema.org.ttl"
        if not os.path.exists(fn_schema):
            app.logger.info("Refresh cache of schemaorg.ttl")
            bucket = os.environ.get("resources_bucket", "sosov-data")
            s3_client.download_file(bucket, "resources/data/schema.org.ttl", fn_schema)
        return fn_schema
    return os.path.abspath("resources/data/schema.org.ttl")
Example #13
def pool_make_build_files(list_of_m61s,
                          cpus=cf.CPUS,
                          bool_insert_endpoint_markers=False):
    # Build staging csv and build file from m61s

    m61_ext = 'm61'
    mod61_ext = 'mm61'
    m61s = [
        os.path.abspath(
            glob.glob(
                os.path.join(cf.data_files_dir,
                             '**/{}.{}'.format(j, m61_ext)))[0])
        for j in list_of_m61s
    ]
    mod_m61s = [
        os.path.abspath(
            glob.glob(
                os.path.join(cf.data_files_dir,
                             '**/{}.{}'.format(j, mod61_ext)))[0])
        for j in list_of_m61s
    ]

    m61_files_to_process = list(m61s)  # copy, so the removals below do not mutate m61s while it is iterated
    m61_files_to_process.extend(mod_m61s)
    for mod61 in mod_m61s:
        tmp_mod61_basename = os.path.basename(mod61).split('.')[0]
        for am61 in m61s:
            tmp_m61_basename = os.path.basename(am61).split('.')[0]
            if tmp_mod61_basename == tmp_m61_basename:
                m61_files_to_process.remove(am61)
                break
    print('Processing m61 files...')
    [print(os.path.basename(i)) for i in m61_files_to_process]

    # Pool.starmap method accepts a sequence of argument tuples.
    with multiprocessing.Pool(processes=cpus) as pool:
        # starmap expects a sequence of argument tuples, so each path is wrapped
        # in a 1-tuple (assumes production_make_build_file takes a single path argument).
        pool.starmap(production_make_build_file,
                     [(f,) for f in m61_files_to_process])
Example #14
def show_image_det_from_class(img,objects,type_names=None,ignored_type_names=None,occlusion=-1,truncation=-1,saveto=-1):
    '''
    (Needs the dataset and object classes from frustum-pointnets.)
    Show an image with labels from objects: bbox, occlusion, truncation.
    :param img(ndarray):
    :param objects(list of class): type, xmin, ymin, xmax, ymax, occlusion, truncation
    :param type_names(tuple): like ('Car', 'Pedestrian', 'Cyclist')
    :param ignored_type_names(tuple): like ('DontCare',)
    :param occlusion(int): >0: show occlusion (0, 1, 2, 3)
    :param truncation(int): >0: show truncation (0~1)
    :param saveto(str): path to save the image to; otherwise the image is shown
    :return: None
    '''
    assert type_names is not None
    for obj in objects:
        if obj.type in type_names:
            try:
                cv2.rectangle(img, (int(obj.xmin),int(obj.ymin)),
                    (int(obj.xmax),int(obj.ymax)), (0,255,0), 1)
                cv2.putText(img, obj.type[:3], (int(obj.xmin), int(obj.ymin - 15)), cv2.FONT_HERSHEY_DUPLEX, \
                        0.5, (0, 255, 0), 1)
            except AttributeError:
                print("No object.xmin/ymin/xmax/ymax/type(str)")
            if occlusion>0:
                try:
                    cv2.putText(img, 'o'+str(obj.occlusion), (int(obj.xmin), int(obj.ymin + 15)), cv2.FONT_HERSHEY_DUPLEX, \
                                0.5, (0, 255, 0), 1)
                except AttributeError:
                    print("No object.occlusion but you set occlusion!=-1")
            if truncation>0:
                try:
                    cv2.putText(img, 't'+str(obj.truncation), (int(obj.xmin), int(obj.ymin)), cv2.FONT_HERSHEY_DUPLEX, \
                                0.5, (0, 255, 0), 1)
                except AttributeError:
                    print("No object.truncation but you set truncation!=-1")

        elif ignored_type_names and obj.type in ignored_type_names:
            cv2.rectangle(img, (int(obj.xmin),int(obj.ymin)),
                (int(obj.xmax),int(obj.ymax)), (0,100,100), 1)
            cv2.putText(img, 'ignored', (int(obj.xmin), int(obj.ymin - 15)), cv2.FONT_HERSHEY_DUPLEX, \
                        0.5, (0, 100, 100), 1)
    if isinstance(saveto,str):
        out_dir = os.path.abspath(os.path.dirname(saveto))
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)
        cv2.imwrite(saveto, img)
    else:
        cv2.imshow('image', img)
        cv2.waitKey(0)
Example #15
 def auto_config(self, config_source):
     """creates a dictionary of configuration parameters out of given path"""
     config = None
     print(os.path.abspath(os.path.dirname(__file__))+"\\"+default_config)
     # fallback to default config if no file can be found
     try:
         config = self.load_config(config_source)
     except IOError:
         print("No configuration-file found. Using default-configuration.")
         try:
             config = self.load_config(os.path.dirname(os.path.abspath(__file__))+"/"+default_config)
         except:
             print("Someone messed with the default-configuration file. It cannot be found.")
         else: return(config)
     else: return(config)
Example #16
def test(coverage):
    """Run the unit tests."""
    if coverage and not os.environ.get('FLASK_COVERAGE'):
        os.environ['FLASK_COVERAGE'] = '1'
        os.execvp(sys.executable, [sys.executable] + sys.argv)
    import unittest
    tests = unittest.TestLoader().discover('tests')
    unittest.TextTestRunner(verbosity=2).run(tests)
    if COV:
        COV.stop()
        COV.save()
        print('Coverage Summary:')
        COV.report()
        basedir = os.path.abspath(os.path.dirname(__file__))
        covdir = os.path.join(basedir, 'tmp/coverage')
        COV.html_report(directory=covdir)
        print('HTML version: file://%s/index.html' % covdir)
        COV.erase()
Example #17
def main():
    global NUM_TESTS
    global TEST_PERC
    global BENCHMARK_PATH
    global L2_PATH
    global TIMEOUT_PATH
    global OUT_DIR
    global SPECS_DIR
    
    args = docopt(__doc__)

    random.seed(int(args['--seed']))
    
    OUT_DIR = os.path.abspath(args['OUT_DIR'])
    NUM_TESTS = int(args['--num-tests'])
    TEST_PERC = float(args['--test-perc'])
    SPECS_DIR = DEFAULT_SPECS_DIR
    BENCHMARK_PATH = DEFAULT_BENCHMARK_PATH

    if args['--l2-path'] is None:
        L2_PATH = DEFAULT_L2_PATH
    else:
        L2_PATH = os.path.abspath(args['--l2-path'])
    
    if args['--timeout-path'] is None:
        if sys.platform == 'linux':
            TIMEOUT_PATH = DEFAULT_TIMEOUT_PATH_PREFIX + '_linux.native'
        elif sys.platform == 'darwin':
            TIMEOUT_PATH = DEFAULT_TIMEOUT_PATH_PREFIX + '_osx.native'
        else:
            print('Unsupported system: {}'.format(sys.platform))
            exit(-1)
    else:
        TIMEOUT_PATH = os.path.abspath(args['--timeout-path'])
    
    if args['setup']:
        setup()
    elif args['write-costs']:
        write_costs()
    elif args['run']:
        run()
    elif args['benchmark']:
        benchmark()
Example #18
def get_directory_contents(dirname, sort_field=None):
    """The directory is joined with the CKEDITOR_UPLOAD_PATH.
    A check is made not to go to directories below the CKEDITOR_UPLOAD_PATH.
    """
    if dirname is None:
        return None
        
    # Make sure the CKEDITOR_UPLOAD_PATH exists
    if not os.path.isdir(CKEDITOR_UPLOAD_PATH):
        os.makedirs(CKEDITOR_UPLOAD_PATH)
        
    # Join the CKEDITOR_UPLOAD_PATH and specified directory
    full_dirname = os.path.join(settings.CKEDITOR_UPLOAD_PATH, dirname)
    full_dirname = os.path.abspath(full_dirname)     # remove attempts to move the path upwards
    # If the directory doesn't exist, revert to the CKEDITOR_UPLOAD_PATH
    if not os.path.isdir(full_dirname): 
        full_dirname = settings.CKEDITOR_UPLOAD_PATH
    
    # array to hold directory contents
    dir_contents = []
    items = os.listdir(full_dirname)
    items = filter(lambda x: not x.startswith('.'), items)
    for item in items:
        fullpath = os.path.join(full_dirname, item)
        stats = os.stat(fullpath)
        # strftime('%y%m%d',localtime(os.stat('thefile' )[ST_MTIME])
        #print stats
        if os.path.isfile(fullpath):
            dli = DirListItem(item, DirListItem.TYPE_FILE, datetime.fromtimestamp(stats.st_mtime),  fsize=stats.st_size )
        elif os.path.isdir(fullpath):
            dli = DirListItem(item, DirListItem.TYPE_DIR, datetime.fromtimestamp(stats.st_mtime),  )
        dir_contents.append(dli)    
    
    if sort_field is not None and sort_field.startswith('-'):
        reverse_sort = True
        sort_field = sort_field[1:]
    else:
        reverse_sort = False
        
    if sort_field in DirListItem.ATTRS:
        dir_contents.sort(key=lambda obj: getattr(obj, sort_field), reverse=reverse_sort)
    return dir_contents
Example #19
    def __in_place_compress(self, files):
        """Compress the file ``in-place''.
        - It would be preferable to use ``os.system'' or the ``popen''-family here,
        but they do not work properly in a multiprocessing environment.
        """
        output = self.nominal_dbname
        out, _ = os.path.split(output)
        outdir = out if out else '.'
        assert os.access ( outdir , os.W_OK  ) ,\
               'The directory "%s" is not writeable!' % os.path.abspath ( outdir )
        #
        # compress the file
        compressed = self.compress_files(files)
        # remove input files
        for f in files:
            try:
                os.remove(f)
            except OSError:
                pass

        shutil.move(compressed, output)
Example #20
def ensureDirExists(path):
    # check if this is an S3 path (starts with s3://)
    if path.startswith("s3://"):
        # handle as s3 path
        return ensureS3BucketExists(path)
    elif os.path.isdir(path):
        # local path and exists
        return True
    elif os.path.exists(path):
        logMessage("Error", "path (" + path + ") exists but is not directory")
        return False
    else:
        # create local path for the first time
        os.makedirs(os.path.abspath(path))
        if os.path.isdir(path):
            return True
        else:
            logMessage(
                "ERROR",
                "Tried to create dir (" + path + ") but didn't seem to work")
            return False
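
For the purely local branch, a more compact variant is possible; a sketch (not the original helper) that relies on exist_ok to avoid the create-then-check pattern:

import os

def ensure_local_dir(path):
    # Hypothetical simplified helper for the non-S3 case: exist_ok=True makes
    # the call idempotent and avoids racing between the existence check and
    # the directory creation.
    os.makedirs(os.path.abspath(path), exist_ok=True)
    return os.path.isdir(path)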
Example #21
File: pytis.py  Project: Methimpact/btcdm
	def get_path(self):
		''' determined based on OS, can be set on instance
		'''
		if not self._path:
			if os.name == 'posix':
				rundir = '/run/'
			elif os.name in ('mac', 'os2', 'ce', 'riscos'):
				rundir = '/var/run/'
			elif os.name == 'nt':
				if self.caller is not None:
					rundir = os.path.join(os.path.dirname(self.caller.__file__), 'run')
				else:
					rundir = os.path.join(os.path.dirname(__file__), 'run')
			else:
				rundir = os.getcwd()
			self._path = os.path.abspath(rundir)
			if not os.path.isdir(self._path) or  not os.path.exists(self._path):
				try:
					os.makedirs(self._path)
				except (OSError, IOError):
					self._path = os.path.abspath(os.getcwd())
		return self._path
Example #22
def main(args):
    logging.basicConfig(level=logging.DEBUG,
                        format="\n%(levelname)s:\t%(message)s")
    logger = logging.getLogger()
    output_folder = args['output_folder'] + '_' + os.path.basename(
        args['fastq_reads']).split(".")[0]
    args['fastq_reads'] = os.path.abspath(args['fastq_reads'])
    if not os.path.isdir(output_folder): os.mkdir(output_folder)

    logger.info("Saving run info to output folder...")
    write_run_info(args, output_folder)

    logger.info("Building the insertion sequence indices...")
    bowtie2.build('2.2.9', args['insertion_sequence_fasta'])

    logger.info("Aligning the fastq file to the insertion sequences...")
    sam_file_loc = bowtie2.align('2.2.9',
                                 args['insertion_sequence_fasta'],
                                 args['fastq_reads'],
                                 output_folder,
                                 threads=args['threads'])

    read_filter = filters.Filter(args['fastq_reads'],
                                 args['classification_file'],
                                 args['taxon_nodes'],
                                 logger_in=logger)

    logger.info("Invoking the Jansen Protocol...")
    taxon_total_count, taxon_IS_count, potential_transfers, intra_IS = read_filter.filter_reads_ISCounter2(
        sam_file_loc)

    logger.info("Saving results to file... ")
    out_file = save_summary_stats(args['fastq_reads'], taxon_total_count,
                                  taxon_IS_count, potential_transfers,
                                  intra_IS, output_folder)
    logger.info("Results saved to %s" % out_file)

    logger.info("Analysis Complete  :)")
Example #24
def get_cwd():
    filename = sys.argv[0]
    return os.path.dirname(os.path.abspath(os.path.expanduser(filename)))
Example #25
File: config.py  Project: rblack42/RRBweb
from os.path import abspath, dirname

_CWD = dirname(abspath(__file__))
Example #26
# Just sketch

import os, sys


CONFIG_FILE = os.path.join(os.path.abspath(os.path.dirname(__file__)), "../config.yaml")


def get_config_variable(name):
    """ Retrieve variable name from configuration """

    pass
Example #27
variable_config = {
    "num_workers":6, # Should speed up data fetching on a multi-CPU machine
    "activation": "elu",
    "batch_size": 32,
    "bidir": True,
    "dropout_layers": [
        0.5
    ],
    "embed_dim": 200,
    "include_metadata": False,
    "linear_layer_sizes": [
        1000
    ],
    "lr": 0.0001,
    "max_len": 512,
    "modelpath": os.path.abspath(os.path.join(bpe_models_dir, "attrib_1000.model")),
    "num_classes": 1314,
    "num_lstm_hidden": 128,
    "num_lstm_layers": 2,
    "other_features_size": 39,
    "other_linear_depth": 2,
    "vocab_size": 1000
}


full_config = {**params, **variable_config}

# This uses the exposed ray trainable interface without the ray garbage
trainable = mytorch.RaylessTrainable(
    full_config,
    mytorch.myLSTM,
Example #28
            # There's a hand code dir in this subdir, cd into it
            try:
                os.chdir(hc_dir)
            except:
                # Maybe subdirs is stale?
                print("Could not access dir: {}  Skipping.".format(subsys +
                                                                   os.sep +
                                                                   hc_dir))
                continue

            # Assert: In project_dir/subsystem_dir/knit_dir

            for hand_code_filename in glob.glob(_GLOB_SUFFIX):
                print("      Knitting: " + hand_code_filename)

                # The target gen'd file will have the same name in the parent dir
                gen_filename = os.pardir + os.sep + hand_code_filename

                # Read all the hand code and break into sections
                read_hand_code(hand_code_filename)

                # Splice in each section into the designated location in the gen file
                splice_hand_code(gen_filename)

            try:
                os.chdir(os.pardir)  # pop back up into the subsys dir
            except:
                raise mi_Error(
                    "Could not return to parent dir: {}  Abort.".format(
                        os.path.abspath(subsys)))
Example #29
def resource_path(relative_path):
    if hasattr(sys, "_MEIPASS"):
        base_path = sys._MEIPASS
    else:
        base_path = os.path.abspath(".")
    return os.path.join(base_path, relative_path)
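
For example, a bundled asset could be resolved at runtime as below (the 'assets/icon.png' path is purely illustrative); under a PyInstaller one-file build, sys._MEIPASS points at the temporary extraction directory, otherwise the current directory is used:

# Resolve a data file relative to the bundle (or the working directory when
# running from source).
icon_path = resource_path('assets/icon.png')
print(icon_path)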
Example #30
model_params['backprop'] = False
base = mytorch.myLSTMOutputHidden(model_params)
base.load_state_dict(model_stuff['state_dict'], strict=False)
model = nn.Sequential(
    base,
    nn.Linear(model_params['linear_layer_sizes'][-1] + 39,
              model_params['linear_layer_sizes'][-1]),
    nn.ELU(),
    nn.Dropout(),
    nn.Linear(model_params['linear_layer_sizes'][-1], 1314),
)
model.to(device)

train_loader, val_loader = mytorch.get_attrib_bpe_data(
    model_params['batch_size'],
    modelpath=os.path.abspath(os.path.join(bpe_models_dir, 'attrib_1000.model')),
    num_workers=4)
loss_func = model_stuff['loss_func']
optim = torch.optim.Adam(model.parameters(), lr=.0001)
components = model, loss_func, optim, train_loader, val_loader

# I will do jank score logging in this case
global_step = 0
logfile = os.path.join(logpath, "logs.csv")
with open(logfile, "w+") as f:
    f.write(f"global_step,train_loss,train_accuracy,val_loss,val_accuracy\n")

try:
    i = 0
    while True:
        train_loss, train_accuracy, val_loss, val_accuracy, global_step = mytorch.fit(
Example #31
 def set_download_dir(self, dl_dir):
     self.download_dir = os.path.abspath(dl_dir)
Example #32
import sys, os

print ('sys.argv[0] = ', sys.argv[0])
pathname = os.path.dirname(sys.argv[0])
print ('path = ', pathname)
print ('full path =', os.path.abspath(pathname))
Example #33
File: apt.py  Project: rory/camarabuntu
 def __str__(self):
     print "self.type = " + self.type
     if self.type == Repository.LOCAL_REPOSITORY:
         return "\"file://" + os.path.abspath( self.path ) + "\""
     elif self.type == Repository.REMOTE_REPOSITORY:
         return "%s %s %s" % (self.url, self.dist, self.component)
Example #34
if USE_WINDOWS:
    # Try to add some commonly needed paths to PATH
    paths = os.environ.get('PATH', '').split(os.path.pathsep)

    program_files = os.environ.get('PROGRAMFILES')
    if program_files:
        # Guess some paths
        paths += glob.glob(os.path.join(program_files, 'gs/gs*/bin'))
        paths += glob.glob(os.path.join(program_files, 'pstoedit*'))
        paths += glob.glob(os.path.join(program_files, 'miktex*/miktex/bin'))

        # FIXME: a better solution would be to look them up from the registry

    # The path where Inkscape is likely to be
    paths += [os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])), '..', '..')]

    # Set the paths
    os.environ['PATH'] = os.path.pathsep.join(paths)


class ConvertInfo(object):
    def __init__(self):
        self.text = None
        self.preamble_file = None
        self.page_width = None
        self.scale_factor = None
        self.has_node = False
        self.text_to_path = False
        self.selected_converter = None
Example #35
import sys
import os
import unittest

tests_path = os.path.dirname(__file__)
root = os.path.abspath(os.path.join(tests_path, '..'))
sys.path.insert(0, root)

import pygrid


def create_job(jobname):
    _job = pygrid.

    return


class CommandJobTest(unittest.TestCase):
    def test(self):
        self.assertEqual(fun(3), 4)
Example #36
def getGeneInfoList():
    print("connected to flask server")
    geneInfo = os.path.abspath("../database/preprocessing/gene_information.csv")
    geneInfoDf = pd.read_csv(geneInfo)
    uidList = list(geneInfoDf["uid"].astype("int").values)
    return uidList
Example #37
 def ignoref(p, files):
     return (f for f in files if os.path.abspath(os.path.join(p, f)) == path)
Example #38
# Run to find code for importing ScreenKit
__version__ = 0.1
RELEASE = 0.1
import sys
import os

print('import sys\nsys.path.insert(0,' +
      str(os.path.dirname(os.path.abspath(__file__))) + ')')
Example #39
from os.path import join, abspath, curdir

code_file = open(join(abspath(curdir), 'auth_code.txt'), 'r')

spotify_token = code_file.read()

spotify_user_id = "sinistersandwich"

# Recent Song Documentation: https://developer.spotify.com/documentation/web-api/reference/player/get-recently-played/

Example #40
def get_full_filepath(filename):
    """ Returns a tuple of the path and the full pathname of the given 
    filename. """
    pathname = os.path.dirname(filename)
    return ('filename = ' + filename, 'path = ' + pathname, \
        'full path = ' + os.path.abspath(pathname))
Example #41
model_params = model_stuff['params']
model_params['my_device'] = device
model_params['backprop'] = True
base = mytorch.myLSTMOutputHidden(model_params)
model = nn.Sequential(base,
                      nn.Linear(model_params['linear_layer_sizes'][-1] + 39, model_params['linear_layer_sizes'][-1]),
                      nn.ELU(),
                      nn.Dropout(),
                      nn.Linear(model_params['linear_layer_sizes'][-1], 1314),
)
model.load_state_dict(model_stuff['state_dict'])
model.to(device)


train_loader, val_loader = mytorch.get_attrib_bpe_data(model_params['batch_size'], modelpath=os.path.abspath(os.path.join(bpe_models_dir,'attrib_1000.model')), num_workers=4)
loss_func = F.cross_entropy
optim = torch.optim.Adam(model.parameters(),lr=.00005)
components = model, loss_func, optim, train_loader, val_loader


# I will do jank score logging in this case
global_step = 0
logfile = os.path.join(logpath, "logs.csv")
with open(logfile, "w+") as f:
    f.write(f"global_step,train_loss,train_accuracy,val_loss,val_accuracy\n")

try:
    i = 0
    while True:
        train_loss, train_accuracy, val_loss, val_accuracy, global_step = mytorch.fit(
Example #43
        '-c',
        '--classification_file',
        required=True,
        help=
        'A tab-separated file where the first column is the read title and the second '
        'column is the assigned taxon id')

    parser.add_argument(
        '-nodes',
        '--taxon_nodes',
        required=False,
        default=[
            os.path.abspath(
                os.path.join(data_dir, "TaxonomyDatabase/nodes.dmp")),
            os.path.abspath(os.path.join(data_dir, "TaxonomyDatabase/merged.dmp"))
        ],
        help='Location of the NCBI Taxonomy Database nodes.dmp and merged.dmp files',
        nargs='*')

    parser.add_argument(
        '-is',
        '--insertion_sequence_fasta',
        required=False,
        type=str,
        default=os.path.join(data_dir, "IS_fastas/Bacteroides_all.fasta"),
        help='A fasta file containing the insertion sequences of interest,'
        ' concatenated sequentially in any order.')

    parser.add_argument('-o',
                        '--output_folder',
Example #44
 def get_attachment(self, token, filename):
     attach = self.db.get_attachment(token, filename).read()
     with open(filename, 'w') as f:
         for line in attach:
             f.write(line)
     return os.path.abspath(filename)
Example #45
from pastebin_python import PastebinPython
from pastebin_python.pastebin_exceptions import *
from pastebin_python.pastebin_constants import *
from pastebin_python.pastebin_formats import *

pbin  = PastebinPython(api_dev_key='###---YOUR API KEY HERE---###')

"""
###--NOT IMPLEMENTED IN BASE LOGGER---###

Debug = False
DBG   = False
VM    = False

name     = sys.argv[0]
location = os.path.abspath(name)

"""
"""
We prepare a keymap to utilize with the
ctypes module in order to log keystrokes
"""
VKStr = {}
VKStr[0x01] = "LEFT_MOUSE"
VKStr[0x02] = "RIGHT_MOUSE"
VKStr[0x03] = "MIDDLE_MOUSE"
VKStr[0x08] = "BACKSPACE"
VKStr[0x09] = "TAB"
VKStr[0x0D] = "ENTER"
VKStr[0x10] = "SHIFT"
VKStr[0x11] = "CTRL"
VKStr[0x12] = "ALT"
Example #46
    def _delete_file(path: str) -> None:
        """ Deletes the file at the given path.
        """

        os.remove(os.path.abspath(path))