Example #1
0
    def test_phva_rate_const_mat(self):
        """Check PHVA-based rate constants against externally computed
        reference values (taken from a spreadsheet) over 670-770 K."""
        fixed_atoms = load_fixed_g03com(
            pkg_resources.resource_filename(__name__, "../data/test/mat/Zp_p_react.14mei.com"))
        mol_react = load_molecule_g03fchk(
            pkg_resources.resource_filename(__name__, "../data/test/mat/Zp_p_react.28aug.fchk"),
            pkg_resources.resource_filename(__name__, "../data/test/mat/Zp_p_react.14mei.fchk"))
        mol_trans = load_molecule_g03fchk(
            pkg_resources.resource_filename(__name__, "../data/test/mat/Zp_p_TS.28aug.fchk"),
            pkg_resources.resource_filename(__name__, "../data/test/mat/5Tp_p_TS.oniom21apr_HF.fchk"))
        pf_react = PartFun(NMA(mol_react, PHVA(fixed_atoms)))
        pf_trans = PartFun(NMA(mol_trans, PHVA(fixed_atoms)))
        km = KineticModel([pf_react], pf_trans)

        # values taken from the fancy excel file...
        temps = np.array([670, 680, 690, 700, 710, 720, 730, 740, 750, 760, 770])
        expected_ks = np.array([
            7.9473102E+05, 9.8300444E+05, 1.2085262E+06, 1.4771808E+06,
            1.7955340E+06, 2.1708793E+06, 2.6112829E+06, 3.1256298E+06,
            3.7236678E+06, 4.4160510E+06, 5.2143822E+06
        ])
        # Iterate pairwise instead of indexing with xrange(len(...)).
        for temp, expected_k in zip(temps, expected_ks):
            k = km.rate_constant(temp)
            # Compare on a log scale: the rate constants span orders of magnitude.
            self.assertAlmostEqual(np.log(k/(1/second)), np.log(expected_k), 5)
            # do_log=True must agree with the log of the plain rate constant.
            log_k = km.rate_constant(temp, do_log=True)
            self.assertAlmostEqual(np.log(k), log_k)
Example #2
0
    def setup(self):
        '''scenario setup'''
        self.tacker_script = pkg_resources.resource_filename(
            'yardstick.benchmark.scenarios.networking',
            Sfc.TACKER_SCRIPT)

        self.server_script = pkg_resources.resource_filename(
            'yardstick.benchmark.scenarios.networking',
            Sfc.SERVER_SCRIPT)

        # Call Tacker to instantiate VNFs and service chains.
        # NOTE(review): shell=True is injection-prone if this path ever becomes
        # user-controlled; here it comes from pkg_resources, so it is kept.
        cmd_tacker = "%s" % (self.tacker_script)
        subprocess.call(cmd_tacker, shell=True)

        target = self.context_cfg['target']
        target_user = target.get('user', 'root')
        target_pwd = target.get('password', 'octopus')
        target_ip = target.get('ip', None)

        # The web server starts automatically during the VM boot.
        LOG.info("user:%s, target:%s", target_user, target_ip)
        self.server = ssh.SSH(target_user, target_ip, password=target_pwd)
        self.server.wait(timeout=600)
        # Upload the server-side script; the with-block closes the handle
        # (the original leaked the file object passed as stdin).
        with open(self.server_script, "rb") as script:
            self.server.run("cat > ~/server.sh", stdin=script)
        cmd_server = "sudo bash server.sh"
        LOG.debug("Executing command: %s", cmd_server)
        status, stdout, stderr = self.server.execute(cmd_server)
        LOG.debug("Output server command: %s", status)

        self.setup_done = True
Example #3
0
def main(args):
	'''Module main function.

	Initializes pygame and the training database, loads the goal batch for
	the current generation from the packaged training data, restores the
	NSGA population when resuming, then runs ``args.iterations`` generations,
	saving each one back to the database.
	'''
	global database
	global genetic_algorithm
	global joint_positions
	global goal_positions
	pygame.init()
	random.seed()
	database = utils.initialize_database(args, 'RobotTrainingData')
	# Objective names (Spanish): time, per-joint angle errors, energy.
	database.set_objective_names(['Tiempo', r'Error en $\theta_1$', r'Error en $\theta_2$', r'Error en $\theta_3$', 'Energía'])
	problem = EV3Problem()
	generation = database.properties['highest_population']
	population_size = database.properties['population_size']
	genetic_algorithm = evolution.NSGA(problem, population_size)

	# Training data ships with the package; rotate through it in batches of
	# N_GOALS rows, cycling every 10 generations.
	x_path = os.path.abspath(pkg_resources.resource_filename('resources.ev3', 'x_train.txt'))
	y_path = os.path.abspath(pkg_resources.resource_filename('resources.ev3', 'y_train.txt'))
	batch_start = (generation % 10) * N_GOALS
	joint_positions = np.loadtxt(x_path)[batch_start : batch_start + N_GOALS, :]
	goal_positions = np.loadtxt(y_path)[batch_start : batch_start + N_GOALS, :]

	# Resume from a previous run when the database already holds generations.
	if generation > 0:
		parents, children = utils.load_data(database)
		genetic_algorithm.set_population(parents)
		genetic_algorithm.set_children(children)
	for _ in range(args.iterations):
		generation += 1
		print('Starting generation ' + str(generation))
		genetic_algorithm.iterate()
		database.create_population()
		utils.save_data(genetic_algorithm, database)
		print('=' * (SCREEN_WIDTH - 1))
Example #4
0
def render_thumb(width, imgfile):
	"""Serve a width-limited thumbnail of a packaged image, regenerating it
	when missing or older than the original; 404 on unknown images.

	:param width: target width in pixels (string, also used in the file name)
	:param imgfile: image file name under the packaged ``img/`` directory
	"""
	try:
		imgpath = pkg_resources.resource_filename('vishwin_http.views', 'img/' + imgfile)
		thumbfile = width + 'px-' + imgfile
		thumbpath = pkg_resources.resource_filename('vishwin_http.views', 'generated/thumb/' + thumbfile)
		# get file modified time for original; will throw exception if not found
		mtime_orig = os.path.getmtime(imgpath)
		if not os.path.isfile(thumbpath) or os.path.getmtime(thumbpath) < mtime_orig:
			# with-block closes the handle even if exifread raises
			with open(imgpath, 'rb') as raw:
				tags = exifread.process_file(raw, details=False, stop_tag='Image_Orientation')
			# reopen using PIL
			img = Image.open(imgpath)
			# upon transpose, format attribute in object is cleared
			img_format = img.format
			if 'Image Orientation' in tags:
				orientation = tags['Image Orientation'].values[0]
				# rotations in PIL(low) are anti-clockwise, would be easier if clockwise was default
				if orientation >= 5:
					img = img.transpose(Image.ROTATE_270)
				# BUG FIX: original used `== (3 or 4)` which is `== 3`, so
				# orientation 4 was never rotated; same idiom bug below.
				if orientation in (3, 4) or orientation >= 7:
					img = img.transpose(Image.ROTATE_180)
				# flipped images (original `== (2 or 4 or 5 or 7)` matched only 2)
				if orientation in (2, 4, 5, 7):
					img = img.transpose(Image.FLIP_LEFT_RIGHT)
			img.thumbnail((int(width), int(width)/(img.size[0]/img.size[1])))
			img.save(thumbpath, img_format)
			img.close()
		return send_file(thumbpath, mimetype=guess_type(imgpath)[0])
	except OSError:
		abort(404)
Example #5
0
def create_app(config: dict = None, mail_client=None):
    """Create and configure the ALservice Flask application.

    :param config: optional configuration dict; when omitted/empty the
        configuration is read from the file named by the ``ALSERVICE_CONFIG``
        environment variable
    :param mail_client: mail client handed to the account-linking setup
    :return: the configured Flask application
    """
    # BUG FIX: the default used to be the mutable literal ``{}`` (shared
    # between calls); ``None`` avoids that, and behavior is unchanged since
    # an empty dict was already treated as falsy below.
    app = Flask(__name__, static_folder='static')

    if config:
        app.config.update(config)
    else:
        app.config.from_envvar('ALSERVICE_CONFIG')

    MakoTemplates(app)
    app._mako_lookup = TemplateLookup(directories=[pkg_resources.resource_filename('alservice.service', 'templates')],
                                      input_encoding='utf-8', output_encoding='utf-8',
                                      imports=['from flask_babel import gettext as _'])

    app.al = init_account_linking(app, mail_client)

    babel = Babel(app)
    babel.localeselector(get_locale)
    app.config['BABEL_TRANSLATION_DIRECTORIES'] = pkg_resources.resource_filename('alservice.service',
                                                                                  'data/i18n/locales')

    from .views import account_linking_views
    app.register_blueprint(account_linking_views)

    setup_logging(app.config.get('LOGGING_LEVEL', 'INFO'))

    logger = logging.getLogger(__name__)
    logger.info('Running ALservice version %s', pkg_resources.get_distribution('ALservice').version)

    return app
Example #6
0
def setup_blank_site(meta):
    """Scaffold a new site in the current directory: write ``meta.md`` from
    *meta*, create the standard folders, copy the packaged templates and
    stylesheet, then generate the site."""
    def safe_mkdir(name):
        # Create the directory if it doesn't already exist
        if not os.path.exists(name):
            os.mkdir(name)

    # Write meta.md; the with-block closes the handle even on write errors
    # (the original left the file open if a write raised).
    with open("meta.md", "w") as output:
        for key in meta:
            output.write(key + ": " + meta[key] + "\n")

    # Create folders
    safe_mkdir(PagesDirectory)
    safe_mkdir(PostsDirectory)
    safe_mkdir(TemplatesDirectory)
    safe_mkdir(ArchiveDirectory)

    # Copy default templates
    for name in Templates:
        rsc_path = "templates/" + name + "-template.html"
        src = pkg_resources.resource_filename(__name__, rsc_path)
        shutil.copyfile(src, TemplatesDirectory + name + "-template.html")

    # Copy style.css
    src = pkg_resources.resource_filename(__name__, "style.css")
    shutil.copyfile(src, "style.css")

    # Generate the site
    gen_site()
Example #7
0
  def test20_verification_filelist(self):
    """Check that the AT&T database queried directly (db1) and through the
    verification.filelist interface (db2) yield equivalent results."""
    try:
      db1 = facereclib.utils.resources.load_resource(pkg_resources.resource_filename('facereclib.tests', os.path.join('scripts', 'atnt_Test.py')), 'database')
    except Exception as e:
      raise SkipTest("This test is skipped since the atnt database is not available.")
    try:
      db2 = facereclib.utils.resources.load_resource(pkg_resources.resource_filename('facereclib.tests', os.path.join('databases', 'atnt_fl', 'atnt_fl_database.py')), 'database')
    except Exception as e:
      raise SkipTest("This test is skipped since the verification.filelist database is not available.")
    # The test of the verification.filelist database is a bit different.
    # here, we test the output of two different ways of querying the AT&T database
    # where actually both ways are uncommon...

    # assure that different kind of queries result in the same file lists
    # (db1 yields non-string ids, hence the str() conversion)
    self.assertEqual(set([str(id) for id in db1.model_ids()]), set(db2.model_ids()))
    self.assertEqual(set([str(id) for id in db1.t_model_ids()]), set(db2.t_model_ids()))

    # helper: both queries must produce the same set of file paths
    def check_files(f1, f2):
      self.assertEqual(set([file.path for file in f1]), set([file.path for file in f2]))

    check_files(db1.all_files(), db2.all_files())
    check_files(db1.training_files('train_extractor'), db2.training_files('train_extractor'))
    # model ids are ints for db1 but strings for db2
    check_files(db1.enroll_files(model_id=22), db2.enroll_files(model_id='22'))
    check_files(db1.probe_files(model_id=22), db2.probe_files(model_id='22'))

    check_files(db1.t_enroll_files(model_id=22), db2.t_enroll_files(model_id='22'))
    check_files(db1.z_probe_files(), db2.z_probe_files())

    # make_path must agree for the first file of each database
    f1 = db1.all_files()[0]
    f2 = db2.all_files()[0]
    self.assertEqual(f1.make_path(directory='xx', extension='.yy'), f2.make_path(directory='xx', extension='.yy'))

    # client-id lookup from the (sorted) first model id must agree as well
    m1 = sorted([str(id) for id in db1.model_ids()])[0]
    m2 = sorted([str(id) for id in db2.model_ids()])[0]
    self.assertEqual(str(db1.client_id_from_model_id(m1)), db2.client_id_from_model_id(m2))
  def setUp(self):
    """Sandbox the configuration machinery: neutralize NTA_DYNAMIC_CONF_DIR,
    copy the packaged nupic config files into a temp dir, and reset the
    Configuration singleton."""
    if "NTA_DYNAMIC_CONF_DIR" in os.environ:
      # Remove it to make sure our in-proc tests won't accidentally
      # mess with actual files
      oldNtaDynamicConfDir = os.environ["NTA_DYNAMIC_CONF_DIR"]
      del os.environ["NTA_DYNAMIC_CONF_DIR"]
      self.addCleanup(os.environ.update,
                      dict(NTA_DYNAMIC_CONF_DIR=oldNtaDynamicConfDir))

    self.files = dict()

    tmpDir = tempfile.mkdtemp()
    self.addCleanup(shutil.rmtree, tmpDir)

    # Copy each packaged config into the sandbox. The keys of self.files are
    # the canonical config names; the sandbox file names carry a -unittest
    # suffix for default/site (custom keeps its real name), as before.
    for canonicalName, sandboxName in (
        ('nupic-default.xml', 'nupic-default.xml-unittest'),
        ('nupic-site.xml', 'nupic-site.xml-unittest'),
        ('nupic-custom.xml', 'nupic-custom.xml')):
      with open(os.path.join(tmpDir, sandboxName), 'w') as fp:
        with open(resource_filename(__name__, 'conf/' + canonicalName)) as inp:
          fp.write(inp.read())
        self.files[canonicalName] = fp.name

    self.customParam = 'nupic.custom.hello'
    self.customValue = 'world'

    configuration.Configuration.clear()
def get_tiddler_locations(store_contents, package_name):
    """
    returns instance_tiddlers structure using tiddler paths from within the
    package if available

    store_contents is a dictionary listing tiddler URIs per bag

    packaged tiddlers must be listed in <package>/resources/tiddlers.index
    """
    tiddler_index = os.path.join("resources", "tiddlers.index")
    tiddler_index = resource_filename(package_name, tiddler_index)
    instance_tiddlers = {}
    try:
        # with-block guarantees the index file is closed even if parsing fails
        # (the original leaked the handle on error)
        with open(tiddler_index) as f:
            for line in f:
                # each index line is "<bag>/<filename>"
                bag, filename = line.rstrip().split("/", 1)
                filepath = os.path.join("resources", bag, filename)
                filepath = resource_filename(package_name, filepath)
                try:  # convert Windows paths to URIs
                    sys.getwindowsversion()  # XXX: safer detection than sys.platform or os.name?
                    uri = "file:///%s" % filepath.replace("\\", "/")
                except AttributeError:
                    uri = "file://%s" % filepath
                # NOTE(review): return value unused -- presumably called to
                # force extraction of the companion .meta file; confirm
                resource_filename(package_name, "%s.meta" % filepath)
                instance_tiddlers.setdefault(bag, []).append(uri)
    except IOError:
        # no packaged index available; fall back to caller-provided URIs
        for bag, uris in store_contents.items():
            instance_tiddlers[bag] = uris
    return instance_tiddlers
Example #10
0
def make_app(global_conf, full_stack=True, **app_conf):
    """
    Set vigiboard up with the settings found in the PasteDeploy configuration
    file used.

    This is the PasteDeploy factory for the vigiboard application.

    C{app_conf} contains all the application-specific settings (those defined
    under ``[app:main]``).

    @param global_conf: The global settings for vigiboard (those
        defined under the ``[DEFAULT]`` section).
    @type global_conf: C{dict}
    @param full_stack: Should the whole TG2 stack be set up?
    @type full_stack: C{str} or C{bool}
    @return: The vigiboard application with all the relevant middleware
        loaded.
    """
    app = make_base_app(global_conf, full_stack=full_stack, **app_conf)

    # Customization of static files via /etc/vigilo/vigiboard/public/.
    custom_static = StaticURLParser('/etc/vigilo/vigiboard/public/')

    # Two static-file middlewares are defined that look for files in the
    # currently loaded theme: the first searches the application-specific
    # files, the second searches the common files.
    app_static = StaticURLParser(resource_filename(
        'vigilo.themes.public', 'vigiboard'))
    common_static = StaticURLParser(resource_filename(
        'vigilo.themes.public', 'common'))
    local_static = StaticURLParser(resource_filename(
        'vigiboard', 'public'))
    # Cascade resolves in order: custom overrides theme, theme overrides
    # local static files, and the application itself is the last resort.
    app = Cascade([custom_static, app_static, common_static, local_static, app])
    return app
Example #11
0
def resource_filename(name, pfx=None):
    """
Attempt to find and return the filename of the resource named by the first argument
in the first location of:

# as name in the current directory
# as name in the `pfx` subdirectory of the current directory if provided
# as name relative to the package
# as pfx/name relative to the package

The last two alternatives is used to locate resources distributed in the package.
This includes certain XSLT and XSD files.

:param name: The string name of a resource
:param pfx: An optional prefix to use in searching for name
:return: the resource's filename, or None when not found

    """
    # BUG FIX: the two filesystem branches used to open the file and return
    # its *contents* (fd.read()) even though the function's name and docstring
    # promise a filename; return the path instead, consistent with the
    # pkg_resources branches below.
    if os.path.exists(name):
        return name
    if pfx:
        local = os.path.join(pfx, name)
        if os.path.exists(local):
            return local
    if pkg_resources.resource_exists(__name__, name):
        return pkg_resources.resource_filename(__name__, name)
    if pfx and pkg_resources.resource_exists(__name__, "%s/%s" % (pfx, name)):
        return pkg_resources.resource_filename(__name__, "%s/%s" % (pfx, name))

    return None
def resolve_main_template():
    """Locate Plone's main_template, trying Plone 5, then Plone 4 with
    Sunburst, then the classic skins fallback.

    Returns a ViewPageTemplateFile for the first template found, or None
    when none of the known locations exists.
    """
    candidates = (
        # Plone 5
        ('Products.CMFPlone',
         os.path.join('browser', 'templates', 'main_template.pt')),
        # Plone 4 with Sunburst
        ('plonetheme.sunburst',
         os.path.join('skins', 'sunburst_templates', 'main_template.pt')),
        # Fallback
        ('Products.CMFPlone',
         os.path.join('skins', 'plone_templates', 'main_template.pt')),
    )
    for package, template in candidates:
        if pkg_resources.resource_exists(package, template):
            filename = pkg_resources.resource_filename(package, template)
            return ViewPageTemplateFile(filename)
  def __init__(self):
    """Resolve the nupic examples/scripts directories and this test's own
    experiments directory, asserting each exists on disk."""

    # Location of the installed nupic examples (only validated here).
    examplesDir = resource_filename("nupic", os.path.join("..", "examples"))

    _debugOut("examplesDir=<%s>" % (examplesDir,))

    assert os.path.exists(examplesDir), \
           "%s is not present in filesystem" % examplesDir

    # This is where we find OPF binaries (e.g., run_opf_experiment.py, etc.)
    # In the autobuild, it is a read-only directory
    self.__opfBinDir = resource_filename("nupic", os.path.join("..", "scripts"))
    assert os.path.exists(self.__opfBinDir), \
           "%s is not present in filesystem" % self.__opfBinDir
    _debugOut("self.__opfBinDir=<%s>" % self.__opfBinDir)

    # Where this script is running from (our autotest counterpart may have
    # copied it from its original location)
    self.__testRunDir = os.path.abspath(os.path.dirname(__file__))
    _debugOut("self.__testRunDir=<%s>" % self.__testRunDir)

    # Parent directory of our private OPF experiments
    self.__opfExperimentsParentDir = os.path.join(self.__testRunDir,
                                                  "experiments")
    assert os.path.exists(self.__opfExperimentsParentDir), \
           "%s is not present in filesystem" % self.__opfExperimentsParentDir
    _debugOut("self.__opfExperimentsParentDir=<%s>"
        % self.__opfExperimentsParentDir)
Example #14
0
def copyConfig():
    '''copyConfig()
Copies samples configuration if necessary to /etc/odinsys directory.'''
    from pkg_resources import Requirement, resource_filename
 
    # Get our file.
    filename_odinsys = resource_filename(Requirement.parse("odinsys"),
                                            "config/odinsys.sample.conf")

    filename_odinsys_log = resource_filename(Requirement.parse("odinsys"),
                                            "config/odinsyslogger.sample.conf")
 
    try:
        import shutil
 
        # Create the directory.
        if not os.path.exists("/opt/odinsys"):
            os.mkdir("/opt/odinsys")
 
        # Copy the configuration. Don't clobber existing files.
        if not os.path.exists("/etc/odinsys/odinsys.conf"):
            shutil.copyfile(filename_odinsys, "/etc/odinsys/odinsys.conf")

        if not os.path.exists("/etc/odinsys/odinsyslogger.conf"):
            shutil.copyfile(filename_odinsys_log, "/etc/odinsys/odinsyslogger.conf")
 
    except IOError:
        print "Unable to copy configuration file to /etc/odinsys directory."
Example #15
0
def find_packaged_regressor(name):
    """Return the filesystem path of a regression method shipped with YATSM.

    See :data:`packaged_regressions` for the list of available pre-packaged
    regressors.

    Args:
        name (str): name of packaged regression object

    Returns:
        str: path to packaged regression method

    Raises:
        KeyError: raise KeyError if user specifies unknown regressor
        IOError: raise IOError if the packaged regressor cannot be found

    """
    # Reject unknown names before touching the filesystem.
    if name not in packaged_regressions:
        raise KeyError('Cannot load unknown packaged regressor %s' % name)

    pickles_dir = pkg_resources.resource_filename(__name__, 'pickles')
    logger.debug('Checking data files in %s for packaged regressors' % pickles_dir)
    if not pkg_resources.resource_exists(__name__, 'pickles'):
        raise IOError('Cannot find packaged regressors in %s. Did you install '
                      'YATSM via setuptools?' % pickles_dir)

    # The directory exists; now look for the specific pickle.
    pickle_resource = os.path.join('pickles', name + '.pkl')
    if pkg_resources.resource_exists(__name__, pickle_resource):
        return pkg_resources.resource_filename(__name__, pickle_resource)

    raise IOError('Cannot find packaged regression method %s, but package '
                  'directory exists. Check the contents of %s if possible'
                  % (pickle_resource, pickles_dir))
Example #16
0
    def __init__(self):
        """Build the Tornado application: load the portal configuration,
        create the Control backend, then register URL handlers and
        socket settings."""
        configfile=resource_filename("portal","config_prod.ini")
        self.config = safeConfigParser(configfile)
        logging.info("Loaded portal config %s " % configfile)
        self.control = Control(self.config)
        logging.info("Control loaded")

        # Static assets, REST-ish account/balance endpoints, and a catch-all
        # that serves the single-page app (index.html).
        handlers = [
            (r"/lib/(.*)", StaticFileHandler, {"path": resource_filename("illiquids","www/lib")}),
            (r"/local/(.*)", StaticFileHandler,  {"path": resource_filename("illiquids","www/static")}),
            (r"/accounts/(.*)/(.*)", AccountsHandler, {"control": self.control}),
            (r"/accounts/(.*)", AccountsHandler, {"control": self.control}),
            (r"/balances/(.*)/(.*)", BalancesHandler, {"control": self.control}),
            (r"/balances/(.*)", BalancesHandler, {"control": self.control}),
            (r"/", LayoutHandler, {"control": self.control}),
            (r"/(.*)", StaticFileHandler, {"path": resource_filename("illiquids","www"),
                                           "default_filename":"index.html"}),
        ]

        settings = {
            'cookie_secret' : "whare_are_the_bank_accounts",
            'debug' : options.debug,
            'enabled_protocols' : ['websocket', 'flashsocket', 'xhr-multipart', 'xhr-polling'],
            'flash_policy_port' : 843,
            'flash_policy_file' : resource_filename("illiquids",'flashpolicy.xml'),
            'socket_io_port' : 8001,
        }

        # BUG FIX: this line was indented with a tab while the rest of the
        # method uses spaces, which raises TabError under Python 3.
        tornado.web.Application.__init__(self, handlers, **settings)
Example #17
0
def test_cli():
    """Test cds-dojson CLI.

    For each (source, compiled) schema pair, `compile_schema` must succeed
    and its output must equal the packaged pre-compiled schema.
    """
    runner = CliRunner()
    for src_schema, compiled_schema in (
            ('records/video_src-v1.0.0.json', 'records/video-v1.0.0.json'),
            ('records/project_src-v1.0.0.json', 'records/project-v1.0.0.json')):
        result = runner.invoke(
            compile_schema,
            [pkg_resources.resource_filename('cds_dojson.schemas', src_schema), ]
        )

        assert 0 == result.exit_code
        compiled_schema_result = json.loads(result.output)
        with open(pkg_resources.resource_filename(
                'cds_dojson.schemas', compiled_schema), 'r') as f:
            compile_schema_expected = json.load(f)
        assert compile_schema_expected == compiled_schema_result
Example #18
0
    def _get_system_wiki_list(self):
        """Helper function that enumerates all 'system' wikis. The
        list is combined of default wiki pages and pages that are
        bundled with Bloodhound dashboard and search plugins."""
        from bhdashboard import wiki

        # Directories of packaged default wiki pages: stock Trac pages plus
        # the pages shipped with the dashboard and search plugins.
        paths = [resource_filename('trac.wiki',
                                   'default-pages')] + \
                [resource_filename('bhdashboard',
                                   'default-pages')] + \
                [resource_filename('bhsearch',
                                   'default-pages')]
        pages = []
        original_pages = []
        for path in paths:
            for page in os.listdir(path):
                filename = os.path.join(path, page)
                # page file names are percent-encoded on disk; decode them
                # back to the wiki page name
                page = unicode_unquote(page.encode('utf-8'))
                if os.path.isfile(filename):
                    original_pages.append(page)
        # 'Trac*' pages get renamed via wiki.new_name(); skip the rename when
        # there is no new name or it would collide with an existing page.
        for original_name in original_pages:
            if original_name.startswith('Trac'):
                new_name = wiki.new_name(original_name)
                if not new_name:
                    continue
                if new_name in original_pages:
                    continue
                name = new_name
                # original trac wikis should also be included in the list
                pages.append(original_name)
            else:
                name = original_name
            pages.append(name)
        return pages
Example #19
0
def _check_glycine_fixq_charges(system):
    """Assert that per-atom charges and radii match the values from the
    glycine fixq parameter file (keyed by element number)."""
    ac = {1: 0.2, 6: 0.5, 7: -1.0, 8: -0.5}  # Charges
    ar = {1: 1.2*angstrom, 6: 1.7*angstrom, 7: 1.55*angstrom, 8: 1.50*angstrom}  # Radii
    for i in range(system.natom):
        assert abs(system.charges[i] - ac[system.numbers[i]]) < 1e-5
        assert abs(system.radii[i] - ar[system.numbers[i]]) < 1e-5


def test_generator_glycine_fixq():
    """Generating the fixq force field twice (once with silent logging) must
    assign identical charges/radii and yield the same energy."""
    system = get_system_glycine()
    fn_pars = pkg_resources.resource_filename(__name__, '../../data/test/parameters_glycine_fixq.txt')
    ff = ForceField.generate(system, fn_pars)
    assert len(ff.parts) == 1 #Non-periodic, so only one part
    part_pair_ei = ff.part_pair_ei
    # check part settings
    assert part_pair_ei.pair_pot.alpha == 0.0
    # check charges and atomic radii (duplicated checks extracted to helper)
    _check_glycine_fixq_charges(system)

    # Regenerate with logging silenced; results must be unaffected.
    system = get_system_glycine()
    log.set_level(log.silent)
    fn_pars = pkg_resources.resource_filename(__name__, '../../data/test/parameters_glycine_fixq.txt')
    ff2 = ForceField.generate(system, fn_pars)
    log.set_level(log.debug)
    # check charges and atomic radii
    _check_glycine_fixq_charges(system)
    energy = ff.compute()
    energy2 = ff2.compute()
    assert abs(energy - energy2) < 1e-3
Example #20
0
    def add_panel(self, metric_list, dashboard_name="son-monitor", title=None, graph_type="lines"):
        """
        add a graph panel to the son-monitor dashboard
        :param metric_list: list of dictionaries from the nsd [{"desc":"", "metric":""}, ...]
        :param dashboard_name: name of the Grafana dashboard to modify
        :param title: panel title; defaults to the first word of the first
            metric's description
        :param graph_type: "lines" or "bars"
        :return:
        """
        # fetch the current dashboard definition
        url = self.construct_api_url('dashboards/db/{0}'.format(dashboard_name))
        ret = self.session.get(url)
        dashboard = ret.json()
        dashboard = dashboard['dashboard']

        # add a new row (with-blocks close the JSON template files; the
        # original leaked the handles from json.load(open(...)))
        src_path = os.path.join('grafana', 'grafana_row.json')
        srcfile = pkg_resources.resource_filename(__name__, src_path)
        with open(srcfile) as fp:
            dashboard['rows'].append(json.load(fp))
        row_index = len(dashboard['rows']) - 1

        # add a new panel to the row
        src_path = os.path.join('grafana', 'grafana_panel.json')
        srcfile = pkg_resources.resource_filename(__name__, src_path)
        with open(srcfile) as fp:
            new_panel = json.load(fp)
        new_panel['id'] = row_index + 1
        if graph_type == "lines":
            new_panel['bars'] = False
            new_panel['lines'] = True
        elif graph_type == "bars":
            new_panel['bars'] = True
            new_panel['lines'] = False
        dashboard['rows'][row_index]['panels'].append(new_panel)
        panel_index = len(dashboard['rows'][row_index]['panels']) - 1

        if title is None:
            # set panel title (first word of metric description)
            # need to make a copy of the list, because otherwise the original metric_list seems to get corrupted
            new_list = list(copy.deepcopy(metric_list))
            title = new_list[0]['desc'].split(' ')[0]

        dashboard['rows'][row_index]['panels'][panel_index]['title'] = title

        for metric in metric_list:
            # add a new target(graph) to the panel
            src_path = os.path.join('grafana', 'grafana_target.json')
            srcfile = pkg_resources.resource_filename(__name__, src_path)
            with open(srcfile) as fp:
                new_target = json.load(fp)
            targets = dashboard['rows'][row_index]['panels'][panel_index]['targets']
            target_index = len(targets)
            new_target['refId'] = chr(ord('A') + target_index)
            targets.append(new_target)

            query = metric['metric']
            # NOTE: the original query.replace('"', '\"') was a no-op
            # ('\"' is the same one-character string as '"'), so the query
            # is used verbatim here.
            targets[target_index]['expr'] = query
            legend = metric['desc']
            targets[target_index]['legendFormat'] = legend
            logging.info('installed metric: {0} {1}'.format(legend, metric))

        # push the modified dashboard back to Grafana
        url = self.construct_api_url('dashboards/db')
        ret = self.session.post(url, json={'dashboard': dashboard, 'overwrite': True})
        logging.info('post new dashboard: {0}'.format(ret))
Example #21
0
 def TestExample(self):
     """
     Sets all variables corresponding to --test in the methods script
     """
     # Point the GUI entry fields at the bundled example data set.
     self.GPAentryVariable.set(str(os.path.join(resource_filename(__name__, 'exampledata'), 'Gene_presence_absence.csv')))
     self.TraitsentryVariable.set(str(os.path.join(resource_filename(__name__, 'exampledata'), 'Tetracycline_resistance.csv')))
     # No tree or restriction file; output goes to the working directory.
     self.TreeentryVariable.set("")
     self.RestrictVariable.set("")
     self.Outputdir.set("./")
     # Analysis options: no max-hits limit, comma delimiter, cutoff 15,
     # no timestamp, no tree output, zero permutations.
     self.maxhitsvar.set("")
     self.delimvar.set(",")
     self.scvar.set("15")
     self.notimevar.set(0)
     self.writetreevar.set(0)
     self.permvar.set("0")
     # Flags selecting which p-value columns are enabled (only pVar and
     # pEPWVar are on).
     self.pVar.set(1)
     self.pBVar.set(0)
     self.pBHVar.set(0)
     self.pPWVar.set(0)
     self.pEPWVar.set(1)
     # Cutoffs per p-value column (1.0 effectively disables filtering).
     self.pNaive.set("0.05")
     self.pBonf.set("1.0")
     self.pBH.set("1.0")
     self.pPW.set("1.0")
     self.pEPW.set("0.05")
     self.pPerm.set("1.0")
Example #22
0
File: qc.py Project: asntech/chilin
def stat_fastqc(workflow, conf):  # collect raw reads quality and GC contents
    """Attach FastQC summary-collection steps to *workflow*.

    Collects each sample's ``fastqc_data.txt`` (first mate only for
    paired-end data) and registers a step that combines them into a JSON
    file; when ``conf.long`` is set, also registers a step rendering the
    detailed long-report figure (R script + PDF).
    """
    sums = []
    for raw, target in conf.sample_pairs:
        if conf.pe:
            # paired-end: only the first mate's FastQC output is collected
            sums.append(target[0] + "_100k_fastqc/fastqc_data.txt")
        else:
            sums.append(target + "_100k_fastqc/fastqc_data.txt")

    attach_back(workflow,
                PythonCommand(
                    json_fastqc,
                    input={"fastqc_summaries": sums},
                    output={"json": conf.json_prefix + "_fastqc.json"},
                    param={"ids": conf.sample_bases,
                           "id": conf.id},
                    name = "collect fastqc results"))

    if conf.long:  ## prepare long document images and tex
        attach_back(workflow,
        PythonCommand(fastqc_detailed_figure,
                      input = {"dbaccessor": resource_filename("chilin2.modules.dbaccessor", "ChiLinQC.db"),
                               "template": resource_filename("chilin2.modules.summary", "R_culmulative_plot.R"), 
                               "json": conf.json_prefix + "_fastqc.json"},
                      output = {"R": conf.prefix + "_raw_sequence_qc.R",
                                "pdf": conf.prefix + "_raw_sequence_qc.pdf"},
                      param={"ids": conf.sample_bases}))
Example #23
0
def config_obj(configfile=None, modulename=None):
    """Build a ConfigObj from a module's packaged ``*.cfg`` file(s) and/or
    an explicit configuration file.

    Parameters
    ----------
    configfile : str, optional
        Path to a configuration file whose values override the module
        defaults.  When given without ``modulename``, it is loaded as-is.
    modulename : str, optional
        Dotted module name (e.g. ``pkg.sub.mod``); ``mod.cfg`` — or every
        ``*.cfg`` inside a resource directory named ``mod`` — is loaded
        from the installed package.

    Returns
    -------
    ConfigObj
        The merged configuration.  An empty ConfigObj is returned when
        neither argument is given (previously this raised
        UnboundLocalError because ``configdata`` was never bound).
    """
    defaults = config_defaults()
    # Bind up front so the no-argument call no longer crashes on return.
    configdata = ConfigObj(unrepr=True)
    if modulename:
        lastdot = modulename.rfind(".")
        firstdot = modulename.find(".")
        packagename = modulename[:lastdot]
        top_level_package = modulename[:firstdot]
        modname = modulename[lastdot + 1:]

        # Prefer "<modname>.cfg"; fall back to a resource named <modname>,
        # which may itself be a directory of .cfg files.
        modfile = pkg_resources.resource_filename(packagename, modname + ".cfg")
        if not os.path.exists(modfile):
            modfile = pkg_resources.resource_filename(packagename, modname)
        if os.path.isdir(modfile):
            configfiles = glob.glob(os.path.join(modfile, "*.cfg"))
        else:
            configfiles = [modfile]

        # Expose package locations so config values can reference them.
        top_level_dir = pkg_resources.resource_filename(top_level_package, "")[:-1].replace("\\", "/")
        package_dir = pkg_resources.resource_filename(packagename, "")[:-1].replace("\\", "/")
        defaults.update(dict(top_level_dir=top_level_dir, package_dir=package_dir))
        configdata.merge(dict(DEFAULT=defaults))
        for fname in configfiles:  # renamed from 'file' (shadowed a builtin)
            filedata = ConfigObj(fname, unrepr=True)
            filedata.merge(dict(DEFAULT=defaults))
            configdata.merge(filedata)

    if configfile:
        if modulename:
            overrides = ConfigObj(configfile, unrepr=True)
            overrides.merge(dict(DEFAULT=defaults))
            configdata.merge(overrides)
        else:
            # No module defaults were loaded: the file is the whole config.
            configdata = ConfigObj(configfile, unrepr=True)
    return configdata
Example #24
0
    def test_checkpoint(self):
        """Round-trip an NMA through a checkpoint file.

        Every array attribute must come back with the same shape and
        (to float64 round-off) the same values; scalar attributes must
        match exactly.
        """
        molecule = load_molecule_cp2k(
            pkg_resources.resource_filename(__name__, "../data/test/cp2k/pentane/sp.out"),
            pkg_resources.resource_filename(__name__, "../data/test/cp2k/pentane/freq.out"))
        nma1 = NMA(molecule)
        with tmpdir(__name__, 'test_checkpoint') as dn:
            fn_out = os.path.join(dn, 'test.chk')
            nma1.write_to_file(fn_out)
            nma2 = NMA.read_from_file(fn_out)

        # Shapes must round-trip exactly.
        self.assertEqual(nma1.freqs.shape, nma2.freqs.shape)
        self.assertEqual(nma1.modes.shape, nma2.modes.shape)
        self.assertEqual(nma1.masses.shape, nma2.masses.shape)
        self.assertEqual(nma1.numbers.shape, nma2.numbers.shape)
        self.assertEqual(nma1.coordinates.shape, nma2.coordinates.shape)
        self.assertEqual(nma1.inertia_tensor.shape, nma2.inertia_tensor.shape)

        # Relative error bounded by float64 round-off.  assertTrue replaces
        # the deprecated assert_ alias (removed in Python 3.12).
        self.assertTrue(abs(nma1.freqs - nma2.freqs).max()/abs(nma1.freqs).max() < 1e-15)
        self.assertTrue(abs(nma1.modes - nma2.modes).max()/abs(nma1.modes).max() < 1e-15)
        self.assertTrue(abs(nma1.masses - nma2.masses).max()/abs(nma1.masses).max() < 1e-15)
        self.assertTrue(abs(nma1.coordinates - nma2.coordinates).max()/abs(nma1.coordinates).max() < 1e-15)
        self.assertTrue(abs(nma1.inertia_tensor - nma2.inertia_tensor).max()/abs(nma1.inertia_tensor).max() < 1e-15)
        self.assertTrue((nma1.numbers==nma2.numbers).all())

        self.assertAlmostEqual(nma1.mass, nma2.mass)
        self.assertAlmostEqual(nma1.energy, nma2.energy)
        self.assertEqual(nma1.multiplicity, nma2.multiplicity)
        self.assertEqual(nma1.symmetry_number, nma2.symmetry_number)
Example #25
0
 def test_load_indices(self):
     """load_indices honours the groups flag: grouped parsing yields a list
     of index blocks, flat parsing a single flat list of indices."""
     fn07 = pkg_resources.resource_filename(__name__, "../data/test/an/fixed.07.txt")
     fn08 = pkg_resources.resource_filename(__name__, "../data/test/an/fixed.08.txt")
     self.assertEqual(load_indices(fn07, groups=True), [[3, 2, 6]])
     self.assertEqual(load_indices(fn07), [3, 2, 6])
     self.assertEqual(load_indices(fn08), [5, 4, 8])
Example #26
0
 def test_load_molecule_vasp_53(self):
     """Parse a VASP 5.3.5 CONTCAR/OUTCAR pair and spot-check the result."""
     contcar = pkg_resources.resource_filename(
         __name__, "../data/test/lucas/vasp_5_3_5_complex/CONTCAR_opt")
     outcar = pkg_resources.resource_filename(
         __name__, "../data/test/lucas/vasp_5_3_5_complex/OUTCAR_freq")
     mol = load_molecule_vasp(contcar, outcar)
     # Values read from CONTCAR: one carbon followed by four hydrogens.
     assert mol.numbers[0] == 6
     assert (mol.numbers[1:] == 1).all()
     assert mol.size == 5
     assert mol.unit_cell.matrix[0, 0] == 15.0*angstrom
     assert mol.unit_cell.matrix[1, 2] == 0.0
     self.assertAlmostEqual(mol.coordinates[0, 0]/angstrom, 7.15840, 3)
     self.assertAlmostEqual(mol.coordinates[1, 2]/angstrom, 8.44640, 2)  #?
     self.assertAlmostEqual(mol.coordinates[-1, -1]/angstrom, 6.95131, 2)  #?
     # Values read from the OUTCAR of the frequency run.
     assert mol.masses[0] == 12.011*amu
     assert (mol.masses[1:] == 1.000*amu).all()
     hunit = electronvolt/angstrom**2
     assert mol.hessian[0, 0] == 53.624756*hunit
     assert mol.hessian[-1, -1] == 31.299419*hunit
     # Off-diagonal element: average of the two raw (unsymmetrized) values.
     self.assertAlmostEqual(mol.hessian[2, 5], 0.5*(-7.551817 + 3.319877)*hunit)
     assert mol.energy == -24.11901936*electronvolt
     gunit = electronvolt/angstrom
     assert mol.gradient[0, 0] == 0.096977*gunit
     assert mol.gradient[2, 1] == 0.100275*gunit
     assert mol.gradient[-1, -1] == -0.212810*gunit
Example #27
0
 def test_load_molecule_vasp_5_3_5_gamma(self):
     """Parse a VASP 5.3.5 gamma-point CONTCAR/OUTCAR pair and spot-check it."""
     contcar = pkg_resources.resource_filename(
         __name__, "../data/test/julianna/vasp_5_3_5_gamma/CONTCAR_opt")
     outcar = pkg_resources.resource_filename(
         __name__, "../data/test/julianna/vasp_5_3_5_gamma/OUTCAR_freq")
     mol = load_molecule_vasp(contcar, outcar)
     # Values read from CONTCAR: one carbon followed by four hydrogens.
     assert mol.numbers[0] == 6
     assert (mol.numbers[1:] == 1).all()
     assert mol.size == 5
     assert mol.unit_cell.matrix[0, 0] == 15.0*angstrom
     assert mol.unit_cell.matrix[1, 2] == 0.0
     self.assertAlmostEqual(mol.coordinates[0, 0]/angstrom, 7.15782, 3)
     self.assertAlmostEqual(mol.coordinates[1, 2]/angstrom, 8.44278, 1)  #??
     self.assertAlmostEqual(mol.coordinates[-1, -1]/angstrom, 6.95393, 2)  #?
     # Values read from the OUTCAR of the frequency run.
     assert mol.masses[0] == 12.011*amu
     assert (mol.masses[1:] == 1.000*amu).all()
     hunit = electronvolt/angstrom**2
     assert mol.hessian[0, 0] == 47.756815*hunit
     assert mol.hessian[-1, -1] == 31.561376*hunit
     # Off-diagonal element: average of the two raw (unsymmetrized) values.
     self.assertAlmostEqual(mol.hessian[2, 5], 0.5*(-2.265871 + -3.645039)*hunit)
     assert mol.energy == -24.12364199*electronvolt
     gunit = electronvolt/angstrom
     assert mol.gradient[0, 0] == -0.005459*gunit
     assert mol.gradient[2, 1] == -0.008215*gunit
     assert mol.gradient[-1, -1] == 0.003424*gunit
Example #28
0
 def installApache(self, document_root, ip=None, port=None):
   """Deploy an Apache (httpd) instance serving ``document_root``.

   Renders httpd.conf and php.ini from the packaged templates (the
   php.ini template can be overridden via the 'php_ini' option),
   registers the generated files in ``self.path_list``, and creates the
   'httpd' wrapper script that launches Apache with that configuration.

   ip defaults to this partition's global IPv6 address, port to '9080'.
   Returns the URL the server will listen on.
   """
   if ip is None:
     ip = self.getGlobalIPv6Address()
   if port is None:
     port = '9080'
   apache_config = dict(
       pid_file=os.path.join(self.run_directory, 'httpd.pid'),
       lock_file=os.path.join(self.run_directory, 'httpd.lock'),
       ip=ip,
       port=port,
       error_log=os.path.join(self.log_directory, 'httpd-error.log'),
       access_log=os.path.join(self.log_directory, 'httpd-access.log'),
       document_root=document_root,
       php_ini_dir=self.etc_directory
   )
   config_file = self.createConfigurationFile('httpd.conf',
       self.substituteTemplate(pkg_resources.resource_filename(__name__,
         'template/apache.in'), apache_config))
   self.path_list.append(config_file)
   php_ini = pkg_resources.resource_filename(__name__, 'template/php.ini.in')
   # dict.has_key() was removed in Python 3; the "in" operator is the
   # equivalent test and works on both Python 2 and 3.
   if 'php_ini' in self.options:
     php_ini = os.path.join(self.options['php_ini'], 'php.ini.in')
   self.path_list.append(self.createConfigurationFile('php.ini',
       self.substituteTemplate(php_ini, dict(tmp_directory=self.tmp_directory))))
   self.path_list.extend(zc.buildout.easy_install.scripts([(
     'httpd',
       __name__ + '.apache', 'runApache')], self.ws,
         sys.executable, self.wrapper_directory, arguments=[
           dict(
             required_path_list=[],
             binary=self.options['httpd_binary'],
             config=config_file
           )
         ]))
   return 'http://[%s]:%s' % (ip, port)
Example #29
0
    def test_multi_proc(self):
        """Run the picker with two worker processes and verify the loci
        GFF3 output contains transcripts, mRNA and CDS features."""
        json_conf = configurator.to_json(None)
        json_conf["pick"]["run_options"]["procs"] = 2
        json_conf["pick"]["files"]["input"] = pkg_resources.resource_filename("Mikado.tests",
                                                                              "mikado_prepared.gtf")
        json_conf["pick"]["files"]["output_dir"] = tempfile.gettempdir()
        json_conf["pick"]["files"]["loci_out"] = "mikado.multiproc.loci.gff3"
        json_conf["pick"]["files"]["subloci_out"] = "mikado.multiproc.subloci.gff3"
        json_conf["pick"]["files"]["monoloci_out"] = "mikado.multiproc.monoloci.gff3"
        json_conf["pick"]["files"]["log"] = "mikado.multiproc.log"
        json_conf["db_settings"]["db"] = pkg_resources.resource_filename("Mikado.tests", "mikado.db")
        json_conf["log_settings"]["log_level"] = "WARNING"

        # The picker exits via SystemExit when it finishes.
        pick_caller = picker.Picker(json_conf=json_conf)
        with self.assertRaises(SystemExit), self.assertLogs("main_logger", "INFO"):
            pick_caller()

        loci_path = os.path.join(tempfile.gettempdir(), "mikado.multiproc.loci.gff3")
        self.assertTrue(os.path.exists(loci_path))
        with to_gff(loci_path) as inp_gff:
            # "is not True" replaces the awkward "not ... is True" form.
            lines = [row for row in inp_gff if row.header is not True]
            self.assertGreater(len(lines), 0)
            self.assertGreater(len([row for row in lines if row.is_transcript is True]), 0)
            self.assertGreater(len([row for row in lines if row.feature == "mRNA"]), 0)
            self.assertGreater(len([row for row in lines if row.feature == "CDS"]), 0)

        # Clean up every output file; a plain loop instead of a throwaway
        # list comprehension used only for its side effects.
        for leftover in glob.glob(os.path.join(tempfile.gettempdir(), "mikado.multiproc.") + "*"):
            os.remove(leftover)
Example #30
0
 def test_cross_reference(self):
     """_cross_reference resolves a :doc: link to the referenced file path."""
     modelfile = resource_filename('desidatamodel.test', 't/fits_file.rst')
     model = DataModel(modelfile, os.path.dirname(modelfile))
     ref = model._cross_reference("See :doc:`Other file <fits_file>`")
     # The link target "fits_file" resolves back to the same packaged file.
     self.assertEqual(ref, modelfile)
Example #31
0
from gradio.tunneling import create_tunnel
from gradio import encryptor
from gradio import queue
from functools import wraps
import io

# Server binding: GRADIO_SERVER_PORT / GRADIO_NUM_PORTS / GRADIO_SERVER_NAME
# environment variables override the documented defaults.
INITIAL_PORT_VALUE = int(os.getenv(
    'GRADIO_SERVER_PORT', "7860"))  # The http server will try to open on port 7860. If not available, 7861, 7862, etc.
TRY_NUM_PORTS = int(os.getenv(
    'GRADIO_NUM_PORTS', "100"))  # Number of ports to try before giving up and throwing an exception.
LOCALHOST_NAME = os.getenv(
    'GRADIO_SERVER_NAME', "127.0.0.1")
# Remote endpoints used for tunnel requests and feature analytics.
GRADIO_API_SERVER = "https://api.gradio.app/v1/tunnel-request"
GRADIO_FEATURE_ANALYTICS_URL = "https://api.gradio.app/gradio-feature-analytics/"

# Locations of the frontend assets packaged with gradio; version.txt pins
# the matching static bundle on the gradio S3 bucket.
STATIC_TEMPLATE_LIB = pkg_resources.resource_filename("gradio", "frontend/")
STATIC_PATH_LIB = pkg_resources.resource_filename("gradio", "frontend/static")
VERSION_FILE = pkg_resources.resource_filename("gradio", "version.txt")
with open(VERSION_FILE) as version_file:
    GRADIO_STATIC_ROOT = "https://gradio.s3-us-west-2.amazonaws.com/" + \
        version_file.read().strip() + "/static/"

# Flask serves templates from the packaged frontend; static_folder is empty
# and static_url_path points at a dummy prefix.
app = Flask(__name__,
            template_folder=STATIC_TEMPLATE_LIB,
            static_folder="",
            static_url_path="/none/")
CORS(app)
cache_buster = CacheBuster(
    config={'extensions': ['.js', '.css'], 'hash_size': 5})
cache_buster.init_app(app)
# NOTE(review): "secret" is a weak fallback for the session key — confirm
# deployments always set GRADIO_KEY.
app.secret_key = os.getenv("GRADIO_KEY", "secret")
Example #32
0
# -*- coding: utf-8 -*-

from typing import Dict

import os
import pkg_resources

from bag.design import Module

# Absolute path to the packaged netlist description for the qdr_top cell.
yaml_file = pkg_resources.resource_filename(
    __name__, os.path.join('netlist_info', 'qdr_top.yaml'))


# noinspection PyPep8Naming
class bag_serdes_ec__qdr_top(Module):
    """Module for library bag_serdes_ec cell qdr_top.

    Fill in high level description here.
    """
    def __init__(self, bag_config, parent=None, prj=None, **kwargs):
        # Delegate to Module, wiring in the packaged qdr_top.yaml netlist
        # template; all other arguments pass through unchanged.
        Module.__init__(self,
                        bag_config,
                        yaml_file,
                        parent=parent,
                        prj=prj,
                        **kwargs)

    @classmethod
    def get_params_info(cls):
        # type: () -> Dict[str, str]
        return dict(
Example #33
0
    def __init__(self, *args):
        """Build the main window.

        Reads the configuration, constructs the network layer, the buffer
        list / stacked-buffer splitter, the menu, toolbar and status
        widgets, then shows the window and (optionally) auto-connects to
        the configured relay.
        """
        QtGui.QMainWindow.__init__(*(self,) + args)

        self.config = config.read()

        self.resize(1000, 600)
        self.setWindowTitle(NAME)

        self.debug_dialog = None
        self.debug_lines = []

        self.about_dialog = None
        self.connection_dialog = None
        self.preferences_dialog = None

        # network
        self.network = Network()
        self.network.statusChanged.connect(self._network_status_changed)
        self.network.messageFromWeechat.connect(self._network_weechat_msg)

        # list of buffers
        self.list_buffers = BufferListWidget()
        self.list_buffers.currentRowChanged.connect(self._buffer_switch)

        # default buffer
        self.buffers = [Buffer()]
        self.stacked_buffers = QtGui.QStackedWidget()
        self.stacked_buffers.addWidget(self.buffers[0].widget)

        # splitter with buffers + chat/input
        splitter = QtGui.QSplitter()
        splitter.addWidget(self.list_buffers)
        splitter.addWidget(self.stacked_buffers)

        self.setCentralWidget(splitter)

        if self.config.getboolean('look', 'statusbar'):
            self.statusBar().visible = True

        # actions for menu and toolbar
        # name -> [icon file, status tip, shortcut, slot]
        actions_def = {
            'connect': [
                'network-connect.png', 'Connect to WeeChat',
                'Ctrl+O', self.open_connection_dialog],
            'disconnect': [
                'network-disconnect.png', 'Disconnect from WeeChat',
                'Ctrl+D', self.network.disconnect_weechat],
            'debug': [
                'edit-find.png', 'Debug console window',
                'Ctrl+B', self.open_debug_dialog],
            'preferences': [
                'preferences-other.png', 'Preferences',
                'Ctrl+P', self.open_preferences_dialog],
            'about': [
                'help-about.png', 'About',
                'Ctrl+H', self.open_about_dialog],
            'save connection': [
                'document-save.png', 'Save connection configuration',
                'Ctrl+S', self.save_connection],
            'quit': [
                'application-exit.png', 'Quit application',
                'Ctrl+Q', self.close],
        }
        self.actions = {}
        for name, action in list(actions_def.items()):
            self.actions[name] = QtGui.QAction(
                QtGui.QIcon(
                    resource_filename(__name__, 'data/icons/%s' % action[0])),
                name.capitalize(), self)
            self.actions[name].setStatusTip(action[1])
            self.actions[name].setShortcut(action[2])
            self.actions[name].triggered.connect(action[3])

        # menu
        self.menu = self.menuBar()
        menu_file = self.menu.addMenu('&File')
        menu_file.addActions([self.actions['connect'],
                              self.actions['disconnect'],
                              self.actions['preferences'],
                              self.actions['save connection'],
                              self.actions['quit']])
        menu_window = self.menu.addMenu('&Window')
        menu_window.addAction(self.actions['debug'])
        menu_help = self.menu.addMenu('&Help')
        menu_help.addAction(self.actions['about'])
        # network status indicator, docked in the menu bar corner if supported
        self.network_status = QtGui.QLabel()
        self.network_status.setFixedHeight(20)
        self.network_status.setFixedWidth(200)
        self.network_status.setContentsMargins(0, 0, 10, 0)
        self.network_status.setAlignment(QtCore.Qt.AlignRight)
        if hasattr(self.menu, 'setCornerWidget'):
            self.menu.setCornerWidget(self.network_status,
                                      QtCore.Qt.TopRightCorner)
        self.network_status_set(self.network.status_disconnected)

        # toolbar
        toolbar = self.addToolBar('toolBar')
        toolbar.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)
        toolbar.addActions([self.actions['connect'],
                            self.actions['disconnect'],
                            self.actions['debug'],
                            self.actions['preferences'],
                            self.actions['about'],
                            self.actions['quit']])

        self.buffers[0].widget.input.setFocus()

        # open debug dialog
        if self.config.getboolean('look', 'debug'):
            self.open_debug_dialog()

        # auto-connect to relay
        if self.config.getboolean('relay', 'autoconnect'):
            self.network.connect_weechat(self.config.get('relay', 'server'),
                                         self.config.get('relay', 'port'),
                                         self.config.getboolean('relay',
                                                                'ssl'),
                                         self.config.get('relay', 'password'),
                                         self.config.get('relay', 'lines'))

        self.show()
Example #34
0
        """Find position to insert a buffer in list."""
        index = -1
        if next_buffer == '0x0':
            index = len(self.buffers)
        else:
            index = [i for i, b in enumerate(self.buffers)
                     if b.pointer() == next_buffer]
            if index:
                index = index[0]
        if index < 0:
            print('Warning: unable to find position for buffer, using end of '
                  'list by default')
            index = len(self.buffers)
        return index

    def closeEvent(self, event):
        """Called when QWeeChat window is closed."""
        # Disconnect cleanly, close the debug console if open, and persist
        # the configuration before letting Qt finish the close.
        self.network.disconnect_weechat()
        if self.debug_dialog:
            self.debug_dialog.close()
        config.write(self.config)
        QtGui.QMainWindow.closeEvent(self, event)


# Application entry point: create the Qt application, apply the style and
# window icon, build the main window and run the event loop until exit.
app = QtGui.QApplication(sys.argv)
app.setStyle(QtGui.QStyleFactory.create('Cleanlooks'))
app.setWindowIcon(QtGui.QIcon(
    resource_filename(__name__, 'data/icons/weechat.png')))
main = MainWindow()
sys.exit(app.exec_())
Example #35
0
class TelegramChannel(EFBChannel):
    """
    EFB Channel - Telegram (Master)
    Based on python-telegram-bot, Telegram Bot API

    Author: Eana Hufwe <https://github.com/blueset>

    Configuration file example:
        .. code-block:: yaml

            token: "12345678:1a2b3c4d5e6g7h8i9j"
            admins:
            - 102938475
            - 91827364
            flags:
                join_msg_threshold_secs: 10
                multiple_slave_chats: false
    """
    def get_chats(self) -> List[EFBChat]:
        # Not supported by this master channel.
        raise EFBOperationNotSupported()

    def get_chat(self,
                 chat_uid: ChatID,
                 member_uid: Optional[ChatID] = None) -> EFBChat:
        # Not supported by this master channel.
        raise EFBOperationNotSupported()

    def get_chat_picture(self, chat: EFBChat) -> IO[bytes]:
        # Not supported by this master channel.
        raise EFBOperationNotSupported()

    # Meta Info
    channel_name = "Telegram Master"
    channel_emoji = "✈"
    channel_id = ModuleID("blueset.telegram")
    channel_type = ChannelType.Master
    # Message types this channel accepts.
    supported_message_types = {
        MsgType.Text, MsgType.File, MsgType.Audio, MsgType.Image, MsgType.Link,
        MsgType.Location, MsgType.Sticker, MsgType.Video, MsgType.Animation
    }
    __version__ = __version__  # re-export the module-level version on the class

    # Data
    # NOTE(review): presumably signals the polling loop to stop — confirm.
    _stop_polling = False
    timeout_count = 0

    # Constants
    config: dict  # parsed YAML configuration; populated by load_config()

    # Translator
    # gettext catalogue from the packaged "locale" directory; falls back to
    # untranslated strings when no matching catalogue is found.
    translator: NullTranslations = translation("efb_telegram_master",
                                               resource_filename(
                                                   'efb_telegram_master',
                                                   'locale'),
                                               fallback=True)
    locale: Optional[str] = None

    # RPC server
    rpc_server: SimpleXMLRPCServer = None

    def __init__(self, instance_id: InstanceID = None):
        """
        Initialization.

        Verifies WebP support in Pillow, silences noisy dependency
        loggers, loads the channel configuration, constructs the manager
        objects, and registers the basic Telegram command and callback
        handlers.
        """
        super().__init__(instance_id)

        # Check PIL support for WebP
        Image.init()
        if 'WEBP' not in Image.ID:
            raise EFBException(
                self._(
                    "WebP support of Pillow is required.\n"
                    "Please refer to Pillow Documentation for instructions.\n"
                    "https://pillow.readthedocs.io/"))

        # Suppress debug logs from dependencies
        logging.getLogger('requests').setLevel(logging.CRITICAL)
        logging.getLogger('urllib3').setLevel(logging.CRITICAL)
        logging.getLogger('telegram.bot').setLevel(logging.CRITICAL)
        logging.getLogger(
            'telegram.vendor.ptb_urllib3.urllib3.connectionpool').setLevel(
                logging.CRITICAL)

        # Set up logger
        self.logger: logging.Logger = logging.getLogger(__name__)

        # Load configs
        self.load_config()

        # Load predefined MIME types
        mimetypes.init(files=["mimetypes"])

        # Initialize managers
        # NOTE: order matters — later managers receive `self` and may read
        # the earlier ones during their own construction.
        self.flag: ExperimentalFlagsManager = ExperimentalFlagsManager(self)
        self.db: DatabaseManager = DatabaseManager(self)
        self.chat_dest_cache: ChatDestinationCache = ChatDestinationCache(
            self.flag("send_to_last_chat"))
        self.bot_manager: TelegramBotManager = TelegramBotManager(self)
        self.chat_binding: ChatBindingManager = ChatBindingManager(self)
        self.commands: CommandsManager = CommandsManager(self)
        self.master_messages: MasterMessageProcessor = MasterMessageProcessor(
            self)
        self.slave_messages: SlaveMessageProcessor = SlaveMessageProcessor(
            self)

        # Without auto_locale, rebuild the translator from the packaged
        # locale directory (overriding the class-level default).
        if not self.flag('auto_locale'):
            self.translator = translation("efb_telegram_master",
                                          resource_filename(
                                              'efb_telegram_master', 'locale'),
                                          fallback=True)

        # Basic message handlers
        # Commands are only accepted from new messages/channel posts, not edits.
        non_edit_filter = Filters.update.message | Filters.update.channel_post
        self.bot_manager.dispatcher.add_handler(
            CommandHandler("start", self.start, filters=non_edit_filter))
        self.bot_manager.dispatcher.add_handler(
            CommandHandler("help", self.help, filters=non_edit_filter))
        self.bot_manager.dispatcher.add_handler(
            CommandHandler("info", self.info, filters=non_edit_filter))
        self.bot_manager.dispatcher.add_handler(
            CallbackQueryHandler(self.void_callback_handler, pattern="void"))
        self.bot_manager.dispatcher.add_handler(
            CallbackQueryHandler(self.bot_manager.session_expired))
        self.bot_manager.dispatcher.add_handler(
            CommandHandler("react", self.react, filters=non_edit_filter))

        self.bot_manager.dispatcher.add_error_handler(self.error)

        self.rpc_utilities = RPCUtilities(self)

    @property
    def _(self):
        """gettext shortcut bound to this channel's translator."""
        return self.translator.gettext

    @property
    def ngettext(self):
        """Plural-aware gettext bound to this channel's translator."""
        return self.translator.ngettext

    def load_config(self):
        """
        Load configuration from path specified by the framework.

        Configuration file is in YAML format.

        Raises:
            FileNotFoundError: when the config file does not exist.
            ValueError: when the bot token or the admin list is malformed.
        """
        config_path = efb_utils.get_config_path(self.channel_id)
        if not config_path.exists():
            raise FileNotFoundError(
                self._("Config File does not exist. ({path})").format(
                    path=config_path))
        with config_path.open() as f:
            data = YAML().load(f)

        # Verify configuration
        if not isinstance(data.get('token', None), str):
            raise ValueError(self._('Telegram bot token must be a string'))

        # Normalise 'admins' to a list: a single int or numeric string is
        # accepted and wrapped.
        admins = data.get('admins', None)
        if isinstance(admins, int):
            data['admins'] = [admins]
        elif isinstance(admins, str) and admins.isdigit():
            data['admins'] = [int(admins)]
        if not isinstance(data.get('admins', None),
                          list) or len(data['admins']) < 1:
            raise ValueError(
                self.
                _("Admins' user IDs must be a list of one number or more."
                  ))
        # Coerce numeric-string entries to int; reject anything else.
        # enumerate() replaces the range(len(...)) index loop.
        for idx, admin in enumerate(data['admins']):
            if isinstance(admin, str) and admin.isdigit():
                admin = int(admin)
                data['admins'][idx] = admin
            if not isinstance(admin, int):
                raise ValueError(
                    self.
                    _('Admin ID is expected to be an int, but {data} is found.'
                      ).format(data=admin))

        self.config = data.copy()

    def info(self, update: Update, context: CallbackContext):
        """
        Show info of the current telegram conversation.
        Triggered by `/info`.

        Dispatches to the group / channel / general variant depending on
        where the command was issued, then replies with the result.
        """
        if update.message.chat.type != telegram.Chat.PRIVATE:  # Group message
            msg = self.info_group(update)
        elif update.effective_message.forward_from_chat and \
                update.effective_message.forward_from_chat.type == 'channel':  # Forwarded channel command.
            msg = self.info_channel(update)
        else:  # Talking to the bot.
            msg = self.info_general()

        update.message.reply_text(msg)

    def info_general(self):
        """Generate string for information of the current running EFB instance."""
        slaves = coordinator.slaves
        # Accumulate fragments in a list and join once at the end.
        parts = [self.ngettext(
            "This is EFB Telegram Master Channel {version}, running on EFB {fw_version}.\n"
            "{count} slave channel activated:",
            "This is EFB Telegram Master Channel {version}, running on EFB {fw_version}.\n"
            "{count} slave channels activated:",
            len(slaves)).format(
                version=self.__version__,
                fw_version=ehforwarderbot.__version__,
                count=len(slaves))]
        for key in slaves:
            slave = slaves[key]
            parts.append("\n- %s %s (%s, %s)" % (slave.channel_emoji,
                                                 slave.channel_name,
                                                 key,
                                                 slave.__version__))
        if coordinator.middlewares:
            parts.append(self.ngettext("\n\n{count} middleware activated:",
                                       "\n\n{count} middlewares activated:",
                                       len(coordinator.middlewares)).format(
                                           count=len(coordinator.middlewares)))
            for middleware in coordinator.middlewares:
                parts.append("\n- %s (%s, %s)" % (middleware.middleware_name,
                                                  middleware.middleware_id,
                                                  middleware.__version__))
        return "".join(parts)

    def info_channel(self, update):
        """Generate string for chat linking info of a channel.

        Looks up the remote chats linked to the forwarded-from Telegram
        channel in the database and lists each one, falling back to an
        "unknown chat" line when the remote chat record is missing.
        """
        chat = update.effective_message.forward_from_chat
        links = self.db.get_chat_assoc(
            master_uid=etm_utils.chat_id_to_str(self.channel_id, chat.id))
        if links:  # Linked chat
            msg = self._("The channel {group_name} ({group_id}) is linked to:") \
                .format(group_name=chat.title,
                        group_id=chat.id)
            for i in links:
                channel_id, chat_id = etm_utils.chat_id_str_to_id(i)
                d = self.chat_binding.get_chat_from_db(channel_id, chat_id)
                if d:
                    msg += "\n- %s" % ETMChat(db=self.db, chat=d).full_name
                else:
                    # Remote chat not found in DB: show what we know.
                    msg += self._(
                        "\n- {channel_emoji} {channel_name}: Unknown chat ({chat_id})"
                    ).format(channel_emoji=coordinator.slaves[channel_id].
                             channel_emoji,
                             channel_name=coordinator.slaves[channel_id].
                             channel_name,
                             chat_id=chat_id)
        else:
            msg = self._("The channel {group_name} ({group_id}) is "
                         "not linked to any remote chat. ").format(
                             group_name=chat.title, group_id=chat.id)
        return msg

    def info_group(self, update):
        """Generate string for chat linking info of a group.

        Lists every remote chat linked to the current Telegram group,
        with fallbacks for chats missing from the database and for slave
        channels that are no longer loaded.
        """
        links = self.db.get_chat_assoc(master_uid=etm_utils.chat_id_to_str(
            self.channel_id, update.message.chat_id))
        if links:  # Linked chat
            msg = self._(
                "The group {group_name} ({group_id}) is linked to:").format(
                    group_name=update.message.chat.title,
                    group_id=update.message.chat_id)
            for i in links:
                channel_id, chat_id = etm_utils.chat_id_str_to_id(i)
                d = self.chat_binding.get_chat_from_db(channel_id, chat_id)
                if d:
                    msg += "\n- %s (%s:%s)" % (ETMChat(
                        db=self.db, chat=d).full_name, d.module_id, d.chat_uid)
                else:
                    # No DB record for the remote chat.
                    if channel_id not in coordinator.slaves:
                        # The slave channel itself is not loaded either.
                        msg += self._(
                            "\n- Unknown channel {channel_id}: {chat_id}"
                        ).format(channel_id=channel_id, chat_id=chat_id)
                    else:
                        msg += self._(
                            "\n- {channel_emoji} {channel_name}: Unknown chat ({chat_id})"
                        ).format(channel_emoji=coordinator.slaves[channel_id].
                                 channel_emoji,
                                 channel_name=coordinator.slaves[channel_id].
                                 channel_name,
                                 chat_id=chat_id)
        else:
            msg = self._(
                "The group {group_name} ({group_id}) is not linked to any remote chat. "
                "To link one, use /link.").format(
                    group_name=update.message.chat.title,
                    group_id=update.message.chat_id)
        return msg

    def start(self, update: Update, context: CallbackContext):
        """
        Process bot command `/start`.

        With arguments, `/start` is used as the chat-linking callback and
        is only honoured in groups or forwarded channel messages; without
        arguments it replies with a short welcome text.
        """
        if context.args:  # Group binding command
            if update.effective_message.chat.type != telegram.Chat.PRIVATE or \
                    (update.effective_message.forward_from_chat and
                     update.effective_message.forward_from_chat.type == telegram.Chat.CHANNEL):
                self.chat_binding.link_chat(update, context.args)
            else:
                # Linking into a private conversation is not allowed.
                self.bot_manager.send_message(
                    update.effective_chat.id,
                    self.
                    _('You cannot link remote chats to here. Please try again.'
                      ))
        else:
            txt = self._(
                "This is EFB Telegram Master Channel.\n\n"
                "To learn more, please visit https://github.com/blueset/efb-telegram-master ."
            )
            self.bot_manager.send_message(update.effective_chat.id, txt)

    def react(self, update: Update, context: CallbackContext):
        """Process bot command ``/react``: send, remove, or list reactions.

        Must be issued as a reply to a message previously logged by ETM.
        ``/react <emoji>`` sends a reaction, ``/react -`` removes the user's
        reaction, and a bare ``/react`` lists current reactors.
        """
        message: Message = update.effective_message

        # Extract the reaction argument, if any ("/react <emoji>").
        reaction = None
        args = message.text and message.text.split(' ', 1)
        if args and len(args) > 1:
            reaction = args[1]

        if not message.reply_to_message:
            message.reply_html(
                self._("Reply to a message with this command and an emoji "
                       "to send a reaction. "
                       "Ex.: <code>/react �</code>.\n"
                       "Send <code>/react -</code> to remove your reaction "
                       "from a message."))
            return

        # BUGFIX: use the effective message (not update.message, which is
        # None for channel posts) to locate the replied-to message, matching
        # how `message` was obtained above.
        target: Message = message.reply_to_message
        msg_log = self.db.get_msg_log(
            master_msg_id=etm_utils.message_id_to_str(
                chat_id=target.chat_id, message_id=target.message_id))
        if msg_log is None:
            message.reply_text(
                self._("The message you replied to is not recorded in ETM database. "
                       "You cannot react to this message."))
            return

        if not reaction:
            # No emoji given: list who reacted to the target message.
            if msg_log.pickle is None:
                message.reply_text(
                    self._("Reactors of this message are not recorded in database."
                           ))
                return
            msg_log_obj: ETMMsg = ETMMsg.unpickle(msg_log.pickle, self.db)
            reactors = msg_log_obj.reactions
            if not reactors:
                message.reply_html(
                    self._("This message has no reactions yet. "
                           "Reply to a message with this command and "
                           "an emoji to send a reaction. "
                           "Ex.: <code>/react �</code>."))
                return
            # Render "emoji:\n    name" lines for each non-empty group.
            text = ""
            for key, values in reactors.items():
                if not values:
                    continue
                text += f"{key}:\n"
                for j in values:
                    text += f"    {j.display_name}\n"
            text = text.strip()
            message.reply_text(text)
            return

        message_id = msg_log.slave_message_id
        channel_id, chat_uid = etm_utils.chat_id_str_to_id(
            msg_log.slave_origin_uid)

        if channel_id not in coordinator.slaves:
            message.reply_text(
                self._("The slave channel involved in this message ({}) is not available. "
                       "You cannot react to this message.").format(channel_id))
            return

        channel = coordinator.slaves[channel_id]

        if channel.suggested_reactions is None:
            message.reply_text(
                self._("The channel involved in this message ({}) does not accept reactions. "
                       "You cannot react to this message.").format(channel_id))
            return

        try:
            chat_obj = channel.get_chat(chat_uid)
        except EFBChatNotFound:
            message.reply_text(
                self._("The chat involved in this message ({}) is not found. "
                       "You cannot react to this message.").format(chat_uid))
            return

        # "-" is the sentinel for removing one's own reaction.
        if reaction == "-":
            reaction = None

        try:
            coordinator.send_status(
                EFBReactToMessage(chat=chat_obj,
                                  msg_id=message_id,
                                  reaction=reaction))
        except EFBOperationNotSupported:
            message.reply_text(
                self._("You cannot react anything to this message."))
            return
        except EFBMessageReactionNotPossible:
            prompt = self._("{} is not accepted as a reaction to this message."
                            ).format(reaction)
            if channel.suggested_reactions:
                prompt += "\n" + self._("You may want to try: {}").format(
                    ", ".join(channel.suggested_reactions[:10]))
            message.reply_text(prompt)
            return

    def help(self, update: Update, context: CallbackContext):
        """Process bot command ``/help``.

        Sends the list of supported bot commands to the user who issued
        the command (as a private message keyed by the sender's user ID).
        """
        txt = self._(
            "EFB Telegram Master Channel\n"
            "/link\n"
            "    Link a remote chat to an empty Telegram group.\n"
            "    Followed by a regular expression to filter results.\n"
            "/chat\n"
            "    Generate a chat head to start a conversation.\n"
            "    Followed by a regular expression to filter results.\n"
            "/extra\n"
            "    List all additional features from slave channels.\n"
            "/unlink_all\n"
            "    Unlink all remote chats in this chat.\n"
            "/info\n"
            "    Show information of the current Telegram chat.\n"
            "/react [emoji]\n"
            "    React to a message with an emoji, or show a list of members reacted.\n"
            "/update_info\n"
            # BUGFIX: wording was "profile picture a linked Telegram group".
            "    Update name and profile picture of a linked Telegram group.\n"
            "    Only works in singly linked group where the bot is an admin.\n"
            "/help\n"
            "    Print this command list.")
        self.bot_manager.send_message(update.message.from_user.id, txt)

    def poll(self):
        """
        Message polling process.

        Delegates to the bot manager, which runs the Telegram update
        polling loop.
        """
        self.bot_manager.polling()

    def error(self, update: Update, context: CallbackContext):
        """
        Print error to console, and send error message to first admin.
        Triggered by python-telegram-bot error callback.

        Known error classes are handled specially (polling conflicts,
        flaky network, chat migration); anything else is forwarded to the
        first configured admin and logged.
        """
        error = context.error
        # HTTP 409: another getUpdates call is running against the same
        # bot token, i.e. a second instance of this bot is polling.
        if "(409)" in str(error):
            msg = self._(
                'Conflicted polling detected. If this error persists, '
                'please ensure you are running only one instance of this Telegram bot.'
            )
            self.logger.critical(msg)
            self.bot_manager.send_message(self.config['admins'][0], msg)
            return
        # Transient Telegram API failure with no associated update: log only.
        if "Invalid server response" in str(error) and not update:
            self.logger.error(
                "Boom! Telegram API is no good. (Invalid server response.)")
            return
        # Re-raise the stored exception so we can dispatch on its type with
        # ordinary except clauses.
        # noinspection PyBroadException
        try:
            raise error
        except telegram.error.Unauthorized:
            self.logger.error(
                "The bot is not authorised to send update:\n%s\n%s",
                str(update), str(error))
        except telegram.error.BadRequest as e:
            # Editing a message with identical content triggers this; it is
            # harmless (usually caused by rapid repeated button clicks).
            if e.message == "Message is not modified" and update.callback_query:
                self.logger.error("Chill bro, don't click that fast.")
            else:
                self.logger.error("Message request is invalid.\n%s\n%s",
                                  str(update), str(error))
                self.bot_manager.send_message(
                    self.config['admins'][0],
                    self._("Message request is invalid.\n{error}\n"
                           "<code>{update}</code>").format(
                               error=html.escape(str(error)),
                               update=html.escape(str(update))),
                    parse_mode="HTML")
        except (telegram.error.TimedOut, telegram.error.NetworkError):
            # Count network failures so we can periodically notify the admin.
            self.timeout_count += 1
            self.logger.error(
                "Poor internet connection detected.\n"
                "Number of network error occurred since last startup: %s\n%s\nUpdate: %s",
                self.timeout_count, str(error), str(update))
            if update is not None and isinstance(
                    getattr(update, "message", None), telegram.Message):
                update.message.reply_text(self._(
                    "This message is not processed due to poor internet environment "
                    "of the server.\n"
                    "<code>{code}</code>").format(
                        code=html.escape(str(error))),
                                          quote=True,
                                          parse_mode="HTML")

            # Notify the first admin every `network_error_prompt_interval`
            # failures (0 disables the prompt).
            timeout_interval = self.flag('network_error_prompt_interval')
            if timeout_interval > 0 and self.timeout_count % timeout_interval == 0:
                self.bot_manager.send_message(
                    self.config['admins'][0],
                    self.ngettext(
                        "<b>EFB Telegram Master channel</b>\n"
                        "You may have a poor internet connection on your server. "
                        "Currently {count} network error is detected.\n"
                        "For more details, please refer to the log.",
                        "<b>EFB Telegram Master channel</b>\n"
                        "You may have a poor internet connection on your server. "
                        "Currently {count} network errors are detected.\n"
                        "For more details, please refer to the log.",
                        self.timeout_count).format(count=self.timeout_count),
                    parse_mode="HTML")
        except telegram.error.ChatMigrated as e:
            # A group was upgraded to a supergroup: re-point every chat
            # association from the old Telegram chat ID to the new one.
            new_id = e.new_chat_id
            old_id = update.message.chat_id
            count = 0
            for i in self.db.get_chat_assoc(
                    master_uid=etm_utils.chat_id_to_str(
                        self.channel_id, old_id)):
                self.logger.debug(
                    'Migrating slave chat %s from Telegram chat %s to %s.', i,
                    old_id, new_id)
                self.db.remove_chat_assoc(slave_uid=i)
                self.db.add_chat_assoc(master_uid=etm_utils.chat_id_to_str(
                    self.channel_id, new_id),
                                       slave_uid=i)
                count += 1
            self.bot_manager.send_message(
                new_id,
                self.ngettext(
                    "Chat migration detected.\n"
                    "All {count} remote chat are now linked to this new group.",
                    "Chat migration detected.\n"
                    "All {count} remote chats are now linked to this new group.",
                    count).format(count=count))
        except Exception:
            # Last resort: forward the raw error and update to the admin;
            # always log the full traceback even if that delivery fails.
            try:
                self.bot_manager.send_message(
                    self.config['admins'][0],
                    self.
                    _("EFB Telegram Master channel encountered error <code>{error}</code> "
                      "caused by update <code>{update}</code>.").format(
                          error=html.escape(str(error)),
                          update=html.escape(str(update))),
                    parse_mode="HTML")
            except Exception as ex:
                self.logger.exception(
                    "Failed to send error message through Telegram: %s", ex)

            finally:
                self.logger.exception(
                    'Unhandled telegram bot error!\n'
                    'Update %s caused error %s. Exception', update, error)

    def send_message(self, msg: EFBMsg) -> EFBMsg:
        """Deliver ``msg`` by delegating to the slave message processor."""
        return self.slave_messages.send_message(msg)

    def send_status(self, status: EFBStatus):
        """Deliver ``status`` by delegating to the slave message processor."""
        return self.slave_messages.send_status(status)

    def get_message_by_id(self, chat: EFBChat,
                          msg_id: MessageID) -> Optional['EFBMsg']:
        """Retrieve a logged slave-channel message by its chat and message ID.

        Returns ``None`` when no record exists, and raises
        ``EFBOperationNotSupported`` when a record exists but its pickled
        payload was never stored.
        """
        origin_uid = etm_utils.chat_id_to_str(chat=chat)
        msg_log = self.db.get_msg_log(slave_origin_uid=origin_uid,
                                      slave_msg_id=msg_id)
        if msg_log is None:
            # Message is not found.
            return None
        if not msg_log.pickle:
            # Pickled data is not recorded.
            raise EFBOperationNotSupported(
                self._("Message is not possible to be retrieved."))
        return ETMMsg.unpickle(msg_log.pickle, self.db)

    def void_callback_handler(self, update: Update, context: CallbackContext):
        """Answer callback queries from inert ("void") buttons.

        Replies with a short notice so the Telegram client stops showing a
        loading state; the answer is cached client-side for 180 seconds.
        """
        self.bot_manager.answer_callback_query(
            update.callback_query.id,
            text=self._("This button does nothing."),
            cache_time=180)

    def stop_polling(self):
        """Gracefully shut the channel down.

        Stops the RPC utilities, the bot manager, the master message worker
        and the database worker, in that order.
        """
        self.logger.debug("Gracefully stopping %s (%s).", self.channel_name,
                          self.channel_id)
        self.rpc_utilities.shutdown()
        self.bot_manager.graceful_stop()
        self.master_messages.stop_worker()
        self.db.stop_worker()
        self.logger.debug("%s (%s) gracefully stopped.", self.channel_name,
                          self.channel_id)
Example #36
0
    def __init__(self, instance_id: InstanceID = None):
        """
        Initialization.

        Verifies Pillow's WebP support, configures logging, loads the
        channel configuration, constructs the manager objects, and
        registers the Telegram command/callback handlers.

        Args:
            instance_id: optional identifier distinguishing multiple
                instances of this channel.

        Raises:
            EFBException: if the installed Pillow build lacks WebP support.
        """
        super().__init__(instance_id)

        # Check PIL support for WebP
        Image.init()
        if 'WEBP' not in Image.ID:
            raise EFBException(
                self._(
                    "WebP support of Pillow is required.\n"
                    "Please refer to Pillow Documentation for instructions.\n"
                    "https://pillow.readthedocs.io/"))

        # Suppress debug logs from dependencies
        logging.getLogger('requests').setLevel(logging.CRITICAL)
        logging.getLogger('urllib3').setLevel(logging.CRITICAL)
        logging.getLogger('telegram.bot').setLevel(logging.CRITICAL)
        logging.getLogger(
            'telegram.vendor.ptb_urllib3.urllib3.connectionpool').setLevel(
                logging.CRITICAL)

        # Set up logger
        self.logger: logging.Logger = logging.getLogger(__name__)

        # Load configs
        self.load_config()

        # Load predefined MIME types
        mimetypes.init(files=["mimetypes"])

        # Initialize managers
        self.flag: ExperimentalFlagsManager = ExperimentalFlagsManager(self)
        self.db: DatabaseManager = DatabaseManager(self)
        self.chat_dest_cache: ChatDestinationCache = ChatDestinationCache(
            self.flag("send_to_last_chat"))
        self.bot_manager: TelegramBotManager = TelegramBotManager(self)
        self.chat_binding: ChatBindingManager = ChatBindingManager(self)
        self.commands: CommandsManager = CommandsManager(self)
        self.master_messages: MasterMessageProcessor = MasterMessageProcessor(
            self)
        self.slave_messages: SlaveMessageProcessor = SlaveMessageProcessor(
            self)

        # Use the bundled translation catalogue unless auto_locale is set.
        if not self.flag('auto_locale'):
            self.translator = translation("efb_telegram_master",
                                          resource_filename(
                                              'efb_telegram_master', 'locale'),
                                          fallback=True)

        # Basic message handlers
        # Filter restricts command handlers to fresh messages/channel posts
        # (i.e. excludes edited updates).
        non_edit_filter = Filters.update.message | Filters.update.channel_post
        self.bot_manager.dispatcher.add_handler(
            CommandHandler("start", self.start, filters=non_edit_filter))
        self.bot_manager.dispatcher.add_handler(
            CommandHandler("help", self.help, filters=non_edit_filter))
        self.bot_manager.dispatcher.add_handler(
            CommandHandler("info", self.info, filters=non_edit_filter))
        self.bot_manager.dispatcher.add_handler(
            CallbackQueryHandler(self.void_callback_handler, pattern="void"))
        # Fallback handler: any other callback query is treated as expired.
        self.bot_manager.dispatcher.add_handler(
            CallbackQueryHandler(self.bot_manager.session_expired))
        self.bot_manager.dispatcher.add_handler(
            CommandHandler("react", self.react, filters=non_edit_filter))

        self.bot_manager.dispatcher.add_error_handler(self.error)

        self.rpc_utilities = RPCUtilities(self)
Example #37
0
# encoding: utf-8
import os
import pkg_resources
import re
import subprocess
import tempfile
import unittest

import pdf_redactor

FIXTURE_PATH = pkg_resources.resource_filename(__name__, "test-ssns.pdf")


class RedactFixture(object):
	"""Context manager that runs ``pdf_redactor`` over an input PDF and
	exposes the path of the redacted output file on entry.

	NOTE(review): the matching ``__exit__`` (closing ``input_file`` and
	cleaning up ``redacted_path``) is not visible in this excerpt — confirm
	it exists further down.
	"""

	def __init__(self, input_path, options):
		# Path of the PDF to redact and the redactor options to apply to it.
		self.input_path = input_path
		self.options = options

	def __enter__(self):
		# Feed the input PDF to the redactor through the options object.
		self.input_file = open(self.input_path, "rb")
		self.options.input_stream = self.input_file

		# Write the redacted result to a fresh temporary file.
		fd, self.redacted_path = tempfile.mkstemp(".pdf")
		self.redacted_file = os.fdopen(fd, "wb")
		self.options.output_stream = self.redacted_file

		pdf_redactor.redactor(self.options)
		self.redacted_file.close()

		return self.redacted_path
Example #38
0
 def __init__(self):
     """Load the compiled numpy/Weld converter shared library via ctypes.

     The platform-specific library file name is built by the
     ``to_shared_lib`` helper and resolved inside this package.
     """
     lib = to_shared_lib("numpy_weld_convertor")
     lib_file = pkg_resources.resource_filename(__name__, lib)
     # PyDLL keeps the GIL held during calls, so the library may safely use
     # the Python C API.
     self.utils = ctypes.PyDLL(lib_file)
Example #39
0
def test_resource_executor(yaml_path, name, expected):
    """Load an executor from a packaged YAML resource and verify its name
    and its number of drivers."""
    config_path = resource_filename('jina',
                                    '/'.join(('resources', yaml_path)))
    executor = BaseExecutor.load_config(config_path)
    assert executor.name == name
    assert len(executor._drivers) == expected
def _test_component_to_complex_workflow(browser):
    """GUI test: drop a Paraboloid component onto nested workflows.

    Starts a workspace, loads two example files, instantiates VehicleSim2,
    adds a Paraboloid to its dataflow, then verifies that dropping the
    Paraboloid onto the top-level workflow and onto the nested ``sim_acc``
    workflow each add exactly one workflow component figure.
    """
    project_dict, workspace_page = startup(browser)

    # Add paraboloid and vehicle_threesim files
    file1_path = pkg_resources.resource_filename('openmdao.examples.simple',
                                                 'paraboloid.py')
    file2_path = pkg_resources.resource_filename(
        'openmdao.examples.enginedesign', 'vehicle_threesim.py')
    workspace_page.add_file(file1_path)
    workspace_page.add_file(file2_path)

    # create an instance of VehicleSim2
    sim_name = workspace_page.put_element_on_grid("VehicleSim2")

    # Drag paraboloid element into sim dataflow figure
    sim = workspace_page.get_dataflow_figure(sim_name)
    paraboloid = workspace_page.find_library_button('Paraboloid')
    chain = workspace_page.drag_element_to(paraboloid,
                                           sim('content_area').element, False)
    workspace_page.release(chain)
    paraboloid_name = NameInstanceDialog(workspace_page).create_and_dismiss()
    paraboloid_pathname = sim_name + "." + paraboloid_name

    # Switch to Workflow pane and show the sim workflow
    workspace_page('workflow_tab').click()
    workspace_page.show_workflow(sim_name)

    # See how many workflow component figures there are before we add to it
    eq(len(workspace_page.get_workflow_component_figures()), 16)

    ############################################################################
    # Drop paraboloid component onto the top level workflow for sim
    ############################################################################
    workspace_page('dataflow_tab').click()
    workspace_page.expand_object(sim_name)
    workspace_page.add_object_to_workflow(paraboloid_pathname, sim_name)

    # Confirm that there is one more workflow component figure
    workspace_page('workflow_tab').click()
    eq(len(workspace_page.get_workflow_component_figures()), 17)

    # Confirm that the paraboloid has been added to the sim workflow by trying
    # to access it.
    workspace_page.find_object_button(sim_name + "." + paraboloid_name)

    ############################################################################
    # Drop paraboloid component onto the sim_acc workflow under sim
    ############################################################################
    workspace_page('dataflow_tab').click()
    simsim_name = sim_name + '.sim_acc'
    workspace_page.add_object_to_workflow(paraboloid_pathname, simsim_name)

    # Confirm that there is one more workflow component figure
    workspace_page('workflow_tab').click()
    eq(len(workspace_page.get_workflow_component_figures()), 18)

    # Confirm that the paraboloid has been added to the sim workflow by trying
    # to access it.
    workspace_page.find_object_button(sim_name + "." + paraboloid_name)

    ############################################################################
    # Drop paraboloid component onto the vehicle workflow under sim_acc
    # This should NOT work since the paraboloid is not in the vehicle assembly
    ############################################################################

    # These error messages are tested in SequentialFlow, though we may want
    # to have one test that makes sure that the error dialog makes it through.

    #workspace_page('dataflow_tab').click()
    #workspace_page.expand_object(simsim_name)
    #simsimsim_name = simsim_name + '.vehicle'
    #workspace_page.add_object_to_workflow(paraboloid_pathname, simsimsim_name)
    #message = NotifierPage.wait(workspace_page)
    #eq(message, "x")

    # Confirm that there is NOT a new workflow component figure
    #workspace_page('workflow_tab').click()
    #eq(len(workspace_page.get_workflow_component_figures()), 18)

    # Clean up.
    closeout(project_dict, workspace_page)
Example #41
0
def get_resource(path: str) -> Path:
    """Return the filesystem :class:`Path` of a file bundled under the
    ``bldr`` package's ``data`` directory."""
    relative = Path('data', path)
    return Path(resource_filename('bldr', str(relative)))
Example #42
0
from argparse import ArgumentParser, Namespace
from typing import Any, Dict, List, Optional, Sequence, Tuple, Union

import jsonschema
import pkg_resources

from runrestic import __version__
from runrestic.runrestic.tools import deep_update

# Module-level logger for this configuration module.
logger = logging.getLogger(__name__)

# Defaults merged into every loaded configuration.
CONFIG_DEFAULTS = {
    "execution": {"parallel": False, "exit_on_error": True, "retry_count": 0}
}

# Load the JSON schema used to validate configuration files.
# BUGFIX: use a context manager so the file handle is closed
# deterministically instead of being leaked to the garbage collector.
with open(
    pkg_resources.resource_filename("runrestic", "runrestic/schema.json"), "r"
) as _schema_file:
    SCHEMA = json.load(_schema_file)


def cli_arguments(args: Union[List[str], None] = None) -> Tuple[Namespace, List[str]]:
    parser = ArgumentParser(
        prog="runrestic",
        description="""
            A wrapper for restic. It runs restic based on config files and also outputs metrics.
            To initialize the repos, run `runrestic init`.
            If you don't define any actions, it will default to `backup prune check`, and `stats` if metrics are set.
            """,
    )
    parser.add_argument(
        "actions",
        type=str,
Example #43
0
def __bootstrap__():
    """Auto-generated loader stub: locate the compiled extension shipped
    inside the package and load it in place of this Python module."""
    global __bootstrap__, __loader__, __file__
    import sys, pkg_resources, imp
    # Resolve the packaged shared object to a real filesystem path.
    __file__ = pkg_resources.resource_filename(__name__, '_max_len_seq.so')
    # Drop the stub machinery before handing the module over.
    __loader__ = None; del __bootstrap__, __loader__
    # Replace this module's contents with the dynamically loaded extension.
    imp.load_dynamic(__name__,__file__)
Example #44
0
def start(
    targets, module_name, logging_level, config_file="api.yaml", schema_file="api.schema.yaml"
):
    """Start the API.

    Keyword arguments:
    targets -- shared target list
    module_name -- module name set by the main code to separate targets
    logging_level -- level logging set in the global config file
    config_file -- name of the config file for the API
    schema_file -- schema of the config file for the API

    Returns early (without starting the API) when the configuration file
    is absent or cannot be parsed.
    """
    global shared_targets_list
    global global_module_name
    global token
    global max_targets
    global target_lifetime
    global max_target_lifetime

    # initiate list in dict from Manager (shared variable with main code)
    targets[module_name] = []

    # transform config file to absolute path (relative to the entry script)
    entry_path = os.path.dirname(os.path.abspath(sys.argv[0]))
    config_file_abs = os.path.join(entry_path, config_file)

    # load inventory configuration
    if not os.path.exists(config_file_abs):
        log_api.info("No configuration file for api module")
        return

    # getting a reference of this list and module name
    shared_targets_list = targets
    global_module_name = module_name

    # validate yaml config with the schema
    schema = pkg_resources.resource_filename(__name__, schema_file)
    yaml_validator = Core(source_file=config_file_abs, schema_files=[schema])
    yaml_validator.validate(raise_exception=True)

    # if config file exists, we load it and parse it
    with open(config_file_abs, "r") as conf_file:
        try:
            conf = yaml.safe_load(conf_file)
            log_api.debug("network_devices configuration loaded")
        except Exception as error:
            log_api.error("Unable to load config file: {0}".format(error))
            # BUGFIX: abort on parse failure; previously execution fell
            # through with `conf` unbound and raised NameError below.
            return

    listen_address = conf.get("listen_address", "0.0.0.0")
    listen_port = conf.get("listen_port", 8009)
    groups.extend(conf.get("groups", []))
    token = conf.get("token")
    max_targets = conf.get("max_targets", 10)
    target_lifetime = common.explode_datetime(conf.get("target_lifetime", "1d"))
    max_target_lifetime = common.explode_datetime(conf.get("max_target_lifetime", "30d"))

    # starting the API (blocking call)
    log_api.logging_level = "WARNING"
    app = connexion.FlaskApp(__name__, server="tornado", debug=False)
    app.add_api("api.swagger.yml")
    app.run(host=listen_address, port=listen_port, debug=False)
Example #45
0
def get_redshift_efficiency(simtype,
                            targets,
                            truth,
                            targets_in_tile,
                            obsconditions=None):
    """
    Simple model to get the redshift efficiency from the observational conditions or observed magnitudes+redshift

    Args:
        simtype: ELG, LRG, QSO, MWS, BGS
        targets: target catalog table; currently used only for TARGETID
        truth: truth table with OIIFLUX, TRUEZ
        targets_in_tile: dictionary. Keys correspond to tileids, its values are the
            arrays of targetids observed in that tile.
        obsconditions: table observing conditions with columns
           'TILEID': array of tile IDs
           'AIRMASS': array of airmass values on a tile
           'EBMV': array of E(B-V) values on a tile
           'LINTRANS': array of atmospheric transparency during spectro obs; floats [0-1]
           'MOONFRAC': array of moonfraction values on a tile.
           'SEEING': array of FWHM seeing during spectroscopic observation on a tile.

    Returns:
        tuple of arrays (observed, p) both with same length as targets

        observed: boolean array of whether the target was observed in these tiles

        p: probability to get this redshift right
    """
    targetid = targets['TARGETID']
    n = len(targetid)

    # Photometry columns differ between catalog versions: legacy tables use
    # a 2D 'DECAM_FLUX' array, newer tables use per-band FLUX_G / FLUX_R.
    try:
        if 'DECAM_FLUX' in targets.colnames:
            true_gflux = targets['DECAM_FLUX'][:, 1]
            true_rflux = targets['DECAM_FLUX'][:, 2]
        else:
            true_gflux = targets['FLUX_G']
            true_rflux = targets['FLUX_R']
    except (KeyError, IndexError):
        # Narrowed from a bare `except:` so system exceptions (e.g.
        # KeyboardInterrupt) are not swallowed.
        raise Exception(
            'Missing photometry needed to estimate redshift efficiency!')

    # BUGFIX: the original tested `truth['OIIFLUX'] not in truth.colnames`,
    # comparing a column's *values* to the list of column names; the intent
    # is to check that the 'OIIFLUX' column exists.
    if (obsconditions is None) and ('OIIFLUX' not in truth.colnames):
        raise Exception(
            'Missing obsconditions and flux information to estimate redshift efficiency'
        )

    if (simtype == 'ELG'):
        # Read the model OII flux threshold (FDR fig 7.12 modified to fit redmonster efficiency on OAK)
        filename = resource_filename(
            'desisim', 'data/quickcat_elg_oii_flux_threshold.txt')
        fdr_z, modified_fdr_oii_flux_threshold = np.loadtxt(filename,
                                                            unpack=True)

        # Get OIIflux from truth
        true_oii_flux = truth['OIIFLUX']

        # Compute OII flux thresholds for truez
        oii_flux_threshold = np.interp(truth['TRUEZ'], fdr_z,
                                       modified_fdr_oii_flux_threshold)
        assert (
            oii_flux_threshold.size == true_oii_flux.size
        ), "oii_flux_threshold and true_oii_flux should have the same size"

        # efficiency is modeled as a function of flux_OII/f_OII_threshold(z) and an arbitrary sigma_fudge
        sigma_fudge = 1.0
        max_efficiency = 1.0
        simulated_eff = eff_model(true_oii_flux / oii_flux_threshold,
                                  sigma_fudge, max_efficiency)

    elif (simtype == 'LRG'):
        # Read the model rmag efficiency
        filename = resource_filename('desisim',
                                     'data/quickcat_lrg_rmag_eff.txt')
        magr, magr_eff = np.loadtxt(filename, unpack=True)

        # Get Rflux from truth
        r_mag = 22.5 - 2.5 * np.log10(true_rflux)

        mean_eff_mag = np.interp(r_mag, magr, magr_eff)
        fudge = 0.002
        max_efficiency = 0.98
        simulated_eff = max_efficiency * mean_eff_mag * (
            1. + fudge * np.random.normal(size=mean_eff_mag.size))
        simulated_eff[np.where(
            simulated_eff > max_efficiency)] = max_efficiency

    elif (simtype == 'QSO'):
        # Read the model gmag threshold
        filename = resource_filename('desisim',
                                     'data/quickcat_qso_gmag_threshold.txt')
        zc, qso_gmag_threshold_vs_z = np.loadtxt(filename, unpack=True)

        # Get Gflux from truth
        true_gmag = 22.5 - 2.5 * np.log10(true_gflux)

        # Computes QSO mag thresholds for truez
        qso_gmag_threshold = np.interp(truth['TRUEZ'], zc,
                                       qso_gmag_threshold_vs_z)
        assert (qso_gmag_threshold.size == true_gmag.size
                ), "qso_gmag_threshold and true_gmag should have the same size"

        # Computes G flux
        qso_true_normed_flux = 10**(-0.4 * (true_gmag - qso_gmag_threshold))

        # model efficiency for QSO:
        sigma_fudge = 0.5
        max_efficiency = 0.95
        simulated_eff = eff_model(qso_true_normed_flux, sigma_fudge,
                                  max_efficiency)

    elif simtype == 'BGS':
        simulated_eff = 0.98 * np.ones(n)

    elif simtype == 'MWS':
        simulated_eff = 0.98 * np.ones(n)

    else:
        default_zeff = 0.98
        log.warning('using default redshift efficiency of {} for {}'.format(
            default_zeff, simtype))
        simulated_eff = default_zeff * np.ones(n)

    #- Get the corrections for observing conditions per tile, then
    #- correct targets on those tiles.  Parameterize in terms of failure
    #- rate instead of success rate to handle bookkeeping of targets that
    #- are observed on more than one tile.
    #- NOTE: this still isn't quite right since multiple observations will
    #- be simultaneously fit instead of just taking whichever individual one
    #- succeeds.

    zeff_obs = get_zeff_obs(simtype, obsconditions)
    pfail = np.ones(n)
    observed = np.zeros(n, dtype=bool)

    # More efficient alternative for large numbers of tiles + large target
    # list, but requires pre-computing the sort order of targetids.
    # Assume targets['TARGETID'] is unique, so not checking this.
    sort_targetid = np.argsort(targetid)

    # Extract the targets-per-tile lists into one huge list.
    concat_targets_in_tile = np.concatenate(
        [targets_in_tile[tileid] for tileid in obsconditions['TILEID']])
    ntargets_per_tile = np.array(
        [len(targets_in_tile[tileid]) for tileid in obsconditions['TILEID']])

    # Match entries in each tile list against sorted target list.
    target_idx = targetid[sort_targetid].searchsorted(concat_targets_in_tile,
                                                      side='left')
    target_idx_r = targetid[sort_targetid].searchsorted(concat_targets_in_tile,
                                                        side='right')
    del (concat_targets_in_tile)

    # Flag targets in tiles that do not appear in the target list (sky,
    # standards).
    not_matched = target_idx_r - target_idx == 0
    target_idx[not_matched] = -1
    del (target_idx_r, not_matched)

    # Not every tile has 5000 targets, so use individual counts to
    # construct offset of each tile in target_idx.
    offset = np.concatenate([[0], np.cumsum(ntargets_per_tile[:-1])])

    # For each tile, process targets.
    for i, tileid in enumerate(obsconditions['TILEID']):
        if ntargets_per_tile[i] > 0:
            # Quickly get all the matched targets on this tile.
            targets_this_tile = target_idx[offset[i]:offset[i] +
                                           ntargets_per_tile[i]]
            # BUGFIX: unmatched entries are flagged with -1, so keep
            # indices >= 0; the original `> 0` also discarded the valid
            # sorted index 0 (the target with the smallest TARGETID).
            targets_this_tile = targets_this_tile[targets_this_tile >= 0]
            # List of indices into sorted target list for each observed
            # source.
            ii = sort_targetid[targets_this_tile]
            tmp = (simulated_eff[ii] * zeff_obs[i]).clip(0, 1)
            pfail[ii] *= (1 - tmp)
            observed[ii] = True

    simulated_eff = (1 - pfail)

    return observed, simulated_eff
Example #46
0
def res_filename(file):
    """Resolve *file* relative to the repository root, i.e. three
    directories above the installed ``bigml`` package.

    NOTE(review): the parameter name shadows the Python 2 builtin
    ``file``; it is kept so keyword callers keep working.
    """
    relative = "../../../%s" % file
    return pkg_resources.resource_filename('bigml', relative)
Example #47
0
    def install(self):
        """Installer.

        Creates or upgrades a Trac environment from the buildout options:
        generates the ``trac-admin``/``tracd`` scripts, initializes the
        instance, populates milestones/components/permissions, and writes
        the final ``trac.ini``.

        Returns:
            An empty tuple, so buildout never removes the generated Trac
            instance upon reinstall.
        """

        # Utility function to interpreted boolean option value
        getBool = lambda s: s.strip().lower() in ['true', 'yes']

        # Utility function to parse a multi-line/multi-value parameter
        def cleanMultiParams(v):
            params = [
                s.split('|') for s in [l.strip() for l in v.split('\n')]
                if len(s) > 0
            ]
            cleaned_params = []
            for line in params:
                cleaned_params.append([row.strip() for row in line])
            return cleaned_params

        # Utility function to transform any string to an ID
        getId = lambda s: ''.join([c for c in s if c.isalnum()]).lower()

        options = self.options

        # Add command line scripts trac-admin and tracd into bin
        entry_points = [('trac-admin', 'trac.admin.console', 'run'),
                        ('tracd', 'trac.web.standalone', 'main')]
        zc.buildout.easy_install.scripts(entry_points,
                                         pkg_resources.working_set,
                                         options['executable'],
                                         options['bin-directory'])

        ####################
        # Init Trac instance
        ####################

        # Generate the trac instance, if required
        location = options['location']
        project_name = options.get('project-name', 'My project')
        project_url = options.get('project-url', 'http://example.com')
        db = 'sqlite:%s' % os.path.join('db', 'trac.db')
        if not os.path.exists(location):
            os.mkdir(location)
        trac = TracAdmin(location)
        if not trac.env_check():
            trac.do_initenv('"%s" %s' % (project_name, db))
        env = trac.env

        # Remove Trac default example data
        clean_up = getBool(options.get('remove-examples', 'True'))
        if clean_up:
            # Remove default milestones
            for mil in Milestone.select(env):
                if mil.name in [
                        'milestone1', 'milestone2', 'milestone3', 'milestone4'
                ]:
                    mil.delete()
            # Remove default components
            for comp in Component.select(env):
                if comp.name in ['component1', 'component2']:
                    comp.delete()

        # Add custom milestones (created only when not already present)
        for mil_data in cleanMultiParams(options.get('milestones', '')):
            mil_name = mil_data[0]
            try:
                mil = Milestone(env, name=mil_name)
            except ResourceNotFound:
                mil = Milestone(env)
                mil.name = mil_name
                mil.insert()

        # Add custom components (created only when not already present)
        for comp_data in cleanMultiParams(options.get('components', '')):
            comp_name = comp_data[0]
            try:
                comp = Component(env, name=comp_name)
            except ResourceNotFound:
                comp = Component(env)
                comp.name = comp_name
                if len(comp_data) == 2 and comp_data[1] not in [None, '']:
                    comp.owner = comp_data[1]
                comp.insert()

        #######################
        # Generate the trac.ini
        #######################

        # Read the trac.ini config file
        trac_ini = os.path.join(location, 'conf', 'trac.ini')
        parser = ConfigParser.ConfigParser()
        parser.read([trac_ini])

        # Clean-up trac.ini: add missing stuff
        if 'components' not in parser.sections():
            parser.add_section('components')

        # Force upgrade of informations used during initialization
        parser.set('project', 'name', project_name)

        # Set all repositories
        # NOTE(review): the 'repos' option is effectively mandatory — a
        # missing value makes cleanMultiParams() fail on None.
        repos = cleanMultiParams(options.get('repos', None))
        repo_names = [getId(r[0]) for r in repos]
        repo_types = {}.fromkeys([r[1].lower() for r in repos]).keys()
        if 'repositories' not in parser.sections():
            parser.add_section('repositories')
        for repo in repos:
            repo_name = getId(repo[0])
            repo_type = repo[1]
            repo_dir = repo[2]
            repo_url = repo[3]
            parser.set('repositories', '%s.type' % repo_name, repo_type)
            parser.set('repositories', '%s.dir' % repo_name, repo_dir)
            if repo_url not in ['', None]:
                parser.set('repositories', '%s.url' % repo_name, repo_url)

        # Set default repository
        # FIX: default to '' instead of None — getId(None) raised TypeError
        # when the 'default-repo' option was absent.  '' is falsy, so the
        # block below is simply skipped in that case.
        default_repo = getId(options.get('default-repo', ''))
        if default_repo and default_repo in repo_names:
            parser.set('repositories', '.alias', default_repo)
            parser.set('repositories', '.hidden', 'true')

        # Set repository sync method
        sync_method = options.get('repos-sync', 'request').strip().lower()
        svn_repos = [getId(r[0]) for r in repos if r[1] == 'svn']
        if sync_method == 'request':
            parser.set('trac', 'repository_sync_per_request',
                       ', '.join(svn_repos))
        # TODO
        # elif sync_method == 'hook':
        #   do stuff...

        # Set project description
        project_descr = options.get('project-description', None)
        if project_descr:
            parser.set('project', 'descr', project_descr)
            parser.set('header_logo', 'alt', project_descr)

        # Setup logo
        header_logo = options.get('header-logo', '')
        header_logo = os.path.realpath(header_logo)
        if os.path.exists(header_logo):
            shutil.copyfile(header_logo,
                            os.path.join(location, 'htdocs', 'logo'))
        parser.set('header_logo', 'src', 'site/logo')
        parser.set('header_logo', 'link', project_url)

        # Set footer message
        parser.set(
            'project', 'footer',
            options.get(
                'footer-message',
                'This Trac instance was generated by <a href="http://pypi.python.org/pypi/pbp.recipe.trac">pbp.recipe.trac</a>.'
            ))

        # SMTP parameters
        for name in ('always-bcc', 'always-cc', 'default-domain', 'enabled',
                     'from', 'from-name', 'password', 'port', 'replyto',
                     'server', 'subject-prefix', 'user'):
            param_name = "smtp-%s" % name
            default_value = None
            if param_name == "smtp-from-name":
                default_value = project_name
            value = options.get(param_name, default_value)
            if value is not None:
                parser.set('notification', param_name.replace('-', '_'), value)

        ###############
        # Plugins setup
        ###############

        # If one repository use Mercurial, hook its plugin
        if 'hg' in repo_types:
            parser.set('components', 'tracext.hg.*', 'enabled')

        # Configure the NavAdd plugin
        menu_items = cleanMultiParams(options.get('additional-menu-items', ''))
        item_list = []
        for item in menu_items:
            item_title = item[0]
            item_url = item[1]
            item_id = getId(item_title)
            item_list.append((item_id, item_title, item_url))
        # FIX: was `if item_list > 0:` — a list-to-int comparison, which is
        # always true on Python 2 (and a TypeError on Python 3).  The intent
        # is to enable the plugin only when at least one item was configured.
        if item_list:
            parser.set('components', 'navadd.*', 'enabled')
            if 'navadd' not in parser.sections():
                parser.add_section('navadd')
            parser.set('navadd', 'add_items',
                       ','.join([i[0] for i in item_list]))
            for (uid, title, url) in item_list:
                parser.set('navadd', '%s.target' % uid, 'mainnav')
                parser.set('navadd', '%s.title' % uid, title)
                parser.set('navadd', '%s.url' % uid, url)

        # Enable and setup time tracking
        time_tracking = options.get('time-tracking-plugin',
                                    'disabled').strip().lower() == 'enabled'
        if time_tracking:
            parser.set('components', 'timingandestimationplugin.*', 'enabled')

        # Enable and setup the stat plugin
        stats = options.get('stats-plugin',
                            'disabled').strip().lower() == 'enabled'
        if stats:
            parser.set('components', 'tracstats.*', 'enabled')

        #######################
        # Final upgrades & sync
        #######################

        # Apply custom parameters defined by the user
        custom_params = cleanMultiParams(options.get('trac-ini-additional',
                                                     ''))
        for param in custom_params:
            if len(param) == 3:
                section = param[0]
                if section not in parser.sections():
                    parser.add_section(section)
                parser.set(section, param[1], param[2])

        # Write the final trac.ini.
        # FIX: use a with-block so the file handle is closed; the previous
        # `parser.write(open(trac_ini, 'w'))` leaked it.
        with open(trac_ini, 'w') as trac_ini_file:
            parser.write(trac_ini_file)

        # Reload the environment
        env.shutdown()
        trac = TracAdmin(location)
        env = trac.env

        # Set custom permissions
        perm_sys = PermissionSystem(env)
        for cperm in cleanMultiParams(options.get('permissions', '')):
            if len(cperm) == 2:
                user = cperm[0]
                current_user_perms = perm_sys.get_user_permissions(user)
                perm_list = [p.upper() for p in cperm[1].split(' ') if len(p)]
                for perm in perm_list:
                    if perm not in current_user_perms:
                        perm_sys.grant_permission(user, perm)

        # Upgrade Trac instance to keep it fresh
        needs_upgrade = env.needs_upgrade()
        force_upgrade = getBool(options.get('force-instance-upgrade', 'False'))
        if needs_upgrade or force_upgrade:
            env.upgrade(backup=True)

        # Force repository resync
        repo_resync = getBool(options.get('force-repos-resync', 'False'))
        if repo_resync:
            rm = RepositoryManager(env)
            repositories = rm.get_real_repositories()
            for repos in sorted(repositories, key=lambda r: r.reponame):
                repos.sync(clean=True)

        # Upgrade default wiki pages embedded in Trac instance
        wiki_upgrade = getBool(options.get('wiki-doc-upgrade', 'False'))
        if wiki_upgrade:
            # Got the command below from trac/admin/console.py
            pages_dir = pkg_resources.resource_filename(
                'trac.wiki', 'default-pages')
            WikiAdmin(env).load_pages(pages_dir,
                                      ignore=['WikiStart', 'checkwiki.py'],
                                      create_only=['InterMapTxt'])

        # Return files that were created by the recipe. The buildout
        # will remove all returned files upon reinstall.
        return tuple()
Example #48
0
def _interp_binned_zerr(z, mags, redbins, mag_grid, coefs):
    """Mean redshift error per object from magnitude-dependent linear fits
    defined in redshift bins.

    Args:
        z: 1-d array of true redshifts (selected objects only).
        mags: 1-d array of magnitudes for the same objects, aligned with z.
        redbins: redshift bin edges, size nbins+1.  Objects below the first
            edge are folded into the first bin; objects at or above the
            last edge are folded into the last bin.
        mag_grid: magnitude grid on which each bin's linear fit is sampled.
        coefs: sequence of (coef0, coef1) pairs, one linear fit per bin.

    Returns:
        Array of mean errors, same length as z; NOT yet scaled by (1 + z).
    """
    zerr_tmp = np.zeros(len(z))
    for i in range(redbins.size - 1):
        index0, = np.where((z >= redbins[i]) & (z < redbins[i + 1]))
        if i == 0:
            # fold redshift underflow into the first bin
            index1, = np.where(z < redbins[0])
            index = np.concatenate((index0, index1))
        elif i == (redbins.size - 2):
            # fold redshift overflow into the last bin
            index1, = np.where(z >= redbins[-1])
            index = np.concatenate((index0, index1))
        else:
            index = index0

        # Evaluate the bin's linear fit on the magnitude grid and
        # interpolate the mean error at each object's magnitude.
        pol = np.poly1d(coefs[i])
        zerr_tmp[index] = np.interp(mags[index], mag_grid, pol(mag_grid))
    return zerr_tmp


def get_observed_redshifts(targets, truth, targets_in_tile, obsconditions):
    """
    Returns observed z, zerr, zwarn arrays given true object types and redshifts

    Args:
        targets: target catalog table; currently used only for target mask bits
        truth: truth table with OIIFLUX, TRUEZ
        targets_in_tile: dictionary. Keys correspond to tileids, its values are the
            arrays of targetids observed in that tile.
        obsconditions: table observing conditions with columns
           'TILEID': array of tile IDs
           'AIRMASS': array of airmass values on a tile
           'EBMV': array of E(B-V) values on a tile
           'LINTRANS': array of atmospheric transparency during spectro obs; floats [0-1]
           'MOONFRAC': array of moonfraction values on a tile.
           'SEEING': array of FWHM seeing during spectroscopic observation on a tile.

    Returns:
        tuple of (zout, zerr, zwarn)
    """

    simtype = get_simtype(np.char.strip(truth['TRUESPECTYPE']),
                          targets['DESI_TARGET'], targets['BGS_TARGET'],
                          targets['MWS_TARGET'])
    truez = truth['TRUEZ']
    targetid = truth['TARGETID']

    # Photometry required by the ELG/LRG/QSO error models below.
    try:
        if 'DECAM_FLUX' in targets.colnames:
            true_gflux = targets['DECAM_FLUX'][:, 1]
            true_rflux = targets['DECAM_FLUX'][:, 2]
        else:
            true_gflux = targets['FLUX_G']
            true_rflux = targets['FLUX_R']
    except Exception:
        # FIX: narrowed from a bare `except:` so SystemExit and
        # KeyboardInterrupt are no longer swallowed.
        raise Exception(
            'Missing photometry needed to estimate redshift efficiency!')

    zout = truez.copy()
    zerr = np.zeros(len(truez), dtype=np.float32)
    zwarn = np.zeros(len(truez), dtype=np.int32)

    objtypes = list(set(simtype))
    n_tiles = len(np.unique(obsconditions['TILEID']))

    if (n_tiles != len(targets_in_tile)):
        raise ValueError(
            'Number of obsconditions {} != len(targets_in_tile) {}'.format(
                n_tiles, len(targets_in_tile)))

    for objtype in objtypes:
        if objtype in _sigma_v.keys():
            ii = (simtype == objtype)
            n = np.count_nonzero(ii)

            # Error model for ELGs: error interpolated from [OII] flux.
            if (objtype == 'ELG'):
                filename = resource_filename('desisim',
                                             'data/quickcat_elg_oii_errz.txt')
                oii, errz_oii = np.loadtxt(filename, unpack=True)
                try:
                    true_oii_flux = truth['OIIFLUX'][ii]
                except Exception:
                    # FIX: narrowed from a bare `except:` (see above).
                    raise Exception(
                        'Missing OII flux information to estimate redshift error for ELGs'
                    )

                mean_err_oii = np.interp(true_oii_flux, oii, errz_oii)
                zerr[ii] = mean_err_oii * (1. + truez[ii])
                zout[ii] += np.random.normal(scale=zerr[ii])

            # Error model for LRGs: linear fits of redmonster error vs Rmag
            # in 4 redshift bins.
            elif (objtype == 'LRG'):
                redbins = np.linspace(0.55, 1.1, 5)
                mag = np.linspace(20.5, 23., 50)

                true_magr = 22.5 - 2.5 * np.log10(true_rflux)

                coefs = [[9.46882282e-05,-1.87022383e-03],[6.14601021e-05,-1.17406643e-03],\
                             [8.85342362e-05,-1.76079966e-03],[6.96202042e-05,-1.38632104e-03]]
                '''
                Coefs of linear fit of LRG zdc1 redmonster error as a function of Rmag in 4 bins of redshift: (z_low,coef0,coef1)
                0.55 9.4688228224e-05 -0.00187022382514
                0.6875 6.14601021052e-05 -0.00117406643273
                0.825 8.85342361594e-05 -0.00176079966322
                0.9625 6.96202042482e-05 -0.00138632103551
                '''

                # FIX: evaluate the fits at the magnitudes of the *selected*
                # objects (true_magr[ii]); previously the full-length
                # magnitude array was indexed with subset-space indices,
                # picking the wrong objects' magnitudes (compare the ELG
                # branch, which correctly subsets truth['OIIFLUX'][ii]).
                zerr_tmp = _interp_binned_zerr(truez[ii], true_magr[ii],
                                               redbins, mag, coefs)
                zerr[ii] = zerr_tmp * (1. + truez[ii])
                zout[ii] += np.random.normal(scale=zerr[ii])

            # Error model for QSOs: linear fits of redmonster error vs Gmag
            # in 6 redshift bins.
            elif (objtype == 'QSO'):
                redbins = np.linspace(0.5, 3.5, 7)
                mag = np.linspace(21., 23., 50)

                true_magg = 22.5 - 2.5 * np.log10(true_gflux)

                coefs = [[0.000156950059747,-0.00320719603886],[0.000461779391179,-0.00924485142818],\
                             [0.000458672517009,-0.0091254038977],[0.000461427968475,-0.00923812594293],\
                             [0.000312919487343,-0.00618137905849],[0.000219438845624,-0.00423782927109]]
                '''
                Coefs of linear fit of QSO zdc1 redmonster error as a function of Gmag in 6 bins of redshift: (z_low,coef0,coef1)
                0.5 0.000156950059747 -0.00320719603886
                1.0 0.000461779391179 -0.00924485142818
                1.5 0.000458672517009 -0.0091254038977
                2.0 0.000461427968475 -0.00923812594293
                2.5 0.000312919487343 -0.00618137905849
                3.0 0.000219438845624 -0.00423782927109
                '''

                # FIX: same subset-indexing fix as the LRG branch.
                zerr_tmp = _interp_binned_zerr(truez[ii], true_magg[ii],
                                               redbins, mag, coefs)
                zerr[ii] = zerr_tmp * (1. + truez[ii])
                zout[ii] += np.random.normal(scale=zerr[ii])

            else:
                # Generic Gaussian model scaled by the type's velocity
                # dispersion _sigma_v.
                zerr[ii] = _sigma_v[objtype] * (1 + truez[ii]) / c
                zout[ii] += np.random.normal(scale=zerr[ii])

            # Set ZWARN flags for some targets
            # the redshift efficiency only sets warning, but does not impact
            # the redshift value and its error.
            was_observed, goodz_prob = get_redshift_efficiency(
                objtype,
                targets[ii],
                truth[ii],
                targets_in_tile,
                obsconditions=obsconditions)

            assert len(was_observed) == n
            assert len(goodz_prob) == n
            r = np.random.random(len(was_observed))
            zwarn[ii] = 4 * (r > goodz_prob) * was_observed

            # Add fraction of catastrophic failures (zwarn=0 but wrong z)
            nzwarnzero = np.count_nonzero(zwarn[ii][was_observed] == 0)
            num_cata = np.random.poisson(_cata_fail_fraction[objtype] *
                                         nzwarnzero)
            if (objtype == 'ELG'): zlim = [0.6, 1.7]
            elif (objtype == 'LRG'): zlim = [0.5, 1.1]
            elif (objtype == 'QSO'): zlim = [0.5, 3.5]
            # NOTE(review): zlim is undefined for any other object type; if
            # num_cata > 0 for such a type this raises NameError — confirm
            # that _cata_fail_fraction is zero for those types.
            if num_cata > 0:
                #- tmp = boolean array for all targets, flagging only those
                #- that are of this simtype and were observed this epoch
                tmp = np.zeros(len(ii), dtype=bool)
                tmp[ii] = was_observed
                kk, = np.where((zwarn == 0) & tmp)
                index = np.random.choice(kk, size=num_cata, replace=False)
                assert np.all(np.in1d(index, np.where(ii)[0]))
                assert np.all(zwarn[index] == 0)

                zout[index] = np.random.uniform(zlim[0], zlim[1], len(index))

        else:
            msg = 'No redshift efficiency model for {}; using true z\n'.format(objtype) + \
                  'Known types are {}'.format(list(_sigma_v.keys()))
            log.warning(msg)

    return zout, zerr, zwarn
Example #49
0
import pybullet_data
import glob
import pybullet
import pybullet_utils.bullet_client as bc
import time
import numpy as np
from gym.utils import seeding
import gym
import os
import inspect
from myGym.envs.camera import Camera
import pkg_resources
# Resolve resource locations through pkg_resources so they are correct both
# from a source checkout and from an installed myGym package.
currentdir = pkg_resources.resource_filename("myGym", "envs")  # myGym/envs directory
repodir = pkg_resources.resource_filename("myGym", "./")  # myGym package root


class BaseEnv(gym.Env):
    """
    The base class for environments without rendering

    Parameters:
        :param gui_on: (bool) Whether or not to use PyBullet built-in GUI
        :param objects_dir_path: (str) Path to directory with URDF files for objects
        :param max_steps: (int) The maximum number of actions per episode
        :param show_bounding_boxes_gui: (bool) Whether or not to show bounding boxes in GUI
        :param changing_light_gui: (bool) Whether or not to change light in GUI
        :param shadows_on_gui: (bool) Whether or not to show shadows in GUI
    """
    metadata = {
        'render.modes': ['human', 'rgb_array'],
        'video.frames_per_second': 50
Example #50
0
# Initiate Config
# if modules are not available from central python install try the ones in the same directory
from Nagstamon.Config import Config

# Global configuration object holding the user's stored settings.
conf = Config()

# check for old settings when upgrading from a nagstamon version < 0.8 and convert them
conf.Convert_Conf_to_Multiple_Servers()
# convert settings for actions to custom actions for Nagstamon < 0.9.9
conf.Convert_Conf_to_Custom_Actions()

# try to get resources path if nagstamon got be installed by setup.py
Resources = ""
try:
    import pkg_resources
    Resources = pkg_resources.resource_filename("Nagstamon", "resources")
except Exception, err:
    # get resources directory from current directory - only if not being set before by pkg_resources
    # try-excepts necessary for platforms like Windows .EXE
    join = os.path.join
    normcase = os.path.normcase
    paths_to_check = [
        normcase(join(os.getcwd(), "Nagstamon", "resources")),
        normcase(join(os.getcwd(), "resources"))
    ]
    try:
        # if resources dir is not available in CWD, try the
        # libs dir (site-packages) for the current Python
        from distutils.sysconfig import get_python_lib
        paths_to_check.append(
            normcase(join(get_python_lib(), "Nagstamon", "resources")))
Example #51
0
import os
import pkg_resources

from alembic.config import Config
from alembic.environment import EnvironmentContext
from alembic.script import ScriptDirectory
from alembic.util import load_python_file
from zope.sqlalchemy import mark_changed

from kotti import conf_defaults
from kotti import get_settings
from kotti import DBSession
from kotti.util import command

KOTTI_SCRIPT_DIR = pkg_resources.resource_filename('kotti', 'alembic')
DEFAULT_LOCATION = 'kotti:alembic'


class ScriptDirectoryWithDefaultEnvPy(ScriptDirectory):
    """Alembic script directory that falls back to Kotti's bundled
    ``env.py`` when the configured script location does not ship one."""

    @property
    def env_py_location(self):
        """Path of the ``env.py`` to run, preferring the package's own."""
        location = super(ScriptDirectoryWithDefaultEnvPy, self).env_py_location
        if os.path.exists(location):
            return location
        return os.path.join(KOTTI_SCRIPT_DIR, 'env.py')

    def run_env(self):
        """Execute the resolved ``env.py`` migration script."""
        directory, script_name = self.env_py_location.rsplit(os.path.sep, 1)
        load_python_file(directory, script_name)
Example #52
0
def pkg_filename(relative_path):
    """Map *relative_path* to its on-disk location inside this package."""
    resolved = pkg_resources.resource_filename(__name__, relative_path)
    return resolved
 def __init__(self, logger=None):
     """Load the bundled Symphony Agent/Pod swagger spec paths.

     Args:
         logger: optional logger instance; defaults to this module's logger.
     """
     # Paths to the deprecated public API specifications shipped with the
     # symphonybinding package.
     self.agent_swagger = pkg_resources.resource_filename('symphonybinding', 'data/agent-api-public-deprecated.yaml')
     self.pod_swagger = pkg_resources.resource_filename('symphonybinding', 'data/pod-api-public-deprecated.yaml')
     self.logger = logger or logging.getLogger(__name__)
Example #54
0
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
########################################################################################################################

from __future__ import (absolute_import, division, print_function,
                        unicode_literals)
# noinspection PyUnresolvedReferences,PyCompatibility
from builtins import *

import os
import pkg_resources

from bag.design import Module

yaml_file = pkg_resources.resource_filename(
    __name__, os.path.join('netlist_info', 'capdac_8b_2.yaml'))


# noinspection PyPep8Naming
class adc_sar_templates__capdac_8b_2(Module):
    """Module for library adc_sar_templates cell capdac_8b_2.

    Fill in high level description here.
    """

    def __init__(self, bag_config, parent=None, prj=None, **kwargs):
        """Bind this schematic generator to its YAML netlist description."""
        Module.__init__(
            self, bag_config, yaml_file, parent=parent, prj=prj, **kwargs)
Example #55
0
# -*- coding: utf-8 -*-

import os
import pkg_resources
import subprocess

domain = "collective.symlink"
os.chdir(pkg_resources.resource_filename(domain, ""))
os.chdir("../../../")
target_path = "src/collective/symlink/"
locale_path = target_path + "locales/"
i18ndude = "./bin/i18ndude"


def locale_folder_setup():
    """Create a missing LC_MESSAGES folder (seeded with an initial .po via
    msginit) for every language directory under the locales folder."""
    os.chdir(locale_path)
    languages = [entry for entry in os.listdir(".") if os.path.isdir(entry)]
    for lang in languages:
        if "LC_MESSAGES" not in os.listdir(lang):
            lc_messages_path = lang + "/LC_MESSAGES/"
            os.mkdir(lc_messages_path)
            cmd = "msginit --locale={0} --input={1}.pot --output={2}/LC_MESSAGES/{3}.po".format(  # NOQA: E501
                lang, domain, lang, domain)
            subprocess.call(cmd, shell=True)

    os.chdir("../../../../")

Example #56
0
 def __init__(self, parameters: Dict[str, Any]) -> None:
     """Resolve the class-level template to an absolute path and store the
     rendering parameters.

     Args:
         parameters: template parameters, presumably used for rendering
             elsewhere — confirm against callers.
     """
     # Subclasses must define ``template``; fail early otherwise.
     assert self.template is not None
     self.template_path = pkg_resources.resource_filename(
         constants.misc.TEMPLATE_PATH, self.template)
     self.parameters = parameters
Example #57
0
from pkg_resources import resource_filename

DATA_ROOT = resource_filename('langcodes', 'data')
import os


def data_filename(filename):
    """Return the absolute path of *filename* within the langcodes data root."""
    full_path = os.path.join(DATA_ROOT, filename)
    return full_path
Example #58
0
def icon_path(basename):
    """Resolve *basename* to the packaged icon file's filesystem path."""
    relative = "icons/" + basename
    return pkg_resources.resource_filename(__name__, relative)
Example #59
0
    def save(self,
             parent=None,
             current_name="graph",
             size="auto",
             num_specimens=1,
             offset=0.75):
        """
            Displays a save dialog to export an image from the current plot.

            Args:
                parent: optional parent window for the dialogs.
                current_name: default file name proposed in the save dialog.
                size: "auto" to use the first settings.OUTPUT_PRESETS entry,
                    or a string like "800x600@96" (WIDTHxHEIGHT@DPI).
                num_specimens: not referenced in this method body.
                offset: not referenced in this method body.
        """
        # Parse arguments:
        width, height = 0, 0
        if size == "auto":
            descr, width, height, dpi = settings.OUTPUT_PRESETS[0]
        else:
            # "@" is normalized to "x", so "800x600@96" -> (800, 600, 96).
            width, height, dpi = map(float, size.replace("@", "x").split("x"))

        # Load gui:
        builder = gtk.Builder()
        builder.add_from_file(
            resource_filename("pyxrd.specimen", "glade/save_graph_size.glade")
        )  # FIXME move this to this namespace!!
        size_expander = builder.get_object("size_expander")
        cmb_presets = builder.get_object("cmb_presets")

        # Setup combo with presets:
        cmb_store = gtk.ListStore(str, int, int, float)
        for row in settings.OUTPUT_PRESETS:
            cmb_store.append(row)
        cmb_presets.clear()
        cmb_presets.set_model(cmb_store)
        cell = gtk.CellRendererText()
        cmb_presets.pack_start(cell, True)
        cmb_presets.add_attribute(cell, 'text', 0)

        # Copy the selected preset's width/height/dpi into the entry boxes.
        # NOTE: closes over entry_w/entry_h/entry_dpi, which are assigned
        # below — safe because the callback only fires on user interaction.
        def on_cmb_changed(cmb, *args):
            itr = cmb_presets.get_active_iter()
            w, h, d = cmb_store.get(itr, 1, 2, 3)
            entry_w.set_text(str(w))
            entry_h.set_text(str(h))
            entry_dpi.set_text(str(d))

        cmb_presets.connect('changed', on_cmb_changed)

        # Setup input boxes:
        entry_w = builder.get_object("entry_width")
        entry_h = builder.get_object("entry_height")
        entry_dpi = builder.get_object("entry_dpi")
        entry_w.set_text(str(width))
        entry_h.set_text(str(height))
        entry_dpi.set_text(str(dpi))

        # What to do when the user wants to save this:
        def on_accept(dialog):
            # Get the width, height & dpi
            width = float(entry_w.get_text())
            height = float(entry_h.get_text())
            dpi = float(entry_dpi.get_text())
            # Convert pixel dimensions to inches for save_figure.
            i_width, i_height = width / dpi, height / dpi
            # Save it all right!
            self.save_figure(dialog.filename, dpi, i_width, i_height)

        # Ask the user where, how and if he wants to save:
        DialogFactory.get_save_dialog(
            "Save Graph",
            parent=parent,
            filters=self.file_filters,
            current_name=current_name,
            extra_widget=size_expander).run(on_accept)
Example #60
0
standard_library.install_aliases()

# Standard library
from argparse import ArgumentParser
from builtins import str
import os
import urllib.parse

# Third-party
import pkg_resources

# Local/library specific
from .support import *


RDF_DIR = pkg_resources.resource_filename('cc.licenserdf', 'licenses')


# *
# *******************************************************************


def _printer(string):
    """
    A simple wrapper for the print statement so we can do testing on
    the info method
    """
    print(string)

def license_rdf_filename(license_uri, rdf_dir=RDF_DIR):
    """Map a license URI to the filesystem filename containing the RDF."""