def main():
	"""Generate the DataIN/*.csv template files according to config.ini.

	Reads config.ini from the working directory, optionally regenerates
	DataIN/Quantity.csv (logarithmic, linear or DigiKey price-break
	spacing), then writes empty header-only CSV templates for fixed
	costs, DigiKey parts, variable costs and price, as enabled.
	"""
	try:
		config = SafeConfigParser()
		if config.read('config.ini') != ['config.ini']:
			raise OSError('no config.ini file present')

		# Optionally regenerate DataIN/Quantity.csv from the config settings.
		if config.getboolean('Generate CSVs', 'autogen-q'):
			gen_type = config.get('Generate CSVs', 'type')
			if gen_type in ('logarithmic', 'linear'):
				minQ = config.getint('Generate CSVs', 'min-q')
				maxQ = config.getint('Generate CSVs', 'max-q')
				numQ = config.getint('Generate CSVs', 'num-q')
				if gen_type == 'logarithmic':
					# Endpoints are the rounded base-10 exponents of min/max.
					quantities = np.logspace(round(math.log(minQ, 10)),
					                         round(math.log(maxQ, 10)),
					                         numQ, dtype=int)
				else:
					quantities = np.linspace(minQ, maxQ, numQ, dtype=int)
				pd.Series(quantities).to_csv('DataIN/Quantity.csv', index=False)
			elif gen_type == 'digikey':
				# Standard DigiKey price-break quantities.
				pd.Series([1, 5, 10, 25, 50, 100, 250, 500, 1000, 5000, 10000]).to_csv('DataIN/Quantity.csv', index=False)

		qs = pd.read_csv('DataIN/Quantity.csv', squeeze=True)

		titlesVarCost = ['Name']
		titlesDigiKey = ['Name', 'URL', 'Quantity']

		# One cost column per quantity break.
		for q in qs:
			titlesVarCost.append('Cost @' + str(q) + "pcs")

		if config.getboolean('Generate CSVs', 'gen-fc'):
			pd.DataFrame(columns=['Name', 'Cost']).to_csv('DataIN/FixedCosts.csv', index=False)

		if config.getboolean('Generate CSVs', 'gen-dkp'):
			pd.DataFrame(columns=titlesDigiKey).to_csv('DataIN/DigiKeyParts.csv', index=False)

		if config.getboolean('Generate CSVs', 'gen-vc'):
			pd.DataFrame(columns=titlesVarCost).to_csv('DataIN/VarCosts.csv', index=False)

		if config.getboolean('Generate CSVs', 'gen-p'):
			pd.DataFrame(columns=['Price']).to_csv('DataIN/Price.csv', index=False)

	except OSError as e:
		print(e)
	except Exception:
		# BUG FIX: was a bare 'except:', which also swallowed
		# KeyboardInterrupt/SystemExit.  Anything reaching here is almost
		# certainly a malformed config file.
		print("something went wrong, most likely your config.ini file is not properly configured")
    def run(self):
        """ Build the tutorials (iPython notebook files) located in tutorials/*
            into static HTML pages.
        """
        from IPython.nbconvert.nbconvertapp import NbConvertApp

        check_ipython_version()

        cwd = os.getcwd()
        html_base = os.path.join(cwd, "html")
        if not os.path.exists(html_base):
            os.mkdir(html_base)
        tutorials_base = os.path.join(cwd, 'tutorials')

        # One converter instance is reused for every notebook.
        converter = NbConvertApp()
        converter.initialize()
        converter.export_format = 'html'
        converter.config.Exporter.template_path = [
            'templates', os.path.join(tutorials_base, 'templates')]
        converter.config.Exporter.template_file = 'astropy'

        # Walk each tutorial directory, converting every published notebook.
        index_list = []
        for tutorial_name in os.listdir(tutorials_base):
            tutorial_path = os.path.join(tutorials_base, tutorial_name)
            if not os.path.isdir(tutorial_path):
                continue

            # Metadata lives alongside the notebooks; unpublished ones are skipped.
            config = SafeConfigParser()
            config.read(os.path.join(tutorial_path, "metadata.cfg"))
            if not config.getboolean("config", "published"):
                continue

            for filename in os.listdir(tutorial_path):
                base, ext = os.path.splitext(filename)
                if ext.lower() != ".ipynb" or "checkpoint" in base:
                    continue
                converter.output_base = os.path.join(html_base, base)
                converter.notebooks = [os.path.join(tutorial_path, filename)]
                converter.start()
                index_list.append({
                    "link_path": "{}.html".format(base),
                    "link_name": config.get("config", "link_name"),
                })

        # Emit a single index page linking every converted notebook.
        entries = ['      <li><a href="{0[link_path]}">{0[link_name]}</a></li>'.format(page)
                   for page in index_list]
        with open(os.path.join(cwd, 'html', 'index.html'), 'w') as f:
            f.write(INDEX_TEMPLATE.format(entries='\n'.join(entries)))
Exemple #3
0
def test_pulp():
    """Push a busybox test image to the Pulp server named in PULP_CONF_PATH."""
    tasker = DockerTasker()
    conf = SafeConfigParser()
    # read() returns the list of files successfully parsed; it must not be empty.
    assert len(conf.read(PULP_CONF_PATH)) > 0

    push_image_to_pulp("busybox-test", "busybox",
                       conf.get("server", "host"),
                       conf.get("server", "username"),
                       conf.get("server", "password"),
                       conf.getboolean("server", "verify_ssl"),
                       tasker, logging.getLogger("dock.tests"))
def init():
    """Load config.ini (next to this module) into the module-level globals."""
    global ConnectionString, DebugEnabled

    config_path = os.path.dirname(os.path.realpath(__file__)) + '/config.ini'
    print("Looking for config file at: " + config_path)

    parser = SafeConfigParser()
    parser.read(config_path)

    ConnectionString = parser.get('PostgreSQL', 'ConnectionString')
    DebugEnabled = parser.getboolean('Etc', 'Debug')
    def parse_config(self, config, section=None):
        """Populate self.namespace from a config-file section.

        Parameters
        ----------
        config : basestring or parser object
            Path to a config file, or an already-loaded SafeConfigParser.
        section : str, optional
            Section to read; defaults to self.section.

        Returns
        -------
        The updated self.namespace.

        Raises
        ------
        ValueError
            If the section contains a key with no matching parser argument.
        """
        if section is None:
            section = self.section

        # Accept either a filename or a ready parser object.
        if isinstance(config, basestring):
            configfile = config
            config = SafeConfigParser()
            config.read(configfile)

        # Set the options first (otherwise optional positional arguments disappear)
        if OPTIONS in config._sections[section].keys():
            self.parse_args(config.get(section, OPTIONS))

        # Don't grab the default keys
        dests = [a.dest for a in self.parser._actions]
        for key in config._sections[section].keys():
            if key == '__name__':
                continue
            if key == OPTIONS:
                continue
            try:
                index = dests.index(key)
                action = self.parser._actions[index]
            except ValueError:
                msg = "Unrecognized argument in config: %s\n" % key
                msg += self.get_help()
                raise ValueError(msg)

            # BUG FIX: these branches were independent 'if' statements, so
            # the final 'else' (bound only to the _CountAction test) clobbered
            # boolean values with the raw string from config.get().
            if isinstance(action, (argparse._StoreTrueAction,
                                   argparse._StoreFalseAction)):
                value = config.getboolean(section, key)
            elif isinstance(action, argparse._CountAction):
                # A bare flag in the file counts as 1; otherwise take the int.
                try:
                    value = int(config.getboolean(section, key))
                except ValueError:
                    value = config.getint(section, key)
            else:
                value = config.get(section, key)
            setattr(self.namespace, key, value)

        return self.namespace
Exemple #6
0
class Config(object):
	"""Typed accessor wrapper around the rover.cfg configuration file."""

	def __init__(self):
		self._configFileName = "rover.cfg"
		self._readConfig()

	def _readConfig(self):
		# Defaults cover every expected option, so reads do not fail when
		# the file is missing or incomplete.
		self._config = SafeConfigParser({
			"db_host": "localhost",
			"db_port": "27017",
			"db_name": "rover",
			"serial_path": "/dev/ttymxc3",
			"serial_baud": "115200",
			"serial_timeout": "0.5",
			"serial_encoding": "windows-1252"})
		self._config.read(self._configFileName)
		self._validateConfig()

	def _writeConfig(self):
		# BUG FIX: the file handle returned by open() was never closed,
		# leaking the descriptor and risking unflushed data.  A context
		# manager guarantees close even if write() raises.
		with open(self._configFileName, "w") as config_file:
			self._config.write(config_file)

	def _validateConfig(self):
		# Ensure the expected sections exist; persist the file if we had
		# to create any of them.
		changed = False
		if not self._config.has_section("db"):
			self._config.add_section("db")
			changed = True
		if not self._config.has_section("arduino_serial"):
			self._config.add_section("arduino_serial")
			changed = True
		if changed:
			self._writeConfig()

	def get(self, section, key):
		return self._config.get(section, key)

	def getInt(self, section, key):
		return self._config.getint(section, key)

	def getFloat(self, section, key):
		return self._config.getfloat(section, key)

	def getBoolean(self, section, key):
		return self._config.getboolean(section, key)

	def set(self, section, key, value):
		self._config.set(section, key, value)
		self._writeConfig()
Exemple #7
0
def _init(apiurl):
    """Initialize osc library.

    apiurl is the apiurl which should be used.

    """
    # Multiple apiurls are collapsed to the first one with a warning.
    if hasattr(apiurl, 'extend') and len(set(apiurl)) > 1:
        msg = ("Different apiurls are not supported at the moment: "
               "%s (using %s)" % (', '.join(apiurl), apiurl[0]))
        logger().info(msg)
        apiurl = apiurl[0]
    conf_filename = os.environ.get('OSC_CONFIG', '~/.oscrc')
    conf_filename = os.path.expanduser(conf_filename)
    cp = SafeConfigParser({'plaintext_password': True, 'aliases': ''})
    cp.read(conf_filename)
    apiurl = apiurl.strip('/')
    if apiurl == 'api':
        apiurl = 'https://api.opensuse.org'
    # Find the section matching the apiurl either by name or by alias.
    for section in cp.sections():
        aliases = cp.get(section, 'aliases', raw=True)
        aliases = aliases.split(',')
        if section.strip('/') == apiurl or apiurl in aliases:
            user = cp.get(section, 'user', raw=True)
            password = None
            if cp.has_option(section, 'pass'):
                password = cp.get(section, 'pass', raw=True)
            if cp.has_option(section, 'passx'):
                # BUG FIX: this branch previously re-read the 'pass' option,
                # so the obfuscated 'passx' value was never used and the
                # decode chain was applied to the plaintext password.
                password = cp.get(section, 'passx', raw=True)
                password = password.decode('base64').decode('bz2')
            if (cp.has_option(section, 'keyring')
                    and cp.getboolean(section, 'keyring')):
                try:
                    import keyring
                    host = urllib.parse.urlparse(apiurl).hostname
                    password = keyring.get_password(host, user)
                except ImportError:
                    msg = ("keyring module not available but '%s' "
                           "stores password there") % conf_filename
                    raise ValueError(msg)
            if password is None:
                msg = "No password provided for %s" % section
                raise ValueError(msg)
            if '://' not in section:
                section = 'https://{0}'.format(section)
            Osc.init(section, username=user, password=password)
            return section
Exemple #8
0
class Configuration(object):
    """Server settings read from ~/.rhythmweb, with built-in defaults."""

    def __init__(self):
        defaults = {
            'theme' : 'default',
            'theme.mobile' : 'touch',
            'hostname' : '0.0.0.0',
            'port' : '7000',
            'log.file' : '/tmp/rb-serve.log',
            'log.level' : 'INFO',
            'log.format' : '%(levelname)s	%(asctime)s	%(name)s: %(message)s',
            'debug' : 'False',
        }
        self.parser = SafeConfigParser(defaults=defaults)
        self.parser.add_section('server')
        self.parser.read(path.expanduser('~/.rhythmweb'))
        self.configure_logger()

    def get_string(self, key):
        return self.parser.get('server', key, raw=True)

    def get_int(self, key):
        return self.parser.getint('server', key)

    def get_boolean(self, key):
        return self.parser.getboolean('server', key)

    def configure_logger(self):
        # Route the root logger to a size-rotated file using the configured
        # level, path and format.
        root = logging.getLogger()
        root.setLevel(self.get_string('log.level'))
        handler = logging.handlers.RotatingFileHandler(
            self.get_string('log.file'), backupCount=5, maxBytes=1024 * 1024)
        handler.setFormatter(logging.Formatter(fmt=self.get_string('log.format')))
        root.addHandler(handler)

        if not root.isEnabledFor(logging.DEBUG):
            return
        # In debug mode, dump the effective configuration for inspection.
        root.debug('Logger configured')
        root.debug('Showing app configuration:')
        for section_name in self.parser.sections():
            root.debug('Section: %s' % section_name)
            for key in self.parser.options(section_name):
                root.debug('  %s = %s' % (key, self.parser.get(section_name, key, raw=True)))
Exemple #9
0
    def _load_config(self):
        """Load and parse the settings.ini config file, passing each
        recognized option to livestreamer with its proper type.
        """
        config = SafeConfigParser()
        config_file = os.path.join(self.config_path, 'settings.ini')
        config.read(config_file)

        # Dispatch table: option-type name -> matching typed getter.
        # BUG FIX: the original non-exclusive 'if' chain left 'value'
        # stale (or unbound on the first iteration) for any type name
        # outside int/float/bool/str; it also shadowed the builtin 'type'.
        getters = {
            'int': config.getint,
            'float': config.getfloat,
            'bool': config.getboolean,
            'str': config.get,
        }
        for option, option_type in list(AVAILABLE_OPTIONS.items()):
            if not config.has_option('DEFAULT', option):
                continue
            getter = getters.get(option_type)
            if getter is None:
                continue  # unknown type name: skip rather than pass stale data
            self.livestreamer.set_option(option, getter('DEFAULT', option))
Exemple #10
0
class CrabClient:
    """Crab client class, used for interaction with the server."""

    def __init__(self, command=None, crabid=None):
        """Constructor for CrabClient.

        This causes the client to configure itself,
        by looking for the crab.ini file.  If the environment
        variables CRABHOST or CRABPORT exist, these override
        settings from the configuration files.

        If the client has been started to report on the status of a
        job, then the command must be supplied, and the crabid should
        be given if known.
        """
        self.command = command
        self.crabid = crabid

        # Built-in defaults; configuration files and environment
        # variables override them below.
        self.config = SafeConfigParser()
        self.config.add_section('server')
        self.config.set('server', 'host', 'localhost')
        self.config.set('server', 'port', '8000')
        self.config.set('server', 'timeout', '30')
        self.config.add_section('client')
        self.config.set('client', 'use_fqdn', 'false')

        env = os.environ

        # Read configuration files -- first system and then user.
        sysconfdir = env.get('CRABSYSCONFIG', '/etc/crab')
        userconfdir = env.get('CRABUSERCONFIG', os.path.expanduser('~/.crab'))

        self.configfiles = self.config.read([
            os.path.join(sysconfdir, 'crab.ini'),
            os.path.join(userconfdir, 'crab.ini')
        ])

        # Override configuration as specified by environment variables.
        if 'CRABHOST' in env:
            self.config.set('server', 'host', env['CRABHOST'])
        if 'CRABPORT' in env:
            self.config.set('server', 'port', env['CRABPORT'])
        if 'CRABUSERNAME' in env:
            self.config.set('client', 'username', env['CRABUSERNAME'])
        if 'CRABCLIENTHOSTNAME' in env:
            self.config.set('client', 'hostname', env['CRABCLIENTHOSTNAME'])

        # Add computed defaults for some values if they have not already
        # been determined.  This avoids the need to perform these operations
        # if the value is already known and would allow the way in which this
        # is done to be customized based on other values.
        if not self.config.has_option('client', 'hostname'):
            if self.config.getboolean('client', 'use_fqdn'):
                self.config.set('client', 'hostname', socket.getfqdn())
            else:
                self.config.set('client', 'hostname',
                                socket.gethostname().split('.', 1)[0])

        if not self.config.has_option('client', 'username'):
            self.config.set('client', 'username', pwd.getpwuid(os.getuid())[0])

    def start(self):
        """Notify the server that the job is starting.

        Return the decoded server response, which may include
        an inhibit dictionary item."""

        return self._write_json(self._get_url('start'),
                                {'command': self.command},
                                read=True)

    def finish(self, status=CrabStatus.UNKNOWN, stdoutdata='', stderrdata=''):
        """Notify the server that the job is finishing."""

        self._write_json(
            self._get_url('finish'), {
                'command': self.command,
                'status': status,
                'stdout': stdoutdata,
                'stderr': stderrdata
            })

    def send_crontab(self, crontab, timezone=None):
        """Takes the crontab as a string, breaks it into lines,
        and transmits it to the server.

        Returns a list of warnings."""

        data = self._write_json(self._get_url('crontab'), {
            'crontab': crontab.split('\n'),
            'timezone': timezone
        },
                                read=True)

        return data['warning']

    def fetch_crontab(self, raw=False):
        """Retrieves crontab lines from the server, and returns
        them as a single string."""

        url = self._get_url('crontab')
        if raw:
            url = url + '?raw=true'
        data = self._read_json(url)

        if data['crontab'] is None:
            return ''
        else:
            return '\n'.join(data['crontab'])

    def get_info(self):
        """Return a human-readable summary of the client configuration."""
        info = []
        info.append('Server: ' + self.config.get('server', 'host') + ':' +
                    self.config.get('server', 'port'))
        info.append('Client: ' + self.config.get('client', 'username') + '@' +
                    self.config.get('client', 'hostname'))
        info.append('Files: ' + ', '.join(self.configfiles))
        return '\n'.join(info)

    def _get_url(self, action):
        """Creates the URL to be used to perform the given server action."""

        url = ('/api/0/' + action + '/' +
               urlquote(self.config.get('client', 'hostname'), '') + '/' +
               urlquote(self.config.get('client', 'username'), ''))

        if self.crabid is not None:
            url = url + '/' + urlquote(self.crabid, '')

        return url

    def _get_conn(self):
        """Opens an HTTP connection to the configured server."""

        # Try first to construct the connection with a timeout.  However
        # this feature was added in Python 2.6, so for older versions of
        # Python, we must catch the TypeError and construct the object
        # without a timeout.
        try:
            return HTTPConnection(self.config.get('server', 'host'),
                                  self.config.get('server', 'port'),
                                  timeout=int(
                                      self.config.get('server', 'timeout')))
        except TypeError:
            return HTTPConnection(self.config.get('server', 'host'),
                                  self.config.get('server', 'port'))

    def _read_json(self, url):
        """Performs an HTTP GET on the given URL and interprets the
        response as JSON."""

        # BUG FIX: 'conn' must be pre-bound; if _get_conn() raised, the
        # finally clause previously hit a NameError on conn.close(),
        # masking the original exception.
        conn = None
        try:
            try:
                conn = self._get_conn()
                conn.request('GET', url)

                res = conn.getresponse()

                if res.status != 200:
                    raise CrabError('server error: ' + self._read_error(res))

                return json.loads(latin_1_decode(res.read(), 'replace')[0])

            # except HTTPException as err:
            except HTTPException:
                err = sys.exc_info()[1]
                raise CrabError('HTTP error: ' + str(err))

            # except socket.error as err:
            except socket.error:
                err = sys.exc_info()[1]
                raise CrabError('socket error: ' + str(err))

            # except ValueError as err:
            except ValueError:
                err = sys.exc_info()[1]
                raise CrabError('did not understand response: ' + str(err))

        finally:
            if conn is not None:
                conn.close()

    def _write_json(self, url, obj, read=False):
        """Converts the given object to JSON and sends it with an
        HTTP PUT to the given URL.

        Optionally attempts to read JSON from the response."""

        # BUG FIX: same unbound-'conn' hazard as in _read_json.
        conn = None
        try:
            try:
                conn = self._get_conn()
                conn.request('PUT', url, json.dumps(obj))

                res = conn.getresponse()

                if res.status != 200:
                    raise CrabError('server error: ' + self._read_error(res))

                if read:
                    response = latin_1_decode(res.read(), 'replace')[0]

                    # Check we got a response before attempting to decode
                    # it as JSON.  (Some messages did not have responses
                    # for previous server versions.)
                    if response:
                        return json.loads(response)
                    else:
                        return {}

            # except HTTPException as err:
            except HTTPException:
                err = sys.exc_info()[1]
                raise CrabError('HTTP error: ' + str(err))

            # except socket.error as err:
            except socket.error:
                err = sys.exc_info()[1]
                raise CrabError('socket error: ' + str(err))

            # except ValueError as err:
            except ValueError:
                err = sys.exc_info()[1]
                raise CrabError('did not understand response: ' + str(err))

        finally:
            if conn is not None:
                conn.close()

    def _read_error(self, res):
        """Determine the error message to show based on an
        unsuccessful HTTP response.

        Currently use the HTTP status phrase or the first
        paragraph of the body, if found with a regular expression."""

        message = res.reason

        # Best effort only: narrowed from a bare 'except:' so that
        # KeyboardInterrupt/SystemExit still propagate.
        try:
            body = latin_1_decode(res.read(), 'replace')[0]
            match = re.search('<p>([^<]*)', body)
            if match:
                message = match.group(1)
        except Exception:
            pass

        return message
Exemple #11
0
def run(ini_file='TOPKAPI.ini'):
    """Run the model with the set-up defined by `ini_file`.

    """

    ##================================##
    ##  Read the input file (*.ini)   ##
    ##================================##
    config = SafeConfigParser()
    config.read(ini_file)
    print('Read the file ',ini_file)

    ##~~~~~~ Numerical_options ~~~~~~##
    solve_s = config.getfloat('numerical_options', 'solve_s')
    solve_o = config.getfloat('numerical_options', 'solve_o')
    solve_c = config.getfloat('numerical_options', 'solve_c')
    only_channel_output = config.getboolean('numerical_options',
                                            'only_channel_output')

    ##~~~~~~~~~~~ input files ~~~~~~~~~~~##
    #Param
    file_global_param = config.get('input_files', 'file_global_param')
    file_cell_param = config.get('input_files', 'file_cell_param')
    #Rain
    file_rain = config.get('input_files', 'file_rain')
    #ETP
    file_ET = config.get('input_files', 'file_ET')

    #~~~~~~~~~~~ Group (simulated event) ~~~~~~~~~~~##
    group_name = config.get('groups', 'group_name')

    ##~~~~~~ Calibration ~~~~~~##
    fac_L = config.getfloat('calib_params', 'fac_L')
    fac_Ks = config.getfloat('calib_params', 'fac_Ks')
    fac_n_o = config.getfloat('calib_params', 'fac_n_o')
    fac_n_c = config.getfloat('calib_params', 'fac_n_c')

    ##~~~~~~ External flows ~~~~~~##
    external_flow = config.getboolean('external_flow', 'external_flow')
    if external_flow:
        file_Qexternal_flow = config.get('external_flow',
                                         'file_Qexternal_flow')
        Xexternal_flow = config.getfloat('external_flow', 'Xexternal_flow')
        Yexternal_flow = config.getfloat('external_flow', 'Yexternal_flow')

    ##~~~~~~~~~~~ output files ~~~~~~~~~~##
    file_out = config.get('output_files', 'file_out')
    ut.check_file_exist(file_out) #create path_out if it doesn't exist
    if os.path.exists(file_out):
        first_run = False
    else:
        first_run = True

    append_output = config.getboolean('output_files', 'append_output')
    if append_output is True:
        fmode = 'a'
    else:
        fmode = 'w'

    ##============================##
    ##   Read the forcing data    ##
    ##============================##
    print('Read the forcing data')

    #~~~~Rainfall
    h5file_in = h5.openFile(file_rain,mode='r')
    group = '/'+group_name+'/'
    node = h5file_in.getNode(group+'rainfall')
    ndar_rain = node.read()
    h5file_in.close()

    #~~~~ETr - Reference crop ET
    h5file_in = h5.openFile(file_ET,mode='r')
    group = '/'+group_name+'/'
    node = h5file_in.getNode(group+'ETr')
    ndar_ETr = node.read()
    h5file_in.close()

    #~~~~ETo - Open water potential evap.
    h5file_in = h5.openFile(file_ET,mode='r')
    group = '/'+group_name+'/'
    node = h5file_in.getNode(group+'ETo')
    ndar_ETo = node.read()
    h5file_in.close()

    #~~~~external_flow flows
    if external_flow:
        ar_Qexternal_flow = np.loadtxt(file_Qexternal_flow)[:, 5]


    ##============================##
    ## Pretreatment of input data ##
    ##============================##
    print('Pretreatment of input data')

    #~~~~Read Global parameters file
    X, Dt, alpha_s, \
    alpha_o, alpha_c, \
    A_thres, W_min, W_max = pm.read_global_parameters(file_global_param)

    #~~~~Read Cell parameters file
    ar_cell_label, ar_coorx, \
    ar_coory, ar_lambda, \
    ar_Xc, ar_dam, \
    ar_tan_beta, ar_tan_beta_channel, \
    ar_L0, ar_Ks0, \
    ar_theta_r, ar_theta_s, \
    ar_n_o0, ar_n_c0, \
    ar_cell_down, ar_pVs_t0, \
    ar_Vo_t0, ar_Qc_t0, \
    ar_kc, psi_b, lamda = pm.read_cell_parameters(file_cell_param)

    #~~~~Number of cell in the catchment
    nb_cell = len(ar_cell_label)

    #~~~~Computation of cell order
    ar_label_sort = pm.sort_cell(ar_cell_label, ar_cell_down)

    #~~~~Computation of upcells
    li_cell_up = pm.direct_up_cell(ar_cell_label, ar_cell_down, ar_label_sort)

    #~~~~Computation of drained area
    ar_A_drained = pm.drained_area(ar_label_sort, li_cell_up, X)

    #~~~~Apply calibration factors to the parameter values
    ar_L = ar_L0*fac_L
    ar_Ks = ar_Ks0*fac_Ks
    ar_n_o = ar_n_o0*fac_n_o
    ar_n_c = ar_n_c0*fac_n_c

    print('Max L=', max(ar_L))
    print('Max Ks=', max(ar_Ks))
    print('Max n_o=', max(ar_n_o))
    print('Max n_c=', max(ar_n_c))

    #~~~~Computation of model parameters from physical parameters
    ar_Vsm, ar_b_s, ar_b_o, \
    ar_W, ar_b_c = pm.compute_cell_param(X, ar_Xc, Dt, alpha_s,
                                         alpha_o, alpha_c, nb_cell,
                                         A_thres, W_max, W_min,
                                         ar_lambda, ar_tan_beta,
                                         ar_tan_beta_channel, ar_L,
                                         ar_Ks, ar_theta_r, ar_theta_s,
                                         ar_n_o, ar_n_c, ar_A_drained)

    #~~~~Look for the cell of external_flow tunnel
    if external_flow:
        cell_external_flow = ut.find_cell_coordinates(ar_cell_label,
                                                      Xexternal_flow,
                                                      Yexternal_flow,
                                                      ar_coorx,
                                                      ar_coory,
                                                      ar_lambda)

        print('external flows will be taken into account for cell no',\
            cell_external_flow, ' coordinates ('\
            ,Xexternal_flow,',',Yexternal_flow,')')

    #~~~~Number of simulation time steps
    nb_time_step = len(ndar_rain[:,0])


    ##=============================##
    ##  Variable array definition  ##
    ##=============================##

    ## Initialisation of the reservoirs
    #Matrix of soil,overland and channel store at the begining of the time step
    if append_output and not first_run:
        print('Initialize from file')

        h5file_in = h5py.File(file_out)

        ar_Vs0 = h5file_in['/Soil/V_s'][-1, :]
        ar_Vc0 = h5file_in['/Channel/V_c'][-1, :]
        ar_Vo0 = h5file_in['/Overland/V_o'][-1, :]

        h5file_in.close()
    else:
        print('Initialize from parms')
        ar_Vs0 = fl.initial_volume_soil(ar_pVs_t0, ar_Vsm)
        ar_Vo0 = ar_Vo_t0
        ar_Vc0 = fl.initial_volume_channel(ar_Qc_t0, ar_W, X, ar_n_c)

    ## Computed variables
    #Matrix of soil,overland and channel store at the end of the time step
    ar_Vs1 = np.ones(nb_cell)*-99.9
    ar_Vo1 = np.ones(nb_cell)*-99.9
    ar_Vc1 = np.ones(nb_cell)*-99.9

    #Matrix of outflows between two time steps
    ar_Qs_out = np.ones(nb_cell)*-99.9
    ar_Qo_out = np.ones(nb_cell)*-99.9
    ar_Qc_out = np.zeros(nb_cell)

    ## Intermediate variables
    ar_a_s = np.ones(nb_cell)*-99.9
    ar_a_o = np.ones(nb_cell)*-99.9
    ar_a_c = np.ones(nb_cell)*-99.9
    ar_Q_to_next_cell = np.ones(nb_cell)*-99.9
    ar_Q_to_channel = np.ones(nb_cell)*-99.9
    ar_Q_to_channel_sub = np.zeros(nb_cell)
    ar_Qc_cell_up = np.zeros(nb_cell)
    ar_ETa = np.zeros(nb_cell)
    ar_ET_channel = np.zeros(nb_cell)


    ##=============================##
    ## HDF5 output file definition ##
    ##=============================##
    h5file = h5.openFile(file_out, mode=fmode, title='TOPKAPI_out')

    root = h5file.getNode('/')
    root._v_attrs.pytopkapi_version = pytopkapi.__version__
    root._v_attrs.pytopkapi_git_revision = pytopkapi.__git_revision__

    atom = h5.Float32Atom()
    h5filter = h5.Filters(9)# maximum compression

    # create file structure as necessary
    grp_name = '/Soil'
    if grp_name not in h5file:
        h5file.createGroup('/', 'Soil', 'Soil arrays')
    if grp_name+'/Qs_out' not in h5file:
        array_Qs_out = h5file.createEArray(grp_name, 'Qs_out',
                                           atom, shape=(0,nb_cell),
                                           title='m3/s', filters=h5filter,
                                           expectedrows=nb_time_step)
    else:
        array_Qs_out = h5file.getNode(grp_name+'/Qs_out')
    if grp_name+'/V_s' not in h5file:
        array_Vs = h5file.createEArray(grp_name, 'V_s',
                                       atom, shape=(0, nb_cell),
                                       title='m3', filters=h5filter,
                                       expectedrows=nb_time_step+1)
    else:
        array_Vs = h5file.getNode(grp_name+'/V_s')

    grp_name = '/Overland'
    if grp_name not in h5file:
        h5file.createGroup('/', 'Overland', 'Overland arrays')
    if grp_name+'/Qo_out' not in h5file:
        array_Qo_out = h5file.createEArray(grp_name, 'Qo_out',
                                           atom, shape=(0,nb_cell),
                                           title='m3/s', filters=h5filter,
                                           expectedrows=nb_time_step)
    else:
        array_Qo_out = h5file.getNode(grp_name+'/Qo_out')
    if grp_name+'/V_o' not in h5file:
        array_Vo = h5file.createEArray(grp_name, 'V_o',
                                       atom, shape=(0,nb_cell),
                                       title='m3', filters=h5filter,
                                       expectedrows=nb_time_step+1)
    else:
        array_Vo = h5file.getNode(grp_name+'/V_o')

    grp_name = '/Channel'
    if grp_name not in h5file:
        h5file.createGroup('/', 'Channel', 'Channel arrays')
    if grp_name+'/Qc_out' not in h5file:
        array_Qc_out = h5file.createEArray(grp_name, 'Qc_out',
                                           atom, shape=(0,nb_cell),
                                           title='m3/s', filters=h5filter,
                                           expectedrows=nb_time_step)
    else:
        array_Qc_out = h5file.getNode(grp_name+'/Qc_out')
    if grp_name+'/V_c' not in h5file:
        array_Vc = h5file.createEArray(grp_name, 'V_c',
                                       atom, shape=(0,nb_cell),
                                       title='m3', filters=h5filter,
                                       expectedrows=nb_time_step)
    else:
        array_Vc = h5file.getNode(grp_name+'/V_c')
    if grp_name+'/Ec_out' not in h5file:
        array_Ec_out = h5file.createEArray(grp_name, 'Ec_out',
                                           atom, shape=(0,nb_cell),
                                           title='m3', filters=h5filter,
                                           expectedrows=nb_time_step)
    else:
        array_Ec_out = h5file.getNode(grp_name+'/Ec_out')

    if '/ET_out' not in h5file:
        array_ET_out = h5file.createEArray('/', 'ET_out',
                                           atom, shape=(0,nb_cell),
                                           title='mm', filters=h5filter,
                                           expectedrows=nb_time_step)
    else:
        array_ET_out = h5file.getNode('/ET_out')

    if '/Q_down' not in h5file:
        array_Q_down = h5file.createEArray('/', 'Q_down',
                                           atom, shape=(0,nb_cell),
                                           title='m3/s', filters=h5filter,
                                           expectedrows=nb_time_step)
    else:
        array_Q_down = h5file.getNode('/Q_down')

    if append_output is False or first_run is True:
        #Write the initial values into the output file
        array_Vs.append(ar_Vs0.reshape((1,nb_cell)))
        array_Vo.append(ar_Vo0.reshape((1,nb_cell)))
        array_Vc.append(ar_Vc0.reshape((1,nb_cell)))

        array_Qs_out.append(ar_Qs_out.reshape((1,nb_cell)))
        array_Qo_out.append(ar_Qo_out.reshape((1,nb_cell)))
        array_Qc_out.append(ar_Qc_out.reshape((1,nb_cell)))

        array_Q_down.append(ar_Q_to_next_cell.reshape((1,nb_cell)))

        array_ET_out.append(ar_ETa.reshape((1,nb_cell)))

        E_vol = ar_ET_channel*1e-3 * ar_W * ar_Xc
        array_Ec_out.append(E_vol.reshape((1,nb_cell)))

    eff_theta = ar_theta_s - ar_theta_r

    ##===========================##
    ##     Core of the Model     ##
    ##===========================##
    print('** NB_CELL=',nb_cell)
    print('** NB_TIME_STEP=',nb_time_step)
    print('--> SIMULATIONS <--')

    ## Loop on time
    for t in range(nb_time_step):
        print(t+1, '/', nb_time_step)

        eff_sat = ar_Vs0/ar_Vsm

        # estimate soil suction head using Brookes and Corey (1964)
        psi = psi_b/np.power(eff_sat, 1.0/lamda)

        ## Loop on cells
        n=-1
        for cell1 in ar_label_sort:
            cell=np.where(ar_cell_label==cell1)[0][0]
            n=n+1


            ## ======================== ##
            ## ===== INTERCEPTION ===== ##
            ## ======================== ##
            ## No interception for the moment

            ## ======================== ##
            ## ===== INFILTRATION ===== ##
            ## ======================== ##
            rain_rate = ndar_rain[t, cell]/Dt

            infiltration_depth = green_ampt_cum_infiltration(rain_rate,
                                                             psi[cell],
                                                             eff_theta[cell],
                                                             eff_sat[cell],
                                                             ar_Ks[cell], Dt)

            ## ====================== ##
            ## ===== SOIL STORE ===== ##
            ## ====================== ##
            #~~~~ Computation of soil input
            ar_a_s[cell] = fl.input_soil(infiltration_depth,
                                         Dt, X,
                                         ar_Q_to_next_cell,
                                         li_cell_up[cell])

            #~~~~ Resolution of the equation dV/dt=a_s-b_s*V^alpha_s
            # Calculate the volume in the soil store at the end of the
            # current time-step.

            Vs_prim = om.solve_storage_eq(ar_a_s[cell], ar_b_s[cell],
                                          alpha_s, ar_Vs0[cell], Dt, solve_s)

            #~~~~ Computation of soil outflow and overland input
            ar_Qs_out[cell], ar_Vs1[cell] = fl.output_soil(ar_Vs0[cell],
                                                           Vs_prim,
                                                           ar_Vsm[cell],
                                                           ar_a_s[cell],
                                                           ar_b_s[cell],
                                                           alpha_s, Dt)
            if ar_Qs_out[cell] < 0:
                print('Problem Soil:output greater than input....')
                print('n=', n, 'label=', cell)
                stop

            ## ========================== ##
            ## ===== OVERLAND STORE ===== ##
            ## ========================== ##
            #~~~~ Computation of overland input
            rain_excess = ndar_rain[t, cell] - infiltration_depth
            # convert mm to m^3/s
            rain_excess = max(0, (rain_excess*(10**-3)/Dt)*X**2)

            ar_a_o[cell] = max(0,
                               ar_a_s[cell] \
                               - ((ar_Vs1[cell]-ar_Vs0[cell])/Dt \
                                  + ar_Qs_out[cell]) \
                               + rain_excess)

            #~~~~ Resolution of the equation dV/dt=a_o-b_o*V^alpha_o

            ar_Vo1[cell] = om.solve_storage_eq(ar_a_o[cell],
                                               ar_b_o[cell], alpha_o,
                                               ar_Vo0[cell], Dt, solve_o)

            #~~~~ Computation of overland outflows
            ar_Qo_out[cell] = fl.Qout_computing(ar_Vo0[cell], ar_Vo1[cell],
                                                ar_a_o[cell], Dt)

            if ar_Qo_out[cell] < 0:
                print('Problem Overland:output greater than input....')
                print('n=', n, 'label=', cell)
                stop

            ## ============================= ##
            ## ===== FLOW PARTITIONING ===== ##
            ## ============================= ##
            # ar_Q_to_channel_sub doesn't get used for anything?

            ar_Q_to_next_cell[cell], \
            ar_Q_to_channel[cell], \
            ar_Q_to_channel_sub[cell] = fl.flow_partitioning(ar_lambda[cell],
                                                             ar_Qs_out[cell],
                                                             ar_Qo_out[cell],
                                                             ar_W[cell],
                                                             X, ar_Xc[cell])

            ## ======================== ##
            ## ===== CHANNEL STORE ==== ##
            ## ======================== ##
            if ar_lambda[cell] == 1:
                if ar_cell_down[cell] >= 0 \
                   and ar_lambda[ar_cell_down[cell]] == 0:

                    print('Problem: the present cell has a channel but not the cell down...')
                    Stop

                #~~~~ Computation of channel input
                ar_a_c[cell], \
                ar_Qc_cell_up[cell] = fl.input_channel(ar_Qc_out,
                                                       ar_Q_to_channel[cell],
                                                       li_cell_up[cell])

                if external_flow \
                and cell == np.where(ar_cell_label==cell_external_flow)[0][0]:
                    ar_a_c[cell] = ar_a_c[cell] + ar_Qexternal_flow[t]

                #~~~~ Resolution of the equation dV/dt=a_c-b_c*V^alpha_c

                ar_Vc1[cell] = om.solve_storage_eq(ar_a_c[cell],
                                                   ar_b_c[cell], alpha_c,
                                                   ar_Vc0[cell], Dt, solve_c)

                #~~~~ Computation of channel outflows
                ar_Qc_out[cell] = fl.Qout_computing(ar_Vc0[cell],
                                                    ar_Vc1[cell],
                                                    ar_a_c[cell], Dt)

                if ar_Qc_out[cell] < 0:
                    print('Problem Channel: output greater than input....')
                    stop
                if str(ar_Qc_out[cell]).count('N') > 0:
                    print(ar_Qc_out[cell])
                    print('Problem Channel: Non authorized operand....')
                    stop

            else:
                ar_a_c[cell] = 0.
                ar_Vc1[cell] = 0.
                ar_Qc_out[cell] = 0.


            ## ============================== ##
            ## ===== EVAPOTRANSPIRATION ===== ##
            ## ============================== ##
            #~~~~~ From soil
            ar_ETa[cell], \
            ar_Vs1[cell], \
            ar_Vo1[cell] = em.evapot_soil_overland(ar_Vo1[cell],
                                                   ar_Vs1[cell],
                                                   ar_Vsm[cell],
                                                   ar_kc[cell],
                                                   ndar_ETr[t, cell], X)

            #~~~~~ Evaporation from channel
            if ar_lambda[cell] == 1:
                ar_ET_channel[cell], \
                ar_Vc1[cell] = em.evapor_channel(ar_Vc1[cell],
                                                 ndar_ETo[t, cell],
                                                 ar_W[cell], ar_Xc[cell])

        ####===================================####
        #### Affectation of new vector values  ####
        ####===================================####
        ar_Vs0 = np.array(ar_Vs1)
        ar_Vo0 = np.array(ar_Vo1)
        ar_Vc0 = np.array(ar_Vc1)

        ####===================================####
        #### Results writing at each time step ####
        ####===================================####
        array_Vs.append(ar_Vs1.reshape((1,nb_cell)))
        array_Vo.append(ar_Vo1.reshape((1,nb_cell)))
        array_Vc.append(ar_Vc1.reshape((1,nb_cell)))

        array_Qs_out.append(ar_Qs_out.reshape((1,nb_cell)))
        array_Qo_out.append(ar_Qo_out.reshape((1,nb_cell)))
        array_Qc_out.append(ar_Qc_out.reshape((1,nb_cell)))

        array_Q_down.append(ar_Q_to_next_cell.reshape((1,nb_cell)))

        array_ET_out.append(ar_ETa.reshape((1,nb_cell)))

        E_vol = ar_ET_channel*1e-3 * ar_W * ar_Xc
        array_Ec_out.append(E_vol.reshape((1,nb_cell)))

    h5file.close()

    print(' ')
    print('***** THE END *****')
# --- Distance-travelled alarm ----------------------------------------
item_seperator('fill')

# AlarmDistanceActivate: on/off toggle for the distance alarm
item_options('Distance travelled alarm, status', 'AlarmDistanceActivate')
option('Active', 'True')
option('Inactive', 'False')
item_count += 1

# AlarmDistanceValue: threshold in nautical miles
item_input_alphanumeric('Distance travelled alarm, set value', 'AlarmDistanceValue', '6', '0', '0', '', ' NM')

# Visual separator between alarm groups
item_seperator('fill')

# Barometer display unit, as configured in the config file.
barounit_mmhg = config.getboolean(config_category2, 'BaroUnitMmhg')
barounit = 'mmHg' if barounit_mmhg else 'Mbar'

# AlarmBaroLowActivate: on/off toggle for the low-pressure alarm
item_options('Barometric pressure (low) alarm, status', 'AlarmBaroLowActivate')
option('Active', 'True')
option('Inactive', 'False')
item_count += 1

# AlarmBaroLowValue: lower pressure limit, suffixed with the chosen unit
item_input_alphanumeric('Barometric pressure (low) alarm, set lower limit', 'AlarmBaroLowValue', '4', '0', '0', '', ' '+barounit)

# AlarmBaroHighActivate
# Exemple #13 (score: 0) — scraped-example separator, kept as a comment
async def on_message(message):
    """Dispatch an incoming Discord message to the matching '.' command.

    Covers giveaway management (.giveaway/.enter/.roll/.end/.approve),
    polls, self- and mod-assignable roles, bug reporting, message-module
    management and misc. utilities. The bot's own messages are ignored
    and DMs are dropped right after command logging.
    """

    global giveaway_is_running

    # We do not want the bot to reply to itself.
    if message.author == client.user:
        return

    # Log all received commands, even if they came from DM.
    if message.content.startswith('.'):
        # Don't log if the second character is " ", "_", "." or "-".
        # Slice instead of index so a message that is just "." cannot
        # raise IndexError (previously: message.content[1]).
        char_2 = message.content[1:2]
        if char_2 not in [" ", "_", ".", "-"]:
            timestamp = datetime.now()
            username = message.author.name
            # NOTE(review): DM channels may not expose a usable .name in
            # every discord.py version — confirm before relying on it here.
            channel = message.channel.name

            log_message = "{} - {}@{}: {}".format(timestamp, username, channel, message.content) + "\n"

            with open("command_log", "a") as log_file:
                log_file.write(log_message)

    # Check if the message was received over DM.
    if message.channel.type == discord.ChannelType.private:
        return

    # Check for the next (expected) patch date for GW2.
    if message.content.startswith('.patch'):
        link = 'https://www.thatshaman.com/tools/countdown/'
        json_link = link + '?format=json'
        with urllib.request.urlopen(json_link) as url:
            s = url.read()
            response = s.decode('utf-8')

        json_obj = json.loads(response)

        # 'date' arrives ISO-8601 formatted ("YYYY-MM-DDThh:mm:ss");
        # keep only the calendar-date part.
        date = json_obj['date'].split("T")[0]

        if json_obj['confirmed']:
            status = "Confirmed"
        else:
            status = "Expected"

        output = "Here's what I could find...\n"
        output += "**Status**: {}\n".format(status)
        output += "**Date**: {}\n".format(date)
        output += "Based on info from that_shaman: <{}>".format(link)

        await client.send_message(message.channel, output)
        return

    # Test for voice access (dev-only). The command string — including its
    # historical spelling — is kept as-is for backwards compatibility.
    if message.content.startswith('.listchannelacces'):
        if message.author.id in devs:
            server = next(iter(client.servers))
            channels = []
            for channel in server.channels:
                if not channel.is_private:
                    if channel.type == discord.ChannelType.text:
                        typename = "text"
                    elif channel.type == discord.ChannelType.voice:
                        typename = "voice"
                    else:
                        typename = "unknown"
                    channels.append("{} ({}),".format(channel.name, typename))
            # Single join instead of repeated string concatenation.
            msg = "".join(channels)
            await client.send_message(message.channel, msg)

    # Show given permissions


    # COMMUNITY GIVEAWAYS!
    # NOTE: '--check-rank' must be tested before the plain '.giveaway'
    # prefix below, or it would never match.
    if message.content.startswith('.giveaway --check-rank'):
        if not giveaway_is_running:
            if has_admin_rights(message.author) or message.author.id in devs:
                # Persist the rank restriction to config.ini so .enter can
                # read it back later.
                config = SafeConfigParser()
                config.read('config.ini')
                config.set('giveaway', 'rank_restriction', 'True')
                with open('config.ini', 'w') as f:
                    config.write(f)

                giveaway_is_running = True
                await client.send_message(message.channel, "{} has started a giveaway! This giveaway is restricted to Little Sprouts and higher (lvl 2+)! Type .enter to enter the giveaway!".format(message.author.mention))

        return

    if message.content.startswith('.giveaway'):
        if not giveaway_is_running:
            if has_admin_rights(message.author) or message.author.id in devs:
                # Unrestricted giveaway: clear the rank restriction flag.
                config = SafeConfigParser()
                config.read('config.ini')
                config.set('giveaway', 'rank_restriction', 'False')
                with open('config.ini', 'w') as f:
                    config.write(f)

                giveaway_is_running = True
                await client.send_message(message.channel, "{} has started a giveaway! This giveaway is open to all! Type .enter to enter the giveaway!".format(message.author.mention))
                return

    if message.content.startswith('.enter'):
        if giveaway_is_running:
            discord_id = message.author.id

            # Check if the user is already entered, either manually by an
            # admin (.approve) or by the user himself.
            resultset = session.query(Giveaway).filter_by(discord_id = discord_id)
            if resultset.count() > 0:
                sent_msg = await client.send_message(message.channel, "{}, you are already entered in the giveaway!".format(message.author.mention))
                await asyncio.sleep(5)
                await client.delete_message(sent_msg)
                await client.delete_message(message)
                return

            # Check if the giveaway is rank restricted.
            config = SafeConfigParser()
            config.read('config.ini')

            rank_restricted = config.getboolean('giveaway', 'rank_restriction')
            # If the rank is restricted and the user doesn't hold the rank,
            # they get an explanatory DM and we exit early.
            if rank_restricted:
                # NOTE(review): only the exact "Little Sprout" role is
                # accepted although the announcement says "and higher" —
                # confirm that higher ranks keep this role.
                member = message.author
                has_rank = False
                for role in member.roles:
                    if role.name == "Little Sprout":
                        has_rank = True

                if not has_rank:
                    explain = ""
                    explain += "The giveaway you tried to enter has a rank restriction.\n\n"
                    explain += "**Why is there a rank restriction?**\n"
                    explain += "We have two types of giveaways. There's global giveaways set up by the GM's and WP, and "
                    explain += "there are giveaways set up by individual members. Global giveaways are open to all. However, when an "
                    explain += "individual member gives away something, they can choose to put this rank restriction in place. Usually this is "
                    explain += "to prevent people generally not really engaging with the community snatching away keys from more active members. "
                    explain += "You should see this type of giveaway as a giveaway where a community member gives away something personal to another (active) community member.\n\n"
                    explain += "**So how do these ranks work?**\n"
                    explain += "The ranks are tied in to the Mee6 leveling system. When you post a message on the discord server, Mee6 gives you XP. "
                    explain += "When you've accumulated enough XP, Mee6 will give you a new level, and at certain levels also a new rank. You need to be "
                    explain += "at least level 2 (which is the Little Sprout rank on discord) to enter this giveaway.\n\n"
                    explain += "**Can I still enter the giveaway if I manage to get the required rank before the giveaway ends?**\n"
                    explain += "Absolutely! Just be aware that it might take several minutes for Mee6 to give you the rank once you hit level 2.\n\n"
                    explain += "**So I should just spam all day to get my rank up huh?**\n"
                    explain += "Mee6 only gives you XP once every minute. Spamming will not help to get your rank any faster.\n\n"
                    explain += "**I still engage with the community through other means. Shouldn't I still be allowed to enter?**\n"
                    explain += "It depends. If you are not active through chat but active through other means, send a message to the member facilitating "
                    explain += "this giveaway. Ultimately, since they are giving away the goods, it's their decision. If they agree with you, you can be "
                    explain += "entered in the giveaway manually!"

                    await client.send_message(member, explain)
                    await client.delete_message(message)

                    return

            new_record = Giveaway(discord_id = discord_id)
            session.add(new_record)
            session.commit()
            sent_msg = await client.send_message(message.channel, "{}, you have succesfully entered the giveaway!".format(message.author.mention))
            await asyncio.sleep(5)
            await client.delete_message(sent_msg)
            await client.delete_message(message)

            return
        else:
            await client.send_message(message.author, "You attempted to enter a giveaway, but there isn't one running. If you believe this is in error, please ask in chat or contact the bot owner or a server admin.")
            await client.delete_message(message)
            return

    if message.content.startswith('.roll'):
        if has_admin_rights(message.author) or message.author.id in devs:
            giveaway_is_running = False
            # Check if there are any entries.
            resultset = session.query(Giveaway)
            if resultset.count() <= 0:
                await client.send_message(message.channel, "There were no entries (left) so no winner was drawn.")
                return

            # Fetch all entries as a list.
            entries = []
            server = next(iter(client.servers))
            # First, get all the entries from the db.
            for instance in session.query(Giveaway):
                # Look up the discord member object from the user id.
                member = server.get_member(instance.discord_id)
                # If the member has left the server, the lookup returns None.
                if member is not None:
                    entries.append(member)

            # Every entrant may have left the server; without this guard the
            # win-chance computation below would divide by zero.
            if not entries:
                await client.send_message(message.channel, "There were no entries (left) so no winner was drawn.")
                return

            win_chance = 1 / len(entries) * 100
            winner = random.choice(entries)

            msg = "Congratulations {}! You won the giveaway (win chance {:.2f}%).".format(winner.mention, win_chance)

            # Remove the winner from the giveaway table so a re-roll cannot
            # pick the same member twice.
            session.query(Giveaway).filter_by(discord_id = winner.id).delete()
            session.commit()

            await client.send_message(message.channel, msg)
            return

    if message.content.startswith('.end'):
        if has_admin_rights(message.author) or message.author.id in devs:
            # Drop all entries and mark the giveaway as finished.
            session.query(Giveaway).delete()
            session.commit()
            giveaway_is_running = False
            await client.send_message(message.channel, "The current giveaway has ended!")
        return

    if message.content.startswith('.approve'):
        if has_admin_rights(message.author) or message.author.id in devs:
            if giveaway_is_running:
                member = message.mentions[0]
                resultset = session.query(Giveaway).filter_by(discord_id = member.id)
                # Don't save a new record if the person already entered.
                if resultset.count() > 0:
                    sent_msg = await client.send_message(message.channel, "This user has already entered the giveaway!")
                    await asyncio.sleep(5)
                    await client.delete_message(sent_msg)
                    await client.delete_message(message)
                else:
                    new_record = Giveaway(discord_id = member.id)
                    session.add(new_record)
                    session.commit()
                    sent_msg = await client.send_message(message.channel, "{} has approved {} for the giveaway manually!".format(message.author.mention, member.mention))
                    await asyncio.sleep(5)
                    await client.delete_message(message)
                return
            else:
                await client.send_message(message.author, "You attempted to manually approve someone for the giveaway, but I couldn't find a giveaway running at this time.")

            return

    # POLLS!
    if message.content.startswith('.poll'):
        if has_admin_rights(message.author) or message.author.id in devs:
            split_message = message.content.split()
            poll_body = "**POLL TIME BA-BY!** \n"
            poll_body += " ".join(split_message[1:])
            poll = await client.send_message(message.channel, poll_body)
            # Seed the poll with thumbs-up / thumbs-down reactions.
            await client.add_reaction(poll, '\U0001F44D')
            await client.add_reaction(poll, '\U0001F44E')
        else:
            return

    # Remove self-assignable role. Must be matched before '.iam' below.
    if message.content.startswith('.iamn'):
        author = message.author
        split_message = message.content.split()
        try:
            role_cmd = split_message[1].lower()
            if role_cmd in self_roles:
                role_to_set = self_roles[role_cmd]
                rm = RoleManager(client, message)
                role = rm.fetch_role_by_name(role_to_set)
                await rm.take_role(role, author)
                return
            else:
                return
        except Exception:
            # Missing argument or role lookup failure: silently ignore.
            return

    # Add self-assignable role.
    if message.content.startswith('.iam'):
        author = message.author
        split_message = message.content.split()
        try:
            role_cmd = split_message[1].lower()
            if role_cmd in self_roles:
                role_to_set = self_roles[role_cmd]
                rm = RoleManager(client, message)
                role = rm.fetch_role_by_name(role_to_set)
                await rm.give_role(role, author)
                return
            else:
                # Unknown role keyword — nothing to do. (Previously a bare
                # `raise` that was immediately swallowed by the except below.)
                return
        except Exception:
            return

    # Add moderator-assignable roles.
    # i.e. .giverole Role MemberMention MemberMention (as *args)
    if message.content.startswith('.giverole'):
        author = message.author
        rm = RoleManager(client, message)
        # Check if message author has access to the command.
        if has_admin_rights(author) or message.author.id in devs:
            # Parse the given command arguments.
            try:
                arguments = message.content.split()
                role_txt = arguments[1].lower()
                # Find the role, see if it is a mass-assignable role and
                # fetch the role object.
                if role_txt in mod_roles:
                    role_name = mod_roles[role_txt]
                    role = rm.fetch_role_by_name(role_name)
                else:
                    return

                # Parse all the members given to the command. Mentions look
                # like <@id> or <@!id> (nickname form), hence the slicing.
                member_mentions = arguments[2:]
                members = [] # Keep track of the fetched member objects
                for mention in member_mentions:
                    if '!' in mention:
                        member_id = mention[3:-1]
                    else:
                        member_id = mention[2:-1]
                    member = message.server.get_member(member_id)
                    members.append(member)

                # Hand over all parsed data to the RoleManager.
                await rm.mass_give_role(role, members)
                return

            except Exception:
                raise
        else:
            return

    # Remove moderator-assignable role.
    if message.content.startswith('.takerole'):
        author = message.author
        rm = RoleManager(client, message)
        # Check if message author has access to the command.
        if has_admin_rights(author) or message.author.id in devs:
            # Parse the given command arguments.
            try:
                arguments = message.content.split()
                role_txt = arguments[1].lower()
                # Find the role, see if it is a mass-assignable role and
                # fetch the role object.
                if role_txt in mod_roles:
                    role_name = mod_roles[role_txt]
                    role = rm.fetch_role_by_name(role_name)
                else:
                    return

                # Parse all the members given to the command.
                member_mentions = arguments[2:]
                members = [] # Keep track of the fetched member objects
                for mention in member_mentions:
                    if '!' in mention:
                        member_id = mention[3:-1]
                    else:
                        member_id = mention[2:-1]
                    member = message.server.get_member(member_id)
                    members.append(member)

                # Hand over all parsed data to the RoleManager.
                await rm.mass_take_role(role, members)
                return

            except Exception:
                raise
        else:
            return

    # Reset the giveaway entries table.
    if message.content.startswith('.reset'):
        pm = PrizeManager(client)
        await pm.reset_giveaway_entries(message)
        return

    # List the entries for the giveaway.
    if message.content.startswith('.entries'):
        pm = PrizeManager(client)
        await pm.print_entries(message)
        return

    # DM the list of giveaway entries.
    if message.content.startswith('.dmentries'):
        pm = PrizeManager(client)
        await pm.dm_entries(message)
        return

    # MAKE SURE THIS STAYS ABOVE .DRAW OR IT WILL NOT FIRE
    if message.content.startswith('.draw --preserve'):
        pm = PrizeManager(client)
        await pm.draw_winner(message, intermediary=True)
        return

    if message.content.startswith('.draw'):
        pm = PrizeManager(client)
        await pm.draw_winner(message)
        return

    if message.content.startswith('.bug'):
        report = " ".join(message.content.split()[1:])
        br = BugReporter(client)
        await br.report_bug(message, report)
        return

    if message.content.startswith('.denyreporting'):
        br = BugReporter(client)
        await br.add_abuser(message, abuser_mention=message.content.split()[1])
        return

    if message.content.startswith('.allowreporting'):
        br = BugReporter(client)
        await br.remove_abuser(message, abuser_mention=message.content.split()[1])
        return

    if message.content.startswith('.issue'):
        # Everything after '.issue ' is "title|body".
        command = message.content[6:]
        try:
            title, body = command.split("|")
            br = BugReporter(client)
            await br.create_issue(message, title, body)
        except Exception:
            return
        return

    # Send copyright notice.
    if message.content.startswith('.copyright'):
        msg = "All Guildwars-related texts, names and images are © 2015 ArenaNet, "
        msg += "LLC. All rights reserved. NCSOFT, the interlocking NC logo, ArenaNet, Guild Wars, "
        msg += "Guild Wars Factions, Guild Wars Nightfall, Guild Wars: Eye of the North, Guild Wars 2, "
        msg += "Heart of Thorns, and all associated logos and designs are trademarks or registered "
        msg += "trademarks of NCSOFT Corporation. All other trademarks are the property of their "
        msg += "respective owners."

        await client.send_message(message.author, msg)
        await client.delete_message(message)
        return

    if message.content.startswith('.build'):
        if has_admin_rights(message.author) or has_build_rights(message.author):
            mb = MessageBuilder(client)
            await mb.print_stuff_happening()

        await client.delete_message(message)
        return

    # DM the evoker a list of all token positions in the event modules.
    if message.content.startswith('.modules'):
        mb = MessageBuilder(client)
        await mb.dm_modules(message)
        return

    if message.content.startswith('.delmodule'):
        try:
            token = message.content.split(" ")[1]
            mb = MessageBuilder(client)
            await mb.delete_module(message, token)
        except Exception:
            raise
        return

    if message.content.startswith('.addmodule'):
        if has_admin_rights(message.author):
            try:
                mb = MessageBuilder(client)
                # Everything after '.addmodule ' is "token|position|image|contents".
                contents = message.content[11:]
                commands = contents.split("|")
                token = commands[0].strip() # Strip leading and trailing spaces
                if not mb.token_in_use(token):
                    position = commands[1].strip()
                    image_link = commands[2].strip()
                    contents = commands[3].strip()

                    await mb.add_module(message, token, position, image_link, contents)
                else:
                    resp = "Sorry, the token you have given me already exists. "
                    resp += "Please find your original message below:\n{}".format(message.content)
                    await client.send_message(message.author, resp)

            except Exception:
                resp = "I'm sorry, but I could not understand you command. Contact the hoster of this bot if you have any questions. "
                resp += "Original message: \n{}".format(message.content)
                await client.send_message(message.author, resp)
                raise

        await client.delete_message(message)

        return

    if message.content.startswith('.commands'):
        # Read the whole command list in one go; the context manager
        # guarantees the file handle is closed (it never was before).
        with open("commands.md", "r") as commands_file:
            msg = commands_file.read()
        await client.send_message(message.author, msg)
        await client.delete_message(message)
        return

    # Sends some bot stats over DM.
    if message.content.startswith('.stats'):
        if message.author.id in settings.REPORT_TO_DEVS or has_admin_rights(message.author):
            server = next(iter(client.servers))

            msg = "**Bot Version**\n"
            msg += "Bot version: {}\n".format(settings.BOT_VERSION)
            msg += "Lib version: {}\n\n".format(discord.__version__)

            # Tally region roles across the whole member list.
            cnt_eu = 0
            cnt_na = 0
            cnt_no_role = 0
            cnt_any = 0
            for member in server.members:
                valid_roles = 0
                for role in member.roles:
                    if role.name == 'NA':
                        cnt_na += 1
                        valid_roles += 1
                    elif role.name == "EU":
                        cnt_eu += 1
                        valid_roles += 1

                if valid_roles == 0:
                    cnt_no_role += 1
                else:
                    cnt_any += valid_roles

            total_found = len(server.members)
            percent_eu = cnt_eu / total_found * 100 if cnt_eu > 0 else 0
            percent_na = cnt_na / total_found * 100 if cnt_na > 0 else 0
            percent_none = cnt_no_role / total_found * 100 if cnt_no_role > 0 else 0
            msg += "**Region role distribution** (of entire population) \n"
            msg += "Total discord members: {}\n".format(total_found)
            msg += "EU role percentage: {:.2f}%  \n".format(percent_eu)
            msg += "NA role percentage: {:.2f}% \n".format(percent_na)
            msg += "No role percentage: {:.2f}% \n".format(percent_none)
            msg += "\n"

            # cnt_any >= cnt_eu and cnt_any >= cnt_na, so these guarded
            # divisions cannot divide by zero.
            percent_eu = cnt_eu / cnt_any * 100 if cnt_eu > 0 else 0
            percent_na = cnt_na / cnt_any * 100 if cnt_na > 0 else 0
            msg += "**Region role distribution amongst distributed roles**\n"
            msg += "EU role percentage: {:.2f}% ({} members)\n".format(percent_eu, cnt_eu)
            msg += "NA role percentage: {:.2f}% ({} members)\n\n".format(percent_na, cnt_na)

            msg += "**Assignable roles allowed** \n"
            msg += "Self-assignable roles list: \n"
            for key in self_roles:
                msg += "\t{} => {} \n".format(key, self_roles[key])

            msg += "\nGM-assignable roles list:  \n"
            for key in mod_roles:
                msg += "\t{} => {} \n".format(key, mod_roles[key])

            msg += "\n"
            pm = PrizeManager(client)
            sum_tickets = pm.sum_entries()
            avg_tickets = pm.avg_entries()
            participants = len(pm.get_entries_as_list())
            msg += "**Giveaway Stats**\n"
            msg += "Giveaway entries distributed this month: {}\n".format(sum_tickets)
            msg += "Average giveaway entries/entered member: {:.2f}\n".format(avg_tickets)
            msg += "Total giveaway participants: {}\n".format(participants)
            await client.send_message(message.author, msg)
            await client.delete_message(message)
        return
# Exemple #14 (score: 0) — scraped-example separator, kept as a comment
import sys

# Module-level setup: parse config.ini once and expose the neural-net
# hyper-parameters as module constants for NN_model below.
config = SafeConfigParser()
log = Logger('nn_model.log')
# NOTE(review): the return value of read() is not checked — a missing
# config.ini will surface later as NoSectionError; confirm that is intended.
config.read('config.ini')
# Raised recursion limit — presumably needed for pickling/serializing the
# trained model; TODO confirm the actual reason.
sys.setrecursionlimit(10000)

# Sizes of the four hidden layers of the MLP.
hidden_1 = config.getint('NeuralNetConfig', 'HIDDEN_1')
hidden_2 = config.getint('NeuralNetConfig', 'HIDDEN_2')
hidden_3 = config.getint('NeuralNetConfig', 'HIDDEN_3')
hidden_4 = config.getint('NeuralNetConfig', 'HIDDEN_4')
# Random seed for reproducible training runs.
rstate = config.getint('NeuralNetConfig', 'RSTATE')
activationFunc = config.get('NeuralNetConfig', 'ACTIVATION')
m_iter = config.getint('NeuralNetConfig', 'MAX_ITER')
l_rate = config.getfloat('NeuralNetConfig', 'LEARNING_RATE')
ver_bose = config.getboolean('NeuralNetConfig', 'VERBOSE')

# Misc settings: train/test split fraction and rounding precision.
t_size = config.getfloat('Misc', 'TEST_SIZE_1')
digit_num = config.getint('Misc', 'DIGIT_NUM')


class NN_model:
    """Thin wrapper around an sklearn MLPClassifier configured from config.ini.

    The network architecture and training hyper-parameters come from the
    module-level constants parsed above (hidden_1..hidden_4, rstate, etc.).
    """

    def __init__(self):
        """Create the (untrained) classifier and reset the report slot."""
        log.info('Initializing Model.')
        self.current_report = None
        log.info('Initializing Neural Network.')
        self.model = MLPClassifier(
            solver='adam',
            alpha=config.getfloat('NeuralNetConfig', 'ALPHA'),
            hidden_layer_sizes=(hidden_1, hidden_2, hidden_3, hidden_4),
            random_state=rstate,
            activation=activationFunc,
            max_iter=m_iter,
            learning_rate_init=l_rate,
            verbose=ver_bose,
        )
Exemple #15
0
def main():
    """Entry point: load configuration, wire up logging, the Mongo-backed
    store and the business-logic layer, then serve the WSGI app forever.

    Exits via sys.exit() with a diagnostic message when mandatory
    configuration (mongodb / email sections) is missing.
    """
    global business_logic, auth_key

    config = SafeConfigParser()
    dirs = ('.', '/etc', '/usr/local/etc')
    # config.read() returns the list of files successfully parsed; an empty
    # list means the config file was not found anywhere in `dirs`.
    if not config.read([os.path.join(d, config_file) for d in dirs]):
        sys.exit('Could not find {} in {}'.format(config_file, dirs))

    # Logging is best-effort: a missing or broken [logging] section must not
    # prevent startup, so failures here are deliberately ignored.
    try:
        logfile = config.get('logging', 'file')
        rotating = config.getboolean('logging', 'rotate', fallback=False)
        if rotating:
            # getint (not get): values read from the file arrive as strings,
            # but RotatingFileHandler expects integers for these arguments.
            max_size = config.getint('logging', 'max_size', fallback=1048576)
            backup_count = config.getint('logging', 'backup_count', fallback=5)
            handler = logbook.RotatingFileHandler(logfile, max_size=max_size,
                                                  backup_count=backup_count)
        else:
            handler = logbook.FileHandler(logfile)
        handler.push_application()
    except Exception:
        # Narrowed from a bare 'except:' so SystemExit / KeyboardInterrupt
        # still propagate; file logging remains optional.
        pass

    try:
        kwargs = dict(config.items('mongodb'))
    except NoSectionError:
        sys.exit('No "mongodb" section in config file')
    args = []
    # The four positional store arguments are required; everything else in
    # the [mongodb] section is passed through as keyword arguments.
    for arg in ('hosts', 'database', 'username', 'password'):
        try:
            args.append(config.get('mongodb', arg))
        except NoOptionError:
            sys.exit('No "{}" setting in "mongodb" section of config file'.
                     format(arg))
        kwargs.pop(arg)
    store = MongoStore(*args, **kwargs)

    try:
        email_sender = config.get('email', 'sender')
    except NoSectionError:
        sys.exit('No "email" section in config file')
    except NoOptionError:
        sys.exit('No "sender" setting in "email" section of config file')

    business_logic = BusinessLogic(store, email_sender)

    try:
        listen_port = int(config.get('wsgi', 'port'))
        log.info('Binding to port {}'.format(listen_port))
    except (NoSectionError, NoOptionError, ValueError):
        # Missing or non-numeric port falls back to the HTTP default.
        listen_port = 80
        log.info('Binding to default port {}'.format(listen_port))

    try:
        auth_key = config.get('wsgi', 'auth_key')
        log.info('Server authentication enabled')
    except (NoSectionError, NoOptionError):
        log.warning('Server authentication DISABLED')

    httpd = make_server('', listen_port, application,
                        handler_class=LogbookWSGIRequestHandler)
    business_logic.schedule_next_deadline()
    httpd.serve_forever()
Exemple #16
0
def run(ini_file='TOPKAPI.ini',
        verbose=False, quiet=False,
        parallel_exec=True, nworkers=int(mp.cpu_count()-1)):
    """Run the model.

    Parameters
    ----------
    ini_file : str
       The name of the PyTOPKAPI initialization file. This file describes the
       locations of the parameter files and model setup options. Default is to
       use a file named `TOPKAPI.ini` in the current directory.
    verbose : bool
        Prints runtime information [default False - don't display runtime
        info]. Is independent of the `quiet` keyword argument.
    quiet : bool
        Toggles whether to display an informational banner at runtime [default
        False - display banner]. Is independent of the `verbose` keyword
        argument.
    parallel_exec : bool
        Solve the cell time-series with multiple worker processes when True
        [default True], otherwise solve serially in a single process.
    nworkers : int
        Number of worker processes to spawn for solving each cell's time-series
        in parallel. Default is one fewer than CPU count reported by
        multiprocessing.

    """

    ##================================##
    ##  Read the input file (*.ini)   ##
    ##================================##
    config = SafeConfigParser()
    config.read(ini_file)

    ##~~~~~~ Numerical_options ~~~~~~##
    # Flags selecting the solver for soil (s), overland (o) and channel (c).
    solve_s = config.getint('numerical_options', 'solve_s')
    solve_o = config.getint('numerical_options', 'solve_o')
    solve_c = config.getint('numerical_options', 'solve_c')

    ##~~~~~~~~~~~ input files ~~~~~~~~~~~##
    #Param
    file_global_param = config.get('input_files', 'file_global_param')
    file_cell_param = config.get('input_files', 'file_cell_param')
    #Rain
    file_rain = config.get('input_files', 'file_rain')
    #ETP
    file_ET = config.get('input_files', 'file_ET')

    #~~~~~~~~~~~ Group (simulated event) ~~~~~~~~~~~##
    group_name = config.get('groups', 'group_name')

    ##~~~~~~ Calibration ~~~~~~##
    # Multiplicative calibration factors applied to the physical parameters.
    fac_L = config.getfloat('calib_params', 'fac_L')
    fac_Ks = config.getfloat('calib_params', 'fac_Ks')
    fac_n_o = config.getfloat('calib_params', 'fac_n_o')
    fac_n_c = config.getfloat('calib_params', 'fac_n_c')

    ##~~~~~~ External flows ~~~~~~##
    external_flow = config.getboolean('external_flow', 'external_flow')
    if external_flow:
        file_Qexternal_flow = config.get('external_flow',
                                         'file_Qexternal_flow')
        Xexternal_flow = config.getfloat('external_flow', 'Xexternal_flow')
        Yexternal_flow = config.getfloat('external_flow', 'Yexternal_flow')

    ##~~~~~~~~~~~ output files ~~~~~~~~~~##
    file_out = config.get('output_files', 'file_out')
    ut.check_file_exist(file_out) #create path_out if it doesn't exist
    # first_run distinguishes a fresh simulation from a continuation of an
    # existing output file (used below when initialising the reservoirs).
    if os.path.exists(file_out):
        first_run = False
    else:
        first_run = True

    append_output = config.getboolean('output_files', 'append_output')
    if append_output is True:
        fmode = 'a'
    else:
        fmode = 'w'

    ##============================##
    ##   Read the forcing data    ##
    ##============================##
    if verbose:
        print('Read the forcing data')

    #~~~~Rainfall
    h5_rain = h5py.File(file_rain)
    dset_name = '/{}/rainfall'.format(group_name)
    rainfall_forcing = h5_rain[dset_name][...]
    h5_rain.close()

    #~~~~ETr - Reference crop ET
    h5_ET = h5py.File(file_ET)
    dset_name = '/{}/ETr'.format(group_name)
    ETr_forcing = h5_ET[dset_name][...]

    #~~~~ETo - Open water potential evap.
    dset_name = '/{}/ETo'.format(group_name)
    ET0_forcing = h5_ET[dset_name][...]
    h5_ET.close()

    #~~~~external_flow flows
    # Column 5 of the external-flow file holds the discharge record —
    # TODO confirm against the file format documentation.
    if external_flow:
        external_flow_records = np.loadtxt(file_Qexternal_flow)[:, 5]
    else:
        external_flow_records = None


    ##============================##
    ## Pretreatment of input data ##
    ##============================##
    if verbose:
        print('Pretreatment of input data')

    #~~~~Read Global parameters file
    X, Dt, alpha_s, \
    alpha_o, alpha_c, \
    A_thres, W_min, W_max = pm.read_global_parameters(file_global_param)

    #~~~~Read Cell parameters file
    ar_cell_label, ar_coorx, \
    ar_coory, channel_flag, \
    Xc, ar_dam, \
    ar_tan_beta, ar_tan_beta_channel, \
    ar_L, Ks, \
    ar_theta_r, ar_theta_s, \
    ar_n_o, ar_n_c, \
    ar_cell_down, ar_pVs_t0, \
    ar_Vo_t0, ar_Qc_t0, \
    Kc, psi_b, lamda = pm.read_cell_parameters(file_cell_param)

    #~~~~Number of cell in the catchment
    nb_cell = len(ar_cell_label)

    #~~~~Computation of cell order
    node_hierarchy = pm.compute_node_hierarchy(ar_cell_label, ar_cell_down)
    ar_label_sort = pm.sort_cell(ar_cell_label, ar_cell_down)

    #~~~~Computation of upcells
    li_cell_up = pm.direct_up_cell(ar_cell_label, ar_cell_down, ar_label_sort)

    #~~~~Computation of drained area
    ar_A_drained = pm.drained_area(ar_label_sort, li_cell_up, X)

    #~~~~Apply calibration factors to the parameter values
    ar_L = ar_L*fac_L
    Ks = Ks*fac_Ks
    ar_n_o = ar_n_o*fac_n_o
    ar_n_c = ar_n_c*fac_n_c

    if verbose:
        print('Max L=', max(ar_L))
        print('Max Ks=', max(Ks))
        print('Max n_o=', max(ar_n_o))
        print('Max n_c=', max(ar_n_c))

    #~~~~Computation of model parameters from physical parameters
    Vsm, b_s, b_o, \
    W, b_c = pm.compute_cell_param(X, Xc, Dt, alpha_s,
                                         alpha_o, alpha_c, nb_cell,
                                         A_thres, W_max, W_min,
                                         channel_flag, ar_tan_beta,
                                         ar_tan_beta_channel, ar_L,
                                         Ks, ar_theta_r, ar_theta_s,
                                         ar_n_o, ar_n_c, ar_A_drained)

    #~~~~Look for the cell of external_flow tunnel
    if external_flow:
        cell_external_flow = ut.find_cell_coordinates(ar_cell_label,
                                                      Xexternal_flow,
                                                      Yexternal_flow,
                                                      ar_coorx,
                                                      ar_coory,
                                                      channel_flag)

        if verbose:
            print('external flows will be taken into account for cell no',\
                  cell_external_flow, ' coordinates ('\
                  ,Xexternal_flow,',',Yexternal_flow,')')
    else:
        cell_external_flow = None

    #~~~~Number of simulation time steps
    nb_time_step = rainfall_forcing.shape[0]


    ##=============================##
    ##  Variable array definition  ##
    ##=============================##

    ## Initialisation of the reservoirs
    #Matrix of soil,overland and channel store at the begining of the time step
    # When appending to an existing output file, resume from the last saved
    # state; otherwise derive the initial volumes from the cell parameters.
    if append_output and not first_run:
        if verbose:
            print('Initialize from simulation file')

        h5file_in = h5py.File(file_out)

        Vs_t0 = h5file_in['/Soil/V_s'][-1, :]
        Vc_t0 = h5file_in['/Channel/V_c'][-1, :]
        Vo_t0 = h5file_in['/Overland/V_o'][-1, :]

        h5file_in.close()
    else:
        if verbose:
            print('Initialize from parameters')
        Vs_t0 = fl.initial_volume_soil(ar_pVs_t0, Vsm)
        Vo_t0 = ar_Vo_t0
        Vc_t0 = fl.initial_volume_channel(ar_Qc_t0, W, X, ar_n_c)

    ##=============================##
    ## HDF5 output file definition ##
    ##=============================##
    # NOTE(review): `no_data` is a module-level name defined elsewhere in
    # this file — confirm it is set before run() is called.
    h5file, dset_Vs, dset_Vo, dset_Vc,     \
    dset_Qs_out, dset_Qo_out, dset_Qc_out, \
    dset_Q_down, dset_ET_out, dset_Ec_out  \
                                    = ut.open_simulation_file(file_out, fmode,
                                                   Vs_t0, Vo_t0, Vc_t0, no_data,
                                                   nb_cell, nb_time_step,
                                                   append_output, first_run)

    # Effective (drainable) porosity of each cell.
    eff_theta = ar_theta_s - ar_theta_r

    ##===========================##
    ##     Core of the Model     ##
    ##===========================##
    if not quiet:
        ut.show_banner(ini_file, nb_cell, nb_time_step)
        progress_desc = 'Simulation'
    else:
        progress_desc = 'PyTOPKAPI v{}'.format(pytopkapi.__version__)

    # prepare parameter dict
    exec_params = {'nb_cell': nb_cell,
                   'nb_time_step': nb_time_step,
                   'progress_desc': progress_desc,
                   'Dt': Dt,
                   'rainfall_forcing': rainfall_forcing,
                   'ETr_forcing': ETr_forcing,
                   'ET0_forcing': ET0_forcing,
                   'psi_b': psi_b,
                   'lamda': lamda,
                   'eff_theta': eff_theta,
                   'Ks': Ks,
                   'X': X,
                   'b_s': b_s,
                   'b_o': b_o,
                   'b_c': b_c,
                   'alpha_s': alpha_s,
                   'alpha_o': alpha_o,
                   'alpha_c': alpha_c,
                   'Vs_t0': Vs_t0,
                   'Vo_t0': Vo_t0,
                   'Vc_t0': Vc_t0,
                   'Vsm': Vsm,
                   'dset_Vs': dset_Vs,
                   'dset_Vo': dset_Vo,
                   'dset_Vc': dset_Vc,
                   'dset_Qs_out': dset_Qs_out,
                   'dset_Qo_out': dset_Qo_out,
                   'dset_Qc_out': dset_Qc_out,
                   'dset_Q_down': dset_Q_down,
                   'dset_ET_out': dset_ET_out,
                   'dset_Ec_out': dset_Ec_out,
                   'solve_s': solve_s,
                   'solve_o': solve_o,
                   'solve_c': solve_c,
                   'channel_flag': channel_flag,
                   'W': W,
                   'Xc': Xc,
                   'Kc': Kc,
                   'cell_external_flow': cell_external_flow,
                   'external_flow_records': external_flow_records,
                   'node_hierarchy': node_hierarchy,
                   'li_cell_up': li_cell_up,
                   'nworkers': nworkers}

    if not parallel_exec:
        # Serial execution. Solve by timestep in a single process.
        # Outer loop timesteps - inner loop cells
        _serial_execute(exec_params)
    else:
        # Parallel execution. Solve by cell using multiple processes.
        # Outer loop cells - inner loop timesteps
        _parallel_execute(exec_params)

    h5file.close()
Exemple #17
0
import pygeoutil.util as util

# Parse config file
# Module-level configuration for the crop-rotations pipeline: all constants
# below are read once from config_rotations.txt at import time.
parser = SafeConfigParser()
parser.read('config_rotations.txt')

# Get directory path (3 levels up is the parent directory)
dir_prj = str(Path(__file__).parents[3])

FAO_START_YR = parser.getint('PARAMETERS', 'FAO_START_YR') # Starting year of FAO data
FAO_END_YR = parser.getint('PARAMETERS', 'FAO_END_YR')    # Ending year of FAO data
TAG = parser.get('PROJECT', 'TAG')
FAO_FILE = parser.get('PROJECT', 'fao_data')
FAO_SHEET = parser.get('PROJECT', 'fao_sheet')
PROJ_NAME = parser.get('PROJECT', 'project_name')
DO_PARALLEL = parser.getboolean('PARAMETERS', 'DO_PARALLEL')           # Use multiprocessing or not?
# Hard-coded grid and plotting constants (not read from the config file).
NUM_LATS = 180.0
NUM_LONS = 360.0
PLOT_CNTRS = 10
PLOT_CROPS = 10
DPI = 300
CFT_FRAC_YR = parser.getint('PARAMETERS', 'CFT_FRAC_YR')
GLM_STRT_YR = parser.getint('PARAMETERS', 'GLM_STRT_YR')
GLM_END_YR = parser.getint('PARAMETERS', 'GLM_END_YR')
FILL_ZEROS = parser.getboolean('PARAMETERS', 'FILL_ZEROS')
TEST_CFT = parser.getboolean('PROJECT', 'TEST_CFT')

# Directories
data_dir = dir_prj + os.sep + parser.get('PATHS', 'data_dir') + os.sep
out_dir = dir_prj + os.sep + parser.get('PATHS', 'out_dir') + os.sep + PROJ_NAME + os.sep
# NOTE(review): out_dir already ends with os.sep, so log_dir contains a
# doubled separator — harmless on most platforms, but worth confirming.
log_dir = out_dir + os.sep + 'Logs'
Exemple #18
0
# Parse config file
# Module-level configuration for the IAM/GLM pipeline, read once from
# ../config_IAM.txt at import time.
parser = SafeConfigParser()
parser.read('../config_IAM.txt')

# Get directory path (3 levels up is the parent directory)
dir_prj = str(Path(__file__).parents[3])

# Common values
# Continent id -> name mapping used throughout the pipeline.
dict_conts = {0: 'Antartica', 1: 'North_America', 2: 'South_America', 3: 'Europe', 4: 'Asia', 5: 'Africa',
              6: 'Australia'}

#####################################################################################
# Tags to be modified by user
#####################################################################################
do_email = parser.getboolean('CONTROL', 'do_email')
# email_list / ending_diag_cols are stored as Python literals in the config
# file and parsed safely with ast.literal_eval (never eval).
email_list = ast.literal_eval(parser.get('CONTROL', 'email_list'))
SHFT_MAP = parser.get('CONTROL', 'SHFT_MAP')  # Use Andreas or Butler?
MOVIE_SEP = 10
do_LUH1 = parser.getboolean('CONTROL', 'do_LUH1')
PLOT_HYDE = parser.getboolean('CONTROL', 'PLOT_HYDE')
PREPROCESS_GCAM = parser.getboolean('CONTROL', 'PREPROCESS_GCAM')
PREPROCESS_IMAG = parser.getboolean('CONTROL', 'PREPROCESS_IMAG')
CONVERT_WH = parser.getboolean('CONTROL', 'CONVERT_WH')  # Convert WH information from AEZ to country level
ending_diag_cols = ast.literal_eval(parser.get('CONTROL', 'ending_diag_cols'))
MATURITY_AGE = parser.getfloat('CONTROL', 'MATURITY_AGE')

# Directories
input_dir = dir_prj + os.sep + parser.get('GLM', 'path_input')
gcam_dir = input_dir + os.sep + parser.get('PATHS', 'gcam_dir') + os.sep
out_dir = dir_prj + os.sep + parser.get('PATHS', 'out_dir') + os.sep + parser.get('PROJECT', 'project_name') + os.sep
Exemple #19
0
    'update_startup_delay': '30',
    'autocheck_updates': '0',
    'update_notify': True,
    'update_showicon': True
    }

# Build the updater configuration: start from CONF_DEFAULTS, make sure the
# [yumex] section exists, then overlay any values from the on-disk file.
CONFIG = SafeConfigParser(CONF_DEFAULTS)
if not CONFIG.has_section('yumex'):
    CONFIG.add_section('yumex')
if os.path.exists(CONF_FILE):
    CONFIG.read(CONF_FILE)


# Typed views of the [yumex] options (defaults apply when the file is absent).
TIMER_STARTUP_DELAY = CONFIG.getint('yumex', 'update_startup_delay')
UPDATE_INTERVAL = CONFIG.getint('yumex', 'update_interval')
AUTOCHECK_UPDATE = CONFIG.getboolean('yumex', 'autocheck_updates')
NOTIFY = CONFIG.getboolean('yumex', 'update_notify')
SHOWICON = CONFIG.getboolean('yumex', 'update_showicon')


def check_pid(pid):
    """ Check For the existence of a unix pid.

    Sending signal 0 performs error checking only: it never delivers a
    signal, but fails if the process does not exist.  EPERM means the
    process exists but belongs to another user, so it must count as alive.
    """
    import errno

    try:
        os.kill(pid, 0)
    except OSError as err:
        # ESRCH -> no such process; EPERM -> exists but not signalable by us.
        return err.errno == errno.EPERM
    else:
        return True


class UpdateTimestamp:
Exemple #20
0
class Buildozer(object):

    ERROR = 0
    INFO = 1
    DEBUG = 2

    standard_cmds = ('distclean', 'update', 'debug', 'release', 'deploy',
                     'run', 'serve')

    def __init__(self, filename='buildozer.spec', target=None):
        """Load the spec file, apply env-var overrides and optionally select
        a build target.

        filename: path of the buildozer spec file (default 'buildozer.spec').
        target: optional target name (e.g. 'android') to activate immediately.
        """
        super(Buildozer, self).__init__()
        self.log_level = 2
        self.environ = {}
        self.specfilename = filename
        self.state = None
        self.build_id = None
        self.config_profile = ''
        self.config = SafeConfigParser(allow_no_value=True)
        # Preserve option-name case (default optionxform lowercases keys).
        self.config.optionxform = lambda value: value
        # Graft the buildozer-specific accessors onto the parser instance so
        # the rest of the code can call config.getlist(...) etc. directly.
        self.config.getlist = self._get_config_list
        self.config.getlistvalues = self._get_config_list_values
        self.config.getdefault = self._get_config_default
        self.config.getbooldefault = self._get_config_bool
        self.config.getrawdefault = self._get_config_raw_default

        if exists(filename):
            try:
                self.config.read(filename, "utf-8")
            except TypeError:  # python 2 has no second arg here
                self.config.read(filename)
            self.check_configuration_tokens()

        # Check all section/tokens for env vars, and replace the
        # config value if a suitable env var exists.
        set_config_from_envs(self.config)

        try:
            self.log_level = int(
                self.config.getdefault('buildozer', 'log_level', '2'))
        except Exception:
            pass

        self.user_bin_dir = self.config.getdefault('buildozer', 'bin_dir',
                                                   None)
        if self.user_bin_dir:
            self.user_bin_dir = realpath(join(self.root_dir,
                                              self.user_bin_dir))

        self.targetname = None
        self.target = None
        if target:
            self.set_target(target)

    def set_target(self, target):
        '''Set the target to use (one of buildozer.targets, such as "android")

        Imports buildozer.targets.<target>, instantiates it via its
        get_target() factory, then validates the build layout and the spec
        file tokens for the new target.
        '''
        self.targetname = target
        # fromlist forces __import__ to return the leaf submodule rather
        # than the top-level 'buildozer' package.
        m = __import__('buildozer.targets.{0}'.format(target),
                       fromlist=['buildozer'])
        self.target = m.get_target(self)
        self.check_build_layout()
        self.check_configuration_tokens()

    def prepare_for_build(self):
        '''Prepare the build.
        '''
        assert (self.target is not None)
        if hasattr(self.target, '_build_prepared'):
            return

        self.info('Preparing build')

        self.info('Check requirements for {0}'.format(self.targetname))
        self.target.check_requirements()

        self.info('Install platform')
        self.target.install_platform()

        self.info('Check application requirements')
        self.check_application_requirements()

        self.info('Check garden requirements')
        self.check_garden_requirements()

        self.info('Compile platform')
        self.target.compile_platform()

        # flag to prevent multiple build
        self.target._build_prepared = True

    def build(self):
        '''Do the build.

        The target can set build_mode to 'release' or 'debug' before calling
        this method.

        (:meth:`prepare_for_build` must have been call before.)
        '''
        assert (self.target is not None)
        assert (hasattr(self.target, '_build_prepared'))

        if hasattr(self.target, '_build_done'):
            return

        # increment the build number
        self.build_id = int(self.state.get('cache.build_id', '0')) + 1
        self.state['cache.build_id'] = str(self.build_id)

        self.info('Build the application #{}'.format(self.build_id))
        self.build_application()

        self.info('Package the application')
        self.target.build_package()

        # flag to prevent multiple build
        self.target._build_done = True

    #
    # Log functions
    #

    def log(self, level, msg):
        if level > self.log_level:
            return
        if USE_COLOR:
            color = COLOR_SEQ(LOG_LEVELS_C[level])
            print(''.join((RESET_SEQ, color, '# ', msg, RESET_SEQ)))
        else:
            print('{} {}'.format(LOG_LEVELS_T[level], msg))

    def debug(self, msg):
        # Convenience wrapper: emit msg at DEBUG verbosity.
        self.log(self.DEBUG, msg)

    def log_env(self, level, env):
        """dump env into debug logger in readable format"""
        self.log(level, "ENVIRONMENT:")
        for k, v in env.items():
            self.log(level, "    {} = {}".format(k, pformat(v)))

    def info(self, msg):
        # Convenience wrapper: emit msg at INFO verbosity.
        self.log(self.INFO, msg)

    def error(self, msg):
        # Convenience wrapper: emit msg at ERROR verbosity.
        self.log(self.ERROR, msg)

    #
    # Internal check methods
    #

    def checkbin(self, msg, fn):
        self.debug('Search for {0}'.format(msg))
        if exists(fn):
            return realpath(fn)
        for dn in environ['PATH'].split(':'):
            rfn = realpath(join(dn, fn))
            if exists(rfn):
                self.debug(' -> found at {0}'.format(rfn))
                return rfn
        self.error('{} not found, please install it.'.format(msg))
        exit(1)

    def cmd(self, command, **kwargs):
        """Run `command` in a subprocess, streaming and/or capturing output.

        Extra keyword arguments consumed here (everything else goes to Popen):
          show_output    -- echo child output to our stdout/stderr
                            (default: log_level > 1)
          get_stdout     -- capture and return the child's stdout
          get_stderr     -- capture and return the child's stderr
          break_on_error -- raise BuildozerCommandException on non-zero exit
                            (default True)
          sensible       -- log only the program name, not full arguments
                            (for commands containing secrets)

        Returns (stdout_text or None, stderr_text or None, returncode).
        """
        # prepare the environ, based on the system + our own env
        env = copy(environ)
        env.update(self.environ)

        # prepare the process
        kwargs.setdefault('env', env)
        kwargs.setdefault('stdout', PIPE)
        kwargs.setdefault('stderr', PIPE)
        kwargs.setdefault('close_fds', True)
        kwargs.setdefault('shell', True)
        kwargs.setdefault('show_output', self.log_level > 1)

        show_output = kwargs.pop('show_output')
        get_stdout = kwargs.pop('get_stdout', False)
        get_stderr = kwargs.pop('get_stderr', False)
        break_on_error = kwargs.pop('break_on_error', True)
        sensible = kwargs.pop('sensible', False)

        if not sensible:
            self.debug('Run {0!r}'.format(command))
        else:
            if type(command) in (list, tuple):
                self.debug('Run {0!r} ...'.format(command[0]))
            else:
                self.debug('Run {0!r} ...'.format(command.split()[0]))
        self.debug('Cwd {}'.format(kwargs.get('cwd')))

        # open the process
        if sys.platform == 'win32':
            kwargs.pop('close_fds', None)
        process = Popen(command, **kwargs)

        # prepare fds
        # Switch both pipes to non-blocking so read() returns whatever is
        # available instead of stalling (fcntl is unavailable on Windows).
        fd_stdout = process.stdout.fileno()
        fd_stderr = process.stderr.fileno()
        if fcntl:
            fcntl.fcntl(fd_stdout, fcntl.F_SETFL,
                        fcntl.fcntl(fd_stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
            fcntl.fcntl(fd_stderr, fcntl.F_SETFL,
                        fcntl.fcntl(fd_stderr, fcntl.F_GETFL) | os.O_NONBLOCK)

        ret_stdout = [] if get_stdout else None
        ret_stderr = [] if get_stderr else None
        # Pump both pipes until EOF, optionally echoing and/or collecting.
        # NOTE(review): the `break` on an empty stdout chunk exits the loop
        # even if stderr still has pending data — confirm whether trailing
        # stderr output can be lost here.
        while True:
            try:
                readx = select.select([fd_stdout, fd_stderr], [], [])[0]
            except select.error:
                break
            if fd_stdout in readx:
                chunk = process.stdout.read()
                if not chunk:
                    break
                if get_stdout:
                    ret_stdout.append(chunk)
                if show_output:
                    if IS_PY3:
                        stdout.write(chunk.decode('utf-8', 'replace'))
                    else:
                        stdout.write(chunk)
            if fd_stderr in readx:
                chunk = process.stderr.read()
                if not chunk:
                    break
                if get_stderr:
                    ret_stderr.append(chunk)
                if show_output:
                    if IS_PY3:
                        stderr.write(chunk.decode('utf-8', 'replace'))
                    else:
                        stderr.write(chunk)

            stdout.flush()
            stderr.flush()

        process.communicate()
        if process.returncode != 0 and break_on_error:
            self.error('Command failed: {0}'.format(command))
            self.log_env(self.ERROR, kwargs['env'])
            self.error('')
            self.error('Buildozer failed to execute the last command')
            if self.log_level <= self.INFO:
                self.error(
                    'If the error is not obvious, please raise the log_level to 2'
                )
                self.error('and retry the latest command.')
            else:
                self.error(
                    'The error might be hidden in the log above this error')
                self.error(
                    'Please read the full log, and search for it before')
                self.error('raising an issue with buildozer itself.')
            self.error(
                'In case of a bug report, please add a full log with log_level = 2'
            )
            raise BuildozerCommandException()
        if ret_stdout:
            ret_stdout = b''.join(ret_stdout)
        if ret_stderr:
            ret_stderr = b''.join(ret_stderr)
        return (ret_stdout.decode('utf-8', 'ignore') if ret_stdout else None,
                ret_stderr.decode('utf-8') if ret_stderr else None,
                process.returncode)

    def cmd_expect(self, command, **kwargs):
        """Run `command` under pexpect and return the spawned child.

        Used for commands that need interactive input. Recognized extra
        keyword arguments: show_output (echo child output, default
        log_level > 1) and sensible (log only the program name). All other
        kwargs are forwarded to pexpect.spawnu.
        """
        from pexpect import spawnu

        # prepare the environ, based on the system + our own env
        env = copy(environ)
        env.update(self.environ)

        # prepare the process
        kwargs.setdefault('env', env)
        kwargs.setdefault('show_output', self.log_level > 1)
        sensible = kwargs.pop('sensible', False)
        show_output = kwargs.pop('show_output')

        if show_output:
            # pexpect logs unicode; wrap our stdout in a utf-8 writer.
            if IS_PY3:
                kwargs['logfile'] = codecs.getwriter('utf8')(stdout.buffer)
            else:
                kwargs['logfile'] = codecs.getwriter('utf8')(stdout)

        if not sensible:
            self.debug('Run (expect) {0!r}'.format(command))
        else:
            self.debug('Run (expect) {0!r} ...'.format(command.split()[0]))

        self.debug('Cwd {}'.format(kwargs.get('cwd')))
        return spawnu(command, **kwargs)

    def check_configuration_tokens(self):
        '''Ensure the spec file is 'correct'.
        '''
        self.info('Check configuration tokens')
        self.migrate_configuration_tokens()
        get = self.config.getdefault
        errors = []
        adderror = errors.append
        if not get('app', 'title', ''):
            adderror('[app] "title" is missing')
        if not get('app', 'source.dir', ''):
            adderror('[app] "source.dir" is missing')

        package_name = get('app', 'package.name', '')
        if not package_name:
            adderror('[app] "package.name" is missing')
        elif package_name[0] in map(str, range(10)):
            adderror('[app] "package.name" may not start with a number.')

        version = get('app', 'version', '')
        version_regex = get('app', 'version.regex', '')
        if not version and not version_regex:
            adderror('[app] One of "version" or "version.regex" must be set')
        if version and version_regex:
            adderror('[app] Conflict between "version" and "version.regex"'
                     ', only one can be used.')
        if version_regex and not get('app', 'version.filename', ''):
            adderror('[app] "version.filename" is missing'
                     ', required by "version.regex"')

        orientation = get('app', 'orientation', 'landscape')
        if orientation not in ('landscape', 'portrait', 'all',
                               'sensorLandscape'):
            adderror('[app] "orientation" have an invalid value')

        if errors:
            self.error('{0} error(s) found in the buildozer.spec'.format(
                len(errors)))
            for error in errors:
                print(error)
            exit(1)

    def migrate_configuration_tokens(self):
        config = self.config
        if config.has_section("app"):
            migration = (("android.p4a_dir", "p4a.source_dir"),
                         ("android.p4a_whitelist", "android.whitelist"),
                         ("android.bootstrap",
                          "p4a.bootstrap"), ("android.branch", "p4a.branch"),
                         ("android.p4a_whitelist_src",
                          "android.whitelist_src"),
                         ("android.p4a_blacklist_src",
                          "android.blacklist_src"))
            for entry_old, entry_new in migration:
                if not config.has_option("app", entry_old):
                    continue
                value = config.get("app", entry_old)
                config.set("app", entry_new, value)
                config.remove_option("app", entry_old)
                self.error(
                    "In section [app]: {} is deprecated, rename to {}!".format(
                        entry_old, entry_new))

    def check_build_layout(self):
        '''Ensure the build (local and global) directory layout and files are
        ready.

        Creates the global buildozer/cache dirs, the local .buildozer tree
        (bin/, applibs/), opens the JSON state db, and — when a target is
        already selected — its platform/app subdirectories.
        '''
        self.info('Ensure build layout')

        if not exists(self.specfilename):
            print('No {0} found in the current directory. Abandon.'.format(
                self.specfilename))
            exit(1)

        # create global dir
        self.mkdir(self.global_buildozer_dir)
        self.mkdir(self.global_cache_dir)

        # create local .buildozer/ dir
        self.mkdir(self.buildozer_dir)
        # create local bin/ dir
        self.mkdir(self.bin_dir)

        self.mkdir(self.applibs_dir)
        # Persistent key/value store (build id, cached requirements, ...).
        self.state = JsonStore(join(self.buildozer_dir, 'state.db'))

        target = self.targetname
        if target:
            self.mkdir(join(self.global_platform_dir, target, 'platform'))
            self.mkdir(join(self.buildozer_dir, target, 'platform'))
            self.mkdir(join(self.buildozer_dir, target, 'app'))

    def check_application_requirements(self):
        '''Ensure the application requirements are all available and ready to be
        packaged as well.

        Requirements the target can compile itself are skipped; the remaining
        pure-Python ones are pip-installed into applibs/. The installed set is
        cached in the state db so unchanged requirements are not reinstalled.
        '''
        requirements = self.config.getlist('app', 'requirements', '')
        target_available_packages = self.target.get_available_packages()
        if target_available_packages is True:
            # target handles all packages!
            return

        # remove all the requirements that the target can compile
        # Version pins ("pkg==1.0") are compared by bare package name.
        onlyname = lambda x: x.split('==')[0]  # noqa: E731
        requirements = [
            x for x in requirements
            if onlyname(x) not in target_available_packages
        ]

        # sys.real_prefix is only set inside a (non-venv) virtualenv.
        if requirements and hasattr(sys, 'real_prefix'):
            e = self.error
            e('virtualenv is needed to install pure-Python modules, but')
            e('virtualenv does not support nesting, and you are running')
            e('buildozer in one. Please run buildozer outside of a')
            e('virtualenv instead.')
            exit(1)

        # did we already installed the libs ?
        if (exists(self.applibs_dir)
                and self.state.get('cache.applibs', '') == requirements):
            self.debug('Application requirements already installed, pass')
            return

        # recreate applibs
        self.rmdir(self.applibs_dir)
        self.mkdir(self.applibs_dir)

        # ok now check the availability of all requirements
        for requirement in requirements:
            self._install_application_requirement(requirement)

        # everything goes as expected, save this state!
        self.state['cache.applibs'] = requirements

    def _install_application_requirement(self, module):
        """pip-install *module* into the applibs directory inside the venv."""
        self._ensure_virtualenv()
        self.debug('Install requirement {} in virtualenv'.format(module))
        pip_cmd = 'pip install --target={} {}'.format(self.applibs_dir, module)
        self.cmd(pip_cmd, env=self.env_venv, cwd=self.buildozer_dir)

    def check_garden_requirements(self):
        '''Ensure required garden packages are available to be included.
        '''
        garden_requirements = self.config.getlist('app', 'garden_requirements',
                                                  '')

        # nothing to do when the cache already matches the requested set
        if exists(self.gardenlibs_dir) and \
                self.state.get('cache.gardenlibs', '') == garden_requirements:
            self.debug('Garden requirements already installed, pass')
            return

        # drop any previously installed garden libs
        self.rmdir(self.gardenlibs_dir)

        # no requirements (or user removed them all): record state, stop
        if not garden_requirements:
            self.state['cache.gardenlibs'] = garden_requirements
            return

        self._ensure_virtualenv()
        self.cmd('pip install Kivy-Garden==0.1.1', env=self.env_venv)

        # fresh gardenlibs tree, one install per requirement
        self.mkdir(self.gardenlibs_dir)
        for requirement in garden_requirements:
            self._install_garden_package(requirement)

        # remember what we installed
        self.state['cache.gardenlibs'] = garden_requirements

    def _install_garden_package(self, package):
        """Install one garden *package* into the app via the garden tool."""
        self._ensure_virtualenv()
        self.debug(
            'Install garden package {} in buildozer_dir'.format(package))
        garden_cmd = 'garden install --app {}'.format(package)
        self.cmd(garden_cmd, env=self.env_venv, cwd=self.buildozer_dir)

    def _ensure_virtualenv(self):
        # Lazily create a Python 2.7 virtualenv under the buildozer dir and
        # capture its activation environment into self.env_venv.
        # The `venv` attribute doubles as a "done for this run" flag.
        if hasattr(self, 'venv'):
            return
        self.venv = join(self.buildozer_dir, 'venv')
        if not self.file_exists(self.venv):
            self.cmd('virtualenv --python=python2.7 ./venv',
                     cwd=self.buildozer_dir)

        # read virtualenv output and parse it
        output = self.cmd('bash -c "source venv/bin/activate && env"',
                          get_stdout=True,
                          cwd=self.buildozer_dir)
        self.env_venv = copy(self.environ)
        for line in output[0].splitlines():
            args = line.split('=', 1)
            if len(args) != 2:
                continue
            key, value = args
            # only VIRTUAL_ENV and PATH matter for running pip/garden inside
            # the venv; everything else keeps the original environ values
            if key in ('VIRTUAL_ENV', 'PATH'):
                self.env_venv[key] = value
        if 'PYTHONHOME' in self.env_venv:
            del self.env_venv['PYTHONHOME']

        # ensure any sort of compilation will fail
        self.env_venv['CC'] = '/bin/false'
        self.env_venv['CXX'] = '/bin/false'

    def mkdir(self, dn):
        """Create directory *dn* (with parents) if it is not already there."""
        if not exists(dn):
            self.debug('Create directory {0}'.format(dn))
            makedirs(dn)

    def rmdir(self, dn):
        """Recursively delete *dn*; a missing directory is a no-op."""
        if exists(dn):
            self.debug('Remove directory and subdirectory {}'.format(dn))
            rmtree(dn)

    def file_matches(self, patterns):
        """Expand each glob pattern (with ``~`` expansion) and return the
        concatenated list of matching paths."""
        from glob import glob
        matches = []
        for pattern in patterns:
            matches += glob(expanduser(pattern.strip()))
        return matches

    def file_exists(self, *args):
        """True when the path assembled by joining *args* exists."""
        path = join(*args)
        return exists(path)

    def file_rename(self, source, target, cwd=None):
        """Move *source* to *target* (both joined onto *cwd* when given).

        Logs an error when the destination's parent directory is missing,
        since the subsequent move would fail.
        """
        if cwd:
            source = join(cwd, source)
            target = join(cwd, target)
        self.debug('Rename {0} to {1}'.format(source, target))
        parent = os.path.dirname(target)
        if not os.path.isdir(parent):
            # report the actual missing directory, not the target path itself
            self.error(('Rename {0} to {1} fails because {2} is not a '
                        'directory').format(source, target, parent))
        move(source, target)

    def file_copy(self, source, target, cwd=None):
        """Copy *source* to *target*, resolving both against *cwd* when set."""
        if cwd:
            source, target = join(cwd, source), join(cwd, target)
        self.debug('Copy {0} to {1}'.format(source, target))
        copyfile(source, target)

    def file_extract(self, archive, cwd=None):
        """Unpack *archive* in *cwd*, dispatching on the file extension.

        Raises Exception for archive types it does not know about.
        """
        if archive.endswith(('.tgz', '.tar.gz')):
            # XXX tarfile doesn't work for NDK-r8c, shell out to tar instead
            self.cmd('tar xzf {0}'.format(archive), cwd=cwd)
        elif archive.endswith(('.tbz2', '.tar.bz2')):
            # XXX same as above
            self.cmd('tar xjf {0}'.format(archive), cwd=cwd)
        elif archive.endswith('.bin'):
            # self-extracting binaries (linux/darwin): mark executable, run
            self.cmd('chmod a+x {0}'.format(archive), cwd=cwd)
            self.cmd('./{0}'.format(archive), cwd=cwd)
        elif archive.endswith('.zip'):
            self.cmd('unzip -q {}'.format(join(cwd, archive)), cwd=cwd)
        else:
            raise Exception('Unhandled extraction for type {0}'.format(archive))

    def file_copytree(self, src, dest):
        """Recursively copy *src* into *dest* (directories merge, files
        overwrite)."""
        print('copy {} to {}'.format(src, dest))
        if not os.path.isdir(src):
            copyfile(src, dest)
            return
        if not os.path.isdir(dest):
            os.makedirs(dest)
        for entry in os.listdir(src):
            self.file_copytree(os.path.join(src, entry),
                               os.path.join(dest, entry))

    def clean_platform(self):
        """Delete the target's platform build directory if present."""
        self.info('Clean the platform build directory')
        if exists(self.platform_dir):
            rmtree(self.platform_dir)

    def download(self, url, filename, cwd=None):
        """Fetch ``url + filename`` into *filename* (under *cwd* when
        given), printing a simple progress report; any stale local copy
        is removed first. Returns the local path written."""
        def report_hook(index, blksize, size):
            # urlretrieve progress callback; size <= 0 means unknown length
            if size <= 0:
                progression = '{0} bytes'.format(index * blksize)
            else:
                progression = '{0:.2f}%'.format(index * blksize * 100. /
                                                float(size))
            # suppress the carriage-return spinner on CI machines
            if "CI" not in environ:
                stdout.write('- Download {}\r'.format(progression))
                stdout.flush()

        full_url = url + filename
        if cwd:
            filename = join(cwd, filename)
        if self.file_exists(filename):
            unlink(filename)

        self.debug('Downloading {0}'.format(full_url))
        urlretrieve(full_url, filename, report_hook)
        return filename

    def get_version(self):
        """Return the app version from the spec: either the literal
        ``version`` token, or a regex capture applied to
        ``version.filename``. Raises Exception on conflicting or
        incomplete configuration."""
        c = self.config
        has_version = c.has_option('app', 'version')
        has_regex = c.has_option('app', 'version.regex')
        has_filename = c.has_option('app', 'version.filename')

        # literal version excludes the regex/filename pair
        if has_version:
            if has_regex or has_filename:
                raise Exception(
                    'version.regex and version.filename conflict with version')
            return c.get('app', 'version')

        if not (has_regex or has_filename):
            raise Exception('Missing version or version.regex + version.filename')

        # regex capture: both tokens are required together
        if not has_filename:
            raise Exception('version.filename is missing')
        if not has_regex:
            raise Exception('version.regex is missing')

        fn = c.get('app', 'version.filename')
        with open(fn) as fd:
            data = fd.read()
        regex = c.get('app', 'version.regex')
        match = search(regex, data)
        if not match:
            raise Exception('Unable to find capture version in {0}\n'
                            ' (looking for `{1}`)'.format(fn, regex))
        version = match.groups()[0]
        self.debug('Captured version: {0}'.format(version))
        return version

    def build_application(self):
        """Assemble the app directory: sources, applibs, garden libs and
        the sitecustomize bootstrap, in that order."""
        steps = (self._copy_application_sources,
                 self._copy_application_libs,
                 self._copy_garden_libs,
                 self._add_sitecustomize)
        for step in steps:
            step()

    def _copy_application_sources(self):
        """Copy the project sources into the per-target app directory,
        honouring the [app] include/exclude extension, directory and
        pattern options.

        Precedence: exclude_dirs and exclude_patterns rule a path out,
        but a matching include_patterns entry always rules it back in.
        Hidden files and directories are never copied.
        """
        # XXX clean the inclusion/exclusion algo.
        source_dir = realpath(self.config.getdefault('app', 'source.dir', '.'))
        include_exts = self.config.getlist('app', 'source.include_exts', '')
        exclude_exts = self.config.getlist('app', 'source.exclude_exts', '')
        exclude_dirs = self.config.getlist('app', 'source.exclude_dirs', '')
        exclude_patterns = self.config.getlist('app',
                                               'source.exclude_patterns', '')
        include_patterns = self.config.getlist('app',
                                               'source.include_patterns', '')
        app_dir = self.app_dir

        self.debug('Copy application source from {}'.format(source_dir))

        # start from a clean app dir so stale files do not survive
        rmtree(self.app_dir)

        for root, dirs, files in walk(source_dir, followlinks=True):
            # avoid hidden directory
            if True in [x.startswith('.') for x in root.split(sep)]:
                continue

            # need to have sort-of normalization. Let's say you want to exclude
            # image directory but not images, the filtered_root must have a / at
            # the end, same for the exclude_dir. And then we can safely compare
            filtered_root = root[len(source_dir) + 1:].lower()
            if filtered_root:
                filtered_root += '/'

                # manual exclude_dirs approach
                is_excluded = False
                for exclude_dir in exclude_dirs:
                    if exclude_dir[-1] != '/':
                        exclude_dir += '/'
                    if filtered_root.startswith(exclude_dir.lower()):
                        is_excluded = True
                        break

                # pattern matching
                if not is_excluded:
                    # match pattern if not ruled out by exclude_dirs
                    for pattern in exclude_patterns:
                        if fnmatch(filtered_root, pattern):
                            is_excluded = True
                            break
                # include_patterns override any exclusion decided above
                for pattern in include_patterns:
                    if fnmatch(filtered_root, pattern):
                        is_excluded = False
                        break

                if is_excluded:
                    continue

            for fn in files:
                # avoid hidden files
                if fn.startswith('.'):
                    continue

                # pattern matching (same precedence as for directories)
                is_excluded = False
                dfn = fn.lower()
                if filtered_root:
                    dfn = join(filtered_root, fn)
                for pattern in exclude_patterns:
                    if fnmatch(dfn, pattern):
                        is_excluded = True
                        break
                for pattern in include_patterns:
                    if fnmatch(dfn, pattern):
                        is_excluded = False
                        break
                if is_excluded:
                    continue

                # filter based on the extension
                # TODO more filters
                basename, ext = splitext(fn)
                if ext:
                    ext = ext[1:]
                    if include_exts and ext not in include_exts:
                        continue
                    if exclude_exts and ext in exclude_exts:
                        continue

                sfn = join(root, fn)
                rfn = realpath(join(app_dir, root[len(source_dir) + 1:], fn))

                # ensure the directory exists
                dfn = dirname(rfn)
                self.mkdir(dfn)

                # copy!
                self.debug('Copy {0}'.format(sfn))
                copyfile(sfn, rfn)

    def _copy_application_libs(self):
        """Mirror the installed applibs into <app_dir>/_applibs."""
        dest = join(self.app_dir, '_applibs')
        copytree(self.applibs_dir, dest)

    def _copy_garden_libs(self):
        """Mirror installed garden libs into <app_dir>/libs, when any exist."""
        if not exists(self.gardenlibs_dir):
            return
        copytree(self.gardenlibs_dir, join(self.app_dir, 'libs'))

    def _add_sitecustomize(self):
        copyfile(join(dirname(__file__), 'sitecustomize.py'),
                 join(self.app_dir, 'sitecustomize.py'))

        main_py = join(self.app_dir, 'service', 'main.py')
        if not self.file_exists(main_py):
            #self.error('Unable to patch main_py to add applibs directory.')
            return

        header = (b'import sys, os; '
                  b'sys.path = [os.path.join(os.getcwd(),'
                  b'"..", "_applibs")] + sys.path\n')
        with open(main_py, 'rb') as fd:
            data = fd.read()
        data = header + data
        with open(main_py, 'wb') as fd:
            fd.write(data)
        self.info('Patched service/main.py to include applibs')

    def namify(self, name):
        '''Return a "valid" name from a name with lot of invalid chars
        (allowed characters: a-z, A-Z, 0-9, -, _)
        '''
        # raw string: '\-' in a normal string literal is an invalid escape
        # sequence (SyntaxWarning on modern Python)
        return re.sub(r'[^a-zA-Z0-9_\-]', '_', name)

    @property
    def root_dir(self):
        """Absolute directory containing the buildozer spec file."""
        spec_dir = dirname(self.specfilename)
        return realpath(spec_dir)

    @property
    def user_build_dir(self):
        """The user-provided build dir, if any (None when unset)."""
        # honour the deprecated [buildozer] builddir token first, appending
        # .buildozer for backwards compatibility
        legacy = self.config.getdefault('buildozer', 'builddir', None)
        fallback = None if legacy is None else join(legacy, '.buildozer')
        # the modern build_dir token wins over the legacy value
        chosen = self.config.getdefault('buildozer', 'build_dir', fallback)
        if chosen is None:
            return None
        return realpath(join(self.root_dir, chosen))

    @property
    def buildozer_dir(self):
        '''The directory in which to run the app build.'''
        custom = self.user_build_dir
        if custom is not None:
            return custom
        return join(self.root_dir, '.buildozer')

    @property
    def bin_dir(self):
        """Output directory for built packages (user override wins)."""
        custom = self.user_bin_dir
        return custom if custom else join(self.root_dir, 'bin')

    @property
    def platform_dir(self):
        """Per-target platform tree inside the local build directory."""
        parts = (self.buildozer_dir, self.targetname, 'platform')
        return join(*parts)

    @property
    def app_dir(self):
        """Per-target staging directory for the application files."""
        parts = (self.buildozer_dir, self.targetname, 'app')
        return join(*parts)

    @property
    def applibs_dir(self):
        """Where pure-Python requirements get pip-installed."""
        base = self.buildozer_dir
        return join(base, 'applibs')

    @property
    def gardenlibs_dir(self):
        """Where garden packages get installed."""
        base = self.buildozer_dir
        return join(base, 'libs')

    @property
    def global_buildozer_dir(self):
        """Per-user ~/.buildozer directory shared by all projects."""
        home = expanduser('~')
        return join(home, '.buildozer')

    @property
    def global_platform_dir(self):
        """Per-target platform tree inside the global buildozer dir."""
        parts = (self.global_buildozer_dir, self.targetname, 'platform')
        return join(*parts)

    @property
    def global_packages_dir(self):
        """Per-target download cache inside the global buildozer dir."""
        parts = (self.global_buildozer_dir, self.targetname, 'packages')
        return join(*parts)

    @property
    def global_cache_dir(self):
        """Shared download cache inside the global buildozer dir."""
        base = self.global_buildozer_dir
        return join(base, 'cache')

    @property
    def package_full_name(self):
        """``domain.name`` package identifier, or just the name when the
        domain is empty."""
        name = self.config.getdefault('app', 'package.name', '')
        domain = self.config.getdefault('app', 'package.domain', '')
        if domain == '':
            return name
        return '{}.{}'.format(domain, name)

    #
    # command line invocation
    #

    def targets(self):
        """Yield ``(name, module)`` for every buildable target shipped in
        the targets/ package directory.

        Targets whose module raises NotImplementedError on import (e.g.
        unsupported host platform) are skipped silently; any other import
        error propagates.
        """
        targets_dir = join(dirname(__file__), 'targets')
        for fn in listdir(targets_dir):
            if fn.startswith('.') or fn.startswith('__'):
                continue
            if not fn.endswith('.py'):
                continue
            target = fn[:-3]
            try:
                m = __import__('buildozer.targets.{0}'.format(target),
                               fromlist=['buildozer'])
                yield target, m
            except NotImplementedError:
                pass
            # NOTE: the previous bare ``except: raise`` immediately re-raised
            # (and was followed by unreachable code); letting other exceptions
            # propagate naturally is the same observable behavior.

    def usage(self):
        """Print the full command-line help: global options, available
        targets, global commands and per-target custom commands."""
        print('Usage:')
        print(
            '    buildozer [--profile <name>] [--verbose] [target] <command>...'
        )
        print('    buildozer --version')
        print('')
        print('Available targets:')
        targets = list(self.targets())
        for target, m in targets:
            try:
                # first line of the target module docstring is its summary
                doc = m.__doc__.strip().splitlines()[0].strip()
            except Exception:
                doc = '<no description>'
            print('  {0:<18} {1}'.format(target, doc))

        print('')
        print('Global commands (without target):')
        cmds = [x for x in dir(self) if x.startswith('cmd_')]
        for cmd in cmds:
            name = cmd[4:]
            meth = getattr(self, cmd)

            # commands without a docstring are considered private
            if not meth.__doc__:
                continue
            doc = [x for x in meth.__doc__.strip().splitlines()][0].strip()
            print('  {0:<18} {1}'.format(name, doc))

        print('')
        print('Target commands:')
        print('  clean      Clean the target environment')
        print('  update     Update the target dependencies')
        print('  debug      Build the application in debug mode')
        print('  release    Build the application in release mode')
        print('  deploy     Deploy the application on the device')
        print('  run        Run the application on the device')
        print('  serve      Serve the bin directory via SimpleHTTPServer')

        for target, m in targets:
            mt = m.get_target(self)
            commands = mt.get_custom_commands()
            if not commands:
                continue
            print('')
            print('Target "{0}" commands:'.format(target))
            for command, doc in commands:
                if not doc:
                    continue
                # wrap long descriptions so they align under the doc column
                doc = textwrap.fill(textwrap.dedent(doc).strip(),
                                    59,
                                    subsequent_indent=' ' * 21)
                print('  {0:<18} {1}'.format(command, doc))

        print('')

    def run_default(self):
        """Run the command stored via ``setdefault``, aborting with a hint
        when none has been configured."""
        self.check_build_layout()
        if 'buildozer:defaultcommand' not in self.state:
            print('No default command set.')
            print('Use "buildozer setdefault <command args...>"')
            print('Use "buildozer help" for a list of all commands"')
            exit(1)
        self.run_command(self.state['buildozer:defaultcommand'])

    def run_command(self, args):
        """Parse leading global options from *args*, then dispatch the
        first remaining word as a built-in ``cmd_*`` command or a target
        name.

        Consumes options from the front of *args* in place; the profile
        must be parsed before ``_merge_config_profile`` runs below.
        """
        while args:
            if not args[0].startswith('-'):
                break
            arg = args.pop(0)

            if arg in ('-v', '--verbose'):
                self.log_level = 2

            elif arg in ('-h', '--help'):
                self.usage()
                exit(0)

            elif arg in ('-p', '--profile'):
                # consumes the following token as the profile name
                self.config_profile = args.pop(0)

            elif arg == '--version':
                print('Buildozer {0}'.format(__version__))
                exit(0)

        # apply [section@profile] overlays now that the profile is known
        self._merge_config_profile()

        self.check_root()

        if not args:
            self.run_default()
            return

        command, args = args[0], args[1:]
        cmd = 'cmd_{0}'.format(command)

        # internal commands ?
        if hasattr(self, cmd):
            getattr(self, cmd)(*args)
            return

        # maybe it's a target?
        targets = [x[0] for x in self.targets()]
        if command not in targets:
            print('Unknown command/target {}'.format(command))
            exit(1)

        self.set_target(command)
        self.target.run_commands(args)

    def check_root(self):
        '''If effective user id is 0, display a warning and require
        user input to continue (or to cancel)'''

        if IS_PY3:
            input_func = input
        else:
            input_func = raw_input

        warn_on_root = self.config.getdefault('buildozer', 'warn_on_root', '1')
        try:
            euid = os.geteuid() == 0
        except AttributeError:
            # geteuid() is POSIX-only; query the Windows shell API instead.
            # (Previously ctypes was referenced without being imported on
            # non-win32 platforms lacking geteuid, raising NameError.)
            if sys.platform == 'win32':
                import ctypes
                euid = ctypes.windll.shell32.IsUserAnAdmin() != 0
            else:
                euid = False
        if warn_on_root == '1' and euid:
            print('\033[91m\033[1mBuildozer is running as root!\033[0m')
            print(
                '\033[91mThis is \033[1mnot\033[0m \033[91mrecommended, and may lead to problems later.\033[0m'
            )
            cont = None
            while cont not in ('y', 'n'):
                cont = input_func('Are you sure you want to continue [y/n]? ')

            if cont == 'n':
                sys.exit()

    def cmd_init(self, *args):
        '''Create a initial buildozer.spec in the current directory
        '''
        if exists('buildozer.spec'):
            print('ERROR: You already have a buildozer.spec file.')
            exit(1)
        template = join(dirname(__file__), 'default.spec')
        copyfile(template, 'buildozer.spec')
        print('File buildozer.spec created, ready to customize!')

    def cmd_distclean(self, *args):
        '''Clean the whole Buildozer environment.
        '''
        print("Warning: Your ndk, sdk and all other cached packages will be"
              " removed. Continue? (y/n)")
        # slice instead of [0]: an empty answer / EOF used to raise IndexError
        answer = sys.stdin.readline().lower()
        if answer[:1] == 'y':
            self.info('Clean the global build directory')
            if not exists(self.global_buildozer_dir):
                return
            rmtree(self.global_buildozer_dir)

    def cmd_appclean(self, *args):
        '''Clean the .buildozer folder in the app directory.

        This command specifically refuses to delete files in a
        user-specified build directory, to avoid accidentally deleting
        more than the user intends.
        '''
        if self.user_build_dir is not None:
            self.error((
                'Failed: build_dir is specified as {} in the buildozer config. `appclean` will '
                'not attempt to delete files in a user-specified build directory.'
            ).format(self.user_build_dir))
            return
        if not exists(self.buildozer_dir):
            self.error('{} already deleted, skipping.'.format(
                self.buildozer_dir))
            return
        self.info('Deleting {}'.format(self.buildozer_dir))
        rmtree(self.buildozer_dir)

    def cmd_help(self, *args):
        '''Show the Buildozer help.
        '''
        # delegate straight to the usage printer (extra args are ignored)
        return self.usage()

    def cmd_setdefault(self, *args):
        '''Set the default command to run when no arguments are given
        '''
        self.check_build_layout()
        # the argument tuple is stored verbatim and replayed by run_default()
        self.state['buildozer:defaultcommand'] = args

    def cmd_version(self, *args):
        '''Show the Buildozer version
        '''
        banner = 'Buildozer {0}'.format(__version__)
        print(banner)

    def cmd_serve(self, *args):
        '''Serve the bin directory via SimpleHTTPServer
        '''
        try:
            # Python 3 module layout
            from http.server import SimpleHTTPRequestHandler
            from socketserver import TCPServer
        except ImportError:
            # Python 2 fallback
            from SimpleHTTPServer import SimpleHTTPRequestHandler
            from SocketServer import TCPServer

        os.chdir(self.bin_dir)
        server = TCPServer(("", SIMPLE_HTTP_SERVER_PORT),
                           SimpleHTTPRequestHandler)
        print("Serving via HTTP at port {}".format(SIMPLE_HTTP_SERVER_PORT))
        print("Press Ctrl+c to quit serving.")
        server.serve_forever()

    #
    # Private
    #

    def _merge_config_profile(self):
        """Fold every ``[section@profileA,profileB]`` section that matches
        the active profile back into its base ``[section]``."""
        profile = self.config_profile
        if not profile:
            return
        for section in self.config.sections():
            # split "[app@default,hd]" into base "app" and its profile list
            parts = section.split('@', 1)
            if len(parts) < 2:
                continue

            section_base, profile_csv = parts
            if profile not in profile_csv.split(','):
                continue

            # active profile matches: merge every key into the base section
            if not self.config.has_section(section_base):
                self.config.add_section(section_base)
            for name, value in self.config.items(section):
                print('merged ({}, {}) into {} (profile is {})'.format(
                    name, value, section_base, profile))
                self.config.set(section_base, name, value)

    def _get_config_list_values(self, *args, **kwargs):
        """Delegate to _get_config_list with with_values forced on."""
        forwarded = dict(kwargs, with_values=True)
        return self._get_config_list(*args, **forwarded)

    def _get_config_list(self,
                         section,
                         token,
                         default=None,
                         with_values=False):
        """ConfigParser monkey-patch: read an option as a list.

        A dedicated ``[section:token]`` section wins (one entry per key,
        optionally rendered as ``key=value``); otherwise the plain option
        value is split on commas. Returns *default* when unset.
        """
        # an env var, when present, overrides the file configuration
        set_config_token_from_env(section, token, self.config)

        # a dedicated [section:token] section supplies one entry per key
        l_section = '{}:{}'.format(section, token)
        if self.config.has_section(l_section):
            keys = self.config.options(l_section)
            if with_values:
                return ['{}={}'.format(key, self.config.get(l_section, key))
                        for key in keys]
            return [key.strip() for key in keys]

        raw = self.config.getdefault(section, token, '')
        if not raw:
            return default
        items = raw.split(',')
        if not items:
            return default
        return [item.strip() for item in items]

    def _get_config_default(self, section, token, default=None):
        """ConfigParser monkey-patch: option value or *default*, with
        env-var override support."""
        # an env var, when present, overrides the file configuration
        set_config_token_from_env(section, token, self.config)

        config = self.config
        if config.has_section(section) and config.has_option(section, token):
            return config.get(section, token)
        return default

    def _get_config_bool(self, section, token, default=False):
        """ConfigParser monkey-patch: boolean option value or *default*,
        with env-var override support."""
        # an env var, when present, overrides the file configuration
        set_config_token_from_env(section, token, self.config)

        config = self.config
        if config.has_section(section) and config.has_option(section, token):
            return config.getboolean(section, token)
        return default

    def _get_config_raw_default(self,
                                section,
                                token,
                                default=None,
                                section_sep="=",
                                split_char=" "):
        """ConfigParser monkey-patch: option value split on *split_char*,
        or the items of a dedicated ``[section:token]`` section joined
        with *section_sep*.

        Returns None when the option is missing and *default* is None
        (previously this crashed calling ``None.split``).
        """
        l_section = '{}:{}'.format(section, token)
        if self.config.has_section(l_section):
            return [
                section_sep.join(item) for item in self.config.items(l_section)
            ]
        if not self.config.has_option(section, token):
            if default is None:
                return None
            return default.split(split_char)
        return self.config.get(section, token).split(split_char)
# ---- Exemple #21 (score: 0) ----
def run(ini_file='TOPKAPI.ini',
        verbose=False,
        quiet=False,
        parallel_exec=True,
        nworkers=int(mp.cpu_count() - 1)):
    """Run the model.

    Parameters
    ----------
    ini_file : str
       The name of the PyTOPKAPI initialization file. This file describes the
       locations of the parameter files and model setup options. Default is to
       use a file named `TOPKAPI.ini` in the current directory.
    verbose : bool
        Prints runtime information [default False - don't display runtime
        info]. Is independent of the `quiet` keyword argument.
    quiet : bool
        Toggles whether to display an informational banner at runtime [default
        False - display banner]. Is independent of the `verbose` keyword
        argument.
    parallel_exec : bool
        Solve the model cells in parallel worker processes when True (the
        default); otherwise solve serially, time-step by time-step, in the
        current process.
    nworkers : int
        Number of worker processes to spawn for solving each cell's time-series
        in parallel. Default is one fewer than CPU count reported by
        multiprocessing.

    """

    ##================================##
    ##  Read the input file (*.ini)   ##
    ##================================##
    config = SafeConfigParser()
    config.read(ini_file)

    ##~~~~~~ Numerical_options ~~~~~~##
    # Solver selection flags for soil, overland and channel stores.
    solve_s = config.getint('numerical_options', 'solve_s')
    solve_o = config.getint('numerical_options', 'solve_o')
    solve_c = config.getint('numerical_options', 'solve_c')

    ##~~~~~~~~~~~ input files ~~~~~~~~~~~##
    #Param
    file_global_param = config.get('input_files', 'file_global_param')
    file_cell_param = config.get('input_files', 'file_cell_param')
    #Rain
    file_rain = config.get('input_files', 'file_rain')
    #ETP
    file_ET = config.get('input_files', 'file_ET')

    #~~~~~~~~~~~ Group (simulated event) ~~~~~~~~~~~##
    # HDF5 group holding the forcing datasets for this simulated event.
    group_name = config.get('groups', 'group_name')

    ##~~~~~~ Calibration ~~~~~~##
    # Multiplicative calibration factors applied to the physical parameters.
    fac_L = config.getfloat('calib_params', 'fac_L')
    fac_Ks = config.getfloat('calib_params', 'fac_Ks')
    fac_n_o = config.getfloat('calib_params', 'fac_n_o')
    fac_n_c = config.getfloat('calib_params', 'fac_n_c')

    ##~~~~~~ External flows ~~~~~~##
    external_flow = config.getboolean('external_flow', 'external_flow')
    if external_flow:
        file_Qexternal_flow = config.get('external_flow',
                                         'file_Qexternal_flow')
        Xexternal_flow = config.getfloat('external_flow', 'Xexternal_flow')
        Yexternal_flow = config.getfloat('external_flow', 'Yexternal_flow')

    ##~~~~~~~~~~~ output files ~~~~~~~~~~##
    file_out = config.get('output_files', 'file_out')
    ut.check_file_exist(file_out)  #create path_out if it doesn't exist
    # A pre-existing output file means this is a continuation run.
    if os.path.exists(file_out):
        first_run = False
    else:
        first_run = True

    append_output = config.getboolean('output_files', 'append_output')
    if append_output is True:
        fmode = 'a'
    else:
        fmode = 'w'

    ##============================##
    ##   Read the forcing data    ##
    ##============================##
    if verbose:
        print('Read the forcing data')

    #~~~~Rainfall
    # NOTE(review): h5py.File is opened without an explicit mode; recent h5py
    # requires one (e.g. 'r') - confirm the pinned h5py version accepts this.
    h5_rain = h5py.File(file_rain)
    dset_name = '/{}/rainfall'.format(group_name)
    rainfall_forcing = h5_rain[dset_name][...]
    h5_rain.close()

    #~~~~ETr - Reference crop ET
    h5_ET = h5py.File(file_ET)
    dset_name = '/{}/ETr'.format(group_name)
    ETr_forcing = h5_ET[dset_name][...]

    #~~~~ETo - Open water potential evap.
    dset_name = '/{}/ETo'.format(group_name)
    ET0_forcing = h5_ET[dset_name][...]
    h5_ET.close()

    #~~~~external_flow flows
    # Column 5 of the external flow file holds the flow record values.
    if external_flow:
        external_flow_records = np.loadtxt(file_Qexternal_flow)[:, 5]
    else:
        external_flow_records = None

    ##============================##
    ## Pretreatment of input data ##
    ##============================##
    if verbose:
        print('Pretreatment of input data')

    #~~~~Read Global parameters file
    X, Dt, alpha_s, \
    alpha_o, alpha_c, \
    A_thres, W_min, W_max = pm.read_global_parameters(file_global_param)

    #~~~~Read Cell parameters file
    ar_cell_label, ar_coorx, \
    ar_coory, channel_flag, \
    Xc, ar_dam, \
    ar_tan_beta, ar_tan_beta_channel, \
    ar_L, Ks, \
    ar_theta_r, ar_theta_s, \
    ar_n_o, ar_n_c, \
    ar_cell_down, ar_pVs_t0, \
    ar_Vo_t0, ar_Qc_t0, \
    Kc, psi_b, lamda = pm.read_cell_parameters(file_cell_param)

    #~~~~Number of cell in the catchment
    nb_cell = len(ar_cell_label)

    #~~~~Computation of cell order
    node_hierarchy = pm.compute_node_hierarchy(ar_cell_label, ar_cell_down)
    ar_label_sort = pm.sort_cell(ar_cell_label, ar_cell_down)

    #~~~~Computation of upcells
    li_cell_up = pm.direct_up_cell(ar_cell_label, ar_cell_down, ar_label_sort)

    #~~~~Computation of drained area
    ar_A_drained = pm.drained_area(ar_label_sort, li_cell_up, X)

    #~~~~Apply calibration factors to the parameter values
    ar_L = ar_L * fac_L
    Ks = Ks * fac_Ks
    ar_n_o = ar_n_o * fac_n_o
    ar_n_c = ar_n_c * fac_n_c

    if verbose:
        print('Max L=', max(ar_L))
        print('Max Ks=', max(Ks))
        print('Max n_o=', max(ar_n_o))
        print('Max n_c=', max(ar_n_c))

    #~~~~Computation of model parameters from physical parameters
    Vsm, b_s, b_o, \
    W, b_c = pm.compute_cell_param(X, Xc, Dt, alpha_s,
                                         alpha_o, alpha_c, nb_cell,
                                         A_thres, W_max, W_min,
                                         channel_flag, ar_tan_beta,
                                         ar_tan_beta_channel, ar_L,
                                         Ks, ar_theta_r, ar_theta_s,
                                         ar_n_o, ar_n_c, ar_A_drained)

    #~~~~Look for the cell of external_flow tunnel
    if external_flow:
        cell_external_flow = ut.find_cell_coordinates(ar_cell_label,
                                                      Xexternal_flow,
                                                      Yexternal_flow, ar_coorx,
                                                      ar_coory, channel_flag)

        if verbose:
            print('external flows will be taken into account for cell no',\
                  cell_external_flow, ' coordinates ('\
                  ,Xexternal_flow,',',Yexternal_flow,')')
    else:
        cell_external_flow = None

    #~~~~Number of simulation time steps
    nb_time_step = rainfall_forcing.shape[0]

    ##=============================##
    ##  Variable array definition  ##
    ##=============================##

    ## Initialisation of the reservoirs
    #Matrix of soil,overland and channel store at the begining of the time step
    # Continuation runs restart from the last state stored in the output file;
    # fresh runs initialise the stores from the parameter files.
    if append_output and not first_run:
        if verbose:
            print('Initialize from simulation file')

        h5file_in = h5py.File(file_out)

        Vs_t0 = h5file_in['/Soil/V_s'][-1, :]
        Vc_t0 = h5file_in['/Channel/V_c'][-1, :]
        Vo_t0 = h5file_in['/Overland/V_o'][-1, :]

        h5file_in.close()
    else:
        if verbose:
            print('Initialize from parameters')
        Vs_t0 = fl.initial_volume_soil(ar_pVs_t0, Vsm)
        Vo_t0 = ar_Vo_t0
        Vc_t0 = fl.initial_volume_channel(ar_Qc_t0, W, X, ar_n_c)

    ##=============================##
    ## HDF5 output file definition ##
    ##=============================##
    # NOTE(review): `no_data` is not defined anywhere in this function -
    # presumably a module-level constant; verify it exists at import time.
    h5file, dset_Vs, dset_Vo, dset_Vc,     \
    dset_Qs_out, dset_Qo_out, dset_Qc_out, \
    dset_Q_down, dset_ET_out, dset_Ec_out  \
                                    = ut.open_simulation_file(file_out, fmode,
                                                   Vs_t0, Vo_t0, Vc_t0, no_data,
                                                   nb_cell, nb_time_step,
                                                   append_output, first_run)

    # Effective porosity (saturated minus residual water content).
    eff_theta = ar_theta_s - ar_theta_r

    ##===========================##
    ##     Core of the Model     ##
    ##===========================##
    if not quiet:
        ut.show_banner(ini_file, nb_cell, nb_time_step)
        progress_desc = 'Simulation'
    else:
        progress_desc = 'PyTOPKAPI v{}'.format(pytopkapi.__version__)

    # prepare parameter dict
    # Single bundle passed to either execution backend below.
    exec_params = {
        'nb_cell': nb_cell,
        'nb_time_step': nb_time_step,
        'progress_desc': progress_desc,
        'Dt': Dt,
        'rainfall_forcing': rainfall_forcing,
        'ETr_forcing': ETr_forcing,
        'ET0_forcing': ET0_forcing,
        'psi_b': psi_b,
        'lamda': lamda,
        'eff_theta': eff_theta,
        'Ks': Ks,
        'X': X,
        'b_s': b_s,
        'b_o': b_o,
        'b_c': b_c,
        'alpha_s': alpha_s,
        'alpha_o': alpha_o,
        'alpha_c': alpha_c,
        'Vs_t0': Vs_t0,
        'Vo_t0': Vo_t0,
        'Vc_t0': Vc_t0,
        'Vsm': Vsm,
        'dset_Vs': dset_Vs,
        'dset_Vo': dset_Vo,
        'dset_Vc': dset_Vc,
        'dset_Qs_out': dset_Qs_out,
        'dset_Qo_out': dset_Qo_out,
        'dset_Qc_out': dset_Qc_out,
        'dset_Q_down': dset_Q_down,
        'dset_ET_out': dset_ET_out,
        'dset_Ec_out': dset_Ec_out,
        'solve_s': solve_s,
        'solve_o': solve_o,
        'solve_c': solve_c,
        'channel_flag': channel_flag,
        'W': W,
        'Xc': Xc,
        'Kc': Kc,
        'cell_external_flow': cell_external_flow,
        'external_flow_records': external_flow_records,
        'node_hierarchy': node_hierarchy,
        'li_cell_up': li_cell_up,
        'nworkers': nworkers
    }

    if not parallel_exec:
        # Serial execution. Solve by timestep in a single process.
        # Outer loop timesteps - inner loop cells
        _serial_execute(exec_params)
    else:
        # Parallel execution. Solve by cell using multiple processes.
        # Outer loop cells - inner loop timesteps
        _parallel_execute(exec_params)

    h5file.close()
Exemple #22
0
# Pull hardware-server and MGT transceiver settings from the config object.
# NOTE(review): `config`, `format_to_list`, `pyIBERT` and the server0_*/
# server1_*/target0_*/target1_* names are defined earlier in the file and
# are not visible in this chunk - confirm before modifying.
target1_freq = config.get('hw_server', 'target1_freq')
mgt_rx = config.get('mgt_parameters', 'mgt_rx')
mgt_tx = config.get('mgt_parameters', 'mgt_tx')
TXDIFFSWING = config.get('mgt_parameters', 'TXDIFFSWING')
TXPOST = config.get('mgt_parameters', 'TXPOST')
TXPRE = config.get('mgt_parameters', 'TXPRE')
RXTERM = config.get('mgt_parameters', 'RXTERM')
# Test-run locations and pass/fail parameters.
tcl_dir = config.get('test', 'tcl_dir')
tcl_transm_name = config.get('test', 'tcl_transm_name')
tcl_rcv_name = config.get('test', 'tcl_rcv_name')
results_dir = config.get('test', 'results_dir')
results_name = config.get('test', 'results_name')
desired_area = config.getint('test', 'desired_area')
BER = config.get('test', 'BER')
err_req = config.getint('test', 'err_req')
include_all_results = config.getboolean('test', 'include_all_results')

# Convert comma-separated config strings into Python lists.
mgt_rx = format_to_list(mgt_rx)
mgt_tx = format_to_list(mgt_tx)
TXDIFFSWING = format_to_list(TXDIFFSWING)
TXPOST = format_to_list(TXPOST)
TXPRE = format_to_list(TXPRE)
RXTERM = format_to_list(RXTERM)

# Main script
# Open one IBERT session per board: rcv = receiver, transm = transmitter.
print("----------------------------------------------------------------------")
print("Creating Instance 0")
rcv = pyIBERT(server0_addr, server0_port, target0_name, target0_freq)
print("Creating Instance 1")
transm = pyIBERT(server1_addr, server1_port, target1_name, target1_freq)
Exemple #23
0
# Defaults for the web UI; overridden by the first spider.conf found below.
host = '0.0.0.0'  # 'localhost'
port = 8080
debug = False
datapath_sub = dict()
# Look for spider.conf in /etc first, then the current directory.
search_path = ['/etc', '']
for path in search_path:
    candidate = os.path.join(path, 'spider.conf')
    if os.path.isfile(candidate):
        try:
            config = SafeConfigParser()
            config.read([candidate])
            database = config.get('Spider', 'database')
            title = config.get('WebUI', 'title')
            host = config.get('WebUI', 'host')
            port = config.getint('WebUI', 'port')
            debug = config.getboolean('WebUI', 'debug')
            #datapathsub = list(filter(None, [x.strip() for x in datapath_regexps.splitlines()]))
        except:
            # Deliberate best-effort: a broken config file falls back to the
            # defaults above. NOTE(review): this also hides typos silently.
            pass
        datapath_sub = dict()
        try:
            # The substitutions section is optional.
            datapath_sub = config.items('datapath_substitutions')
        except:
            pass

# NOTE(review): `database` (and `title`) are only bound when a spider.conf was
# found and parsed - this line raises NameError otherwise; verify intended.
engine = create_engine(database)
Session = sessionmaker(bind=engine)
session = Session()
# Create any missing tables for the declarative models.
Base.metadata.create_all(engine)

@app.route('/')
Exemple #24
0
# NOTE(review): `parser`, `rank`, `aio` and `os`/`np` imports are defined
# earlier in the file and are not visible in this chunk.
args = parser.parse_args()
# === PARAMS ===

# Seed per MPI rank so each process draws an independent random stream.
np.random.seed(rank)

Nsims = args.Nsims
exp_name = args.Exp
expf_name = args.ExpFilter

# Read config
iniFile = "input/recon.ini"
Config = SafeConfigParser()
# Identity optionxform keeps option names case-sensitive.
Config.optionxform=str
Config.read(iniFile)

pol = Config.getboolean("reconstruction","pol")

output_dir = Config.get("general","output_dir")


# Multipole bounds for temperature/polarization/kappa reconstruction.
lmax,tellmin,tellmax,pellmin,pellmax,kellmin,kellmax = aio.ellbounds_from_config(Config,"reconstruction")
gradCut = None
pol_list = ['TT']
debug = False
# Plot output goes under the user's web directory (WWW env var must be set).
out_dir = os.environ['WWW']+"plots/halotest/smallpatch_"



# === SIM AND RECON LOOP ===

kappa_stack = {}
Exemple #25
0
        encrypted_text = self.gpg.encrypt(message, recipientEmail, sign=self.signID, passphrase=self.passphrase)
        return encrypted_text

    def sign(self, message):
        """Sign *message* with the configured key ID and passphrase."""
        return self.gpg.sign(message,
                             keyid=self.signID,
                             passphrase=self.passphrase)


# Read the application config file.
# NOTE(review): `configFile` and `args` are defined earlier in the file and
# are not visible in this chunk.
parser = SafeConfigParser()
parser.read(configFile)

# Check for debug flag
DEBUG = False
if parser.has_option('debug', 'debug'):
    DEBUG = parser.getboolean('debug', 'debug')

# Ignore config.cfg file if --debug flag is present in args
if args.debug is True:
    DEBUG = True

#################
# Logging Setup #
#################
validLogLevels = ('ERROR', 'DEBUG', 'INFO', 'NOTSET')
# check for logging flag from config file
# Unknown levels are coerced to 'NOTSET' rather than raising.
if parser.has_option('debug', 'loglevel'):
    loglevel = parser.get('debug', 'loglevel').upper()
    if loglevel not in validLogLevels:
        loglevel = 'NOTSET'
else:
def cli():
    """Command-line entry point: validate HTTPS-Everywhere rulesets.

    Parses the checker config, optionally restricts the run to specific XML
    rule files, runs the enabled static checks and (optionally) live HTTP
    comparisons, and returns a process exit code (0 on success, 1 when any
    enabled check found problems).
    """
    parser = argparse.ArgumentParser(
        description='Check HTTPs rules for validity')
    parser.add_argument(
        'checker_config', help='an integer for the accumulator')
    parser.add_argument('rule_files', nargs="*", default=[],
                        help="Specific XML rule files")
    parser.add_argument('--json_file', default=None,
                        help='write results in json file')
    args = parser.parse_args()

    config = SafeConfigParser()
    config.read(args.checker_config)

    # Logging goes to stderr when logfile is "-", otherwise to the named file.
    logfile = config.get("log", "logfile")
    loglevel = convertLoglevel(config.get("log", "loglevel"))
    if logfile == "-":
        logging.basicConfig(stream=sys.stderr, level=loglevel,
                            format="%(levelname)s %(message)s")
    else:
        logging.basicConfig(filename=logfile, level=loglevel,
                            format="%(asctime)s %(levelname)s %(message)s [%(pathname)s:%(lineno)d]")

    # Each check below is opt-in via the [rulesets] section; missing options
    # leave the corresponding check disabled.
    autoDisable = False
    if config.has_option("rulesets", "auto_disable"):
        autoDisable = config.getboolean("rulesets", "auto_disable")
    # Test rules even if they have default_off=...
    includeDefaultOff = False
    if config.has_option("rulesets", "include_default_off"):
        includeDefaultOff = config.getboolean(
            "rulesets", "include_default_off")
    ruledir = config.get("rulesets", "rulesdir")
    checkCoverage = False
    if config.has_option("rulesets", "check_coverage"):
        checkCoverage = config.getboolean("rulesets", "check_coverage")
    checkTargetValidity = False
    if config.has_option("rulesets", "check_target_validity"):
        checkTargetValidity = config.getboolean(
            "rulesets", "check_target_validity")
    checkNonmatchGroups = False
    if config.has_option("rulesets", "check_nonmatch_groups"):
        checkNonmatchGroups = config.getboolean(
            "rulesets", "check_nonmatch_groups")
    checkTestFormatting = False
    if config.has_option("rulesets", "check_test_formatting"):
        checkTestFormatting = config.getboolean(
            "rulesets", "check_test_formatting")
    certdir = config.get("certificates", "basedir")
    # Skiplist: a CSV whose first column is a hex file hash; rows whose
    # `skipfield` column equals "1" mark rule files to skip.
    # NOTE(review): `skipdict` is a module-level dict (not defined in this
    # function) - verify it exists and is consulted by skipFile().
    if config.has_option("rulesets", "skiplist") and config.has_option("rulesets", "skipfield"):
        skiplist = config.get("rulesets", "skiplist")
        skipfield = config.get("rulesets", "skipfield")
        with open(skiplist) as f:
            f.readline()  # discard the CSV header row
            for line in f:
                splitLine = line.split(",")
                fileHash = splitLine[0]
                if splitLine[int(skipfield)] == "1":
                    skipdict[binascii.unhexlify(fileHash)] = 1

    threadCount = config.getint("http", "threads")
    httpEnabled = True
    if config.has_option("http", "enabled"):
        httpEnabled = config.getboolean("http", "enabled")

    # Distance metric used to compare fetched page pairs.
    metricName = config.get("thresholds", "metric")
    thresholdDistance = config.getfloat("thresholds", "max_distance")
    metricClass = getMetricClass(metricName)
    metric = metricClass()

    # Debugging options, graphviz dump
    dumpGraphvizTrie = False
    if config.has_option("debug", "dump_graphviz_trie"):
        dumpGraphvizTrie = config.getboolean("debug", "dump_graphviz_trie")
    if dumpGraphvizTrie:
        graphvizFile = config.get("debug", "graphviz_file")
        exitAfterDump = config.getboolean("debug", "exit_after_dump")

    if args.rule_files:
        xmlFnames = args.rule_files
    else:
        xmlFnames = glob.glob(os.path.join(ruledir, "*.xml"))
    trie = RuleTrie()

    rulesets = []
    coverageProblemsExist = False
    targetValidityProblemExist = False
    nonmatchGroupProblemsExist = False
    testFormattingProblemsExist = False
    # Parse every rule file, run the enabled static checks, and build the
    # matching trie from the surviving rulesets.
    for xmlFname in xmlFnames:
        logging.debug("Parsing {}".format(xmlFname))
        if skipFile(xmlFname):
            logging.debug(
                "Skipping rule file '{}', matches skiplist.".format(xmlFname))
            continue

        # NOTE(review): the file handle passed to etree.parse is never closed.
        ruleset = Ruleset(etree.parse(open(xmlFname, "rb")).getroot(), xmlFname)
        if ruleset.defaultOff and not includeDefaultOff:
            logging.debug("Skipping rule '{}', reason: {}".format(
                          ruleset.name, ruleset.defaultOff))
            continue
        # Check whether ruleset coverage by tests was sufficient.
        if checkCoverage:
            logging.debug("Checking coverage for '{}'.".format(ruleset.name))
            problems = ruleset.getCoverageProblems()
            for problem in problems:
                coverageProblemsExist = True
                logging.error(problem)
        if checkTargetValidity:
            logging.debug("Checking target validity for '{}'.".format(ruleset.name))
            problems = ruleset.getTargetValidityProblems()
            for problem in problems:
                targetValidityProblemExist = True
                logging.error(problem)
        if checkNonmatchGroups:
            logging.debug("Checking non-match groups for '{}'.".format(ruleset.name))
            problems = ruleset.getNonmatchGroupProblems()
            for problem in problems:
                nonmatchGroupProblemsExist = True
                logging.error(problem)
        if checkTestFormatting:
            logging.debug("Checking test formatting for '{}'.".format(ruleset.name))
            problems = ruleset.getTestFormattingProblems()
            for problem in problems:
                testFormattingProblemsExist = True
                logging.error(problem)
        trie.addRuleset(ruleset)
        rulesets.append(ruleset)

    # Trie is built now, dump it if it's set in config
    if dumpGraphvizTrie:
        logging.debug("Dumping graphviz ruleset trie")
        graph = trie.generateGraphizGraph()
        if graphvizFile == "-":
            graph.dot()
        else:
            with open(graphvizFile, "w") as gvFd:
                graph.dot(gvFd)
        if exitAfterDump:
            sys.exit(0)
    fetchOptions = http_client.FetchOptions(config)
    fetchers = list()

    # Ensure "default" is in the platform dirs
    if not os.path.isdir(os.path.join(certdir, "default")):
        raise RuntimeError(
            "Platform 'default' is missing from certificate directories")

    platforms = http_client.CertificatePlatforms(
        os.path.join(certdir, "default"))
    fetchers.append(http_client.HTTPFetcher(
        "default", platforms, fetchOptions, trie))
    # fetches pages with unrewritten URLs
    fetcherPlain = http_client.HTTPFetcher("default", platforms, fetchOptions)

    urlList = []
    if config.has_option("http", "url_list"):
        with open(config.get("http", "url_list")) as urlFile:
            urlList = [line.rstrip() for line in urlFile.readlines()]

    if httpEnabled:
        taskQueue = queue.Queue(1000)
        resQueue = queue.Queue()
        startTime = time.time()
        testedUrlPairCount = 0
        # NOTE(review): this getboolean call discards its result - it looks
        # like leftover debugging code; confirm it can be removed.
        config.getboolean("debug", "exit_after_dump")

        # Daemon worker threads compare original vs rewritten page fetches.
        for i in range(threadCount):
            t = UrlComparisonThread(
                taskQueue, metric, thresholdDistance, autoDisable, resQueue)
            t.setDaemon(True)
            t.start()

        # set of main pages to test
        mainPages = set(urlList)
        # If list of URLs to test/scan was not defined, use the test URL extraction
        # methods built into the Ruleset implementation.
        if not urlList:
            for ruleset in rulesets:
                if ruleset.platform != "default" and os.path.isdir(os.path.join(certdir, ruleset.platform)):
                    theseFetchers = copy.deepcopy(fetchers)
                    platforms.addPlatform(ruleset.platform, os.path.join(certdir, ruleset.platform))
                    theseFetchers.append(http_client.HTTPFetcher(
                        ruleset.platform, platforms, fetchOptions, trie))
                else:
                    theseFetchers = fetchers
                testUrls = []
                for test in ruleset.tests:
                    if not ruleset.excludes(test.url):
                        testedUrlPairCount += 1
                        testUrls.append(test.url)
                    else:
                        # TODO: We should fetch the non-rewritten exclusion URLs to make
                        # sure they still exist.
                        logging.debug("Skipping excluded URL {}".format(test.url))
                task = ComparisonTask(testUrls, fetcherPlain, theseFetchers, ruleset)
                taskQueue.put(task)

        taskQueue.join()
        logging.info("Finished in {:.2f} seconds. Loaded rulesets: {}, URL pairs: {}.".format(
                     time.time() - startTime, len(xmlFnames), testedUrlPairCount))
        if args.json_file:
            # NOTE(review): `problems` here is the leftover value from the
            # last static-check loop iteration (or unbound if no checks ran) -
            # this looks like a bug; verify what json_output expects.
            json_output(resQueue, args.json_file, problems)
    # Exit with an error code when any enabled check reported problems.
    if checkCoverage:
        if coverageProblemsExist:
            return 1  # exit with error code
    if checkTargetValidity:
        if targetValidityProblemExist:
            return 1  # exit with error code
    if checkNonmatchGroups:
        if nonmatchGroupProblemsExist:
            return 1  # exit with error code
    if checkTestFormatting:
        if testFormattingProblemsExist:
            return 1  # exit with error code
    return 0  # exit with success
Exemple #27
0
class Buildozer(object):
    """Driver that reads a buildozer.spec file and orchestrates building,
    packaging and deploying an application for a chosen target."""

    # Command names every target backend is expected to support.
    standard_cmds = ('distclean', 'update', 'debug', 'release',
                     'deploy', 'run', 'serve')

    def __init__(self, filename='buildozer.spec', target=None):
        """Load the spec file *filename* and optionally select a *target*
        (e.g. 'android') immediately."""
        super(Buildozer, self).__init__()
        self.log_level = 1
        self.environ = {}
        self.specfilename = filename
        self.state = None
        self.build_id = None
        self.config_profile = ''
        self.config = SafeConfigParser(allow_no_value=True)
        # Identity optionxform keeps option names case-sensitive.
        self.config.optionxform = lambda value: value
        # Monkey-patch convenience getters onto the parser instance so the
        # rest of the code can call config.getdefault(...) etc. directly.
        self.config.getlist = self._get_config_list
        self.config.getlistvalues = self._get_config_list_values
        self.config.getdefault = self._get_config_default
        self.config.getbooldefault = self._get_config_bool

        if exists(filename):
            self.config.read(filename)
            self.check_configuration_tokens()

        # Check all section/tokens for env vars, and replace the
        # config value if a suitable env var exists.
        set_config_from_envs(self.config)

        try:
            self.log_level = int(self.config.getdefault(
                'buildozer', 'log_level', '1'))
        except:
            # Malformed log_level falls back to the default of 1.
            pass
        self.builddir = self.config.getdefault('buildozer', 'builddir', None)

        self.targetname = None
        self.target = None
        if target:
            self.set_target(target)

    def set_target(self, target):
        '''Select the build target (one of buildozer.targets, e.g. "android")
        and verify the build layout and spec file for it.
        '''
        self.targetname = target
        # Import buildozer.targets.<target> and ask it for its target object.
        module = __import__('buildozer.targets.{0}'.format(target),
                            fromlist=['buildozer'])
        self.target = module.get_target(self)
        self.check_build_layout()
        self.check_configuration_tokens()

    def prepare_for_build(self):
        '''Prepare the build: check requirements, install and compile the
        platform. A no-op when the target was already prepared.
        '''
        assert(self.target is not None)
        if hasattr(self.target, '_build_prepared'):
            # already prepared - nothing to do
            return

        self.info('Preparing build')

        # Each preparation step is logged, then executed, in order.
        for message, step in (
                ('Check requirements for {0}'.format(self.targetname),
                 self.target.check_requirements),
                ('Install platform', self.target.install_platform),
                ('Check application requirements',
                 self.check_application_requirements),
                ('Check garden requirements', self.check_garden_requirements),
                ('Compile platform', self.target.compile_platform)):
            self.info(message)
            step()

        # flag to prevent multiple build
        self.target._build_prepared = True

    def build(self):
        '''Run the actual build and package the application.

        :meth:`prepare_for_build` must have been called first.  The target
        may set build_mode to 'release' or 'debug' before calling this.
        '''
        assert(self.target is not None)
        assert(hasattr(self.target, '_build_prepared'))

        # already built - nothing to do
        if hasattr(self.target, '_build_done'):
            return

        # bump and persist the build number
        next_id = int(self.state.get('cache.build_id', '0')) + 1
        self.build_id = next_id
        self.state['cache.build_id'] = str(next_id)

        self.info('Build the application #{}'.format(next_id))
        self.build_application()

        self.info('Package the application')
        self.target.build_package()

        # flag to prevent multiple build
        self.target._build_done = True

    #
    # Log functions
    #

    def log(self, level, msg):
        """Print *msg* unless *level* exceeds the configured log level."""
        if level > self.log_level:
            return
        # Render the line (coloured when the terminal supports it), then emit.
        if USE_COLOR:
            colour = COLOR_SEQ(LOG_LEVELS_C[level])
            line = ''.join((RESET_SEQ, colour, '# ', msg, RESET_SEQ))
        else:
            line = '{} {}'.format(LOG_LEVELS_T[level], msg)
        print(line)

    def debug(self, msg):
        # Level 2: shown only when log_level >= 2.
        self.log(2, msg)

    def info(self, msg):
        # Level 1: shown at the default log_level.
        self.log(1, msg)

    def error(self, msg):
        # Level 0: always shown.
        self.log(0, msg)

    #
    # Internal check methods
    #

    def checkbin(self, msg, fn):
        """Return the real path of executable *fn*, searching PATH.

        *msg* is the human-readable name used in log output.  Exits the
        process with status 1 when the binary cannot be found.
        """
        self.debug('Search for {0}'.format(msg))
        if exists(fn):
            return realpath(fn)
        # Fix: split PATH on os.pathsep (';' on Windows) rather than a
        # hard-coded ':' so the lookup also works on non-POSIX systems.
        for dn in environ['PATH'].split(os.pathsep):
            rfn = realpath(join(dn, fn))
            if exists(rfn):
                self.debug(' -> found at {0}'.format(rfn))
                return rfn
        self.error('{} not found, please install it.'.format(msg))
        exit(1)

    def cmd(self, command, **kwargs):
        """Run *command* in a subprocess, streaming/capturing its output.

        Extra keyword options (popped before being passed to Popen):
        show_output (echo to our stdout/stderr), get_stdout/get_stderr
        (capture and return), break_on_error (raise on non-zero exit),
        sensible (log only the command name, not its arguments).
        Returns (stdout_text_or_None, stderr_text_or_None, returncode).
        """
        # prepare the environ, based on the system + our own env
        env = copy(environ)
        env.update(self.environ)

        # prepare the process
        kwargs.setdefault('env', env)
        kwargs.setdefault('stdout', PIPE)
        kwargs.setdefault('stderr', PIPE)
        kwargs.setdefault('close_fds', True)
        kwargs.setdefault('shell', True)
        kwargs.setdefault('show_output', self.log_level > 1)

        show_output = kwargs.pop('show_output')
        get_stdout = kwargs.pop('get_stdout', False)
        get_stderr = kwargs.pop('get_stderr', False)
        break_on_error = kwargs.pop('break_on_error', True)
        sensible = kwargs.pop('sensible', False)

        # 'sensible' commands may carry secrets - log only the program name.
        if not sensible:
            self.debug('Run {0!r}'.format(command))
        else:
            if type(command) in (list, tuple):
                self.debug('Run {0!r} ...'.format(command[0]))
            else:
                self.debug('Run {0!r} ...'.format(command.split()[0]))
        self.debug('Cwd {}'.format(kwargs.get('cwd')))

        # open the process
        # close_fds with piped stdio is unsupported on Windows - drop it.
        if sys.platform == 'win32':
            kwargs.pop('close_fds', None)
        process = Popen(command, **kwargs)

        # prepare fds
        # Put both pipes in non-blocking mode (POSIX only; fcntl is falsy
        # elsewhere) so the select loop below never stalls on one stream.
        fd_stdout = process.stdout.fileno()
        fd_stderr = process.stderr.fileno()
        if fcntl:
            fcntl.fcntl(
                fd_stdout, fcntl.F_SETFL,
                fcntl.fcntl(fd_stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
            fcntl.fcntl(
                fd_stderr, fcntl.F_SETFL,
                fcntl.fcntl(fd_stderr, fcntl.F_GETFL) | os.O_NONBLOCK)

        ret_stdout = [] if get_stdout else None
        ret_stderr = [] if get_stderr else None
        # Pump both streams until EOF, optionally echoing and/or capturing.
        while True:
            try:
                readx = select.select([fd_stdout, fd_stderr], [], [])[0]
            except select.error:
                break
            if fd_stdout in readx:
                chunk = process.stdout.read()
                if not chunk:
                    # NOTE(review): EOF on stdout exits the whole loop, even
                    # if stderr still has pending data - confirm intended.
                    break
                if get_stdout:
                    ret_stdout.append(chunk)
                if show_output:
                    if IS_PY3:
                        stdout.write(chunk.decode('utf-8'))
                    else:
                        stdout.write(chunk)
            if fd_stderr in readx:
                chunk = process.stderr.read()
                if not chunk:
                    break
                if get_stderr:
                    ret_stderr.append(chunk)
                if show_output:
                    if IS_PY3:
                        stderr.write(chunk.decode('utf-8'))
                    else:
                        stderr.write(chunk)

        stdout.flush()
        stderr.flush()

        # Reap the child and collect its exit status.
        process.communicate()
        if process.returncode != 0 and break_on_error:
            self.error('Command failed: {0}'.format(command))
            self.error('')
            self.error('Buildozer failed to execute the last command')
            if self.log_level <= 1:
                self.error('If the error is not obvious, please raise the log_level to 2')
                self.error('and retry the latest command.')
            else:
                self.error('The error might be hidden in the log above this error')
                self.error('Please read the full log, and search for it before')
                self.error('raising an issue with buildozer itself.')
            self.error('In case of a bug report, please add a full log with log_level = 2')
            raise BuildozerCommandException()
        if ret_stdout:
            ret_stdout = b''.join(ret_stdout)
        if ret_stderr:
            ret_stderr = b''.join(ret_stderr)
        # NOTE(review): stdout is decoded with errors='ignore' but stderr is
        # not - the asymmetry looks unintentional; confirm before changing.
        return (ret_stdout.decode('utf-8', 'ignore') if ret_stdout else None,
                ret_stderr.decode('utf-8') if ret_stderr else None,
                process.returncode)

    def cmd_expect(self, command, **kwargs):
        """Run *command* via pexpect.spawnu and return the spawn object,
        merging the buildozer environment into the system one."""
        from pexpect import spawnu

        # subprocess environment: system env overlaid with our own
        child_env = copy(environ)
        child_env.update(self.environ)

        kwargs.setdefault('env', child_env)
        kwargs.setdefault('show_output', self.log_level > 1)
        sensible = kwargs.pop('sensible', False)
        show_output = kwargs.pop('show_output')

        if show_output:
            # mirror the child's output to our stdout
            kwargs['logfile'] = codecs.getwriter('utf8')(stdout)

        # 'sensible' commands may carry secrets - log only the program name
        if sensible:
            self.debug('Run (expect) {0!r} ...'.format(command.split()[0]))
        else:
            self.debug('Run (expect) {0!r}'.format(command))

        self.debug('Cwd {}'.format(kwargs.get('cwd')))
        return spawnu(command, **kwargs)

    def check_configuration_tokens(self):
        '''Validate the required tokens of the spec file; print every problem
        found and exit(1) when any exists.
        '''
        self.info('Check configuration tokens')
        get = self.config.getdefault
        errors = []

        if not get('app', 'title', ''):
            errors.append('[app] "title" is missing')
        if not get('app', 'source.dir', ''):
            errors.append('[app] "source.dir" is missing')

        package_name = get('app', 'package.name', '')
        if not package_name:
            errors.append('[app] "package.name" is missing')
        elif package_name[0] in '0123456789':
            errors.append('[app] "package.name" may not start with a number.')

        # Exactly one of "version" / "version.regex" must be provided;
        # the regex form additionally requires "version.filename".
        version = get('app', 'version', '')
        version_regex = get('app', 'version.regex', '')
        if not version and not version_regex:
            errors.append('[app] One of "version" or "version.regex" must be set')
        if version and version_regex:
            errors.append('[app] Conflict between "version" and "version.regex", only one can be used.')
        if version_regex and not get('app', 'version.filename', ''):
            errors.append('[app] "version.filename" is missing, required by "version.regex"')

        orientation = get('app', 'orientation', 'landscape')
        if orientation not in ('landscape', 'portrait', 'all'):
            errors.append('[app] "orientation" have an invalid value')

        if errors:
            self.error('{0} error(s) found in the buildozer.spec'.format(
                len(errors)))
            for error in errors:
                print(error)
            exit(1)

    def check_build_layout(self):
        '''Ensure the build (local and global) directory layout and files are
        ready.
        '''
        self.info('Ensure build layout')

        # without a spec file there is nothing to build
        if not exists(self.specfilename):
            print('No {0} found in the current directory. Abandon.'.format(
                    self.specfilename))
            exit(1)

        # per-user directories shared by every project
        self.mkdir(self.global_buildozer_dir)
        self.mkdir(self.global_cache_dir)

        # per-project directories; an explicit build dir overrides the
        # directory the spec file lives in
        specdir = self.builddir if self.builddir else dirname(self.specfilename)

        self.mkdir(join(specdir, '.buildozer'))
        self.mkdir(join(specdir, 'bin'))
        self.mkdir(self.applibs_dir)
        # persistent key/value store for build state
        self.state = JsonStore(join(self.buildozer_dir, 'state.db'))

        if self.targetname:
            target = self.targetname
            self.mkdir(join(self.global_platform_dir, target, 'platform'))
            self.mkdir(join(specdir, '.buildozer', target, 'platform'))
            self.mkdir(join(specdir, '.buildozer', target, 'app'))

    def check_application_requirements(self):
        '''Ensure the application requirements are all available and ready to be
        packaged as well.
        '''
        requirements = self.config.getlist('app', 'requirements', '')
        target_available_packages = self.target.get_available_packages()

        # drop requirements the target can compile itself; compare on the
        # bare package name, ignoring any "==version" pin
        # (was an E731 lambda assignment)
        requirements = [
            req for req in requirements
            if req.split('==')[0] not in target_available_packages]

        # skip the install entirely when the cached list matches
        if exists(self.applibs_dir) and \
                self.state.get('cache.applibs', '') == requirements:
            self.debug('Application requirements already installed, pass')
            return

        # recreate applibs from scratch
        self.rmdir(self.applibs_dir)
        self.mkdir(self.applibs_dir)

        # ok now check the availability of all requirements
        for requirement in requirements:
            self._install_application_requirement(requirement)

        # everything goes as expected, save this state!
        self.state['cache.applibs'] = requirements

    def _install_application_requirement(self, module):
        """Install a single python requirement into the applibs directory."""
        self._ensure_virtualenv()

        # resetup distribute, just in case
        # NOTE(review): this pipes a plain-http download straight into the
        # venv python -- insecure; confirm whether this step is still needed
        self.debug('Install distribute')
        distribute_cmd = ('curl http://python-distribute.org/'
                          'distribute_setup.py | venv/bin/python')
        self.cmd(distribute_cmd, get_stdout=True, cwd=self.buildozer_dir)

        self.debug('Install requirement {} in virtualenv'.format(module))
        pip_cmd = 'pip install --download-cache={} --target={} {}'.format(
            self.global_cache_dir, self.applibs_dir, module)
        self.cmd(pip_cmd, env=self.env_venv, cwd=self.buildozer_dir)

    def check_garden_requirements(self):
        '''Ensure required garden packages are available to be included.
        '''
        garden_requirements = self.config.getlist(
            'app', 'garden_requirements', '')

        # nothing to do when the cached state matches what is requested
        already_installed = (
            exists(self.gardenlibs_dir) and
            self.state.get('cache.gardenlibs', '') == garden_requirements)
        if already_installed:
            self.debug('Garden requirements already installed, pass')
            return

        # wipe any previously installed garden libs
        self.rmdir(self.gardenlibs_dir)

        # with an empty requirement list there is nothing to install:
        # record the (empty) state and stop
        if not garden_requirements:
            self.state['cache.gardenlibs'] = garden_requirements
            return

        self._ensure_virtualenv()
        self.cmd('pip install Kivy-Garden==0.1.1', env=self.env_venv)

        # recreate gardenlibs and install each requested package
        self.mkdir(self.gardenlibs_dir)
        for requirement in garden_requirements:
            self._install_garden_package(requirement)

        # remember what was installed for the next invocation
        self.state['cache.gardenlibs'] = garden_requirements

    def _install_garden_package(self, package):
        """Install one garden package into the app via the garden tool."""
        self._ensure_virtualenv()
        self.debug('Install garden package {} in buildozer_dir'.format(package))
        garden_cmd = 'garden install --app {}'.format(package)
        self.cmd(garden_cmd, env=self.env_venv, cwd=self.buildozer_dir)

    def _ensure_virtualenv(self):
        """Create the build virtualenv once and capture its environment."""
        if hasattr(self, 'venv'):
            # already initialized during this run
            return
        self.venv = join(self.buildozer_dir, 'venv')
        if not self.file_exists(self.venv):
            self.cmd('virtualenv --python=python2.7 ./venv',
                    cwd=self.buildozer_dir)

        # activate the venv in a shell, dump its environment, and pick the
        # variables we care about out of the `env` output
        output = self.cmd('bash -c "source venv/bin/activate && env"',
                get_stdout=True,
                cwd=self.buildozer_dir)
        self.env_venv = copy(self.environ)
        for line in output[0].splitlines():
            key, sep, value = line.partition('=')
            if sep and key in ('VIRTUAL_ENV', 'PATH'):
                self.env_venv[key] = value
        self.env_venv.pop('PYTHONHOME', None)

        # ensure any sort of compilation will fail
        self.env_venv['CC'] = '/bin/false'
        self.env_venv['CXX'] = '/bin/false'

    def mkdir(self, dn):
        """Create directory `dn` (and parents) unless it already exists."""
        if not exists(dn):
            self.debug('Create directory {0}'.format(dn))
            makedirs(dn)

    def rmdir(self, dn):
        """Recursively delete directory `dn`, if present."""
        if exists(dn):
            self.debug('Remove directory and subdirectory {}'.format(dn))
            rmtree(dn)

    def file_matches(self, patterns):
        """Expand each glob pattern and return every match, or None as soon
        as any single pattern matches nothing."""
        from glob import glob

        matched = []
        for pattern in patterns:
            hits = glob(expanduser(pattern.strip()))
            if not hits:
                # one empty pattern invalidates the whole set
                return
            matched.extend(hits)
        return matched

    def file_exists(self, *args):
        """Join the given path components and test for existence."""
        path = join(*args)
        return exists(path)

    def file_rename(self, source, target, cwd=None):
        """Rename `source` to `target`, both relative to `cwd` when given.

        Logs an error (but still attempts the rename, preserving historic
        behaviour) when the target's parent directory does not exist.
        """
        if cwd:
            source = join(cwd, source)
            target = join(cwd, target)
        self.debug('Rename {0} to {1}'.format(source, target))
        if not os.path.isdir(os.path.dirname(target)):
            # os.path.directory() does not exist -- use dirname() so the
            # error path itself cannot raise AttributeError; also fixed the
            # "becaues" typo in the message
            self.error('Rename {0} to {1} fails because {2} is not a directory'.format(source, target, os.path.dirname(target)))
        rename(source, target)

    def file_copy(self, source, target, cwd=None):
        """Copy `source` to `target`, both relative to `cwd` when given."""
        if cwd:
            source, target = join(cwd, source), join(cwd, target)
        self.debug('Copy {0} to {1}'.format(source, target))
        copyfile(source, target)

    def file_extract(self, archive, cwd=None):
        """Extract `archive` (.tgz/.tar.gz, .tbz2/.tar.bz2 or .zip) into
        `cwd`.

        Raises Exception for any other archive type.
        """
        if archive.endswith('.tgz') or archive.endswith('.tar.gz'):
            # XXX tarfile doesn't work for NDK-r8c :(
            self.cmd('tar xzf {0}'.format(archive), cwd=cwd)
            return

        if archive.endswith('.tbz2') or archive.endswith('.tar.bz2'):
            # XXX same as before
            self.cmd('tar xjf {0}'.format(archive), cwd=cwd)
            return

        if archive.endswith('.zip'):
            # only join when a cwd is given: join(None, ...) raised TypeError
            if cwd:
                archive = join(cwd, archive)
            zf = zipfile.ZipFile(archive)
            try:
                zf.extractall(path=cwd)
            finally:
                # close the handle even if extraction fails
                zf.close()
            return

        raise Exception('Unhandled extraction for type {0}'.format(archive))

    def file_copytree(self, src, dest):
        """Recursively copy `src` into `dest`, creating directories as
        needed.

        Unlike shutil.copytree, this merges into an existing destination.
        """
        # log through the logger instead of a bare print so the message
        # honours the configured log level, like the other file_* helpers
        self.debug('copy {} to {}'.format(src, dest))
        if os.path.isdir(src):
            if not os.path.isdir(dest):
                os.makedirs(dest)
            for entry in os.listdir(src):
                self.file_copytree(
                    os.path.join(src, entry),
                    os.path.join(dest, entry))
        else:
            copyfile(src, dest)

    def clean_platform(self):
        """Delete the per-target platform build directory, if it exists."""
        self.info('Clean the platform build directory')
        if exists(self.platform_dir):
            rmtree(self.platform_dir)

    def download(self, url, filename, cwd=None):
        """Download `url + filename` to `filename` (under `cwd` if given).

        Prints a textual progress indicator on stdout; any pre-existing
        local file is removed first. Returns the local filename.
        """
        def report_hook(index, blksize, size):
            # urlretrieve reporthook; size may be <= 0 when unknown
            if size <= 0:
                progression = '{0} bytes'.format(index * blksize)
            else:
                progression = '{0:.2f}%'.format(
                        index * blksize * 100. / float(size))
            stdout.write('- Download {}\r'.format(progression))
            stdout.flush()

        full_url = url + filename
        if cwd:
            filename = join(cwd, filename)
        if self.file_exists(filename):
            unlink(filename)

        self.debug('Downloading {0}'.format(full_url))
        urlretrieve(full_url, filename, report_hook)
        return filename

    def get_version(self):
        """Return the application version: either the literal "version"
        token, or the first group captured by "version.regex" applied to
        "version.filename". Raises Exception on inconsistent settings."""
        c = self.config
        has_version = c.has_option('app', 'version')
        has_regex = c.has_option('app', 'version.regex')
        has_filename = c.has_option('app', 'version.filename')

        if has_version:
            # a literal version excludes the regex/filename pair
            if has_regex or has_filename:
                raise Exception(
                    'version.regex and version.filename conflict with version')
            return c.get('app', 'version')

        if has_regex or has_filename:
            # both halves of the pair are required
            if not has_filename:
                raise Exception('version.filename is missing')
            if not has_regex:
                raise Exception('version.regex is missing')

            fn = c.get('app', 'version.filename')
            regex = c.get('app', 'version.regex')
            with open(fn) as fd:
                match = search(regex, fd.read())
            if not match:
                raise Exception(
                    'Unable to find capture version in {0}\n'
                    ' (looking for `{1}`)'.format(fn, regex))
            version = match.groups()[0]
            self.debug('Captured version: {0}'.format(version))
            return version

        raise Exception('Missing version or version.regex + version.filename')

    def build_application(self):
        """Assemble the app directory: sources, python libs, garden libs,
        then the sitecustomize patch -- in that order."""
        steps = (
            self._copy_application_sources,
            self._copy_application_libs,
            self._copy_garden_libs,
            self._add_sitecustomize,
        )
        for step in steps:
            step()

    def _copy_application_sources(self):
        """Copy the application sources into the target's app directory,
        honouring the source.include_exts / source.exclude_exts /
        source.exclude_dirs / source.exclude_patterns settings of the
        [app] section. The previous app directory is removed first.
        """
        # XXX clean the inclusion/exclusion algo.
        source_dir = realpath(self.config.getdefault('app', 'source.dir', '.'))
        include_exts = self.config.getlist('app', 'source.include_exts', '')
        exclude_exts = self.config.getlist('app', 'source.exclude_exts', '')
        exclude_dirs = self.config.getlist('app', 'source.exclude_dirs', '')
        exclude_patterns = self.config.getlist('app', 'source.exclude_patterns', '')
        app_dir = self.app_dir

        self.debug('Copy application source from {}'.format(source_dir))

        rmtree(self.app_dir)

        for root, dirs, files in walk(source_dir):
            # avoid hidden directory
            # NOTE(review): this checks every component of the *absolute*
            # path, so a dotted directory anywhere above source_dir also
            # skips the whole tree -- confirm this is intended
            if True in [x.startswith('.') for x in root.split(sep)]:
                continue

            # need to have sort-of normalization. Let's say you want to exclude
            # image directory but not images, the filtered_root must have a / at
            # the end, same for the exclude_dir. And then we can safely compare
            # (filtered_root is the path relative to source_dir, lowercased;
            # it is empty for source_dir itself, which is never excluded)
            filtered_root = root[len(source_dir) + 1:].lower()
            if filtered_root:
                filtered_root += '/'

                # manual exclude_dirs approach
                is_excluded = False
                for exclude_dir in exclude_dirs:
                    if exclude_dir[-1] != '/':
                        exclude_dir += '/'
                    if filtered_root.startswith(exclude_dir):
                        is_excluded = True
                        break
                if is_excluded:
                    continue

                # pattern matching
                for pattern in exclude_patterns:
                    if fnmatch(filtered_root, pattern):
                        is_excluded = True
                        break
                if is_excluded:
                    continue

            for fn in files:
                # avoid hidden files
                if fn.startswith('.'):
                    continue

                # exclusion by pattern matching (matched against the
                # lowercased path relative to source_dir)
                is_excluded = False
                dfn = fn.lower()
                if filtered_root:
                    dfn = join(filtered_root, fn)
                for pattern in exclude_patterns:
                    if fnmatch(dfn, pattern):
                        is_excluded = True
                        break
                if is_excluded:
                    continue

                # filter based on the extension
                # TODO more filters
                basename, ext = splitext(fn)
                if ext:
                    ext = ext[1:]
                    if include_exts and ext not in include_exts:
                        continue
                    if exclude_exts and ext in exclude_exts:
                        continue

                sfn = join(root, fn)
                rfn = realpath(join(app_dir, root[len(source_dir) + 1:], fn))

                # ensure the directory exists
                dfn = dirname(rfn)
                self.mkdir(dfn)

                # copy!
                self.debug('Copy {0}'.format(sfn))
                copyfile(sfn, rfn)

    def _copy_application_libs(self):
        """Copy the installed python requirements into <app>/_applibs."""
        target = join(self.app_dir, '_applibs')
        copytree(self.applibs_dir, target)

    def _copy_garden_libs(self):
        """Copy installed garden packages into <app>/libs, when present."""
        if not exists(self.gardenlibs_dir):
            return
        copytree(self.gardenlibs_dir, join(self.app_dir, 'libs'))

    def _add_sitecustomize(self):
        """Install sitecustomize.py into the app dir and, when a service
        entry point exists, prepend a sys.path patch so _applibs is
        importable at runtime."""
        copyfile(join(dirname(__file__), 'sitecustomize.py'),
                join(self.app_dir, 'sitecustomize.py'))

        main_py = join(self.app_dir, 'service', 'main.py')
        if not self.file_exists(main_py):
            # no service entry point: nothing to patch
            return

        # the file is read/written in binary mode, so the header must be
        # bytes too -- prepending a str raised TypeError on Python 3
        header = (b'import sys, os; '
                  b'sys.path = [os.path.join(os.getcwd(),'
                  b'"..", "_applibs")] + sys.path\n')
        with open(main_py, 'rb') as fd:
            data = fd.read()
        data = header + data
        with open(main_py, 'wb') as fd:
            fd.write(data)
        self.info('Patched service/main.py to include applibs')

    def namify(self, name):
        '''Return a "valid" name from a name with lot of invalid chars
        (allowed characters: a-z, A-Z, 0-9, -, _)
        '''
        # raw string: '\-' in a plain literal is an invalid escape sequence
        # (DeprecationWarning today, SyntaxError in future Pythons)
        return re.sub(r'[^a-zA-Z0-9_\-]', '_', name)

    @property
    def root_dir(self):
        # directory containing the buildozer.spec file
        return realpath(join(dirname(self.specfilename)))

    @property
    def buildozer_dir(self):
        # per-project working directory (.buildozer), honouring an explicit
        # build dir override when set
        if self.builddir:
            return join(self.builddir, '.buildozer')
        return join(self.root_dir, '.buildozer')

    @property
    def bin_dir(self):
        # where the final packages end up
        return join(self.root_dir, 'bin')

    @property
    def platform_dir(self):
        # per-target platform tooling directory
        return join(self.buildozer_dir, self.targetname, 'platform')

    @property
    def app_dir(self):
        # staging directory for the current target's application sources
        return join(self.buildozer_dir, self.targetname, 'app')

    @property
    def applibs_dir(self):
        # python requirements staged for packaging
        return join(self.buildozer_dir, 'applibs')

    @property
    def gardenlibs_dir(self):
        # garden packages staged for packaging
        return join(self.buildozer_dir, 'libs')

    @property
    def global_buildozer_dir(self):
        # per-user buildozer directory (~/.buildozer)
        return join(expanduser('~'), '.buildozer')

    @property
    def global_platform_dir(self):
        # per-user, per-target platform tooling
        return join(self.global_buildozer_dir, self.targetname, 'platform')

    @property
    def global_packages_dir(self):
        # per-user, per-target downloaded packages
        return join(self.global_buildozer_dir, self.targetname, 'packages')

    @property
    def global_cache_dir(self):
        # per-user download cache
        return join(self.global_buildozer_dir, 'cache')

    @property
    def package_full_name(self):
        # "<domain>.<name>", or just "<name>" when no domain is configured
        package_name = self.config.getdefault('app', 'package.name', '')
        package_domain = self.config.getdefault('app', 'package.domain', '')
        if package_domain == '':
            return package_name
        return '{}.{}'.format(package_domain, package_name)


    #
    # command line invocation
    #

    def targets(self):
        """Yield (name, module) for every available build target.

        Scans the bundled targets/ directory, skipping private and
        non-python files; targets whose import raises NotImplementedError
        are silently ignored.
        """
        for fn in listdir(join(dirname(__file__), 'targets')):
            if fn.startswith('.') or fn.startswith('__'):
                continue
            if not fn.endswith('.py'):
                continue
            target = fn[:-3]
            try:
                m = __import__('buildozer.targets.{0}'.format(target),
                        fromlist=['buildozer'])
            except NotImplementedError:
                # target unsupported on this host: skip it
                # (the old bare `except: raise` + unreachable `pass` was
                # dead code and has been removed)
                continue
            yield target, m

    def usage(self):
        """Print the full command-line help: global options, available
        targets, global commands and per-target custom commands."""
        print('Usage:')
        print('    buildozer [--profile <name>] [--verbose] [target] <command>...')
        print('    buildozer --version')
        print('')
        print('Available targets:')
        targets = list(self.targets())
        for target, m in targets:
            doc = m.__doc__.strip().splitlines()[0].strip()
            print('  {0:<18} {1}'.format(target, doc))

        print('')
        print('Global commands (without target):')
        cmds = [x for x in dir(self) if x.startswith('cmd_')]
        for cmd in cmds:
            name = cmd[4:]
            meth = getattr(self, cmd)

            if not meth.__doc__:
                continue
            # first line of the docstring is the summary (the previous
            # list comprehension around splitlines() was a no-op)
            doc = meth.__doc__.strip().splitlines()[0].strip()
            print('  {0:<18} {1}'.format(name, doc))

        print('')
        print('Target commands:')
        print('  clean      Clean the target environment')
        print('  update     Update the target dependencies')
        print('  debug      Build the application in debug mode')
        print('  release    Build the application in release mode')
        print('  deploy     Deploy the application on the device')
        print('  run        Run the application on the device')
        print('  serve      Serve the bin directory via SimpleHTTPServer')

        for target, m in targets:
            mt = m.get_target(self)
            commands = mt.get_custom_commands()
            if not commands:
                continue
            print('')
            print('Target "{0}" commands:'.format(target))
            for command, doc in commands:
                if not doc:
                    continue
                doc = doc.strip().splitlines()[0].strip()
                print('  {0:<18} {1}'.format(command, doc))

        print('')

    def run_default(self):
        """Replay the stored default command, or explain how to set one
        and exit(1) when none is configured."""
        self.check_build_layout()
        if 'buildozer:defaultcommand' not in self.state:
            print('No default command set.')
            print('Use "buildozer setdefault <command args...>"')
            # stray trailing double-quote removed from the message
            print('Use "buildozer help" for a list of all commands')
            exit(1)
        cmd = self.state['buildozer:defaultcommand']
        self.run_command(cmd)

    def run_command(self, args):
        """Parse leading global options from `args`, then dispatch the
        first remaining word either to a cmd_* method or to a build
        target's own command handler."""
        # consume option flags until the first non-flag argument;
        # order matters because --profile pops its value from args
        while args:
            if not args[0].startswith('-'):
                break
            arg = args.pop(0)

            if arg in ('-v', '--verbose'):
                self.log_level = 2

            elif arg in ('-h', '--help'):
                self.usage()
                exit(0)

            elif arg in ('-p', '--profile'):
                # the profile name is the next argument
                self.config_profile = args.pop(0)

            elif arg == '--version':
                print('Buildozer {0}'.format(__version__))
                exit(0)

        self._merge_config_profile()

        self.check_root()

        # nothing left: replay the stored default command
        if not args:
            self.run_default()
            return

        command, args = args[0], args[1:]
        cmd = 'cmd_{0}'.format(command)

        # internal commands ?
        if hasattr(self, cmd):
            getattr(self, cmd)(*args)
            return

        # maybe it's a target?
        targets = [x[0] for x in self.targets()]
        if command not in targets:
            print('Unknown command/target {}'.format(command))
            exit(1)

        self.set_target(command)
        self.target.run_commands(args)

    def check_root(self):
        '''If effective user id is 0, display a warning and require
        user input to continue (or to cancel)'''

        if IS_PY3:
            input_func = input
        else:
            input_func = raw_input

        warn_on_root = self.config.getdefault('buildozer', 'warn_on_root', '1')
        try:
            euid = os.geteuid() == 0
        except AttributeError:
            # geteuid() is POSIX-only: check for admin rights on Windows,
            # assume non-root everywhere else (previously `ctypes` could be
            # referenced without ever being imported -> NameError)
            if sys.platform == 'win32':
                import ctypes
                euid = ctypes.windll.shell32.IsUserAnAdmin() != 0
            else:
                euid = False
        if warn_on_root == '1' and euid:
            print('\033[91m\033[1mBuildozer is running as root!\033[0m')
            print('\033[91mThis is \033[1mnot\033[0m \033[91mrecommended, and may lead to problems later.\033[0m')
            cont = None
            while cont not in ('y', 'n'):
                cont = input_func('Are you sure you want to continue [y/n]? ')

            if cont == 'n':
                sys.exit()

    def cmd_init(self, *args):
        '''Create a initial buildozer.spec in the current directory
        '''
        # refuse to clobber an existing spec file
        if exists('buildozer.spec'):
            print('ERROR: You already have a buildozer.spec file.')
            exit(1)
        template = join(dirname(__file__), 'default.spec')
        copyfile(template, 'buildozer.spec')
        print('File buildozer.spec created, ready to customize!')

    def cmd_distclean(self, *args):
        '''Clean the whole Buildozer environment.
        '''
        print("Warning: Your ndk, sdk and all other cached packages will be"
              " removed. Continue? (y/n)")
        # startswith() also copes with an empty line (EOF / plain Enter),
        # where indexing [0] raised IndexError
        if sys.stdin.readline().lower().startswith('y'):
            self.info('Clean the global build directory')
            if not exists(self.global_buildozer_dir):
                return
            rmtree(self.global_buildozer_dir)

    def cmd_help(self, *args):
        '''Show the Buildozer help.
        '''
        # delegate to usage(), which prints targets and commands
        self.usage()

    def cmd_setdefault(self, *args):
        '''Set the default command to run when no arguments are given
        '''
        self.check_build_layout()
        # stored in the state db; run_default() replays it later
        self.state['buildozer:defaultcommand'] = args

    def cmd_version(self, *args):
        '''Show the Buildozer version
        '''
        # __version__ is defined at module level
        print('Buildozer {0}'.format(__version__))

    def cmd_serve(self, *args):
        '''Serve the bin directory via SimpleHTTPServer
        '''
        try:
            # python 3 module names
            from http.server import SimpleHTTPRequestHandler
            from socketserver import TCPServer
        except ImportError:
            # python 2 fallback
            from SimpleHTTPServer import SimpleHTTPRequestHandler
            from SocketServer import TCPServer

        os.chdir(self.bin_dir)
        server = TCPServer(("", SIMPLE_HTTP_SERVER_PORT),
                           SimpleHTTPRequestHandler)
        print("Serving via HTTP at port {}".format(SIMPLE_HTTP_SERVER_PORT))
        print("Press Ctrl+c to quit serving.")
        server.serve_forever()

    #
    # Private
    #

    def _merge_config_profile(self):
        """Fold every [section@profile,...] section matching the active
        profile back into its base section."""
        profile = self.config_profile
        if not profile:
            return
        for section in self.config.sections():
            # section names look like [app@default,hd]; anything without
            # an '@' is a plain section and is left alone
            if '@' not in section:
                continue

            # split off the comma-separated profile list and check whether
            # the active profile is among them
            section_base, _, profiles_part = section.partition('@')
            if profile not in profiles_part.split(','):
                continue

            # the current profile applies to this section: merge its
            # values into the base section, creating it if needed
            if not self.config.has_section(section_base):
                self.config.add_section(section_base)
            for name, value in self.config.items(section):
                print('merged ({}, {}) into {} (profile is {})'.format(name,
                        value, section_base, profile))
                self.config.set(section_base, name, value)



    def _get_config_list_values(self, *args, **kwargs):
        # monkey-patch method for ConfigParser: same as _get_config_list,
        # but forces "key=value" strings in the result
        kwargs['with_values'] = True
        return self._get_config_list(*args, **kwargs)

    def _get_config_list(self, section, token, default=None, with_values=False):
        # monkey-patch method for ConfigParser
        # get a key as a list of string, seperated from the comma

        # an environment variable, when set, overrides the file config
        set_config_token_from_env(section, token, self.config)

        # a dedicated [section:token] section wins over the inline value
        l_section = '{}:{}'.format(section, token)
        if self.config.has_section(l_section):
            keys = self.config.options(l_section)
            if with_values:
                return ['{}={}'.format(key, self.config.get(l_section, key)) for
                        key in keys]
            return [key.strip() for key in keys]

        # fall back to the comma-separated inline value
        raw = self.config.getdefault(section, token, '')
        if not raw:
            return default
        items = raw.split(',')
        if not items:
            return default
        return [item.strip() for item in items]

    def _get_config_default(self, section, token, default=None):
        # monkey-patch method for ConfigParser
        # return the (possibly env-overridden) value, or `default` when the
        # section or option is absent

        # check if an env var exists that should replace the file config
        set_config_token_from_env(section, token, self.config)

        if self.config.has_section(section) and \
                self.config.has_option(section, token):
            return self.config.get(section, token)
        return default

    def _get_config_bool(self, section, token, default=False):
        # monkey-patch method for ConfigParser
        # boolean variant of _get_config_default

        # check if an env var exists that should replace the file config
        set_config_token_from_env(section, token, self.config)

        if self.config.has_section(section) and \
                self.config.has_option(section, token):
            return self.config.getboolean(section, token)
        return default
Exemple #28
0
    def __init__(self, config_file = 'explosion.cfg'):
        """
        Initialize explosion from config file explosion.cfg.

        Actual values need to be in the [explosion] section, the
        [DEFAULT] section is a backup.  It is also hard-coded, so you
        don't need to overwrite it unless it is being changed.  The
        current defaults are in this class in attribute
        'default_config'.

        [DEFAULT]
        program = ../k
        ext = #presn
        dump = ../%(base)s%(ext)s
        template = explosion.link
        logfile = explosion.log
        cmdfile = explosion.cmd
        logall = True
        verbose = True
        alpha = 1.0
        precision = 2
        force = False
        run = True

        [Here is what you *have* to add:]
        link = <link sentinal like "Da">
        ekin_or_mni = <value>
        base = <run name>

        [example]
        link = Da
        alpha = 1.0
        ekin = 1.2e+51
        base = s25

        [explosion]
        link = Pa
        alpha = 1.0
        ekin = 1.2e+51
        base = u25

        ======

        So, with the defaults, the files that you need in the
        explosion directory are just

        explosion.link
        explosion.cmd
        explosion.cfg

        """

        start_time = datetime.datetime.now()

        # layered config: hard-coded defaults, overridden by the cfg file
        config = Config(self.default_config)
#        config.readfp(io.BytesIO(self.default_config))
        config.read(config_file)

        section = 'explosion'
        run = config.getboolean(section,'run')
        force = config.getboolean(section,'force')
        kepler = config.get(section,'program')
        presn_dump_name = config.get(section,'dump')
        self.base_name = config.get(section,'base')
        self.generator_tempelete = config.get(section,'template')
        generator_start = config.get(section,'link')

        logfile = config.get(section,'logfile')
        self.exp_cmd_file = config.get(section,'cmdfile')
        verbose = config.getboolean(section,'verbose')
        logall = config.getboolean(section,'logall')

        # set up log output
        # maybe this this be replaced by
        # deriving class from Logged and
        # self.setup_logger(silent = not verbose, logfile=logfile, format='UTC')
        # and at the end: self.close_logger()
        self.logger = logging.getLogger(self.__class__.__name__)
        self.logger.setLevel(logging.DEBUG)
        fh = logging.FileHandler(logfile, 'w')
        if verbose:
            level = logging.DEBUG
        else:
            level = logging.WARNING
        fh.setLevel(level)
        formatter = utils.UTCFormatter('%(asctime)s%(msecs)03d %(nameb)-12s %(levelname)s: %(message)s',
                                      datefmt = '%Y%m%d%H%M%S')
        fh.setFormatter(formatter)
        root_logger = logging.getLogger('')
        # attach the file handler to the root logger when logall is set
        # (and no other handler claimed it yet), else only to our logger
        if logall and len(root_logger.handlers) == 0:
            root_logger.addHandler(fh)
            root_logger.setLevel(level)
        else:
            self.logger.addHandler(fh)

        # set up state for explosion
        state = State(
            config = config)

        # NOTE(review): Link appears to be a run sentinel supporting string
        # concatenation and `+= 1` increments -- confirm its semantics
        link = Link(generator_start)

        self.logger.info("{:s}: first alpha = {:g}".format(link, state.alpha))

        # bisection-style loop: run KEPLER for each candidate alpha until
        # `state` reports a best match
        while state.best is None:
            if run:
                # a run counts as finished only if both dump files exist
                finished = True
                dump_file_name = self.base_name + link + "#final"
                if not os.path.exists(dump_file_name):
                    finished = False
                dump_file_name = self.base_name + link + "#envel"
                if not os.path.exists(dump_file_name):
                    finished = False
                # check if run parameters are identical
                if finished and not force:
                    if not state.eq_alpha(self.existing_alpha(link)):
                        # stale results from a different alpha: discard them
                        self.logger.warning('Previous run '+link+' had different alpha.  RERUNNING.')
                        finished = False
                        force = True
                        os.remove(self.base_name + link + "#final")
                        os.remove(self.base_name + link + "#envel")
                        os.remove(self.base_name + link + "z")
                        os.remove(link + ".link")
                if (not finished) or force:
                    # call_string = "{} {}{} ".format(kepler, self.base_name, link)

                    # # look if a restart (z) dump is present
                    # dump_file_name = self.base_name + link + "z"
                    # if (not os.path.exists(dump_file_name)) or force:
                    #     self.logger.info("RUN")
                    #     self.generator(state.alpha, link)
                    #     self.make_cmd(link);
                    #     call_string += presn_dump_name
                    # else:
                    #     self.logger.info("CONTINUE")
                    #     call_string += "z"
                    # call_string += " k </dev/null >/dev/null"
                    # self.logger.info("CALL: {}".format(call_string))
                    # os.system(call_string)

                    args = [kepler, self.base_name + link]

                    # look if a restart (z) dump is present
                    dump_file_name = self.base_name + link + "z"
                    if (not os.path.exists(dump_file_name)) or force:
                        self.logger.info("RUN")
                        self.generator(state.alpha, link)
                        self.make_cmd(link);
                        args += [presn_dump_name]
                    else:
                        self.logger.info("CONTINUE")
                        args += ['z']
                    args += ['k']
                    self.logger.info("CALL: {}".format(' '.join(args)))
                    # run KEPLER detached from our stdin/stdout
                    with open(os.devnull,'r') as null_in:
                        with open(os.devnull,'w') as null_out:
                            subprocess.call(args,
                                            shell  = False,
                                            stdin  = null_in,
                                            stdout = null_out,
                                            stderr = subprocess.STDOUT)
                else:
                    self.logger.info("FINISHED")

            state.update(link)

            if state.best is None:
                # no acceptable run yet: advance the run sentinel and retry
                link += 1
                self.logger.info("{}: new alpha = {} ({},{})".format(
                    link,
                    state.alpha,
                    state.lo_val(),
                    state.hi_val()))

        end_time = datetime.datetime.now()
        load_time = end_time - start_time
        self.logger.info('finished in ' + time2human(load_time.total_seconds()))

        # summary line with the winning alpha / link / mass / zone
        self.logger.critical(self.out_format.format(
            state.flag,
            state.goal,
            state.val(state.best),
            abs(state.goal - state.val(state.best)),
            float2str(state.best.alpha),
            state.best.link,
            state.best.mass,
            state.best.zone))

        state.save('explosion.res')

        # clean up
        for filename in glob.iglob("xxx*"):
            os.remove(filename)
Exemple #29
0
def main(args=None):
    """Bump a version string in files and optionally commit/tag via VCS.

    Defaults are gathered (in increasing priority) from the latest VCS tag,
    the config file and the command line.  After the files are rewritten,
    the config file is updated and, when requested, the change is committed
    and tagged in version control.
    """
    # Pass 1: only discover which config file to read.
    parser1 = argparse.ArgumentParser(add_help=False)

    parser1.add_argument(
        '--config-file', default='.bumpversion.cfg', metavar='FILE',
        help='Config file to read most of the variables from', required=False)

    known_args, remaining_argv = parser1.parse_known_args(args)

    defaults = {}
    vcs_info = {}

    # Harvest tag information from every usable VCS backend.
    for vcs in VCS:
        if vcs.is_usable():
            vcs_info.update(vcs.latest_tag_info())

    if 'current_version' in vcs_info:
        defaults['current_version'] = vcs_info['current_version']

    config = None
    if os.path.exists(known_args.config_file):
        config = SafeConfigParser()
        config.readfp(io.open(known_args.config_file, 'rt', encoding='utf-8'))

        defaults.update(dict(config.items("bumpversion")))

        # items() yields strings; coerce the boolean options explicitly.
        for boolvaluename in ("commit", "tag", "dry_run"):
            try:
                defaults[boolvaluename] = config.getboolean(
                    "bumpversion", boolvaluename)
            except NoOptionError:
                pass  # no default value then ;)

    elif known_args.config_file != parser1.get_default('config_file'):
        # The user explicitly named a config file that we cannot read.
        raise argparse.ArgumentTypeError("Could not read config file at {}".format(
            known_args.config_file))

    # Pass 2: version parse/serialize options, seeded with the defaults.
    parser2 = argparse.ArgumentParser(add_help=False, parents=[parser1])
    parser2.set_defaults(**defaults)

    parser2.add_argument('--current-version', metavar='VERSION',
                         help='Version that needs to be updated', required=False)
    parser2.add_argument('--parse', metavar='REGEX',
                         help='Regex parsing the version string',
                         default=r'(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)')
    parser2.add_argument('--serialize', metavar='FORMAT',
                         help='How to format what is parsed back to a version',
                         default='{major}.{minor}.{patch}')

    parser2_2 = argparse.ArgumentParser(
        description=DESCRIPTION,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        conflict_handler='resolve',
        add_help=False,
        parents=[parser2],
    )
    parser2_2.add_argument('part', help='Part of the version to be bumped.', nargs='?')

    known_args, remaining_argv = parser2_2.parse_known_args(remaining_argv)

    if known_args.part:
        # Re-insert the positional so the final parser sees it too.
        remaining_argv[0:0] = [known_args.part]

    defaults.update(vars(known_args))

    v = Version(
        known_args.parse,
        known_args.serialize,
        context=dict(list(prefixed_environ().items()) + list(vcs_info.items()))
    )

    # Derive the new version by bumping the current one, unless given.
    if 'new_version' not in defaults and known_args.current_version:
        v.parse(known_args.current_version)
        v.bump(known_args.part)
        defaults['new_version'] = v.serialize()

    # Pass 3: the full, user-facing parser.
    parser3 = argparse.ArgumentParser(
        description=DESCRIPTION,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        conflict_handler='resolve',
        parents=[parser2],
    )

    parser3.set_defaults(**defaults)

    parser3.add_argument('--current-version', metavar='VERSION',
                         help='Version that needs to be updated',
                         required='current_version' not in defaults)
    parser3.add_argument('--dry-run', '-n', action='store_true',
                         default=False, help="Don't write any files, just pretend.")
    parser3.add_argument('--new-version', metavar='VERSION',
                         help='New version that should be in the files',
                         required='new_version' not in defaults)

    commitgroup = parser3.add_mutually_exclusive_group()

    commitgroup.add_argument('--commit', action='store_true', dest="commit",
                             help='Commit to version control', default=defaults.get("commit", False))
    commitgroup.add_argument('--no-commit', action='store_false', dest="commit",
                             help='Do not commit to version control', default=argparse.SUPPRESS)

    taggroup = parser3.add_mutually_exclusive_group()

    # BUG FIX: the --tag default previously read defaults.get("commit", ...),
    # so configuring commit=True silently enabled tagging as well.
    taggroup.add_argument('--tag', action='store_true', dest="tag", default=defaults.get("tag", False),
                          help='Create a tag in version control')
    taggroup.add_argument('--no-tag', action='store_false', dest="tag",
                          help='Do not create a tag in version control', default=argparse.SUPPRESS)

    parser3.add_argument('--tag-name', metavar='TAG_NAME',
                         help='Tag name (only works with --tag)',
                         default=defaults.get('tag_name', 'v{new_version}'))

    parser3.add_argument('--message', '-m', metavar='COMMIT_MSG',
                         help='Commit message',
                         default=defaults.get('message', 'Bump version: {current_version} → {new_version}'))

    files = []
    if 'files' in defaults:
        assert defaults['files'] is not None
        files = defaults['files'].split(' ')

    parser3.add_argument('part',
                         help='Part of the version to be bumped.')
    parser3.add_argument('files', metavar='file',
                         nargs='+' if len(files) == 0 else '*',
                         help='Files to change', default=files)

    args = parser3.parse_args(remaining_argv)

    # BUG FIX: was `len(args.files) is 0` -- identity comparison against an
    # int literal is an implementation detail of CPython; test emptiness.
    if not args.files:
        warnings.warn("No files specified")

    # The `vcs` bound here is reused below for add_path/commit/tag.
    for vcs in VCS:
        if vcs.is_usable():
            vcs.assert_nondirty()
            break

    # make sure files exist and contain version string
    for path in args.files:
        with io.open(path, 'rb') as f:
            before = f.read().decode('utf-8')

        assert args.current_version in before, 'Did not find string {} in file {}'.format(
            args.current_version, path)

    # change version string in files
    for path in args.files:
        with io.open(path, 'rb') as f:
            before = f.read().decode('utf-8')

        after = before.replace(args.current_version, args.new_version)

        if not args.dry_run:
            with io.open(path, 'wt', encoding='utf-8') as f:
                f.write(after)

    commit_files = args.files

    if config:
        # new_version must not persist; current_version advances instead.
        config.remove_option('bumpversion', 'new_version')

        config.set('bumpversion', 'current_version', args.new_version)

        if not args.dry_run:
            s = StringIO()

            try:
                # Round-trip through StringIO so we can write the file as
                # explicit UTF-8 bytes.
                config.write(s)
                with io.open(known_args.config_file, 'wb') as f:
                    f.write(s.getvalue().encode('utf-8'))
            except UnicodeEncodeError:
                warnings.warn(
                    "Unable to write UTF-8 to config file, because of an old configparser version. "
                    "Update with `pip install --upgrade configparser`."
                )

            commit_files.append(known_args.config_file)

    if args.commit:
        if not args.dry_run:
            for path in commit_files:
                vcs.add_path(path)

            formatargs = {
                "current_version": args.current_version,
                "new_version": args.new_version,
            }
            formatargs.update(prefixed_environ())

            vcs.commit(message=args.message.format(**formatargs))

            if args.tag:
                vcs.tag(args.tag_name.format(**formatargs))
from configparser import SafeConfigParser
from bottle import route, run, template, static_file, redirect, error
from pymongo import MongoClient
from random import randint
from cherrypy import wsgiserver

# Application-wide configuration, read once at import time.
config = SafeConfigParser()
environment = SafeConfigParser()
config.read('config.ini')
environment.read('environment.ini')
# True when environment.ini marks this as a development deployment.
development = environment.getboolean('environment', 'development')
# Handle to the MongoDB database named in config.ini ([mongodb] db_name).
db = MongoClient()[config.get('mongodb', 'db_name')]
register_count = 0

@route('/')
def index():
    """Render the landing page with up to 20 randomly chosen inactive domains.

    Picks each domain independently by a random cursor skip, so duplicates
    are possible when few inactive domains exist.
    """
    count = db.domains.find({'status': 'inactive'}).limit(-1).count()
    domains = []

    if count > 0:
        # this is inefficient and slow, but oh well
        for _ in range(20):  # loop index was unused; 20 independent draws
            offset = randint(0, count - 1)
            domain = next(db.domains.find({'status': 'inactive'}).skip(offset).limit(1))
            domains.append(domain['domain'])

    return template('index', page=1, domains=domains)

@route('/about')
def about():
Exemple #31
0
def main():  # pragma: no cover
    """Read the config file, set up logging, storage and email, then serve
    the WSGI application forever.

    Exits via sys.exit() with a message when a required config section or
    option is missing.
    """
    config = SafeConfigParser()
    dirs = ('.', '/etc', '/usr/local/etc')
    # `directory` instead of `dir`, which shadowed the builtin.
    if not config.read([os.path.join(directory, config_file) for directory in dirs]):
        sys.exit('Could not find {} in {}'.format(config_file, dirs))

    try:
        logfile = config.get('logging', 'file')
        rotating = config.getboolean('logging', 'rotate', fallback=False)
        if rotating:
            # BUG FIX: these were read with config.get(), which returns
            # strings, while the fallbacks are ints -- RotatingFileHandler
            # would receive a str max_size/backup_count whenever the options
            # were present in the file.  getint() keeps the type consistent.
            max_size = config.getint('logging', 'max_size', fallback=1048576)
            backup_count = config.getint('logging', 'backup_count', fallback=5)
            handler = logbook.RotatingFileHandler(logfile, max_size=max_size,
                                                  backup_count=backup_count)
        else:
            handler = logbook.FileHandler(logfile)
        handler.push_application()
    except Exception:
        # Any problem with file logging degrades to stderr logging.
        logbook.StderrHandler().push_application()

    try:
        kwargs = dict(config.items('mongodb'))
    except NoSectionError:
        sys.exit('No "mongodb" section in config file')
    args = []
    # The four required settings become positional args; the rest stay kwargs.
    for arg in ('hosts', 'database', 'username', 'password'):
        try:
            args.append(config.get('mongodb', arg))
        except NoOptionError:
            sys.exit('No "{}" setting in "mongodb" section of config file'.
                     format(arg))
        kwargs.pop(arg)
    # 'hosts' is a comma-separated list.
    args[0] = [s.strip() for s in args[0].split(',')]
    store = MongoStore(*args, **kwargs)

    try:
        email_sender = config.get('email', 'sender')
    except NoSectionError:
        sys.exit('No "email" section in config file')
    except NoOptionError:
        sys.exit('No "sender" setting in "email" section of config file')

    business_logic = BusinessLogic(store, email_sender)

    try:
        listen_port = int(config.get('wsgi', 'port'))
        log.info('Binding to port {}'.format(listen_port))
    except Exception:
        # Missing or malformed port -> default.
        listen_port = 80
        log.info('Binding to default port {}'.format(listen_port))

    try:
        auth_key = config.get('wsgi', 'auth_key')
        log.info('Server authentication enabled')
    except Exception:
        log.warning('Server authentication DISABLED')
        auth_key = None

    httpd = make_server('', listen_port,
                        partial(application, business_logic, auth_key),
                        handler_class=LogbookWSGIRequestHandler)
    business_logic.schedule_next_deadline()
    httpd.serve_forever()
except ImportError:
    from ConfigParser import SafeConfigParser

parser = SafeConfigParser()
parser.read("test.cfg")


def do_nothing(self):
    pass


# add a bunch of decorator metafunctions like LIB_CORE
# which can be wrapped around individual tests as needed
for section in parser.sections():
    for option in parser.options(section):
        if parser.getboolean(section, option):
            vars()["{}_{}".format(section.upper(), option.upper())] = \
                lambda function: function
        else:
            vars()["{}_{}".format(section.upper(), option.upper())] = \
                lambda function: do_nothing


def BLANK_PCM_Reader(length, sample_rate=44100, channels=2,
                     bits_per_sample=16, channel_mask=None):
    from audiotools.decoders import SameSample

    if channel_mask is None:
        channel_mask = int(audiotools.ChannelMask.from_channels(channels))

    return SameSample(sample=1,
Exemple #33
0
def main():
    '''
    Main function for this module. Parses all command line arguments, reads in data
    from stdin, and sends it to the proper BLS algorithm.
    '''
    # NOTE(review): this block is Python 2 code (print statements, xrange)
    # and will not run unmodified under Python 3.
    # This is a global list of default values that will be used by the argument parser
    # and the configuration parser.
    defaults = {'min_duration':'0.0416667', 'max_duration':'0.5', 'n_bins':'100',
        'direction':'0', 'mode':'vec', 'print_format':'encoded', 'verbose':'0', 'profiling':'0'}

    # Set up the parser for command line arguments and read them.
    parser = __init_parser(defaults)
    args = parser.parse_args()

    if not args.config:
        # No configuration file specified -- read in command line arguments.
        if not args.segment:
            parser.error('No trial segment specified and no configuration file given.')

        segment = args.segment
        mindur = args.mindur
        maxdur = args.maxdur
        nbins = args.nbins
        direction = args.direction
        mode = args.mode
        fmt = args.fmt
        verbose = args.verbose
        profile = args.profile
    else:
        # Configuration file was given; read in that instead.
        # `defaults` supplies fallbacks for any option the file omits.
        cp = SafeConfigParser(defaults)
        cp.read(args.config)

        segment = cp.getfloat('DEFAULT', 'segment')
        mindur = cp.getfloat('DEFAULT', 'min_duration')
        maxdur = cp.getfloat('DEFAULT', 'max_duration')
        nbins = cp.getint('DEFAULT', 'n_bins')
        direction = cp.getint('DEFAULT', 'direction')
        mode = cp.get('DEFAULT', 'mode')
        fmt = cp.get('DEFAULT', 'print_format')
        verbose = cp.getboolean('DEFAULT', 'verbose')
        profile = cp.getboolean('DEFAULT', 'profiling')

    # Perform any sanity-checking on the arguments.
    __check_args(segment, mindur, maxdur, nbins, direction)

    # Send the data to the algorithm.
    # Each stdin record carries an id (k), quarters (q) and three parallel
    # array columns (time, flux, fluxerr).
    for k, q, time, flux, fluxerr in read_mapper_output(sys.stdin):
        # Extract the array columns.
        time = np.array(time, dtype='float64')
        flux = np.array(flux, dtype='float64')
        fluxerr = np.array(fluxerr, dtype='float64')

        if profile:
            # Turn on profiling.
            pr = cProfile.Profile()
            pr.enable()

        # NOTE(review): the 'python' and 'vec' modes are deliberately
        # disabled; the assignments after each raise are unreachable.
        if mode == 'python':
            raise NotImplementedError
            out = bls_pulse_python(time, flux, fluxerr, nbins, segment, mindur, maxdur,
                direction=direction)
        elif mode == 'vec':
            raise NotImplementedError
            out = bls_pulse_vec(time, flux, fluxerr, nbins, segment, mindur, maxdur,
                direction=direction)
        elif mode == 'cython':
            out = bls_pulse_cython(time, flux, fluxerr, nbins, segment, mindur, maxdur,
                direction=direction)
        else:
            raise ValueError('Invalid mode: %s' % mode)

        if profile:
            # Turn off profiling.
            pr.disable()
            ps = pstats.Stats(pr, stream=sys.stderr).sort_stats('time')
            ps.print_stats()

        # direction == 2 reports dips and blips separately; any other value
        # yields a single set of result arrays.
        if direction == 2:
            srsq_dip = out['srsq_dip']
            duration_dip = out['duration_dip']
            depth_dip = out['depth_dip']
            midtime_dip = out['midtime_dip']
            srsq_blip = out['srsq_blip']
            duration_blip = out['duration_blip']
            depth_blip = out['depth_blip']
            midtime_blip = out['midtime_blip']
            segstart = out['segstart']
            segend = out['segend']

            # Print output.
            # 'encoded' emits one tab-separated machine-readable line;
            # 'normal' emits a human-readable table.
            if fmt == 'encoded':
                print "\t".join([k, q, encode_array(segstart), encode_array(segend), encode_array(srsq_dip),
                    encode_array(duration_dip), encode_array(depth_dip), encode_array(midtime_dip),
                    encode_array(srsq_blip), encode_array(duration_blip),
                    encode_array(depth_blip), encode_array(midtime_blip)])
            elif fmt == 'normal':
                print "-" * 120
                print "Kepler " + k
                print "Quarters: " + q
                print "-" * 120
                print '{0: <7s} {1: <13s} {2: <13s} {3: <13s} {4: <13s} {5: <13s} {6: <13s} {7: <13s} ' \
                    '{8: <13s}'.format('Segment', 'Dip SR^2', 'Dip dur.', 'Dip depth', 'Dip mid.',
                    'Blip SR^2', 'Blip dur.', 'Blip depth', 'Blip mid.')
                for i in xrange(len(srsq_dip)):
                    print '{0: <7d} {1: <13.6f} {2: <13.6f} {3: <13.6f} {4: <13.6f} ' \
                        '{5: <13.6f} {6: <13.6f} {7: <13.6f} {8: <13.6f}'.format(i,
                        srsq_dip[i], duration_dip[i], depth_dip[i], midtime_dip[i],
                        srsq_blip[i], duration_blip[i], depth_blip[i], midtime_blip[i])
                print "-" * 120
                print
                print
        else:
            srsq = out['srsq']
            duration = out['duration']
            depth = out['depth']
            midtime = out['midtime']
            segstart = out['segstart']
            segend = out['segend']

            # Print output.
            if fmt == 'encoded':
                print "\t".join([k, q, encode_array(segstart), encode_array(segend), encode_array(srsq),
                    encode_array(duration), encode_array(depth), encode_array(midtime)])
            elif fmt == 'normal':
                print "-" * 80
                print "Kepler " + k
                print "Quarters: " + q
                print "-" * 80
                print '{0: <7s} {1: <13s} {2: <10s} {3: <9s} {4: <13s}'.format('Segment',
                    'SR^2', 'Duration', 'Depth', 'Midtime')
                for i in xrange(len(srsq)):
                    print '{0: <7d} {1: <13.6f} {2: <10.6f} {3: <9.6f} {4: <13.6f}'.format(i,
                        srsq[i], duration[i], depth[i], midtime[i])
                print "-" * 80
                print
                print
class Configuration(object):
    """Class storing configuration details for Modula.

    Wraps a ConfigParser-style object and exposes a flat, dict-like
    "section.option" key syntax; bare keys map to the "@global" section.
    """

    def __init__(self, rootdir=".", cfg=None):
        """Load configuration from *cfg* or from modules.cfg / modula.cfg
        under *rootdir*, then normalise paths.

        Requires the "@inputs" section to exist (the path-normalisation
        loop iterates over it).
        """
        # Built-in fallbacks, applied only when the configuration omits them.
        defaults = {
            "@paths.modules": "modules",
            "@paths.storage": "storage"
        }

        if cfg:
            self.cfg = cfg
        else:
            cfgfiles = ["modules.cfg", "modula.cfg"]
            cfgfiles = [os.path.join(rootdir, file) for file in cfgfiles]

            self.cfg = Parser()
            self.cfg.read(cfgfiles)

        # Add defaults
        for k, v in defaults.items():
            if k not in self:
                self[k] = v

        # Make sure all paths are absolute
        for k, v in self.items("@paths"):
            self["@paths.%s" % k] = os.path.abspath(os.path.expanduser(v))

        # Make sure all input files are absolute
        for k, v in self.items("@inputs"):
            self["@inputs.%s" % k] = os.path.abspath(os.path.expanduser(v))

    def _parse_name(self, name):
        # "section.option" -> (section, option); bare names go to "@global".
        if "." in name:
            section, option = name.split(".", 1)
        else:
            section = "@global"
            option = name
        return section, option

    def sections(self):
        """Return the list of section names."""
        return self.cfg.sections()

    def get(self, name):
        """Return the raw string value for *name*."""
        return self.cfg.get(*self._parse_name(name))

    def getInt(self, name):
        """Return the value for *name* coerced to int."""
        return self.cfg.getint(*self._parse_name(name))

    def getFloat(self, name):
        """Return the value for *name* coerced to float."""
        return self.cfg.getfloat(*self._parse_name(name))

    def getBoolean(self, name):
        """Return the value for *name* coerced to bool."""
        return self.cfg.getboolean(*self._parse_name(name))

    def items(self, section):
        """Return (option, value) pairs of *section*."""
        return self.cfg.items(section)

    def __contains__(self, name):
        return self.cfg.has_option(*self._parse_name(name))

    def __getitem__(self, name):
        # NOTE(review): ConfigParser raises NoSectionError/NoOptionError,
        # not KeyError, so this except clause may not fire for missing
        # options -- kept as-is to preserve the existing contract.
        try:
            return self.cfg.get(*self._parse_name(name))
        except KeyError:
            raise KeyError(name)

    def __setitem__(self, name, value):
        section, option = self._parse_name(name)
        if not self.cfg.has_section(section):
            self.cfg.add_section(section)
        self.cfg.set(section, option, value)

    def __delitem__(self, name):
        section, option = self._parse_name(name)
        # BUG FIX: the old code did `raise NoOptionError()` -- NoOptionError
        # requires (option, section) arguments, so instantiating it with none
        # raised TypeError, which escaped the `except NoOptionError` handler.
        # remove_option() returns False when the option does not exist, which
        # is all we need to signal a missing key.
        if not self.cfg.remove_option(section, option):
            raise KeyError(name)
Exemple #35
0
cc = Cosmology(cosmoDict, constDict, lmax)
theory = cc.theory

expX = Config.get('general', 'X')
expY = Config.get('general', 'Y')

beamX,beamFileX,fgFileX,noiseTX,noisePX,tellminX,tellmaxX,pellminX, \
    pellmaxX,lxcutTX,lycutTX,lxcutPX,lycutPX,lkneeTX,alphaTX,lkneePX,alphaPX = getLensParams(Config,expX)
beamY,beamFileY,fgFileY,noiseTY,noisePY,tellminY,tellmaxY,pellminY, \
    pellmaxY,lxcutTY,lycutTY,lxcutPY,lycutPY,lkneeTY,alphaTY,lkneePY,alphaPY = getLensParams(Config,expY)

cmb_bin_edges = np.arange(10, 9000, 10)

TCMB = Config.getfloat('general', 'TCMB')
gradCut = Config.getint('general', 'gradCut')
halo = Config.getboolean('general', 'halo')
fsky = Config.getfloat('general', 'sqDeg') / 41250.
kmin = 40

deg = 10.
px = 0.5
dell = 10

kellrange = np.arange(80., 2100., 10.)

Nlmvinv = 0.
pl = Plotter(scaleY='log')
for polComb in ['TT', 'TE', 'EE', 'EB']:
    kmax = getMax(polComb, tellmaxY, pellmaxY)
    bin_edges = np.arange(kmin, kmax, dell) + dell
    lmap = lm.makeEmptyCEATemplate(raSizeDeg=deg,
def main(args):
    """Train LDA topic models for a topicexplorer corpus.

    Dispatches to cluster mode when requested; otherwise prompts for any
    missing parameters (topic counts, iterations, context type), builds new
    models and/or continues training existing ones, then writes the chosen
    settings back to the config file.
    """
    if args.cluster:
        cluster(args.cluster, args.config_file)
        return

    config = ConfigParser({"sentences": "False"})
    config.read(args.config_file)
    corpus_filename = config.get("main", "corpus_file")
    model_path = config.get("main", "path")

    # Sentence-level corpora use a different Corpus implementation.
    if config.getboolean("main", "sentences"):
        from vsm.extensions.ldasentences import CorpusSent as Corpus
    else:
        from vsm.corpus import Corpus

    if args.k is None:
        try:
            if config.get("main", "topics"):
                # NOTE(review): eval() of a config value -- acceptable only
                # because the config file is user-owned/trusted input.
                default = ' '.join(map(str, eval(config.get("main", "topics"))))
                if args.quiet:
                    args.k = [int(n) for n in default.split()]
            else:
                # BUG FIX: `raise NoOptionError` without arguments raised
                # TypeError (the class requires option and section), which
                # escaped the `except NoOptionError` handler below.
                raise NoOptionError("topics", "main")
        except NoOptionError:
            default = ' '.join(map(str, range(20, 100, 20)))

        while args.k is None:
            ks = input("Number of Topics [Default '{0}']: ".format(default))
            try:
                if ks:
                    args.k = [int(n) for n in ks.split()]
                elif not ks.strip():
                    # Empty input accepts the default.
                    args.k = [int(n) for n in default.split()]

                if args.k:
                    print("\nTIP: number of topics can be specified with argument '-k N N N ...':")
                    print("         topicexplorer train %s -k %s\n" %\
                        (args.config_file, ' '.join(map(str, args.k))))
            except ValueError:
                print("Enter valid integers, separated by spaces!")

    if args.processes < 0:
        # A negative value means "all cores minus N".
        import multiprocessing
        args.processes = multiprocessing.cpu_count() + args.processes

    print("Loading corpus... ")
    corpus = Corpus.load(corpus_filename)

    try:
        model_pattern = config.get("main", "model_pattern")
    except NoOptionError:
        model_pattern = None

    if (model_pattern is not None and not args.rebuild and (args.quiet or args.cont or
            bool_prompt("""Existing topic models found. You can continue training or start a new model. 
Do you want to continue training your existing models? """, default=True))):

        from vsm.model.lda import LDA
        # Load one existing model only to read its iteration count.
        m = LDA.load(model_pattern.format(args.k[0]),
                     multiprocessing=args.processes > 1,
                     n_proc=args.processes)

        if args.iter is None and not args.quiet:    # pragma: no cover
            args.iter = int_prompt("Total number of training iterations:",
                                   default=int(m.iteration * 1.5), min=m.iteration)

            print("\nTIP: number of training iterations can be specified with argument '--iter N':")
            print("         topicexplorer train --iter %d %s\n" % (args.iter, args.config_file))
        elif args.iter is None and args.quiet:      # pragma: no cover
            args.iter = int(m.iteration * 1.5)

        del m

        # if the set changes, build some new models and continue some old ones

        config_topics = eval(config.get("main", "topics"))
        if args.k != config_topics:
            new_models = set(args.k) - set(config_topics)
            continuing_models = set(args.k) & set(config_topics)

            build_models(corpus, corpus_filename, model_path,
                         config.get("main", "context_type"),
                         new_models, n_iterations=args.iter,
                         n_proc=args.processes, seed=args.seed,
                         dry_run=args.dry_run)

            model_pattern = continue_training(model_pattern, continuing_models,
                                              args.iter, n_proc=args.processes,
                                              dry_run=args.dry_run)

        else:
            model_pattern = continue_training(model_pattern, args.k, args.iter,
                                              n_proc=args.processes,
                                              dry_run=args.dry_run)
    else:
        # build a new model
        if args.iter is None and not args.quiet:    # pragma: no cover
            args.iter = int_prompt("Number of training iterations:", default=200)

            print("\nTIP: number of training iterations can be specified with argument '--iter N':")
            print("         topicexplorer train --iter %d %s\n" % (args.iter, args.config_file))
        elif args.iter is None and args.quiet:      # pragma: no cover
            args.iter = 200

        # TODO: if only one context_type, make it just the one context type.
        ctxs = corpus.context_types
        if len(ctxs) == 1:
            args.context_type = ctxs[0]
        else:
            # Prompt until a valid context type is chosen; the smallest one
            # (listed first, upper-cased) is the default.
            ctxs = sorted(ctxs, key=lambda ctx: len(corpus.view_contexts(ctx)))
            if args.context_type not in ctxs:
                while args.context_type not in ctxs:
                    contexts = ctxs[:]
                    contexts[0] = contexts[0].upper()
                    contexts = '/'.join(contexts)
                    args.context_type = input("Select a context type [%s] : " % contexts)
                    if args.context_type.strip() == '':
                        args.context_type = ctxs[0]
                    if args.context_type == ctxs[0].upper():
                        args.context_type = ctxs[0]

                print("\nTIP: context type can be specified with argument '--context-type TYPE':")
                print("         topicexplorer train --context-type %s %s\n" % (args.context_type, args.config_file))


        print("\nTIP: This configuration can be automated as:")
        print("         topicexplorer train %s --iter %d --context-type %s -k %s\n" %\
            (args.config_file, args.iter, args.context_type,
                ' '.join(map(str, args.k))))
        model_pattern = build_models(corpus, corpus_filename, model_path,
                                     args.context_type, args.k,
                                     n_iterations=args.iter,
                                     n_proc=args.processes, seed=args.seed,
                                     dry_run=args.dry_run)
    # Persist the final choices back into the config file.
    config.set("main", "model_pattern", model_pattern)
    if args.context_type:
        # test for presence, since continuing doesn't require context_type
        config.set("main", "context_type", args.context_type)
    args.k.sort()
    config.set("main", "topics", str(args.k))

    if not args.dry_run:
        if config.has_option("main", "cluster"):
            # Stale cluster artifacts are removed along with the option.
            cluster_path = config.get("main", "cluster", fallback=None)
            config.remove_option("main", "cluster")
            try:
                os.remove(cluster_path)
            except IOError:
                # fail silently on IOError
                pass


        with open(args.config_file, "w") as configfh:
            config.write(configfh)
        with open(config_file, 'r') as f:
            config.readfp(f, config_file)
    except IOError:
        pass
    for option in ("jid", "jabber_password", "conference_domain", "mode",
                   "zulip_email_suffix", "jabber_server_address",
                   "jabber_server_port"):
        if (getattr(options, option) is None
                and config.has_option("jabber_mirror", option)):
            setattr(options, option, config.get("jabber_mirror", option))

    for option in ("no_use_tls", ):
        if getattr(options, option) is None:
            if config.has_option("jabber_mirror", option):
                setattr(options, option,
                        config.getboolean("jabber_mirror", option))
            else:
                setattr(options, option, False)

    if options.mode is None:
        options.mode = "personal"

    if options.zulip_email_suffix is None:
        options.zulip_email_suffix = ''

    if options.mode not in ('public', 'personal'):
        config_error(
            "Bad value for --mode: must be one of 'public' or 'personal'")

    if None in (options.jid, options.jabber_password):
        config_error(
Exemple #38
0
class GlobalSettings(GObject.Object):

    """
    Pitivi app settings.

    The settings object loads settings from different sources, currently:
    - the local configuration file,
    - environment variables.

    Modules declare which settings they wish to access by calling the
    addConfigOption() class method during initialization.

    @cvar options: A dictionary of available settings.
    @cvar environment: A set of the controlled environment variables.
    """

    options = {}
    environment = set()
    defaults = {}

    __gsignals__ = {}

    def __init__(self, **unused_kwargs):
        GObject.Object.__init__(self)
        self._config = SafeConfigParser()
        self._readSettingsFromConfigurationFile()
        self._readSettingsFromEnvironmentVariables()

    def _readSettingsFromConfigurationFile(self):
        """
        Read the configuration from the user configuration file.

        Unreadable or unparsable files are ignored so startup can proceed
        with the registered default values.
        """
        try:
            conf_file_path = os.path.join(xdg_config_home(), "pitivi.conf")
            self._config.read(conf_file_path)
        except UnicodeDecodeError:
            unicode_error_dialog()
            return
        except ParsingError:
            return

        for (section, attrname, typ, key, env, value) in self.iterAllOptions():
            if not self._config.has_section(section):
                continue
            if key and self._config.has_option(section, key):
                # Fixed: the original condition was "typ == int or typ == int",
                # a redundant duplicate of the same comparison.
                if typ == int:
                    try:
                        value = self._config.getint(section, key)
                    except ValueError:
                        # In previous configurations we incorrectly stored
                        # ints using float values.
                        value = int(self._config.getfloat(section, key))
                elif typ == float:
                    value = self._config.getfloat(section, key)
                elif typ == bool:
                    value = self._config.getboolean(section, key)
                else:
                    value = self._config.get(section, key)
                setattr(self, attrname, value)

    @classmethod
    def readSettingSectionFromFile(cls, section):
        """
        Force reading a particular section of the settings file.

        Use this if you dynamically determine settings sections/keys at runtime
        (like in tabsmanager.py). Otherwise, the settings file would be read
        only once (at the initialization phase of your module) and your config
        sections would never be read, and thus values would be reset to defaults
        on every startup because GlobalSettings would think they don't exist.
        """
        # Fixed: the original signature was (self, cls, section) under
        # @classmethod, so the class object was bound to "self" and the
        # caller's section argument to "cls", breaking every call.
        if cls._config.has_section(section):
            for option in cls._config.options(section):
                # We don't know the value type in advance, just try them all.
                # The typed getters raise ValueError on a parse failure, so we
                # catch exactly that instead of a bare except.
                try:
                    value = cls._config.getfloat(section, option)
                except ValueError:
                    try:
                        value = cls._config.getint(section, option)
                    except ValueError:
                        try:
                            value = cls._config.getboolean(section, option)
                        except ValueError:
                            value = cls._config.get(section, option)

                setattr(cls, section + option, value)

    def _readSettingsFromEnvironmentVariables(self):
        """
        Override options values using their registered environment variables.
        """
        for section, attrname, typ, key, env, value in self.iterAllOptions():
            if not env:
                # This option does not have an environment variable name.
                continue
            var = get_env_by_type(typ, env)
            if var is not None:
                setattr(self, attrname, var)

    def _writeSettingsToConfigurationFile(self):
        """
        Serialize every keyed option to the user's configuration file.

        Options whose current value is None are removed from the file.
        I/O errors are silently ignored: failing to persist settings is not
        worth crashing the application.
        """
        conf_file_path = os.path.join(xdg_config_home(), "pitivi.conf")

        for (section, attrname, typ, key, env_var, value) in self.iterAllOptions():
            if not self._config.has_section(section):
                self._config.add_section(section)
            if key:
                if value is not None:
                    self._config.set(section, key, str(value))
                else:
                    self._config.remove_option(section, key)
        # Fixed: the original "except IOError as OSError" shadowed the OSError
        # builtin, and the file handle leaked if write() raised. A context
        # manager guarantees the file is closed either way.
        try:
            with open(conf_file_path, 'w') as conf_file:
                self._config.write(conf_file)
        except IOError:
            return

    def storeSettings(self):
        """
        Write settings to the user's local configuration file. Note that only
        those settings which were added with a section and a key value are
        stored.
        """
        self._writeSettingsToConfigurationFile()

    def iterAllOptions(self):
        """
        Iterate over all registered options.

        @return: an iterator which yields a tuple of (section, attrname, type,
        key, environment, value) for each option.
        """
        for section, options in list(self.options.items()):
            for attrname, (typ, key, environment) in list(options.items()):
                yield section, attrname, typ, key, environment, getattr(self, attrname)

    def isDefault(self, attrname):
        """Return whether the option currently holds its registered default."""
        return getattr(self, attrname) == self.defaults[attrname]

    def setDefault(self, attrname):
        """Reset the option to its registered default value."""
        setattr(self, attrname, self.defaults[attrname])

    @classmethod
    def addConfigOption(cls, attrname, type_=None, section=None, key=None,
                        environment=None, default=None, notify=False,):
        """
        Add a configuration option.

        This function should be called during module initialization, before
        the config file is actually read. By default, only options registered
        beforehand will be loaded.
        See mainwindow.py and medialibrary.py for examples of usage.

        If you want to add configuration options after initialization,
        use the readSettingSectionFromFile method to force reading later on.
        See tabsmanager.py for an example of such a scenario.

        @param attrname: the attribute of this class which represents the option
        @type attrname: C{str}
        @param type_: type of the attribute. Unnecessary if default is given.
        @type type_: a builtin or class
        @param section: The section of the config file under which this option is
        saved. This section must have been added with addConfigSection(). Not
        necessary if key is not given.
        @param key: the key under which this option is to be saved. Can be none if
        this option should not be saved.
        @type key: C{str}
        @param notify: whether or not this attribute should emit notification
        signals when modified (default is False).
        @type notify: C{boolean}
        """
        if section and section not in cls.options:
            raise ConfigError(
                "You must add the section \"%s\" first." % section)
        if key and not section:
            raise ConfigError(
                "You must specify a section for key \"%s\"" % key)
        # Fixed: the messages below had "%s" placeholders but no formatting
        # arguments, so the literal placeholder was shown to the user.
        if section and key in cls.options[section]:
            raise ConfigError("Option \"%s\" is already in use." % key)
        if hasattr(cls, attrname):
            raise ConfigError(
                "Settings attribute \"%s\" is already in use." % attrname)
        if environment and environment in cls.environment:
            raise ConfigError("Settings environment variable \"%s\" is "
                              "already in use." % environment)
        if not type_ and default is None:
            raise ConfigError("Settings attribute \"%s\" must have a"
                              " type or a default." % attrname)
        if not type_:
            # Infer the type from the default value.
            type_ = type(default)
        if notify:
            # Replace the plain attribute with a Notification descriptor and
            # back the actual value with a "_"-prefixed attribute.
            notification = Notification(attrname)
            setattr(cls, attrname, notification)
            setattr(cls, "_" + attrname, default)
            GObject.signal_new(notification.signame,
                               cls,
                               GObject.SIGNAL_RUN_LAST,
                               None,
                               ())
        else:
            setattr(cls, attrname, default)
        if section and key:
            cls.options[section][attrname] = type_, key, environment
        if environment:
            # Fixed: the original unconditionally added None to the set for
            # options registered without an environment variable.
            cls.environment.add(environment)
        cls.defaults[attrname] = default

    @classmethod
    def addConfigSection(cls, section):
        """
        Add a section to the local config file.

        @param section: The section name. This section must not already exist.
        @type section: C{str}
        """
        if section in cls.options:
            raise ConfigError("Duplicate Section \"%s\"." % section)
        cls.options[section] = {}

    @classmethod
    def notifiesConfigOption(cls, attrname):
        """Return whether a notification signal is registered for attrname."""
        signal_name = Notification.signalName(attrname)
        # Fixed: the lookup result was computed but never returned, making
        # this predicate always return None.
        return bool(GObject.signal_lookup(signal_name, cls))
Exemple #39
0
except ImportError:
    from ConfigParser import SafeConfigParser

# Module-level test configuration, loaded once at import time.
# ConfigParser.read() silently skips a missing "test.cfg", leaving the
# parser with no sections (so no decorator metafunctions get defined below).
parser = SafeConfigParser()
parser.read("test.cfg")


def do_nothing(self):
    """Placeholder test body: accept and ignore *self*, return None."""
    return None


# add a bunch of decorator metafunctions like LIB_CORE
# which can be wrapped around individual tests as needed
for section in parser.sections():
    for option in parser.options(section):
        if (parser.getboolean(section, option)):
            # Option enabled: SECTION_OPTION is the identity decorator and
            # leaves the wrapped test untouched.
            vars()["%s_%s" % (section.upper(),
                              option.upper())] = lambda function: function
        else:
            # Option disabled: SECTION_OPTION discards the wrapped test and
            # substitutes the do_nothing placeholder, effectively skipping it.
            vars()["%s_%s" % (section.upper(),
                              option.upper())] = lambda function: do_nothing


def BLANK_PCM_Reader(length, sample_rate=44100, channels=2,
                     bits_per_sample=16, channel_mask=None):
    from audiotools.decoders import SameSample

    if (channel_mask is None):
        channel_mask = int(audiotools.ChannelMask.from_channels(channels))

    return SameSample(sample=1,
Exemple #40
0
    def __init__(self):
        """Load simulation parameters from 'Par_file', CLI flags and defaults.

        Builds a ConfigParser pre-seeded with default values, overrides them
        from the section-less 'Par_file', derives grid/GLL bookkeeping
        quantities, and looks up the GLL/GLJ points and weights tables.
        """
        # NOTE(review): the defaults below are non-string values handed to
        # SafeConfigParser; Python 3's configparser expects string option
        # values, so the typed getters may only work when every key is
        # actually present in Par_file — confirm on supported versions.
        cp = SafeConfigParser(defaults={
            # True if axial symmetry
            'axisym': True,
            # "Physical" length of the domain (in meters)
            'LENGTH': 3000,
            # Number of elements
            'NSPEC': 250,
            # Degree of the basis functions
            'N': 4,
            # Degree of basis functions in the first element
            'NGLJ': 4,
            # Number of time steps
            'NTS': 2,
            # Courant CFL number
            'CFL': 0.45,
            # Grid description
            'GRID_TYPE': 'homogeneous',
            'GRID_FILE': 'grid_homogeneous.txt',
            'TICKS_FILE': 'ticks_homogeneous.txt',
            # kg/m^3
            'DENSITY': 2500,
            # Pa
            'RIGIDITY': 30000000000,
            # Duration of the source in dt
            'TSOURCE': 100,
            # GLL point number on which the source is situated
            'ISOURCE': 0,
            # Maximum amplitude
            'MAX_AMPL': 1e7,
            # Source's type
            'SOURCE_TYPE': 'ricker',
            # Decay rate for the ricker
            'DECAY_RATE': 2.628,
            # Time steps between snapshots (0 == disabled)
            'SNAPSHOT': 0,
            # Plot grid, source, and periodic results
            'PLOT': False,
            # One image is displayed each DPLOT time step
            'DPLOT': 10,
        })
        # Par_file has no section headers, so prepend a fake [global] section
        # (Python 3) or wrap the stream in FakeGlobalSectionHead (Python 2).
        with open('Par_file') as f:
            try:
                # Python 3
                cp.read_string('[global]\n' + f.read(), source='Par_file')
            except AttributeError:
                # Python 2
                cp.readfp(FakeGlobalSectionHead(f))

        # Typed accessors; string options are stripped of surrounding quotes.
        self.axisym = cp.getboolean('global', 'AXISYM')
        self.length = cp.getfloat('global', 'LENGTH')
        self.nSpec = cp.getint('global', 'NSPEC')
        self.N = cp.getint('global', 'N')
        self.NGLJ = cp.getint('global', 'NGLJ')
        self.nts = cp.getint('global', 'NTS')
        self.cfl = cp.getfloat('global', 'CFL')
        self.gridType = cp.get('global', 'GRID_TYPE').strip("'\"")
        self.gridFile = cp.get('global', 'GRID_FILE').strip("'\"")
        self.ticksFile = cp.get('global', 'TICKS_FILE').strip("'\"")
        self.meanRho = cp.getfloat('global', 'DENSITY')
        self.meanMu = cp.getfloat('global', 'RIGIDITY')
        self.tSource = cp.getfloat('global', 'TSOURCE')
        self.iSource = cp.getint('global', 'ISOURCE')
        self.maxAmpl = cp.getfloat('global', 'MAX_AMPL')
        self.sourceType = cp.get('global', 'SOURCE_TYPE').strip("'\"")
        self.decayRate = cp.getfloat('global', 'DECAY_RATE')
        self.snapshot = cp.getint('global', 'SNAPSHOT')
        self.plot = cp.getboolean('global', 'PLOT')
        self.dplot = cp.getfloat('global', 'DPLOT')

        # Command-line flags can only disable plotting, never force it on.
        # NOTE(review): parsing sys.argv inside __init__ makes this class
        # hard to reuse from scripts that define their own arguments.
        parser = argparse.ArgumentParser(
            description='Spectral element method in a 1D medium')
        parser.add_argument('--no-plot', action='store_true',
                            help='Force disable plotting')
        args = parser.parse_args()
        self.plot = self.plot and not args.no_plot

        # Number of GLL points per elements
        self.nGLL = self.N + 1
        # Number of GLJ in the first element
        self.nGLJ = self.NGLJ + 1
        # Number of points in the array
        self.nGlob = (self.nSpec - 1) * self.N + self.NGLJ + 1
        self.ibool = self.make_global_index()
        # Time step (will be updated)
        self.dt = 0

        # Gauss Lobatto Legendre points and integration weights:
        # the tables in the gll module only cover certain degrees, hence the
        # KeyError -> ValueError translation with a readable message.
        try:
            # Position of the GLL points in [-1,1]
            self.ksiGLL = gll.GLL_POINTS[self.N]
            # Integration weights
            self.wGLL = gll.GLL_WEIGHTS[self.N]
        except KeyError:
            raise ValueError('N = %d is invalid!' % (self.N, ))
        try:
            # Position of the GLJ points in [-1,1]
            self.ksiGLJ = gll.GLJ_POINTS[self.NGLJ]
            # Integration weights
            self.wGLJ = gll.GLJ_WEIGHTS[self.NGLJ]
        except KeyError:
            raise ValueError('NGLJ = %d is invalid!' % (self.NGLJ, ))

        # Derivatives of the Lagrange polynomials at the GLL points
        self.deriv = gll.lagrange_derivative(self.ksiGLL)
        self.derivGLJ = gll.glj_derivative(self.ksiGLJ)
    for opt, arg in options:
        if opt in ('-l', '--list'):
            list_option = True
        elif opt in ('-c', '--config'):
            config_file = arg
        else:
            showJelp("unknow option")

    config = SafeConfigParser()
    config.read(config_file)

    if debug: eprint("CONFIG FILE PATH: " + config_file)

    try:
        debug = config.getboolean('shh', 'debug')
    except:
        pass

    try:
        SHH_INSTANCE = config.get('shh', 'instance').strip('"').strip()
        if debug: eprint("INSTANCE: " + SHH_INSTANCE)
    except Exception as e:
        sys.exit("ERROR: instance is mandatory - " + str(e))

    try:
        SHH_USERNAME = config.get('shh', 'username').strip('"').strip()
        if debug: eprint("SHH_USERNAME: "******"ERROR: username is mandatory")