Example #1
 def __init__(self):
     self._categories = []
     self._config_parser = ConfigParser.RawConfigParser()
     self.input_seq_filepaths = []
Example #2
import string
import sys
import base64
import ConfigParser

STR_RED = "\033[01;31m{0}\033[00m"
STR_GREEN = "\033[1;36m{0}\033[00m"

# Script configuration

# dry-run: set it to False to actually write into the OpenLDAP
#DRY_RUN = True
DRY_RUN = False

# Read the AD and LDAP parameters
config = ConfigParser.RawConfigParser()
config.read('.ldap.conf')

try:
    # Active Directory
    IN_LDAP_URI = config.get('IN_LDAP', 'IN_LDAP_URI')
    IN_LDAP_BINDDN = config.get('IN_LDAP', 'IN_LDAP_BINDDN')
    IN_LDAP_PASSWD = config.get('IN_LDAP', 'IN_LDAP_PASSWD')

    # OpenLDAP
    OUT_LDAP_URI = config.get('OUT_LDAP', 'OUT_LDAP_URI')
    OUT_LDAP_BINDDN = config.get('OUT_LDAP', 'OUT_LDAP_BINDDN')
    OUT_LDAP_PASSWD = config.get('OUT_LDAP', 'OUT_LDAP_PASSWD')

except ConfigParser.Error, err:
    print 'Oops, an error in your config file (%s)' % err
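
A minimal companion sketch showing how the .ldap.conf file read above could be generated. The section and option names are taken from the config.get calls in this example; the URIs, bind DNs and passwords are placeholders:

import ConfigParser

config = ConfigParser.RawConfigParser()
config.add_section('IN_LDAP')
config.set('IN_LDAP', 'IN_LDAP_URI', 'ldap://ad.example.com')
config.set('IN_LDAP', 'IN_LDAP_BINDDN', 'CN=reader,DC=example,DC=com')
config.set('IN_LDAP', 'IN_LDAP_PASSWD', 'changeme')
config.add_section('OUT_LDAP')
config.set('OUT_LDAP', 'OUT_LDAP_URI', 'ldap://openldap.example.com')
config.set('OUT_LDAP', 'OUT_LDAP_BINDDN', 'cn=admin,dc=example,dc=com')
config.set('OUT_LDAP', 'OUT_LDAP_PASSWD', 'changeme')

# write the file next to the script, as the example expects
with open('.ldap.conf', 'wb') as f:
    config.write(f)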
Example #3
 def _build_config_wrapper(self, data, filename):
     fileconfig = configparser.RawConfigParser()
     self._parse_config(data, filename, fileconfig, set())
     return ConfigWrapper(self.printer, fileconfig, {}, 'printer')
Example #4
def load_master(master, force_format="default"):
    try:
        tree = ET.parse(master)
        root = tree.getroot()
    except IOError:
        root = ET.fromstring(master)
    xmlpipes, xmloptions = list(root)

    options = ConfigParser.RawConfigParser()
    exporter = None
    couples = {}
    for xmloption in xmloptions:
        section = xmloption.tag
        options.add_section(section)
        attribs = {}
        for key, val in xmloption.attrib.items():
            key = key.replace(u"-", u"_")
            try:
                attribs[key] = sem.misc.str2bool(val)
            except ValueError:
                attribs[key] = val
        for key, val in attribs.items():
            options.set(section, key, val)
        if xmloption.tag == "export":
            couples = dict(options.items("export"))
            export_format = couples["format"]
            if force_format is not None and force_format != "default":
                sem_tagger_logger.info("using forced format: %s" %
                                       force_format)
                export_format = force_format
            exporter = sem.exporters.get_exporter(export_format)(**couples)

    if get_option(options, "log", "log_file") is not None:
        sem_tagger_logger.addHandler(
            file_handler(get_option(options, "log", "log_file")))
    sem_tagger_logger.setLevel(get_option(options, "log", "level", "WARNING"))

    classes = {}
    pipes = []
    for xmlpipe in xmlpipes:
        if xmlpipe.tag == "export": continue

        Class = classes.get(xmlpipe.tag, None)
        if Class is None:
            Class = get_module(xmlpipe.tag)
            classes[xmlpipe.tag] = Class
        arguments = {}
        for key, value in xmlpipe.attrib.items():
            if value.startswith(u"~/"):
                value = os.path.expanduser(value)
            elif sem.misc.is_relative_path(value):
                value = os.path.abspath(
                    os.path.join(os.path.dirname(master), value))
            arguments[key.replace(u"-", u"_")] = value
        for section in options.sections():
            if section == "export": continue
            for key, value in options.items(section):
                if key not in arguments:
                    arguments[key] = value
                else:
                    sem_tagger_logger.warn(
                        'Not adding already existing option: %s' % (key))
        sem_tagger_logger.info("loading %s" % xmlpipe.tag)
        pipes.append(Class(**arguments))
    pipeline = sem.modules.pipeline.Pipeline(pipes)

    return pipeline, options, exporter, couples
Example #5
    def __init__(self, root):
        self.images = []
        self.axes = []
        self.filename = ''
        self.sequenz = Daten()
        self.serialState = ''
        self.decstart = 0
        self.decend = 1500
        #GUI
        self.config = ConfigParser.RawConfigParser()
        self.configfile = 'LogikAnalyse.cfg'
        self.defaultDir = "Daten"
        try:
            self.loadConfig()
        except:
            print 'No config file found, using initial values'
            #initial defaults
            self.pulseMin = 300
            self.samples = 1500
            self.comPortSelect = 'COM9'
            self.messTyp = 'pulse'

        self.root = root
        self.root.title('Logik Analyse')
        self.root.protocol("WM_DELETE_WINDOW", self.exit_)

        self.fig = matplotlib.figure.Figure()
        self.axes = self.fig.add_subplot(1, 1, 1)

        figframe = Tk.Frame(root)
        figframe.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
        canvas = FigureCanvasTkAgg(self.fig, master=figframe)
        toolbar = NavigationToolbar2TkAgg(canvas, figframe)
        toolbar.pack(side=Tk.TOP, fill=Tk.BOTH)  # expand=1)
        self.modify_toolbar()

        self.scaleY()
        self.add_menu()

        configframe = Tk.Frame(root)
        configframe.pack(side=Tk.BOTTOM, fill=Tk.BOTH, expand=1)

        self.samplesSpinboxVal = Tk.StringVar()
        self.samplesSpinboxVal.set(str(self.samples))
        self.samplesSpinboxLabel = Tk.Label(configframe,
                                            text='Samples').pack(side=Tk.LEFT,
                                                                 anchor=Tk.E)
        self.samplesSpinbox = Tk.Spinbox(configframe,
                                         from_=500,
                                         to=5000,
                                         increment=500,
                                         width=5,
                                         textvariable=self.samplesSpinboxVal)
        self.samplesSpinbox.pack(side=Tk.LEFT)
        self.pulseMinSpinboxVal = Tk.StringVar()
        self.pulseMinSpinboxVal.set(str(self.pulseMin))
        self.pulseMinSpinboxLabel = Tk.Label(
            configframe, text='pulseMin').pack(side=Tk.LEFT, anchor=Tk.E)
        self.pulseMinSpinbox = Tk.Spinbox(configframe,
                                          from_=100,
                                          to=500,
                                          increment=50,
                                          width=5,
                                          textvariable=self.pulseMinSpinboxVal)
        self.pulseMinSpinbox.pack(side=Tk.LEFT)
        self.comPortSpinboxVal = Tk.StringVar()
        self.comPortSpinboxVal.set(str(self.comPortSelect))
        #print self.comPortSpinboxVal.get()
        self.comPorts = tuple(serial_ports())
        self.comPortSpinboxLabel = Tk.Label(configframe,
                                            text='Port').pack(side=Tk.LEFT,
                                                              anchor=Tk.E)
        self.comPortSpinbox = Tk.Spinbox(configframe,
                                         command=self.switchconnectSer,  # pass the callback, don't call it
                                         values=self.comPorts,
                                         textvariable=self.comPortSpinboxVal,
                                         width=14)
        self.comPortSpinbox.pack(side=Tk.LEFT)
        self.messTypRadioVal = Tk.StringVar()
        self.messTypRadioVal.set(self.messTyp)
        self.messTypDtRadio = Tk.Radiobutton(configframe,
                                             text='dt',
                                             padx=0,
                                             variable=self.messTypRadioVal,
                                             value='dt')
        self.messTypDtRadio.pack(side=Tk.LEFT, anchor=Tk.W)
        self.messTypPulseRadio = Tk.Radiobutton(configframe,
                                                text='Pulse',
                                                padx=0,
                                                variable=self.messTypRadioVal,
                                                value='pulse')
        self.messTypPulseRadio.pack(side=Tk.LEFT, anchor=Tk.W)

        canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)

        self.root.bind('<Control-y>', lambda e: self.scaleY())
        self.root.bind('<Control-m>', lambda e: self.aufnahme())

        # implement the default mpl key bindings
        def on_key_event(event):
            #if (event.key=='y'):
            #    self.scaleY()
            key_press_handler(event, canvas, toolbar)

        self.tmp_cid1 = canvas.mpl_connect('key_press_event', on_key_event)

        def on_button_event(event):
            self.scaleY()

        self.tmp_cid2 = canvas.mpl_connect('button_release_event',
                                           on_button_event)
Example #6
import json
import sys

if sys.version_info[0] < 3:
    import ConfigParser as configparser
    from urllib2 import Request, urlopen, URLError, HTTPError
else:
    import configparser
    from urllib.request import Request, urlopen
    from urllib.error import URLError, HTTPError

red = lambda s: '\033[31m%s\033[39m' % s
green = lambda s: '\033[32m%s\033[39m' % s
underline = lambda s: '\033[4m%s\033[24m' % s

config = configparser.RawConfigParser()
config.read('.pl_upload.cfg')

base_url = config.get('PrairieLearn', 'base_url')

auth_uid = config.get('Authentication', 'auth_uid')
auth_name = config.get('Authentication', 'auth_name')
auth_date = config.get('Authentication', 'auth_date')
auth_signature = config.get('Authentication', 'auth_signature')

user_uid = config.get('Question', 'user_uid')
qid = config.get('Question', 'qid')
qiid = config.get('Question', 'qiid')
tiid = config.get('Question', 'tiid')
upload_file = config.get('Question', 'upload_file')
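
The snippet imports Request, urlopen, URLError and HTTPError but ends after loading the settings. A hedged sketch of how those pieces might be combined; the /submissions path and the JSON payload layout are assumptions, not part of the original code:

# build a JSON body from the settings loaded above (field set assumed)
data = json.dumps({
    'auth_uid': auth_uid,
    'auth_name': auth_name,
    'auth_date': auth_date,
    'auth_signature': auth_signature,
    'user_uid': user_uid,
    'qid': qid,
    'qiid': qiid,
    'tiid': tiid,
}).encode('utf-8')

request = Request(base_url + '/submissions',  # hypothetical endpoint
                  data=data,
                  headers={'Content-Type': 'application/json'})
try:
    response = urlopen(request)
    print(green(response.read().decode('utf-8')))
except (HTTPError, URLError) as err:
    print(red(str(err)))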
Example #7
def updateStatusIniFile():
    config = ConfigParser.RawConfigParser()
    config.add_section(STATUS_DATES_SECTION)
    config.set(STATUS_DATES_SECTION, START_DATE, eventDatetime)
    with open(STATUS_INI, 'wb') as configfile:
        config.write(configfile)
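
A hedged companion sketch for reading the timestamp back out of the file written above; readStatusIniFile is a made-up name, but STATUS_INI, STATUS_DATES_SECTION and START_DATE are the constants the example already uses:

def readStatusIniFile():
    config = ConfigParser.RawConfigParser()
    config.read(STATUS_INI)  # read() returns an empty list if the file is missing
    if config.has_section(STATUS_DATES_SECTION) and \
            config.has_option(STATUS_DATES_SECTION, START_DATE):
        return config.get(STATUS_DATES_SECTION, START_DATE)
    return None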
Example #8
def makeService(config):

    ini = ConfigParser.RawConfigParser()
    ini.read(config['config'])

    configPath = FilePath(config['config']).parent()

    rproxyConf = dict(ini.items("rproxy"))
    hostsConf = dict(ini.items("hosts"))

    hosts = {}

    for k, v in hostsConf.items():

        k = k.lower()
        hostname, part = k.rsplit("_", 1)

        if hostname not in hosts:
            hosts[hostname] = {}

        hosts[hostname][part] = v

    if not hosts:
        raise ValueError("No hosts configured.")

    for i in hosts:

        if "port" not in hosts[i]:
            raise ValueError("All hosts need a port.")

        if "host" not in hosts[i]:
            print("%s does not have a host, making localhost" % (i, ))
            hosts[i]["host"] = "localhost"

        if "wwwtoo" not in hosts[i]:
            print("%s does not have an wwwtoo setting, making True" % (i, ))
            hosts[i]["wwwtoo"] = "True"

        if "proxysecure" not in hosts[i]:
            print("%s does not have an proxysecure setting, making False" %
                  (i, ))
            hosts[i]["proxysecure"] = False

        hosts[i]["wwwtoo"] = True if hosts[i]["wwwtoo"] == "True" else False
        hosts[i]["proxysecure"] = True if hosts[i][
            "proxysecure"] == "True" else False
        hosts[i]["sendhsts"] = True if hosts[i].get(
            "sendhsts") == "True" else False

    from twisted.internet import reactor
    pool = HTTPConnectionPool(reactor)

    resource = EncodingResourceWrapper(
        RProxyResource(hosts, rproxyConf.get("clacks"), pool, reactor, {},
                       False), [server.GzipEncoderFactory()])

    responder = HTTP01Responder()
    site = server.Site(EnsureHTTPS(resource, responder.resource), )
    multiService = service.MultiService()
    certificates = rproxyConf.get("certificates", None)

    if certificates:
        try:
            configPath.child(certificates).makedirs()
        except:
            # the directory may already exist
            pass

        certificates = configPath.child(certificates).path
        for i in rproxyConf.get("https_ports").split(","):
            print("Starting HTTPS on port " + i)
            multiService.addService(
                strports.service('txsni:' + certificates + ':tcp:' + i, site))

        for host in hosts.keys():
            with open(FilePath(certificates).child(host + ".pem").path, 'w'):
                # Open it so that txacme can find it
                pass
            if hosts[host]["wwwtoo"]:
                with open(
                        FilePath(certificates).child("www." + host +
                                                     ".pem").path, 'w'):
                    # Open it so that txacme can find it
                    pass

    for i in rproxyConf.get("http_ports", "").split(","):
        print("Starting HTTP on port " + i)
        multiService.addService(strports.service('tcp:' + i, site))

    issuingService = AcmeIssuingService(
        cert_store=DirectoryStore(FilePath(certificates)),
        client_creator=(lambda: Client.from_url(
            reactor=reactor,
            url=LETSENCRYPT_DIRECTORY,
            key=load_or_create_client_key(FilePath(certificates)),
            alg=RS256,
        )),
        clock=reactor,
        responders=[responder],
    )

    issuingService.setServiceParent(multiService)

    return multiService
Example #9
def read_config(options):
        """
        Read config file and if provided overwrite default values
        If no config file exist, create one with default values
        """
        global work_dir
        work_dir = ''
        if getattr(sys, 'frozen', False):
                work_dir = os.path.dirname(sys.executable)
        elif __file__:
                work_dir = os.path.dirname(__file__)
        _configfile = os.path.join(work_dir, options.config)
        if os.path.exists(options.config):
                _configfile = options.config
        if options.verbose:
                print "Config file: {0}".format(_configfile)
        if os.path.exists(_configfile):
                try:
                        config = ConfigParser.SafeConfigParser()
                        config.read(_configfile)
                        if config.has_option('TRAKT','CLIENT_ID') and len(config.get('TRAKT','CLIENT_ID')) != 0:
                                _trakt['client_id'] = config.get('TRAKT','CLIENT_ID')
                        else:
                                print 'Error, you must specify a trakt.tv CLIENT_ID'
                                sys.exit(1)
                        if config.has_option('TRAKT','CLIENT_SECRET') and len(config.get('TRAKT','CLIENT_SECRET')) != 0:
                                _trakt['client_secret'] = config.get('TRAKT','CLIENT_SECRET')
                        else:
                                print 'Error, you must specify a trakt.tv CLIENT_SECRET'
                                sys.exit(1)
                        if config.has_option('TRAKT','OAUTH_TOKEN') and len(config.get('TRAKT','OAUTH_TOKEN')) != 0:
                                _trakt['oauth_token'] = config.get('TRAKT','OAUTH_TOKEN')
                        else:
                                print 'Warning, authentication is required'
                        if config.has_option('TRAKT','BASEURL'):
                                _trakt['baseurl'] = config.get('TRAKT','BASEURL')
                        if config.has_option('SETTINGS','PROXY'):
                                _proxy['proxy'] = config.getboolean('SETTINGS','PROXY')
                        if _proxy['proxy'] and config.has_option('SETTINGS','PROXY_HOST') and config.has_option('SETTINGS','PROXY_PORT'):
                                _proxy['host'] = config.get('SETTINGS','PROXY_HOST')
                                _proxy['port'] = config.get('SETTINGS','PROXY_PORT')
                                _proxyDict['http'] = _proxy['host']+':'+_proxy['port']
                                _proxyDict['https'] = _proxy['host']+':'+_proxy['port']
                except:
                        print "Error reading configuration file {0}".format(_configfile)
                        sys.exit(1)
        else:
                try:
                        print '%s file was not found!' % _configfile
                        config = ConfigParser.RawConfigParser()
                        config.add_section('TRAKT')
                        config.set('TRAKT', 'CLIENT_ID', '')
                        config.set('TRAKT', 'CLIENT_SECRET', '')
                        config.set('TRAKT', 'OAUTH_TOKEN', '')
                        config.set('TRAKT', 'BASEURL', 'https://api.trakt.tv')
                        config.add_section('SETTINGS')
                        config.set('SETTINGS', 'PROXY', False)
                        config.set('SETTINGS', 'PROXY_HOST', 'https://127.0.0.1')
                        config.set('SETTINGS', 'PROXY_PORT', '3128')
                        with open(_configfile, 'wb') as configfile:
                                config.write(configfile)
                                print "Default settings wrote to file {0}".format(_configfile)
                except:
                        print "Error writing configuration file {0}".format(_configfile)
                sys.exit(1)
Example #10
    def store_data(self, bucket, item_name, headers, data):
        key_name = os.path.join(bucket.name, item_name)
        dirname = os.path.join(self.root, key_name)
        filename = os.path.join(dirname, CONTENT_FILE)
        metafile = os.path.join(dirname, METADATA_FILE)

        metadata = {}
        config = ConfigParser.RawConfigParser()
        files_parsed = config.read(metafile)
        if files_parsed:
            metadata['size'] = config.getint('metadata', 'size')
            metadata['md5'] = config.get('metadata', 'md5')
            metadata['filename'] = config.get('metadata', 'filename')
            metadata['content_type'] = config.get('metadata', 'content_type')
            metadata['creation_date'] = config.get('metadata', 'creation_date')

        m = md5.new()

        lower_headers = {}
        for key in headers:
            lower_headers[key.lower()] = headers[key]
        headers = lower_headers
        if 'content-type' not in headers:
            headers['content-type'] = 'application/octet-stream'

        size = int(headers['content-length'])
        m.update(data)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        with open(filename, 'wb') as f:
            f.write(data)

        if metadata:
            metadata['md5'] = m.hexdigest()
            metadata['modified_date'] = datetime.now().strftime(
                '%Y-%m-%dT%H:%M:%S.000Z')
            metadata['content_type'] = headers['content-type']
            metadata['size'] = size
        else:
            metadata = {
                'content_type': headers['content-type'],
                'creation_date':
                datetime.now().strftime('%Y-%m-%dT%H:%M:%S.000Z'),
                'md5': m.hexdigest(),
                'filename': filename,
                'size': size,
            }
        config.add_section('metadata')
        config.set('metadata', 'size', metadata['size'])
        config.set('metadata', 'md5', metadata['md5'])
        config.set('metadata', 'filename', metadata['filename'])
        config.set('metadata', 'content_type', metadata['content_type'])
        config.set('metadata', 'creation_date', metadata['creation_date'])
        if 'modified_date' in metadata:
            config.set('metadata', 'modified_date', metadata['modified_date'])
        with open(metafile, 'wb') as configfile:
            config.write(configfile)

        s3_item = S3Item(key_name, **metadata)  # was S3Item(key, ...); 'key' is only the leftover header-loop variable
        s3_item.io = open(filename, 'rb')
        return s3_item
Example #11
def ipcores(env, aXilSimLibsPath, aToScript, aToStdout):
    '''
    Generate the vivado libraries and cores required to simulate the current design.

    '''
    lSessionId = 'ipcores'
    lIpCoresModelsimIni = 'modelsim.ipcores.ini'

    # -------------------------------------------------------------------------
    if not which('vivado'):
        # if 'XILINX_VIVADO' not in os.environ:
        raise click.ClickException(
            'Vivado is not available. Have you sourced the environment script?')
    # -------------------------------------------------------------------------

    lDryRun = aToScript or aToStdout

    # Use compiler executable to detect Modelsim's flavour
    lSimVariant = autodetect()

    # For questa and modelsim the simulator name is the variant name in lowercase
    lSimulator = lSimVariant.lower()
    echo(style(lSimVariant, fg='blue')+" detected")

    lDepFileParser = env.depParser

    # Guess the current vivado version from environment
    lVivadoVersion = basename(os.environ['XILINX_VIVADO'])
    secho('Using Vivado version: '+lVivadoVersion, fg='green')

    # -------------------------------------------------------------------------
    # Store the target path in the env, for it to be retrieved by Vivado
    # i.e. .xilinx_sim_libs/2017.4
    lSimlibPath = expandvars(join(aXilSimLibsPath, lVivadoVersion))

    echo ("Using Xilinx simulation library path: " + style(lSimlibPath, fg='blue'))

    if not exists(lSimlibPath):
        secho("Warning: Simulation Xilinx libraries not found. Likely this is a problem.\nPlease execute {} sim setup-simlibs to generate them.".format(getClickRootName()), fg='yellow')
        confirm("Do you want to continue anyway?", abort=True)
    # -------------------------------------------------------------------------

    # -------------------------------------------------------------------------
    # Extract the list of cores
    lIPCores = [
        split(name)[1] for name, ext in
        (splitext(src.FilePath) for src in lDepFileParser.commands["src"])
        if ext in [".xci", ".edn"]
    ]

    if not lIPCores:
        secho ("WARNING: No ipcore files detected in this project", fg='yellow')
        return
    else:
        echo ('List of ipcores in project')
        for lIPCore in lIPCores:
            echo('- ' + style(lIPCore, fg='blue'))
    # -------------------------------------------------------------------------

    # For questa and modelsim the simulator name is the variant name in lowercase
    lIPCoreSimMaker = IPCoresSimMaker(lSimlibPath, lSimVariant, lSimulator, kIPExportDir)

    secho("Generating ipcore simulation code", fg='blue')

    try:
        with (
            # Pipe commands to Vivado console
            VivadoOpen(lSessionId) if not lDryRun 
            else SmartOpen(
                # Dump to script
                aToScript if not aToStdout 
                # Dump to terminal
                else None
            )
        ) as lVivadoConsole:
        
            lIPCoreSimMaker.write(
                lVivadoConsole,
                lDepFileParser.vars,
                lDepFileParser.components,
                lDepFileParser.commands,
                lDepFileParser.libs,
                lDepFileParser.maps
            )
    except VivadoConsoleError as lExc:
        echoVivadoConsoleError(lExc)
        raise click.Abort()
    except RuntimeError as lExc:
        secho("Error caught while generating Vivado TCL commands:\n" +
              "\n".join(lExc), fg='red'
              )
        raise click.Abort()

    # Copy the generated modelsim ini file locally, with a new name
    shutil.copy(join(lSimlibPath, 'modelsim.ini'), join(os.getcwd(), lIpCoresModelsimIni))
    secho("Imported modelsim.ini from {} to {}".format(lSimlibPath, lIpCoresModelsimIni), fg='blue')

    # Prepare the area where to compile the simulation
    lIPSimDir = join(kIPExportDir,lSimulator)
    # Create the target directory for the code simulation
    mkdir(join(lIPSimDir, '{0}_lib'.format(lSimulator), 'work'))
    # and copy the simlibrary config file into it
    shutil.copy(join(lSimlibPath, 'modelsim.ini'), lIPSimDir)

    # Compile 
    secho("Compiling ipcore simulation", fg='blue')

    with ModelSimBatch(echo=aToStdout, dryrun=lDryRun, cwd=lIPSimDir) as lSim:
        lSim('do compile.do')
    

    # ----------------------------------------------------------
    # Collect the list of libraries generated by ipcores to add them to
    # modelsim.ini
    lVivadoYear = [int(v) for v in lVivadoVersion.split('.')]
    
    if lVivadoYear[0] >= 2017:
        # Vivado 2017 requires an additional folder on the simulation path
        lCoreSimDir = abspath(join(
                kIPExportDir, 
                lSimulator,
                '{0}_lib'.format(lSimulator),
                'msim'
            ))
    else:
        # Vivado 2016<
        lCoreSimDir = abspath(join(
                kIPExportDir, 
                lSimulator,
                'msim'
            ))

    if not exists( lCoreSimDir ):
        raise click.ClickException("Simlib directory not found")

    lSimLibs = next(os.walk(lCoreSimDir))[1]
    echo ('Detected simulation libraries: '+ style(', '.join(lSimLibs), fg='blue'))

    # add newly generated libraries to modelsim.ini
    echo ('Adding generated simulation libraries to modelsim.ini')
    import ConfigParser

    lModelsimIni = ConfigParser.RawConfigParser()
    lModelsimIni.read('modelsim.ipcores.ini')
    for lSimLib in lSimLibs:
        echo (' - ' + lSimLib)
        lModelsimIni.set('Library', lSimLib, join(lCoreSimDir, lSimLib))

    if lModelsimIni.has_option('vsim', 'librarysearchpath'):
        lLibSearchPaths = lModelsimIni.get('vsim', 'librarysearchpath').split()
    else:
        lLibSearchPaths = []

    lLibSearchPaths += lSimLibs

    lNoDups = []
    for lSimLib in lLibSearchPaths:
        if lSimLib in lNoDups:
            continue
        lNoDups.append(lSimLib)

    lModelsimIni.set('vsim', 'librarysearchpath', ' '.join(lNoDups))

    # Make a backup copy of modelsim.ini (generated by ipcores)
    with SmartOpen('modelsim.ini') as newIni:
        lModelsimIni.write(newIni.target)
Example #12
    def setup(self):
        self.queues = queues.declare_queues(self)

        ################# CONFIGURATION
        # AMQP is required
        if not self.amqp_host:
            raise ValueError("amqp_host not set in the .ini")

        if not self.cassandra_seeds:
            raise ValueError("cassandra_seeds not set in the .ini")

        # heavy load mode is read only mode with a different infobar
        if self.heavy_load_mode:
            self.read_only_mode = True

        origin_prefix = self.domain_prefix + "." if self.domain_prefix else ""
        self.origin = "http://" + origin_prefix + self.domain
        self.secure_domains = set([urlparse(self.payment_domain).netloc])

        self.trusted_domains = set([self.domain])
        self.trusted_domains.update(self.authorized_cnames)
        if self.https_endpoint:
            https_url = urlparse(self.https_endpoint)
            self.secure_domains.add(https_url.netloc)
            self.trusted_domains.add(https_url.hostname)
        if getattr(self, 'oauth_domain', None):
            self.secure_domains.add(self.oauth_domain)

        # load the unique hashed names of files under static
        static_files = os.path.join(self.paths.get('static_files'), 'static')
        names_file_path = os.path.join(static_files, 'names.json')
        if os.path.exists(names_file_path):
            with open(names_file_path) as handle:
                self.static_names = json.load(handle)
        else:
            self.static_names = {}

        # make python warnings go through the logging system
        logging.captureWarnings(capture=True)

        log = logging.getLogger('reddit')

        # when we're a script (paster run) just set up super simple logging
        if self.running_as_script:
            log.setLevel(logging.INFO)
            log.addHandler(logging.StreamHandler())

        # if in debug mode, override the logging level to DEBUG
        if self.debug:
            log.setLevel(logging.DEBUG)

        # attempt to figure out which pool we're in and add that to the
        # LogRecords.
        try:
            with open("/etc/ec2_asg", "r") as f:
                pool = f.read().strip()
            # clean up the pool name since we're putting stuff after "-"
            pool = pool.partition("-")[0]
        except IOError:
            pool = "reddit-app"
        self.log = logging.LoggerAdapter(log, {"pool": pool})

        # make cssutils use the real logging system
        csslog = logging.getLogger("cssutils")
        cssutils.log.setLog(csslog)

        # load the country list
        countries_file_path = os.path.join(static_files, "countries.json")
        try:
            with open(countries_file_path) as handle:
                self.countries = json.load(handle)
            self.log.debug("Using countries.json.")
        except IOError:
            self.log.warning("Couldn't find countries.json. Using pycountry.")
            self.countries = get_countries_and_codes()

        if not self.media_domain:
            self.media_domain = self.domain
        if self.media_domain == self.domain:
            print("Warning: g.media_domain == g.domain. " +
                  "This may give untrusted content access to user cookies")

        for arg in sys.argv:
            tokens = arg.split("=")
            if len(tokens) == 2:
                k, v = tokens
                self.log.debug("Overriding g.%s to %s" % (k, v))
                setattr(self, k, v)

        self.reddit_host = socket.gethostname()
        self.reddit_pid = os.getpid()

        if hasattr(signal, 'SIGUSR1'):
            # not all platforms have user signals
            signal.signal(signal.SIGUSR1, thread_dump)

        self.startup_timer.intermediate("configuration")

        ################# ZOOKEEPER
        # for now, zookeeper will be an optional part of the stack.
        # if it's not configured, we will grab the expected config from the
        # [live_config] section of the ini file
        zk_hosts = self.config.get("zookeeper_connection_string")
        if zk_hosts:
            from r2.lib.zookeeper import (connect_to_zookeeper, LiveConfig,
                                          LiveList)
            zk_username = self.config["zookeeper_username"]
            zk_password = self.config["zookeeper_password"]
            self.zookeeper = connect_to_zookeeper(zk_hosts,
                                                  (zk_username, zk_password))
            self.live_config = LiveConfig(self.zookeeper, LIVE_CONFIG_NODE)
            self.throttles = LiveList(self.zookeeper,
                                      "/throttles",
                                      map_fn=ipaddress.ip_network,
                                      reduce_fn=ipaddress.collapse_addresses)
        else:
            self.zookeeper = None
            parser = ConfigParser.RawConfigParser()
            parser.read([self.config["__file__"]])
            self.live_config = extract_live_config(parser, self.plugins)
            self.throttles = tuple()  # immutable since it's not real

        self.startup_timer.intermediate("zookeeper")

        ################# MEMCACHE
        num_mc_clients = self.num_mc_clients

        # the main memcache pool. used for most everything.
        self.memcache = CMemcache(
            self.memcaches,
            min_compress_len=50 * 1024,
            num_clients=num_mc_clients,
        )

        # a pool just used for @memoize results
        memoizecaches = CMemcache(
            self.memoizecaches,
            min_compress_len=50 * 1024,
            num_clients=num_mc_clients,
        )

        # a smaller pool of caches used only for distributed locks.
        # TODO: move this to ZooKeeper
        self.lock_cache = CMemcache(self.lockcaches,
                                    num_clients=num_mc_clients)
        self.make_lock = make_lock_factory(self.lock_cache, self.stats)

        # memcaches used in front of the permacache CF in cassandra.
        # XXX: this is a legacy thing; permacache was made when C* didn't have
        # a row cache.
        if self.permacache_memcaches:
            permacache_memcaches = CMemcache(self.permacache_memcaches,
                                             min_compress_len=50 * 1024,
                                             num_clients=num_mc_clients)
        else:
            permacache_memcaches = None

        # the stalecache is a memcached local to the current app server used
        # for data that's frequently fetched but doesn't need to be fresh.
        if self.stalecaches:
            stalecaches = CMemcache(self.stalecaches,
                                    num_clients=num_mc_clients)
        else:
            stalecaches = None

        # rendercache holds rendered partial templates.
        rendercaches = CMemcache(
            self.rendercaches,
            noreply=True,
            no_block=True,
            num_clients=num_mc_clients,
            min_compress_len=1400,
        )

        # pagecaches hold fully rendered pages
        pagecaches = CMemcache(
            self.pagecaches,
            noreply=True,
            no_block=True,
            num_clients=num_mc_clients,
            min_compress_len=1400,
        )

        self.startup_timer.intermediate("memcache")

        ################# CASSANDRA
        keyspace = "reddit"
        self.cassandra_pools = {
            "main":
            StatsCollectingConnectionPool(keyspace,
                                          stats=self.stats,
                                          logging_name="main",
                                          server_list=self.cassandra_seeds,
                                          pool_size=self.cassandra_pool_size,
                                          timeout=4,
                                          max_retries=3,
                                          prefill=False),
        }

        permacache_cf = CassandraCache(
            'permacache',
            self.cassandra_pools[self.cassandra_default_pool],
            read_consistency_level=self.cassandra_rcl,
            write_consistency_level=self.cassandra_wcl)

        self.startup_timer.intermediate("cassandra")

        ################# POSTGRES
        event.listens_for(engine.Engine, 'before_cursor_execute')(
            self.stats.pg_before_cursor_execute)
        event.listens_for(engine.Engine, 'after_cursor_execute')(
            self.stats.pg_after_cursor_execute)

        self.dbm = self.load_db_params()
        self.startup_timer.intermediate("postgres")

        ################# CHAINS
        # initialize caches. Any cache-chains built here must be added
        # to cache_chains (closed around by reset_caches) so that they
        # can properly reset their local components
        cache_chains = {}
        localcache_cls = (SelfEmptyingCache
                          if self.running_as_script else LocalCache)

        if stalecaches:
            self.cache = StaleCacheChain(
                localcache_cls(),
                stalecaches,
                self.memcache,
            )
        else:
            self.cache = MemcacheChain((localcache_cls(), self.memcache))
        cache_chains.update(cache=self.cache)

        if stalecaches:
            self.memoizecache = StaleCacheChain(
                localcache_cls(),
                stalecaches,
                memoizecaches,
            )
        else:
            self.memoizecache = MemcacheChain(
                (localcache_cls(), memoizecaches))
        cache_chains.update(memoizecache=self.memoizecache)

        self.rendercache = MemcacheChain((
            localcache_cls(),
            rendercaches,
        ))
        cache_chains.update(rendercache=self.rendercache)

        self.pagecache = MemcacheChain((
            localcache_cls(),
            pagecaches,
        ))
        cache_chains.update(pagecache=self.pagecache)

        # the thing_cache is used in tdb_cassandra.
        self.thing_cache = CacheChain((localcache_cls(), ))
        cache_chains.update(thing_cache=self.thing_cache)

        self.permacache = CassandraCacheChain(
            localcache_cls(),
            permacache_cf,
            memcache=permacache_memcaches,
            lock_factory=self.make_lock,
        )
        cache_chains.update(permacache=self.permacache)

        # hardcache is used for various things that tend to expire
        # TODO: replace hardcache w/ cassandra stuff
        self.hardcache = HardcacheChain(
            (localcache_cls(), self.memcache, HardCache(self)),
            cache_negative_results=True,
        )
        cache_chains.update(hardcache=self.hardcache)

        # I know this sucks, but we need non-request-threads to be
        # able to reset the caches, so we need them be able to close
        # around 'cache_chains' without being able to call getattr on
        # 'g'
        def reset_caches():
            for name, chain in cache_chains.iteritems():
                chain.reset()
                chain.stats = CacheStats(self.stats, name)

        self.cache_chains = cache_chains

        self.reset_caches = reset_caches
        self.reset_caches()

        self.startup_timer.intermediate("cache_chains")

        # try to set the source control revision numbers
        self.versions = {}
        r2_root = os.path.dirname(os.path.dirname(self.paths["root"]))
        r2_gitdir = os.path.join(r2_root, ".git")
        self.short_version = self.record_repo_version("r2", r2_gitdir)

        if I18N_PATH:
            i18n_git_path = os.path.join(os.path.dirname(I18N_PATH), ".git")
            self.record_repo_version("i18n", i18n_git_path)

        self.startup_timer.intermediate("revisions")
Example #13
    def play_cast(self):
        if self.debug is True:
            print('def play_cast(self):')
        localip = self.ip

        try:
            print(colors.options('The IP of ') +
                  colors.success(self.cast_to) + colors.options(' is:') +
                  ' ' + self.cast.host)
        except TypeError:
            print(colors.options('The IP of ') +
                  colors.success(self.cast_to.player_name) +
                  colors.options(' is:') + ' ' + self.cast_to.ip_address)
        except AttributeError:
            for _ in self.sonos_list:
                if self.cast_to in _.player_name:
                    self.cast_to = _
            print(colors.options('The IP of ') +
                  colors.success(self.cast_to.player_name) +
                  colors.options(' is:') + ' ' + self.cast_to.ip_address)

        if self.host is None:
            print(colors.options('Your local IP is:') + ' ' + localip)
        else:
            print(colors.options('Your manually entered local IP is:') +
                  ' ' + localip)

        try:
            media_controller = self.cast.media_controller

            if self.tray is True:
                config = ConfigParser.RawConfigParser()
                # Class from mkchromecast.config
                from mkchromecast.config import config_manager
                configurations = config_manager()
                configf = configurations.configf

                if os.path.exists(configf) and self.tray is True:
                    print(self.tray)
                    print(colors.warning('Configuration file exists'))
                    print(colors.warning('Using defaults set there'))
                    config.read(configf)
                    self.backend = ConfigSectionMap('settings')['backend']

            if self.sourceurl is not None:
                if args.video is True:
                    import mkchromecast.video
                    mtype = mkchromecast.video.mtype
                else:
                    import mkchromecast.audio
                    mtype = mkchromecast.audio.mtype
                print(' ')
                print(colors.options('Casting from stream URL:') + ' ' +
                      self.sourceurl)
                print(colors.options('Using media type:') + ' ' +
                      mtype)
                media_controller.play_media(
                        self.sourceurl,
                        mtype,
                        title=self.title
                        )
            # 'and' binds tighter than 'or': group the backends so the
            # sourceurl check applies to all of them, not only gstreamer
            elif (self.backend in ('ffmpeg', 'node', 'avconv', 'parec',
                                   'gstreamer') and self.sourceurl is None):
                if args.video is True:
                    import mkchromecast.video
                    mtype = mkchromecast.video.mtype
                else:
                    import mkchromecast.audio
                    mtype = mkchromecast.audio.mtype
                print(' ')
                print(colors.options('The media type string used is:') +
                      ' ' + mtype)
                media_controller.play_media(
                        'http://' + localip + ':' + self.port + '/stream',
                        mtype,
                        title=self.title
                        )
            print(' ')
            print(colors.important('Cast media controller status'))
            print(' ')
            print(self.cast.status)
            print(' ')
            if self.hijack is True:
                self.r = Thread(target=self.hijack_cc)
                # This has to be set to True so that we catch
                # KeyboardInterrupt.
                self.r.daemon = True
                self.r.start()
        except AttributeError:
            self.sonos = self.cast_to
            self.sonos.play_uri('x-rincon-mp3radio://' + localip +
                                ':' + self.port + '/stream',
                                title=self.title)
            if self.tray is True:
                self.cast = self.sonos
Example #14
def main():
    config = ConfigParser.RawConfigParser()
    config.read('mt500.conf')
    MT500(config)
Example #15
def main(argv):
    """
    Main function
    """
    try:
        opts, args = getopt.getopt(argv, "c:hv", ["config", "help", "version"])
    except getopt.GetoptError:
        syntax()
        exit(2)

    config_file = ""
    config_url = _CONF_FILE
    for opt, arg in opts:
        if opt in ("-c", "--config"):
            if arg.startswith("http://") or \
                arg.startswith("https://") or \
                arg.startswith("ftp://"):
                config_url = arg
            else:
                config_file = arg
        elif opt in ("-h", "--help"):
            syntax()
            exit()
        elif opt in ('-v', "--version"):
            version()
            exit()

    # Are you root?
    if (not isroot()):
        showexec(_("Script should be run as root"), "tpastroot", exitonerror=1)

    # Is it Precise Pangolin?
    _UBUNTU_VERSION = platform.linux_distribution()[2]
    if (_UBUNTU_VERSION != _FOR_UBUNTU):
        showexec(_("Script only for Ubuntu %s") % _FOR_UBUNTU,
                 "badubuntuversion",
                 exitonerror=1)

    # Read the configuration file
    if (config_file == ""):
        config_file = "/tmp/%s.cfg" % __appname__
        showexec(
            _("Download the configuration file"), "rm -f " + config_file +
            " ; " + _WGET + " -O " + config_file + " " + config_url)
    config = ConfigParser.RawConfigParser()
    config.read(config_file)

    # Parse and exec pre-actions
    for action_name, action_cmd in config.items("preactions"):
        showexec(
            # lstrip() strips a set of characters, not a prefix
            _("Execute preaction ") + action_name.replace("action_", "", 1),
            action_cmd)

    # Update repos
    showexec(_("Update repositories"), _APT_UPDATE)

    # Upgrade system
    showexec(_("System upgrade (~20 mins, please be patient...)"),
             _APT_UPGRADE)

    # Parse and install packages
    for pkg_type, pkg_list in config.items("packages"):
        if (pkg_type.startswith("remove_")):
            showexec(
                _("Remove packages ") + pkg_type[len("remove_"):],
                _APT_REMOVE + " " + pkg_list)
        else:
            showexec(
                _("Install packages ") + pkg_type,
                _APT_INSTALL + " " + pkg_list)
    '''
    I needed to comment out all the dotfiles pieces because of issues with os.system. You can
    look at the issues section to see why it is an issue. os.system is outdated and should be
    replaced with subprocess (a sketch of that follows this example). I will work on that;
    until then this script works.
    '''
    # # Install packages related to repositories
    # #~ print pkg_list_others
    # for pkg in pkg_list_others.keys():
    #     showexec (_("Install packages ")+pkg, _APT_INSTALL+" "+pkg_list_others[pkg])

    # # Allow user to read DVD (CSS)
    # showexec (_("DVDs CSS encryption reader"), "sh /usr/share/doc/libdvdread4/install-css.sh")

    # Download and install dotfiles: vimrc, prompt...
    # if (config.has_section("dotfiles")):
    # Create the bashrc.d subfolder
    #showexec (_("Create the ~/.bashrc.d subfolder"), "mkdir -p $HOME/.bashrc.d")
    #if (config.has_option("dotfiles", "bashrc")):
    #    showexec (_("Download bash main configuration file"), _WGET+" -O $HOME/.bashrc "+config.get("dotfiles", "bashrc"))
    #if (config.has_option("dotfiles", "bashrc_prompt")):
    #    showexec (_("Download bash prompt configuration file"), _WGET+" -O $HOME/.bashrc.d/bashrc_prompt "+config.get("dotfiles", "bashrc_prompt"))
    #if (config.has_option("dotfiles", "bashrc_aliases")):
    #    showexec (_("Download bash aliases configuration file"), _WGET+" -O $HOME/.bashrc.d/bashrc_aliases "+config.get("dotfiles", "bashrc_aliases"))
    #showexec (_("Install the bash configuration file"), "chown -R $me:$me $HOME/.bashrc*")
    # Vim
    #if (config.has_option("dotfiles", "vimrc")):
    #   showexec (_("Donwload the Vim configuration file"), _WGET+" -O $HOME/.vimrc "+config.get("dotfiles", "vimrc"))
    #  showexec (_("Install the Vim configuration file"), "chown -R $me:$me $HOME/.vimrc")

    # Htop
    #if (config.has_option("dotfiles", "htoprc")):
    #   showexec (_("Download the Htop configuration file"), _WGET+" -O $HOME/.htoprc "+config.get("dotfiles", "htoprc"))
    #  showexec (_("Install the Htop configuration file"), "chown -R $me:$me $HOME/.htoprc")

    # Xresources
    #if (config.has_option("dotfiles", "xres")):
    #   showexec(_("Downloading the Xresources file"), _WGET+"-O $HOME/.Xresources "+config.get("dotfiles", "xres"))
    #  showexec(_("Installing the Xresources file"), "chown -R me:me $HOME/.Xresources")

    # xinitrc
    #if (config.has_option("dotfiles", "xinit")):
    #    showexec(_("Downloading the xinitrc file"), _WGET+"-O $HOME/.xinitrc "+config.get("dotfiles", "xres"))
    #   showexec(_("Installing the xinitrc file"), "chown -R me:me $HOME/.xinitrc"

    # Parse and exec post-actions
    for action_name, action_cmd in config.items("postactions"):
        showexec(
            _("Execute postaction ") + action_name.replace("action_", "", 1),
            action_cmd)

    # End of the script
    print("---")
    print(_("End of the script."))
    print(_(" - Cfg file: ") + config_file)
    print(_(" - Log file: ") + _LOG_FILE)
    print("")
    print(_("Please restart your session to complete."))
    print("---")
Example #16
def get_idx_server(hashval):
    configParser = ConfigParser.RawConfigParser()
    configFilePath = r'/home/ubuntu/config.cfg'
    configParser.read(configFilePath)
    port = int(configParser.get('ports', str(hashval)))
    return (port)
Example #17
def load_config(cfgfile=_default_config_file):

    conf = _defaults.copy()

    # read configuration from waptserver.ini
    _config = ConfigParser.RawConfigParser()
    if os.path.exists(cfgfile):
        _config.read(cfgfile)
    else:
        # XXX this is a kludge
        if os.getenv('USERNAME') == 'buildbot':
            return conf
        raise Exception(
            "FATAL : couldn't open configuration file : {}.".format(cfgfile))

    if not _config.has_section('options'):
        raise Exception(
            "FATAL, configuration file {} has no section [options]. Please check the waptserver documentation."
            .format(cfgfile))

    if _config.has_option('options', 'client_tasks_timeout'):
        conf['client_tasks_timeout'] = int(
            _config.get('options', 'client_tasks_timeout'))

    if _config.has_option('options', 'clients_connect_timeout'):
        conf['clients_connect_timeout'] = int(
            _config.get('options', 'clients_connect_timeout'))

    if _config.has_option('options', 'clients_read_timeout'):
        conf['clients_read_timeout'] = int(
            _config.get('options', 'clients_read_timeout'))

    if _config.has_option('options', 'loglevel'):
        conf['loglevel'] = _config.get('options', 'loglevel')

    if _config.has_option('options', 'mongodb_ip'):
        conf['mongodb_ip'] = _config.get('options', 'mongodb_ip')

    if _config.has_option('options', 'mongodb_port'):
        conf['mongodb_port'] = _config.getint('options', 'mongodb_port')

    if _config.has_option('options', 'secret_key'):
        conf['secret_key'] = _config.get('options', 'secret_key')

    if _config.has_option('options', 'server_uuid'):
        conf['server_uuid'] = _config.get('options', 'server_uuid')

    if _config.has_option('options', 'wapt_folder'):
        conf['wapt_folder'] = _config.get('options', 'wapt_folder').rstrip('/')

    if _config.has_option('options', 'wapt_huey_db'):
        conf['wapt_huey_db'] = _config.get('options', 'wapt_huey_db')

    if _config.has_option('options', 'wapt_password'):
        conf['wapt_password'] = _config.get('options', 'wapt_password')

    if _config.has_option('options', 'wapt_user'):
        conf['wapt_user'] = _config.get('options', 'wapt_user')

    if _config.has_option('options', 'waptserver_port'):
        conf['waptserver_port'] = _config.get('options', 'waptserver_port')

    if _config.has_option('options', 'waptservice_port'):
        conf['waptservice_port'] = _config.get('options', 'waptservice_port')

    # XXX must be processed after conf['wapt_folder']
    if _config.has_option('options', 'waptwua_folder'):
        conf['waptwua_folder'] = _config.get('options',
                                             'waptwua_folder').rstrip('/')
    if not conf['waptwua_folder']:
        conf['waptwua_folder'] = conf['wapt_folder'] + 'wua'

    if _config.has_option('options', 'db_url'):
        conf['db_url'] = _config.get('options', 'db_url')

    return conf
Example #18
def get_peer_port(peer):
    configParser = ConfigParser.RawConfigParser()
    configFilePath = r'/home/ubuntu/config.cfg'
    configParser.read(configFilePath)
    port = int(configParser.get('ports', peer))
    return (port)
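
Examples #16 and #18 both read a [ports] section from /home/ubuntu/config.cfg, keyed by hash bucket and by peer name respectively. A hedged sketch of what that file might look like; all names and port numbers are placeholders:

[ports]
0 = 5000
1 = 5001
peer1 = 6001
peer2 = 6002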
Example #19
TVHEADEND = 'TVH'
SHELL = 'SHELL'
HTTP = 'HTTP'
TV_GRAB = 'TV_GRAB'
KODI = 'KODI'

SETTINGS = {
    'userdata': kodi_home,
    'proxy_type': SHELL,
    'interactive': None,
    'repo_url': 'https://k.slyguy.xyz/.repo',
    'debug': 0,
}

config = ConfigParser.RawConfigParser(defaults=SETTINGS)
config.read(os.path.join(kodi_home, 'config.ini'))

for key in SETTINGS:
    SETTINGS[key] = os.environ.get(key, config.get('DEFAULT', key))

addons_dir = os.path.join(SETTINGS['userdata'], 'addons')
addons_data = os.path.join(SETTINGS['userdata'], 'addon_data')
tmp_dir = os.path.join(SETTINGS['userdata'], 'tmp')

if SETTINGS['interactive'] is None:
    SETTINGS['interactive'] = SETTINGS['proxy_type'] == SHELL

if not os.path.exists(tmp_dir):
    os.makedirs(tmp_dir)
Example #20
def setup():

    cfg = ConfigParser.RawConfigParser()
    try:
        path = os.environ['S3TEST_CONF']
    except KeyError:
        raise RuntimeError(
            'To run tests, point environment ' +
            'variable S3TEST_CONF to a config file.', )
    with open(path) as f:
        cfg.readfp(f)

    global prefix
    global targets

    try:
        template = cfg.get('fixtures', 'bucket prefix')
    except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
        template = 'test-{random}-'
    prefix = choose_bucket_prefix(template=template)

    s3.clear()
    config.clear()

    for section in cfg.sections():
        try:
            (type_, name) = section.split(None, 1)
        except ValueError:
            continue
        if type_ != 'region':
            continue
        regions.add(name, TargetConfig(cfg, section))

    for section in cfg.sections():
        try:
            (type_, name) = section.split(None, 1)
        except ValueError:
            continue
        if type_ != 's3':
            continue

        if len(regions.get()) == 0:
            regions.add("default", TargetConfig(cfg, section))

        config[name] = bunch.Bunch()
        for var in [
                'user_id',
                'display_name',
                'email',
        ]:
            try:
                config[name][var] = cfg.get(section, var)
            except ConfigParser.NoOptionError:
                pass

        targets[name] = RegionsConn()

        for (k, conf) in regions.iteritems():
            conn = boto.s3.connection.S3Connection(
                aws_access_key_id=cfg.get(section, 'access_key'),
                aws_secret_access_key=cfg.get(section, 'secret_key'),
                is_secure=conf.is_secure,
                port=conf.port,
                host=conf.host,
                # TODO test vhost calling format
                calling_format=conf.calling_format,
            )
            targets[name].add(k, TargetConnection(conf, conn))
        s3[name] = targets[name].default.connection

    # WARNING! we actively delete all buckets we see with the prefix
    # we've chosen! Choose your prefix with care, and don't reuse
    # credentials!

    # We also assume nobody else is going to use buckets with that
    # prefix. This is racy but given enough randomness, should not
    # really fail.
    nuke_prefixed_buckets(prefix=prefix)
Example #21
class Iblox(object):
    """manage infoblox entries"""
    config = ConfigParser.RawConfigParser()

    def __init__(self, network, record, alias):
        self.network = network
        self.record = record
        self.alias = alias
        self.config.readfp(open(IBLOX_CONF))
        self.opts = {
            'host': self.config.get('iblox', 'iblox_server'),
            'username': self.config.get('iblox', 'iblox_username'),
            'password': self.config.get('iblox', 'iblox_password')
            }
        self.conn = connector.Connector(self.opts)

    def query_alias(self):
        """ query for CNAME record: return None if it does not exist or
            if self.alias matches the existing one """
        try:
            alias_rec = self.conn.get_object('record:cname', {'name': self.alias})[0]
        except TypeError:
            return None
        else:
            if self.record == str(alias_rec['canonical']):
                return 'already_there'
            else:
                return alias_rec

    def destroy(self):
        """ clean up CNAME entry """
        try:
            self.conn.delete_object(self.conn.get_object(
                'record:cname', {'name': self.alias})[0]['_ref'])
        except TypeError:
            print "cound not find CNAME {}".format(self.alias)
        else:
            print "destroyed CNAME {}".format(self.alias)

    def destroy_conditional(self):
        """ clean up host entries """
        alias_entry = self.query_alias()
        if alias_entry and alias_entry != 'already_there':
            self.conn.delete_object(alias_entry['_ref'])
            print "destroyed CNAME record {}".format(self.alias)
            return 'did something'
        elif alias_entry == 'already_there':
            return 'already_there'
        else:
            return None

    def rebuild(self):
        """ - destroy alias record (if it is not matching)
            - create a new alias record if there isn't one already
        """

        try_destroy = self.destroy_conditional()

        if try_destroy == 'already_there':
            print "A CNAME {} associated to {} is already there".format(
                self.alias, self.record)
        else:
            try:
                objects.CNAMERecord.create(self.conn, view=self.network,
                                           name=self.alias, canonical=self.record)
            except Exception as err:
                print "couldn't create CNAME {} to Record {}: {}".format(
                    self.alias, self.record, err)
                os.sys.exit(1)
            else:
                print "created CNAME record {} associated to {}".format(
                    self.alias, self.record)

        print '-'*74
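
# Usage sketch: the network view and host names below are illustrative
# placeholders, and IBLOX_CONF must point at an ini file whose [iblox]
# section provides iblox_server, iblox_username and iblox_password.
if __name__ == '__main__':
    iblox = Iblox('default', 'web01.example.com', 'www.example.com')
    iblox.rebuild()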
Exemple #22
0
import os
import ConfigParser


def main_merge(stats_list, stats_file, verbose):

    process_parameter_file = 'processParameters.txt'

    # Output file
    config_out = ConfigParser.RawConfigParser()
    config_out.optionxform = str  # don't convert to lowercase
    config_out.add_section('global')

    for i, maskstats in enumerate(stats_list):

        if verbose:
            print "Reading", maskstats

        config = ConfigParser.RawConfigParser()
        config.read(maskstats)

        keys = [
            'Excluded Wells', 'Empty Wells', 'Pinned Wells', 'Ignored Wells',
            'Bead Wells', 'Dud Beads', 'Reference Beads', 'Live Beads',
            'Test Fragment Beads', 'Library Beads',
            'TF Filtered Beads (read too short)',
            'TF Filtered Beads (fail keypass)',
            'TF Filtered Beads (too many positive flows)',
            'TF Filtered Beads (poor signal fit)', 'TF Validated Beads',
            'Lib Filtered Beads (read too short)',
            'Lib Filtered Beads (fail keypass)',
            'Lib Filtered Beads (too many positive flows)',
            'Lib Filtered Beads (poor signal fit)', 'Lib Validated Beads'
        ]

        if i == 0:
            head, tail = os.path.split(maskstats)
            config_pp = ConfigParser.RawConfigParser()
            config_pp.read(os.path.join(head, process_parameter_file))
            chip = config_pp.get('global', 'Chip')
            size = chip.split(',')
            config_out.set('global', 'Start Row', '0')
            config_out.set('global', 'Start Column', '0')
            config_out.set('global', 'Width', int(size[0]))
            config_out.set('global', 'Height', int(size[1]))
            config_out.set('global', 'Total Wells',
                           int(size[0]) * int(size[1]))
            config_out.set('global', 'Percent Template-Positive Library Beads',
                           '0')  # TODO

            for key in keys:
                config_out.set('global', key, '0')

        for key in keys:
            try:
                value_in = config.get('global', key)
                value_out = config_out.get('global', key)
                config_out.set('global', key, int(value_in) + int(value_out))
            except (ConfigParser.Error, ValueError):
                print "ERROR: StatsMerge: key %s doesn't exist" % key

    sum_wells = 0
    sum_wells += config_out.getint('global', 'Empty Wells')
    sum_wells += config_out.getint('global', 'Pinned Wells')
    sum_wells += config_out.getint('global', 'Ignored Wells')
    sum_wells += config_out.getint('global', 'Bead Wells')
    sum_wells += config_out.getint('global', 'Excluded Wells')

    if config_out.getint('global', 'Total Wells') != sum_wells:
        print "ERROR: StatsMerge: Total Wells: %s (sum) != %s (expected)" % (
            sum_wells, config_out.get('global', 'Total Wells'))

    with open(stats_file, 'wb') as configfile:
        if verbose:
            print "Writing", stats_file

        config_out.write(configfile)
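
# Usage sketch: the file names are illustrative placeholders. Each input is
# a per-block mask statistics file, and a processParameters.txt file is
# expected next to the first one; the summed totals go to the output file.
if __name__ == '__main__':
    main_merge(['block_X0_Y0/bfmask.stats', 'block_X1_Y0/bfmask.stats'],
               'bfmask.stats.merged', True)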
Exemple #23
0
            config_file_directory = config_file.get('r_settings',
                                                    'config_file_directory')
        except ConfigParser.Error, e:
            traceback.print_exc()
        else:
            fl_sites = florida_sample_sites(True)
            fl_sites.load_sites(file_name=sites_location_file,
                                boundary_file=boundaries_location_file)

            for site in fl_sites:
                if site.name == options.site_name:
                    site_data_wb = load_workbook(filename=options.site_xl_file)
                    site_config_filename = os.path.join(
                        station_model_dir, '%s.ini' % (site.name))
                    try:
                        site_config_file = ConfigParser.RawConfigParser()
                        site_config_file.read(site_config_filename)
                    except Exception, e:
                        traceback.print_exc()
                    else:
                        models = OrderedDict()
                        sheet_names = site_data_wb.get_sheet_names()
                        for ws_name in sheet_names:
                            work_sheet = site_data_wb[ws_name]
                            print "Processing sheet: %s" % (ws_name)
                            wb_data = read_sheet_data(work_sheet, site.name,
                                                      config_file_directory)
                            models[ws_name] = wb_data
                        write_ini_file(site, models, config_file)
    return


def main():
    parser = optparse.OptionParser()
    parser.add_option("--ConfigFile",
                      dest="config_file",
                      help="INI Configuration file.")
    parser.add_option("--LoggingConfig",
                      dest="logging_conf",
                      help="Logging Configuration file.")

    (options, args) = parser.parse_args()

    logging.config.fileConfig(options.logging_conf)
    logger = logging.getLogger(__name__)
    logger.info("Log file opened.")

    db_config_file = ConfigParser.RawConfigParser()
    db_config_file.read(options.config_file)
    db_user = db_config_file.get('database', 'user')
    db_pwd = db_config_file.get('database', 'password')
    db_host = db_config_file.get('database', 'host')
    db_name = db_config_file.get('database', 'name')
    db_connectionstring = db_config_file.get('database', 'connectionstring')
    db_obj = xeniaAlchemy()
    if (db_obj.connectDB(db_connectionstring, db_user, db_pwd, db_host,
                         db_name, False) == True):
        logger.info("Succesfully connect to DB: %s at %s" % (db_name, db_host))

        default_prune = datetime.now() - timedelta(weeks=8)

        prune_start_time = time.time()

        # Get list of platforms to prune.
        platform_recs = db_obj.session.query(platform) \
            .order_by(platform.organization_id).all()
        platform_count = 0
        connectionString = "%s://%s:%s@%s/%s" % (db_connectionstring, db_user,
                                                 db_pwd, db_host, db_name)
        db_engine = create_engine(connectionString, echo=False)
        connection = db_engine.raw_connection()
        connection.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
        cursor = connection.cursor()
        try:
            delete_start_time = time.time()
            delete_where = "m_date < '%s'" % (
                default_prune.strftime('%Y-%m-%dT%H:%M:%S'))
            cursor.execute("DELETE FROM multi_obs WHERE %s" % (delete_where))
        except Exception as e:
            logger.exception(e)
        else:
            logger.info("Pruned records WHERE %s in %f seconds" %
                        (delete_where, time.time() - delete_start_time))
        '''        
        for platform_rec in platform_recs:
            delete_start_time = time.time()
            db_obj.session.query(multi_obs) \
                .filter(multi_obs.m_date < default_prune.strftime('%Y-%m-%dT%H:%M:%S')) \
                .filter(multi_obs.platform_handle == platform_rec.platform_handle)\
                .delete()
                
            logger.info("Platform: %s pruned records older than: %s in %f seconds" % (
            platform_rec.platform_handle, default_prune, time.time() - delete_start_time))
            platform_count += 1
        '''
        cursor.close()
        connection.close()
        db_obj.disconnect()

        try:
            # Now create raw connection to database to do vacuum.
            connectionString = "%s://%s:%s@%s/%s" % (
                db_connectionstring, db_user, db_pwd, db_host, db_name)
            logger.info("Preparing to vacuum and reindex")
            db_engine = create_engine(connectionString, echo=False)
            connection = db_engine.raw_connection()
            connection.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
        except (exc.OperationalError, Exception) as e:
            logger.exception(e)
        else:
            cursor = connection.cursor()
            vacuum_start_time = time.time()
            #cursor.execute("VACUUM ANALYSE multi_obs")
            cursor.execute("VACUUM FULL multi_obs")
            logger.info("VACUUMed multi_obs in %f seconds" %
                        (time.time() - vacuum_start_time))
            reindex_start_time = time.time()
            cursor.execute("REINDEX TABLE multi_obs")
            logger.info("Reindexed multi_obs in %f seconds" %
                        (time.time() - reindex_start_time))
            cursor.close()

            connection.close()
            db_engine.dispose()

    else:
        logger.error("Unable to connect to DB: %s at %s. Terminating script." %
                     (db_name, db_host))

    return
Exemple #25
0
import sys
import serial
import time
import ConfigParser
import io
import os
import json
from influxdb import InfluxDBClient
from influxdb.client import InfluxDBClientError

with open("config.ini") as f:
    sample_config = f.read()
config = ConfigParser.RawConfigParser(allow_no_value=True)
config.readfp(io.BytesIO(sample_config))

################
# Variables
################
influx_server = config.get('INFLUXDB',
                           'influxdb_server')  # InfluxDB server address
influx_port = int(config.get('INFLUXDB', 'influxdb_port'))  # InfluxDB port
influx_db = config.get('INFLUXDB', 'influxdb_databasename')  # InfluxDB name
influx_user = config.get('INFLUXDB',
                         'influxdb_user')  # InfluxDB username
influx_passwd = config.get('INFLUXDB', 'influxdb_password')  # InfluxDB password
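
# Not part of the original excerpt: with the values above, a client can be
# built with the influxdb package's InfluxDBClient(host, port, username,
# password, database) constructor imported at the top of this example.
client = InfluxDBClient(influx_server, influx_port, influx_user,
                        influx_passwd, influx_db)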


################
#Error display #
################
def show_error():
Exemple #26
0
    def __init__(self, cf):
        self.config = config = ConfigParser.RawConfigParser()
        config.readfp(cf)
Exemple #27
0
[Icons]
show: yes
use_gtk_theme: yes
theme: Mint-X
size: 24
application: application-x-executable
bookmark: user-bookmarks
folder: gtk-directory
file: gtk-file

internal_drive: gtk-harddisk
optical_drive: gtk-cdrom
removable_drive: gnome-dev-removable
mount: gtk-execute
unmount: media-eject
"""

########################################################
#                                                      #
# -------------------- Config Run -------------------- #
#                                                      #
########################################################

# Parse uxdgmenu default config
__parser = ConfigParser.RawConfigParser()
__parser.readfp(StringIO.StringIO(DEFAULT_CONFIG))
# Create working dirs & config files if needed
check()
# Parse config files
__parser.read([SYSTEM_CONFIG_FILE, USER_CONFIG_FILE])
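
# Sketch (not in the original excerpt): after the merge above, lookups fall
# back to DEFAULT_CONFIG whenever the system or user file omits a key.
show_icons = __parser.getboolean('Icons', 'show')
icon_size = __parser.getint('Icons', 'size')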
Exemple #28
0
    def _get_config_from_file(self):
        # Parse Liota configuration file
        config = ConfigParser.RawConfigParser()
        config.optionxform = str
        fullPath = LiotaConfigPath().get_liota_fullpath()
        if fullPath != '':
            try:
                if config.read(fullPath) != []:
                    try:
                        # retrieve device info file storage directory
                        self.dev_file_path = config.get(
                            'IOTCC_PATH', 'dev_file_path')
                        # retrieve organization info file storage directory
                        self.org_file_path = config.get(
                            'IOTCC_PATH', 'iotcc_path')
                    except ConfigParser.ParsingError as err:
                        log.error('Could not parse liota config file: ' +
                                  str(err))
                        exit(-4)
                    if not os.path.exists(self.dev_file_path):
                        try:
                            os.makedirs(self.dev_file_path)
                        except OSError as exc:  # Python >2.5
                            if exc.errno == errno.EEXIST and os.path.isdir(
                                    self.dev_file_path):
                                pass
                            else:
                                log.error(
                                    'Could not create device file storage directory'
                                )
                                exit(-4)
                    if not os.path.exists(self.org_file_path):
                        log.error('No organization info file (iotcc.json)')
                    try:
                        # retrieve discovery cmd pip
                        self.cmd_messenger_pipe = os.path.abspath(
                            config.get('DISC_CFG', 'disc_cmd_msg_pipe'))
                        # retrieve package path (later use config file to register device)
                        self.package_path = os.path.abspath(
                            config.get('PKG_CFG', 'pkg_path'))
                        # retrieve endpoint list
                        tmp_list = config.items('DISC_ENDPOINT_LIST')
                        for key, value in tmp_list[:]:
                            if value is None or value == "None":
                                continue
                            if key.find('disc_msg_pipe') != -1:
                                self.discovery_messenger_pipe = os.path.abspath(
                                    value)
                                if self.discovery_messenger_pipe is not None:
                                    self.endpoint_list[
                                        key] = self.discovery_messenger_pipe
                            elif key.find('socket') != -1:
                                self.endpoint_list[key] = value
                            elif key.find('mqtt') != -1:
                                # retrieve mqtt configurations
                                mqtt_cfg_dict = dict(
                                    config.items('DISC_MQTT_CFG'))
                                # make broker_ip:port:topic included inside mqtt_cfg_dict
                                mqtt_cfg_dict["broker_ip_port_topic"] = value
                                self.endpoint_list[key] = mqtt_cfg_dict
                            elif key.find('coap') != -1:
                                self.endpoint_list[key] = value
                            else:
                                log.error(key + ' is currently not supported!')
                        for key in self.endpoint_list.iterkeys():
                            log.debug("endpoint_list:(%s : %s)\n" %
                                      (key, self.endpoint_list[key]))

                        global DEVICE_TYPE_SAFE_REGEX
                        # retrieve device type to unique key mapping list
                        tmp_list = config.items(
                            'DEVICE_TYPE_TO_UNIQUEKEY_MAPPING')
                        for key, value in tmp_list[:]:
                            if not re.match(DEVICE_TYPE_SAFE_REGEX, key):
                                log.warning(
                                    "device type {0} contains unacceptable character"
                                    .format(key))
                                continue
                            if value is None or value == "None":
                                continue
                            self.type_key_map[key] = value
                        for key in self.type_key_map.iterkeys():
                            log.debug("type_key_map:(%s : %s)\n" %
                                      (key, self.type_key_map[key]))

                        # retrieve device type to DCC mapping list
                        tmp_list = config.items('DEVICE_TYPE_TO_DCC_MAPPING')
                        for key, value in tmp_list[:]:
                            if not re.match(DEVICE_TYPE_SAFE_REGEX, key):
                                log.warning(
                                    "device type {0} contains unacceptable character"
                                    .format(key))
                                continue
                            if value is None or value == "None":
                                continue
                            tmp_list2 = [x.strip() for x in value.split(',')]
                            self.type_dcc_map[key] = tmp_list2
                            self.type_tuple_key_dcc_pkg[key] = (
                                self.type_key_map[key], tmp_list2)
                        for key in self.type_dcc_map.iterkeys():
                            log.debug("type_dcc_map:(%s : %s)\n" %
                                      (key, self.type_dcc_map[key]))
                        for key in self.type_tuple_key_dcc_pkg.iterkeys():
                            log.debug("type_tuple_key_dcc_pkg:(%s : %s)\n" %
                                      (key, self.type_tuple_key_dcc_pkg[key]))
                    except ConfigParser.ParsingError:
                        log.error('Could not parse liota config file')
                        exit(-4)
                else:
                    raise IOError('Could not open configuration file: ' +
                                  fullPath)
            except IOError:
                raise IOError('Could not open configuration file: ' + fullPath)
        else:
            # missing config file
            log.error('liota.conf file missing')

        assert (isinstance(self.cmd_messenger_pipe, basestring))
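
# An illustrative liota.conf fragment (paths and values are placeholders,
# not from the Liota distribution) covering the sections read above:
#
#   [IOTCC_PATH]
#   dev_file_path = /usr/lib/liota/dev_files
#   iotcc_path = /usr/lib/liota/iotcc.json
#
#   [DISC_CFG]
#   disc_cmd_msg_pipe = /var/run/liota/cmd_msg_pipe
#
#   [PKG_CFG]
#   pkg_path = /usr/lib/liota/packages
#
#   [DISC_ENDPOINT_LIST]
#   disc_msg_pipe = /var/run/liota/disc_msg_pipe
#   socket = 127.0.0.1:5000
#
#   [DEVICE_TYPE_TO_UNIQUEKEY_MAPPING]
#   LM35 = SerialNumber
#
#   [DEVICE_TYPE_TO_DCC_MAPPING]
#   LM35 = iotcc_mqtt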
Exemple #29
0
def run():
    """
    Run the wind multiplier calculations.

    This will attempt to run the calculation in parallel by tiling the
    domain, but also provides a sane fallback mechanism to execute
    in serial.

    """

    # add subfolders into path
    cmd_folder = os.path.realpath(
        os.path.abspath(
            os.path.split(inspect.getfile(inspect.currentframe()))[0]))
    if cmd_folder not in sys.path:
        sys.path.insert(0, cmd_folder)

    cmd_subfolder1 = pjoin(cmd_folder, "terrain")
    if cmd_subfolder1 not in sys.path:
        sys.path.insert(0, cmd_subfolder1)

    cmd_subfolder2 = pjoin(cmd_folder, "shielding")
    if cmd_subfolder2 not in sys.path:
        sys.path.insert(0, cmd_subfolder2)

    cmd_subfolder3 = pjoin(cmd_folder, "topographic")
    if cmd_subfolder3 not in sys.path:
        sys.path.insert(0, cmd_subfolder3)

    cmd_subfolder4 = pjoin(cmd_folder, "utilities")
    if cmd_subfolder4 not in sys.path:
        sys.path.insert(0, cmd_subfolder4)

    config = ConfigParser.RawConfigParser()
    config.read(pjoin(cmd_folder, 'multiplier_conf.cfg'))

    root = config.get('inputValues', 'root')
    upwind_length = float(config.get('inputValues', 'upwind_length'))

    logfile = config.get('Logging', 'LogFile')
    logdir = dirname(realpath(logfile))

    # If log file directory does not exist, create it
    if not isdir(logdir):
        try:
            os.makedirs(logdir)
        except OSError:
            logfile = pjoin(os.getcwd(), 'multipliers.log')

    loglevel = config.get('Logging', 'LogLevel')
    verbose = config.getboolean('Logging', 'Verbose')

    attempt_parallel()

    if pp.size() > 1 and pp.rank() > 0:
        logfile += '_' + str(pp.rank())
        verbose = False

    fl_start_log(logfile, loglevel, verbose)

    # set input maps and output folder
    terrain_map = pjoin(pjoin(root, 'input'), "lc_terrain_class.img")
    dem = pjoin(pjoin(root, 'input'), "dems1_whole.img")

    do_output_directory_creation(root)
    global output_folder
    output_folder = pjoin(root, 'output')

    log.info("get the tiles based on the DEM")
    tg = TileGrid(upwind_length, dem)
    tiles = get_tiles(tg)
    log.info('the number of tiles is {0}'.format(str(len(tiles))))

    pp.barrier()

    multiplier = Multipliers(terrain_map, dem)
    multiplier.parallelise_on_tiles(tiles)

    pp.barrier()

    log.info("Successfully completed wind multipliers calculation")
Exemple #30
0
import os
import ConfigParser


def parse_gh():

    config = ConfigParser.RawConfigParser(allow_no_value=True)
    config.read('.deploy.ini')

    if not config.getboolean('Github', 'enable'):
        return None

    prerelease = config.getboolean('Github', 'prerelease')
    if not prerelease:
        conditional_prerelease = config.getboolean('Github',
                                                   'conditional_prerelease')
        if conditional_prerelease and ("alpha" in os.environ["TRAVIS_TAG"]
                                       or "beta" in os.environ["TRAVIS_TAG"]):
            prerelease = True

    draft = config.getboolean('Github', 'draft')
    if not draft:
        conditional_draft = config.getboolean('Github', 'conditional_draft')
        if conditional_draft and ("alpha" in os.environ["TRAVIS_TAG"]
                                  or "beta" in os.environ["TRAVIS_TAG"]):
            draft = True

    title = config.get('Github', 'title')
    description = config.get('Github', 'description')
    branch = config.get('Github', 'branch')
    packagename = config.get('Github', 'packagename')
    include_version = config.getboolean('Github', 'include_version')

    deploy_gh = {
        "provider": "releases",
        "api_key": os.environ["GH_TOKEN"],
        "target_commitish": os.environ["TRAVIS_COMMIT"],
        "draft": draft,
        "prerelease": prerelease,
        "skip_cleanup": "true",
        "on": {}
    }

    if title:
        deploy_gh["name"] = title
    else:
        deploy_gh["name"] = os.environ["TRAVIS_TAG"]

    if description:
        deploy_gh["description"] = description

    if branch:
        deploy_gh["on"]["branch"] = branch
    else:
        deploy_gh["on"]["all_branches"] = "true"

    if packagename:
        if include_version:
            deploy_gh["file"] = "./Deploy/%s_%s.zip" % (
                packagename, os.environ["TRAVIS_TAG"])
        else:
            deploy_gh["file"] = "./Deploy/%s.zip" % packagename
    else:
        if include_version:
            deploy_gh["file"] = "./Deploy/%s_%s.zip" % (
                os.environ["TRAVIS_REPO_SLUG"].split("/")[1],
                os.environ["TRAVIS_TAG"])
        else:
            deploy_gh["file"] = "./Deploy/%s.zip" % os.environ[
                "TRAVIS_REPO_SLUG"].split("/")[1]  #this is the repo name

    return deploy_gh
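
# Usage sketch, not part of the original script: assuming PyYAML is
# available, the returned mapping can be serialized into the deploy
# section of .travis.yml.
if __name__ == '__main__':
    import yaml
    deploy = parse_gh()
    if deploy:
        print yaml.safe_dump({"deploy": deploy}, default_flow_style=False)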