Example #1
    parser = OptionParser()
    parser.add_option("--threshold",help="Threshold size in KB",type="float",default=1000.)
    parser.add_option("--format",help="Output format",default="html")
    parser.add_option("--config",help="Configuration file",default="/usr/local/apache2/cgi-bin/diskusage.cfg")
    opts,args = parser.parse_args()

    threshold = opts.threshold * 1024.
    format = opts.format
    config = opts.config
    hostname = socket.gethostname()

    if not os.path.exists(config):
        raise IOError, "Configuration file %s does not exist" % config

    cp = ConfigParser()
    cp.read(config)
    
    title = "Disk usage"
            
    print """Content-type:text/html\n\n

            
    <head>
    <title>%s</title>
    <link rel="stylesheet" href="style.css" type="text/css" />
    </head>
    <body>
    """ % (title)
            
    t = time.asctime(time.localtime(time.time()))
Example #2
DEVELOPMENT_HOSTS = [
    "Dans-MacBook-Pro.local",
    "Dans-iMac.local",
    "sirvig-cr_68_ministries-1972797",
]

if socket.gethostname() in DEVELOPMENT_HOSTS:
    DEVELOPMENT = True
    ALLOWED_HOSTS = []
else:
    # Production: debug must stay off; the real served hostnames belong here.
    DEVELOPMENT = False
    ALLOWED_HOSTS = []

BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
CONFIG_LOCATION = os.path.join(BASE_DIR, "CostaRica/CostaRica.cfg")
config = ConfigParser()
config.read(CONFIG_LOCATION)

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config.get("settings", "secret_key")

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = DEVELOPMENT

# Application definition

INSTALLED_APPS = (
    'django.contrib.admin',
Example #3
# A dirty hack to get around some early import/configuration ambiguities
import builtins
builtins._CUBEVIZ_SETUP_ = True

from astropy_helpers.setup_helpers import (register_commands, get_debug_option,
                                           get_package_info)
from astropy_helpers.git_helpers import get_git_devstr
from astropy_helpers.version_helpers import generate_version_py

# Get some values from the setup.cfg
try:
    from ConfigParser import ConfigParser
except ImportError:
    from configparser import ConfigParser

conf = ConfigParser()
conf.read(['setup.cfg'])
metadata = dict(conf.items('metadata'))

PACKAGENAME = metadata.get('package_name', 'cubeviz')
DESCRIPTION = metadata.get('description', 'Data analysis package for cubes.')
AUTHOR = metadata.get('author', 'JDADF Developers')
AUTHOR_EMAIL = metadata.get('author_email', '')
LICENSE = metadata.get('license', 'unknown')
URL = metadata.get('url', 'https://github.com/spacetelescope/cubeviz')

# order of priority for long_description:
#   (1) set in setup.cfg,
#   (2) load LONG_DESCRIPTION.rst,
#   (3) load README.rst,
#   (4) package docstring
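
# A minimal sketch of that fallback chain (hypothetical, not the package's
# actual code; assumes os is imported and the package docstring is importable
# thanks to the _CUBEVIZ_SETUP_ hack above):
LONG_DESCRIPTION = metadata.get('long_description', '')
if not LONG_DESCRIPTION:
    for candidate in ('LONG_DESCRIPTION.rst', 'README.rst'):
        if os.path.exists(candidate):
            with open(candidate) as f:
                LONG_DESCRIPTION = f.read()
            break
    else:
        import cubeviz
        LONG_DESCRIPTION = cubeviz.__doc__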
Example #4
def load(config_files):
    """ initialize and load a configuration"""
    global parser
    parser = ConfigParser()
    parser.read(config_files)

    import config, planet
    from planet import opml, foaf, csv_config
    log = planet.logger
    if not log:
        log = planet.getLogger(config.log_level(), config.log_format())

    # Theme support
    theme = config.output_theme()
    if theme:
        for path in ("", os.path.join(sys.path[0], 'themes')):
            theme_dir = os.path.join(path, theme)
            theme_file = os.path.join(theme_dir, 'config.ini')
            if os.path.exists(theme_file):
                # initial search list for theme directories
                dirs = config.template_directories()
                if theme_dir not in dirs:
                    dirs.append(theme_dir)
                if not hasattr(config_files, 'append'):
                    config_files = [config_files]
                for config_file in config_files:
                    if os.path.dirname(config_file) not in dirs:
                        dirs.append(os.path.dirname(config_file))

                # read in the theme
                parser = ConfigParser()
                parser.read(theme_file)
                bom = config.bill_of_materials()

                # complete search list for theme directories
                dirs += [
                    os.path.join(theme_dir, dir)
                    for dir in config.template_directories() if dir not in dirs
                ]

                # merge configurations, allowing current one to override theme
                template_files = config.template_files()
                parser.set('Planet', 'template_files', '')
                parser.read(config_files)
                for file in config.bill_of_materials():
                    if file not in bom: bom.append(file)
                parser.set('Planet', 'bill_of_materials', ' '.join(bom))
                parser.set('Planet', 'template_directories', ' '.join(dirs))
                parser.set('Planet', 'template_files',
                           ' '.join(template_files + config.template_files()))
                break
        else:
            log.error('Unable to find theme %s', theme)

    # Filter support
    dirs = config.filter_directories()
    filter_dir = os.path.join(sys.path[0], 'filters')
    if filter_dir not in dirs and os.path.exists(filter_dir):
        parser.set('Planet', 'filter_directories',
                   ' '.join(dirs + [filter_dir]))

    # Reading list support
    reading_lists = config.reading_lists()
    if reading_lists:
        if not os.path.exists(config.cache_lists_directory()):
            os.makedirs(config.cache_lists_directory())

        def data2config(data, cached_config):
            if content_type(list).find('opml') >= 0:
                opml.opml2config(data, cached_config)
            elif content_type(list).find('foaf') >= 0:
                foaf.foaf2config(data, cached_config)
            elif content_type(list).find('csv') >= 0:
                csv_config.csv2config(data, cached_config)
            elif content_type(list).find('config') >= 0:
                cached_config.readfp(data)
            else:
                from planet import shell
                import StringIO
                cached_config.readfp(
                    StringIO.StringIO(
                        shell.run(content_type(list),
                                  data.getvalue(),
                                  mode="filter")))

            if cached_config.sections() in [[], [list]]:
                raise Exception

        for list in reading_lists:
            downloadReadingList(list, parser, data2config)
Example #5
def strava_upload():
    """
        upload to strava, borrowed from https://github.com/dlenski/stravacli
    """
    allowed_exts = {
        '.tcx': lambda v: '<TrainingCenterDatabase' in v[:200],
        '.gpx': lambda v: '<gpx' in v[:200],
        '.fit': lambda v: v[8:12] == '.FIT'
    }

    par = argparse.ArgumentParser(description='Uploads activities to Strava.')
    par.add_argument(
        'activities',
        nargs='*',
        type=argparse.FileType("rb"),
        default=(stdin, ),
        help="Activity files to upload (plain or gzipped {})".format(', '.join(allowed_exts)))
    par.add_argument(
        '-P', '--no-popup', action='store_true', help="Don't browse to activities after upload.")
    par.add_argument(
        '-E',
        '--env',
        help='Look for ACCESS_TOKEN in environment variable '
        'rather than ~/.stravacli')
    grp = par.add_argument_group('Activity file details')
    grp.add_argument('-p', '--private', action='store_true', help='Make activities private')
    grp.add_argument(
        '-t',
        '--type',
        choices=allowed_exts,
        default=None,
        help='Force files to be interpreted as being of given '
        'type (default is to autodetect based on name, or '
        'contents for stdin)')
    grp.add_argument(
        '-x',
        '--xml-desc',
        action='store_true',
        help='Parse name/description fields from GPX and TCX '
        'files.')
    grp.add_argument('-T', '--title', help='Activity title')
    grp.add_argument('-D', '--desc', dest='description', help='Activity description')
    grp.add_argument(
        '-A',
        '--activity-type',
        default=None,
        help='Type of activity. If not specified, the default '
        'value is taken from user profile. '
        'Supported values: \n\t ride, run, swim, workout, '
        'hike, walk, nordicski, alpineski, backcountryski, '
        'iceskate, inlineskate, kitesurf, rollerski, '
        'windsurf, workout, snowboard, snowshoe')
    args = par.parse_args()

    if args.xml_desc:
        if args.title:
            print('argument -T/--title not allowed with argument -x/--xml-desc', file=stderr)
        if args.description:
            print('argument -D/--desc not allowed with argument -x/--xml-desc', file=stderr)

    # Authorize Strava
    cp_ = ConfigParser()
    cp_.read(os.path.expanduser('~/.stravacli'))
    cat = None
    if cp_.has_section('API'):
        if 'access_token' in cp_.options('API'):
            cat = cp_.get('API', 'ACCESS_TOKEN')
            cs = cp_.get('API', 'CLIENT_SECRET')

    while True:
        client = Client(cat)
        try:
            athlete = client.get_athlete()
        except requests.exceptions.ConnectionError:
            print("Could not connect to Strava API", file=stderr)
        except Exception as e:
            print("NOT AUTHORIZED %s" % e, file=stderr)
            print(
                "Need Strava API access token. Launching web browser to "
                "obtain one.",
                file=stderr)
            client = Client()
            webserver = QueryGrabber(response='<title>Strava auth code received!</title>This window can be closed.')
            _scope = 'view_private,write'
            authorize_url = client.authorization_url(client_id=cid, redirect_uri=webserver.root_uri(), scope=_scope)
            webbrowser.open_new_tab(authorize_url)
            webserver.handle_request()
            client.access_token = cat = client.exchange_code_for_token(client_id=cid,client_secret=cs,code=webserver.received['code'])
            cp_.add_section('API')
            cp_.set('API','CLIENT_ID', cid)
            cp_.set('API','CLIENT_SECRET', cs)
            cp_.set('API','ACCESS_TOKEN', cat)
            cp_.write(open(os.path.expanduser('~/.stravacli'),"w"))
        else:
            if not cp_.has_section('API'):
                cp_.add_section('API')
            if 'ACCESS_TOKEN' not in cp_.options('API') or cp_.get('API', 'ACCESS_TOKEN',
                                                                   None) != cat:
                cp_.set('API', 'ACCESS_TOKEN', cat)
                cp_.write(open(os.path.expanduser('~/.stravacli'), "w"))
            break

    print("Authorized to access account of {} {} (id {:d}).".format(athlete.firstname,
                                                                    athlete.lastname, athlete.id))

    for act in args.activities:
        if act is stdin:
            contents = act.read()
            act = StringIO(contents)
            if args.type is None:
                # autodetect gzip and extension based on content
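                # ('\x1f\x8b' is the two-byte magic number that begins every gzip stream)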
                if contents.startswith('\x1f\x8b'):
                    gz_, cf_, uf_ = '.gz', act, gzip.GzipFile(fileobj=act, mode='rb')
                    contents = uf_.read()
                else:
                    gz_, uf_, cf_ = '', act, NamedTemporaryFile(suffix='.gz')
                    gzip.GzipFile(fileobj=cf_, mode='w+b').writelines(act)
                for ext, checker in allowed_exts.items():
                    if checker(contents):
                        print("Uploading {} activity from stdin...".format(ext + gz_))
                        break
                else:
                    print("Could not determine file type of stdin", file=stderr)
            else:
                base, ext = 'activity', args.type
        else:
            base, ext = os.path.splitext(act.name if args.type is None else 'activity.' + args.type)
            # autodetect based on extensions
            if ext.lower() == '.gz':
                base, ext = os.path.splitext(base)
                # un-gzip it in order to parse it
                gz_, cf_, uf_ = '.gz', act, None if args.no_parse else \
                                gzip.GzipFile(fileobj=act, mode='rb')
            else:
                gz_, uf_, cf_ = '', act, NamedTemporaryFile(suffix='.gz')
                gzip.GzipFile(fileobj=cf_, mode='w+b').writelines(act)
            if ext.lower() not in allowed_exts:
                print(
                    "Don't know how to handle extension "
                    "{} (allowed are {}).".format(ext, ', '.join(allowed_exts)),
                    file=stderr)
            print("Uploading {} activity from {}...".format(ext + gz_, act.name))

        # try to parse activity name, description from file if requested
        if args.xml_desc:
            uf_.seek(0, 0)
            if ext.lower() == '.gpx':
                x = etree.parse(uf_)
                nametag, desctag = x.find("{*}name"), x.find("{*}desc")
                title = nametag and nametag.text
                desc = desctag and desctag.text
            elif ext.lower() == '.tcx':
                x = etree.parse(uf_)
                notestag = x.find("{*}Activities/{*}Activity/{*}Notes")
                if notestag is not None:
                    title, desc = (notestag.text.split('\n', 1) + [None])[:2]
        else:
            title = args.title
            desc = args.description

        # upload activity
        try:
            cf_.seek(0, 0)
            upstat = client.upload_activity(
                cf_,
                ext[1:] + '.gz',
                title,
                desc,
                private=args.private,
                activity_type=args.activity_type)
            activity = upstat.wait()
            duplicate = False
        except exc.ActivityUploadFailed as e:
            words = e.args[0].split()
            if words[-4:-1] == ['duplicate', 'of', 'activity']:
                activity = client.get_activity(words[-1])
                duplicate = True
            else:
                raise

        # show results
        uri = "http://strava.com/activities/{:d}".format(activity.id)
        print("  {}{}".format(uri, " (duplicate)" if duplicate else ''), file=stderr)
        if not args.no_popup:
            webbrowser.open_new_tab(uri)
Example #6
def create_config(config_path):
    """ Parse config. """
    config = dict()
    config_raw = ConfigParser()
    config_raw.read(DEFAULT_CONFIG)
    config_raw.read(config_path)
    config['timespan'] = config_raw.getint('Nest Config',
                                           'TIMESPAN_SINCE_CHANGE')
    config['min_pokemon'] = config_raw.getint('Nest Config',
                                              'MIN_POKEMON_NEST_COUNT')
    config['min_spawn'] = config_raw.getint('Nest Config',
                                            'MIN_SPAWNPOINT_NEST_COUNT')
    config['delete_old'] = config_raw.getboolean('Nest Config',
                                                 'DELETE_OLD_NESTS')
    config['event_poke'] = json.loads(
        config_raw.get('Nest Config', 'EVENT_POKEMON'))
    config['pokestop_pokemon'] = config_raw.getboolean('Nest Config',
                                                       'POKESTOP_POKEMON')
    config['p1_lat'] = config_raw.getfloat('Area', 'POINT1_LAT')
    config['p1_lon'] = config_raw.getfloat('Area', 'POINT1_LON')
    config['p2_lat'] = config_raw.getfloat('Area', 'POINT2_LAT')
    config['p2_lon'] = config_raw.getfloat('Area', 'POINT2_LON')
    config['db_r_host'] = config_raw.get('DB Read', 'HOST')
    config['db_r_name'] = config_raw.get('DB Read', 'NAME')
    config['db_r_user'] = config_raw.get('DB Read', 'USER')
    config['db_r_pass'] = config_raw.get('DB Read', 'PASSWORD')
    config['db_r_port'] = config_raw.getint('DB Read', 'PORT')
    config['db_r_charset'] = config_raw.get('DB Read', 'CHARSET')
    config['db_pokemon'] = config_raw.get('DB Read', 'TABLE_POKEMON')
    config['db_pokemon_spawnid'] = config_raw.get('DB Read',
                                                  'TABLE_POKEMON_SPAWNID')
    config['db_pokemon_timestamp'] = config_raw.get('DB Read',
                                                    'TABLE_POKEMON_TIMESTAMP')
    config['db_pokestop'] = config_raw.get('DB Read', 'TABLE_POKESTOP')
    config['db_spawnpoint'] = config_raw.get('DB Read', 'TABLE_SPAWNPOINT')
    config['db_spawnpoint_id'] = config_raw.get('DB Read',
                                                'TABLE_SPAWNPOINT_ID')
    config['db_spawnpoint_lat'] = config_raw.get('DB Read',
                                                 'TABLE_SPAWNPOINT_LAT')
    config['db_spawnpoint_lon'] = config_raw.get('DB Read',
                                                 'TABLE_SPAWNPOINT_LON')
    config['db_w_host'] = config_raw.get('DB Write', 'HOST')
    config['db_w_name'] = config_raw.get('DB Write', 'NAME')
    config['db_w_user'] = config_raw.get('DB Write', 'USER')
    config['db_w_pass'] = config_raw.get('DB Write', 'PASSWORD')
    config['db_w_port'] = config_raw.getint('DB Write', 'PORT')
    config['db_w_charset'] = config_raw.get('DB Write', 'CHARSET')
    config['db_nest'] = config_raw.get('DB Write', 'TABLE_NESTS')
    config['save_path'] = config_raw.get('Geojson', 'SAVE_PATH')
    config['json-stroke'] = config_raw.get('Geojson', 'STROKE')
    config['json-stroke-width'] = config_raw.getfloat('Geojson',
                                                      'STROKE-WIDTH')
    config['json-stroke-opacity'] = config_raw.getfloat(
        'Geojson', 'STROKE-OPACITY')
    config['json-fill'] = config_raw.get('Geojson', 'FILL')
    config['json-fill-opacity'] = config_raw.getfloat('Geojson',
                                                      'FILL-OPACITY')
    config['verbose'] = config_raw.getboolean('Other', 'VERBOSE')
    config['osm_date'] = config_raw.get('Other', 'OSM_DATE')

    return config
Example #7
    
    Usage: reles.py  [cambia | on | off]  [#relay ... | all]  [--help]
           with no arguments, prints the relay status

    NOTE:  an INI file with a [reles] section is read; it labels
           the relays of the relay module:
           /home/firtro/custom/userconfig.ini

"""
# v1.0: 

HOME = "/home/firtro"

from sys import argv as sys_argv, exit as sys_exit
from ConfigParser import ConfigParser
firtroINI = ConfigParser()
firtroINI.read("/home/firtro/custom/userconfig.ini")

# https://github.com/amorphic/tosr0x
import tosr0x
# we set relayCount=4 to skip the initial detection cycle
tosr04 = tosr0x.handler(relayCount=4)
misReles = tosr04[0]  # the only relay module connected
# Dictionary with the state of the relays
relesStatus = misReles.get_relay_positions()

# Dictionary of relays (number -> label)
relesDic = {}
for rele in firtroINI.options("reles"):
    relesDic[rele] = firtroINI.get("reles", rele)
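
# For reference, a hypothetical userconfig.ini along the lines described in
# the header (the actual relay labels depend on the installation):
#
#   [reles]
#   1 = amplifier
#   2 = subwoofer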
Example #8
    'bucket.types': '',
    'bucket.failed-password.size': '3',
    'bucket.failed-password.input': '1',
    'bucket.failed-password.freq': '120000',
    'bucket.password-reset.size': '2',
    'bucket.password-reset.input': '1',
    'bucket.password-reset.freq': '1800000',
    'bucket.email-confirm.size': '2',
    'bucket.email-confirm.input': '1',
    'bucket.email-confirm.freq': '1800000'
}

configfile = expanduser("~/cloudsession.properties")
print('Looking for config file: %s' % configfile)
if isfile(configfile):
    configs = ConfigParser(defaults)
    configs.readfp(FakeSecHead(open(configfile)))

    app_configs = {}
    for (key, value) in configs.items('section'):
        app_configs[key] = value
    app.config['CLOUD_SESSION_PROPERTIES'] = app_configs
else:
    app.config['CLOUD_SESSION_PROPERTIES'] = defaults
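
# FakeSecHead is not defined in this excerpt. A common recipe, consistent with
# how it is used above (and with the 'section' name read back via
# configs.items('section')), prepends a fake section header so ConfigParser
# can parse a sectionless .properties file:
class FakeSecHead(object):
    def __init__(self, fp):
        self.fp = fp
        self.sechead = '[section]\n'

    def readline(self):
        # hand out the fake header once, then defer to the wrapped file
        if self.sechead:
            try:
                return self.sechead
            finally:
                self.sechead = None
        return self.fp.readline()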

# -------------------------------------- Module initialization -------------------------------------------------
logging.basicConfig(level=logging.DEBUG)

if app.config['CLOUD_SESSION_PROPERTIES']['sentry-dsn'] is not None:
    logging.info("Initializing Sentry")
    sentry = Sentry(app,
Example #9
def main():
    """
    Main
    """

    logger = bp_logger('GET_EMAIL_ATTACHMENTS')

    config = ConfigParser()
    config.read(['../project_config'])

    imap_server = imaputils.get_imapserv_conn()

    connection = dbutils.get_connection()

    # Get the IMAP ids of all unseen messages
    msg, emails = imap_server.search(None, "(UNSEEN)")
    # print emails

    # Go through each of the ids given out by IMAP
    for email_id in emails[0].split():

        attachments = imaputils.get_attachment(imap_server, email_id)

        for filename, content in attachments.items():

            # Filename has to match the required pattern.
            if re.search("run-[0-9]+-benchmark.log", filename) is None:
                logger.debug(
                    "The filename {:s} did not match the needed pattern. IMAP id : {:d}"
                    .format(filename, int(email_id)))
                continue

            filelocation = os.path.join(config.get("locations", "queue"),
                                        filename)

            md5 = hashlib.md5()
            md5.update(content)
            md5_hash = md5.hexdigest()
            filelocation = filelocation + '.' + md5_hash

            # Skip if this attachment (name + md5) is already queued on disk
            if util.check_if_file_exists_on_disk(filelocation):
                logger.debug(
                    "The file {:s} exists on the disk.".format(filelocation))
                continue
            else:
                file_queue = open(filelocation, 'w')
                file_queue.write(content)
                logger.info(
                    "The file {:s} has been written to the disk.".format(
                        filelocation))
                file_queue.close()

            # Add code to write it to the db
            query = """
                    INSERT INTO email_logs (`imap_id`, `md5`, `time`)
                    VALUES ({:d}, \"{:s}\", NOW())
                    """.format(int(email_id), md5_hash)

            hw_id = dbutils.db_insert(connection, query)

            if hw_id is None:
                logger.error(
                    "The query \n {:s} has not been inserted.".format(query))
Example #10
    def __init__(self):
        self.cf = ConfigParser()
        self.cf.read(pageElementLocatorPath)
Example #11
        return key in tup[0]

    return False


def _tuple_from_string(string):
    '''
    Accepts well formed seed string, returns tuple (url:port, key)
    '''
    l = string.split(',')
    if len(l) == 1:
        l.append(None)
    return tuple(l)
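
# For example (illustrative values):
#   _tuple_from_string('seed.example.com:8335,0123abcd')
#     -> ('seed.example.com:8335', '0123abcd')
#   _tuple_from_string('seed.example.com:8335')
#     -> ('seed.example.com:8335', None)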


cfg = ConfigParser(DEFAULTS)

if isfile(CONFIG_FILE):
    cfg.read(CONFIG_FILE)
else:
    print 'Warning: configuration file not found: (%s), using default values' % CONFIG_FILE

DATA_FOLDER = _platform_agnostic_data_path(cfg.get('CONSTANTS', 'DATA_FOLDER'))
KSIZE = int(cfg.get('CONSTANTS', 'KSIZE'))
ALPHA = int(cfg.get('CONSTANTS', 'ALPHA'))
TRANSACTION_FEE = int(cfg.get('CONSTANTS', 'TRANSACTION_FEE'))
RESOLVER = cfg.get('CONSTANTS', 'RESOLVER')
SSL = str_to_bool(cfg.get('AUTHENTICATION', 'SSL'))
SSL_CERT = cfg.get('AUTHENTICATION', 'SSL_CERT')
SSL_KEY = cfg.get('AUTHENTICATION', 'SSL_KEY')
USERNAME = cfg.get('AUTHENTICATION', 'USERNAME')
Example #12
from netcontrolldap import netcontrolldap
from pprint import pprint
from ConfigParser import ConfigParser

cfg = ConfigParser()
cfg.read('/etc/ldap/netcontrol')
server = cfg.get('base', 'server')
bindDN = cfg.get('base', 'bindDN')
adminPW = cfg.get('base', 'adminPW')
del cfg

o = netcontrolldap.LDAPConnection(server,
                                  ssl=True,
                                  admPasswd=adminPW,
                                  baseDN=bindDN)

print o.getSAMSID()
print o.getSAMDomain()

o.closeConnection()
Example #13
    def init_config(self, config_file_path):
        conf = ConfigParser()
        conf.read(config_file_path)
        self.info.id = conf.get('device', 'id')
        self.info.key = conf.get('device', 'key')
Example #14
class AccountManager:
    monitor = xbmc.Monitor()
    config = ConfigParser()
    addon = xbmcaddon.Addon()
    addonname = addon.getAddonInfo('name')
    addon_data_path = utils.Utils.unicode(
        xbmc.translatePath(addon.getAddonInfo('profile')))
    config_file = 'onedrive.ini'
    config_path = os.path.join(addon_data_path, config_file)

    def __init__(self):
        if not os.path.exists(self.addon_data_path):
            try:
                os.makedirs(self.addon_data_path)
            except:
                self.monitor.waitForAbort(3)
                os.makedirs(self.addon_data_path)
        self.config.read(self.config_path)

    def reload(self):
        self.config.read(self.config_path)

    def map(self):
        onedrives = {}
        for driveid in self.config.sections():
            onedrive = OneDrive(self.addon.getSetting('client_id_oauth2'))
            onedrive.driveid = driveid
            onedrive.event_listener = self.event_listener
            onedrive.name = self.config.get(driveid, 'name')
            onedrive.access_token = self.config.get(driveid, 'access_token')
            onedrive.refresh_token = self.config.get(driveid, 'refresh_token')
            onedrives[driveid] = onedrive
        return onedrives

    def get(self, driveid):
        onedrives = self.map()
        if driveid in onedrives:
            return onedrives[driveid]
        raise AccountNotFoundException()

    def event_listener(self, onedrive, event, obj):
        if event == 'login_success':
            self.save(onedrive)

    def save(self, onedrive):
        onedrives = self.map()
        if onedrive.driveid not in onedrives:
            self.config.add_section(onedrive.driveid)

        self.config.set(onedrive.driveid, 'name', onedrive.name)
        self.config.set(onedrive.driveid, 'access_token',
                        onedrive.access_token)
        self.config.set(onedrive.driveid, 'refresh_token',
                        onedrive.refresh_token)

        with open(self.config_path, 'wb') as configfile:
            self.config.write(configfile)

    def remove(self, driveid):
        onedrives = self.map()
        if driveid in onedrives:
            self.config.remove_section(driveid)
            with open(self.config_path, 'wb') as configfile:
                self.config.write(configfile)
Example #15
def read_file(filename):
    config = ConfigParser()
    config.read(filename)
    return config.defaults()
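# Note: ConfigParser.defaults() returns only the [DEFAULT] section as a dict,
# so read_file() yields the defaults mapping rather than any named sections.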
Example #16
    def ReadSettingsFile(self, in_file=''):
        if in_file != '':
            if os.path.isfile(in_file):
                pars = ConfigParser()
                pars.read(in_file)
                print 'Loading settings from file:', in_file

                if pars.has_section('BASIC'):
                    if pars.has_option('BASIC', 'run'):
                        self.run = pars.getint('BASIC', 'run')
                    else:
                        ExitMessage('Must specify run under [BASIC]. Exiting...')
                    if pars.has_option('BASIC', 'events'):
                        self.total_events = pars.getint('BASIC', 'events')
                    if pars.has_option('BASIC', 'ana_events'):
                        self.ana_events = pars.getint('BASIC', 'ana_events')
                    if pars.has_option('BASIC', 'first_event'):
                        self.first_event = pars.getint('BASIC', 'first_event')
                    if pars.has_option('BASIC', 'repeater_card'):
                        self.repeater_card = pars.getint('BASIC', 'repeater_card')
                    if pars.has_option('BASIC', 'voltage'):
                        self.voltage = pars.getfloat('BASIC', 'voltage')
                    if pars.has_option('BASIC', 'current_begin'):
                        self.current_begin = pars.getfloat('BASIC', 'current_begin')
                    if pars.has_option('BASIC', 'current_end'):
                        self.current_end = pars.getfloat('BASIC', 'current_end')
                    if pars.has_option('BASIC', 'dut_input'):
                        self.dut_input = pars.getint('BASIC', 'dut_input')
                    if pars.has_option('BASIC', 'dut_saturation'):
                        self.dut_saturation = pars.getint('BASIC', 'dut_saturation')
                    if pars.has_option('BASIC', 'data_dir'):
                        self.data_dir = pars.get('BASIC', 'data_dir')
                        if not os.path.isdir(self.data_dir):
                            ExitMessage('The specified data directory does not exist. Exiting...')
                    if pars.has_option('BASIC', 'sub_dir'):
                        self.sub_dir = pars.get('BASIC', 'sub_dir')
                    if pars.has_option('BASIC', 'output_dir'):
                        self.output_dir = pars.get('BASIC', 'output_dir')
                    if pars.has_option('BASIC', 'analysis_path'):
                        self.analysis_path = pars.get('BASIC', 'analysis_path')
                        if not os.path.isdir(self.analysis_path):
                            ExitMessage('Must give a valid analysis_path under [BASIC]. Exiting...')
                    if pars.has_option('BASIC', 'num_parallel') and self.do_parallel:
                        self.num_parallel = pars.getint('BASIC', 'num_parallel') if pars.getint('BASIC', 'num_parallel') <= mp.cpu_count() else self.num_parallel

                if pars.has_section('DUTS'):
                    if pars.has_option('DUTS', 'num'):
                        self.dut_num = pars.getint('DUTS', 'num')
                    if pars.has_option('DUTS', 'not_connected'):
                        self.not_connected = ChannelStringToArray(pars.get('DUTS', 'not_connected'))
                    if pars.has_option('DUTS', 'screened'):
                        self.screened = ChannelStringToArray(pars.get('DUTS', 'screened'))
                    if pars.has_option('DUTS', 'noisy'):
                        self.noisy = ChannelStringToArray(pars.get('DUTS', 'noisy'))

                for dut in xrange(self.dut_num):
                    if pars.has_section('DUT{d}'.format(d=dut)):
                        if pars.has_option('DUT{d}'.format(d=dut), 'name'):
                            self.dut_name[dut] = pars.get('DUT{d}'.format(d=dut), 'name')
                        if pars.has_option('DUT{d}'.format(d=dut), 'x0'):
                            self.dut_pos[dut] = pars.getfloat('DUT{d}'.format(d=dut), 'x0')
                        if pars.has_option('DUT{d}'.format(d=dut), 'pitch'):
                            self.dut_pitch[dut] = pars.getfloat('DUT{d}'.format(d=dut), 'pitch')
                        if pars.has_option('DUT{d}'.format(d=dut), 'first'):
                            self.dut_first_ch[dut] = pars.getint('DUT{d}'.format(d=dut), 'first')
                        if pars.has_option('DUT{d}'.format(d=dut), 'skip'):
                            self.dut_skip_ch[dut] = ChannelStringToArray(pars.get('DUT{d}'.format(d=dut), 'skip'))
                        if pars.has_option('DUT{d}'.format(d=dut), 'last'):
                            self.dut_last_ch[dut] = pars.getint('DUT{d}'.format(d=dut), 'last')

                if pars.has_section('ANALYSIS'):
                    if pars.has_option('ANALYSIS', 'do_pedestal'):
                        self.do_pedestal = pars.getboolean('ANALYSIS', 'do_pedestal')
                    if pars.has_option('ANALYSIS', 'do_cluster'):
                        self.do_cluster = pars.getboolean('ANALYSIS', 'do_cluster')
                    if pars.has_option('ANALYSIS', 'do_cluster_analysis'):
                        self.do_cluster_ana = pars.getboolean('ANALYSIS', 'do_cluster_analysis')
                    if pars.has_option('ANALYSIS', 'do_alignment'):
                        self.do_alignment = pars.getboolean('ANALYSIS', 'do_alignment')
                    if pars.has_option('ANALYSIS', 'do_alignment_analysis'):
                        self.do_alignment_ana = pars.getboolean('ANALYSIS', 'do_alignment_analysis')
                    if pars.has_option('ANALYSIS', 'do_transparent'):
                        self.do_transparent = pars.getboolean('ANALYSIS', 'do_transparent')
                    if pars.has_option('ANALYSIS', 'do_3d'):
                        self.do_3d = pars.getboolean('ANALYSIS', 'do_3d')

                if pars.has_section('PEDESTAL'):
                    if pars.has_option('PEDESTAL', 'tel_ped_hit_factor'):
                        self.tel_hit_factor = pars.getfloat('PEDESTAL', 'tel_ped_hit_factor')
                    if pars.has_option('PEDESTAL', 'dut_ped_hit_factor'):
                        self.dut_hit_factor = pars.getfloat('PEDESTAL', 'dut_ped_hit_factor')
                    if pars.has_option('PEDESTAL', 'do_cmc'):
                        self.do_cmc = pars.getboolean('PEDESTAL', 'do_cmc')
                    if pars.has_option('PEDESTAL', 'cm_cut'):
                        self.cm_cut = pars.getfloat('PEDESTAL', 'cm_cut')
                    if pars.has_option('PEDESTAL', 'sliding_length'):
                        self.sliding_length = pars.getint('PEDESTAL', 'sliding_length')

                if pars.has_section('CLUSTER'):
                    if pars.has_option('CLUSTER', 'clust_seed_facts'):
                        self.clust_seed = eval(pars.get('CLUSTER', 'clust_seed_facts'))
                    if pars.has_option('CLUSTER', 'clust_hit_facts'):
                        self.clust_hit = eval(pars.get('CLUSTER', 'clust_hit_facts'))

                if pars.has_section('SELECTION_SCINT'):
                    if pars.has_option('SELECTION_SCINT', 'xlow'):
                        self.scint_fid_cut['xlow'] = pars.getfloat('SELECTION_SCINT', 'xlow')
                    if pars.has_option('SELECTION_SCINT', 'xhigh'):
                        self.scint_fid_cut['xhigh'] = pars.getfloat('SELECTION_SCINT', 'xhigh')
                    if pars.has_option('SELECTION_SCINT', 'ylow'):
                        self.scint_fid_cut['ylow'] = pars.getfloat('SELECTION_SCINT', 'ylow')
                    if pars.has_option('SELECTION_SCINT', 'yhigh'):
                        self.scint_fid_cut['yhigh'] = pars.getfloat('SELECTION_SCINT', 'yhigh')

                for dut in xrange(self.dut_num):
                    if pars.has_section('SELECTION{d}'.format(d=dut)):
                        if pars.has_option('SELECTION{d}'.format(d=dut), 'xlow') and pars.has_option('SELECTION{d}'.format(d=dut), 'xhigh') and pars.has_option('SELECTION{d}'.format(d=dut), 'ylow') and pars.has_option('SELECTION{d}'.format(d=dut), 'yhigh'):
                            self.fid_region[dut] = {'xlow': pars.getfloat('SELECTION{d}'.format(d=dut), 'xlow'), 'xhigh': pars.getfloat('SELECTION{d}'.format(d=dut), 'xhigh'), 'ylow': pars.getfloat('SELECTION{d}'.format(d=dut), 'ylow'), 'yhigh': pars.getfloat('SELECTION{d}'.format(d=dut), 'yhigh')}
                        elif pars.has_option('SELECTION{d}'.format(d=dut), 'xlow') or pars.has_option('SELECTION{d}'.format(d=dut), 'xhigh') or pars.has_option('SELECTION{d}'.format(d=dut), 'ylow') or pars.has_option('SELECTION{d}'.format(d=dut), 'yhigh'):
                            print 'setting default fiducial region for not specifying all the parameters xlow, xhigh, ylow, yhigh'
                            self.fid_region[dut] = {'xlow': 0, 'xhigh': 255, 'ylow': 0, 'yhigh': 255}
                        else:
                            self.fid_region[dut] = {'xlow': 0, 'xhigh': 255, 'ylow': 0, 'yhigh': 255}
                    else:
                        ExitMessage('You should have a SELECTION section for each dut, i.e. SELECTION0 and SELECTION1 if num of DUTS is 2')
                if pars.has_section('ALIGNMENT'):
                    if pars.has_option('ALIGNMENT', 'align_dut'):
                        self.align_dut = min(0, pars.getint('ALIGNMENT', 'align_dut'))
                    if pars.has_option('ALIGNMENT', 'z_coordinates'):
                        self.z_coordinates = eval(pars.get('ALIGNMENT', 'z_coordinates'))
                    if pars.has_option('ALIGNMENT', 'alignment_method'):
                        if pars.get('ALIGNMENT', 'alignment_method') in ['events', 'percentage']:
                            self.align_method = pars.get('ALIGNMENT', 'alignment_method')
                    if pars.has_option('ALIGNMENT', 'alignment_factor'):
                        self.align_factor = pars.getint('ALIGNMENT', 'alignment_factor')
                    if pars.has_option('ALIGNMENT', 'do_align_dut'):
                        self.do_align_dut = pars.getboolean('ALIGNMENT', 'do_align_dut')
                    if pars.has_option('ALIGNMENT', 'no_align_dut_chs'):
                        self.no_align_dut_chs = ChannelStringToArray(pars.get('ALIGNMENT', 'no_align_dut_chs'))
                    if pars.has_option('ALIGNMENT', 'alignment_chi2_cut'):
                        self.align_chi2_cut = pars.getfloat('ALIGNMENT', 'alignment_chi2_cut')

                if pars.has_section('TRANSPARENT'):
                    if pars.has_option('TRANSPARENT', 'max_transp_cluster_size'):
                        self.max_transp_clust_size = pars.getint('TRANSPARENT', 'max_transp_cluster_size')
                    if pars.has_option('TRANSPARENT', 'save_transp_cluster_size'):
                        self.save_transp_clust_size = pars.getint('TRANSPARENT', 'save_transp_cluster_size')
                    if pars.has_option('TRANSPARENT', 'analyse_align'):
                        self.do_analyse_alignment = pars.getboolean('TRANSPARENT', 'analyse_align')
        self.SetFileAndTreeName()
        CreateDirectoryIfNecessary(self.output_dir + '/' + self.sub_dir + '/' + str(self.run))
        self.SaveSettingsAsPickle()
Example #17
def config_read(config_fnam, val_doc_adv=False):
    """
    Parse configuration files of the form:
        # comment <-- not parsed
        [group-name]
        key = val ; doc string
        [group-name-advanced]
        key = val ; doc string
    
    In the output dictionary, all entries in 'group-name-advanced' are merged into 'group-name'.
    We attempt to parse 'val' as a python object; if this fails, it is kept as a string.
    
    Parameters
    ----------
    config_fnam : string or list of strings
        filename(s) of the configuration file(s) to be parsed; the first successfully parsed file is used.
    
    val_doc_adv : bool
        Toggles the output format of 'config_dict', see below.
    
    Returns
    -------
    config_dict : OrderedDict
        If val_doc_adv is True then config_dict is an ordered dictionary of the form:
            output = {group-name: {key : (eval(val), doc_string, is_advanced)}}
        
        Every value in the returned dictionary is a length-3 tuple of the form:
            (val, doc_string, is_advanced)
        
        If the doc string is not supplied in the file then doc_string is None. If 
        val_doc_adv is False then config_dict is an ordered dictionary of the form:
            output = {group-name: {key : eval(val)}}
    
    fnam : str
        A string containing the name of the successfully read file.
    """
    c     = ConfigParser()
    fnams = c.read(config_fnam)
    if len(fnams) == 0 :
        raise ValueError('could not find config file: ' + str(config_fnam))

    # read the first successfully read file
    c    = ConfigParser()
    fnam = c.read(fnams[0])
    out  = OrderedDict()
    for sect in c.sections():
        s = sect.split('-advanced')[0]
        
        if s not in out :
            out[s] = OrderedDict()
        
        advanced = sect.endswith('-advanced')
        
        for op in c.options(sect):
            # split on ';' to separate doc string from value
            doc  = None
            vals = c.get(sect, op).split(';')
            if len(vals) == 1 :
                v = vals[0].strip()
            elif len(vals) == 2 :
                v   = vals[0].strip()
                doc = vals[1].strip()
            else :
                raise ValueError('could not parse config line: ' + str(sect) + ' ' + c.get(sect, op))
            
            # try to safely evaluate the str as a python object
            try : 
                v = literal_eval(v)
            except ValueError :
                pass
            except SyntaxError :
                pass
            
            # add to the dictionary 
            out[s][op] = (v, doc, advanced)
    
    if val_doc_adv is False :
        out2 = OrderedDict()
        for s in out.keys():
            out2[s] = OrderedDict()
            for k in out[s].keys():
                out2[s][k] = out[s][k][0]
    else :
        out2 = out
    return out2, fnam[0]
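
# Example usage (hypothetical config file 'example.ini'):
#
#   [params]
#   roi = (64, 64) ; region of interest
#   [params-advanced]
#   dtype = 'float32'
#
# config, fnam = config_read('example.ini')
# config['params']['roi']   -> (64, 64)
# config['params']['dtype'] -> 'float32'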
Example #18
    def __init__(self):
        self.conf = ConfigParser()
        self.conf.read("wsbs.cfg")
Example #19
        # (i.e. $HOME/.pypoprc) so that in subsequent invocations of the
        # script it will use the previous choices as defaults.

        # For systems without a concept of a $HOME directory (i.e.
        # Windows), it will look for .pypoprc in the current directory.

        # The '.pypoprc' file will be created if it does not previously
        # exist.  The format of this file is identical to the ConfigParser
        # format (i.e. the .ini file format).

        if os.environ.get('HOME'):
            pypoprcFilename = os.path.join(os.environ['HOME'], '.pypoprc')
        else:
            pypoprcFilename = '.pypoprc'

        pypoprc = ConfigParser()

        if os.path.isfile(pypoprcFilename):
            pypoprc.read(pypoprcFilename)
            configFilename = pypoprc.get('Files', 'config')
            fileName = pypoprc.get('Files', 'pop')
        else:
            configFilename = 'config.ini'
            fileName = 'no default'

        print interactive_message

        # read user input for both filenames
        configFilename = getUserFilenameInput("config", configFilename)
        fileNames.append(getUserFilenameInput("population", fileName))
Example #20
def main():
    # parse OPNsense external ACLs config
    if os.path.exists(acl_config_fn):
        # create acl directory (if new)
        if not os.path.exists(acl_target_dir):
            os.mkdir(acl_target_dir)
        else:
            # remove index files
            for filename in glob.glob('%s/*.index' % acl_target_dir):
                os.remove(filename)
        # read config and download per section
        cnf = ConfigParser()
        cnf.read(acl_config_fn)
        for section in cnf.sections():
            target_filename = acl_target_dir + '/' + section
            if cnf.has_option(section, 'url'):
                # collect filters to apply
                acl_filters = list()
                if cnf.has_option(section, 'filter'):
                    for acl_filter in cnf.get(section,
                                              'filter').strip().split(','):
                        if len(acl_filter.strip()) > 0:
                            acl_filters.append(acl_filter)

                # define target(s)
                targets = {
                    'domain': {
                        'filename': target_filename,
                        'handle': None,
                        'class': DomainSorter
                    }
                }

                # only generate files if enabled, otherwise dump empty files
                if cnf.has_option(section, 'enabled') and cnf.get(
                        section, 'enabled') == '1':
                    download_url = cnf.get(section, 'url')
                    if cnf.has_option(section, 'username'):
                        download_username = cnf.get(section, 'username')
                        download_password = cnf.get(section, 'password')
                    else:
                        download_username = None
                        download_password = None
                    if cnf.has_option(section, 'sslNoVerify') and cnf.get(
                            section, 'sslNoVerify') == '1':
                        sslNoVerify = True
                    else:
                        sslNoVerify = False
                    acl = Downloader(download_url, download_username,
                                     download_password, acl_max_timeout,
                                     sslNoVerify)
                    all_filenames = list()
                    for filename, basefilename, file_ext, line in acl.download(
                    ):
                        if filename_in_ignorelist(basefilename, file_ext):
                            # ignore documents, licenses and readme's
                            continue

                        # detect output type
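                        # lines containing '/' or '|' are URL (regex) entries; bare names are domains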
                        if '/' in line or '|' in line:
                            filetype = 'url'
                        else:
                            filetype = 'domain'

                        if filename not in all_filenames:
                            all_filenames.append(filename)

                        if len(acl_filters) > 0:
                            acl_found = False
                            for acl_filter in acl_filters:
                                if acl_filter in filename:
                                    acl_found = True
                                    break
                            if not acl_found:
                                # skip this acl entry
                                continue

                        if filetype in targets and targets[filetype][
                                'handle'] is None:
                            targets[filetype]['handle'] = targets[filetype][
                                'class'](targets[filetype]['filename'], 'w')
                        if filetype in targets:
                            targets[filetype]['handle'].write(line)
                            targets[filetype]['handle'].write('\n')
                    # save index to disc
                    with open('%s.index' % target_filename,
                              'w',
                              buffering=10240) as idx_out:
                        index_data = dict()
                        for filename in all_filenames:
                            if len(filename.split('/')) > 2:
                                index_key = '/'.join(filename.split('/')[1:-1])
                                if index_key not in index_data:
                                    index_data[index_key] = index_key
                        idx_out.write(json.dumps(index_data))

                # cleanup
                for filetype in targets:
                    if targets[filetype]['handle'] is not None:
                        targets[filetype]['handle'].close()
                    elif cnf.has_option(
                            section,
                            'enabled') and cnf.get(section, 'enabled') != '1':
                        if os.path.isfile(targets[filetype]['filename']):
                            # disabled, remove previous data
                            os.remove(targets[filetype]['filename'])
                    elif not os.path.isfile(targets[filetype]['filename']):
                        # no data fetched and no file available, create new empty file
                        with open(targets[filetype]['filename'],
                                  'w') as target_out:
                            target_out.write("")
Example #21
  print config.name()
  print config.link()

  # per template configuration
  print config.days_per_page('atom.xml.tmpl')
  print config.encoding('index.html.tmpl')

Todo:
  * error handling (example: no planet section)
"""

import os, sys, re, urllib
from ConfigParser import ConfigParser
from urlparse import urljoin

parser = ConfigParser()

planet_predefined_options = ['filters']


def __init__():
    """define the struture of an ini file"""
    import config

    # get an option from a section
    def get(section, option, default):
        if section and parser.has_option(section, option):
            return parser.get(section, option)
        elif parser.has_option('Planet', option):
            if option == 'log_format':
                return parser.get('Planet', option, raw=True)
Example #22
    def __init__(self, path):
        self.path = path
        self.config_parser = ConfigParser(DEFAULTS)
Example #23
def downloadReadingList(list,
                        orig_config,
                        callback,
                        use_cache=True,
                        re_read=True):
    from planet import logger
    import config
    try:

        import urllib2, StringIO
        from planet.spider import filename

        # list cache file name
        cache_filename = filename(config.cache_lists_directory(), list)

        # retrieve list options (e.g., etag, last-modified) from cache
        options = {}

        # add original options
        for key in orig_config.options(list):
            options[key] = orig_config.get(list, key)

        try:
            if use_cache:
                cached_config = ConfigParser()
                cached_config.read(cache_filename)
                for option in cached_config.options(list):
                    options[option] = cached_config.get(list, option)
        except:
            pass

        cached_config = ConfigParser()
        cached_config.add_section(list)
        for key, value in options.items():
            cached_config.set(list, key, value)

        # read list
        curdir = getattr(os.path, 'curdir', '.')
        if sys.platform.find('win') < 0:
            base = urljoin('file:', os.path.abspath(curdir))
        else:
            path = os.path.abspath(os.path.curdir)
            base = urljoin('file:///',
                           path.replace(':', '|').replace('\\', '/'))

        request = urllib2.Request(urljoin(base + '/', list))
        if options.has_key("etag"):
            request.add_header('If-None-Match', options['etag'])
        if options.has_key("last-modified"):
            request.add_header('If-Modified-Since', options['last-modified'])
        response = urllib2.urlopen(request)
        if response.headers.has_key('etag'):
            cached_config.set(list, 'etag', response.headers['etag'])
        if response.headers.has_key('last-modified'):
            cached_config.set(list, 'last-modified',
                              response.headers['last-modified'])

        # convert to config.ini
        data = StringIO.StringIO(response.read())

        if callback: callback(data, cached_config)

        # write to cache
        if use_cache:
            cache = open(cache_filename, 'w')
            cached_config.write(cache)
            cache.close()

        # re-parse and proceed
        logger.debug("Using %s readinglist", list)
        if re_read:
            if use_cache:
                orig_config.read(cache_filename)
            else:
                cdata = StringIO.StringIO()
                cached_config.write(cdata)
                cdata.seek(0)
                orig_config.readfp(cdata)
    except:
        try:
            if re_read:
                if use_cache:
                    if not orig_config.read(cache_filename): raise Exception()
                else:
                    cdata = StringIO.StringIO()
                    cached_config.write(cdata)
                    cdata.seek(0)
                    orig_config.readfp(cdata)
                logger.info("Using cached %s readinglist", list)
        except:
            logger.exception("Unable to read %s readinglist", list)
Example #24
def main():

    ini_total = datetime.now()

    #Path descriptors
    path_descriptors = "descriptors/"

    #Path image databases
    path_img_databases = "img_databases/"

    #Path results (generated feature vectors)
    id_exp = sys.argv[1]
    path_results = "results/" + id_exp + "/"

    print "\n*******************************************"
    print "************** Extraction *****************"

    # these parameters will be provided by the main program
    descritores = []
    bases = []
    medidas = []

    #uses a configuration file to get experiment information
    from ConfigParser import ConfigParser
    cfg = ConfigParser()
    cfg.readfp(open(path_results + "exp_cfg.ini"))
    descritores = cfg.get("Experiment", "descritores").split(",")
    bases = cfg.get("Experiment", "bases").split(",")
    usaClasses = int(cfg.get("Experiment", "classes"))
    iBases = 0
    for b in bases:
        bases[iBases] = b.split(":")
        iBases = iBases + 1

    print "descriptors =", descritores
    print "image databases =", bases
    print "classes = ", usaClasses
    #Still missing data from evaluation metrics - no impact currently

    select_n_img_paths(1000000, bases)
    print "Collection: \n", collection, "\n"
    collection_size = len(collection)
    print "SIZE of collection: ", collection_size, "\n"

    #Dealing with empty collection
    if (collection_size == 0):
        print "Empty Collection!"
        sys.exit(1)

    #time accumulator - initializes with zero
    tempo = {}
    tempos = {}  #saves all computed times
    for desc in descritores:
        tempo[desc] = {'total': 0, 'avg': 0}
        tempos[desc] = {}

    last_progress = 0  #indicates the extraction progress (steps of 10%)
    print "ext_progress:", last_progress

    ##########################################
    ### SCANS ALL IMAGES IN THE COLLECTION ###
    for img in collection:

        print "\n", img

        #converts the image to ppm - descriptors need PPM P6 input

        #whenever 10% of progress is achieved, updates the progress
        #BUG - when an invalid image is found, progress counter can become incorrect
        if ((float(collection.index(img) + 1) / collection_size) - last_progress) >= 0.10:
            last_progress = float(collection.index(img) + 1) / collection_size
            print "ext_progress:", str(last_progress)

        try:

            #gets only file name (no extension, no path)
            nome_img = img[1].split("/")[-1:][0].split(".")[0]

            #by default, every image is considered as PPM
            isPPM = 1  # flag to indicate if the image is PPM or not

            #if image is not PPM (from file name extension), converts to PPM
            if (img[1].split("/")[-1:][0].split(".")[-1].upper() != "PPM"):

                isPPM = 0  #flag now indicating that image was not PPM

                #converts to PPM
                im = Image.open(img[1])
                im = im.convert("RGB")
                #save on temp directory /tmp/ -
                im.save("/tmp/" + nome_img + ".ppm")
                print "saving img: ", nome_img

            if (isPPM == 1):
                img_path = img[1]
            else:
                img_path = "/tmp/" + nome_img + ".ppm"

            #correcting names with \\
            img_path = img_path.replace("\\", "\\\\")
            #problems may happen when there are single quotes in file name

            ################################
            #### APPLYING DESCRIPTORS ######
            ################################
            # All descriptors are applied below, in order to use the same PPM image just created (if it was not originally PPM)
            ################################

            for desc in descritores:
                print "Descriptor:", desc, "..."

                nome_desc = desc

                #adjusts the fv path to have information about the experiments, image database and descriptor
                dir_path = path_results + "fv_" + id_exp + "_" + img[
                    0] + "_" + nome_desc

                #adjusts the file name to have all image path
                fv_path = dir_path + "/" + img[1].replace('/', '::') + ".txt"

                #checks if the fv directory already exists
                if (os.path.isdir(dir_path) == False):
                    os.mkdir(dir_path)  #if not, creates it

                #correcting names with \\
                fv_path = fv_path.replace("\\", "\\\\")

                print "fv_path=", fv_path
                print "fv_path_size=", len(fv_path)

                #if the feature vector (fv) was already extracted, does not
                #extract again; this allows restarting the extraction process
                #from where it stopped
                if not os.path.exists(fv_path):

                    #setup code run once by timeit: loads the descriptor's
                    #shared library with ctypes; img_path and fv_path were
                    #already backslash-escaped above, so they must not be
                    #escaped a second time here
                    setup = """
import ctypes
lib = ctypes.CDLL("%s%s.so")
img_path = "%s"
fv_path = "%s"
                    """ % (path_descriptors, desc, img_path, fv_path)

                    #statement to be timed: the Extraction() function exported
                    #by the descriptor's shared library
                    cmd = '''
lib.Extraction(img_path, fv_path)
                    '''

                    #number of executions over which the time is measured
                    num_exec = 3

                    #timeit returns the TOTAL time for num_exec executions,
                    #so dividing by num_exec gives the per-execution average
                    t = timeit.Timer(stmt=cmd, setup=setup)
                    total_time = t.timeit(number=num_exec)
                    avg_time = total_time / num_exec

                    #the average of the num_exec executions is accumulated in 'total'
                    tempo[desc]['total'] += avg_time

                    #saves the average time computed for this image
                    print "(index) current image =", collection.index(img)
                    tempos[desc][collection.index(img)] = avg_time
                    print "computed -> tempos[", desc, "][", collection.index(img), "] =", avg_time

                else:
                    print "fv was already extracted; does not extract again."

            ################################
            ## END - APPLYING DESCRIPTORS ##
            ################################

            if not isPPM:
                #removes the temporary PPM file that was created;
                #images that were originally PPM must not be removed!
                os.remove("/tmp/" + nome_img + ".ppm")

            print "number of images in the vectors of time for the descriptor", desc, "=", len(
                tempos[desc])

        except IOError:
            print "Problems with image file. Skipping to the next image..."
            collection_size -= 1
            #raise  #uncomment to see the full traceback while debugging

        except Exception:
            print "Error processing image..."
            collection_size -= 1
            #raise  #uncomment to see the full traceback while debugging

    print "ext_progress: 1"

    #POSTGRESQL CONNECTION
    try:
        conn = util.connect()
    except Exception:
        print "Error connecting to the database"
        raise  #re-raising aborts the script; the old sys.exit() after raise was unreachable
    cur = conn.cursor()

    print "COLLECTION SIZE (collection_size):", collection_size
    print "FINAL COLLECTION SIZE (len(collection)):", len(
        collection
    )  #there may be difference if some images could not be processed

    print "\n================="
    print "Extraction times:"
    for desc in descritores:
        try:
            #divides by the number of recorded times instead of collection_size:
            #if the experiment was restarted, fewer times than images are recorded
            tempo[desc]['avg'] = tempo[desc]['total'] / len(tempos[desc])
            print "\n", desc, ":\ntotal time = ", tempo[desc]['total']
            print "total avg time = ", tempo[desc]['avg']
            print "collection size = ", collection_size
            print "number of times = ", len(tempos[desc])

            #computes the population standard deviation:
            #stddev = sqrt(sum((t - avg)**2) / n)
            soma = 0
            for i, t in tempos[desc].iteritems():
                soma += pow(t - tempo[desc]['avg'], 2)

            desvio_padrao = pow(soma / len(tempos[desc]), 0.5)
            print desc, "standard deviation = ", desvio_padrao

            #POSTGRESQL - registers the average time of one feature extraction and
            #its standard deviation; a parameterized query (DB-API style, e.g.
            #psycopg2) replaces the original string concatenation to avoid quoting bugs
            query = ("INSERT INTO experimenttime "
                     "(idexperiment, iddescriptor, idevaluationmeasure, value, stddev) "
                     "VALUES (%s, %s, 1, %s, %s)")
            cur.execute(query, (id_exp, desc, tempo[desc]['avg'], desvio_padrao))

        except Exception:
            print "Empty vector of times! Possible reasons: (1) extraction was skipped because it was already done before, or (2) there is a bug in the descriptor's extraction function."

    print "EXTRACTION - SUCESS!!!"

    #POSTGRESQL - closing connection
    conn.commit()
    cur.close()
    conn.close()

    fim_total = datetime.now()
    print "[EXTRACTION] Experiment started:", ini_total.year, "/", ini_total.month, "/", ini_total.day, " - ", ini_total.hour, ":", ini_total.minute, ":", ini_total.second
    print "[EXTRACTION] Experiment concluded: ", fim_total.year, "/", fim_total.month, "/", fim_total.day, " - ", fim_total.hour, ":", fim_total.minute, ":", fim_total.second
Exemple #25
0
    def __init__(self, config_file_path=DEFAULT_CONFIG_PATH):
        self.config = ConfigParser()
        self.config.read(config_file_path)
Exemple #26
0
def read_config(config_path):
    config = ConfigParser()
    config.readfp(open(config_path))  #open() instead of the removed file() builtin
    return config
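#note: readfp() is a Python 2 idiom; a rough Python 3 equivalent (a sketch,
#not from the original code) would be:
#    from configparser import ConfigParser
#    config = ConfigParser()
#    with open(config_path) as f:
#        config.read_file(f)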
Exemple #27
0
def get_config():
    """ Get the configuration """
    conf = ConfigParser()
    conf.read('sample.cfg')
    return conf
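#typical usage of the helper above (section/option names are illustrative):
#    conf = get_config()
#    value = conf.get('some_section', 'some_option')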
Exemple #28
0
import os
import sys
import gettext
import commands
import dircache
import signal
import subprocess
import glib

from ConfigParser import ConfigParser
try:
    import pygtk
    pygtk.require("2.0")
    import gtk
    import gtk.glade  #needed below for gtk.glade.bindtextdomain()
except ImportError:
    print "Please install pyGTK or GTKv2 or set your PYTHONPATH correctly"
    sys.exit(1)

DDUCONFIG = ConfigParser()
DDUCONFIG.read(os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), 
                            "ddu.conf"))
ABSPATH = DDUCONFIG.get('general','abspath')
SCRIPTDIR = os.path.join(ABSPATH,'scripts')
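#ddu.conf is expected to provide at least (contents illustrative):
#    [general]
#    abspath=/path/to/ddu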
try:
    gettext.bindtextdomain('ddu','%s/i18n' % ABSPATH)
    gettext.textdomain('ddu')
    gtk.glade.bindtextdomain('ddu','%s/i18n' % ABSPATH)
except AttributeError:
    pass

_ = gettext.gettext

class BrowseDlg:
    """
Exemple #29
0
#imports needed by this fragment (coroutine_closure and __strip_comments are
#helpers defined elsewhere in the original module)
import re
import tempfile
from ConfigParser import ConfigParser
from StringIO import StringIO


def read_perl_config_ini(ini_file, dummy_section='_DEFAULT_'):
    ''' 
    Return a ConfigParser() object from a perl config::inifiles type file.
    Basic differences:
    - config::inifiles allow "key=<<value"-style multi-line assignments;
    - config::inifiles do not require an initial [section]; if missing
      from ini_file, insert a "dummy" section.
    '''
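    #example of the config::inifiles style handled below (contents illustrative):
    #    [section]
    #    key=<<EOT
    #    first part
    #    second part
    #    EOT
    #the multi-line value is concatenated:  key=first partsecond part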

    in_multiline = False
    key, endkey = False, False
    multiline_value = ''
    multiline_assignment_rx = re.compile(r'^(\w+)=<<(\w+)')
    section_header_rx = re.compile(r'\[([\w_]+)\]')

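    #accumulator coroutine: each line sent via emit() is appended (with a
    #newline) to the in-memory StringIO buffer fio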
    fio = StringIO()
    @coroutine_closure(fio)
    def acc(fio):
        while True:
            line = (yield)
            if line is None:
                break
            fio.write(line)
            fio.write('\n')

    def emit(line):
        acc.send(line)

    # read config file, build contents in fio:
    with open(ini_file) as f:
        first_line = next(f).strip()
        if not section_header_rx.match(first_line):
            emit('[{}]'.format(dummy_section))
        emit(first_line)

        for line in (l.strip() for l in f):
            line = __strip_comments(line)
            if not in_multiline:
                mg = multiline_assignment_rx.match(line)
                if mg:
                    key = mg.group(1)
                    endkey = mg.group(2)
                    in_multiline = True
                    multiline_value = ''
                else:
                    emit(line)
            else:
                if line == endkey:
                    emit('{}={}'.format(key, multiline_value))
                    key, endkey = False, False
                    in_multiline = False
                else:
                    multiline_value += line

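    #replays the accumulated text through a temporary file so ConfigParser
    #can parse it (rewinding fio with fio.seek(0) would also work)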
    tmp = tempfile.TemporaryFile()
    tmp.write(fio.getvalue())
    tmp.seek(0, 0)
    conf = ConfigParser()
    conf.readfp(tmp)

    return conf
Exemple #30
0
import logging
import os.path
import re
import sys
from ConfigParser import ConfigParser  #needed by cfg = ConfigParser() below

# This must be called early - before the rest of the blueprint library loads.
logging.basicConfig(format='# [blueprint] %(message)s', level=logging.INFO)

import git
import rules
import util
import walk

DEFAULTS = {'io': {'server': 'https://devstructure.com'}}

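#configuration precedence, lowest to highest: built-in DEFAULTS, then values
#carried over from the legacy blueprint-io files, then blueprint.cfg itself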
cfg = ConfigParser()
for section, options in DEFAULTS.iteritems():
    cfg.add_section(section)
    for option, value in options.iteritems():
        cfg.set(section, option, str(value))
legacy_cfg = ConfigParser()
legacy_cfg.read(
    ['/etc/blueprint-io.cfg',
     os.path.expanduser('~/.blueprint-io.cfg')])
for section in legacy_cfg.sections():
    for option in legacy_cfg.options(section):
        cfg.set('io', option, legacy_cfg.get(section, option))
del legacy_cfg
cfg.read(['/etc/blueprint.cfg', os.path.expanduser('~/.blueprint.cfg')])
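#note: ConfigParser.read() silently skips files that cannot be opened, so it
#is safe to list both the system-wide and the per-user path here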