Code Example #1
storage as it helps avoid storage of credentials in the
database.

Possible values:
    * None
    * String value representing a valid configuration file path

Related options:
    * None

""")),
]

# SafeConfigParser was deprecated in Python 3.2
if sys.version_info >= (3, 2):
    CONFIG = configparser.ConfigParser()
else:
    CONFIG = configparser.SafeConfigParser()

LOG = logging.getLogger(__name__)

CONF = cfg.CONF
CONF.register_opts(swift_opts)


def is_multiple_swift_store_accounts_enabled():
    return CONF.swift_store_config_file is not None
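
For context, a short sketch of how the module-level CONFIG parser above is typically used: each account reference is looked up as a section of swift_store_config_file. The section and option names here ([ref1], user, key, auth_address) are assumptions for illustration, not taken from the snippet.

def get_account_reference_params(reference):
    # Hypothetical helper. Assumed file layout:
    #   [ref1]
    #   user = tenant:user
    #   key = secret
    #   auth_address = https://auth.example.com/v2.0/
    CONFIG.read(CONF.swift_store_config_file)
    return {
        'user': CONFIG.get(reference, 'user'),
        'key': CONFIG.get(reference, 'key'),
        'auth_address': CONFIG.get(reference, 'auth_address'),
    }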

Code Example #2
def main(_):
    '''does everything for testing'''

    decoder_cfg_file = None

    #read the database config file
    parsed_database_cfg = configparser.ConfigParser()
    parsed_database_cfg.read(os.path.join(FLAGS.expdir, 'database.cfg'))
    database_cfg = dict(parsed_database_cfg.items('database'))

    #read the lm config file
    parsed_nnet_cfg = configparser.ConfigParser()
    parsed_nnet_cfg.read(os.path.join(FLAGS.expdir, 'model', 'lm.cfg'))
    nnet_cfg = dict(parsed_nnet_cfg.items('lm'))

    #read the decoder config file
    if decoder_cfg_file is None:
        decoder_cfg_file = os.path.join(FLAGS.expdir, 'model', 'decoder.cfg')
    parsed_decoder_cfg = configparser.ConfigParser()
    parsed_decoder_cfg.read(decoder_cfg_file)
    decoder_cfg = dict(parsed_decoder_cfg.items('decoder'))

    #create the coder
    with open(os.path.join(FLAGS.expdir, 'model', 'alphabet')) as fid:
        alphabet = fid.read().split(' ')
    coder = target_coder.TargetCoder(alphabet)

    #read the maximum length
    with open(os.path.join(database_cfg['test_dir'], 'max_num_chars')) as fid:
        max_length = int(fid.read())

    #create a text reader
    textreader = text_reader.TextReader(textfile=os.path.join(
        database_cfg['test_dir'], 'text'),
                                        max_length=max_length,
                                        coder=coder)

    #create the classifier
    classifier = lm_factory.factory(conf=nnet_cfg, output_dim=coder.num_labels)

    #create a decoder
    graph = tf.Graph()
    with graph.as_default():
        decoder = decoder_factory.factory(conf=decoder_cfg,
                                          classifier=classifier,
                                          input_dim=1,
                                          max_input_length=max_length,
                                          coder=coder,
                                          expdir=FLAGS.expdir)

        saver = tf.train.Saver(tf.trainable_variables())

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True  #pylint: disable=E1101
    config.allow_soft_placement = True

    with tf.Session(graph=graph, config=config) as sess:
        #load the model
        saver.restore(sess, os.path.join(FLAGS.expdir, 'model',
                                         'network.ckpt'))

        #decode with the neural net
        decoded = decoder.decode(textreader, sess)

    #compute the character error rate
    score = decoder.score(decoded, None)

    print 'perplexity: %f' % score
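
For reference, a sketch of the minimal database.cfg the script reads above; the 'database' section name and the test_dir key come from the code, the value is made up.

import configparser

cfg = configparser.ConfigParser()
cfg['database'] = {'test_dir': '/path/to/test/data'}  # illustrative value
with open('database.cfg', 'w') as fh:
    cfg.write(fh)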
Code Example #3
    def read_settings(self):
        ''' Reads the settings from the packet_net.ini file '''
        if six.PY3:
            config = configparser.ConfigParser()
        else:
            config = configparser.SafeConfigParser()

        _ini_path_raw = os.environ.get('PACKET_NET_INI_PATH')

        if _ini_path_raw:
            packet_ini_path = os.path.expanduser(
                os.path.expandvars(_ini_path_raw))
        else:
            packet_ini_path = os.path.join(
                os.path.dirname(os.path.realpath(__file__)), 'packet_net.ini')
        config.read(packet_ini_path)

        # items per page
        self.items_per_page = 999
        if config.has_option(ini_section, 'items_per_page'):
            self.items_per_page = config.getint(ini_section,
                                                'items_per_page')

        # Instance states to be gathered in inventory. Default is all of them.
        packet_valid_device_states = [
            'active', 'inactive', 'queued', 'provisioning'
        ]
        self.packet_device_states = []
        if config.has_option(ini_section, 'device_states'):
            for device_state in config.get(ini_section,
                                           'device_states').split(','):
                device_state = device_state.strip()
                if device_state not in packet_valid_device_states:
                    continue
                self.packet_device_states.append(device_state)
        else:
            self.packet_device_states = packet_valid_device_states

        # Cache related
        cache_dir = os.path.expanduser(config.get(ini_section, 'cache_path'))
        if not os.path.exists(cache_dir):
            os.makedirs(cache_dir)

        self.cache_path_cache = cache_dir + "/ansible-packet.cache"
        self.cache_path_index = cache_dir + "/ansible-packet.index"
        self.cache_max_age = config.getint(ini_section, 'cache_max_age')

        # Configure nested groups instead of flat namespace.
        if config.has_option(ini_section, 'nested_groups'):
            self.nested_groups = config.getboolean(ini_section,
                                                   'nested_groups')
        else:
            self.nested_groups = False

        # Replace dash or not in group names
        if config.has_option(ini_section, 'replace_dash_in_groups'):
            self.replace_dash_in_groups = config.getboolean(
                ini_section, 'replace_dash_in_groups')
        else:
            self.replace_dash_in_groups = True

        # Configure which groups should be created.
        group_by_options = [
            'group_by_device_id',
            'group_by_hostname',
            'group_by_facility',
            'group_by_project',
            'group_by_operating_system',
            'group_by_plan_type',
            'group_by_tags',
            'group_by_tag_none',
        ]
        for option in group_by_options:
            if config.has_option(ini_section, option):
                setattr(self, option, config.getboolean(ini_section, option))
            else:
                setattr(self, option, True)

        # Do we need to just include hosts that match a pattern?
        try:
            pattern_include = config.get(ini_section, 'pattern_include')
            if pattern_include and len(pattern_include) > 0:
                self.pattern_include = re.compile(pattern_include)
            else:
                self.pattern_include = None
        except configparser.NoOptionError:
            self.pattern_include = None

        # Do we need to exclude hosts that match a pattern?
        try:
            pattern_exclude = config.get(ini_section, 'pattern_exclude')
            if pattern_exclude and len(pattern_exclude) > 0:
                self.pattern_exclude = re.compile(pattern_exclude)
            else:
                self.pattern_exclude = None
        except configparser.NoOptionError:
            self.pattern_exclude = None

        # Projects
        self.projects = []
        configProjects = config.get(ini_section, 'projects')
        configProjects_exclude = config.get(ini_section, 'projects_exclude')
        if configProjects == 'all':
            excluded = [p.strip() for p in configProjects_exclude.split(',')]
            for projectInfo in self.get_projects():
                if projectInfo.name not in excluded:
                    self.projects.append(projectInfo.name)
        else:
            self.projects = configProjects.split(",")
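
A sketch of a minimal packet_net.ini covering the options the method reads. ini_section is defined elsewhere in the original script, so the 'packet' section name here is an assumption, as are the values.

import configparser

ini = configparser.ConfigParser()
ini['packet'] = {  # assumed section name; ini_section is defined elsewhere
    'items_per_page': '999',
    'device_states': 'active,provisioning',
    'cache_path': '~/.ansible/tmp',
    'cache_max_age': '300',
    'nested_groups': 'no',
    'replace_dash_in_groups': 'yes',
    'projects': 'all',
    'projects_exclude': '',
}
with open('packet_net.ini', 'w') as fh:
    ini.write(fh)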
Code Example #4
    def read_settings(self):
        ''' Reads the settings from the vmware_inventory.ini file '''

        scriptbasename = __file__
        scriptbasename = os.path.basename(scriptbasename)
        scriptbasename = scriptbasename.replace('.py', '')

        defaults = {
            'vmware': {
                'server': '',
                'port': 443,
                'username': '',
                'password': '',
                'ini_path': os.path.join(os.path.dirname(__file__),
                                         '%s.ini' % scriptbasename),
                'cache_name': 'ansible-vmware',
                'cache_path': '~/.ansible/tmp',
                'cache_max_age': 3600,
                'max_object_level': 1,
                'alias_pattern': '{{ config.name + "_" + config.uuid }}',
                'host_pattern': '{{ guest.ipaddress }}',
                'host_filters': '{{ guest.gueststate == "running" }}',
                'groupby_patterns':
                    '{{ guest.guestid }},{{ "templates" if config.template else "guests"}}',
                'lower_var_keys': True
            }
        }

        if six.PY3:
            config = configparser.ConfigParser()
        else:
            config = configparser.SafeConfigParser()

        # where is the config?
        vmware_ini_path = os.environ.get('VMWARE_INI_PATH',
                                         defaults['vmware']['ini_path'])
        vmware_ini_path = os.path.expanduser(
            os.path.expandvars(vmware_ini_path))
        config.read(vmware_ini_path)

        # apply defaults
        for k, v in defaults['vmware'].items():
            if not config.has_option('vmware', k):
                config.set('vmware', k, str(v))

        # where is the cache?
        self.cache_dir = os.path.expanduser(config.get('vmware', 'cache_path'))
        if self.cache_dir and not os.path.exists(self.cache_dir):
            os.makedirs(self.cache_dir)

        # set the cache filename and max age
        cache_name = config.get('vmware', 'cache_name')
        self.cache_path_cache = self.cache_dir + "/%s.cache" % cache_name
        self.cache_max_age = config.getint('vmware', 'cache_max_age')

        # mark the connection info
        self.server = os.environ.get('VMWARE_SERVER',
                                     config.get('vmware', 'server'))
        self.port = int(
            os.environ.get('VMWARE_PORT', config.get('vmware', 'port')))
        self.username = os.environ.get('VMWARE_USERNAME',
                                       config.get('vmware', 'username'))
        self.password = os.environ.get('VMWARE_PASSWORD',
                                       config.get('vmware', 'password'))

        # behavior control
        self.maxlevel = int(config.get('vmware', 'max_object_level'))
        self.lowerkeys = config.get('vmware', 'lower_var_keys')
        if type(self.lowerkeys) != bool:
            if str(self.lowerkeys).lower() in ['yes', 'true', '1']:
                self.lowerkeys = True
            else:
                self.lowerkeys = False

        self.host_filters = config.get('vmware', 'host_filters').split(',')
        self.groupby_patterns = config.get('vmware',
                                           'groupby_patterns').split(',')

        # save the config
        self.config = config
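
A side note on the lower_var_keys handling above: since the defaults loop stores str(v), the value read back is always a string, and configparser's own boolean parsing accepts the same spellings. A minimal standalone demo:

import configparser

cp = configparser.ConfigParser()
cp.read_string('[vmware]\nlower_var_keys = True\n')
# getboolean maps '1'/'yes'/'true'/'on' (case-insensitive) to True
print(cp.getboolean('vmware', 'lower_var_keys'))  # True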
Code Example #5
    def _search_launchers(directories):
        ini_name = 'launcher.ini'
        config_defaults = {
            'name': 'Launcher',
            'command': '',
            'description': '',
            'image': '',
            'shell': 'false',
            'workdir': '.',
            'type': '',
            'manufacturer': '',
            'model': '',
            'variant': '',
            'priority': '0',
        }

        launchers = []
        ids = {}
        index = 0
        for root_dir in directories:
            for root, _, files in os.walk(root_dir):
                if ini_name not in files:
                    continue

                root = os.path.abspath(os.path.expanduser(root))
                ini_file = os.path.join(root, ini_name)
                cfg = configparser.ConfigParser(config_defaults)
                cfg.read(ini_file)
                for section in cfg.sections():
                    launcher = Launcher()
                    # descriptive data
                    launcher.name = cfg.get(section, 'name')
                    launcher.description = cfg.get(section, 'description')
                    info = MachineInfo()
                    info.type = cfg.get(section, 'type')
                    info.manufacturer = cfg.get(section, 'manufacturer')
                    info.model = cfg.get(section, 'model')
                    info.variant = cfg.get(section, 'variant')
                    launcher.priority = cfg.getint(section, 'priority')
                    launcher.importance = 0
                    launcher.info.MergeFrom(info)
                    # command data
                    launcher.command = cfg.get(section, 'command')
                    launcher.shell = cfg.getboolean(section, 'shell')
                    workdir = cfg.get(section, 'workdir')
                    if not os.path.isabs(workdir):
                        workdir = os.path.join(root, workdir)
                    launcher.workdir = os.path.normpath(workdir)
                    launcher.returncode = 0
                    launcher.running = False
                    launcher.terminating = False
                    # storing the image file
                    image_file = cfg.get(section, 'image')
                    if image_file != '':
                        if not os.path.isabs(image_file):
                            image_file = os.path.join(root, image_file)
                        with open(image_file, 'rb') as image_fh:
                            file_buffer = image_fh.read()
                        image = File()
                        image.name = os.path.basename(image_file)
                        image.encoding = CLEARTEXT
                        image.blob = file_buffer
                        launcher.image.MergeFrom(image)

                    launcher.index = index
                    index += 1
                    launchers.append(launcher)
                    ids[launcher.index] = '%s:%s' % (root, section)

        # sort using the priority attribute before distribution
        return sorted(launchers, key=attrgetter('priority'), reverse=True), ids
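
The config_defaults dict above is passed as ConfigParser's defaults, so every launcher.ini section only needs to override the keys it cares about. A standalone demo of that mechanism:

import configparser

cfg = configparser.ConfigParser({'shell': 'false', 'priority': '0'})
cfg.read_string('[demo]\ncommand = echo hello\n')
print(cfg.get('demo', 'command'))        # echo hello
print(cfg.getboolean('demo', 'shell'))   # False, from the defaults
print(cfg.getint('demo', 'priority'))    # 0, from the defaults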
Code Example #6
def change_repository_name_in_hgrc_file(hgrc_file, new_name):
    config = configparser.ConfigParser()
    config.read(hgrc_file)
    config.set('web', 'name', new_name)
    with open(hgrc_file, 'w') as fh:
        config.write(fh)
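
A hypothetical round trip for the helper above: create a minimal hgrc with a [web] section, rename the repository, and read the result back.

import configparser
import os
import tempfile

tmp = tempfile.NamedTemporaryFile('w', suffix='.hgrc', delete=False)
tmp.write('[web]\nname = old-name\n')
tmp.close()

change_repository_name_in_hgrc_file(tmp.name, 'new-name')

check = configparser.ConfigParser()
check.read(tmp.name)
assert check.get('web', 'name') == 'new-name'
os.unlink(tmp.name)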
Code Example #7
    def _get_base_config(self):
        cp = configparser.ConfigParser()
        cp.add_section('connection')
        cp.set('connection', 'api_key', 'exampleapikey')
        cp.set('connection', 'location', 'ams1')
        return cp
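
A sketch of how a test might consume the parser built above: serialize it to an INI string with config.write and an in-memory buffer.

import configparser
import io

cp = configparser.ConfigParser()
cp.add_section('connection')
cp.set('connection', 'api_key', 'exampleapikey')
cp.set('connection', 'location', 'ams1')

buf = io.StringIO()
cp.write(buf)
print(buf.getvalue())
# [connection]
# api_key = exampleapikey
# location = ams1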
Code Example #8
def main(_):
    '''main function'''

    #pointers to the config files
    computing_cfg_file = 'config/computing/non_distributed.cfg'
    database_cfg_file = 'config/asr_databases/TIMIT.conf'
    if FLAGS.type == 'asr':
        feat_cfg_file = 'config/features/fbank.cfg'
    classifier_cfg_file = 'config/asr/LASACNN.cfg'
    trainer_cfg_file = 'config/trainer/cross_entropytrainer.cfg'
    decoder_cfg_file = 'config/decoder/BeamSearchDecoder.cfg'

    #read the computing config file
    parsed_computing_cfg = configparser.ConfigParser()
    parsed_computing_cfg.read(computing_cfg_file)
    computing_cfg = dict(parsed_computing_cfg.items('computing'))

    #read the trainer config file
    parsed_trainer_cfg = configparser.ConfigParser()
    parsed_trainer_cfg.read(trainer_cfg_file)
    trainer_cfg = dict(parsed_trainer_cfg.items('trainer'))

    if os.path.isdir(os.path.join(FLAGS.expdir, 'processes')):
        shutil.rmtree(os.path.join(FLAGS.expdir, 'processes'))
    os.makedirs(os.path.join(FLAGS.expdir, 'processes'))

    if trainer_cfg['resume_training'] == 'True':
        if not os.path.isdir(FLAGS.expdir):
            raise Exception(
                'cannot find %s, please set resume_training to '
                'False if you want to start a new training process' %
                FLAGS.expdir)
    else:
        if os.path.isdir(os.path.join(FLAGS.expdir, 'logdir')):
            shutil.rmtree(os.path.join(FLAGS.expdir, 'logdir'))

        if not os.path.isdir(FLAGS.expdir):
            os.makedirs(FLAGS.expdir)

        if not os.path.isdir(os.path.join(FLAGS.expdir, 'model')):
            os.makedirs(os.path.join(FLAGS.expdir, 'model'))

        #copy the configs to the expdir so they can be read there and the
        #experiment information is stored
        shutil.copyfile(database_cfg_file,
                        os.path.join(FLAGS.expdir, 'database.cfg'))
        if FLAGS.type == 'asr':
            shutil.copyfile(
                feat_cfg_file,
                os.path.join(FLAGS.expdir, 'model', 'features.cfg'))
        shutil.copyfile(
            classifier_cfg_file,
            os.path.join(FLAGS.expdir, 'model', '%s.cfg' % FLAGS.type))

    shutil.copyfile(computing_cfg_file,
                    os.path.join(FLAGS.expdir, 'computing.cfg'))
    shutil.copyfile(trainer_cfg_file, os.path.join(FLAGS.expdir,
                                                   'trainer.cfg'))
    shutil.copyfile(decoder_cfg_file,
                    os.path.join(FLAGS.expdir, 'model', 'decoder.cfg'))

    if computing_cfg['distributed'] == 'condor_non-distributed':

        if not os.path.isdir(os.path.join(FLAGS.expdir, 'outputs')):
            os.makedirs(os.path.join(FLAGS.expdir, 'outputs'))

        subprocess.call([
            'condor_submit',
            'expdir=%s' % FLAGS.expdir,
            'memory=%s' % computing_cfg['minmemory'],
            'type=%s' % FLAGS.type,
            'nabu/distributed/condor/non_distributed.job'
        ])

    elif computing_cfg['distributed'] == 'non-distributed':

        if FLAGS.type == 'asr':
            train_asr(clusterfile=None,
                      job_name='local',
                      task_index=0,
                      ssh_command='None',
                      expdir=FLAGS.expdir)
        else:
            train_lm(clusterfile=None,
                     job_name='local',
                     task_index=0,
                     ssh_command='None',
                     expdir=FLAGS.expdir)

    elif computing_cfg['distributed'] == 'local':

        #create the directories
        if not os.path.isdir(os.path.join(FLAGS.expdir, 'outputs')):
            os.makedirs(os.path.join(FLAGS.expdir, 'outputs'))
        if not os.path.isdir(os.path.join(FLAGS.expdir, 'cluster')):
            os.makedirs(os.path.join(FLAGS.expdir, 'cluster'))

        #create the cluster file
        with open(os.path.join(FLAGS.expdir, 'cluster', 'cluster'),
                  'w') as fid:
            port = 1024
            for _ in range(int(computing_cfg['numps'])):
                while not cluster.port_available(port):
                    port += 1
                fid.write('ps,localhost,%d,\n' % port)
                port += 1
            for i in range(int(computing_cfg['numworkers'])):
                while not cluster.port_available(port):
                    port += 1
                fid.write('worker,localhost,%d,%d\n' % (port, i))
                port += 1

        #start the training
        local_cluster.local_cluster(FLAGS.expdir, FLAGS.type)

    elif computing_cfg['distributed'] == 'static':

        #read the cluster file
        machines = dict()
        machines['worker'] = []
        machines['ps'] = []
        with open(computing_cfg['clusterfile']) as fid:
            for line in fid:
                if line.strip():
                    split = line.strip().split(',')
                    machines[split[0]].append(split[1])

        #create the outputs directory
        if not os.path.isdir(os.path.join(FLAGS.expdir, 'outputs')):
            os.makedirs(os.path.join(FLAGS.expdir, 'outputs'))

        #run all the jobs
        processes = dict()
        processes['worker'] = []
        processes['ps'] = []
        for job in machines:
            task_index = 0
            for machine in machines[job]:
                command = ('python -u train_%s.py --clusterfile=%s '
                           '--job_name=%s --task_index=%d --ssh_command=%s '
                           '--expdir=%s') % (
                               FLAGS.type, computing_cfg['clusterfile'], job,
                               task_index, computing_cfg['ssh_command'],
                               FLAGS.expdir)
                processes[job].append(
                    run_remote.run_remote(command=command, host=machine))
                task_index += 1

        #make sure the created processes are terminated at exit
        for job in processes:
            for process in processes[job]:
                atexit.register(process.terminate)

        #make sure all remotely created processes are terminated at exit
        atexit.register(kill_processes.kill_processes,
                        processdir=os.path.join(FLAGS.expdir, 'processes'))

        #wait for all worker processes to finish
        for process in processes['worker']:
            process.wait()

    elif computing_cfg['distributed'] == 'condor':

        #create the directories
        if not os.path.isdir(os.path.join(FLAGS.expdir, 'outputs')):
            os.makedirs(os.path.join(FLAGS.expdir, 'outputs'))
        if os.path.isdir(os.path.join(FLAGS.expdir, 'cluster')):
            shutil.rmtree(os.path.join(FLAGS.expdir, 'cluster'))
        os.makedirs(os.path.join(FLAGS.expdir, 'cluster'))

        #submit the parameter server jobs
        subprocess.call([
            'condor_submit',
            'expdir=%s' % FLAGS.expdir,
            'numjobs=%s' % computing_cfg['numps'],
            'type=%s' % FLAGS.type,
            'ssh_command=%s' % computing_cfg['ssh_command'],
            'nabu/distributed/condor/ps.job'
        ])

        #submit the worker jobs
        subprocess.call([
            'condor_submit',
            'expdir=%s' % FLAGS.expdir,
            'numjobs=%s' % computing_cfg['numworkers'],
            'memory=%s' % computing_cfg['minmemory'],
            'type=%s' % FLAGS.type,
            'ssh_command=%s' % computing_cfg['ssh_command'],
            'nabu/distributed/condor/worker.job'
        ])

        ready = False

        try:
            print 'waiting for the machines to report...'
            numworkers = 0
            numps = 0
            while not ready:
                #check the machines in the cluster
                machines = cluster.get_machines(
                    os.path.join(FLAGS.expdir, 'cluster'))

                if (len(machines['ps']) > numps
                        or len(machines['worker']) > numworkers):

                    numworkers = len(machines['worker'])
                    numps = len(machines['ps'])

                    print('parameter servers ready %d/%s' %
                          (len(machines['ps']), computing_cfg['numps']))

                    print(
                        'workers ready %d/%s' %
                        (len(machines['worker']), computing_cfg['numworkers']))

                    print 'press Ctrl-C to run with the current machines'

                #check if the required amount of machines has reported
                if (len(machines['worker']) == int(computing_cfg['numworkers'])
                        and len(machines['ps']) == int(
                            computing_cfg['numps'])):

                    ready = True

                sleep(1)

        except KeyboardInterrupt:

            #remove all jobs that are not running
            os.system('condor_rm -constraint \'JobStatus =!= 2\'')

            #check if enough machines are available
            if machines['worker'] or machines['ps']:

                #stop the ps jobs
                cidfile = os.path.join(FLAGS.expdir, 'cluster', 'ps-cid')
                if os.path.exists(cidfile):
                    with open(cidfile) as fid:
                        cid = fid.read()
                    subprocess.call(['condor_rm', cid])

                #stop the worker jobs
                cidfile = os.path.join(FLAGS.expdir, 'cluster', 'worker-cid')
                if os.path.exists(cidfile):
                    with open(cidfile) as fid:
                        cid = fid.read()
                    subprocess.call(['condor_rm', cid])

                raise Exception('at least one ps and one worker needed')

        print('starting training with %s parameter servers and %s workers' %
              (len(machines['ps']), len(machines['worker'])))

        #create the cluster file
        with open(os.path.join(FLAGS.expdir, 'cluster', 'cluster'),
                  'w') as cfid:
            for job in machines:
                if job == 'ps':
                    GPU = ''
                else:
                    GPU = '0'
                for machine in machines[job]:
                    cfid.write('%s,%s,%d,%s\n' %
                               (job, machine[0], machine[1], GPU))

        #notify the machine that the cluster is ready
        fid = open(FLAGS.expdir + '/cluster/ready', 'w')
        fid.close()

        print('training has started look in %s/outputs for the job outputs' %
              FLAGS.expdir)

        print 'waiting for worker jobs to finish'

        for machine in machines['worker']:
            machine_file = os.path.join(FLAGS.expdir, 'cluster',
                                        '%s-%d' % (machine[0], machine[1]))
            while os.path.exists(machine_file):
                sleep(1)

        #stop the ps jobs
        with open(os.path.join(FLAGS.expdir, 'cluster', 'ps-cid')) as fid:
            cid = fid.read()

        subprocess.call(['condor_rm', cid])

    elif computing_cfg['distributed'] == 'condor_local':

        #create the directories
        if not os.path.isdir(os.path.join(FLAGS.expdir, 'outputs')):
            os.makedirs(os.path.join(FLAGS.expdir, 'outputs'))
        if not os.path.isdir(os.path.join(FLAGS.expdir, 'cluster')):
            os.makedirs(os.path.join(FLAGS.expdir, 'cluster'))

        #create the cluster file
        with open(os.path.join(FLAGS.expdir, 'cluster', 'cluster'),
                  'w') as fid:
            port = 1024
            for _ in range(int(computing_cfg['numps'])):
                while not cluster.port_available(port):
                    port += 1
                fid.write('ps,localhost,%d,\n' % port)
                port += 1
            for i in range(int(computing_cfg['numworkers'])):
                while not cluster.port_available(port):
                    port += 1
                fid.write('worker,localhost,%d,%d\n' % (port, i))
                port += 1

        #submit the job
        subprocess.call([
            'condor_submit',
            'expdir=%s' % FLAGS.expdir,
            'GPUs=%d' % (int(computing_cfg['numworkers'])),
            'memory=%s' % computing_cfg['minmemory'],
            'type=%s' % FLAGS.type, 'nabu/distributed/condor/local.job'
        ])

        print('job submitted look in %s/outputs for the job outputs' %
              FLAGS.expdir)

    else:
        raise Exception('Unknown distributed type in %s' % computing_cfg_file)
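
For reference, an illustrative round trip of the cluster file format the 'local' and 'condor_local' branches write above: one "job,host,port,task_index" line per task, with ps lines leaving the index empty.

lines = ['ps,localhost,1024,', 'worker,localhost,1025,0', 'worker,localhost,1026,1']
machines = {'worker': [], 'ps': []}
for line in lines:
    job, host, port, index = line.split(',')
    machines[job].append((host, int(port), index))
print(machines)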
Code Example #9
def train(clusterfile, job_name, task_index, ssh_command, expdir):
    ''' does everything for ss training

    Args:
        clusterfile: the file where all the machines in the cluster are
            specified if None, local training will be done
        job_name: one of ps or worker in the case of distributed training
        task_index: the task index in this job
        ssh_command: the command to use for ssh, if 'None' no tunnel will be
            created
        expdir: the experiments directory
    '''

    #read the database config file
    parsed_database_cfg = configparser.ConfigParser()
    parsed_database_cfg.read(os.path.join(expdir, 'database.cfg'))

    #read the ss config file
    model_cfg = configparser.ConfigParser()
    model_cfg.read(os.path.join(expdir, 'model.cfg'))

    #read the trainer config file
    parsed_trainer_cfg = configparser.ConfigParser()
    parsed_trainer_cfg.read(os.path.join(expdir, 'trainer.cfg'))
    trainer_cfg = dict(parsed_trainer_cfg.items('trainer'))

    #read the decoder config file
    evaluator_cfg = configparser.ConfigParser()
    evaluator_cfg.read(os.path.join(expdir, 'evaluator.cfg'))

    #Get the config files for each training stage. Each training stage has a
    #different segment length and its network is initialized with the network
    #of the previous training stage
    segment_lengths = trainer_cfg['segment_lengths'].split(' ')
    #segment_lengths = [segment_lengths[-1]]
    #os.environ['CUDA_VISIBLE_DEVICES'] = '1'
    for i, segment_length in enumerate(segment_lengths):

        segment_expdir = os.path.join(expdir, segment_length)

        segment_parsed_database_cfg = configparser.ConfigParser()
        segment_parsed_database_cfg.read(
            os.path.join(segment_expdir, 'database.cfg'))

        segment_parsed_trainer_cfg = configparser.ConfigParser()
        segment_parsed_trainer_cfg.read(
            os.path.join(segment_expdir, 'trainer.cfg'))
        segment_trainer_cfg = dict(segment_parsed_trainer_cfg.items('trainer'))

        if segment_trainer_cfg['trainer'] == 'multi_task':
            segment_tasks_cfg = dict()
            for task in segment_trainer_cfg['tasks'].split(' '):
                segment_tasks_cfg[task] = dict(
                    segment_parsed_trainer_cfg.items(task))
        else:
            segment_tasks_cfg = None

        #If there is no previously validated training session, use the model
        #of the previous segment length as initialization for the current one
        if i > 0 and not os.path.exists(
                os.path.join(segment_expdir, 'logdir',
                             'validated.ckpt.index')):
            init_filename = os.path.join(expdir, segment_lengths[i - 1],
                                         'model', 'network.ckpt')
            if not os.path.exists(init_filename + '.index'):
                init_filename = None

        else:
            init_filename = None

        #if this training stage has already successfully finished, skip it
        if os.path.exists(
                os.path.join(expdir, segment_lengths[i], 'model',
                             'network.ckpt.index')):
            print 'Already found a fully trained model for segment length %s' % segment_length
        else:

            #create the cluster and server
            server = create_server.create_server(clusterfile=clusterfile,
                                                 job_name=job_name,
                                                 task_index=task_index,
                                                 expdir=expdir,
                                                 ssh_command=ssh_command)

            #parameter server
            if job_name == 'ps':
                raise NotImplementedError(
                    'Parameter server is currently not implemented correctly')
                ##create the parameter server
                #ps = multi_task_trainer.ParameterServer(
                #conf=segment_trainer_cfg,
                #tasksconf=segment_tasks_cfg,
                #modelconf=model_cfg,
                #dataconf=segment_parsed_database_cfg,
                #server=server,
                #task_index=task_index)

                #if task_index ==0:
                ##let the ps wait untill all workers are finished
                #ps.join()
                #return

            tr = trainer_factory.factory(segment_trainer_cfg['trainer'])(
                conf=segment_trainer_cfg,
                tasksconf=segment_tasks_cfg,
                dataconf=segment_parsed_database_cfg,
                modelconf=model_cfg,
                evaluatorconf=evaluator_cfg,
                expdir=segment_expdir,
                init_filename=init_filename,
                server=server,
                task_index=task_index)

            print 'starting training for segment length: %s' % segment_length

            #train the model
            tr.train()
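
For reference, a sketch of the trainer.cfg fields the staged loop above relies on: segment_lengths is a space-separated list naming one sub-experiment directory each, and with the multi_task trainer every task listed in 'tasks' gets its own section. All other names and values here are made up.

import configparser

cfg = configparser.ConfigParser()
cfg['trainer'] = {
    'trainer': 'multi_task',
    'tasks': 'task1 task2',             # each task gets its own section
    'segment_lengths': '100 500 full',  # one training stage per value
}
cfg['task1'] = {'loss': 'mse'}  # illustrative task options
cfg['task2'] = {'loss': 'mse'}
with open('trainer.cfg', 'w') as fh:
    cfg.write(fh)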
Code Example #10
                    sys.stderr.write("%s doesn't have %s option in %s "
                                     "section.\n" % (path2, key, section))
                    failed = 1
                else:
                    val2 = conf2.get(section, key)
                    if not val1 == val2:
                        sys.stderr.write("%s in %s != %s in %s under %s.%s\n" %
                                         (val1, path1, val2,
                                          path2, section, key))
                        failed = 1
    return failed
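
The excerpt above starts mid-function; a sketch of the complete comparison it belongs to, reconstructed from context (the wording of the missing-section message is an assumption):

import sys


def compare_ini_files(conf1, path1, conf2, path2):
    failed = 0
    for section in conf1.sections():
        if not conf2.has_section(section):
            sys.stderr.write("%s doesn't have %s section.\n" %
                             (path2, section))
            failed = 1
            continue
        for key in conf1.options(section):
            val1 = conf1.get(section, key)
            if not conf2.has_option(section, key):
                sys.stderr.write("%s doesn't have %s option in %s "
                                 "section.\n" % (path2, key, section))
                failed = 1
            else:
                val2 = conf2.get(section, key)
                if not val1 == val2:
                    sys.stderr.write("%s in %s != %s in %s under %s.%s\n" %
                                     (val1, path1, val2, path2, section, key))
                    failed = 1
    return failed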


if __name__ == '__main__':
    if six.PY3:
        conf1 = configparser.ConfigParser()
        conf2 = configparser.ConfigParser()
    else:
        conf1 = configparser.SafeConfigParser()
        conf2 = configparser.SafeConfigParser()

    conf1.read(sys.argv[1])
    conf2.read(sys.argv[2])

    # compare first file to the other one
    ret = compare_ini_files(conf1, sys.argv[1], conf2, sys.argv[2])

    # compare the other file to the first one
    ret2 = compare_ini_files(conf2, sys.argv[2], conf1, sys.argv[1])

    sys.exit(ret or ret2)
Code Example #11
File: contrail.py  Project: pltf/contrail-heat
from heat.engine import resource
from heat.engine.properties import Properties
try:
    from heat.openstack.common import log as logging
except ImportError:
    from oslo_log import log as logging
from vnc_api import vnc_api
from vnc_api.exceptions import NoIdError
from vnc_api.exceptions import RefsExistError
import uuid
from threading import Lock

LOG = logging.getLogger(__name__)

cfg_parser = configparser.ConfigParser()
cfg_parser.read("/etc/heat/heat.conf")


def set_auth_token(func):
    def wrapper(self, *args, **kwargs):
        self.mutex().acquire()
        try:
            self.vnc_lib().set_auth_token(self.stack.context.auth_token)
            return func(self, *args, **kwargs)
        finally:
            self.mutex().release()

    return wrapper
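
A hypothetical use of the decorator above: any Heat resource method that talks to the VNC API first refreshes the auth token under the mutex. The resource class and the virtual_network_read call are illustrative.

class ContrailVirtualNetwork(resource.Resource):
    @set_auth_token
    def _show_resource(self):
        # vnc_lib() and mutex() are assumed to be provided by the real
        # resource base class, exactly as the decorator itself expects.
        return self.vnc_lib().virtual_network_read(id=self.resource_id)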

Code Example #12
def launch_refarch_env(console_port=8443,
                       cluster_id=None,
                       deployment_type=None,
                       openshift_vers=None,
                       vcenter_host=None,
                       vcenter_username=None,
                       vcenter_password=None,
                       vcenter_template_name=None,
                       vcenter_folder=None,
                       vcenter_cluster=None,
                       vcenter_datacenter=None,
                       vcenter_datastore=None,
                       vcenter_resource_pool=None,
                       public_hosted_zone=None,
                       app_dns_prefix=None,
                       vm_dns=None,
                       vm_gw=None,
                       vm_netmask=None,
                       vm_network=None,
                       rhel_subscription_user=None,
                       rhel_subscription_pass=None,
                       rhel_subscription_server=None,
                       rhel_subscription_pool=None,
                       byo_lb=None,
                       lb_host=None,
                       byo_nfs=None,
                       nfs_host=None,
                       nfs_registry_mountpoint=None,
                       no_confirm=False,
                       tag=None,
                       verbose=0,
                       create_inventory=None,
                       master_nodes=None,
                       infra_nodes=None,
                       app_nodes=None,
                       vm_ipaddr_start=None,
                       ocp_hostname_prefix=None,
                       create_ocp_vars=None,
                       auth_type=None,
                       ldap_user=None,
                       ldap_user_password=None,
                       ldap_fqdn=None,
                       openshift_sdn=None,
                       containerized=None,
                       container_storage=None,
                       openshift_hosted_metrics_deploy=None,
                       clean=None):

    # Open config file INI for values first
    scriptbasename = __file__
    scriptbasename = os.path.basename(scriptbasename)
    scriptbasename = scriptbasename.replace('.py', '')
    defaults = {
        'vmware': {
            'ini_path': os.path.join(os.path.dirname(__file__),
                                     '%s.ini' % scriptbasename),
            'console_port': '8443',
            'deployment_type': 'openshift-enterprise',
            'openshift_vers': 'v3_5',
            'cluster_id': '',
            'vcenter_host': '',
            'vcenter_username': '******',
            'vcenter_password': '',
            'vcenter_template_name': 'ocp-server-template-2.0.2',
            'vcenter_folder': 'ocp',
            'vcenter_cluster': 'devel',
            'vcenter_datacenter': '',
            'vcenter_datastore': '',
            'vcenter_resource_pool': '/Resources/OCP3',
            'public_hosted_zone': '',
            'app_dns_prefix': 'apps',
            'vm_dns': '',
            'vm_gw': '',
            'vm_netmask': '',
            'vm_network': 'VM Network',
            'rhel_subscription_user': '',
            'rhel_subscription_pass': '',
            'rhel_subscription_server': '',
            'rhel_subscription_pool': 'Red Hat OpenShift Container Platform, Premium*',
            'openshift_sdn': 'redhat/openshift-ovs-subnet',
            'containerized': 'containerized',
            'container_storage': 'none',
            'openshift_hosted_metrics_deploy': 'false',
            'byo_lb': 'no',
            'lb_host': 'haproxy-',
            'byo_nfs': 'no',
            'nfs_host': 'nfs-0',
            'nfs_registry_mountpoint': '/exports',
            'master_nodes': '3',
            'infra_nodes': '2',
            'app_nodes': '3',
            'vm_ipaddr_start': '',
            'ocp_hostname_prefix': '',
            'auth_type': 'ldap',
            'ldap_user': '******',
            'ldap_user_password': '',
            'ldap_fqdn': ''
        }
    }
    if six.PY3:
        config = configparser.ConfigParser()
    else:
        config = configparser.SafeConfigParser()

    # where is the config?
    vmware_ini_path = os.environ.get('VMWARE_INI_PATH',
                                     defaults['vmware']['ini_path'])
    vmware_ini_path = os.path.expanduser(os.path.expandvars(vmware_ini_path))
    config.read(vmware_ini_path)

    # apply defaults
    for k, v in defaults['vmware'].items():
        if not config.has_option('vmware', k):
            config.set('vmware', k, str(v))

    cluster_id = config.get('vmware', 'cluster_id')
    console_port = config.get('vmware', 'console_port')
    deployment_type = config.get('vmware', 'deployment_type')
    openshift_vers = config.get('vmware', 'openshift_vers')
    vcenter_host = config.get('vmware', 'vcenter_host')
    vcenter_username = config.get('vmware', 'vcenter_username')
    vcenter_password = config.get('vmware', 'vcenter_password')
    vcenter_template_name = config.get('vmware', 'vcenter_template_name')
    vcenter_folder = config.get('vmware', 'vcenter_folder')
    vcenter_cluster = config.get('vmware', 'vcenter_cluster')
    vcenter_datacenter = config.get('vmware', 'vcenter_datacenter')
    vcenter_datastore = config.get('vmware', 'vcenter_datastore')
    vcenter_resource_pool = config.get('vmware', 'vcenter_resource_pool')
    public_hosted_zone = config.get('vmware', 'public_hosted_zone')
    app_dns_prefix = config.get('vmware', 'app_dns_prefix')
    vm_dns = config.get('vmware', 'vm_dns')
    vm_gw = config.get('vmware', 'vm_gw')
    vm_netmask = config.get('vmware', 'vm_netmask')
    vm_network = config.get('vmware', 'vm_network')
    rhel_subscription_user = config.get('vmware', 'rhel_subscription_user')
    rhel_subscription_pass = config.get('vmware', 'rhel_subscription_pass')
    rhel_subscription_server = config.get('vmware', 'rhel_subscription_server')
    rhel_subscription_pool = config.get('vmware', 'rhel_subscription_pool')
    openshift_sdn = config.get('vmware', 'openshift_sdn')
    containerized = config.get('vmware', 'containerized')
    container_storage = config.get('vmware', 'container_storage')
    openshift_hosted_metrics_deploy = config.get(
        'vmware', 'openshift_hosted_metrics_deploy')
    byo_lb = config.get('vmware', 'byo_lb')
    lb_host = config.get('vmware', 'lb_host')
    byo_nfs = config.get('vmware', 'byo_nfs')
    nfs_host = config.get('vmware', 'nfs_host')
    nfs_registry_mountpoint = config.get('vmware', 'nfs_registry_mountpoint')
    master_nodes = config.get('vmware', 'master_nodes')
    infra_nodes = config.get('vmware', 'infra_nodes')
    app_nodes = config.get('vmware', 'app_nodes')
    vm_ipaddr_start = config.get('vmware', 'vm_ipaddr_start')
    ocp_hostname_prefix = config.get('vmware', 'ocp_hostname_prefix')
    auth_type = config.get('vmware', 'auth_type')
    ldap_user = config.get('vmware', 'ldap_user')
    ldap_user_password = config.get('vmware', 'ldap_user_password')
    ldap_fqdn = config.get('vmware', 'ldap_fqdn')

    err_count = 0
    required_vars = {
        'public_hosted_zone': public_hosted_zone,
        'vcenter_host': vcenter_host,
        'vcenter_password': vcenter_password,
        'vm_ipaddr_start': vm_ipaddr_start,
        'ldap_fqdn': ldap_fqdn,
        'ldap_user_password': ldap_user_password,
        'vm_dns': vm_dns,
        'vm_gw': vm_gw,
        'vm_netmask': vm_netmask,
        'vcenter_datacenter': vcenter_datacenter,
        'vcenter_datastore': vcenter_datastore
    }
    for k, v in required_vars.items():
        if v == '':
            err_count += 1
            print "Missing %s " % k
    if err_count > 0:
        print "Please fill out the missing variables in %s " % vmware_ini_path
        exit(1)
    wildcard_zone = "%s.%s" % (app_dns_prefix, public_hosted_zone)

    # fix nfs_host and lb_host vars with nfs_ocp_hostname_prefix
    if 'no' in byo_lb:
        if ocp_hostname_prefix is not None:
            lb_host = ocp_hostname_prefix + "haproxy-0"
        else:
            lb_host = "haproxy-0"

    if 'no' in byo_nfs:
        if ocp_hostname_prefix is not None:
            nfs_host = ocp_hostname_prefix + "nfs-0"
        else:
            nfs_host = "nfs-0"

    tags = []
    tags.append('setup')

    # Our initial support node is the wildcard_ip
    support_nodes = 1
    if byo_nfs == "no":
        support_nodes = support_nodes + 1
        tags.append('nfs')
    else:
        if nfs_host == '':
            nfs_host = click.prompt(
                "Please enter the NFS Server fqdn for persistent registry:")
        if nfs_registry_mountpoint == '':
            nfs_registry_mountpoint = click.prompt(
                "Please enter NFS share name for persistent registry:")

    tags.append('prod')

    if byo_lb == "no":
        tags.append('haproxy')
    else:
        if lb_host == '':
            lb_host = click.prompt(
                "Please enter the load balancer hostname for installation:")
            lb_host = lb_host + '.' + public_hosted_zone

    if create_ocp_vars is True:
        click.echo('Configured OCP variables:')
        click.echo('\tauth_type: %s' % auth_type)
        click.echo('\tldap_fqdn: %s' % ldap_fqdn)
        click.echo('\tldap_user: %s' % ldap_user)
        click.echo('\tldap_user_password: %s' % ldap_user_password)
        click.echo('\tpublic_hosted_zone: %s' % public_hosted_zone)
        click.echo('\tapp_dns_prefix: %s' % app_dns_prefix)
        click.echo('\tbyo_lb: %s' % byo_lb)
        click.echo('\tlb_host: %s' % lb_host)
        click.echo('\tUsing values from: %s' % vmware_ini_path)
        if not no_confirm:
            click.confirm('Continue using these values?', abort=True)
        if auth_type == 'ldap':
            l_bdn = ""

            for d in ldap_fqdn.split("."):
                l_bdn = l_bdn + "dc=" + d + ","

            l = ldap.initialize("ldap://" + ldap_fqdn)
            try:
                l.protocol_version = ldap.VERSION3
                l.set_option(ldap.OPT_REFERRALS, 0)
                bind = l.simple_bind_s(ldap_user, ldap_user_password)

                base = l_bdn[:-1]
                criteria = "(&(objectClass=user)(sAMAccountName=" + ldap_user + "))"
                attributes = 'displayName', 'distinguishedName'
                result = l.search_s(base, ldap.SCOPE_SUBTREE, criteria,
                                    attributes)

                results = [
                    entry for dn, entry in result if isinstance(entry, dict)
                ]
            finally:
                l.unbind()

            for result in results:

                bindDN = str(result['distinguishedName']).strip("'[]")
                url_base = bindDN.replace(("CN=" + ldap_user + ","), "")
                url = "ldap://" + ldap_fqdn + ":389/" + url_base + "?sAMAccountName"

            install_file = "playbooks/openshift-install.yaml"

            for line in fileinput.input(install_file, inplace=True):
                # Parse our ldap url
                if line.startswith("      url:"):
                    print "      url: " + url
                elif line.startswith("      bindPassword:"******"      bindPassword: "******"      bindDN:"):
                    print "      bindDN: " + bindDN
                elif line.startswith("    wildcard_zone:"):
                    print "    wildcard_zone: " + app_dns_prefix + "." + public_hosted_zone
                elif line.startswith("    load_balancer_hostname:"):
                    print "    load_balancer_hostname: " + lb_host + "." + public_hosted_zone
                elif line.startswith("    deployment_type:"):
                    print "    deployment_type: " + deployment_type
                elif line.startswith(
                        "    openshift_hosted_registry_storage_host:"):
                    print "    openshift_hosted_registry_storage_host: " + nfs_host + "." + public_hosted_zone
                elif line.startswith(
                        "    openshift_hosted_registry_storage_nfs_directory:"
                ):
                    print "    openshift_hosted_registry_storage_nfs_directory: " + nfs_registry_mountpoint
                elif line.startswith(
                        "    openshift_hosted_metrics_storage_host:"):
                    print "    openshift_hosted_metrics_storage_host: " + nfs_host + "." + public_hosted_zone
                elif line.startswith(
                        "    openshift_hosted_metrics_storage_nfs_directory:"):
                    print "    openshift_hosted_metrics_storage_nfs_directory: " + nfs_registry_mountpoint
                else:
                    print line,

            # Provide values for update and add node playbooks
            update_file = "playbooks/minor-update.yaml"
            for line in fileinput.input(update_file, inplace=True):
                if line.startswith("    wildcard_zone:"):
                    print "    wildcard_zone: " + app_dns_prefix + "." + public_hosted_zone
                elif line.startswith("    load_balancer_hostname:"):
                    print "    load_balancer_hostname: " + lb_host + "." + public_hosted_zone
                elif line.startswith("    deployment_type:"):
                    print "    deployment_type: " + deployment_type
                else:
                    print line,
                #End create_ocp_vars
            exit(0)

        if auth_type == 'none':
            playbooks = [
                "playbooks/openshift-install.yaml",
                "playbooks/minor-update.yaml"
            ]
            for ocp_file in playbooks:
                for line in fileinput.input(ocp_file, inplace=True):
                    if line.startswith(
                            '#openshift_master_identity_providers:'):
                        line = line.replace('#', '    ')
                        print line
                    else:
                        print line,
        exit(0)

    if create_inventory is True:
        click.echo('Configured inventory values:')
        click.echo('\tmaster_nodes: %s' % master_nodes)
        click.echo('\tinfra_nodes: %s' % infra_nodes)
        click.echo('\tapp_nodes: %s' % app_nodes)
        click.echo('\tpublic_hosted_zone: %s' % public_hosted_zone)
        click.echo('\tapp_dns_prefix: %s' % app_dns_prefix)
        click.echo('\tocp_hostname_prefix: %s' % ocp_hostname_prefix)
        click.echo('\tbyo_nfs: %s' % byo_nfs)
        if byo_nfs == "no":
            click.echo('\tnfs_host: %s' % nfs_host)
        click.echo('\tbyo_lb: %s' % byo_lb)
        if byo_lb == "no":
            click.echo('\tlb_host: %s' % lb_host)
        click.echo('\tvm_ipaddr_start: %s' % vm_ipaddr_start)
        click.echo('\tUsing values from: %s' % vmware_ini_path)
        click.echo("")
        if not no_confirm:
            click.confirm('Continue using these values?', abort=True)
        # Create the inventory file and exit
        if not cluster_id:
            #create a unique cluster_id first
            cluster_id = ''.join(
                random.choice('0123456789abcdefghijklmnopqrstuvwxyz')
                for i in range(20))
            config.set('vmware', 'cluster_id', cluster_id)
            for line in fileinput.input(vmware_ini_path, inplace=True):
                if line.startswith('cluster_id'):
                    print "cluster_id=" + str(cluster_id)
                else:
                    print line,

        total_nodes = int(master_nodes) + int(app_nodes) + int(
            infra_nodes) + int(support_nodes)

        if vm_ipaddr_start is None:
            vm_ipaddr_start = click.prompt("Starting IP address to use?")

        ip4addr = []
        for i in range(total_nodes):
            p = iptools.ipv4.ip2long(vm_ipaddr_start) + i
            ip4addr.append(iptools.ipv4.long2ip(p))
        wild_ip = ip4addr.pop()

        bind_entry = []
        bind_entry.append("$ORIGIN " + app_dns_prefix + "." +
                          public_hosted_zone + ".")
        bind_entry.append("*\tA\t" + wild_ip)
        bind_entry.append("$ORIGIN " + public_hosted_zone + ".")

        d = {}
        d['host_inventory'] = {}
        d['infrastructure_hosts'] = {}

        support_list = []
        if byo_nfs == "no":
            if ocp_hostname_prefix not in nfs_host:
                nfs_host = ocp_hostname_prefix + "nfs-0"
            d['host_inventory'][nfs_host] = {}
            d['host_inventory'][nfs_host]['guestname'] = nfs_host
            d['host_inventory'][nfs_host]['ip4addr'] = ip4addr[0]
            d['host_inventory'][nfs_host]['tag'] = str(
                cluster_id) + "-infra-nfs"
            d['infrastructure_hosts']["nfs_server"] = {}
            d['infrastructure_hosts']["nfs_server"]['guestname'] = nfs_host
            d['infrastructure_hosts']["nfs_server"]['tag'] = str(
                cluster_id) + "-infra-nfs"
            support_list.append(nfs_host)
            bind_entry.append(nfs_host + "\tA\t" + ip4addr[0])
            del ip4addr[0]

        if byo_lb == "no":
            if ocp_hostname_prefix not in lb_host:
                lb_host = ocp_hostname_prefix + "haproxy-0"
            d['host_inventory'][lb_host] = {}
            d['host_inventory'][lb_host]['guestname'] = lb_host
            d['host_inventory'][lb_host]['ip4addr'] = wild_ip
            d['host_inventory'][lb_host]['tag'] = str(
                cluster_id) + "-loadbalancer"
            d['infrastructure_hosts']["haproxy"] = {}
            d['infrastructure_hosts']["haproxy"]['guestname'] = lb_host
            d['infrastructure_hosts']["haproxy"]['tag'] = str(
                cluster_id) + "-loadbalancer"
            support_list.append(lb_host)
            bind_entry.append(lb_host + "\tA\t" + wild_ip)

        master_list = []
        d['production_hosts'] = {}
        for i in range(0, int(master_nodes)):
            if ocp_hostname_prefix is not None:
                master_name = ocp_hostname_prefix + "master-" + str(i)
            else:
                master_name = "master-" + str(i)
            d['host_inventory'][master_name] = {}
            d['host_inventory'][master_name]['guestname'] = master_name
            d['host_inventory'][master_name]['ip4addr'] = ip4addr[0]
            d['host_inventory'][master_name]['tag'] = str(
                cluster_id) + "-master"
            d['production_hosts'][master_name] = {}
            d['production_hosts'][master_name]['guestname'] = master_name
            d['production_hosts'][master_name]['tag'] = str(
                cluster_id) + "-master"
            master_list.append(master_name)
            bind_entry.append(master_name + "\tA\t" + ip4addr[0])
            del ip4addr[0]
        app_list = []
        for i in range(0, int(app_nodes)):
            if ocp_hostname_prefix is not None:
                app_name = ocp_hostname_prefix + "app-" + str(i)

            else:
                app_name = "app-" + str(i)

            d['host_inventory'][app_name] = {}
            d['host_inventory'][app_name]['guestname'] = app_name
            d['host_inventory'][app_name]['ip4addr'] = ip4addr[0]
            d['host_inventory'][app_name]['tag'] = str(cluster_id) + "-app"
            d['production_hosts'][app_name] = {}
            d['production_hosts'][app_name]['guestname'] = app_name
            d['production_hosts'][app_name]['tag'] = str(cluster_id) + "-app"
            app_list.append(app_name)
            bind_entry.append(app_name + "\tA\t" + ip4addr[0])
            del ip4addr[0]
        infra_list = []
        for i in range(0, int(infra_nodes)):
            if ocp_hostname_prefix is not None:
                infra_name = ocp_hostname_prefix + "infra-" + str(i)
            else:
                infra_name = "infra-" + str(i)
            d['host_inventory'][infra_name] = {}
            d['host_inventory'][infra_name]['guestname'] = infra_name
            d['host_inventory'][infra_name]['ip4addr'] = ip4addr[0]
            d['host_inventory'][infra_name]['tag'] = str(cluster_id) + "-infra"
            d['production_hosts'][infra_name] = {}
            d['production_hosts'][infra_name]['guestname'] = infra_name
            d['production_hosts'][infra_name]['tag'] = str(
                cluster_id) + "-infra"
            infra_list.append(infra_name)
            bind_entry.append(infra_name + "\tA\t" + ip4addr[0])
            del ip4addr[0]
        print "# Here is what should go into your DNS records"
        print("\n".join(bind_entry))
        print "# Please note, if you have chosen to bring your own loadbalancer and NFS Server you will need to ensure that these records are added to DNS and properly resolve. "

        with open('infrastructure.json', 'w') as outfile:
            json.dump(d, outfile)
        exit(0)
    # End create inventory

    # Display information to the user about their choices
    click.echo('Configured values:')
    for each_section in config.sections():
        for (key, val) in config.items(each_section):
            print '\t %s:  %s' % (key, val)
    click.echo("")

    if not no_confirm:
        click.confirm('Continue using these values?', abort=True)

    if not os.path.isfile('infrastructure.json'):
        print "Please create your inventory file first by running the --create_inventory flag"
        exit(1)

    inventory_file = "inventory/vsphere/vms/vmware_inventory.ini"
    # Add section here to modify inventory file based on input from user check your vmmark scripts for parsing the file and adding the values
    for line in fileinput.input(inventory_file, inplace=True):
        if line.startswith("server="):
            print "server=" + vcenter_host
        elif line.startswith("password="******"password="******"username="******"username="******"
        command = 'cp -f ~/.ssh/id_rsa ssh_key/ocp3-installer'
        os.system(command)
        command = 'cp -f ~/.ssh/id_rsa ssh_key/ocp-installer'
        os.system(command)
        # make sure the ssh keys have the proper permissions
        command = 'chmod 600 ssh_key/ocp-installer'
        os.system(command)

        # remove any cached facts to prevent stale data during a re-run
        command = 'rm -rf .ansible/cached_facts'
        os.system(command)
        tags = ",".join(tags)
        if clean is True:
            tags = 'clean'
        if tag:
            tags = tag

        #if local:
        #command='ansible-playbook'
        #else:
        #   command='docker run -t --rm --volume `pwd`:/opt/ansible:z -v ~/.ssh:/root/.ssh:z -v /tmp:/tmp:z --net=host ansible:2.2-latest'
        if 'clean' in tags:
            tags = 'all'
            command = 'ansible-playbook '
            playbook = 'playbooks/cleanup-vsphere.yaml'
        else:
            command = 'ansible-playbook'
        command = command + ' --extra-vars "@./infrastructure.json" --tags %s -e \'vcenter_host=%s \
    vcenter_username=%s \
    vcenter_password=%s \
    vcenter_template_name=%s \
    vcenter_folder=%s \
    vcenter_cluster=%s \
    vcenter_datacenter=%s \
    vcenter_datastore=%s \
    vcenter_resource_pool=%s \
    public_hosted_zone=%s \
    app_dns_prefix=%s \
    vm_dns=%s \
    vm_gw=%s \
    vm_netmask=%s \
    vm_network=%s \
    wildcard_zone=%s \
    console_port=%s \
    cluster_id=%s \
    deployment_type=%s \
    openshift_vers=%s \
    rhsm_user=%s \
    rhsm_password=%s \
    rhel_subscription_server=%s \
    rhsm_pool="%s" \
    openshift_sdn=%s \
    containerized=%s \
    container_storage=%s \
    openshift_hosted_metrics_deploy=%s \
    lb_host=%s \
    nfs_host=%s \
    nfs_registry_mountpoint=%s \' %s' % (
            tags, vcenter_host, vcenter_username, vcenter_password,
            vcenter_template_name, vcenter_folder, vcenter_cluster,
            vcenter_datacenter, vcenter_datastore, vcenter_resource_pool,
            public_hosted_zone, app_dns_prefix, vm_dns, vm_gw, vm_netmask,
            vm_network, wildcard_zone, console_port, cluster_id,
            deployment_type, openshift_vers, rhel_subscription_user,
            rhel_subscription_pass, rhel_subscription_server,
            rhel_subscription_pool, openshift_sdn, containerized,
            container_storage, openshift_hosted_metrics_deploy, lb_host,
            nfs_host, nfs_registry_mountpoint, playbook)
        if verbose > 0:
            command += " -" + "".join(['v'] * verbose)
            click.echo('We are running: %s' % command)

        status = os.system(command)
        if os.WIFEXITED(status) and os.WEXITSTATUS(status) != 0:
            return os.WEXITSTATUS(status)
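
The exit-status handling above is easy to misread: on Unix, os.system() returns a raw wait status, not an exit code, so it must be unpacked with os.WIFEXITED / os.WEXITSTATUS before comparing to zero. A minimal sketch of the same decoding, assuming a Unix host:

import os

status = os.system('true')  # any shell command
if os.WIFEXITED(status) and os.WEXITSTATUS(status) != 0:
    # the child exited normally but with a non-zero code
    print('command failed with exit code %d' % os.WEXITSTATUS(status))
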
コード例 #13
0
def main():
    csv2rdf = CSV2RDF()

    opts, files = getopt.getopt(
        sys.argv[1:],
        "hc:b:p:i:o:Cf:l:s:d:D:",
        ["out=", "base=", "delim=", "propbase=", "class=", "default="
         "ident=", "label=", "skip=", "defineclass", "help"])
    opts = dict(opts)

    if "-h" in opts or "--help" in opts:
        print(HELP)
        sys.exit(-1)

    if "-f" in opts:
        config = configparser.ConfigParser()
        config.readfp(open(opts["-f"]))
        for k, v in config.items("csv2rdf"):
            if k == "out":
                csv2rdf.OUT = codecs.open(v, "w", "utf-8")
            elif k == "base":
                csv2rdf.BASE = rdflib.Namespace(v)
            elif k == "propbase":
                csv2rdf.PROPBASE = rdflib.Namespace(v)
            elif k == "class":
                csv2rdf.CLASS = rdflib.URIRef(v)
            elif k == "defineclass":
                csv2rdf.DEFINECLASS = bool(v)
            elif k == "ident":
                csv2rdf.IDENT = eval(v)
            elif k == "label":
                csv2rdf.LABEL = eval(v)
            elif k == "delim":
                csv2rdf.DELIM = v
            elif k == "skip":
                csv2rdf.SKIP = int(v)
            elif k == "default":
                csv2rdf.DEFAULT = column(v)
            elif k.startswith("col"):
                csv2rdf.COLUMNS[int(k[3:])] = column(v)
            elif k.startswith("prop"):
                csv2rdf.PROPS[int(k[4:])] = rdflib.URIRef(v)

    if "-o" in opts:
        csv2rdf.OUT = codecs.open(opts["-o"], "w", "utf-8")
    if "--out" in opts:
        csv2rdf.OUT = codecs.open(opts["--out"], "w", "utf-8")

    if "-b" in opts:
        csv2rdf.BASE = rdflib.Namespace(opts["-b"])
    if "--base" in opts:
        csv2rdf.BASE = rdflib.Namespace(opts["--base"])

    if "-d" in opts:
        csv2rdf.DELIM = opts["-d"]
    if "--delim" in opts:
        csv2rdf.DELIM = opts["--delim"]

    if "-D" in opts:
        csv2rdf.DEFAULT = column(opts["-D"])
    if "--default" in opts:
        csv2rdf.DEFAULT = column(opts["--default"])

    if "-p" in opts:
        csv2rdf.PROPBASE = rdflib.Namespace(opts["-p"])
    if "--propbase" in opts:
        csv2rdf.PROPBASE = rdflib.Namespace(opts["--propbase"])

    if "-l" in opts:
        csv2rdf.LABEL = eval(opts["-l"])
    if "--label" in opts:
        csv2rdf.LABEL = eval(opts["--label"])

    if "-i" in opts:
        csv2rdf.IDENT = eval(opts["-i"])
    if "--ident" in opts:
        csv2rdf.IDENT = eval(opts["--ident"])

    if "-s" in opts:
        csv2rdf.SKIP = int(opts["-s"])
    if "--skip" in opts:
        csv2rdf.SKIP = int(opts["--skip"])

    if "-c" in opts:
        csv2rdf.CLASS = rdflib.URIRef(opts["-c"])
    if "--class" in opts:
        csv2rdf.CLASS = rdflib.URIRef(opts["--class"])

    for k, v in opts.items():
        if k.startswith("--col"):
            csv2rdf.COLUMNS[int(k[5:])] = column(v)
        elif k.startswith("--prop"):
            csv2rdf.PROPS[int(k[6:])] = rdflib.URIRef(v)

    if csv2rdf.CLASS and ("-C" in opts or "--defineclass" in opts):
        csv2rdf.DEFINECLASS = True

    csv2rdf.convert(
        csv_reader(fileinput.input(files), delimiter=csv2rdf.DELIM))
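
The -f option above feeds an ini file to configparser (note that the readfp() call it uses is deprecated in favor of read_file()). A sketch of such a file, with hypothetical option values matching the keys handled in the loop:

import configparser

config = configparser.ConfigParser()
config.read_string("""
[csv2rdf]
out = output.nt
base = http://example.org/
delim = ;
skip = 1
""")
print(dict(config.items("csv2rdf")))
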
コード例 #14
0
ファイル: config.py プロジェクト: haroldrandom/knack
def get_config_parser():
    return configparser.ConfigParser(
    ) if sys.version_info.major == 3 else configparser.SafeConfigParser()
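
A self-contained usage sketch, assuming the six.moves import used by the other examples here: the helper hides the Python 2/3 split behind one call, so callers stay version-agnostic (the file name is hypothetical).

import sys
from six.moves import configparser

def get_config_parser():
    return configparser.ConfigParser(
    ) if sys.version_info.major == 3 else configparser.SafeConfigParser()

parser = get_config_parser()
parser.read('config')  # same ConfigParser API on both interpreter lines
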
コード例 #15
0
def parse_args(args_str):
    '''
    Eg. python svc_monitor.py --rabbit_server localhost
                         --rabbit_port 5672
                         --rabbit_user guest
                         --rabbit_password guest
                         --cassandra_server_list 10.1.2.3:9160
                         --api_server_ip 10.1.2.3
                         --api_server_port 8082
                         --api_server_use_ssl False
                         --zk_server_ip 10.1.2.3
                         --zk_server_port 2181
                         --collectors 127.0.0.1:8086
                         --http_server_port 8090
                         --log_local
                         --log_level SYS_DEBUG
                         --log_category test
                         --log_file <stdout>
                         --trace_file /var/log/contrail/svc-monitor.err
                         --use_syslog
                         --syslog_facility LOG_USER
                         --cluster_id <testbed-name>
                         --check_service_interval 60
                         [--region_name <name>]
                         [--reset_config]
    '''

    # Source any specified config/ini file
    # Turn off help, so we show all options in response to -h
    conf_parser = argparse.ArgumentParser(add_help=False)

    conf_parser.add_argument("-c",
                             "--conf_file",
                             action='append',
                             help="Specify config file",
                             metavar="FILE")
    args, remaining_argv = conf_parser.parse_known_args(args_str.split())

    defaults = {
        'rabbit_server': 'localhost',
        'rabbit_port': '5672',
        'rabbit_user': 'guest',
        'rabbit_password': 'guest',
        'rabbit_vhost': None,
        'rabbit_ha_mode': False,
        'cassandra_server_list': '127.0.0.1:9160',
        'api_server_ip': '127.0.0.1',
        'api_server_port': '8082',
        'api_server_use_ssl': None,
        'zk_server_ip': '127.0.0.1',
        'zk_server_port': '2181',
        'collectors': None,
        'http_server_port': '8088',
        'http_server_ip': '0.0.0.0',
        'log_local': False,
        'log_level': SandeshLevel.SYS_DEBUG,
        'log_category': '',
        'log_file': Sandesh._DEFAULT_LOG_FILE,
        'trace_file': '/var/log/contrail/svc-monitor.err',
        'use_syslog': False,
        'syslog_facility': Sandesh._DEFAULT_SYSLOG_FACILITY,
        'region_name': None,
        'cluster_id': '',
        'logging_conf': '',
        'logger_class': None,
        'check_service_interval': '60',
        'nova_endpoint_type': 'internalURL',
        'rabbit_use_ssl': False,
        'kombu_ssl_version': '',
        'kombu_ssl_keyfile': '',
        'kombu_ssl_certfile': '',
        'kombu_ssl_ca_certs': '',
        'analytics_api_ssl_enable': False,
        'analytics_api_insecure_enable': False,
        'analytics_api_ssl_ca_cert': '',
        'analytics_api_ssl_keyfile': '',
        'analytics_api_ssl_certfile': '',
    }
    defaults.update(SandeshConfig.get_default_options(['DEFAULTS']))
    secopts = {
        'use_certs': False,
        'keyfile': '',
        'certfile': '',
        'ca_certs': '',
    }
    ksopts = {
        'auth_host': '127.0.0.1',
        'auth_protocol': 'http',
        'auth_port': '5000',
        'auth_version': 'v3',
        'auth_insecure': True,
        'admin_user': '******',
        'admin_password': '******',
        'admin_tenant_name': 'admin',
        'user_domain_name': 'Default',
        'project_domain_name': 'Default'
    }
    schedops = {
        'si_netns_scheduler_driver':
        'svc_monitor.scheduler.vrouter_scheduler.RandomScheduler',
        'analytics_server_list': '127.0.0.1:8081',
        'availability_zone': None,
        'netns_availability_zone': None,
        'aaa_mode': cfgm_common.AAA_MODE_DEFAULT_VALUE,
    }
    cassandraopts = {
        'cassandra_user': None,
        'cassandra_password': None,
    }
    sandeshopts = SandeshConfig.get_default_options()

    saved_conf_file = args.conf_file
    config = configparser.ConfigParser()
    if args.conf_file:
        config.read(args.conf_file)
        defaults.update(dict(config.items("DEFAULTS")))
        if ('SECURITY' in config.sections()
                and 'use_certs' in config.options('SECURITY')):
            if config.getboolean('SECURITY', 'use_certs'):
                secopts.update(dict(config.items("SECURITY")))
        if 'KEYSTONE' in config.sections():
            ksopts.update(dict(config.items("KEYSTONE")))
        if 'SCHEDULER' in config.sections():
            schedops.update(dict(config.items("SCHEDULER")))
        if 'CASSANDRA' in config.sections():
            cassandraopts.update(dict(config.items('CASSANDRA')))
        SandeshConfig.update_options(sandeshopts, config)

    # Override with CLI options
    # Don't suppress add_help here so it will handle -h
    parser = argparse.ArgumentParser(
        # Inherit options from config_parser
        parents=[conf_parser],
        # script description with -h/--help
        description=__doc__,
        # Don't mess with format of description
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    defaults.update(secopts)
    defaults.update(ksopts)
    defaults.update(schedops)
    defaults.update(cassandraopts)
    defaults.update(sandeshopts)
    parser.set_defaults(**defaults)

    parser.add_argument(
        "--cassandra_server_list",
        help="List of cassandra servers in IP Address:Port format",
        nargs='+')
    parser.add_argument("--cassandra_use_ssl",
                        action="store_true",
                        help="Enable TLS for cassandra communication")
    parser.add_argument("--cassandra_ca_certs", help="Cassandra CA certs")
    parser.add_argument(
        "--reset_config",
        action="store_true",
        help="Warning! Destroy previous configuration and start clean")
    parser.add_argument("--api_server_ip", help="IP address of API server")
    parser.add_argument("--api_server_port", help="Port of API server")
    parser.add_argument("--api_server_use_ssl",
                        help="Use SSL to connect with API server")
    parser.add_argument("--collectors",
                        help="List of VNC collectors in ip:port format",
                        nargs="+")
    parser.add_argument("--http_server_port", help="Port of local HTTP server")
    parser.add_argument("--http_server_ip", help="IP of local HTTP server")
    parser.add_argument("--log_local",
                        action="store_true",
                        help="Enable local logging of sandesh messages")
    parser.add_argument(
        "--log_level",
        help="Severity level for local logging of sandesh messages")
    parser.add_argument(
        "--log_category",
        help="Category filter for local logging of sandesh messages")
    parser.add_argument("--log_file",
                        help="Filename for the logs to be written to")
    parser.add_argument("--trace_file",
                        help="Filename for the error "
                        "backtraces to be written to")
    parser.add_argument("--use_syslog",
                        action="store_true",
                        help="Use syslog for logging")
    parser.add_argument("--syslog_facility",
                        help="Syslog facility to receive log lines")
    parser.add_argument("--aaa_mode",
                        choices=AAA_MODE_VALID_VALUES,
                        help="AAA mode")
    parser.add_argument("--admin_user", help="Name of keystone admin user")
    parser.add_argument("--admin_password",
                        help="Password of keystone admin user")
    parser.add_argument("--admin_tenant_name",
                        help="Tenant name for keystone admin user")
    parser.add_argument("--region_name", help="Region name for openstack API")
    parser.add_argument("--cluster_id",
                        help="Used for database keyspace separation")
    parser.add_argument(
        "--logging_conf",
        help=("Optional logging configuration file, default: None"))
    parser.add_argument("--logger_class",
                        help=("Optional external logger class, default: None"))
    parser.add_argument("--cassandra_user", help="Cassandra user name")
    parser.add_argument("--cassandra_password", help="Cassandra password")
    parser.add_argument("--check_service_interval",
                        help="Check service interval")
    parser.add_argument("--analytics_api_ssl_enable",
                        help="Enable SSL in rest api server")
    parser.add_argument("--analytics_api_insecure_enable",
                        help="Enable insecure mode")
    parser.add_argument("--analytics_api_ssl_certfile",
                        help="Location of analytics api ssl host certificate")
    parser.add_argument("--analytics_api_ssl_keyfile",
                        help="Location of analytics api ssl private key")
    parser.add_argument("--analytics_api_ssl_ca_cert",
                        type=str,
                        help="Location of analytics api ssl CA certificate")
    SandeshConfig.add_parser_arguments(parser)

    args = parser.parse_args(remaining_argv)
    args._conf_file = saved_conf_file
    args.config_sections = config
    if isinstance(args.cassandra_server_list, basestring):
        args.cassandra_server_list = args.cassandra_server_list.split()
    if isinstance(args.collectors, basestring):
        args.collectors = args.collectors.split()
    if args.region_name and args.region_name.lower() == 'none':
        args.region_name = None
    if args.availability_zone and args.availability_zone.lower() == 'none':
        args.availability_zone = None
    if args.netns_availability_zone and \
            args.netns_availability_zone.lower() == 'none':
        args.netns_availability_zone = None
    args.sandesh_config = SandeshConfig.from_parser_arguments(args)
    args.cassandra_use_ssl = (str(args.cassandra_use_ssl).lower() == 'true')

    args.analytics_api_ssl_enable = \
            (str(args.analytics_api_ssl_enable).lower() == 'true')
    args.analytics_api_insecure_enable = \
            (str(args.analytics_api_insecure_enable).lower() == 'true')
    return args
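
A sketch of a minimal file for -c/--conf_file, with sections matching the ones read above (all values are placeholders):

import configparser

config = configparser.ConfigParser()
config.read_string("""
[DEFAULTS]
rabbit_server = localhost
zk_server_ip = 127.0.0.1

[KEYSTONE]
auth_host = 127.0.0.1
admin_tenant_name = admin
""")
print(dict(config.items('KEYSTONE')))
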
コード例 #16
0
def main(_):
    '''main'''

    if FLAGS.expdir is None:
        raise Exception('no expdir specified. Command usage: '
                        'nabu data --expdir=/path/to/recipe '
                        '--recipe=/path/to/recipe')

    if not os.path.isdir(FLAGS.expdir):
        raise Exception('cannot find expdir %s' % FLAGS.expdir)

    if FLAGS.recipe is None:
        raise Exception('no recipe specified. Command usage: '
                        'nabu data --expdir=/path/to/recipe '
                        '--recipe=/path/to/recipe')

    if not os.path.isdir(FLAGS.recipe):
        raise Exception('cannot find recipe %s' % FLAGS.recipe)

    evaluator_cfg_file = os.path.join(FLAGS.recipe, 'test_evaluator.cfg')
    database_cfg_file = os.path.join(FLAGS.recipe, 'database.conf')
    reconstructor_cfg_file = os.path.join(FLAGS.recipe, 'reconstructor.cfg')
    scorer_cfg_file = os.path.join(FLAGS.recipe, 'scorer.cfg')
    postprocessor_cfg_file = os.path.join(FLAGS.recipe, 'postprocessor.cfg')
    model_cfg_file = os.path.join(FLAGS.recipe, 'model.cfg')

    #Assuming only one (the last one) training stage needs testing
    parsed_evaluator_cfg = configparser.ConfigParser()
    parsed_evaluator_cfg.read(evaluator_cfg_file)
    training_stage = parsed_evaluator_cfg.get('evaluator', 'segment_length')

    ##create the testing dir
    #if os.path.isdir(os.path.join(FLAGS.expdir, 'test')):
    #shutil.rmtree(os.path.join(FLAGS.expdir, 'test'))
    #os.makedirs(os.path.join(FLAGS.expdir, 'test'))
    if not os.path.isdir(os.path.join(FLAGS.expdir, 'test')):
        os.makedirs(os.path.join(FLAGS.expdir, 'test'))

    #copy the config files
    parsed_database_cfg = configparser.ConfigParser()
    parsed_database_cfg.read(database_cfg_file)
    segment_parsed_database_cfg = parsed_database_cfg

    for section in segment_parsed_database_cfg.sections():
        if 'store_dir' in dict(
                segment_parsed_database_cfg.items(section)).keys():
            segment_parsed_database_cfg.set(
                section, 'store_dir',
                os.path.join(
                    segment_parsed_database_cfg.get(section, 'store_dir'),
                    training_stage))
    with open(os.path.join(FLAGS.expdir, 'test', 'database.cfg'), 'w') as fid:
        segment_parsed_database_cfg.write(fid)

    #shutil.copyfile(database_cfg_file,
    #os.path.join(FLAGS.expdir, 'test', 'database.cfg'))
    shutil.copyfile(evaluator_cfg_file,
                    os.path.join(FLAGS.expdir, 'test', 'evaluator.cfg'))
    shutil.copyfile(reconstructor_cfg_file,
                    os.path.join(FLAGS.expdir, 'test', 'reconstructor.cfg'))
    shutil.copyfile(scorer_cfg_file,
                    os.path.join(FLAGS.expdir, 'test', 'scorer.cfg'))

    try:
        shutil.copyfile(
            postprocessor_cfg_file,
            os.path.join(FLAGS.expdir, 'test', 'postprocessor.cfg'))
    except (IOError, OSError):
        pass
    shutil.copyfile(model_cfg_file,
                    os.path.join(FLAGS.expdir, 'test', 'model.cfg'))

    #create a link to the model that will be used for testing. Assuming
    #it is stored in the 'full' directory of expdir
    if not os.path.isdir(os.path.join(FLAGS.expdir, 'test', 'model')):
        os.symlink(os.path.join(FLAGS.expdir, training_stage, 'model'),
                   os.path.join(FLAGS.expdir, 'test', 'model'))

    if FLAGS.computing == 'condor':

        computing_cfg_file = 'config/computing/condor/non_distributed.cfg'
        parsed_computing_cfg = configparser.ConfigParser()
        parsed_computing_cfg.read(computing_cfg_file)
        computing_cfg = dict(parsed_computing_cfg.items('computing'))

        if not os.path.isdir(os.path.join(FLAGS.expdir, 'test', 'outputs')):
            os.makedirs(os.path.join(FLAGS.expdir, 'test', 'outputs'))

        subprocess.call([
            'condor_submit',
            'expdir=%s' % os.path.join(FLAGS.expdir, 'test'),
            'script=nabu/scripts/test.py',
            'nabu/computing/condor/non_distributed_cpu.job'
        ])

    elif FLAGS.computing == 'standard':
        os.environ['CUDA_VISIBLE_DEVICES'] = '0'
        test(expdir=os.path.join(FLAGS.expdir, 'test'))

    else:
        raise Exception('Unknown computing type %s' % FLAGS.computing)
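
The store_dir rewriting above is a plain ConfigParser.set() plus write() round-trip; the same pattern in isolation (paths are made up):

import configparser
import io
import os

cfg = configparser.ConfigParser()
cfg.read_string("[database]\nstore_dir = /data/features\n")
cfg.set('database', 'store_dir',
        os.path.join(cfg.get('database', 'store_dir'), 'final'))
buf = io.StringIO()
cfg.write(buf)
print(buf.getvalue())  # store_dir = /data/features/final
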
コード例 #17
0
    def parse(cell, config):
        """Separate input into (connection info, KQL statements, options)"""

        parsed_queries = []
        # split to max 2 parts. First part, parts[0], is the first string.
        parts = [part.strip() for part in cell.split(None, 1)]
        # print(parts)
        if not parts:
            parsed_queries.append({"connection": "", "kql": "", "options": {}})
            return parsed_queries

        #
        # replace substrings of the form $name or ${name}, on Windows also %name%, if found in env variables
        #
        parts[0] = expandvars(parts[0])  # for environment variables

        # assume connection is specified
        connection = parts[0]
        code = parts[1] if len(parts) > 1 else ""

        #
        # connection taken from a section in a dsn file (the file name has to be defined in config.dsn_filename or specified as a parameter)
        #
        if parts[0].startswith("[") and parts[0].endswith("]"):
            section = parts[0].lstrip("[").rstrip("]")
            parser = CP.ConfigParser()

            # parse to get the options, for the case that the file name is specified
            kql, options = Parser._parse_kql_options(code, config)
            # print( "filename: {}".format(options.get("dsn_filename")))
            # with open(options.get("dsn_filename"), "r") as text_file:
            #     print ("file.content: {} ".format(text_file.read()))

            parser.read(options.get("dsn_filename", config.dsn_filename))
            cfg_dict = dict(parser.items(section))
            cfg_dict_lower = dict()
            # for k,v in cfg_dict:
            #     cfg_dict_lower[k.lower()] = v
            cfg_dict_lower = {k.lower(): v for (k, v) in cfg_dict.items()}
            if cfg_dict_lower.get("appid"):
                connection_list = []
                for key in ["appid", "appkey"]:
                    if cfg_dict_lower.get(key):
                        connection_list.append(
                            str.format("{0}('{1}')", key,
                                       cfg_dict_lower.get(key)))
                connection = "appinsights://" + ".".join(connection_list)
            elif cfg_dict_lower.get("workspace"):
                connection_list = []
                for key in ["workspace", "appkey"]:
                    if cfg_dict_lower.get(key):
                        connection_list.append(
                            str.format("{0}('{1}')", key,
                                       cfg_dict_lower.get(key)))
                connection = "loganalytics://" + ".".join(connection_list)
            else:
                if cfg_dict_lower.get("user"):
                    cfg_dict_lower["username"] = cfg_dict_lower.get("user")
                connection_list = []
                for key in ["username", "password", "cluster", "database"]:
                    if cfg_dict_lower.get(key):
                        connection_list.append(
                            str.format("{0}('{1}')", key,
                                       cfg_dict_lower.get(key)))
                connection = "kusto://" + ".".join(connection_list)
        #
        # connection not specified, override default
        #
        elif not (parts[0].startswith("kusto://")
                  or parts[0].startswith("appinsights://")
                  or parts[0].startswith("loganalytics://")
                  or parts[0].startswith("cache://") or "@" in parts[0]):
            connection = ""
            code = cell

        #
        # split string to queries
        #
        queries = []
        queryLines = []
        for line in code.splitlines(True):
            if line.isspace():
                if len(queryLines) > 0:
                    queries.append("".join(queryLines))
                    queryLines = []
            else:
                queryLines.append(line)

        if len(queryLines) > 0:
            queries.append("".join(queryLines))

        suppress_results = False
        if len(queries) > 0 and queries[-1].strip() == ";":
            suppress_results = True
            queries = queries[:-1]

        if len(queries) == 0:
            queries.append("")

        #
        # parse code to kql and options
        #
        for query in queries:
            kql, options = Parser._parse_kql_options(query.strip(), config)
            if suppress_results:
                options["suppress_results"] = True
            parsed_queries.append({
                "connection": connection.strip(),
                "kql": kql,
                "options": options
            })

        return parsed_queries
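
The query-splitting rules above are worth seeing in isolation: blank lines separate queries, and a final lone ";" suppresses result output. A sketch with a made-up cell body (not using the class itself):

code = "StormEvents | count\n\nStormEvents | take 5\n\n;\n"
queries, lines = [], []
for line in code.splitlines(True):
    if line.isspace():
        if lines:
            queries.append("".join(lines))
            lines = []
    else:
        lines.append(line)
if lines:
    queries.append("".join(lines))
suppress_results = bool(queries) and queries[-1].strip() == ";"
if suppress_results:
    queries = queries[:-1]
print(queries, suppress_results)  # two queries, suppression enabled
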
コード例 #18
0
    def handle(self, *args, **options):
        """
        Resets the database for this project.

        Note: Transaction wrappers are in reverse as a work around for
        autocommit, anybody know how to do this the right way?
        """

        if args:
            raise CommandError("reset_db takes no arguments")

        router = options.get('router')
        dbinfo = settings.DATABASES.get(router)
        if dbinfo is None:
            raise CommandError("Unknown database router %s" % router)

        engine = dbinfo.get('ENGINE').split('.')[-1]

        user = password = database_name = ''
        if engine == 'mysql':
            read_default_file = dbinfo.get('OPTIONS',
                                           {}).get('read_default_file')
            if read_default_file:
                config = configparser.ConfigParser()
                config.read(read_default_file)
                user = config.get('client', 'user')
                password = config.get('client', 'password')
                database_name = config.get('client', 'database')

        user = options.get('user') or dbinfo.get('USER') or user
        password = options.get('password') or dbinfo.get(
            'PASSWORD') or password
        owner = options.get('owner') or user

        database_name = options.get('dbname') or dbinfo.get(
            'NAME') or database_name
        if database_name == '':
            raise CommandError(
                "You need to specify DATABASE_NAME in your Django settings file."
            )

        database_host = dbinfo.get('HOST')
        database_port = dbinfo.get('PORT')

        verbosity = int(options.get('verbosity', 1))
        if options.get('interactive'):
            confirm = input("""
You have requested a database reset.
This will IRREVERSIBLY DESTROY
ALL data in the database "%s".
Are you sure you want to do this?

Type 'yes' to continue, or 'no' to cancel: """ % (database_name, ))
        else:
            confirm = 'yes'

        if confirm != 'yes':
            print("Reset cancelled.")
            return

        if engine in ('sqlite3', 'spatialite'):
            import os
            try:
                logging.info("Unlinking %s database" % engine)
                os.unlink(database_name)
            except OSError:
                pass

        elif engine in ('mysql', ):
            import MySQLdb as Database
            kwargs = {
                'user': user,
                'passwd': password,
            }
            if database_host.startswith('/'):
                kwargs['unix_socket'] = database_host
            else:
                kwargs['host'] = database_host

            if database_port:
                kwargs['port'] = int(database_port)

            connection = Database.connect(**kwargs)
            drop_query = 'DROP DATABASE IF EXISTS `%s`' % database_name
            utf8_support = options.get('no_utf8_support',
                                       False) and '' or 'CHARACTER SET utf8'
            create_query = 'CREATE DATABASE `%s` %s' % (database_name,
                                                        utf8_support)
            logging.info('Executing... "' + drop_query + '"')
            connection.query(drop_query)
            logging.info('Executing... "' + create_query + '"')
            connection.query(create_query)

        elif engine in ('postgresql', 'postgresql_psycopg2', 'postgis'):
            if engine == 'postgresql':
                import psycopg as Database  # NOQA
            elif engine in ('postgresql_psycopg2', 'postgis'):
                import psycopg2 as Database  # NOQA

            conn_string = "dbname=template1"
            if user:
                conn_string += " user=%s" % user
            if password:
                conn_string += " password='******'" % password
            if database_host:
                conn_string += " host=%s" % database_host
            if database_port:
                conn_string += " port=%s" % database_port

            connection = Database.connect(conn_string)
            connection.set_isolation_level(0)  # enable autocommit
            cursor = connection.cursor()
            drop_query = "DROP DATABASE \"%s\";" % database_name
            logging.info('Executing... "' + drop_query + '"')

            try:
                cursor.execute(drop_query)
            except Database.ProgrammingError as e:
                logging.info("Error: %s" % str(e))

            create_query = "CREATE DATABASE \"%s\"" % database_name
            if owner:
                create_query += " WITH OWNER = \"%s\" " % owner
            create_query += " ENCODING = 'UTF8'"

            if engine == 'postgis':
                # fetch postgis template name if it exists
                from django.contrib.gis.db.backends.postgis.creation import PostGISCreation
                postgis_template = PostGISCreation(connection).template_postgis
                if postgis_template is not None:
                    create_query += ' TEMPLATE = %s' % postgis_template

            if settings.DEFAULT_TABLESPACE:
                create_query += ' TABLESPACE = %s;' % settings.DEFAULT_TABLESPACE
            else:
                create_query += ';'

            logging.info('Executing... "' + create_query + '"')
            cursor.execute(create_query)

        else:
            raise CommandError("Unknown database engine %s" % engine)

        if verbosity >= 2 or options.get('interactive'):
            print("Reset successful.")
コード例 #19
0
import time
from collections import OrderedDict

import pytest
from six.moves import configparser

from datahub import DataHub
from datahub.exceptions import LimitExceededException, InvalidOperationException, ResourceNotFoundException
from datahub.models import OdpsConnectorConfig, ConnectorType, ConnectorState, PartitionMode
from datahub.models.connector import DatabaseConnectorConfig, EsConnectorConfig, FcConnectorConfig, AuthMode, \
    OssConnectorConfig, OtsConnectorConfig, ConnectorShardStatus, WriteMode, DataHubConnectorConfig, ConnectorOffset

current_path = os.path.split(os.path.realpath(__file__))[0]
root_path = os.path.join(current_path, '../..')

configer = configparser.ConfigParser()
configer.read(os.path.join(current_path, '../datahub.ini'))

access_id = configer.get('datahub', 'access_id')
access_key = configer.get('datahub', 'access_key')
endpoint = configer.get('datahub', 'endpoint')

connector_test_project_name = configer.get('datahub',
                                           'connector_test_project_name')
system_time_topic_name = configer.get('datahub', 'system_time_topic_name')
event_time_topic_name = configer.get('datahub', 'event_time_topic_name')
user_define_topic_name = configer.get('datahub', 'user_define_topic_name')
ads_test_topic_name = configer.get('datahub', 'ads_test_topic_name')
es_test_topic_name = configer.get('datahub', 'es_test_topic_name')
fc_test_topic_name = configer.get('datahub', 'fc_test_topic_name')
mysql_test_topic_name = configer.get('datahub', 'mysql_test_topic_name')
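
The datahub.ini consumed above presumably holds a single [datahub] section; a sketch with placeholder credentials and endpoint:

import configparser

configer = configparser.ConfigParser()
configer.read_string("""
[datahub]
access_id = my-access-id
access_key = my-access-key
endpoint = https://datahub.example.com
""")
print(configer.get('datahub', 'endpoint'))
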
コード例 #20
0
ファイル: lm_dataprep.py プロジェクト: uniq10/nabu
'''@file lm_dataprep.py
this file will do the dataprep for lm training'''

import os
from six.moves import configparser
from nabu.processing.target_normalizers import normalizer_factory

#pointer to the config file
database_cfg_file = 'config/lm_databases/TIMIT.conf'

#read the database config file
database_cfg = configparser.ConfigParser()
database_cfg.read(database_cfg_file)
database_cfg = dict(database_cfg.items('database'))

#create the text normalizer
normalizer = normalizer_factory.factory(database_cfg['normalizer'])

print '------- normalizing training text -----------'
sourcefiles = database_cfg['train_data'].split(' ')
if not os.path.isdir(database_cfg['train_dir']):
    os.makedirs(database_cfg['train_dir'])
target_fid = open(os.path.join(database_cfg['train_dir'], 'text'), 'w')
max_num_chars = 0
numlines = 0

#read the textfiles line by line, normalize and write in target file
for sourcefile in sourcefiles:
    with open(sourcefile) as fid:
        for line in fid.readlines():
            normalized = normalizer(line.strip())
コード例 #21
0
    def read_settings(self):
        ''' Reads the settings from the vmware_inventory.ini file '''

        scriptbasename = __file__
        scriptbasename = os.path.basename(scriptbasename)
        scriptbasename = scriptbasename.replace('.py', '')

        defaults = {
            'vmware': {
                'server':
                '',
                'port':
                443,
                'username':
                '',
                'password':
                '',
                'validate_certs':
                True,
                'ini_path':
                os.path.join(os.path.dirname(__file__),
                             '%s.ini' % scriptbasename),
                'cache_name':
                'ansible-vmware',
                'cache_path':
                '~/.ansible/tmp',
                'cache_max_age':
                3600,
                'max_object_level':
                1,
                'skip_keys':
                'declaredalarmstate,'
                'disabledmethod,'
                'dynamicproperty,'
                'dynamictype,'
                'environmentbrowser,'
                'managedby,'
                'parent,'
                'childtype,'
                'resourceconfig',
                'alias_pattern':
                '{{ config.name + "_" + config.uuid }}',
                'host_pattern':
                '{{ guest.ipaddress }}',
                'host_filters':
                '{{ guest.gueststate == "running" }}',
                'groupby_patterns':
                '{{ guest.guestid }},{{ "templates" if config.template else "guests"}}',
                'lower_var_keys':
                True,
                'custom_field_group_prefix':
                'vmware_tag_',
                'groupby_custom_field':
                False
            }
        }

        if six.PY3:
            config = configparser.ConfigParser()
        else:
            config = configparser.SafeConfigParser()

        # where is the config?
        vmware_ini_path = os.environ.get('VMWARE_INI_PATH',
                                         defaults['vmware']['ini_path'])
        vmware_ini_path = os.path.expanduser(
            os.path.expandvars(vmware_ini_path))
        config.read(vmware_ini_path)

        # apply defaults
        for k, v in defaults['vmware'].items():
            if not config.has_option('vmware', k):
                config.set('vmware', k, str(v))

        # where is the cache?
        self.cache_dir = os.path.expanduser(config.get('vmware', 'cache_path'))
        if self.cache_dir and not os.path.exists(self.cache_dir):
            os.makedirs(self.cache_dir)

        # set the cache filename and max age
        cache_name = config.get('vmware', 'cache_name')
        self.cache_path_cache = self.cache_dir + "/%s.cache" % cache_name
        self.debugl('cache path is %s' % self.cache_path_cache)
        self.cache_max_age = int(config.getint('vmware', 'cache_max_age'))

        # mark the connection info
        self.server = os.environ.get('VMWARE_SERVER',
                                     config.get('vmware', 'server'))
        self.debugl('server is %s' % self.server)
        self.port = int(
            os.environ.get('VMWARE_PORT', config.get('vmware', 'port')))
        self.username = os.environ.get('VMWARE_USERNAME',
                                       config.get('vmware', 'username'))
        self.debugl('username is %s' % self.username)
        self.password = os.environ.get('VMWARE_PASSWORD',
                                       config.get('vmware', 'password'))
        self.validate_certs = os.environ.get(
            'VMWARE_VALIDATE_CERTS', config.get('vmware', 'validate_certs'))
        if self.validate_certs in ['no', 'false', 'False', False]:
            self.validate_certs = False

        self.debugl('cert validation is %s' % self.validate_certs)

        # behavior control
        self.maxlevel = int(config.get('vmware', 'max_object_level'))
        self.debugl('max object level is %s' % self.maxlevel)
        self.lowerkeys = config.get('vmware', 'lower_var_keys')
        if type(self.lowerkeys) != bool:
            if str(self.lowerkeys).lower() in ['yes', 'true', '1']:
                self.lowerkeys = True
            else:
                self.lowerkeys = False
        self.debugl('lower keys is %s' % self.lowerkeys)
        self.skip_keys = list(config.get('vmware', 'skip_keys').split(','))
        self.debugl('skip keys is %s' % self.skip_keys)
        self.host_filters = list(
            config.get('vmware', 'host_filters').split(','))
        self.debugl('host filters are %s' % self.host_filters)
        self.groupby_patterns = list(
            config.get('vmware', 'groupby_patterns').split(','))
        self.debugl('groupby patterns are %s' % self.groupby_patterns)

        # Special feature to disable the brute force serialization of the
        # virtual machine objects. The key name for these properties does not
        # matter because the values are just items for a larger list.
        if config.has_section('properties'):
            self.guest_props = []
            for prop in config.items('properties'):
                self.guest_props.append(prop[1])

        # save the config
        self.config = config
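
A sketch of the ini file this read_settings() expects, derived from the defaults dict and the optional [properties] section above (all values are placeholders):

import configparser

config = configparser.ConfigParser()
config.read_string("""
[vmware]
server = vcenter.example.com
username = administrator@vsphere.local
password = secret
validate_certs = False

[properties]
prop01 = name
prop02 = guest.ipAddress
""")
print(config.get('vmware', 'server'))
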
コード例 #22
0
    def __init__(self, f):
        self.ini_parser = configparser.ConfigParser()
        self.ini_parser.readfp(f)
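
readfp() was deprecated in Python 3.2 and removed in 3.12; a sketch of the modern equivalent (the file name is hypothetical):

import configparser

parser = configparser.ConfigParser()
with open('settings.ini') as f:
    parser.read_file(f)  # replaces the deprecated readfp()
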
コード例 #23
0
def main():

    parser = argparse.ArgumentParser()
    l.setup_logging_arguments(parser)
    parser.add_argument('--message-file', dest='message_file', default=None,
                        help='The close pull request message')

    args = parser.parse_args()
    l.configure_logging(args)

    if args.message_file:
        try:
            with open(args.message_file, 'r') as _file:
                pull_request_text = _file.read()
        except (OSError, IOError):
            log.exception("Could not open close pull request message file")
            raise
    else:
        pull_request_text = MESSAGE

    GITHUB_SECURE_CONFIG = os.environ.get('GITHUB_SECURE_CONFIG',
                                          '/etc/github/github.secure.config')

    secure_config = configparser.ConfigParser()
    secure_config.read(GITHUB_SECURE_CONFIG)
    registry = u.ProjectsRegistry()

    if secure_config.has_option("github", "oauth_token"):
        ghub = github.Github(secure_config.get("github", "oauth_token"))
    else:
        ghub = github.Github(secure_config.get("github", "username"),
                             secure_config.get("github", "password"))

    orgs = ghub.get_user().get_orgs()
    orgs_dict = dict(zip([o.login.lower() for o in orgs], orgs))
    for section in registry.configs_list:
        project = section['project']

        # Make sure we're using GitHub for this project:
        if not p.has_github(project):
            continue

        # Make sure we're supposed to close pull requests for this project:
        if 'options' in section and 'has-pull-requests' in section['options']:
            continue

        # Find the project's repo
        project_split = project.split('/', 1)

        # Handle errors in case the repo or the organization doesn't exists
        try:
            if len(project_split) > 1:
                org = orgs_dict[project_split[0].lower()]
                repo = org.get_repo(project_split[1])
            else:
                repo = ghub.get_user().get_repo(project)
        except (KeyError, github.GithubException):
            log.exception("Could not find project %s on GitHub." % project)
            continue

        # Close each pull request
        pull_requests = repo.get_pulls("open")
        for req in pull_requests:
            vars = dict(project=project)
            issue_data = {"url": repo.url + "/issues/" + str(req.number)}
            issue = github.Issue.Issue(requester=req._requester,
                                       headers={},
                                       attributes=issue_data,
                                       completed=True)
            issue.create_comment(pull_request_text % vars)
            req.edit(state="closed")
コード例 #24
0
    def _read_ini_settings(self):
        ''' Read ini file settings '''

        scriptbasename = "ocp-on-vmware"
        defaults = {'vmware': {
            'ini_path': os.path.join(os.path.dirname(__file__), '%s.ini' % scriptbasename),
            'console_port':'8443',
            'container_storage':'none',
            'deployment_type':'openshift-enterprise',
            'openshift_vers':'v3_6',
            'vcenter_username':'******',
            'vcenter_template_name':'ocp-server-template-2.0.2',
            'vcenter_folder':'ocp',
            'vcenter_resource_pool':'/Resources/OCP3',
            'app_dns_prefix':'apps',
            'vm_network':'VM Network',
            'vm_ipaddr_allocation_type': 'static',
            'rhel_subscription_pool':'Red Hat OpenShift Container Platform, Premium*',
            'openshift_sdn':'redhat/openshift-ovs-subnet',
            'byo_lb':'False',
            'lb_host':'haproxy-',
            'byo_nfs':'False',
            'nfs_host':'nfs-0',
            'nfs_registry_mountpoint':'/exports',
            'master_nodes':'3',
            'infra_nodes':'2',
            'app_nodes':'3',
            'ocp_hostname_prefix':'',
            'auth_type':'ldap',
            'ldap_user':'******',
            'ldap_user_password':'',
            'tag': self.tag,
            'ldap_fqdn':'' }
            }
        if six.PY3:
            config = configparser.ConfigParser()
        else:
            config = configparser.SafeConfigParser()

        # where is the config?
        self.vmware_ini_path = os.environ.get('VMWARE_INI_PATH', defaults['vmware']['ini_path'])
        self.vmware_ini_path = os.path.expanduser(os.path.expandvars(self.vmware_ini_path))
        config.read(self.vmware_ini_path)

        # apply defaults
        for k, v in defaults['vmware'].items():
            if not config.has_option('vmware', k):
                config.set('vmware', k, str(v))

        self.console_port = config.get('vmware', 'console_port')
        self.cluster_id = config.get('vmware', 'cluster_id')
        self.container_storage = config.get('vmware', 'container_storage')
        self.deployment_type = config.get('vmware','deployment_type')
        self.openshift_vers = config.get('vmware','openshift_vers')
        self.vcenter_host = config.get('vmware', 'vcenter_host')
        self.vcenter_username = config.get('vmware', 'vcenter_username')
        self.vcenter_password = config.get('vmware', 'vcenter_password')
        self.vcenter_template_name = config.get('vmware', 'vcenter_template_name')
        self.vcenter_folder = config.get('vmware', 'vcenter_folder')
        self.vcenter_datastore = config.get('vmware', 'vcenter_datastore')
        self.vcenter_datacenter = config.get('vmware', 'vcenter_datacenter')
        self.vcenter_cluster = config.get('vmware', 'vcenter_cluster')
        self.vcenter_resource_pool = config.get('vmware', 'vcenter_resource_pool')
        self.dns_zone= config.get('vmware', 'dns_zone')
        self.app_dns_prefix = config.get('vmware', 'app_dns_prefix')
        self.vm_dns = config.get('vmware', 'vm_dns')
        self.vm_gw = config.get('vmware', 'vm_gw')
        self.vm_netmask = config.get('vmware', 'vm_netmask')
        self.vm_network = config.get('vmware', 'vm_network')
        self.vm_ipaddr_allocation_type = config.get('vmware', 'vm_ipaddr_allocation_type')
        self.rhel_subscription_user = config.get('vmware', 'rhel_subscription_user')
        self.rhel_subscription_pass = config.get('vmware', 'rhel_subscription_pass')
        self.rhel_subscription_server = config.get('vmware', 'rhel_subscription_server')
        self.rhel_subscription_pool = config.get('vmware', 'rhel_subscription_pool')
        self.rhsm_katello_url = config.get('vmware', 'rhsm_katello_url')
        self.rhsm_activation_key = config.get('vmware', 'rhsm_activation_key')
        self.rhsm_org_id = config.get('vmware', 'rhsm_org_id')
        self.openshift_sdn = config.get('vmware', 'openshift_sdn')
        self.byo_lb = config.get('vmware', 'byo_lb')
        self.lb_host = config.get('vmware', 'lb_host')
        self.lb_ha_ip = config.get('vmware', 'lb_ha_ip')
        self.byo_nfs = config.get('vmware', 'byo_nfs')
        self.nfs_host = config.get('vmware', 'nfs_host')
        self.nfs_registry_mountpoint = config.get('vmware', 'nfs_registry_mountpoint')
        self.master_nodes = config.get('vmware', 'master_nodes')
        self.infra_nodes = config.get('vmware', 'infra_nodes')
        self.app_nodes = config.get('vmware', 'app_nodes')
        self.storage_nodes = config.get('vmware', 'storage_nodes')
        self.vm_ipaddr_start = config.get('vmware', 'vm_ipaddr_start')
        self.ocp_hostname_prefix = config.get('vmware', 'ocp_hostname_prefix') or ''
        self.auth_type = config.get('vmware', 'auth_type')
        self.ldap_user = config.get('vmware', 'ldap_user')
        self.ldap_user_password = config.get('vmware', 'ldap_user_password')
        self.ldap_fqdn = config.get('vmware', 'ldap_fqdn')
        err_count = 0

        required_vars = {
            'dns_zone': self.dns_zone,
            'vcenter_host': self.vcenter_host,
            'vcenter_password': self.vcenter_password,
            'vm_ipaddr_start': self.vm_ipaddr_start,
            'vm_ipaddr_allocation_type': self.vm_ipaddr_allocation_type,
            'ldap_fqdn': self.ldap_fqdn,
            'ldap_user_password': self.ldap_user_password,
            'vm_dns': self.vm_dns,
            'vm_gw': self.vm_gw,
            'vm_netmask': self.vm_netmask,
            'vcenter_datacenter': self.vcenter_datacenter,
        }

        for k, v in required_vars.items():
            if v == '':
                err_count += 1
                print "Missing %s " % k
        if required_vars['vm_ipaddr_allocation_type'] not in ('dhcp', 'static'):
            err_count += 1
            print ("'vm_ipaddr_allocation_type' can take only "
                   "'dhcp' and 'static' values.")

        if err_count > 0:
            print "Please fill out the missing variables in %s " %  self.vmware_ini_path
            exit (1)
        self.wildcard_zone = "%s.%s" % (self.app_dns_prefix, self.dns_zone)
        self.support_nodes = 0

        if not self.cluster_id:
            # create a unique cluster_id first
            self.cluster_id = ''.join(random.choice('0123456789abcdefghijklmnopqrstuvwxyz') for i in range(7))
            config.set('vmware', 'cluster_id', self.cluster_id)
            for line in fileinput.input(self.vmware_ini_path, inplace=True):
                if line.startswith('cluster_id'):
                    print "cluster_id=" + str(self.cluster_id)
                else:
                    print line,

        print 'Configured inventory values:'
        for each_section in config.sections():
            for (key, val) in config.items(each_section):
                if 'pass' in key:
                    print '\t %s:  ******' % ( key )
                else:
                    print '\t %s:  %s' % ( key,  val )
        print '\n'
コード例 #25
0
def main():
    parser = argparse.ArgumentParser(
        description=
        'mklauncher is Machinetalk based session/configuration launcher for Machinekit'
    )
    parser.add_argument('-n',
                        '--name',
                        help='Name of the machine',
                        default="Machinekit Launcher")
    parser.add_argument(
        '-s',
        '--suppress_ip',
        help='Do not show ip of machine in service name',
        action='store_false',
    )
    parser.add_argument('-d',
                        '--debug',
                        help='Enable debug mode',
                        action='store_true')
    parser.add_argument(
        'dirs',
        nargs='*',
        help="List of directories to scan for launcher configurations",
    )

    args = parser.parse_args()
    debug = args.debug

    logging.basicConfig()
    if debug:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)

    mkconfig = config.Config()
    mkini = os.getenv("MACHINEKIT_INI")
    if mkini is None:
        mkini = mkconfig.MACHINEKIT_INI
    if not os.path.isfile(mkini):
        sys.stderr.write("MACHINEKIT_INI " + mkini + " does not exist\n")
        sys.exit(1)

    mki = configparser.ConfigParser()
    mki.read(mkini)
    uuid = mki.get("MACHINEKIT", "MKUUID")
    remote = mki.getint("MACHINEKIT", "REMOTE")

    if remote == 0:
        logger.info(
            "Remote communication is deactivated, configserver will use the loopback interfaces"
        )
        logger.info(("set REMOTE in " + mkini +
                     " to 1 to enable remote communication"))

    logger.debug("announcing mklauncher")

    context = zmq.Context()
    context.linger = 0

    register_exit_handler()

    hostname = '%(fqdn)s'  # replaced by service announcement
    mklauncher = Mklauncher(
        context,
        svc_uuid=uuid,
        host=hostname,
        launcher_dirs=args.dirs,
        name=args.name,
        host_in_name=bool(args.suppress_ip),
        loopback=(not remote),
        debug=debug,
    )
    mklauncher.start()

    while mklauncher.running and not check_exit():
        time.sleep(1)

    logger.debug('stopping threads')
    mklauncher.stop()

    # wait for all threads to terminate
    while threading.active_count() > 1:
        time.sleep(0.1)

    logger.debug('threads stopped')
    sys.exit(0)
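
A sketch of the MACHINEKIT_INI file read above (the UUID is made up); getint() takes care of the string-to-int conversion for REMOTE:

import configparser

mki = configparser.ConfigParser()
mki.read_string("""
[MACHINEKIT]
MKUUID = 5a3c8e2a-07a4-4f5e-b6f2-aabbccddeeff
REMOTE = 1
""")
uuid = mki.get("MACHINEKIT", "MKUUID")
remote = mki.getint("MACHINEKIT", "REMOTE")
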
コード例 #26
0
ファイル: mock.py プロジェクト: hanzz/mock
def setup_logging(config_path, config_opts, options):
    log_ini = os.path.join(config_path, config_opts["log_config_file"])

    try:
        if not os.path.exists(log_ini):
            if os.path.normpath('/etc/mock') != os.path.normpath(config_path):
                log.warning(
                    "Could not find required logging config file: %s. Using default...",
                    log_ini)
                log_ini = os.path.join("/etc/mock",
                                       config_opts["log_config_file"])
                if not os.path.exists(log_ini):
                    raise IOError("Could not find log config file %s" %
                                  log_ini)
            else:
                raise IOError("Could not find log config file %s" % log_ini)
    except IOError as exc:
        log.error(exc)
        sys.exit(50)

    try:
        log_cfg = configparser.ConfigParser()
        logging.config.fileConfig(log_ini)
        log_cfg.read(log_ini)
    except (IOError, OSError, configparser.NoSectionError) as exc:
        log.error("Log config file(%s) not correctly configured: %s", log_ini,
                  exc)
        sys.exit(50)

    try:
        # set up logging format strings
        config_opts['build_log_fmt_str'] = log_cfg.get(
            "formatter_%s" % config_opts['build_log_fmt_name'],
            "format",
            raw=1)
        config_opts['root_log_fmt_str'] = log_cfg.get(
            "formatter_%s" % config_opts['root_log_fmt_name'], "format", raw=1)
        config_opts['state_log_fmt_str'] = log_cfg.get(
            "formatter_%s" % config_opts['state_log_fmt_name'],
            "format",
            raw=1)
    except configparser.NoSectionError as exc:
        log.error("Log config file (%s) missing required section: %s", log_ini,
                  exc)
        sys.exit(50)

    # set logging verbosity
    if options.verbose == 0:
        log.handlers[0].setLevel(logging.WARNING)
        tmplog = logging.getLogger("mockbuild.Root.state")
        if tmplog.handlers:
            tmplog.handlers[0].setLevel(logging.WARNING)
    elif options.verbose == 1:
        log.handlers[0].setLevel(logging.INFO)
    elif options.verbose == 2:
        log.handlers[0].setLevel(logging.DEBUG)
        logging.getLogger("mockbuild.Root.build").propagate = 1
        logging.getLogger("mockbuild").propagate = 1

    # enable tracing if requested
    logging.getLogger("trace").propagate = 0
    if options.trace:
        logging.getLogger("trace").propagate = 1
コード例 #27
0
    def __init__(self, oracle_database="pittdb", config=None, **kwargs):
        self.connection = None
        self.oracle_database = oracle_database
        cfg = {}
        if config is not None:
            cf = configparser.ConfigParser()
            cf.read(config)
            config_keys = [
                "oracleuser",
                "oraclepass",
                "oracledsn",
                "voy_username",
                "voy_password",
                "voy_path",
                "cat_location",
                "library_id",
            ]
            for item in config_keys:
                val = cf.get("Voyager", item, fallback="", raw=True).strip('"')
                if val:
                    cfg[item] = val
            if cf.get("Voyager", "connectstring") and "oracledsn" not in cfg:
                cfg["oracledsn"] = cf.get("Voyager", "connectstring", raw=True).strip(
                    '"'
                )

        cfg.update(kwargs)

        if all(arg in cfg for arg in ["oracleuser", "oraclepass", "oracledsn"]):
            self.connection = cx.connect(
                cfg["oracleuser"], cfg["oraclepass"], cfg["oracledsn"]
            )
            self.engine = sqla.create_engine(
                "oracle://", creator=lambda: self.connection
            )
            metadata = sqla.MetaData()
            tables_to_load = TABLE_NAMES

            self.tables = {}
            for table_name in tables_to_load:
                self.tables[table_name] = sqla.Table(
                    table_name,
                    metadata,
                    schema=oracle_database,
                    autoload=True,
                    autoload_with=self.engine,
                )

            for parent, foreign in RELATIONS:
                parent_column = getattr(self.tables[parent[0]].c, parent[1])
                foreign_key = getattr(self.tables[foreign[0]].c, foreign[1])
                parent_column.append_foreign_key(sqla.ForeignKey(foreign_key))

        self.cat_location = cfg.get("cat_location")
        self.library_id = cfg.get("library_id")

        if "voy_path" not in cfg:
            cfg["voy_path"] = r"C:\Voyager"

        if batchcat is not None and all(
            arg in cfg for arg in ["voy_username", "voy_password"]
        ):
            self.batchcat = batchcat.BatchCatClient(
                username=cfg["voy_username"],
                password=cfg["voy_password"],
                voy_interface=self,
                apppath=cfg["voy_path"],
            )
        else:
            self.batchcat = None
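
The config handling above strips optional double quotes around ini values; the pattern in isolation (section content is made up):

import configparser

cf = configparser.ConfigParser()
cf.read_string("""
[Voyager]
oracleuser = "ro_user"
oracledsn = "localhost:1521/VGER"
""")
val = cf.get("Voyager", "oracleuser", fallback="", raw=True).strip('"')
print(val)  # ro_user
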
コード例 #28
0
def get_restore_settings(SECTION, NAME):
    cfg = ConfigParser.ConfigParser()
    configfile = os.path.join(os.environ.get("HOME", "."), ".tinybldlin")
    cfg.read(configfile)
    VALUE = cfg.get(SECTION, NAME)
    return VALUE
コード例 #29
0
    if cache_available(config):
        inv = get_cache('scaleway_ansible_inventory.json', config)
    else:
        inv = generate_inv_from_api(config)

    save_cache(inv, config)
    return json.dumps(inv)


if __name__ == '__main__':
    inventory = {}

    # Read config
    if six.PY3:
        config = configparser.ConfigParser()
    else:
        config = configparser.SafeConfigParser()
    for configfilename in [os.path.abspath(sys.argv[0]).rsplit('.py')[0] + '.ini', 'scaleway.ini']:
        if os.path.exists(configfilename):
            config.read(configfilename)
            break

    if cache_available(config):
        inventory = get_cache('scaleway_ansible_inventory.json', config)
    else:
        inventory = get_inventory(config)

    # return to ansible
    sys.stdout.write(str(inventory))
    sys.stdout.flush()
コード例 #30
0
    def __init__(self, config_file):
        self.parser = configparser.ConfigParser()
        self.parser.read(config_file)
        self.config_file = config_file