Beispiel #1
0
def update_eq_options(wdir, proj, label, imc, eqid):
    """Populate the IMT/distance/model dropdown options for one event.

    Returns a 6-tuple (imt_options, imt, dist_options, dist,
    model_options, event_repr); empty placeholders are returned while
    any of the inputs is still unset.
    """
    # locals() here captures exactly the five parameters; an unset
    # dropdown arrives as None, so bail out with empty options.
    # (A generator expression suffices -- no need to build a list.)
    if any(val is None for val in locals().values()):
        return [], None, [], None, [], []
    df_eq_imc = get_eq_imc_df(wdir, proj, label, eqid, imc)
    imt_options = get_options(df_eq_imc, IMT_REGEX)
    imt_options.append({'label': 'Pass/Fail', 'value': 'Pass/Fail'})
    imt = imt_options[0]['value']
    dist_options = get_options(df_eq_imc, DIST_REGEX)
    dist = dist_options[0]['value']
    model_options = get_model_options(MODELS_DICT, imc, imt)
    # repr() is the idiomatic spelling of obj.__repr__()
    rep = repr(get_event_object(eqid))
    return imt_options, imt, dist_options, dist, model_options, rep
Beispiel #2
0
def main():
    """
    Initialise the logger, the bot and the storage, then process
    incoming mail messages and Telegram messages.
    """
    global bot, db

    logger = logging.getLogger('tm_monitor')
    logger.setLevel(logging.INFO)

    formatter = logging.Formatter('[%(asctime)s] %(levelname)-8s %(filename)s[LINE:%(lineno)d]# %(message)s')

    handler = DiffFileHandler()
    handler.setLevel(logging.INFO)
    handler.setFormatter(formatter)
    logger.addHandler(handler)

    # Expose the configured logger to the utils module.
    utils.log = logger

    token = get_options('main', 'token')
    if not token:
        # NOTE(review): this uses the module-level 'log', not the local
        # 'logger' configured above -- confirm that is intended.
        log.critical('В файле настроек токен бота не найден')
        return

    bot = TeleBot(token)

    # Simple persistent key/value storage shared by the handlers.
    db = shelve.open('data')

    telegram_processing()
    mail_processing()

    db.close()
Beispiel #3
0
def get_at_out_list():
    """
    Import the list of outgoing numbers from the PBX database.

    :return: {string: {string}}, {city_number: {internal_number, ...}}
    """
    # City number -> set of internal extensions using it as outbound CID.
    raw = defaultdict(set)

    options_list = get_options('asterisk', 'db')

    if options_list:
        host, user, password, db = options_list

        try:
            with pymysql.connect(host, user, password, db) as cur:

                # Only 11-digit (full) and 7-digit (local) outbound CIDs
                # are considered valid city numbers.
                cur.execute(
                    "SELECT extension, outboundcid FROM users "
                    "WHERE LENGTH(outboundcid) = 11 or LENGTH(outboundcid) = 7"
                )

                for ext, out in cur:
                    raw[get_cm(out)].add(ext)

        except OperationalError as e:
            # Connection/query failure: log it and return what we have.
            log.error(e)

    return raw
Beispiel #4
0
def get_ad_list():
    """
    Import the list of employees from AD.

    The filter !(userAccountControl:1.2.840.113556.1.4.803:=2) excludes
    disabled accounts.

    :return: [{}], list of dicts with user attributes
    """
    # Fix: the original docstring opened with four quotes («""""»),
    # leaving a stray '"' as the first character of the docstring.
    raw = []
    options_list = get_options('ad')
    ad_search = get_options('main', 'ad_search', True)

    if options_list:
        host, user, password = options_list

        server = Server(host, get_info=ALL)

        try:
            with Connection(server,
                            user,
                            password,
                            authentication=NTLM,
                            auto_bind=True) as conn:
                filter_str = '(&(objectclass=person)(!(userAccountControl:1.2.840.113556.1.4.803:=2)))'

                conn.search(
                    ad_search,
                    filter_str,
                    attributes=[
                        'displayName',  # Full name
                        'telephoneNumber',  # Internal numbers
                        'mail',  # E-mail
                        'title',  # Job title
                        'department',  # Department
                        'company',  # Organisation
                        'accountExpires'  # Account expiry date
                    ])

                raw = conn.entries
        except LDAPSocketOpenError as e:
            log.error(e)
        except LDAPBindError:
            log.error('Ошибка доменной авторизации')

    return raw
Beispiel #5
0
def main():
    '''Entry point: parse hyper-parameters, set up logging, run training.'''
    opt = get_options()

    # One log file per model/dataset/run timestamp.
    timestamp = strftime("%y%m%d-%H%M", localtime())
    log_file = f'{opt.model_name}-{opt.dataset}-{timestamp}.log'
    logger.addHandler(logging.FileHandler(log_file))

    instructor = Instructor(opt)
    instructor.run()
Beispiel #6
0
def main():
    '''Copy TiDB connection options into the global settings and serve.'''
    global g_settings

    options = utils.get_options()

    g_settings = Settings({})
    # Mirror each command-line option onto the settings object.
    for name in ('tidb_host', 'tidb_port', 'tidb_user',
                 'tidb_pass', 'tidb_db', 'workers'):
        setattr(g_settings, name, getattr(options, name))

    app.run(debug=True, port=options.server_port, host='0.0.0.0')
Beispiel #7
0
def main():
    '''Configure MemSQL settings from the command line and start the app.'''
    global g_settings

    opts = utils.get_options()

    # Fully populate a local settings object, then publish it globally.
    settings = Settings({})
    settings.memsql_host = opts.memsql_host
    settings.memsql_port = opts.memsql_port
    settings.memsql_user = opts.memsql_user
    settings.memsql_pass = opts.memsql_pass
    settings.memsql_db = opts.memsql_db
    settings.workers = opts.workers
    g_settings = settings

    app.run(debug=True, port=opts.server_port, host='0.0.0.0')
Beispiel #8
0
def main():
    """Configure MemSQL settings from the command line and start the app."""
    global g_settings

    options = utils.get_options()

    g_settings = Settings({})
    g_settings.memsql_host = options.memsql_host
    g_settings.memsql_port = options.memsql_port
    g_settings.memsql_user = options.memsql_user
    g_settings.memsql_pass = options.memsql_pass
    g_settings.memsql_db = options.memsql_db
    g_settings.workers = options.workers

    # Fix: this is an informational startup message, not an error --
    # log it at INFO level so error monitoring is not polluted.
    logger.info("Listening on Port: %s" % (options.server_port,))
    app.run(debug=False, port=options.server_port, host='0.0.0.0')
Beispiel #9
0
 def clean_tag(self):
     '''Validate the submitted tag against the requested action.'''
     tag = self.cleaned_data['tag']
     action = self.cleaned_data['action']
     # Does this tag already exist as a project?
     tag_exists = tag in get_options()['projects']
     # A 'create' must not collide with an existing project ...
     if action == 'create' and tag_exists:
         raise forms.ValidationError('This project already exists',
                                     code='invalid')
     # ... and a 'delete' must refer to one that exists.
     if action == 'delete' and not tag_exists:
         raise forms.ValidationError("This project doesn't exist",
                                     code='invalid')
     return tag
Beispiel #10
0
def export_xls_brief(raw):
    """
    Export brief call statistics (without internal numbers) to an xls file.

    :param raw: mapping city_number -> {'inc': {...}, 'out': {...}} counters
    :return: True if the workbook was written, None on any failure
    """
    # Resolve the output path first so we never build a workbook that
    # can not be saved (the original checked after writing the header).
    path = get_options('main', 'xls_path_brief', True)

    if not path:
        log.critical('Ошибка чтения конфигурационного файла, см. ошибки выше')
        return

    wb = xlwt.Workbook()
    ws = wb.add_sheet('Краткий список')

    # Header row
    ws.write(0, 0, 'Гор. номер')
    ws.write_merge(0, 0, 1, 4, 'Вх.')
    ws.write_merge(0, 0, 5, 8, 'Исх.')

    line = 1

    for kc in sorted(raw):
        ws.write(line, 0, kc)

        # Incoming: duration, count, billable time, answered calls.
        ws.write(line, 1, format_time(raw[kc]['inc']['duration']))
        ws.write(line, 2, raw[kc]['inc']['count'])
        ws.write(line, 3, format_time(raw[kc]['inc']['billsec']))
        ws.write(line, 4, raw[kc]['inc']['answer'])

        # Outgoing: same four columns.
        ws.write(line, 5, format_time(raw[kc]['out']['duration']))
        ws.write(line, 6, raw[kc]['out']['count'])
        ws.write(line, 7, format_time(raw[kc]['out']['billsec']))
        ws.write(line, 8, raw[kc]['out']['answer'])

        line += 1

    try:
        wb.save(path)
    except PermissionError as e:
        log.error('Недостаточно прав для сохранения файла: %s' % e.filename)
        return
    except FileNotFoundError as e:
        log.error('Неверный путь или имя файла: %s' % e.filename)
        return

    return True
Beispiel #11
0
def edit_site(args):
	'''Show the site-editing menu and run the chosen action.'''
	site = mysql.check_site_id()
	msg = '''
1. Add domain alias
2. Remove domain alias
3. Enable this site
4. Disable this site

Please select option:'''

	# Dispatch table: menu number -> action.
	actions = {
		1: lambda: add_alias(site, args),
		2: lambda: remove_alias(site, args),
		3: lambda: enable_site(site.domain),
		4: lambda: disable_site(site.domain),
	}
	choice = utils.get_options(msg, [1, 2, 3, 4])
	if choice in actions:
		actions[choice]()
Beispiel #12
0
def export_xls(raw):
    """
    Export the phone-book structure to excel.

    :param raw: {string: {string: [[string]]}}, phone-book structure
                {organisation: {department: [[employee_data]]}}
    :return: bool, True if the phone book was exported, None on failure
    """
    # Resolve the output path first so we never build a workbook that
    # can not be saved (the original checked after writing the header).
    path = get_options('main', 'xls_path', True)

    if not path:
        log.critical('Ошибка чтения конфигурационного файла, см. ошибки выше')
        return

    wb = xlwt.Workbook()
    ws = wb.add_sheet('Список номеров')

    # Header row
    ws.write(0, 1, 'Гор. номер')
    ws.write(0, 2, 'Вх.')
    ws.write(0, 3, 'Исх.')

    line = 1

    for kc in sorted(raw):

        ws.write(line, 1, kc)

        # Comma-separated lists of incoming/outgoing extensions.
        ws.write(line, 2, ', '.join(raw[kc]['inc']))
        ws.write(line, 3, ', '.join(raw[kc]['out']))

        line += 1

    try:
        wb.save(path)
    except PermissionError as e:
        log.error('Недостаточно прав для сохранения файла: %s' % e.filename)
        return
    except FileNotFoundError as e:
        log.error('Неверный путь или имя файла: %s' % e.filename)
        return

    return True
Beispiel #13
0
    def __init__(self, **kwargs):
        """Build a Django form from this option set's attributes.

        Loads current option values from the database, creates one form
        field per option (one per supported language for
        language-dependent options) and collects the bound fields in
        ``self.formfields`` / ``self.langformfields``.
        """
        # assumes self.optionset_label, self.attrs and self.lang_options
        # are provided by the enclosing class -- TODO confirm
        self.form = forms.Form(**kwargs)

        #load option values from the database
        self.value_dict = get_options(optionset_label=self.optionset_label,
                                      current_only=False)
        self.formfields = []
        self.langformfields = {}

        for (attr, value) in self.attrs:

            # Create a stored value for options not in the database yet.
            if not attr in self.value_dict:
                self.value_dict[attr] = _init_option(self.optionset_label,
                                                     attr, value)

            if value.lang_dependant:
                for lang in get_supported_languages():
                    #generate the form field
                    field_name = '%s_%s' % (attr, lang)
                    # Deep-copy so each language gets its own field object.
                    lang_field = copy.deepcopy(value.field)
                    lang_field.label = '%s (%s)' % (lang_field.label,
                                                    lang.upper())

                    self.form.fields[field_name] = lang_field
                    try:
                        self.form.fields[field_name].initial = self.value_dict[
                            attr][lang]
                    except KeyError:
                        # No stored value for this language yet.
                        self.form.fields[field_name].initial = ''

                    #add to land dependant options
                    self.lang_options[field_name] = (attr, lang)

                    #langformfields fieldset
                    if not lang in self.langformfields:
                        self.langformfields[lang] = []
                    self.langformfields[lang].append(self.form[field_name])
            else:
                # Language-independent option: a single plain field.
                self.form.fields[attr] = value.field
                self.form.fields[attr].initial = self.value_dict[attr]
                self.formfields.append(self.form[attr])
Beispiel #14
0
    def __init__(self, **kwargs):
        """Build a Django form from this option set's fields.

        Loads option values from the database, creates one form field
        per option (one per supported language for language-dependent
        options) and collects them in ``self.formfields`` /
        ``self.langformfields``.
        """
        # assumes self.optionset_label, self.option_fields and
        # self.lang_options are provided by the enclosing class -- TODO confirm
        self.form = forms.Form(**kwargs)

        #load option values from the database
        self.value_dict = get_options(optionset_label = self.optionset_label, current_only=False)
        self.formfields = []
        self.langformfields = {}

        for (attr, value) in self.option_fields:

            # Missing or NULL stored values are (re)initialised.
            if not attr in self.value_dict or self.value_dict[attr] is None:
                self.value_dict[attr] = _init_option(self.optionset_label, attr, value)

            if value.lang_dependant:
                for lang in get_supported_languages():
                    #generate the form field
                    field_name = '%s_%s' % (attr, lang)
                    # Deep-copy so each language gets its own field object.
                    lang_field = copy.deepcopy(value.field)
                    lang_field.label = '%s (%s)' % (force_text(lang_field.label), lang.upper())

                    self.form.fields[field_name] = lang_field
                    try:
                        self.form.fields[field_name].initial = self.value_dict[attr][lang]
                    except KeyError:
                        # No stored value for this language yet.
                        self.form.fields[field_name].initial = ''

                    #add to land dependant options
                    self.lang_options[field_name] = (attr, lang)

                    #langformfields fieldset
                    if not lang in self.langformfields:
                        self.langformfields[lang] = []
                    self.langformfields[lang].append(self.form[field_name])
            else:
                # Language-independent option: a single plain field.
                self.form.fields[attr] = value.field
                self.form.fields[attr].initial = self.value_dict[attr]
                self.formfields.append(self.form[attr])
Beispiel #15
0
def update_ael(sections):
    """
    Download and refresh configuration files from the PBX; if an update
    fails, the previously downloaded files keep being used.

    :param sections: [string], config-file sections holding PBX
                     connection parameters
    """
    for section in sections:
        options_list = get_options(section)

        if options_list:
            host, user, password = options_list

            try:
                with paramiko.SSHClient() as ssh:
                    # Accept unknown host keys automatically.
                    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
                    ssh.connect(hostname=host,
                                username=user,
                                password=password)

                    remotepath = '/etc/asterisk/extensions.ael'
                    # One local copy per section/PBX.
                    localpath = 'asterisk/%s/extensions.ael' % section

                    if not os.path.exists(os.path.dirname(localpath)):
                        os.makedirs(os.path.dirname(localpath))

                    try:
                        with SCPClient(ssh.get_transport()) as client:
                            client.get(remotepath, localpath)
                    except SCPException as e:
                        # Copy failed; the old local file stays in use.
                        log.error(e)

            except TimeoutError as e:
                log.error('%s - %s' % (host, e.strerror))
            except paramiko.ssh_exception.AuthenticationException:
                log.error('Ошибка авторизации на %s' % host)

    return True
Beispiel #16
0
    raw_desc = df.describe(include='all').T
    # raw_desc.rename(columns={'': 'col_name'}, inplace=True)

    null_percent = 100 * df.isnull().sum() / df.shape[0]
    null_percent = null_percent.to_frame(name='null_percent')

    # types = df.dtypes.to_frame(name='types')

    nunique = df.apply(pd.Series.nunique).to_frame(name='n_unique')

    top = top_values(df).to_frame(name='top_10')

    cat_cols = find_categorical(df)
    col_type = pd.Series(raw_desc.index, index=raw_desc.index,
                         name='col_type').isin(cat_cols)
    col_type = col_type.apply(lambda x: 'cat' if x else 'num')

    desc = pd.concat([raw_desc, null_percent, col_type, nunique, top],
                     axis=1, copy=False)

    kept_columns = ['col_type', 'null_percent', 'n_unique', 'mean',
                    'min', 'max', 'top_10']

    desc[kept_columns].to_csv(desc_file, sep=';', float_format='%.2f')


if __name__ == '__main__':
    # Build an audit file for every input file named on the command line.
    options = get_options()
    for f in options.filenames:
        make_audit_file(f)
Beispiel #17
0
#!/usr/bin/env python
""" Setup dataporten openid mapping """

import utils
from himlarcli.keystone import Keystone
from himlarcli import utils as himutils

options = utils.get_options('Setup dataporten openid mapping',
                            hosts=0,
                            dry_run=True)
ksclient = Keystone(options.config, debug=options.debug)

# Domain should be create from hieradata
domain = ksclient.get_domain_id('dataporten')
rules = [{
    "local": [{
        "user": {
            "name": "{0}",
            "id": "{0}"
        },
        "group": {
            "name": "{0}-group",
            "domain": {
                "id": domain
            }
        }
    }],
    "remote": [{
        "type": "OIDC-email"
    }, {
        "type": "OIDC-name"
Beispiel #18
0
    if unordered:
        print('- glossary')
        print('  - out of order')
        for item in unordered:
            print(f'    - {item}')


def get_internal(glossary, language):
    '''Create set of internal references within glossary definitions.'''
    refs = set()
    for entry in glossary:
        # Cross-references embedded in the definition text for this language.
        definition = entry[language]['def']
        refs.update(m.group(1).lstrip('#')
                    for m in CROSS_REF.finditer(definition))
        # Explicit 'ref' lists count as internal references too.
        if 'ref' in entry:
            refs.update(entry['ref'])
    return refs


def get_definitions(glossary):
    '''Return the set of all keys defined in the glossary.'''
    keys = set()
    for entry in glossary:
        keys.add(entry['key'])
    return keys


if __name__ == '__main__':
    # --glossary and --language take one value; --sources takes a list.
    options = utils.get_options(
        ['--glossary', False, 'Path to glossary YAML file'],
        ['--language', False, 'Two-letter code for language'],
        ['--sources', True, 'List of input files']
    )
    check_gloss(options)
Beispiel #19
0
#!/usr/bin/env python
'''Check for misspelled words.'''

import sys

import utils


def check_spelling(options):
    '''Main driver: diff the words on stdin against the expected wordlist.'''
    actual = {word.strip() for word in sys.stdin.readlines()}
    # Fix: use a context manager so the comparison file is always closed
    # (the original leaked the open file handle).
    with open(options.compare, 'r') as reader:
        expected = {word.strip() for word in reader}
    utils.report('spelling', actual=actual, expected=expected)


if __name__ == '__main__':
    # Wordlist to diff against comes from --compare; actual words on stdin.
    options = utils.get_options(
        ['--compare', False, 'File to compare wordlist'])
    check_spelling(options)
Beispiel #20
0
    raw = yaml.dump(cleaned, sort_keys=False, width=utils.YAML_INFINITE)
    cooked = utils.cook_yaml(raw)
    print(cooked)


def merge_inputs(options):
    '''Read all files, merging inputs keyed by each entry's 'key'.'''
    merged = {}
    for fn in options.sources:
        for entry in utils.read_yaml(fn):
            assert 'key' in entry, f'Entry {entry} from {fn} lacks key'
            key = entry['key']
            # Later files win; optionally report the collision.
            if options.verbose and (key in merged):
                print(f'duplicate key {key} in {fn}', file=sys.stderr)
            merged[key] = entry
    return merged


def cleanup(entry):
    '''Create new dict for entry with keys in desired order.'''
    cleaned = {}
    # KEY_ORDER drives both which keys survive and their ordering.
    for key in KEY_ORDER:
        if key in entry:
            cleaned[key] = utils.strip_nested(entry[key])
    return cleaned


if __name__ == '__main__':
    # Merge all bibliography sources; --verbose reports duplicate keys.
    options = utils.get_options(['--sources', True, 'List of input files'],
                                ['--verbose', None, 'Report duplicate keys?'])
    merge_bib_md(options)
Beispiel #21
0
        outImage = aruco.drawDetectedCornersCharuco(outImage, charucoCorners,
                                                    charucoIds)

        # use a camera parameter
        retval, rvec, tvec = aruco.estimatePoseCharucoBoard(
            charucoCorners, charucoIds, board, cameraMatrix, distCoeffs)
        if retval:
            aruco.drawAxis(outImage, cameraMatrix, distCoeffs, rvec, tvec, 0.1)

    cv2.imshow("detected", outImage)
    cv2.waitKey(0)
    cv2.destroyAllWindows()


if __name__ == '__main__':
    args = utils.get_options()

    input_image_dirpath = osp.join(osp.dirname(__file__), args.in_dir)
    # recognize any extentions
    image_paths, image_names = utils.get_file_paths(input_image_dirpath, "*")

    # read camera parameters
    camera_param_filepath = osp.join(osp.dirname(__file__),
                                     args.camera_param_path)
    cameraMatrix, distCoeffs, rvecs, tvecs, stdDevIn, stdDevEx = \
        utils.read_pickle(camera_param_filepath)

    # read parameters from arguments
    dictionary = utils.get_aruco_dict(args.aruco_dict)
    squareL = args.square_length
    markerL = args.marker_length
Beispiel #22
0
def get_problems(filenames):
    '''Look for lines longer than utils.WIDTH.

    :param filenames: iterable of file paths to scan
    :return: (found, result) where found is True if any long line exists
             and result maps filename -> list of 1-based line numbers
    '''
    found = False
    result = {}
    # Fix: iterate the 'filenames' parameter; the original read the
    # module-level 'options.sources' and silently ignored its argument.
    for filename in filenames:
        result[filename] = []
        with open(filename, 'r') as reader:
            for i, line in enumerate(reader, start=1):
                if len(line.rstrip()) > utils.WIDTH:
                    found = True
                    result[filename].append(i)
    return found, result


def report(options, problems):
    '''Report any long-line problems, per file.

    :param options: parsed options; .verbose selects line-by-line output
    :param problems: mapping filename -> list of offending line numbers
    '''
    print('- long lines')
    for filename in sorted(problems.keys()):
        if problems[filename]:
            if options.verbose:
                lines = ', '.join([str(i) for i in problems[filename]])
                # Fix: show the offending filename instead of the
                # literal placeholder '(unknown)'.
                print(f'  - {filename}: {lines}')
            else:
                print(f'  - {filename}: {len(problems[filename])}')


if __name__ == '__main__':
    # Scan all sources for over-long lines; --verbose lists each line.
    options = utils.get_options(['--sources', True, 'List of input files'],
                                ['--verbose', None, 'Report line by line'])
    check_long_lines(options)
Beispiel #23
0
""" Update glance images """

import sys
import os
import urllib
import urllib2
import pprint
from datetime import datetime
import utils
from himlarcli import utils as himutils
from himlarcli.glance import Glance

print "Depricated! Use image.py"
sys.exit(0)

options = utils.get_options('Create and update golden images',
                             hosts=0, dry_run=True)
glclient = Glance(options.config, debug=options.debug)
logger = glclient.get_logger()
golden_images = himutils.load_config('config/golden_images.yaml')
if glclient.region in golden_images:
    images = golden_images[glclient.region]
else:
    if not 'default' in golden_images:
        print "Missing default in config/golden_images.yaml"
        sys.exit(1)
    images = golden_images['default']

def download_and_check(image):
    source = himutils.get_abs_path('%s' % image['latest'])
    url = '%s%s' % (image['url'], image['latest'])
    # Do not redownload
Beispiel #24
0
#!/usr/bin/env python
import utils
from himlarcli.keystone import Keystone
from himlarcli.foremanclient import ForemanClient
from himlarcli import utils as himutils

# Fix foreman functions and logger not-callable
# pylint: disable=E1101,E1102

desc = 'Setup Foreman for himlar'
options = utils.get_options(desc, hosts=False)
keystone = Keystone(options.config, debug=options.debug)
logger = keystone.get_logger()
# Domain name for the foreman objects comes from the openstack section.
domain = keystone.get_config('openstack', 'domain')

# Reuse keystone's logger for the foreman client.
foreman = ForemanClient(options.config, options.debug, log=logger)
client = foreman.get_client()

# create foreman domain based on config
# get domain id

# get smart proxy id for tftp

# create subnet
# mgmt network + netmask from config
# domain_ids = domain_id
# tftp_ids = proxy_id
# dns-primary, dns-secondary, gateway is blank
# get subnet id

# Global parameters
Beispiel #25
0
#!/usr/bin/env python
import utils
import sys
from himlarcli.foremanclient import ForemanClient
from himlarcli.sensu import Sensu

desc = 'Toggle rebuild host in foreman'
options = utils.get_options(desc)

foreman = ForemanClient(options.config, options.debug)
sensu = Sensu(options.config, debug=options.debug)

# Silence monitoring for an hour, then flag each host for rebuild.
for host in options.host:
    sensu.silence_host(host, expire=3600)
    foreman.set_host_build(host)
Beispiel #26
0
    '''Wrap lines.'''
    result = []
    for line in lines:
        line = line.replace(PROTOCOL, '').replace(HERE, FAKE)
        if len(line) == 0:
            result.append(line)
            continue
        match = INDENT.match(line)
        indent = match.group(1) if match else ''
        while len(line) > 0:
            front, line, terminator = split(line)
            result.append(f'{front}{terminator}')
            if len(line) > 0:
                line = indent + line.lstrip()
    return result


def split(line):
    '''Split a line.

    Returns (front, rest, terminator): the part that fits within
    utils.WIDTH, the remainder, and a ' \\' continuation marker when
    the line had to be broken.
    '''
    if len(line) <= utils.WIDTH:
        return line, '', ''
    # Break at the last space at or before the width limit, if any;
    # otherwise break hard at the limit.
    cut = line.rfind(' ', 1, utils.WIDTH + 1)
    if cut == -1:
        cut = utils.WIDTH
    return line[:cut], line[cut:], ' \\'


if __name__ == '__main__':
    # --slice optionally restricts reformatting to a slice of the input.
    options = utils.get_options(['--slice', None, 'Take slice out of input?'])
    reformat(options)
Beispiel #27
0
#!/usr/bin/env python
import utils
import sys
import pprint
import yaml
import ConfigParser
from himlarcli.foremanclient import Client
from himlarcli import utils as himutils

desc = 'Create an location inventory config in ./config.<loc>'
options = utils.get_options(desc, hosts=False)

foreman = Client(options.config, options.debug)

hosts = foreman.get_hosts('*')
hostlist = dict({'nodes': {}})
for host in hosts['results']:
    hostname = str(host['name']).split('.')[0]
    loc, role, id = hostname.split('-')
    node = "%s-%s" % (role, id)
    host['name'].split('.')[0]
    if not host['managed']:
        continue
    if not host['compute_resource_name']:
        hostlist['nodes'][node] = dict()
        hostlist['nodes'][node]['mac'] = str(host['mac'])
    else:
        hostlist['nodes'][node] = dict()
        hostlist['nodes'][node]['compute_resource'] = 'controller-01'
        hostlist['nodes'][node]['compute_profile'] = 1
    #pp = pprint.PrettyPrinter(indent=2)
Beispiel #28
0
                           'systemctl',
                           'disable',
                           AUTO_START_SERVICE,
                           allowed_return_codes=[0, 1])
        utils.ssh(himn, username, 'rm', '-f',
                  '/etc/systemd/system/%s' % AUTO_START_SERVICE)
        utils.ssh(himn, username, 'rm', '-f',
                  '/etc/sysconfig/network-scripts/%s' % AUTO_SCRIPT)
        utils.ssh(himn, username, 'systemctl', 'daemon-reload')


if __name__ == '__main__':
    install_xenapi_sdk()
    astute = utils.get_astute()
    if astute:
        username, password, install_xapi = utils.get_options(astute)
        endpoints = get_endpoints(astute)
        himn_eth, himn_local = utils.init_eth()

        public_ip = utils.astute_get(
            astute, ('network_metadata', 'vips', 'public', 'ipaddr'))

        services_ssl = utils.astute_get(astute, ('public_ssl', 'services'))

        if username and password and endpoints and himn_local:
            route_to_compute(endpoints, HIMN_IP, himn_local, username)
            xcp_version = utils.get_xcp_version(HIMN_IP, username)
            if install_xapi:
                install_suppack(HIMN_IP, username, XS_PLUGIN_ISO, xcp_version)
            enable_linux_bridge(HIMN_IP, username)
            forward_from_himn(himn_eth)
Beispiel #29
0
def main():
    """Entry point of the program.

    Reads the input (local xls/xlsx/csv file or HTTP URL) into a
    DataFrame, applies optional sorting, and writes it back out in the
    requested format.
    """

    # Get all command line arguments
    cmd_options = vars(get_options())

    # Set if input filename is a HTTP url, defaults to false
    is_url = False

    # Determine the extension of the input file
    if cmd_options["in_filename"].startswith("http"):
        is_url = True
    else:
        input_file_extension = convert.detect_file_extension(cmd_options["in_filename"])

    # Initialize the empty dataframe
    df = None

    # Check if rows need to be skipped from start
    skip_rows = int(cmd_options["skip_rows"])

    # Check if the user wants to skip header
    skip_header = cmd_options["skip_header"]

    # Check if the user has passed a list of columns names explicitly
    # (comma-separated on the command line).
    column_names = cmd_options["columns"]
    if column_names is not None:
        column_names = column_names.split(",")

    if is_url:
        df = convert.url_to_df(cmd_options["in_filename"], skip_rows, skip_header, column_names)
    else:
        # Based on extension, convert the file contents to a Pandas dataframe
        if input_file_extension == "xls":
            df = convert.xls_to_df(cmd_options["in_filename"], skip_rows, skip_header, column_names)
        elif input_file_extension == "xlsx":
            df = convert.xlsx_to_df(cmd_options["in_filename"], skip_rows, skip_header, column_names)
        elif input_file_extension == "csv":
            df = convert.csv_to_df(cmd_options["in_filename"], skip_rows, skip_header, column_names)
        else:
            raise BarryFileException("Input file format not supported. Currently supported file formats - xls/xlsx/csv")

    # Handle dataframe transformations

    # Check if a column need to be sorted (sort_column ascending,
    # rsort_column descending; sort wins if both are given).
    # NOTE(review): sort_df's return value is discarded -- confirm it
    # sorts the dataframe in place.
    if cmd_options["sort_column"]:
        sort_column = cmd_options["sort_column"]
        ascending = True
        convert.sort_df(df, sort_column, ascending)
    elif cmd_options["rsort_column"]:
        sort_column = cmd_options["rsort_column"]
        ascending = False
        convert.sort_df(df, sort_column, ascending)

    # Based on the output format, write the dataframe to disk
    # (csv is the fallback for any unrecognised out_format).
    if cmd_options["out_format"] == "xls":
        convert.df_to_xls(df, cmd_options["out_filename"])
    elif cmd_options["out_format"] == "xlsx":
        convert.df_to_xlsx(df, cmd_options["out_filename"])
    elif cmd_options["out_format"] == "json":
        convert.df_to_json(df, cmd_options["out_filename"])
    else:
        convert.df_to_csv(df, cmd_options["out_filename"])
Beispiel #30
0
#!/usr/bin/python
import sys
import utils
from himlarcli.nova import Nova
from himlarcli.keystone import Keystone
from himlarcli.mail import Mail
from email.mime.text import MIMEText

# OPS! It might need some updates. We use class Mail instead of Notify now.

options = utils.get_options('Notify all users', dry_run=True, hosts=False)
keystone = Keystone(options.config, debug=options.debug)
mail = Mail(options.config, debug=options.debug)
region = keystone.region

# Safety guard: the script refuses to run until these lines are removed.
print "Remove these lines if you want to run this and send mail to all!"
sys.exit(0)

# Edit this to send new email to all users
subject = 'UH-IaaS: Purge of all data (%s)' % region
body_file = 'notify/notify_reinstall.txt'

with open(body_file, 'r') as body_txt:
    body_content = body_txt.read()

# One mail per Dataporten project; --dry-run skips the actual send.
projects = keystone.list_projects('Dataporten')
for project in projects:
    msg = MIMEText(body_content)
    msg['Subject'] = subject
    if not options.dry_run:
        mail.send_mail(project, msg)
Beispiel #31
0
    elif len(names) > 2:
        front = ', '.join(names[0:-1])
        names = f'{front}, and {names[-1]}'
    return f'{names}{suffix}:'


def key(entry):
    '''Generate the <dt> element used as a bibliography key.'''
    assert 'key' in entry, \
        'Every entry must have key'
    bib_key = entry['key']
    # Anchor id is the lower-cased key; display text keeps original case.
    return f'<dt id="{bib_key.lower()}" class="bibliography">{bib_key}</dt>'


def title(entry, quote):
    '''Generate title (possibly linking and/or quoting).'''
    assert 'title' in entry, \
        f'Entry {entry} does not have title'
    text = entry['title']
    # Link the title when a URL is available.
    if 'url' in entry:
        text = f'<a href="{entry["url"]}">{text}</a>'
    # Articles are quoted; books are emphasised.
    if quote:
        text = f'"{text}"'
    else:
        text = f'<em>{text}</em>'
    suffix = f' ({entry["edition"]} edition)' if 'edition' in entry else ''
    return f'{text}{suffix}.'


if __name__ == '__main__':
    # YAML bibliography in, Markdown out.
    options = utils.get_options(
        ['--input', False, 'Path to input YAML bibliography file'],
        ['--output', False, 'Path to output Markdown file'])
    make_bib(options)
def make_numbering(options):
    '''Main driver: number all figure and table inclusions per entry.'''
    config = utils.read_yaml(options.config)
    entries = utils.get_entry_info(config)
    figures = {}
    tables = {}
    for entry in entries:
        # Read the raw chapter text (unscrubbed) and collect inclusions.
        text = utils.read_file(entry['file'], scrub=False)
        figures.update(get_inclusions(FIG_INC, entry, text))
        tables.update(get_inclusions(TBL_INC, entry, text))
    utils.write_yaml(options.output,
                     {'entries': entries, 'figures': figures, 'tables': tables})


def get_inclusions(pattern, entry, text):
    '''Map each inclusion key found in text to a "label.N" number.'''
    numbering = {}
    label = entry['label']
    # Number matches in order of appearance, starting at 1.
    for index, match in enumerate(pattern.finditer(text), start=1):
        numbering[match.group(1)] = f'{label}.{index}'
    return numbering


if __name__ == '__main__':
    # Configuration in, numbering YAML out.
    options = utils.get_options(
        ['--config', False, 'Path to YAML configuration file'],
        ['--output', False, 'Path to output YAML file'])
    make_numbering(options)
Beispiel #33
0
#!/usr/bin/python
# Notify all users in Dataporten projects by email (Python 2 script).
import sys
import utils
from himlarcli.nova import Nova
from himlarcli.keystone import Keystone
from himlarcli.notify import Notify
from email.mime.text import MIMEText

options = utils.get_options('Notify all users', dry_run=True, hosts=False)
keystone = Keystone(options.config, debug=options.debug)
notify = Notify(options.config, debug=options.debug)
region = keystone.region

# Safety guard: the script refuses to run until these two lines are removed,
# so mail is never sent to everyone by accident.
print "Remove these lines if you want to run this and send mail to all!"
sys.exit(0)

# Edit this to send new email to all users
subject = 'UH-IaaS: Purge of all data (%s)' % region
body_file = 'misc/notify_reinstall.txt'

with open(body_file, 'r') as body_txt:
    body_content = body_txt.read()

# One message per Dataporten project; dry-run only prints the project name.
projects = keystone.list_projects('Dataporten')
for project in projects:
    msg = MIMEText(body_content)
    msg['Subject'] = subject
    if not options.dry_run:
        notify.send_mail(project, msg)
    print '\nProject: %s' % project
Beispiel #34
0
# Load a pre-trained AEN-BERT aspect-sentiment model and prepare the test
# data loader for CPU inference.
import torch
from pytorch_transformers import BertModel
from sklearn import metrics
from torch.utils.data import DataLoader

from data_utils import Tokenizer4Bert, ABSADataset
from models.aen import AEN_BERT
from utils import get_options

opt = get_options()
bert = BertModel.from_pretrained(opt.pretrained_bert_name)

# Checkpoint path encodes the best validation accuracy (laptop domain).
model_path = 'state_dict/aen_bert_laptop_val_acc0.7821'
model = AEN_BERT(bert, opt)
model.load_state_dict(torch.load(model_path, map_location=torch.device('cpu')))
model.eval()

tokenizer = Tokenizer4Bert(opt.max_seq_len, opt.pretrained_bert_name)
test_set = ABSADataset(opt.dataset_file['test'], tokenizer)
# batch_size=1, shuffle=False: deterministic per-example evaluation order.
data_loader = DataLoader(dataset=test_set, batch_size=1, shuffle=False)

# Running accuracy counters and accumulators for the evaluation loop below.
n_correct, n_total = 0, 0
t_targets_all, t_outputs_all = None, None

with torch.no_grad():
    for t_batch, t_sample_batched in enumerate(data_loader):
        t_inputs = [
            t_sample_batched[col].to(opt.device) for col in opt.inputs_cols
        ]
        print("input: ", t_inputs)
        t_targets = t_sample_batched['polarity'].to(opt.device)
Beispiel #35
0
                length = len(node.code.string.split('\n'))
            except:
                assert False, f'pre node {node} has no text'
            if length > utils.LENGTH:
                entry = {'length': length, 'line': node.sourceline}
                if 'class' in node:
                    entry['class'] = node['class']
                if 'title' in node:
                    entry['title'] = node['title']
                result.append(entry)
    return result


def report(options, problems):
    '''Report any over-long code chunks, grouped by file.

    :param options: parsed command-line options (unused here; kept so all
        reporters share the same interface)
    :param problems: dict mapping filename -> list of problem dicts with
        'line' and 'length' keys and optional 'class'/'title' keys
    '''
    print('- long chunks')
    for filename in sorted(problems.keys()):
        # Bug fix: report the actual filename instead of a fixed placeholder.
        print(f'  - {filename}')
        for p in problems[filename]:
            print(f'    - line: {p["line"]}')
            print(f'      length: {p["length"]}')
            # Bug fix: align optional keys with 'line'/'length' (6 spaces)
            # so the YAML-style indentation stays consistent.
            if 'class' in p:
                print(f'      class: {p["class"]}')
            if 'title' in p:
                print(f'      title: {p["title"]}')


if __name__ == '__main__':
    # Script entry point: scan the given source files for over-long chunks.
    options = utils.get_options(['--sources', True, 'List of input files'])
    check_chunk_length(options)
Beispiel #36
0
def export_xls_full(raw):
    """
    Export the full call statistics (including internal extensions) to an
    XLS workbook.

    :param raw: mapping keyed by city (external) number; each value holds
        'inc' and 'out' aggregates with 'duration', 'count', 'billsec' and
        'answer' totals plus a per-extension 'users' mapping
        -- presumably produced by the CDR aggregation step; confirm against caller.
    :return: True on success, None if the output path is missing or the
        file cannot be written.
    """
    wb = xlwt.Workbook()
    ws = wb.add_sheet('Подробный список')

    # Header row: column 0 is the city number, columns 1-5 incoming ("Вх."),
    # columns 6-10 outgoing ("Исх.").
    ws.write(0, 0, 'Гор. номер')
    ws.write_merge(0, 0, 1, 5, 'Вх.')
    ws.write_merge(0, 0, 6, 10, 'Исх.')

    # Output path comes from the [main] section of the configuration file.
    path = get_options('main', 'xls_path_full', True)

    if not path:
        log.critical('Ошибка чтения конфигурационного файла, см. ошибки выше')
        return

    line = 1

    for kc in sorted(raw):
        ws.write(line, 0, kc)
        ki = raw[kc]['inc']  # incoming aggregates for this city number
        ko = raw[kc]['out']  # outgoing aggregates for this city number

        kiu = ki['users']  # per-extension incoming stats
        kou = ko['users']  # per-extension outgoing stats

        # Totals row for the city number itself.
        ws.write(line, 2, format_time(ki['duration']))
        ws.write(line, 3, ki['count'])
        ws.write(line, 4, format_time(ki['billsec']))
        ws.write(line, 5, ki['answer'])

        ws.write(line, 7, format_time(ko['duration']))
        ws.write(line, 8, ko['count'])
        ws.write(line, 9, format_time(ko['billsec']))
        ws.write(line, 10, ko['answer'])

        # One row per internal extension with incoming traffic.
        inc_line = 0
        for inc in sorted(kiu):
            inc_line += 1
            ws.write(line + inc_line, 1, inc)
            ws.write(line + inc_line, 4, format_time(kiu[inc]['billsec']))
            ws.write(line + inc_line, 5, kiu[inc]['answer'])

        # One row per internal extension with outgoing traffic; these rows
        # share vertical space with the incoming rows above (columns differ).
        out_line = 0
        for out in sorted(kou):
            out_line += 1
            ws.write(line + out_line, 6, out)
            ws.write(line + out_line, 7, format_time(kou[out]['duration']))
            ws.write(line + out_line, 8, kou[out]['count'])
            ws.write(line + out_line, 9, format_time(kou[out]['billsec']))
            ws.write(line + out_line, 10, kou[out]['answer'])

        # Advance past the longer of the two extension lists plus the totals row.
        line += max([inc_line, out_line]) + 1

    try:
        wb.save(path)
    except PermissionError as e:
        log.error('Недостаточно прав для сохранения файла: %s' % e.filename)
        return
    except FileNotFoundError as e:
        log.error('Неверный путь или имя файла: %s' % e.filename)
        return

    return True
Beispiel #37
0
    utils.report('links', referenced=referenced, defined=defined)
    check_duplicates(options)


def get_keys(config):
    '''Create set of chapter slugs found in configuration.'''
    # link_defs is stored as a YAML string inside the kramdown section.
    link_defs = config['kramdown']['link_defs']
    return set(yaml.safe_load(link_defs))


def check_duplicates(options):
    '''Report link labels defined more than once in any source file.

    :param options: parsed command-line options with a 'sources' file list
    '''
    all_duplicates = {}
    for filename in options.sources:
        duplicates = []
        # get_matches fills 'duplicates' as a side effect; its return value
        # is not needed here (the unused binding was removed).
        utils.get_matches(LINK_REF, filename, duplicates=duplicates)
        if duplicates:
            all_duplicates[filename] = duplicates
    if all_duplicates:
        print('- duplicate links')
        for filename in sorted(all_duplicates.keys()):
            # Bug fix: print the actual filename instead of a fixed placeholder.
            print(f'  - {filename}')
            for key in sorted(all_duplicates[filename]):
                print(f'    - {key}')


if __name__ == '__main__':
    # Script entry point: verify link definitions against their uses.
    options = utils.get_options(
        ['--config', False, 'Path to YAML configuration file'],
        ['--sources', True, 'List of input files'])
    check_links(options)
Beispiel #38
0
#!/usr/bin/env python
""" Setup dataporten openid mapping """

import utils
from himlarcli.keystone import Keystone
from himlarcli import utils as himutils

options = utils.get_options('Setup dataporten openid mapping',
                             hosts=0, dry_run=True)
ksclient = Keystone(options.config, debug=options.debug)
ksclient.set_domain('dataporten')
# Domain should be created from hieradata
domain = ksclient.get_domain_id()
# Federation mapping rules: the first rule maps authenticated OIDC users to
# a local user and a per-user group; the second maps everyone to the
# 'nologin' group (which has no role grants) as a default.
rules = [{
    "local": [{
        "user": { "name": "{0}", "id": "{0}" },
        "group": { "name": "{0}-group", "domain": { "id": domain } } }],
    "remote": [{ "type": "OIDC-email" }, { "type": "OIDC-name" }]
}, {
    "local": [{
        "group": { "name": "nologin", "domain": { "id": domain } } }],
        "remote": [{ "type": "OIDC-email" }, { "type": "OIDC-name" }]
}]

# Create nologin group
desc = 'All authenticated users are mapped to nologin which has no role grants'
ksclient.create_group('nologin', desc, 'dataporten')

# Create provider, mapping and container to connect them
ksclient.set_mapping('dataporten_personal', rules)
ksclient.set_protocol('openid', 'dataporten', 'dataporten_personal')
Beispiel #39
0
    for info in utils.get_entry_info(config):
        find_terms(info['slug'], info['file'], terms)
    report(terms)


def find_terms(slug, filename, terms):
    '''Collect index terms from file.

    Appends *slug* to ``terms[entry]`` for every index entry referenced in
    the file, creating the list on first sight of an entry.

    :param slug: chapter slug recorded as the term's location
    :param filename: path of the file to scan
    :param terms: dict mapping term -> list of slugs (mutated in place)
    '''
    with open(filename, 'r') as reader:
        text = reader.read()
        for match in utils.INDEX_REF.finditer(text):
            # An index reference may list several terms separated by ';'.
            entries = [e.strip() for e in utils.WHITESPACE.sub(' ', match.group(1)).split(';')]
            for entry in entries:
                # setdefault replaces the manual "if not in dict" dance.
                terms.setdefault(entry, []).append(slug)


def report(terms):
    '''Show where terms are used.'''
    print('- index')
    # Dict keys are unique, so sorting items is equivalent to sorting keys.
    for term, slugs in sorted(terms.items()):
        print(f'  - {term}: {", ".join(slugs)}')


if __name__ == '__main__':
    # Script entry point: print where each index term is used.
    options = utils.get_options(
        ['--config', False, 'Path to YAML configuration file']
    )
    show_index(options)
                  '/etc/conntrackd/conntrackd.conf',
                  '/etc/conntrackd/conntrackd.conf.back')
        utils.ssh(himn, username,
                  'cp',
                  CONNTRACK_CONF_SAMPLE,
                  '/etc/conntrackd/conntrackd.conf')

    # Restart conntrackd service
    utils.ssh(himn, username, 'service', 'conntrackd', 'restart')


if __name__ == '__main__':
    # Deployment driver: install the XenAPI SDK, then configure networking
    # and plugins on the hypervisor reachable over the HIMN link.
    install_xenapi_sdk()
    astute = utils.get_astute()
    if astute:
        # Credentials and feature flags come from the Astute deployment data.
        username, password, install_xapi = utils.get_options(astute)
        endpoints = get_endpoints(astute)
        himn_eth, himn_local = utils.init_eth()

        public_ip = utils.astute_get(
            astute, ('network_metadata', 'vips', 'public', 'ipaddr'))

        services_ssl = utils.astute_get(
            astute, ('public_ssl', 'services'))

        # Only proceed when all prerequisites were resolved successfully.
        if username and password and endpoints and himn_local:
            route_to_compute(endpoints, HIMN_IP, himn_local, username)
            if install_xapi:
                install_suppack(HIMN_IP, username, XS_PLUGIN_ISO)
            enable_linux_bridge(HIMN_IP, username)
            forward_from_himn(himn_eth)
 def assistance_bot(self):
     '''Train the shopping-assistance bot and run one response round.'''
     trained_bot, option_values = train_bot.train_assistance_bot()
     print("Our assistence bot is available now.")
     menu = utils.get_options(option_values)
     generate_response(trained_bot, "shopping assistance", menu,
                       constants.USER_OPTIONS_COUNT)
    dates = [d for d in dates if is_nice_date(d)]
    #print('   ', dates)
    if dates:
        years = [get_year(d) for d in dates]
        for year in years:
            if EARLIEST_DOC < year  < LATEST_DOC:
                #print('   ', year)
                return year
    return None


def is_nice_date(date):
    """Something is a nice date if it has a month in it."""
    # True as soon as any whitespace-separated token is a known month name.
    return any(token.lower() in MONTHS for token in date.split())


def get_year(date):
    '''Return the first four-digit token of *date* as an int, or 0 if none.'''
    four_digit_tokens = (tok for tok in date.split()
                         if tok.isdigit() and len(tok) == 4)
    return int(next(four_digit_tokens, 0))


if __name__ == '__main__':

    # Script entry point: parse CLI options and run metadata generation
    # over the selected slice of the file list.
    data_dir, filelist, start, end, crash = get_options()
    process_list(data_dir, filelist, start, end, crash, generate_metadata)