Example #1
def doit(opts):
    print('-- Generating base APIs')
    common.mkdir_p(opts.include_dir)
    filename = os.path.join(opts.include_dir, 'functions.h')
    if not common.can_create_filename(opts, filename):
        return
    with common.open_utf8(filename) as out:
        out.write('''#ifndef NSIMD_FUNCTIONS_H
                     #define NSIMD_FUNCTIONS_H

                     ''')

        for op_name, operator in operators.operators.items():
            out.write('''{}

                         #include NSIMD_AUTO_INCLUDE({}.h)

                         {}

                         {}

                         '''.format(common.hbar, operator.name,
                                    get_c_base_generic(operator),
                                    get_cxx_base_generic(operator)))

        out.write('''{hbar}

                     {put_decl}

                     {hbar}

                     #endif'''. \
                     format(hbar=common.hbar, put_decl=get_put_decl()))
    common.clang_format(opts, filename)
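
Every example on this page relies on a mkdir_p helper (common.mkdir_p or a local variant) that is never shown. For reference, a typical implementation of the idiom, sketched here under the assumption of `mkdir -p` semantics and not taken from any of the quoted projects, looks like this:

import errno
import os

def mkdir_p(path):
    # Create path and any missing parent directories; succeed silently
    # if the directory already exists (the behaviour of `mkdir -p`).
    try:
        os.makedirs(path)
    except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(path):
            return
        raise

On Python 3.2+ the same effect is available directly as os.makedirs(path, exist_ok=True).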
Example #2
def save_issue(fmt, rendered, lang, is_dev=DEBUG, date=False):
    fargs = {
        "fmt": fmt,
        "date_str": datetime.utcnow().strftime("%Y%m%d"),
        "lang_shortcode": lang,
        "dev_flag": "",
        "email_flag": "",
    }
    if date:
        fargs["date_str"] = date
    if is_dev:
        fargs["dev_flag"] = "_dev"
    if fmt == "email":
        fargs["email_flag"] = "_e"
        fargs["fmt"] = "html"
    out_path = ARCHIVE_PATH_TMPL.format(**fargs)
    try:
        out_file = open(out_path, "w")
    except IOError:
        mkdir_p(os.path.dirname(out_path))
        out_file = open(out_path, "w")
        # if this second open also fails, the parent directories could not be created
    with out_file:
        out_file.write(rendered)
    return (out_path, len(rendered))
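
save_issue uses an EAFP pattern: try the open first, and create the parent directories only after it fails. On Python 3.2 and later the same effect can be had by creating the parents up front; a minimal sketch with a hypothetical helper name:

import os

def write_rendered(out_path, rendered):
    # Create the parent directories unconditionally, then write once.
    os.makedirs(os.path.dirname(out_path) or '.', exist_ok=True)
    with open(out_path, 'w') as out_file:
        out_file.write(rendered)
    return (out_path, len(rendered))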
Example #3
def add_user_certificate(username, full_name):
    # Generate passphrase and write it to file
    mkdir_p('certs')
    run("diceware -n 6 > certs/{}.passphrase".format(username), shell=True)
    # Generate user RSA key
    cmd = "openssl genrsa -aes256 -passout file:certs/{0}.passphrase -out certs/{0}.key 4096"
    run(cmd.format(username), shell=True)
    # Generate certificate config and write it to file
    create_ldif('cnf', {
        'username': username,
        'name': full_name
    }, 'tmpdir/certs/{}.cnf'.format(username))
    # Generate certificate signing request
    cmd = "openssl req -new -key certs/{0}.key -passin file:certs/{0}.passphrase -config tmpdir/certs/{0}.cnf -out tmpdir/certs/{0}.csr"
    run(cmd.format(username), shell=True)
    # Sign certificate using CA
    cmd = "openssl x509 -req -days 3650 -in tmpdir/certs/{0}.csr -passin file:/etc/ldap/ssl/team10-ca.passphrase -CA /etc/ldap/ssl/team10-ca.cert.pem -CAkey /etc/ldap/ssl/team10-ca.key.pem -CAserial /etc/ldap/ssl/team10-ca.cert.srl -out certs/{0}.crt"
    run(cmd.format(username), shell=True)
    # Convert certificate to LDAP usable format
    cmd = "openssl x509 -inform pem -outform der -in certs/{0}.crt -out tmpdir/certs/{0}.crt.der"
    run(cmd.format(username), shell=True)
    # Generate ldif for change and execute it
    filename = 'tmpdir/certs/{}.ldif'.format(username)
    create_ldif('add_certificate_ldif', {
        'username': username,
        'pwd': os.getcwd()
    }, filename)
    add_ldif(filename)
Example #4
def write_aids_to_file(cfg, args, rpt_summary):
    log.info("-----------------------------")

    filepaths = {}
    if 'aids' not in rpt_summary:
        return filepaths

    from bson.json_util import dumps
    dbnames = rpt_summary['aids']['dbnames']
    if dbnames:
        for dbname in dbnames:
            aidsdata = get_annotation_data(cfg, dbname)

            timestamp = ("{:%d%m%y_%H%M%S}").format(datetime.datetime.now())
            logs_basepath = os.path.join(os.getenv('AI_LOGS'), 'annon', dbname)
            ## create logs_basepath if it does not exist
            common.mkdir_p(logs_basepath)
            aidsdata_keys = aidsdata.keys()
            log.info("aidsdata.keys: {}".format(aidsdata_keys))
            for split in aidsdata_keys:
                filepath = os.path.join(logs_basepath,
                                        split + '-' + timestamp + '.json')
                filepaths[split] = [filepath]
                with open(filepath, 'w') as fw:
                    json_str = dumps(aidsdata[split])
                    json.dump(json.loads(json_str), fw)

    return filepaths
Example #6
def doit(opts):
    common.myprint(opts, 'Generating ulps')
    common.mkdir_p(opts.ulps_dir)
    for op_name, operator in operators.operators.items():
        if not operator.tests_mpfr:
            continue
        if op_name in ['gammaln', 'lgamma', 'pow']:
            continue

        mpfr_func = operator.tests_mpfr_name()
        mpfr_rnd = ", MPFR_RNDN"

        for typ in common.ftypes:
            if typ == 'f16':
                random_generator = random_f16_generator
                convert_to_type = "nsimd_f32_to_f16"
                convert_from_type = "nsimd_f16_to_f32"
                mantisse = 10
                size = 0xffff
                mpfr_suffix = "flt"
            elif typ == 'f32':
                convert_to_type = "(f32)"
                convert_from_type = ""
                random_generator = random_f32_generator
                mantisse = 23
                #size = 0xffffffff
                size = 0x00ffffff
                mpfr_suffix = "flt"
            elif typ == 'f64':
                convert_to_type = "(f64)"
                convert_from_type = ""
                random_generator = random_f64_generator
                mantisse = 52
                size = 0x00ffffff
                mpfr_suffix = "d"
            else:
                raise Exception('Unsupported type "{}"'.format(typ))

            filename = os.path.join(opts.ulps_dir, '{}_{}_{}.cpp'. \
                       format(op_name, "ulp", typ))

            if not common.can_create_filename(opts, filename):
                continue

            with common.open_utf8(opts, filename) as out:
                out.write(includes)
                out.write(gen_tests.relative_distance_cpp)
                out.write(
                    code.format(typ=typ,
                                nsimd_func=op_name,
                                mpfr_func=mpfr_func,
                                mpfr_rnd=mpfr_rnd,
                                random_generator=random_generator,
                                convert_from_type=convert_from_type,
                                convert_to_type=convert_to_type,
                                mantisse=mantisse,
                                SIZE=size,
                                mpfr_suffix=mpfr_suffix))

            common.clang_format(opts, filename)
Example #7
def get_image(cfg, json_file):

    log.info("json_file : {}".format(json_file))
    to_path = "/home/nikhil/Documents/placebo-images"
    log.info("to_path : {}".format(to_path))

    IMAGE_API = cfg['IMAGE_API']
    USE_IMAGE_API = IMAGE_API['ENABLE']
    SAVE_LOCAL_COPY = True

    tic = time.time()
    timestamp = ("{:%d%m%y_%H%M%S}").format(datetime.datetime.now())

    log.info("json_file: {}".format(json_file))
    log.info("to_path: {}".format(to_path))

    save_path = os.path.join(to_path, 'lnd-' + timestamp)
    common.mkdir_p(save_path)

    images_save_path = os.path.join(save_path, 'images')
    log.info("images_save_path: {}".format(images_save_path))
    common.mkdir_p(images_save_path)

    with open(json_file, 'r') as file:
        json_lines = json.load(file)
        for i, im in tqdm.tqdm(enumerate(json_lines), total=len(json_lines)):
            if USE_IMAGE_API:
                get_img_from_url_success = annonutils.get_image_from_url(
                    IMAGE_API,
                    im,
                    images_save_path,
                    save_local_copy=SAVE_LOCAL_COPY,
                    resize_image=True)
Example #8
def main(argv):
    args = parse_cl(argv)
    if not args.tag:
        args.tag = os.path.basename(os.path.normpath(args.dir))
    mkdir_p(args.out_dir)
    out_bnames = write_patterns(args.dir, args.out_dir, args.tag)
    return out_bnames
Example #9
 def test_mkdirp(self):
     with warnings.catch_warnings():
         warnings.simplefilter("ignore")
         fname = os.tempnam()
     fname = os.path.join(fname, 'a', 'b12', 'zasdasd')
     common.mkdir_p(fname)
     self.assertTrue(os.path.isdir(fname))
     shutil.rmtree(fname)
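
os.tempnam was deprecated for security reasons and removed in Python 3, which is why the test suppresses warnings around it. A rough Python 3 rewrite of the same test, assuming the same common.mkdir_p helper, could use tempfile instead:

import os
import shutil
import tempfile

def test_mkdirp(self):
    base = tempfile.mkdtemp()
    try:
        fname = os.path.join(base, 'a', 'b12', 'zasdasd')
        common.mkdir_p(fname)
        self.assertTrue(os.path.isdir(fname))
    finally:
        shutil.rmtree(base)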
Example #11
def doit(opts):
    common.mkdir_p(opts.src_dir)
    common.myprint(opts, 'Generating source for binary')
    opts.platforms = common.get_platforms(opts)
    for platform in opts.platforms:
        mod = opts.platforms[platform]
        for simd_ext in mod.get_simd_exts():
            write_cpp(opts, simd_ext, mod.emulate_fp16(simd_ext))
Example #12
def get_image_from_url(image_api,
                       image_name,
                       base_path_img='',
                       save_local_copy=True,
                       debug=False,
                       resize_image=False):
    """Get the image from HTTP/HTTPS URL
  REF:
  http://docs.python-requests.org/en/master/user/quickstart/
  https://realpython.com/python-requests/
  https://stackoverflow.com/questions/13137817/how-to-download-image-using-requests

  TODO:
    error handling in fetching image file
  """
    # log.info("\nget_image_from_url:-----------------------------")
    filepath_img = os.path.join(base_path_img, image_name)
    success = False

    if os.path.exists(filepath_img):
        # log.info("Image already exists: filepath_img: {}".format(filepath_img))
        success = True
    else:
        base_url = image_api['URL']
        params = image_api['PARAMS']
        params['image'] = image_name

        res = requests.get(base_url, params=params, stream=True)

        if debug:
            # log.info("Request url,status_code,content-type" )
            # log.info("res:{}".format(res))
            log.info("{}\n,{},{}".format(res.url, res.status_code,
                                         res.headers['content-type']))

        if res.status_code == 200:
            if save_local_copy:
                ## Create base_path_img if the path is non-empty and does not exist yet
                if base_path_img and not os.path.exists(base_path_img):
                    common.mkdir_p(base_path_img)

                if resize_image:
                    image = Image.open(BytesIO(res.content))
                    image = image.resize((1280, 720), Image.ANTIALIAS)
                    image.save(filepath_img)
                    success = True
                else:
                    with open(filepath_img, 'wb') as of:
                        res.raw.decode_content = True
                        # shutil.copyfileobj(res.raw, of)
                        of.write(res.content)
                        success = True
                        log.info("Image saved at filepath_img: {}".format(
                            filepath_img))

        del res

    return success
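
The shape of image_api is only implied by the fields the function reads (image_api['URL'] and image_api['PARAMS']). A hypothetical call, with made-up URL and parameter names, might look like:

image_api = {
    'URL': 'https://example.com/get-image',  # assumed endpoint
    'PARAMS': {'token': 'abc123'},           # 'image' is filled in by the function
}
ok = get_image_from_url(image_api, 'frame-0001.jpg',
                        base_path_img='/tmp/images',
                        save_local_copy=True)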
Example #13
def get_filename(opts, op, lf, rt):
    tests_dir = os.path.join(opts.tests_dir, "modules/fixed_point")
    common.mkdir_p(tests_dir)
    filename = os.path.join(tests_dir, '{}.fp_{}_{}.cpp'.format(op, lf, rt))
    if os.path.exists(filename):
        os.remove(filename)
    if common.can_create_filename(opts, filename):
        return filename
    else:
        return None
Example #14
def gen_html(opts):
    # check if md2html exists
    md2html = 'md2html.exe' if platform.system() == 'Windows' else 'md2html'
    doc_dir = os.path.join(opts.script_dir, '..', 'doc')
    full_path_md2html = os.path.join(doc_dir, md2html)
    if not os.path.isfile(full_path_md2html):
        msg = '-- Cannot generate HTML: {} not found. '.format(md2html)
        if platform.system() == 'Windows':
            msg += 'Run "nmake /F Makefile.win" in {}'.format(doc_dir)
        else:
            msg += 'Run "make -f Makefile.nix" in {}'.format(doc_dir)
        print(msg)
        return

    # get all markdown files
    md_dir = os.path.join(doc_dir, 'markdown/modules/fixed_point')
    html_dir = os.path.join(doc_dir, 'html/modules/fixed_point')
    common.mkdir_p(html_dir)
    dirs = [md_dir]
    md_files = []
    while len(dirs) > 0:
        curr_dir = dirs.pop()
        entries = os.listdir(curr_dir)
        for entry in entries:
            full_path_entry = os.path.join(curr_dir, entry)
            if full_path_entry == '..' or full_path_entry == '.':
                continue
            elif os.path.isdir(full_path_entry):
                continue
            elif entry.endswith('.md'):
                md_files.append(full_path_entry)

    # header and footer
    doc_title = '`nsimd` fixed point module documentation'
    root_dir = '../..'
    assets_dir = '../../assets'
    img_dir = '../../img'
    header = header_src.format(doc_title=doc_title,
                               root_dir=root_dir,
                               img_dir=img_dir,
                               assets_dir=assets_dir)
    footer = footer_src
    tmp_file = os.path.join(doc_dir, 'tmp.html')
    for filename in md_files:
        i = filename.rfind('markdown')
        if i == -1:
            continue
        output = filename[0:i] + 'html' + filename[i + 8:-2] + 'html'
        common.mkdir_p(os.path.dirname(output))
        os.system('{} "{}" "{}"'.format(full_path_md2html, filename, tmp_file))
        with common.open_utf8(opts, output) as fout:
            fout.write(header)
            with io.open(tmp_file, mode='r', encoding='utf-8') as fin:
                fout.write(fin.read())
            fout.write(footer)
Example #15
    def initialize(self): 
        # safety check to make sure we don't blow away an existing cluster!
        if self.use_existing:
             raise RuntimeError('initialize was called on an existing cluster! Avoiding touching anything.') 

        super(Ceph, self).initialize()

        # unmount any kernel rbd volumes
        self.rbd_unmount()

        # shutdown any old processes
        self.shutdown()

        # Cleanup old junk and create new junk
        self.cleanup()
        common.mkdir_p(self.tmp_dir)
        common.pdsh(settings.getnodes('head', 'clients', 'mons', 'osds', 'rgws', 'mds'), 'mkdir -p -m0755 -- %s' % self.tmp_dir).communicate()
        common.pdsh(settings.getnodes('clients', 'mons', 'osds', 'rgws', 'mds'), 'mkdir -p -m0755 -- %s' % self.pid_dir).communicate()
        common.pdsh(settings.getnodes('clients', 'mons', 'osds', 'rgws', 'mds'), 'mkdir -p -m0755 -- %s' % self.log_dir).communicate()
        common.pdsh(settings.getnodes('clients', 'mons', 'osds', 'rgws', 'mds'), 'mkdir -p -m0755 -- %s' % self.monitoring_dir).communicate()
        common.pdsh(settings.getnodes('clients', 'mons', 'osds', 'rgws', 'mds'), 'mkdir -p -m0755 -- %s' % self.core_dir).communicate()
        self.distribute_conf()

        # Set the core directory
        common.pdsh(settings.getnodes('clients', 'mons', 'osds', 'rgws', 'mds'), 'echo "%s/core.%%e.%%p.%%h.%%t" | sudo tee /proc/sys/kernel/core_pattern' % self.tmp_dir).communicate()

        # Create the filesystems
        self.setup_fs()

        # Build the cluster
        monitoring.start('%s/creation' % self.monitoring_dir)
        self.make_mons()
        self.make_osds()
        self.start_rgw()
        monitoring.stop()

        # Check Health
        monitoring.start('%s/initial_health_check' % self.monitoring_dir)
        self.check_health()
        monitoring.stop()

        # Disable scrub and wait for any scrubbing to complete 
        self.disable_scrub()
        self.check_scrub()

        # Make the crush and erasure profiles
        self.make_profiles()

        # Perform Idle Monitoring
        if self.idle_duration > 0:
            monitoring.start("%s/idle_monitoring" % self.monitoring_dir)
            time.sleep(self.idle_duration)
            monitoring.stop()

        return True
Example #17
def gen_archis_platform(opts, platform):
    include_dir = os.path.join(opts.include_dir, platform)
    for s in opts.platforms[platform].get_simd_exts():
        print('-- Found new SIMD extension: {}'.format(s))
        if s in opts.simd:
            simd_dir = os.path.join(include_dir, s)
            common.mkdir_p(simd_dir)
            gen_archis_types(opts, simd_dir, platform, s)
            gen_archis_simd(opts, platform, s, simd_dir)
        else:
            print('--   Extension excluded by command line')
Example #18
def unpack_res(file_path, dir_path):
    with open(file_path) as f:
        data = f.read()

    res = ResourceFile.parse(data)

    mkdir_p(dir_path)

    for file_name, file_data in res.items():
        with open(os.path.join(dir_path, file_name), "wb") as f:
            f.write(file_data)
Example #19
 def __init__(self, path=str(datetime.now().strftime("%Y%m%d%H%M%S"))):
     self.path = os.path.join(config.HISTORY_DIR, path)
     self.history = {
         'radius': [],
         'epoch_time': [],
         'error': [],
         'xs_files': [],
         'xs_images': [],
         'animation': None
     }
     if os.path.exists(self.path):
         shutil.rmtree(self.path, ignore_errors=True)
     common.mkdir_p(self.path)
Example #20
def detect_from_images(appcfg, dnnmod, images, path, model, class_names, cmdcfg, api_model_key, show_bbox=False):
  """detections from the images
  Convention:
    image - image filename
    filepath - the absolute path of the image input file location
    im - binary data after reading the image file
  TODO:
    1. Prediction details log:
      - model details (path), copy of configuration, arch used, all class_names used in predictions, execution time etc.
    2. Verify that masks are properly scaled to the original image dimensions
    3. Impact on prediction of replacing skimage.io.imread with imread wrapper
    4. call response providing the pointer to the saved files
    5. viz from jsonres
    6. memory leak in reading image as read time increases
    7. async file and DB operation. MongoDB limit of 16 MB datasize
  """

  
  ## always create abs filepaths and respective directories
  timestamp = "{:%d%m%y_%H%M%S}".format(datetime.datetime.now())
  filepath = os.path.join(path, "predict-"+timestamp)
  common.mkdir_p(filepath)
  for d in ['splash', 'mask', 'annotations', 'viz', 'mmask', 'oframe']:
    common.mkdir_p(os.path.join(filepath,d))

  detect = apputil.get_module_fn(dnnmod, "detect")

  DBCFG = appcfg['APP']['DBCFG']
  CBIRCFG = DBCFG['CBIRCFG']

  # mclient = motor.motor_asyncio.AsyncIOMotorClient('mongodb://'+CBIRCFG['host']+':'+str(CBIRCFG['port']))
  # dbname = CBIRCFG['dbname']
  # db = mclient[dbname]
  # collection = db['IMAGES']

  # _create_res(detect, filepath, images, path, model, class_names, cmdcfg, api_model_key)

  loop = asyncio.new_event_loop()
  asyncio.set_event_loop(loop)

  try:
    loop.run_until_complete(_create_res(detect, filepath, images, path, model, class_names, cmdcfg, api_model_key, show_bbox=show_bbox))
  finally:
    # shut down and close file descriptors after an interrupt
    loop.run_until_complete(loop.shutdown_asyncgens())
    loop.close()

  file_names = []
  res = []
  return file_names,res
Example #21
    def initialize(self): 
        super(Ceph, self).initialize()

        # unmount any kernel rbd volumes
        self.rbd_unmount()

        # shutdown any old processes
        self.shutdown()

        # Cleanup old junk and create new junk
        self.cleanup()
        common.mkdir_p(self.tmp_dir)
        common.pdsh(settings.getnodes('head', 'clients', 'mons', 'osds', 'rgws', 'mds'), 'mkdir -p -m0755 -- %s' % self.tmp_dir).communicate()
        common.pdsh(settings.getnodes('clients', 'mons', 'osds', 'rgws', 'mds'), 'mkdir -p -m0755 -- %s' % self.pid_dir).communicate()
        common.pdsh(settings.getnodes('clients', 'mons', 'osds', 'rgws', 'mds'), 'mkdir -p -m0755 -- %s' % self.log_dir).communicate()
        common.pdsh(settings.getnodes('clients', 'mons', 'osds', 'rgws', 'mds'), 'mkdir -p -m0755 -- %s' % self.monitoring_dir).communicate()
        common.pdsh(settings.getnodes('clients', 'mons', 'osds', 'rgws', 'mds'), 'mkdir -p -m0755 -- %s' % self.core_dir).communicate()
        self.distribute_conf()

        # Set the core directory
        common.pdsh(settings.getnodes('clients', 'mons', 'osds', 'rgws', 'mds'), 'echo "%s/core.%%e.%%p.%%h.%%t" | sudo tee /proc/sys/kernel/core_pattern' % self.tmp_dir).communicate()

        # Create the filesystems
        self.setup_fs()

        # Build the cluster
        monitoring.start('%s/creation' % self.monitoring_dir)
        self.make_mons()
        self.make_osds()
        self.start_rgw()
        monitoring.stop()

        # Check Health
        monitoring.start('%s/initial_health_check' % self.monitoring_dir)
        self.check_health()
        monitoring.stop()

        # Wait for initial scrubbing to complete (This should only matter on pre-dumpling clusters)
        self.check_scrub()

        # Make the crush and erasure profiles
        self.make_profiles()

        # Perform Idle Monitoring
        if self.idle_duration > 0:
            monitoring.start("%s/idle_monitoring" % self.monitoring_dir)
            time.sleep(self.idle_duration)
            monitoring.stop()

        return True
Example #22
def set_user_password(username, ous, cp_to=None):
    p = run("diceware -n 6", shell=True, capture_output=True)
    pw = p.stdout.decode('utf-8')[:-1]
    mkdir_p('pws')
    with open('pws/{}.team10.ldap.pw'.format(username), 'w') as f:
        f.write(pw + '\n')
    if cp_to is not None:
        cmd = "runuser -l {0} -c \"echo '{1}' > {2}{0}.team10.ldap.pw\""
        run(cmd.format(username, pw, cp_to), shell=True)
    identifier = ("uid={0},ou={1},dc=team10,dc=psa,dc=in,dc=tum,dc=de".format(
        username, ',ou='.join(ous)))
    cmd = ("ldappasswd {0} {1} {2} -x \"{3}\" -s {4}".format(
        ldap_host, ldap_auth, ldap_dn, identifier, pw))
    run(cmd, shell=True)
Example #23
    def initialize(self): 
        super(Ceph, self).initialize()

        # unmount any kernel rbd volumes
        self.rbd_unmount()

        # shutdown any old processes
        self.shutdown()

        # Cleanup old junk and create new junk
        self.cleanup()
        common.mkdir_p(self.tmp_dir)
        common.pdsh(settings.getnodes('head', 'clients', 'mons', 'osds', 'rgws', 'mds'), 'mkdir -p -m0755 -- %s' % self.tmp_dir).communicate()
        common.pdsh(settings.getnodes('clients', 'mons', 'osds', 'rgws', 'mds'), 'mkdir -p -m0755 -- %s' % self.pid_dir).communicate()
        common.pdsh(settings.getnodes('clients', 'mons', 'osds', 'rgws', 'mds'), 'mkdir -p -m0755 -- %s' % self.log_dir).communicate()
        common.pdsh(settings.getnodes('clients', 'mons', 'osds', 'rgws', 'mds'), 'mkdir -p -m0755 -- %s' % self.monitoring_dir).communicate()
        common.pdsh(settings.getnodes('clients', 'mons', 'osds', 'rgws', 'mds'), 'mkdir -p -m0755 -- %s' % self.core_dir).communicate()
        self.distribute_conf()

        # Set the core directory
        common.pdsh(settings.getnodes('clients', 'mons', 'osds', 'rgws', 'mds'), 'echo "%s/core.%%e.%%p.%%h.%%t" | sudo tee /proc/sys/kernel/core_pattern' % self.tmp_dir).communicate()

        # Create the filesystems
        self.setup_fs()

        # Build the cluster
        monitoring.start('%s/creation' % self.monitoring_dir)
        self.make_mons()
        self.make_osds()
        monitoring.stop()

        # Check Health
        monitoring.start('%s/initial_health_check' % self.monitoring_dir)
        self.check_health()
        monitoring.stop()

        # Wait for initial scrubbing to complete (This should only matter on pre-dumpling clusters)
        self.check_scrub()

        # Make the crush and erasure profiles
        self.make_profiles()

        # Perform Idle Monitoring
        if self.idle_duration > 0:
            monitoring.start("%s/idle_monitoring" % self.monitoring_dir)
            time.sleep(self.idle_duration)
            monitoring.stop()

        return True
Example #24
def get_filename(opts, op, typ, lang):
    pp_lang = {
        'c_base': 'C (base API)',
        'cxx_base': 'C++ (base API)',
        'cxx_adv': 'C++ (advanced API)'
    }
    tests_dir = os.path.join(opts.tests_dir, lang)
    common.mkdir_p(tests_dir)
    filename = os.path.join(
        tests_dir, '{}.{}.{}'.format(op.name, typ,
                                     'c' if lang == 'c_base' else 'cpp'))
    if common.can_create_filename(opts, filename):
        return filename
    else:
        return None
Example #25
def gen_tests(opts):
    for func in rand_functions:
        for word_size, nwords_nrounds in func.wordsize_nwords_nrounds.items():
            for nwords, list_nrounds in nwords_nrounds.items():
                for nrounds in list_nrounds:
                    # Write headers
                    dirname = os.path.join(opts.tests_dir, 'modules', 'random')
                    common.mkdir_p(dirname)
                    filename = os.path.join(dirname, '{}.cpp'. \
                               format(func.gen_function_name(nwords, word_size,
                                                             nrounds)))
                    with common.open_utf8(opts, filename) as out:
                        out.write(
                            func.gen_tests(opts, nrounds, word_size, nwords))

                    common.clang_format(opts, filename)
Example #26
def gen_filename(f, simd, typ):
    ## Retrieve directory from global options
    benches_dir = common.mkdir_p(os.path.join(_opts.benches_dir, _lang))
    ## Generate path (composed from: function name + type + extension)
    return os.path.join(
        benches_dir, '{}.{}.{}.{}'.format(f.name, simd, typ,
                                          common.ext_from_lang(_lang)))
Example #27
    def generate_box_plot(self, start_year: int, end_year: int) -> dict:
        """
        Generate a box plot by years data.
        :param start_year: starting year for box plotting
        :param end_year: ending year for box plotting
        :return: returns the generated box plot images' saving paths class instance
        """
        try:
            print('Generate a BOX PLOT between years[{0}-{1}]...'.format(
                start_year, end_year))

            my_db = DBOperations('weather.sqlite')
            years_data_list = []
            for current_year in range(start_year, end_year + 1):
                years_data_list.extend(my_db.fetch_data(current_year))

            # format: {1: [Jan temps], 2: [Feb temps], ..., 12: [Dec temps]}
            monthly_weather_data = {month: [] for month in range(1, 13)}

            for item in years_data_list:
                if is_number(item[5]):
                    monthly_weather_data[int(item[1][5:7])].append(
                        float(item[5]))

            plot_title = 'Monthly Temperature Distribution for: ' + str(
                start_year) + ' to ' + str(end_year)
            plt.boxplot(monthly_weather_data.values(), sym="o", whis=1.5)
            plt.xlabel('Month')
            plt.ylabel('Temperature (Celsius)')
            plt.title(plot_title)
            file_name = str(start_year) + '_to_' + str(end_year) + '.png'

            # Create new directory
            output_dir = "images"
            mkdir_p(output_dir)
            file_path = '{0}/{1}'.format(output_dir, file_name)
            self.box_plot_path_saving_dict[str(start_year) + '-' +
                                           str(end_year)] = file_path

            plt.savefig(file_path)
            plt.show()
            return self.box_plot_path_saving_dict
        except Exception as e:
            self.logger.error(e)
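
Both plotting methods in this class draw on matplotlib's implicit current figure, so calling them repeatedly in one process accumulates artists from earlier calls. A common remedy, sketched here as a hypothetical helper rather than the class's actual code, is an explicit figure per plot:

import matplotlib.pyplot as plt

def save_box_plot(data_by_month, file_path):
    # An explicit figure keeps repeated calls independent of each other.
    fig, ax = plt.subplots()
    ax.boxplot(list(data_by_month.values()), sym='o', whis=1.5)
    ax.set_xlabel('Month')
    ax.set_ylabel('Temperature (Celsius)')
    fig.savefig(file_path)
    plt.close(fig)  # release the figure so the next call starts clean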
Example #28
def save(render_ctx, is_dev=DEBUG):
    fargs = {'date_str': datetime.utcnow().strftime('%Y%m%d'),
             'lang_shortcode': render_ctx['short_lang_name'],
             'dev_flag': ''}
    if is_dev:
        fargs['dev_flag'] = '_dev'
    out_path = DATA_PATH_TMPL.format(**fargs)
    rendered = render_rc(render_ctx)
    try:
        out_file = open(out_path, 'w')
    except IOError:
        mkdir_p(os.path.dirname(out_path))
        out_file = open(out_path, 'w')
        # if this second open also fails, the parent directories could not be created
    with out_file:
        out_file.write(rendered)
    return (out_path, len(rendered))
Example #30
    def generate_line_plot(self, specific_year: int,
                           specific_month: int) -> dict:
        """
        Generate a line plot by month data.
        :param specific_month: the chosen month for line plotting
        :param specific_year: the chosen year for line plotting
        :return: returns the generated line plot images' saving paths class instance
        """
        try:
            print('Generate a Line PLOT for [{0}-{1}]...'.format(
                specific_year, specific_month))
            month_string_list = [
                'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep',
                'Oct', 'Nov', 'Dec'
            ]
            my_db = DBOperations('weather.sqlite')
            specific_timestamp = []  # 2020-12-01
            specific_month_data = []

            month_data = my_db.fetch_data(specific_year, specific_month)
            for item in month_data:
                if is_number(item[5]):
                    specific_timestamp.append(float(item[1][-2:]))
                    specific_month_data.append(float(item[5]))

            plt.plot(specific_timestamp, specific_month_data)
            plt.xlabel('Day')
            plt.ylabel('Temperature (Celsius)')
            plot_title = 'Daily Temperature Distribution for: ' + month_string_list[
                specific_month - 1] + ' ' + str(specific_year)
            plt.title(plot_title)
            file_name = str(specific_year) + '-' + str(specific_month) + '.png'

            # Create new directory
            output_dir = "images"
            mkdir_p(output_dir)
            file_path = '{0}/{1}'.format(output_dir, file_name)

            self.line_plot_path_saving_dict[str(specific_year) + '-' +
                                            str(specific_month)] = file_path
            plt.savefig(file_path)
            plt.show()

            return self.line_plot_path_saving_dict
        except Exception as e:
            self.logger.error(e)
Example #31
def cufflinks_worker(args):
    resultfolder = 'result/' + os.path.basename(args.filelist) + \
                   args.cufflinks_folder
    if not os.path.exists(resultfolder):
        ## Sometimes, when the jobs scheduled at the same time, they
        ## will try to create folder at the same time, therefore, we
        ## need this "mkdir -p" function to avoid the exception.
        common.mkdir_p(resultfolder)
    f = args.filename
    resultsubfolder = resultfolder + '/' + \
                      os.path.basename(f.strip().replace('.bam',''))
    if not os.path.exists(resultsubfolder):
        os.makedirs(resultsubfolder)
    if args.is_new_cufflinks:
        common.run('cufflinks -F 0.1 -p 1 -q -o ' + resultsubfolder +
                   ' ' + f.strip())
    else:
        common.run('old_cufflinks -F 0.1 -p 1 -q -o ' + resultsubfolder +
                   ' ' + f.strip())
Example #32
def setup():
    print("Creating student schema ldif, slaptest output follows:\n")
    # Read in slaptest configuration file template
    conf = get_template('conf')
    # Format placeholder with the schema file path
    conf = conf.format_map({'pwd': os.getcwd()})
    # Create a temporary directory for slaptest output
    mkdir_p('tmpdir')
    # Write slaptest configuration file
    with open('tmpdir/student.conf', 'w') as f:
        f.write(conf)
    # Let slaptest create a ldif
    run("slaptest -f tmpdir/student.conf -F tmpdir/", shell=True)

    print("\nInstalling schema")
    # Read schema ldif and remove unnecessary lines
    with open('tmpdir/cn=config/cn=schema/cn={3}student.ldif', 'r') as f:
        ldif = f.read().splitlines()[5:-7]
    # Add some required lines
    ldif = ["dn: cn=student,cn=schema,cn=config",
            "objectClass: olcSchemaConfig",
            "cn: student"] + ldif
    # Write ldif file
    with open('tmpdir/add_student_schema.ldif', 'w') as f:
        f.write('\n'.join(ldif) + '\n')
    # Add schema to configuration
    run("ldapadd -Y EXTERNAL -H ldapi:/// -D \"cn=config\" -f tmpdir/add_student_schema.ldif", shell=True)
    # add_ldif("tmpdir/add_student_schema.ldif")

    print("Adding organizational units")
    for ou in ['Students', 'Praktikum']:
        filename = 'tmpdir/add_{}_org_unit.ldif'.format(ou)
        format_dict = {'ou': ou, 'in_ou_complete': ''}
        create_ldif('organizational_unit', format_dict, filename)
        add_ldif(filename)
    for i in range(1, 12):
        filename = 'tmpdir/add_Praktikum_Team{}_org_unit.ldif'.format(i)
        format_dict = {'ou': 'Team' + str(i),
                       'in_ou_complete': 'ou=Praktikum,'}
        create_ldif('organizational_unit', format_dict, filename)
        add_ldif(filename)
Example #33
def save_issue(fmt, rendered, lang, is_dev):
    fargs = {'fmt': fmt,
             'date_str': datetime.utcnow().strftime('%Y%m%d'),
             'lang_shortcode': lang,
             'dev_flag': '',
             'email_flag': ''}
    if is_dev:
        fargs['dev_flag'] = '_dev'
    if fmt == 'email':
        fargs['email_flag'] = '_e'
        fargs['fmt'] = 'html'
    out_path = ARCHIVE_PATH_TMPL.format(**fargs)
    try:
        out_file = open(out_path, 'w')
    except IOError:
        mkdir_p(os.path.dirname(out_path))
        out_file = open(out_path, 'w')
        # if this second open also fails, the parent directories could not be created
    with out_file:
        out_file.write(rendered)
    return (out_path, len(rendered))
Example #34
def write_rpt_summary_annon(cfg, args, rpt_summary):
    log.info("-----------------------------")

    from bson.json_util import dumps

    timestamp = ("{:%d%m%y_%H%M%S}").format(datetime.datetime.now())
    logs_basepath = os.path.join(os.getenv('AI_LOGS'), 'annon')

    ## create logs_basepath if it does not exist
    common.mkdir_p(logs_basepath)

    DBCFG = cfg['DBCFG']
    ANNONCFG = DBCFG['ANNONCFG']
    dbname = ANNONCFG['dbname']
    filepath_errors = None

    filepath = os.path.join(logs_basepath,
                            dbname + '-summary-' + timestamp + '.json')
    log.debug("filepath: {}".format(filepath))
    with open(filepath, 'w') as fw:
        # json_str = JSONEncoder().encode(rpt_summary)
        # json_str = json.encode(rpt_summary, cls=JSONEncoder)
        json_str = dumps(rpt_summary)
        # fw.write(json.dumps(json_str))
        # https://stackoverflow.com/questions/45539242/write-json-to-a-file-without-writing-escape-backslashes-to-file
        json.dump(json.loads(json_str), fw)

    ## write filepath_errors
    if ('errors' in rpt_summary
            and rpt_summary['errors']['errors_for_reporting']):
        filepath_errors = os.path.join(
            logs_basepath, dbname + '-errors-' + timestamp + '.json')
        log.debug("filepath_errors: {}".format(filepath_errors))
        with open(filepath_errors, 'w') as fw:
            json_str = dumps(rpt_summary['errors']['errors_for_reporting'])
            json.dump(json.loads(json_str), fw)

    return [filepath, filepath_errors]
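
The dumps → loads → dump round-trip above looks redundant, but it is what keeps the output file free of escaped backslashes: bson.json_util.dumps returns a string, and passing that string straight to json.dump would serialize it as a single quoted JSON string rather than as an object. A short illustration:

import json

doc = {'a': 1}
s = json.dumps(doc)        # '{"a": 1}'
json.dumps(s)              # '"{\\"a\\": 1}"'  -- an escaped string literal
json.dumps(json.loads(s))  # '{"a": 1}'        -- an object again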
Example #35
def cache(path=None, format=None, filename=None):

    url = request.args.get('url', '')

    url = url.split('?')[0]

    parsed_tuple = urlparse.urlparse(url)

    path_dir = 'cache/' + parsed_tuple.netloc + \
        os.path.dirname(parsed_tuple.path)
    common.mkdir_p(path_dir)

    filepath, tmpfilename = os.path.split(url)
    shortname, extension = os.path.splitext(tmpfilename)

    filename = path_dir + '/' + tmpfilename
    print(filepath, tmpfilename, shortname, extension)
    print(filename)

    # print requests.get(url).text
    if os.path.exists(filename):
        # print()
        if os.path.getsize(filename) < 1024:
            print(filename, os.path.getsize(filename))
            c = common.httpGet(url)
            common.writeFile(filename, c)

        c = common.readFile(filename)
    else:
        c = common.httpGet(url)
        common.writeFile(filename, c)

    if extension == '.m3u8':
        ts_urls = get_ts_url(c)
        for index, ts_url in enumerate(ts_urls):
            fpath = path_dir + '/' + ts_url
            furl = filepath + '/' + ts_url
            donwload(furl, fpath)
    return c
Example #36
def copy_images(json_file):
  assert os.path.exists(json_file),'{:s} not exists'.format(json_file)
  print("json_file : {}".format(json_file))
  base_path = getBasePath(json_file)
  # base_path = os.path.join(os.path.dirname(json_file),'')
  print("base_path : {}".format(base_path))
  path = os.path.join(base_path,'test_images')
  print("path : {}".format(path))
  mkdir_p(path)
  with open(json_file, 'r') as jf:
    json_lines = jf.readlines()
    images = []
    for json_line in json_lines:
      sample = json.loads(json_line)
      lanes = sample['lanes']
      image = sample['raw_file']
      images.append(image)        
    print(len(images))
    for im in images:
      # print(im)
      shutil.copy(im,path)
Example #37
def doit(opts):
    print('-- Generating SIMD implementations')
    opts.platforms = common.get_platforms(opts)
    for p in opts.platforms:
        common.mkdir_p(os.path.join(opts.include_dir, p))
        gen_archis_platform(opts, p)
Example #38
def worker(args):
    """For pickling all Fanfics, and creating a dictionary to look up where
    fanfics can be loaded.
    lock = the Lock for the dictionary
    path = path per fic"""
    lookup, rating, cat, path = args
    
    # Make a new folder for pickled objects if needed
    fold_name = os.path.basename(os.path.dirname(path))
    new_folder = '%s%s' % (fold_name, '_pik')
    total_path = os.path.dirname(os.path.dirname(path))
    new_dir = os.path.join(total_path, new_folder)
    mkdir_p(new_dir)
    # Make fic object
    # This will skip fics that do not have any relationship category because
    # I scraped wrong
    try:
        fic = Fanfic(path)
    except:
        print('Everything is Broken')
        return None  # don't do anything else in this function
        
    try:
        print(fic.id)
        # save fic as id.pik
        filename = '%s/%s.pik' % (new_dir, fic.id)
        with open(filename, 'wb') as f:  # pickle needs a binary file
            pik.dump(fic, f)

        # rating
        rating[fic.rating[0]] += [fic.id]
    
        # category:
        multi = False
        thecat = []
        if len(fic.cat) > 1:
            multi = True
        for c in fic.cat:
            if c == 'M/M':
                cat['S'] += [fic.id]
                thecat += ['S']
            if c == 'F/M' or c == 'M/F':
                cat['H'] += [fic.id]
                thecat += ['H']
            if c == 'F/F':
                cat['FS'] += [fic.id]
                thecat += ['FS']
            if c == 'Multi':
                multi = True
                thecat += ['M']
            if c == 'Other':
                cat['O'] += [fic.id]
                thecat += ['O']
            if c == 'Gen':
                cat['G'] += [fic.id]
                thecat += ['G']
        if multi:
            cat['M'] += [fic.id]
            if 'M' not in thecat:
                thecat += ['M']
                
                
        # Make lookup object:
        look = Lookup(fic.id, filename, thecat, fic.rating)

        # add to dictionary
        lookup[fic.id] = look

    except:
        print('Something went wrong')
    return None
Example #39
  def __init__(self, problem_name, core_spec_file="", mesh_base_name="", verbosity=0):
    super(ProblemData, self).__init__()

    self.folder = os.path.abspath(os.path.join('PROBLEMS', problem_name))

    # FIXME:
    #if MPI.rank(comm) == 0:
    self.parse_material_data()

    MPI.barrier(comm)

    self.verb = verbosity
    self.name = problem_name
    self.core_spec_file = core_spec_file

    self.xsd = dict()

    self.mesh_base_name = mesh_base_name
    if self.mesh_base_name == "":
      self.mesh_base_name = self.name

    self.out_folder = os.path.join(self.folder, "OUT", self.mesh_base_name)
    self.xs_vis_folder = os.path.join(self.out_folder, "XS")

    mkdir_p(self.xs_vis_folder)

    self.used_xs = set()
    self.isotropic_source_everywhere = True

    # Two alternative ways of specifying a mesh:
    #   1:  Python module
    #   2:  set of Dolfin mesh files + optional helper files defining region/material/boundary maps
    #
    self.mesh_module = None
    self.mesh_files = None

    self.region_physical_name_map = dict()
    self.reg_name_mat_name_map = dict()
    boundary_physical_name_map = dict()
    
    try:
      # Try to import a mesh module
      self.mesh_module = self.mesh_data_from_module(self.mesh_base_name)
    
    except (ImportError, IOError):
      # If failed, Dolfin mesh file structure is expected
      
      self.mesh_files = MeshFiles(self.folder, self.mesh_base_name)
      
      self.region_physical_names_from_file(self.mesh_files.reg_names)
      self.reg_names_to_material_names_from_file(self.mesh_files.mat_names)
      self.bc = BoundaryData.from_file(self.mesh_files.bnd_names)
    
    else:
      # Import region/material/boundary maps from the mesh module 
      
      try:
        self.region_physical_name_map = self.mesh_module.region_map
      except AttributeError:
        pass
  
      # try to get bc data from boundary_id-to-boundary_name map and a file with boundary_name-to-bc correspondences
      try:
        boundary_physical_name_map = self.mesh_module.boundary_map
      except AttributeError:
        pass
      finally:
        # use either the boundary_physical_name_map, or - if not set - assume all-vacuum bc
        self.bc = BoundaryData.from_boundary_names_map(boundary_physical_name_map)
  
      try:
        self.reg_name_mat_name_map = self.mesh_module.material_map
      except AttributeError:
        pass

    self.core = CoreData()
    self.load_core_and_bc_data()

    # Check if bcs have been loaded from core_spec_file; if not, try loading them directly from the mesh module
    if self.mesh_module is not None:
      if len(self.bc.vacuum_boundaries) == 0:
        try:
          self.bc.vacuum_boundaries = set(self.mesh_module.vacuum_boundaries)
        except AttributeError:
          pass

      if len(self.bc.reflective_boundaries) == 0:
        try:
          self.bc.reflective_boundaries = set(self.mesh_module.reflective_boundaries)
        except AttributeError:
          pass

      if len(self.bc.incoming_fluxes) == 0:
        try:
          self.bc.incoming_fluxes = self.mesh_module.incoming_fluxes
        except AttributeError:
          pass
Example #40
                  default=False)
parser.add_option("--delete", dest="delete",
                  action="store_true",
                  help="Remove all the existing data",
                  default=False)


(options, args) = parser.parse_args()


if __name__ == '__main__':

    findexes = options.indexes
    id = options.id
    name = options.name
    dst_dir = options.dst_dir

    for d in ["circuits", "races", "runners"]:
        mkdir_p(os.path.join(dst_dir,d))

    dates = []
    platforms = []
    if not findexes:
        tags, platforms, dates = get_indexes_from_cronochip(name)
    else:
        tags = [line.replace("\n", "") for line in open(findexes)]

    # Load circuit json
    parse_circuit(tags, id, name, dst_dir,
                  update=options.update, delete=options.delete,
                  dates=dates, platforms=platforms)