Example #1
def generate_view(query):
	if len(query) == 0 and config.get('todo.command.last') == 'quick_create':
		add_query = config.get('todo.user.query')
		add_tag = config.get('todo.tag.recent')
		itemlist.save_todo(add_query, silent=True)
		config.update_state(command='', query='')

	info = parser.parse(query)
	tag = info['tag']
	q = info['task']
	
	todos = itemlist.get_todo_list()

	# view for pinned items
	# pinned items should have unique uuid and different logo
	pinned = [t for t in todos if itemlist.feature(t,'pinned') == True]
	pinned = [t for t in pinned if (tag is None or t['group'] == tag)]
	pinned = [t for t in pinned if (q is None or t['title'].lower().find(q.lower()) >= 0)] 
	pinned = pinned[::-1]
	# view for non-pinned items
	normal = [t for t in todos if itemlist.feature(t,'pinned') == False]
	normal = [t for t in normal if (tag is None or t['group'] == tag)]
	normal = [t for t in normal if (q is None or t['title'].lower().find(q.lower()) >= 0)] 
	normal = normal[::-1]

	feedback_items = []
	if len(normal) == 0 and len(pinned) == 0:
		feedback_items.append( generate_add_feedbackitem(query, info) )
	else:
		pinned = [generate_pinned_feedbackitem(t) for t in pinned]
		normal = [generate_todo_feedbackitem(t) for t in normal]
		feedback_items = pinned + normal
	
	alfred.write(alfred.xml(feedback_items))
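
A note on the assumed contract: generate_view expects parser.parse to return a dict with 'tag' and 'task' keys. A minimal sketch of such a parser (hypothetical; the real parser module is not shown here):

def parse(query):
    # Hypothetical sketch: "#work buy milk" -> {'tag': 'work', 'task': 'buy milk'};
    # an empty query yields {'tag': None, 'task': None}, matching the None checks above.
    tag, task = None, None
    for token in query.split():
        if token.startswith('#'):
            tag = token[1:]
        else:
            task = (task + ' ' + token) if task else token
    return {'tag': tag, 'task': task}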
Example #2
def cb_story(args):
    # adds tags to the entry properties
    request = args["request"]
    entry = args["entry"]
    config = request.get_configuration()

    sep = config.get("tags_separator", ",")
    tags = [t.strip() for t in entry.get("tags", "").split(sep)]
    tags.sort()
    entry["tags_raw"] = tags

    form = request.get_form()
    try:
        flavour = form["flav"].value
    except KeyError:
        flavour = config.get("default_flavour", "html")
    baseurl = config.get("base_url", "")
    trigger = config.get("tags_trigger", "tag")
    template = config.get("tags_item", '<a href="%(tagurl)s">%(tag)s</a>')

    tags = [template % {"base_url": baseurl,
                        "flavour": flavour,
                        "tag": tag,
                        "tagurl": "/".join([baseurl, trigger, tag])}
            for tag in tags]
    entry["tags"] = ", ".join(tags)
    return args
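
As a rough illustration of the template expansion cb_story performs (entry data and config values made up for the example):

# Illustrative only: simulate the tag-link expansion done in cb_story.
template = '<a href="%(tagurl)s">%(tag)s</a>'
baseurl, trigger = "http://example.com/blog", "tag"
tags = ["python", "plugins"]
links = [template % {"base_url": baseurl,
                     "flavour": "html",
                     "tag": tag,
                     "tagurl": "/".join([baseurl, trigger, tag])}
         for tag in tags]
print(", ".join(links))
# -> <a href="http://example.com/blog/tag/python">python</a>, <a href="http://example.com/blog/tag/plugins">plugins</a>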
Example #3
def main():
    # change path to launcher
    global __file__
    __file__ = os.path.abspath(__file__)
    if os.path.islink(__file__):
        __file__ = getattr(os, 'readlink', lambda x: x)(__file__)
    os.chdir(os.path.dirname(os.path.abspath(__file__)))

    launcher_log.info("start XX-Net %s", update_from_github.current_version())

    web_control.confirm_xxnet_exit()

    setup_win_python.check_setup()

    module_init.start_all_auto()

    web_control.start()


    if has_desktop and config.get(["modules", "launcher", "popup_webui"], 1) == 1:
        host_port = config.get(["modules", "launcher", "control_port"], 8085)
        webbrowser.open("http://127.0.0.1:%s/" % host_port)

    update.start()

    if config.get(["modules", "launcher", "show_systray"], 1):
        sys_tray.serve_forever()
    else:
        while True:
            time.sleep(100)

    module_init.stop_all()
    sys.exit()
Example #4
def main():
    """ Initialise and do requested operations. """
    try:
        # set up the logging system
        prepare_logging()
        if config.get('version'):
            # in version mode, print version and exit
            show_version()
        elif config.get('help'):
            # in help mode, print usage and exit
            show_usage()
        elif config.get('convert'):
            # in converter mode, convert and exit
            convert()
        else:
            # otherwise, start an interpreter session
            start_basic()
    finally:
        # clean up our temp dir if we made one
        if plat.temp_dir:
            shutil.rmtree(plat.temp_dir)
        # avoid sys.excepthook errors when piping output
        # http://stackoverflow.com/questions/7955138/addressing-sys-excepthook-error-in-bash-script
        try:
            sys.stdout.close()
        except:
            pass
        try:
            sys.stderr.close()
        except:
            pass
Example #5
def set_ini_properties(properties):
  aux = []
  for p in properties:
    line = parse_ini_property(p) + '=' + str(properties[p]) + '\r\n'
    aux.append(line)
  if len(aux) > 0:
    utils.write_file(config.get('ini_path')+config.get('emulator')+'.ini', aux)
Example #6
    def version(self, extended=False):
        """
        What version is cobbler?

        If extended == False, returns a float for backwards compatibility
         
        If extended == True, returns a dict:

            gitstamp      -- the last git commit hash
            gitdate       -- the last git commit date on the builder machine
            builddate     -- the time of the build
            version       -- something like "1.3.2"
            version_tuple -- something like [ 1, 3, 2 ]
        """

        config = ConfigParser()
        config.read("/etc/cobbler/version")
        data = {}
        data["gitdate"] = config.get("cobbler","gitdate")
        data["gitstamp"] = config.get("cobbler","gitstamp")
        data["builddate"] = config.get("cobbler","builddate")
        data["version"] = config.get("cobbler","version")
        # don't actually read the version_tuple from the version file
        data["version_tuple"] = []
        for num in data["version"].split("."):
            data["version_tuple"].append(int(num))

        if not extended:
            # for backwards compatibility and use with koan's comparisons
            elems = data["version_tuple"]
            return int(elems[0]) + 0.1*int(elems[1]) + 0.001*int(elems[2])
        else:
            return data
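
A quick usage sketch, assuming /etc/cobbler/version holds version = 1.3.2 (the holder object is hypothetical):

api = CobblerAPI()            # hypothetical object exposing version()
print(api.version())          # -> 1.302  (1 + 0.1*3 + 0.001*2)
info = api.version(extended=True)
print(info["version_tuple"])  # -> [1, 3, 2]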
Example #7
def _getVersionText():
    ver = config.verinfo.ver
    cur = config.get('__xvmVersion')
    msg = ''
    if ver is not None:
        if utils.compareVersions(ver, cur) == 1:
            msg += '{{l10n:ver/newVersion:%s:%s}}\n' % (ver, config.verinfo.message)
        if cur.endswith('-dev'):
            if config.get('region').lower() == 'ru':
                msg += """
<font color='#FF0000'>Внимание!</font>
Установлена тестовая сборка XVM, не рекомендуемая для неопытных пользователей.
Некоторые функции могут работать неправильно, или не работать вообще.

Если вы тестируете XVM, проигнорируйте это сообщение.

<b>Если вы простой пользователь, пожалуйста, используйте стабильную версию с официального сайта мода XVM: <a href='#XVM_SITE_DL#'>www.ModXVM.com</a></b>
"""
            else:
                msg += """
<font color='#FF0000'>Warning!</font>
You've installed a nightly build of XVM, which is not recommended for inexperienced users.
Some functionality may work incorrectly or may not work at all.

If you are testing XVM, you can ignore this message.

<b>If you're just a player and not a tester of XVM, please use a stable version instead of nightly builds. Download the stable version from the official website of XVM: <a href='#XVM_SITE_DL#'>www.ModXVM.com</a></b>
"""
    return msg
Example #8
    def testInvalidDomain(self):
        # can't be just a bare top-level domain
        domain = config.get('domain')
        # get top-level domain. e.g.: 'test.hdf.io' -> 'hdf.io'
        npos = domain.find('.')
        topdomain = domain[npos+1:]
        req = self.endpoint + "/"
        headers = {'host': topdomain}
        rsp = requests.get(req, headers=headers)
        self.failUnlessEqual(rsp.status_code, 403)  # 403 == Forbidden

        domain = 'two.dots..are.bad.' + config.get('domain')
        req = self.endpoint + "/"
        headers = {'host': domain}
        rsp = requests.get(req, headers=headers)
        self.failUnlessEqual(rsp.status_code, 400)  # 400 == bad syntax

        domain = 'missingenddot' + topdomain
        req = self.endpoint + "/"
        headers = {'host': domain}
        rsp = requests.get(req, headers=headers)
        self.failUnlessEqual(rsp.status_code, 400)  # 400 == bad syntax

        # just a dot is no good
        domain = '.' + topdomain
        req = self.endpoint + "/"
        headers = {'host': domain}
        rsp = requests.get(req, headers=headers)
        self.failUnlessEqual(rsp.status_code, 400)  # 400 == bad syntax

        domain = '.dot.in.front.is.bad.' + config.get('domain')
        req = self.endpoint + "/"
        headers = {'host': domain}
        rsp = requests.get(req, headers=headers)
        self.failUnlessEqual(rsp.status_code, 400)  # 400 == bad syntax
Example #9
  def get_content(cls, abspath, start=None, end=None):
    """
    Check if specific files are being requested and return them modified to
    contain a bunch of additional files, as specified by config lists above.
    """
    serverdir = os.path.dirname(os.path.realpath(__file__)) + "/"
    jspath = '../webapp/js'
    basepath = os.path.dirname(os.path.realpath(serverdir + jspath))
    relpath = abspath[len(basepath)+1:]

    if relpath == 'js/main.js' and config.get('Optimization', 'optimize_js'):
      js = cls.COMBINED_JS
      # The default behavior is to cache the combined files in memory
      # for as long as the server is running. debug_mode prevents this.
      if config.get('App', 'debug_mode'):
        cls.COMBINED_JS = ''
      return js

    elif relpath == 'css/layout.css' and \
      config.get('Optimization', 'optimize_css'):
      css = cls.COMBINED_CSS
      if config.get('App', 'debug_mode'):
        cls.COMBINED_CSS = ''
      return css
    else:
      return super(HubbleStaticFileHandler, cls).get_content(abspath, start, end)
Example #10
  def render_string(self, template_name, **kwargs):
    """
    Combine all templates found in templates_dir into one string and insert
    them into index.html in order to reduce HTTP requests.
    """
    if not config.get('Optimization', 'optimize_templates'):
      return super(HubbleIndexHandler, self).render_string(template_name, **kwargs)
    if self.cached_index != '':
      return self.cached_index

    indexfile = open('../webapp/index.html', 'r')
    indexhtml = indexfile.read()
    indexfile.close()
    basepath = os.path.dirname(os.path.realpath(__file__)) + '/../webapp'
    minifier = MinCatenator(basepath)
    templates = minifier.mincatenate(file_list=config.get('Optimization', 'templates_dir'))
    main_div = '<div id="page_content"></div>'
    new_main_div = "{}\n{}".format(main_div, templates)
    epoch_time = str(time.time())
    new_content = indexhtml.replace(main_div, new_main_div).replace('XXX', epoch_time)

    # Cache this in memory for as long as the server is running,
    # unless debug_mode is on (matching get_content above).
    if not config.get('App', 'debug_mode'):
      self.cached_index = new_content

    return new_content
Example #11
def prepare():
    """ Initialise unicodepage module. """
    codepage = config.get('codepage')
    if not codepage:
        codepage = '437'
    state.console_state.codepage = Codepage(
                            codepage, box_protect=not config.get('nobox'))
Example #12
    def wrap(*args, **kwargs):
        kwargs.update({
            "email": config.get("email"),
            "password": config.get("password")
        })
        o = func(*args, **kwargs)
        return o()
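
The snippet reads like the inner function of a decorator that injects stored credentials and then invokes the callable returned by func. A hedged sketch of the enclosing decorator (an assumption; it is not shown in the source):

def with_credentials(func):
    # Hypothetical outer decorator matching the wrap() above: inject
    # credentials from config, call func, then call its return value.
    def wrap(*args, **kwargs):
        kwargs.update({
            "email": config.get("email"),
            "password": config.get("password")
        })
        o = func(*args, **kwargs)
        return o()
    return wrap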
Example #13
    def make_menu(self):
        proxy_stat = self.get_proxy_state()
        gae_proxy_checked = win32_adapter.fState.MFS_CHECKED if proxy_stat=="gae" else 0
        x_tunnel_checked = win32_adapter.fState.MFS_CHECKED if proxy_stat=="x_tunnel" else 0
        pac_checked = win32_adapter.fState.MFS_CHECKED if proxy_stat=="pac" else 0
        disable_checked = win32_adapter.fState.MFS_CHECKED if proxy_stat=="disable" else 0

        if lang_code == "zh_CN":
            menu_options = [(u"设置", None, self.on_show, 0)]
            if config.get(["modules", "gae_proxy", "auto_start"], 0) == 1:
                menu_options.append((u"全局通过GAEProxy代理", None, self.on_enable_gae_proxy, gae_proxy_checked))

            if config.get(["modules", "x_tunnel", "auto_start"], 0) == 1:
                menu_options.append((u"全局通过X-Tunnel代理", None, self.on_enable_x_tunnel, x_tunnel_checked))

            menu_options += [
                        (u"全局PAC智能代理", None, self.on_enable_pac, pac_checked),
                        (u"取消全局代理", None, self.on_disable_proxy, disable_checked),
                        (u'退出', None, SysTrayIcon.QUIT, False)]
        else:
            menu_options = [(u"Config", None, self.on_show, 0)]
            if config.get(["modules", "gae_proxy", "auto_start"], 0) == 1:
                menu_options.append((u"Set Global GAEProxy Proxy", None, self.on_enable_gae_proxy, gae_proxy_checked))

            if config.get(["modules", "x_tunnel", "auto_start"], 0) == 1:
                menu_options.append((u"Set Global X-Tunnel Proxy", None, self.on_enable_x_tunnel, x_tunnel_checked))

            menu_options += [
                (u"Set Global PAC Proxy", None, self.on_enable_pac, pac_checked),
                (u"Disable Global Proxy", None, self.on_disable_proxy, disable_checked),
                (u'Quit', None, SysTrayIcon.QUIT, False)]

        return tuple(menu_options)
Example #14
def send_task(task):
    """Sends task to random free worker"""

    free_workers = []

    for item in workers:
        if not item.is_busy():
            free_workers.append(item)

    if free_workers:
        free_worker = random.choice(free_workers)
        print 'Sending task {0} to {1}'.format(
            task._id, 
            free_worker.name
        )

        task.set_worker(free_worker.name)

        result = {
            '_id': task._id,
            'data': task.data,
            'worker_id': free_worker._id,
            'direction': 'workers'
        }
        
        config.get('storage').publish(Task.CHANNEL, json.dumps(result))
Example #15
def main():
    global log
    cfg_file = "tevs.cfg"
    out_file = open("summary.csv","w")
    config.get(cfg_file)
    log = config.logger(util.root("log.txt"))

    #pdb.set_trace()
    #matches = is_this_like_them("hello there",{"hellc there":"bonjour","yabba":"dabba"})

    if not const.use_db:
        print "The configuration file indicates no database is in use."
        print "We will now build totals from the results files."
        build_totals_from_results_files()
        output_totals_from_results_files()
        return 0

    try:
        dbc = db.PostgresDB(const.dbname, const.dbuser)
        print "Generating totals from db %s, user %s" % (const.dbname, 
                                                         const.dbuser)
        print "If there are many vote ops, this may take several minutes."
        print "The next output will be the number of vote opportunities."
        qs = query_database(dbc)
    except db.DatabaseError:
        print "Although the configuration file indicates a database is in use,"
        print "we could not connect for dbname %s user %s." % (const.dbname, 
                                                               const.dbuser)
        print "We will now build totals from the results files."
        build_totals_from_results_files()
        output_totals_from_results_files()
        return 0

    return 0
Example #16
def api(version=None):
    global services

    if 'ARVADOS_DEBUG' in config.settings():
        logging.basicConfig(level=logging.DEBUG)

    if not services.get(version):
        apiVersion = version
        if not version:
            apiVersion = 'v1'
            logging.info("Using default API version. " +
                         "Call arvados.api('%s') instead." %
                         apiVersion)
        if 'ARVADOS_API_HOST' not in config.settings():
            raise Exception("ARVADOS_API_HOST is not set. Aborting.")
        url = ('https://%s/discovery/v1/apis/{api}/{apiVersion}/rest' %
               config.get('ARVADOS_API_HOST'))
        credentials = CredentialsFromEnv()

        # Use system's CA certificates (if we find them) instead of httplib2's
        ca_certs = '/etc/ssl/certs/ca-certificates.crt'
        if not os.path.exists(ca_certs):
            ca_certs = None             # use httplib2 default

        http = httplib2.Http(ca_certs=ca_certs,
                             cache=http_cache('discovery'))
        http = credentials.authorize(http)
        if re.match(r'(?i)^(true|1|yes)$',
                    config.get('ARVADOS_API_HOST_INSECURE', 'no')):
            http.disable_ssl_certificate_validation=True
        services[version] = apiclient.discovery.build(
            'arvados', apiVersion, http=http, discoveryServiceUrl=url)
    return services[version]
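
Usage follows the hint in the snippet's own log message (assuming ARVADOS_API_HOST and credentials are configured in the environment):

client = api('v1')   # builds the discovery-based client and caches it in services
client2 = api('v1')  # a second call returns the same cached service object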
Example #17
def register():
    if request.method == 'POST' and config.get('registration', bool):
        if 'username' in request.form and 'password' in request.form and \
           'email' in request.form:
            try:
                user = NewUser(request.form['username'],
                               request.form['password'],
                               request.form['email'])
                user.create()
            except exceptions.UserAlreadyExists:
                flash(messages.username_already_exists, 'error')
                return redirect(url_for('register'))
            except:
                flash(messages.register_error, 'error')
                return redirect(url_for('register'))

            if user.is_active():
                user.login()
                flash(messages.registered_and_logged_in, 'message')
                return redirect(url_for('index'))
            else:
                flash(messages.registered_and_deactivated, 'message')
                return redirect(url_for('public_index'))
        else:
            flash(messages.register_field_error, 'error')

    return render_template(get_template('register.html'),
                           registration_enabled=config.get('registration', bool))
Example #18
    def testGetDomain(self):
        filePath = "tall.h5"
        domain = getDomain(filePath)
        self.assertEqual(domain, 'tall.' + config.get('domain'))
        filePath = "somevalue"
        domain = getDomain(filePath)
        self.assertEqual(domain, 'somevalue.' + config.get('domain'))
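
The test pins down getDomain's contract; a minimal sketch consistent with it (an assumption, the real helper is not shown):

import os.path as op

def getDomain(filePath):
    # "tall.h5" -> "tall", "somevalue" -> "somevalue"
    base = op.splitext(op.basename(filePath))[0]
    return base + '.' + config.get('domain')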
Example #19
    def _set_eye_xy_flat_pos(self, x, y, do_party, do_business, in_sky):
        #
        # Basic formula is pan = atan(x), tilt = atan( (y * sin(pan)) / x )
        #

        # if y < -0.5:
        #     y = -0.5

        # self.lastXPos = x
        # self.lastYPos = y

        #y = .125

        # Units for X and Y real are "height of the eye" = 1. Scaling out so
        # that we can tweak it more here than in touch and can define a reasonable
        # addressable area
        xr = config.get("xy_scale")["x"] * x
        yr = config.get("xy_scale")["y"] * (y + 0.5)

        if do_party:
            self.p_eye_pos = eyes.xy_to_pnt([xr, yr], True, in_sky)

            # pan_rads = math.atan2(xr,1)
            # tilt_rads = math.atan2( yr * math.sin(math.fabs(pan_rads)), xr)
            # self.p_eye_pos[PAN] = math.degrees(pan_rads)
            # self.p_eye_pos[TILT] = math.degrees(tilt_rads) - 90
            # if self.p_eye_pos[TILT]  < 0:
            #     self.p_eye_pos[TILT]  += 360
            # if self.p_eye_pos[TILT]  > 180:
            #     self.p_eye_pos[TILT]  = 360-self.p_eye_pos[TILT] 

            # print "P x=%f y=%f pan=%f tilt=%f" % (xr,yr, self.p_eye_pos[PAN] , self.p_eye_pos[TILT] )
            # if self.p_eye_pos[TILT]  > 135:
            #     self.p_eye_pos[TILT]  = 135

            # if in_sky:
            #     self.p_eye_pos[PAN]  = 360-self.p_eye_pos[PAN] 

            self._notify_eye_changed(True)

        if do_business:
            # xr -= 0.25 # This is (roughly) the distance between the lights in light_to_ground units
            # pan_rads = math.atan2(xr,1)
            # tilt_rads = math.atan2( yr * math.sin(math.fabs(pan_rads)), xr)
            # self.b_eye_pos[PAN] = math.degrees(pan_rads)
            # self.b_eye_pos[TILT] = math.degrees(tilt_rads) - 90
            # if self.b_eye_pos[TILT] < 0:
            #     self.b_eye_pos[TILT] += 360
            # if self.b_eye_pos[TILT] > 180:
            #     self.b_eye_pos[TILT] = 360-self.b_eye_pos[TILT]

            # print "B x=%f y=%f pan=%f tilt=%f" % (xr,yr, self.b_eye_pos[PAN], self.b_eye_pos[TILT])
            # if self.b_eye_pos[TILT] > 135:
            #     self.b_eye_pos[TILT] = 135

            # if in_sky:
            #     self.b_eye_pos[PAN] = 360-self.b_eye_pos[PAN]

            self.b_eye_pos = eyes.xy_to_pnt([xr, yr], False, in_sky)
            self._notify_eye_changed(False)
Example #20
    def initialize(self):
        config = self.iface.getConfig()

        cache = config.get("general", "package_cache")
        cache_limit = config.get("general", "package_cache_limit")
        cache_limit = int(cache_limit) if cache_limit else 0
        cache_dir = config.get("directories", "cached_packages_dir")
        cache_dir = str(cache_dir) if cache_dir else '/var/cache/pisi/packages'

        # If pisi.conf does not have it yet, the default is to use the package cache
        if not cache or cache == "True":
            enableCache = True
        else:
            enableCache = False

        self.cacheEnabled = enableCache
        self.cacheSize = cache_limit
        self.settings.cacheGroup.setEnabled(self.cacheEnabled)
        self.settings.useCacheCheck.setChecked(enableCache)
        self.settings.useCacheSpin.setValue(cache_limit)
        self.settings.cacheDirPath.setText(cache_dir)

        bandwidth_limit = config.get("general", "bandwidth_limit")
        bandwidth_limit = int(bandwidth_limit) if bandwidth_limit else 0

        self.settings.useBandwidthLimit.setChecked(not bandwidth_limit == 0)
        self.settings.bandwidthSpin.setValue(bandwidth_limit)
Example #21
def createCampaign():
    params = {
        "recipients": {
            "list_id": config.get().MailChimpListId
        },
        "type": "regular",
        "settings": {
            "subject_line": "Olava Weekly Gaming Roundup",
            "reply_to": "*****@*****.**",
            "from_name": "Olava"
        }
    }
    response = requests.post(
        'https://us10.api.mailchimp.com/3.0/campaigns',
        auth=(
            'apikey',
            config.get().MailChimpApiKey
        ),
        json=params
    )
    try:
        response.raise_for_status()
        body = response.json()
        return body['id']
    except requests.exceptions.HTTPError as err:
        print("Error: {} {}".format(str(response.status_code), err))
        print(json.dumps(response.json(), indent=4))
        return -1
    except ValueError:
        print("Cannot decode json, got %s" % response.text)
        return -1
Example #22
    def load_module_menus(self):
        global module_menus
        module_menus = {}
        #config.load()
        modules = config.get(['modules'], None)
        for module in modules:
            values = modules[module]
            if module != "launcher" and config.get(["modules", module, "auto_start"], 0) != 1: # skip php_proxy module
                continue

            #version = values["current_version"]
            menu_path = os.path.join(root_path, module, "web_ui", "menu.yaml") # launcher & gae_proxy modules
            if not os.path.isfile(menu_path):
                continue

            #module_menu = yaml.load(file(menu_path, 'r')) # non-i18n
            # i18n code lines (Both the locale dir & the template dir are module-dependent)
            locale_dir = os.path.abspath(os.path.join(root_path, module, 'lang'))
            template_dir = os.path.abspath(os.path.join(root_path, module, 'web_ui'))
            jinja2_i18n_helper.ihelper.refresh_env(locale_dir, template_dir)
            stream = jinja2_i18n_helper.ihelper.render("menu.yaml", None)
            
            module_menu = yaml.load(stream)
            module_menus[module] = module_menu

        module_menus = sorted(module_menus.iteritems(), key=lambda (k,v): (v['menu_sort_id']))
Example #23
    def test_global_admin_page(self):
        # Load the global administration page.
        doc = self.go_as_admin('/global/admin')
        assert self.s.status == 200

        # Change some settings.
        settings_form = doc.first('form', id='save_global')
        doc = self.s.submit(settings_form,
            sms_number_to_repo=
                '{"+198765432109": "haiti", "+8101234567890": "japan"}'
        )
        assert self.s.status == 200, self.get_admin_page_error_message()

        # Reopen the admin page and check if the change took effect on the page.
        doc = self.go_as_admin('/global/admin')
        assert self.s.status == 200
        assert (simplejson.loads(
                    doc.first('textarea', id='sms_number_to_repo').text) ==
                {'+198765432109': 'haiti', '+8101234567890': 'japan'})

        # Also check if the change took effect in the config.
        assert (config.get('sms_number_to_repo') ==
            {'+198765432109': 'haiti', '+8101234567890': 'japan'})

        # Change settings again and make sure they took effect.
        settings_form = doc.first('form', id='save_global')
        doc = self.s.submit(settings_form,
            sms_number_to_repo=
                '{"+198765432109": "test", "+8101234567890": "japan"}'
        )
        assert self.s.status == 200, self.get_admin_page_error_message()
        assert (config.get('sms_number_to_repo') ==
            {'+198765432109': 'test', '+8101234567890': 'japan'})
Example #24
def prepare():
    """ Initialise the memory module """
    global field_mem_base, field_mem_start, field_mem_offset
    global code_start, stack_size, max_memory, total_memory
    # length of field record (by default 128)
    file_rec_len = config.get("max-reclen")
    # file header (at head of field memory)
    file_header_size = 194
    # number of file records
    num_files = config.get("max-files")
    # first field buffer address (workspace size; 3429 for gw-basic)
    field_mem_base = config.get("reserved-memory")
    # bytes distance between field buffers
    field_mem_offset = file_header_size + file_rec_len
    # start of 1st field = 3945, includes FCB & header of 1st field
    # used by var.py
    field_mem_start = field_mem_base + field_mem_offset + file_header_size
    # data memory model: start of code section
    code_start = field_mem_base + (num_files + 1) * field_mem_offset
    # BASIC stack (determined by CLEAR)
    # Initially, the stack space should be set to 512 bytes,
    # or one-eighth of the available memory, whichever is smaller.
    stack_size = 512
    # max available memory to BASIC (set by /m)
    max_list = config.get("max-memory")
    max_list[1] = max_list[1] * 16 if max_list[1] else max_list[0]
    max_list[0] = max_list[0] or max_list[1]
    max_memory = min(max_list) or 65534
    # total size of data segment (set by CLEAR)
    total_memory = max_memory
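
Plugging in the defaults mentioned in the comments makes the layout concrete (max-files = 3 is an assumed setting, not taken from the source):

file_rec_len = 128        # max-reclen default
file_header_size = 194
num_files = 3             # assumed max-files value
field_mem_base = 3429     # gw-basic workspace size
field_mem_offset = file_header_size + file_rec_len                       # 322
field_mem_start = field_mem_base + field_mem_offset + file_header_size   # 3945, matching the comment above
code_start = field_mem_base + (num_files + 1) * field_mem_offset         # 4717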
Example #25
def prepare():
    """ Prepare the video modes. """
    global video_capabilities, mono_monitor
    global colours16, colours16_mono, cga4_palettes, mono_tint
    global circle_aspect
    video_capabilities = config.get('video')
    if video_capabilities == 'tandy':
        circle_aspect = (3072, 2000)
    else:
        circle_aspect = (4, 3)
    mono_monitor = config.get('monitor') == 'mono'
    if video_capabilities == 'ega' and mono_monitor:
        video_capabilities = 'ega_mono'
    cga_low = config.get('cga-low')
    # set monochrome tint
    mono_tint = config.get('mono-tint')
    # build colour sets
    colours16_mono = tuple(tuple(tint*i//255 for tint in mono_tint)
                           for i in intensity16_mono)
    if mono_monitor:
        colours16 = list(colours16_mono)
    else:
        colours16 = list(colours16_colour)
    # initialise the 4-colour CGA palette
    # palette 1: Black, Ugh, Yuck, Bleah, choice of low & high intensity
    # palette 0: Black, Green, Red, Brown/Yellow, low & high intensity
    # tandy/pcjr have high-intensity white, but low-intensity colours
    # mode 5 (SCREEN 1 + colorburst on RGB) has red instead of magenta
    if video_capabilities in ('pcjr', 'tandy'):
        # pcjr does not have mode 5
        cga4_palettes = {0: (0, 2, 4, 6), 1: (0, 3, 5, 15), 5: None}
    elif cga_low:
        cga4_palettes = {0: (0, 2, 4, 6), 1: (0, 3, 5, 7), 5: (0, 3, 4, 7)}
    else:
        cga4_palettes = {0: (0, 10, 12, 14), 1: (0, 11, 13, 15), 5: (0, 11, 12, 15)}
Example #26
def toText():
    '''
        Reformat crawled HTML data files into plain-text files and write them out.
    '''
    dir = config.get("crawl", "crawldata.dir.path")
    textDir = config.get("crawl", "text.dir.path")
    fileUtils.makedirs(textDir)

    list = os.listdir(dir)
    for file in list:
        filePath = dir + file
        if os.path.isfile(filePath):
            with open(filePath, 'r', encoding='utf-8-sig') as f:
                try:
                    crawlData = json.load(f)
                except:
                    # logger.exception already appends the traceback
                    logger.exception("Could not load %s", filePath)
                    continue
            text = crawlData["html"]
            outputFile = textDir + os.path.splitext(file)[0] + ".txt"

            with open(outputFile, 'w', encoding='utf-8-sig') as f:
                f.write(text)
Example #27
def start_emulator(options):
  emulator_name = config.get('emulator')
  emulator_port = config.get('port')
  adb_device_name = 'emulator-' + str(emulator_port)
  emulator = ['emulator', '-avd', emulator_name]

  print "> Created new execution evironment. Wating for emulator..."  
  # Wait for emulator and clean logcat
  emulator.extend(options)
  print "> " + ' '.join(emulator)
  Popen(emulator)

  bootcomplete = None
  count = 0
  while bootcomplete != 1:
    p = Popen(['adb', '-s', adb_device_name, 'wait-for-device', 'shell', 'getprop', 'sys.boot_completed'], stdout=PIPE)
    out=p.communicate()
    print out
    try:
      bootcomplete = int(str(out[0]).strip())
    except:
      pass
    sleep(2)
    count+=1
    if count % 10 == 0:
      print "Waiting for boot to complete... ", bootcomplete
Example #28
def set_file_rights(path):
    '''Changes group permissions and ownership of file or directory according to configuration.

    If compiled_umask is set, will change group permissions of `path` to given value. If `path` is a
    directory it will also set all executable bits to one.

    If compiled_groupid is set, will change group ownership to given gid.'''
    try:
        umask = int(config.get('global', 'compiled_umask'))
        if os.path.isdir(path):
            import stat
            umask += stat.S_IXUSR + stat.S_IXGRP + stat.S_IXOTH
    except:
        umask = None
    try:
        gid = int(config.get('global', 'compiled_groupid'))
    except:
        gid = None

    # Correcting rights and group ownership, if configured
    if umask:
        import stat
        os.chmod(path, umask)
    if gid:
        os.chown(path, -1, gid)
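
To make the permission arithmetic concrete (the compiled_umask value below is assumed, not taken from any real config):

import stat
mode = 0o664                                         # assumed compiled_umask value
mode += stat.S_IXUSR + stat.S_IXGRP + stat.S_IXOTH   # adds 0o111 for directories
assert mode == 0o775                                 # directories get all executable bits set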
Example #29
def main():

    # change path to launcher
    global __file__
    __file__ = os.path.abspath(__file__)
    if os.path.islink(__file__):
        __file__ = getattr(os, "readlink", lambda x: x)(__file__)
    os.chdir(os.path.dirname(os.path.abspath(__file__)))

    web_control.confirm_xxnet_exit()

    setup_win_python.check_setup()

    module_init.start_all_auto()

    web_control.start()

    if has_desktop and config.get(["modules", "launcher", "popup_webui"], 1) == 1:
        webbrowser.open("http://127.0.0.1:8085/")

    update.start()

    if config.get(["modules", "launcher", "show_systray"], 1):
        sys_tray.serve_forever()
    else:
        while True:
            time.sleep(100)

    module_init.stop_all()
    sys.exit()
Example #30
    def getFileName(self, basename):
        if config.get("use_h5py"):
            if not op.isdir("out"):
                os.mkdir("out")
            return "out/" + basename + ".h5"
        else:
            return basename + "." + config.get("domain")
Example #31
    app.router.add_get('/nodeinfo/{statkey}', nodeinfo)
    app.router.add_get('/info', info)
    app.router.add_post('/register', register)

    return app


#
# Main
#

if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    app = loop.run_until_complete(init(loop))

    # create a client Session here so that all client requests
    #   will share the same connection pool
    max_tcp_connections = int(config.get("max_tcp_connections"))
    app['client'] = ClientSession(
        loop=loop, connector=TCPConnector(limit=max_tcp_connections))

    session = aiobotocore.get_session(loop=loop)
    app["session"] = session
    app["loop"] = loop

    asyncio.ensure_future(healthCheck(app), loop=loop)
    head_port = config.get("head_port")
    log.info("Starting service on port: {}".format(head_port))
    log.debug("debug test")
    run_app(app, port=int(head_port))
Example #32
def train(dset, model):
    n_epoch = 300
    batch_size = 128

    loader = data.DataLoader(dataset=db(dset.S_tr, dset.X_tr, dset.y_tr),
                             batch_size=batch_size,
                             shuffle=True)

    model, recon_loss = get(model)
    dualAE = model(dset.X_tr.shape[1], dset.S_tr.shape[1], dset.nclass)

    net = dualAE.cuda()

    print('-------training V-Net --------')
    V_optim = torch.optim.Adam(net.parameters(), lr=0.0005, weight_decay=1e-5)
    for epoch in xrange(0):  # note: xrange(0) runs zero iterations (pretraining disabled)
        for i, (_, V, _) in enumerate(loader):
            dV = data_corrupt(V, 0.2)
            dV = Variable(dV).cuda()
            V = Variable(V).cuda()

            V_optim.zero_grad()
            latent, out = net.V_net_forward(dV)

            loss = recon_loss(out, V)

            if model == 'CAE':
                loss += cae_loss(net.get_VAE_W(), latent) * 0.01

            loss.backward()
            V_optim.step()

            if (i + 1) % 100 == 0:
                print('Epoch [%d/40] [%d/%d], Loss: %.5f' %
                      (epoch + 1, i + 1, len(loader), loss.data[0]))

    print('-------training S-Net --------')
    S_optim = torch.optim.Adam(net.parameters(), lr=0.00001, weight_decay=1e-5)
    for epoch in xrange(0):  # note: xrange(0) runs zero iterations (pretraining disabled)
        for i, (S, _, _) in enumerate(loader):
            dS = data_corrupt(S)
            dS = Variable(dS).cuda()
            S = Variable(S).cuda()

            S_optim.zero_grad()
            latent, out = net.S_net_forward(dS)
            loss = recon_loss(out, S)

            if model == 'CAE':
                loss += cae_loss(net.get_SAE_W(), latent)

            loss.backward()
            S_optim.step()

            if (i + 1) % 100 == 0:
                print('Epoch [%d/40] [%d/%d], Loss: %.5f' %
                      (epoch + 1, i + 1, len(loader), loss.data[0]))

    criter = nn.CrossEntropyLoss()
    optim = torch.optim.Adam(net.parameters(), lr=0.001,
                             weight_decay=1e-5)  #0.0005
    for epoch in xrange(n_epoch):
        for i, (S, V, L) in enumerate(loader):
            dS = data_corrupt(S, 0.01)
            dS = Variable(dS).cuda()
            S, V = Variable(S).cuda(), Variable(V).cuda()
            L = Variable(L).cuda()

            optim.zero_grad()
            latent, pred, out_v, out_s = net(dS)

            # reconstruction error
            recon_v_loss = recon_loss(out_v, V)
            recon_s_loss = recon_loss(out_s, S)
            #         clf_loss = criter(pred, L)

            loss = recon_v_loss + recon_s_loss  #+ clf_loss*10
            if model == 'CAE':
                loss += cae_loss(net.get_SAE_W(), latent) * 0.001
            loss.backward()
            optim.step()

            if (i + 1) % 50 == 0:
                print(
                    'Epoch [%d/%d] [%d/%d], Loss: %.5f  --recon_v_loss: %.5f, recon_s_loss: %.5f, clf_loss: %.5f'
                    % (epoch + 1, n_epoch, i + 1, len(loader), loss.data[0],
                       recon_v_loss.data[0], recon_s_loss.data[0],
                       0.0))  #, clf_loss.data[0]))
        if (epoch + 1) % 200 == 0:
            optim = torch.optim.Adam(net.parameters(),
                                     lr=0.0001,
                                     weight_decay=1e-5)

        kNN(net, dset)
    return net
Example #33
    def getUsers(self):
        fp = open(config.get("ssFile"), 'r')
        value = json.load(fp)
        fp.close()
        return value['port_password']
Example #34
async def healthCheck(app):
    """ Periodic method that pings each active node and verifies it is still healthy.  
    If node doesn't respond, free up the node slot (the node can re-register if it comes back)'"""

    app["last_health_check"] = int(time.time())

    # update/initialize root object before starting node updates
    headnode_key = getHeadNodeS3Key()
    log.info("headnode S3 key: {}".format(headnode_key))
    headnode_obj_found = False
    head_url = getUrl(app["head_host"], app["head_port"])

    nodes = app["nodes"]
    while True:
        # sleep for a bit
        sleep_secs = config.get("head_sleep_time")
        await asyncio.sleep(sleep_secs)

        now = int(time.time())
        log.info("health check {}".format(unixTimeToUTC(now)))

        if not headnode_obj_found:
            log.info("checking for headnode_key: {}".format(headnode_key))
            if await isS3Obj(app, headnode_key):
                headnode_obj_found = True
                headnode_stats = await getS3ObjStats(app, headnode_key)
                log.info("headnode_stats: {}".format(headnode_stats))
            else:
                # first time hsds has run with this bucket name?
                log.warn("need to create headnode obj")
                head_state = {}
                head_state["created"] = int(time.time())
                head_state["id"] = app["id"]
                head_state["last_health_check"] = app["last_health_check"]
                head_state["head_url"] = head_url
                log.info("write head_state to S3: {}".format(head_state))
                try:
                    await putS3JSONObj(app, headnode_key, head_state)
                except HTTPInternalServerError as hpe:
                    # Might be bad AWS config, transient S3 error, or minio not initialized yet...
                    log.warn(
                        "HTTPInternalServerError writing head_state: {}: {}".
                        format(headnode_key, str(hpe)))
            continue  # start health check on next iteration

        head_state = await getS3JSONObj(app, headnode_key)
        log.info("head_state: {}".format(head_state))
        log.info("elapsed time since last health check: {}".format(
            elapsedTime(head_state["last_health_check"])))
        if head_state['id'] != app['id']:
            log.warn("mis-match bucket head id: {}".format(head_state["id"]))
            if now - head_state["last_health_check"] < sleep_secs * 4:
                log.warn("other headnode may be active")
                continue  # skip node checks and loop around again
            else:
                log.warn(
                    "other headnode is not active, making this headnode leader"
                )
                head_state['id'] = app['id']
        else:
            log.info("head_state id matches S3 Object")

        head_state["last_health_check"] = now
        app["last_health_check"] = now
        head_state["head_url"] = head_url
        log.info("write head_state to S3: {}".format(head_state))
        await putS3JSONObj(app, headnode_key, head_state)

        log.info("putS3JSONObj complete")
        fail_count = 0
        HEALTH_CHECK_RETRY_COUNT = 1  # times to try before calling a node dead
        for node in nodes:
            if node["host"] is None:
                fail_count += 1
                continue
            url = getUrl(node["host"], node["port"]) + "/info"
            try:
                rsp_json = await http_get(app, url)
                if "node" not in rsp_json:
                    log.error("Unexpected response from node")
                    fail_count += 1
                    continue
                node_state = rsp_json["node"]
                node_id = node_state["id"]

                if node_id != node['id']:
                    log.warn("unexpected node_id: {} (expecting: {})".format(
                        node_id, node['id']))
                    node['host'] = None
                    node['id'] = None
                    fail_count += 1
                    continue

                if 'number' in node_state and node_state['number'] != node[
                        'node_number']:
                    msg = "unexpected node_number got {} (expecting: {})"
                    log.warn(
                        msg.format(node_state["number"], node['node_number']))
                    node['host'] = None
                    node['id'] = None
                    fail_count += 1
                    continue

                # save off other useful info from the node
                app_node_stats = app["node_stats"]
                node_stats = {}
                for k in NODE_STAT_KEYS:
                    node_stats[k] = rsp_json[k]
                app_node_stats[node_id] = node_stats
                # mark the last time we got a response from this node
                node["healthcheck"] = unixTimeToUTC(int(time.time()))
                node["failcount"] = 0  # rest
            except OSError as ose:
                log.warn("OSError for req: {}: {}".format(url, str(ose)))
                # node has gone away?
                node["failcount"] += 1
                if node["failcount"] >= HEALTH_CHECK_RETRY_COUNT:
                    log.warn("node {}:{} not responding".format(
                        node["host"], node["port"]))
                    fail_count += 1

            except HTTPInternalServerError as hpe:
                log.warn("HTTPInternalServerError for req: {}: {}".format(
                    url, str(hpe)))
                # node has gone away?
                node["failcount"] += 1
                if node["failcount"] >= HEALTH_CHECK_RETRY_COUNT:
                    log.warn("removing {}:{} from active list".format(
                        node["host"], node["port"]))
                    fail_count += 1
            except TimeoutError as toe:
                log.warn("Timeout error for req: {}: {}".format(url, str(toe)))
                # node has gone away?
                node["failcount"] += 1
                if node["failcount"] >= HEALTH_CHECK_RETRY_COUNT:
                    log.warn("removing {}:{} from active list".format(
                        node["host"], node["port"]))
                    fail_count += 1
        log.info("node health check fail_count: {}".format(fail_count))
        if fail_count > 0:
            if app["cluster_state"] == "READY":
                # go back to INITIALIZING state until another node is registered
                log.warn(
                    "Fail_count > 0, Setting cluster_state from READY to INITIALIZING"
                )
                app["cluster_state"] = "INITIALIZING"
        elif fail_count == 0 and app["cluster_state"] != "READY":
            log.info("All nodes healthy, changing cluster state to READY")
            app["cluster_state"] = "READY"
Example #35
import threading
import time
from common import logger
import json
import netMonitor
import api
import config

headers = {'content-type': 'application/json'}

fileName = config.get("flow_file")

task_time = config.get("task_time")


class myThread(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        self.policy = config.get("flow")

    def run(self):
        self.taskLoad()

    def taskLoad(self):
        self.timer_start()
        while True:
            time.sleep(60)

    def timer_start(self):
        t = threading.Timer(int(task_time), self.test_func)
        t.start()
Example #36
def getAdminUser():
    # Returns the EZID administrator user.
    import config
    return getByUsername(config.get("auth.admin_username"))
Example #37
settings = config.get(name='phpipam',
                      values=[
                          {
                              'value': 'url',
                              'prompt':
                              "Please enter the full URL of your "
                              "phpIPAM installation including the API"
                              " app_id \nex. https://phpipam."
                              "mycompanyserver.com/api/app_id/ \n"
                              "URL> ",
                              'optional': False,
                              'sensitive': False
                          },
                          {
                              'value': 'username',
                              'prompt':
                              "Please enter your phpIPAM username: \n"
                              "Username> ",
                              'optional': True,
                              'sensitive': False
                          },
                          {
                              'value': 'password',
                              'prompt':
                              "Please enter your phpIPAM password: \n"
                              "Password> ",
                              'optional': True,
                              'sensitive': True
                          },
                      ])
Example #38
    def __init__(self):
        threading.Thread.__init__(self)
        self.policy = config.get("flow")
Example #39
    def testPostChunkedRefDataset(self):
        print("testPostChunkedRefDataset", self.base_domain)
        headers = helper.getRequestHeaders(domain=self.base_domain)

        hdf5_sample_bucket = config.get("hdf5_sample_bucket")
        if not hdf5_sample_bucket:
            print(
                "hdf5_sample_bucket config not set, skipping testPostChunkedRefDataset"
            )
            return

        s3path = "s3://" + hdf5_sample_bucket + "/data/hdf5test" + "/snp500.h5"
        SNP500_ROWS = 3207353

        snp500_json = helper.getHDF5JSON("snp500.json")
        if not snp500_json:
            print(
                "snp500.json file not found, skipping testPostChunkedRefDataset"
            )
            return

        if "snp500.h5" not in snp500_json:
            self.assertTrue(False)

        chunk_dims = [
            60000,
        ]  # chunk layout used in snp500.h5 file

        chunk_info = snp500_json["snp500.h5"]
        dset_info = chunk_info["/dset"]
        if "byteStreams" not in dset_info:
            self.assertTrue(False)
        byteStreams = dset_info["byteStreams"]

        # construct map of chunks
        chunks = {}
        for item in byteStreams:
            index = item["index"]
            chunk_key = str(index)
            chunks[chunk_key] = (item["file_offset"], item["size"])

        # get domain
        req = helper.getEndpoint() + '/'
        rsp = requests.get(req, headers=headers)
        rspJson = json.loads(rsp.text)
        self.assertTrue("root" in rspJson)
        root_uuid = rspJson["root"]

        # define types we need

        s10_type = {
            "charSet": "H5T_CSET_ASCII",
            "class": "H5T_STRING",
            "length": 10,
            "strPad": "H5T_STR_NULLPAD"
        }
        s4_type = {
            "charSet": "H5T_CSET_ASCII",
            "class": "H5T_STRING",
            "length": 4,
            "strPad": "H5T_STR_NULLPAD"
        }

        fields = ({
            'name': 'date',
            'type': s10_type
        }, {
            'name': 'symbol',
            'type': s4_type
        }, {
            'name': 'sector',
            'type': 'H5T_STD_I8LE'
        }, {
            'name': 'open',
            'type': 'H5T_IEEE_F32LE'
        }, {
            'name': 'high',
            'type': 'H5T_IEEE_F32LE'
        }, {
            'name': 'low',
            'type': 'H5T_IEEE_F32LE'
        }, {
            'name': 'volume',
            'type': 'H5T_IEEE_F32LE'
        }, {
            'name': 'close',
            'type': 'H5T_IEEE_F32LE'
        })

        datatype = {'class': 'H5T_COMPOUND', 'fields': fields}

        data = {
            "type": datatype,
            "shape": [
                SNP500_ROWS,
            ]
        }
        layout = {
            "class": 'H5D_CHUNKED_REF',
            "file_uri": s3path,
            "dims": chunk_dims,
            "chunks": chunks
        }
        data['creationProperties'] = {'layout': layout}

        req = self.endpoint + '/datasets'
        rsp = requests.post(req, data=json.dumps(data), headers=headers)
        self.assertEqual(rsp.status_code, 201)
        rspJson = json.loads(rsp.text)
        dset_id = rspJson["id"]
        self.assertTrue(helper.validateId(dset_id))

        # link new dataset as 'dset'
        name = "dset"
        req = self.endpoint + "/groups/" + root_uuid + "/links/" + name
        payload = {"id": dset_id}
        rsp = requests.put(req, data=json.dumps(payload), headers=headers)
        self.assertEqual(rsp.status_code, 201)

        # do a point selection
        req = self.endpoint + "/datasets/" + dset_id + "/value"
        points = [
            1234567,
        ]
        body = {"points": points}
        rsp = requests.post(req, data=json.dumps(body), headers=headers)
        if rsp.status_code == 404:
            print(
                "s3object: {} not found, skipping point chunk ref test".format(
                    s3path))
        else:
            self.assertEqual(rsp.status_code, 200)
            rspJson = json.loads(rsp.text)
            self.assertTrue("value" in rspJson)
            value = rspJson["value"]
            self.assertEqual(len(value), len(points))
            item = value[0]
            self.assertEqual(item[0], '1998.10.22')
            self.assertEqual(item[1], 'MHFI')
            self.assertEqual(item[2], 3)
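
For reference, the chunks map built above keys stringified chunk indices to (file_offset, size) pairs, which is the shape the H5D_CHUNKED_REF layout expects. Illustrative entries (offsets and sizes invented, not read from snp500.h5):

chunks = {
    "0": (4016, 2100000),     # rows 0..59999
    "1": (2104016, 2100000),  # rows 60000..119999
}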
Example #40
# The following variables are used in the remove_dc() method.
# Since we know the DC bias of our circuit, we can compute the
# digital value and simply remove it from all incoming signals.
#
# NOTE: This methodology assumes the DC bias remains constant
# for the entirety of sampling. Any fluctuations in the DC bias
# point can skew the output. There are algorithms that detect
# the DC bias on the fly. This may be worth looking into in the
# future.
#
# The variables are defined outside the function to avoid computing
# them every time the function is called.
analog_ref_volt = cfg.get('sampling', 'analog_ref_volt')  # V
bit_res = cfg.get('sampling', 'bit_res')  # bits
dc_bias_volt = cfg.get('sampling', 'dc_bias_volt')  # V

max_adc_val = pow(2, bit_res) - 1

# Compute the digital DC bias value
digital_dc_bias = int((max_adc_val / analog_ref_volt) * dc_bias_volt)


def remove_dc(data):
    """ Remove the DC bias from a sample

        Parameters
        ----------
        data : int
            Raw ADC sample value.

        Returns
        -------
        int
            The sample with the precomputed digital DC bias subtracted.
    """
    return data - digital_dc_bias
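
With typical sampling settings (assumed values, not read from any config file) the precomputed bias works out as:

analog_ref_volt = 3.3   # V (assumed)
bit_res = 10            # bits (assumed)
dc_bias_volt = 1.65     # V (assumed)
max_adc_val = pow(2, bit_res) - 1                                      # 1023
digital_dc_bias = int((max_adc_val / analog_ref_volt) * dc_bias_volt)  # 511
sample = 700
print(sample - digital_dc_bias)  # -> 189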
Example #41
    def testPostContiguousDataset(self):
        print("testPostContiguousDataset", self.base_domain)
        headers = helper.getRequestHeaders(domain=self.base_domain)

        hdf5_sample_bucket = config.get("hdf5_sample_bucket")
        if not hdf5_sample_bucket:
            print(
                "hdf5_sample_bucket config not set, skipping testPostContiguousDataset"
            )
            return

        tall_json = helper.getHDF5JSON("tall.json")
        if not tall_json:
            print(
                "tall.json file not found, skipping testPostContiguousDataset")
            return

        if "tall.h5" not in tall_json:
            self.assertTrue(False)

        chunk_info = tall_json["tall.h5"]
        if "/g1/g1.1/dset1.1.2" not in chunk_info:
            self.assertTrue(False)

        dset112_info = chunk_info["/g1/g1.1/dset1.1.2"]
        if "byteStreams" not in dset112_info:
            self.assertTrue(False)
        byteStreams = dset112_info["byteStreams"]

        # should be just one element for this contiguous dataset
        self.assertEqual(len(byteStreams), 1)
        byteStream = byteStreams[0]
        dset112_offset = byteStream["file_offset"]
        dset112_size = byteStream["size"]
        self.assertEqual(dset112_size, 80)

        if "/g2/dset2.2" not in chunk_info:
            self.assertTrue(False)
        dset22_info = chunk_info["/g2/dset2.2"]
        if "byteStreams" not in dset22_info:
            self.assertTrue(False)
        byteStreams = dset22_info["byteStreams"]
        self.assertEqual(len(byteStreams), 1)
        byteStream = byteStreams[0]
        dset22_offset = byteStream["file_offset"]
        dset22_size = byteStream["size"]
        self.assertEqual(dset22_size, 60)

        # get domain
        req = helper.getEndpoint() + '/'
        rsp = requests.get(req, headers=headers)
        rspJson = json.loads(rsp.text)
        self.assertTrue("root" in rspJson)
        root_uuid = rspJson["root"]

        # create dataset for /g1/g1.1/dset1.1.2
        s3path = "s3://" + hdf5_sample_bucket + "/data/hdf5test" + "/tall.h5"
        data = {"type": 'H5T_STD_I32BE', "shape": 20}
        layout = {
            "class": 'H5D_CONTIGUOUS_REF',
            "file_uri": s3path,
            "offset": dset112_offset,
            "size": dset112_size
        }
        data['creationProperties'] = {'layout': layout}

        req = self.endpoint + '/datasets'
        rsp = requests.post(req, data=json.dumps(data), headers=headers)
        self.assertEqual(rsp.status_code, 201)
        rspJson = json.loads(rsp.text)
        dset112_id = rspJson["id"]
        self.assertTrue(helper.validateId(dset112_id))

        # link new dataset as 'dset112'
        name = "dset112"
        req = self.endpoint + "/groups/" + root_uuid + "/links/" + name
        payload = {"id": dset112_id}
        rsp = requests.put(req, data=json.dumps(payload), headers=headers)
        self.assertEqual(rsp.status_code, 201)

        # create dataset for /g2/dset2.2
        data = {"type": 'H5T_IEEE_F32BE', "shape": [3, 5]}
        layout = {
            "class": 'H5D_CONTIGUOUS_REF',
            "file_uri": s3path,
            "offset": dset22_offset,
            "size": dset22_size
        }
        data['creationProperties'] = {'layout': layout}

        req = self.endpoint + '/datasets'
        rsp = requests.post(req, data=json.dumps(data), headers=headers)
        self.assertEqual(rsp.status_code, 201)
        rspJson = json.loads(rsp.text)
        dset22_id = rspJson["id"]
        self.assertTrue(helper.validateId(dset22_id))

        # link new dataset as 'dset22'
        name = "dset22"
        req = self.endpoint + "/groups/" + root_uuid + "/links/" + name
        payload = {"id": dset22_id}
        rsp = requests.put(req, data=json.dumps(payload), headers=headers)
        self.assertEqual(rsp.status_code, 201)

        # do a point selection read on dset112
        req = self.endpoint + "/datasets/" + dset112_id + "/value"
        points = [2, 3, 5, 7, 11, 13, 17, 19]
        body = {"points": points}
        rsp = requests.post(req, data=json.dumps(body), headers=headers)
        if rsp.status_code == 404:
            print(
                "s3object: {} not found, skipping point read chunk reference contiguous test"
                .format(s3path))
            return

        self.assertEqual(rsp.status_code, 200)
        rspJson = json.loads(rsp.text)
        self.assertTrue("value" in rspJson)
        ret_value = rspJson["value"]
        self.assertEqual(len(ret_value), len(points))
        self.assertEqual(
            ret_value,
            points)  # values equal the point indices, since the dataset holds 0..19

        # do a point selection read on dset22
        req = self.endpoint + "/datasets/" + dset22_id + "/value"
        points = [(0, 0), (1, 1), (2, 2)]
        body = {"points": points}
        rsp = requests.post(req, data=json.dumps(body), headers=headers)
        self.assertEqual(rsp.status_code, 200)
        rspJson = json.loads(rsp.text)
        self.assertTrue("value" in rspJson)
        ret_value = rspJson["value"]
        self.assertEqual(len(ret_value), len(points))
Example #42
    def testPostChunkedRefIndirectDataset(self):
        print("testPostChunkedRefIndirectDataset", self.base_domain)
        headers = helper.getRequestHeaders(domain=self.base_domain)

        hdf5_sample_bucket = config.get("hdf5_sample_bucket")
        if not hdf5_sample_bucket:
            print(
                "hdf5_sample_bucket config not set, skipping testPostChunkedRefIndirectDataset"
            )
            return

        s3path = "s3://" + hdf5_sample_bucket + "/data/hdf5test" + "/snp500.h5"
        SNP500_ROWS = 3207353

        snp500_json = helper.getHDF5JSON("snp500.json")
        if not snp500_json:
            print("snp500.json file not found, skipping testChunkedRefDataset")
            return

        if "snp500.h5" not in snp500_json:
            self.assertTrue(False)

        chunk_dims = [
            60000,
        ]  # chunk layout used in snp500.h5 file
        num_chunks = (SNP500_ROWS // chunk_dims[0]) + 1

        chunk_info = snp500_json["snp500.h5"]
        dset_info = chunk_info["/dset"]
        if "byteStreams" not in dset_info:
            self.assertTrue(False)
        byteStreams = dset_info["byteStreams"]

        self.assertEqual(len(byteStreams), num_chunks)

        chunkinfo_data = [(0, 0)] * num_chunks

        # fill the chunk-info list with info from the byteStreams data
        for i in range(num_chunks):
            item = byteStreams[i]
            index = item["index"]
            chunkinfo_data[index] = (item["file_offset"], item["size"])

        # get domain
        req = helper.getEndpoint() + '/'
        rsp = requests.get(req, headers=headers)
        rspJson = json.loads(rsp.text)
        self.assertTrue("root" in rspJson)
        root_uuid = rspJson["root"]

        # create a dataset to hold the chunk info
        fields = ({
            'name': 'offset',
            'type': 'H5T_STD_I64LE'
        }, {
            'name': 'size',
            'type': 'H5T_STD_I32LE'
        })
        chunkinfo_type = {'class': 'H5T_COMPOUND', 'fields': fields}
        req = self.endpoint + "/datasets"
        # store one location per chunk
        chunkinfo_dims = [
            num_chunks,
        ]
        payload = {'type': chunkinfo_type, 'shape': chunkinfo_dims}
        rsp = requests.post(req, data=json.dumps(payload), headers=headers)
        self.assertEqual(rsp.status_code, 201)  # create dataset
        rspJson = json.loads(rsp.text)
        chunkinfo_uuid = rspJson['id']
        self.assertTrue(helper.validateId(chunkinfo_uuid))

        # link new dataset as 'chunks'
        name = "chunks"
        req = self.endpoint + "/groups/" + root_uuid + "/links/" + name
        payload = {"id": chunkinfo_uuid}
        rsp = requests.put(req, data=json.dumps(payload), headers=headers)
        self.assertEqual(rsp.status_code, 201)

        # write to the chunkinfo dataset
        payload = {'value': chunkinfo_data}

        req = self.endpoint + "/datasets/" + chunkinfo_uuid + "/value"
        rsp = requests.put(req, data=json.dumps(payload), headers=headers)
        self.assertEqual(rsp.status_code, 200)  # write value

        # define types we need

        s10_type = {
            "charSet": "H5T_CSET_ASCII",
            "class": "H5T_STRING",
            "length": 10,
            "strPad": "H5T_STR_NULLPAD"
        }
        s4_type = {
            "charSet": "H5T_CSET_ASCII",
            "class": "H5T_STRING",
            "length": 4,
            "strPad": "H5T_STR_NULLPAD"
        }

        fields = ({
            'name': 'date',
            'type': s10_type
        }, {
            'name': 'symbol',
            'type': s4_type
        }, {
            'name': 'sector',
            'type': 'H5T_STD_I8LE'
        }, {
            'name': 'open',
            'type': 'H5T_IEEE_F32LE'
        }, {
            'name': 'high',
            'type': 'H5T_IEEE_F32LE'
        }, {
            'name': 'low',
            'type': 'H5T_IEEE_F32LE'
        }, {
            'name': 'volume',
            'type': 'H5T_IEEE_F32LE'
        }, {
            'name': 'close',
            'type': 'H5T_IEEE_F32LE'
        })

        datatype = {'class': 'H5T_COMPOUND', 'fields': fields}

        data = {
            "type": datatype,
            "shape": [
                SNP500_ROWS,
            ]
        }
        layout = {
            "class": 'H5D_CHUNKED_REF_INDIRECT',
            "file_uri": s3path,
            "dims": chunk_dims,
            "chunk_table": chunkinfo_uuid
        }
        data['creationProperties'] = {'layout': layout}

        req = self.endpoint + '/datasets'
        rsp = requests.post(req, data=json.dumps(data), headers=headers)
        self.assertEqual(rsp.status_code, 201)
        rspJson = json.loads(rsp.text)
        dset_id = rspJson["id"]
        self.assertTrue(helper.validateId(dset_id))

        # link new dataset as 'dset'
        name = "dset"
        req = self.endpoint + "/groups/" + root_uuid + "/links/" + name
        payload = {"id": dset_id}
        rsp = requests.put(req, data=json.dumps(payload), headers=headers)
        self.assertEqual(rsp.status_code, 201)

        # do a point selection
        req = self.endpoint + "/datasets/" + dset_id + "/value"
        points = [
            1234567,
        ]
        body = {"points": points}
        rsp = requests.post(req, data=json.dumps(body), headers=headers)
        if rsp.status_code == 404:
            print(
                "s3object: {} not found, skipping point read chunk reference indirect test"
                .format(s3path))
            return

        self.assertEqual(rsp.status_code, 200)
        rspJson = json.loads(rsp.text)
        self.assertTrue("value" in rspJson)
        value = rspJson["value"]
        self.assertEqual(len(value), len(points))
        item = value[0]
        self.assertEqual(item[0], '1998.10.22')
        self.assertEqual(item[1], 'MHFI')
        self.assertEqual(item[2], 3)