Example #1
def who_flag_parse(flags):
    """ Parse WHO flags.

    :param flags:
        Flags to parse.

    :returns:
        A namespace object containing the following attributes:

        :param operator:
            Whether or not the user is an operator.

        :param away:
            Whether or not the user is away.

        :param modes:
            A set of the user's present modes (prefixes).
    """
    ret = SimpleNamespace(operator=False, away=False, modes=set())

    for char in flags:
        if char == '*':
            ret.operator = True
        elif char == "G":
            ret.away = True
        elif char == "H":
            ret.away = False
        elif char not in numletters:
            ret.modes.add(char)
        else:
            logger.debug("No known way to handle WHO flag %s", char)

    return ret
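A quick usage sketch (hypothetical flag string; assumes the module-level numletters is the set of ASCII letters and digits, as the fallthrough branch implies):

flags = who_flag_parse("G*@")
assert flags.away is True        # 'G' marks the user as gone
assert flags.operator is True    # '*' marks an operator
assert flags.modes == {"@"}      # non-alphanumeric chars are prefix modes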
Example #2
def read_data(csv, config):
    with open(config) as fin:
        use = fin.readline().strip().split(',')
        ref = fin.readline().strip()

    with open(csv) as fin:
        header = fin.readline().strip().split(',')
        use.sort(key=lambda i: header.index(i))

        data_matrix = np.array([], dtype=float).reshape(0, len(use))
        ref_array = []

        for line in fin:
            entry = line.strip().split(',')
            use_data = []
            # Reset per row: a row missing the ref column would otherwise
            # reuse a stale value (or raise a NameError with the old `del`).
            ref_data = None

            for i, val in enumerate(entry):
                label = header[i]
                if label in use:
                    use_data.append(np.log(float(val)))
                elif label == ref:
                    ref_data = int(val)

            data_matrix = np.vstack((data_matrix, use_data))
            ref_array.append(ref_data)

    ret = SimpleNamespace()
    ret.data_header = use
    ret.data = data_matrix
    ret.flag = ref_array
    ret.samples = len(ref_array)

    return ret
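For context, the reader above implies a two-line config file (line one: comma-separated columns to use; line two: the reference column) next to a headered CSV. A minimal, hedged sketch with hypothetical file names and columns:

with open("config.txt", "w") as f:
    f.write("a,b\nflag\n")
with open("data.csv", "w") as f:
    f.write("a,b,flag\n1.0,2.0,0\n3.0,4.0,1\n")

result = read_data("data.csv", "config.txt")
assert result.samples == 2 and result.flag == [0, 1]
assert result.data.shape == (2, 2)  # log-transformed 'a' and 'b' columns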
Example #3
def setup_test_environment(debug=None):
    """
    Perform global pre-test setup, such as installing the instrumented template
    renderer and setting the email backend to the locmem email backend.
    """
    if hasattr(_TestState, 'saved_data'):
        # Executing this function twice would overwrite the saved values.
        raise RuntimeError(
            "setup_test_environment() was already called and can't be called "
            "again without first calling teardown_test_environment()."
        )

    if debug is None:
        debug = settings.DEBUG

    saved_data = SimpleNamespace()
    _TestState.saved_data = saved_data

    saved_data.allowed_hosts = settings.ALLOWED_HOSTS
    # Add the default host of the test client.
    settings.ALLOWED_HOSTS = list(settings.ALLOWED_HOSTS) + ['testserver']

    saved_data.debug = settings.DEBUG
    settings.DEBUG = debug

    saved_data.email_backend = settings.EMAIL_BACKEND
    settings.EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'

    saved_data.template_render = Template._render
    Template._render = instrumented_test_render

    mail.outbox = []

    deactivate()
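This is Django's own helper; in user code it is typically reached via django.test.utils, roughly like so:

from django.test.utils import setup_test_environment, teardown_test_environment
from django.core import mail

setup_test_environment()        # swaps in the locmem email backend, etc.
try:
    assert mail.outbox == []    # sent emails now accumulate here
finally:
    teardown_test_environment()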
Example #4
    def test_reinterpreter_comp(self):
        comp1 = L.pe('COMP({(x, y, (x, z)) for (x, y) in S '
                     'for (y, z) in T}, [], {})')
        comp2 = L.pe('COMP({(x, x) for (x, y) in U}, [], {})')
        spec1 = CompSpec.from_comp(comp1, self.manager.factory)
        spec2 = CompSpec.from_comp(comp2, self.manager.factory)

        # Dummy wrapper for what would be IncComp.
        Dummy1 = SimpleNamespace()
        Dummy1.spec = spec1
        Dummy2 = SimpleNamespace()
        Dummy2.spec = spec2
        invs = {'Q': Dummy1, 'S': Dummy2}
        # Boilerplate domain information regarding the comprehension.
        constrs = []
        constrs.extend(spec1.get_domain_constraints('Q'))
        constrs.extend(spec2.get_domain_constraints('S'))
        domain_subst = unify(constrs)
        domain_subst = add_domain_names(domain_subst, {})

        trans = CostReinterpreter(invs, domain_subst, {}, {})

        cost = NameCost('Q')
        cost = trans.process(cost)
        cost = normalize(cost)
        exp_cost_str = '(Q_x*Q_z)'
        self.assertEqual(str(cost), exp_cost_str)
Example #5
def test_mcmcmc_step_parse(capsys):
    rand_gen = random.Random(4)
    tmp_file = br.TempFile()
    walker = SimpleNamespace(name="qwerty", proposed_score=None, score_history=[1.12, 3.42], current_score=3.42,
                             accept=lambda *_: print("Calling accept() method"), rand_gen=rand_gen, heat=0.25,
                             ice=False, lava=False, proposed_score_file=tmp_file)

    # Accept higher score
    tmp_file.write("7.90", mode="w")

    mcmcmc.MCMCMC.step_parse(walker=walker, std=1.5)
    assert walker.score_history == [1.12, 3.42, 7.9]
    assert walker.proposed_score == 7.9
    out, err = capsys.readouterr()
    assert out == "Calling accept() method\n"

    # Reject lower score
    tmp_file.write("0.91", mode="w")

    mcmcmc.MCMCMC.step_parse(walker=walker, std=3.1)
    assert walker.score_history == [1.12, 3.42, 7.9, 0.91]
    assert walker.proposed_score == 0.91
    out, err = capsys.readouterr()
    assert out == ""

    # Accept lower score
    tmp_file.write("3.3", mode="w")

    mcmcmc.MCMCMC.step_parse(walker=walker, std=3.1)
    assert walker.score_history == [1.12, 3.42, 7.9, 0.91, 3.3]
    assert walker.proposed_score == 3.3
    out, err = capsys.readouterr()
    assert out == "Calling accept() method\n", print(out)

    # Lava walker accepts any score
    tmp_file.write("0.1", mode="w")

    walker.lava = True
    mcmcmc.MCMCMC.step_parse(walker=walker, std=3.1)
    assert walker.score_history == [1.12, 3.42, 7.9, 0.91, 3.3, 0.1]
    out, err = capsys.readouterr()
    assert out == "Calling accept() method\n"

    # Ice walker rejects any lower scores
    tmp_file.write("3.4", mode="w")

    walker.lava = False
    walker.ice = True
    mcmcmc.MCMCMC.step_parse(walker=walker, std=3.1)
    assert walker.score_history == [1.12, 3.42, 7.9, 0.91, 3.3, 0.1, 3.4]
    out, err = capsys.readouterr()
    assert out == ""

    # Do not allow history to grow over 1000 items long
    walker.score_history = [1 for _ in range(1000)]
    assert len(walker.score_history) == 1000
    mcmcmc.MCMCMC.step_parse(walker, 3.1)
    assert len(walker.score_history) == 1000
    assert walker.score_history[-1] == 3.4
Example #6
    def __mock_output__(self, open_method=False, close_method=False):
        output = SimpleNamespace()
        output.on_update = MagicMock(spec=(""))
        if open_method:
            output.open = MagicMock(spec=(""))
        if close_method:
            output.close = MagicMock(spec=(""))
        return output
Example #7
def registries(hass):
    """Registry mock setup."""
    from types import SimpleNamespace
    ret = SimpleNamespace()
    ret.entity = mock_registry(hass)
    ret.device = mock_device_registry(hass)
    ret.area = mock_area_registry(hass)
    return ret
Example #8
def extract_module_doc(state: State, path: List[str], module):
    assert inspect.ismodule(module)

    out = Empty()
    out.url = make_url(path)
    out.name = path[-1]
    out.summary = extract_summary(state, state.class_docs, path, module.__doc__)
    return out
Example #9
def extract_class_doc(state: State, path: List[str], class_):
    assert inspect.isclass(class_)

    out = Empty()
    out.url = make_url(path)
    out.name = path[-1]
    out.summary = extract_summary(state, state.class_docs, path, class_.__doc__)
    return out
Example #10
def test_chain_get_cold_walker():
    walker1 = SimpleNamespace(heat=0.1)
    walker2 = SimpleNamespace(heat=0.4)

    chain = SimpleNamespace(walkers=[walker1, walker2], get_cold_walker=mcmcmc._Chain.get_cold_walker, cold_heat=0.1)
    assert chain.get_cold_walker(chain) == walker1

    chain.walkers = [walker2, walker1]
    assert chain.get_cold_walker(chain) == walker1
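Tests like this one lean on a general SimpleNamespace trick: a plain function taken off a class can be stored on a namespace and called with the namespace itself passed explicitly as self. A minimal sketch of the pattern (hypothetical names):

from types import SimpleNamespace

class Chain:
    def best(self):
        return max(self.walkers, key=lambda w: w.score)

w1, w2 = SimpleNamespace(score=1), SimpleNamespace(score=2)
fake = SimpleNamespace(walkers=[w1, w2], best=Chain.best)
assert fake.best(fake) is w2  # explicit self; no real Chain instance needed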
Example #11
def test_chain_get_best_walker():
    walker1 = SimpleNamespace(current_score=35)
    walker2 = SimpleNamespace(current_score=15)

    chain = SimpleNamespace(walkers=[walker1, walker2], get_best_walker=mcmcmc._Chain.get_best_walker)
    assert chain.get_best_walker(chain) == walker1

    chain.walkers = [walker2, walker1]
    assert chain.get_best_walker(chain) == walker1
Example #12
def get_mappings(efo_mapping_file, snp_2_gene_file):
    mappings = SimpleNamespace()
    mappings.trait_2_efo, mappings.unavailable_efo = \
        load_efo_mapping(efo_mapping_file)

    mappings.consequence_type_dict = \
        CT.process_consequence_type_file(snp_2_gene_file)

    return mappings
Example #13
    def get_noise_record(self):
        noise = SimpleNamespace()

        noise.x1 = self.x[0]
        noise.w_sequence = self.w[:]
        noise.v_sequence = self.v[:]
        noise.n_sequence = list(np.array(self.noise).flatten())

        return noise
Example #14
def test_chain_get_ice_walker():
    walker1 = SimpleNamespace(ice=False)
    walker2 = SimpleNamespace(ice=False)
    ice_walker = SimpleNamespace(ice=True)

    chain = SimpleNamespace(walkers=[walker1, walker2], get_ice_walker=mcmcmc._Chain.get_ice_walker)
    assert chain.get_ice_walker(chain) is False

    chain.walkers = [walker2, ice_walker, walker1]
    assert chain.get_ice_walker(chain) == ice_walker
Example #15
def extract_data_doc(state: State, parent, path: List[str], data):
    assert not inspect.ismodule(data) and not inspect.isclass(data) and not inspect.isroutine(data) and not inspect.isframe(data) and not inspect.istraceback(data) and not inspect.iscode(data)

    out = Empty()
    out.name = path[-1]
    # Welp. https://stackoverflow.com/questions/8820276/docstring-for-variable
    out.summary = ''
    out.has_details = False
    if hasattr(parent, '__annotations__') and out.name in parent.__annotations__:
        out.type = extract_annotation(state, parent.__annotations__[out.name])
    else:
        out.type = None
    # The autogenerated <foo.bar at 0xbadbeef> is useless, so provide the value
    # only if __repr__ is implemented for given type
    if '__repr__' in type(data).__dict__:
        out.value = html.escape(repr(data))
    else:
        out.value = None

    # External data summary, if provided
    path_str = '.'.join(path)
    if path_str in state.data_docs:
        # TODO: use also the contents
        out.summary = render_inline_rst(state, state.data_docs[path_str]['summary'])
        del state.data_docs[path_str]

    return out
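The '__repr__' in type(data).__dict__ check above distinguishes classes that define their own __repr__ from ones that merely inherit object's default <... at 0x...> form; a small illustration:

class Plain:
    pass

class Fancy:
    def __repr__(self):
        return "Fancy()"

assert '__repr__' not in type(Plain()).__dict__  # only inherited from object
assert '__repr__' in type(Fancy()).__dict__      # defined on the class itself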
Example #16
def test_mcmcmc_mc_step_run():
    tmp_file = br.TempFile()
    walker = SimpleNamespace(function=lambda func_args: 1234, params=[], proposed_score_file=tmp_file)
    mcmcmc.MCMCMC.mc_step_run(walker, ["foo"])
    assert tmp_file.read() == "1234"

    tmp_file.clear()
    walker.params = ["bar", "baz"]
    walker.function = lambda func_args, params: 4321
    mcmcmc.MCMCMC.mc_step_run(walker, ["foo"])
    assert tmp_file.read() == "4321"
Example #17
def parse_syllabus(syllabus_file, content_folder='', parse_all=False):
    # loading raw syllabus
    syll = split_into_units(syllabus_file)[0]
    cell = syll.cells[1]

    def section_to_name_date(line):
        name = re.findall(r'\*\*(.*)\*\*', line)[0]
        date = release_dates.get(name)
        return name, date

    def subs_to_name_file(line):
        try:
            file_name = re.findall(r'\((.+?\.ipynb)\)', line)[0]
        except IndexError:
            return
        subsection_name = re.findall(r'\[(.+?)\]', line)[0]
        return subsection_name, file_name

    is_section = lambda line: line.startswith('*')

    lines = cell['source'].split('\n')
    sections = [section_to_name_date(line) for line in lines
                if is_section(line)]

    # Make a list of lines in each section.
    subsections = (tuple(g) for k, g in
                   groupby(lines, key=lambda x: not is_section(x)) if k)
    # Filter the actual subsections.
    subsections = [[subs_to_name_file(i) for i in j
                    if subs_to_name_file(i) is not None]
                   for j in subsections]

    data = SimpleNamespace(category='main', chapters=[])
    for i, section in enumerate(zip(sections, subsections)):
        if not parse_all:
            # Don't convert sections with no release date.
            if section[0][1] is None:
                continue

        # creating chapter
        chapter = SimpleNamespace(category='chapter', sequentials=[])

        chapter.name = section[0][0]
        chapter.date = section[0][1]
        chapter.url = "sec_{0}".format(str(i).zfill(2))

        for j, subsection in enumerate(section[1]):
            # creating sequential
            sequential = SimpleNamespace(category='sequential', verticals=[])

            sequential.name = subsection[0]
            sequential.date = chapter.date
            sequential.url = "subsec_{0}_{1}".format(str(i).zfill(2),
                                                     str(j).zfill(2))
            sequential.source_notebook = content_folder + '/' + subsection[1]

            chapter.sequentials.append(sequential)

        data.chapters.append(chapter)
    return data
Example #18
    def test_populate_rootfs_contents_from_filesystem(self):
        with ExitStack() as resources:
            workdir = resources.enter_context(TemporaryDirectory())
            args = SimpleNamespace(
                project=None,
                suite='xenial',
                arch='amd64',
                image_format='img',
                workdir=workdir,
                output=None,
                subproject=None,
                subarch=None,
                output_dir=None,
                cloud_init=None,
                with_proposed=None,
                extra_ppas=None,
                hooks_directory=[],
                gadget_tree=self.gadget_tree,
                filesystem=None,
                )
            state = resources.enter_context(XXXClassicBuilder(args))
            # Now we have to craft enough of a gadget definition to drive the
            # method under test.
            part = SimpleNamespace(
                role=StructureRole.system_data,
                filesystem_label='writable',
                filesystem=FileSystemType.none,
                )
            volume = SimpleNamespace(
                structures=[part],
                bootloader=BootLoader.grub,
                schema=VolumeSchema.gpt,
                )
            state.gadget = SimpleNamespace(
                volumes=dict(volume1=volume),
                )
            prep_state(state, workdir)
            # Fake some state expected by the method under test.
            args.filesystem = resources.enter_context(TemporaryDirectory())
            etc_path = os.path.join(args.filesystem, 'etc')
            os.makedirs(etc_path)
            with open(os.path.join(etc_path, 'fstab'), 'w') as fp:
                fp.write('LABEL=cloudimg-rootfs   /    ext4   defaults    0 0')
            state.rootfs = resources.enter_context(TemporaryDirectory())
            # Jump right to the state method we're trying to test.
            state._next.pop()
            state._next.append(state.populate_rootfs_contents)
            next(state)
            # The seed metadata should exist, and the filesystem label
            # should be modified to 'writable'.
            fstab_data = os.path.join(state.rootfs, 'etc', 'fstab')
            with open(fstab_data, 'r', encoding='utf-8') as fp:
                self.assertEqual(fp.read(), 'LABEL=writable   '
                                            '/    ext4   defaults    0 0')
Example #19
def test_chain_write_sample():
    foo_var = SimpleNamespace(draw_random=lambda: True, draw_value=0.1, name="foo", current_value=0.15)
    bar_var = SimpleNamespace(draw_random=lambda: True, draw_value=0.5, name="bar", current_value=0.51)
    walker1 = SimpleNamespace(variables=[foo_var, bar_var], lava=False, ice=False, current_score=35,
                              heat=0.1)

    tmp_file = br.TempFile()
    chain = SimpleNamespace(step_counter=2, get_cold_walker=lambda *_: walker1, outfile=tmp_file.path,
                            write_sample=mcmcmc._Chain.write_sample)

    chain.write_sample(chain)
    assert tmp_file.read() == "2\t0.15\t0.51\t35\n", print(tmp_file.read())
Example #20
def _setup_fake_gpool(
    frame_size, intrinsics, detection_mapping_mode, rec_dir, min_calibration_confidence
):
    cap = SimpleNamespace()
    cap.frame_size = frame_size
    cap.intrinsics = intrinsics
    pool = SimpleNamespace()
    pool.capture = cap
    pool.get_timestamp = time
    pool.detection_mapping_mode = detection_mapping_mode
    pool.min_calibration_confidence = min_calibration_confidence
    pool.rec_dir = rec_dir
    pool.app = "player"
    return pool
Example #21
def test_chain_get_results():
    tmp_file = br.TempFile()
    tmp_file.write("""rec_id1,rec_id2,r_square
BOL-PanxαB,Bab-PanxαA,0.016894041431
BOL-PanxαB,Bch-PanxαA,0.087311057754
BOL-PanxαB,Bfo-PanxαE,0.274041115357""")

    chain = SimpleNamespace(outfile=tmp_file.path, get_results=mcmcmc._Chain.get_results)
    assert type(chain.get_results(chain)) == pd.DataFrame
    assert str(chain.get_results(chain)) == """\
      rec_id1     rec_id2        r_square
0  BOL-PanxαB  Bab-PanxαA  0.016894041431
1  BOL-PanxαB  Bch-PanxαA  0.087311057754
2  BOL-PanxαB  Bfo-PanxαE  0.274041115357""", print(chain.get_results(chain))
Example #22
def test_mcmcmc_reset_params():
    walker1 = SimpleNamespace(params=[1, 2])
    walker2 = SimpleNamespace(params=[3, 4])
    walker3 = SimpleNamespace(params=[5, 6])
    walker4 = SimpleNamespace(params=[7, 8])

    chain1 = SimpleNamespace(walkers=[walker1, walker2])
    chain2 = SimpleNamespace(walkers=[walker3, walker4])

    mc_obj = SimpleNamespace(reset_params=mcmcmc.MCMCMC.reset_params, chains=[chain1, chain2])
    with pytest.raises(AttributeError) as e:
        mc_obj.reset_params(mc_obj, ['a', 'b', 'c'])
    assert "Incorrect number of params supplied in reset_params(). 2 expected; 3 supplied; ['a', 'b', 'c']" in str(e)

    mc_obj.reset_params(mc_obj, ['a', 'b'])
    assert walker2.params == ['a', 'b']
Example #23
    def create_http_client(self, response_str="", http_error_codes=None,
                           authentication_handler=EmptyAuthenticationHandler(),
                           header=None):
        h = HttpClient(base_url="http://irgendw.as",
                       authentication_handler=authentication_handler,
                       retry_delay_sec=0)
        response = SimpleNamespace()
        response.readall = Mock(spec=(""), return_value=response_str.encode("UTF-8"))
        response.headers = SimpleNamespace()
        response.headers.get_content_charset = Mock(spec=(""), return_value="UTF-8")
        response.getheader = Mock(spec=(""), return_value=header)

        side_effects = ([HTTPError("http://foo.bar", code, None, None, None) if code else DEFAULT
                         for code in http_error_codes] if http_error_codes else None)

        h.__open__ = Mock(spec=(""),
                          return_value=response,
                          side_effect=side_effects)
        return h
Example #24
def test_chain_dump_obj():
    walker1 = SimpleNamespace(_dump_obj=lambda *_: "walker1")
    walker2 = SimpleNamespace(_dump_obj=lambda *_: "walker2")
    tmp_file = br.TempFile()
    tmp_file.write("outfile results")

    chain = SimpleNamespace(walkers=[walker1, walker2], outfile=tmp_file.path, cold_heat=0.1, hot_heat=0.2,
                            step_counter=20, best_score_ever_seen=100, _dump_obj=mcmcmc._Chain._dump_obj)

    dump = chain._dump_obj(chain)
    assert dump["walkers"] == ["walker1", "walker2"]
    assert dump["cold_heat"] == 0.1
    assert dump["hot_heat"] == 0.2
    assert dump["step_count"] == 20
    assert dump["best_score"] == 100
    assert dump["results"] == "outfile results"
Example #25
    def testInteractivePlots(self):
        slhaFolder = './testFiles/scanExample/slha'
        smodelsFolder = './testFiles/scanExample/smodels-output'
        parametersFile = './iplots_parameters.py'
        outFolder = './plots_test'
        
        defaultFolder = './plots_test_default'
        
        if os.path.isdir(outFolder):
            shutil.rmtree(outFolder)

        parser = SimpleNamespace()
        parser.smodelsFolder = smodelsFolder
        parser.slhaFolder = slhaFolder
        parser.parameters = parametersFile
        parser.outputFolder = outFolder
        parser.verbosity = 'error'
        parser.npoints = -1

        run = main(parser)
        
        self.assertEqual(run, outFolder)
        self.assertEqual(sorted(os.listdir(outFolder)), sorted(os.listdir(defaultFolder)))

        if os.path.isdir(outFolder):
            shutil.rmtree(outFolder)
Example #26
    def setUp(self):
        mockextension1 = SimpleNamespace()
        mockextension2 = SimpleNamespace()

        def load_jupyter_server_extension(obj):
            obj.mockI = True
            obj.mock_shared = 'I'

        mockextension1.load_jupyter_server_extension = load_jupyter_server_extension

        def load_jupyter_server_extension(obj):
            obj.mockII = True
            obj.mock_shared = 'II'

        mockextension2.load_jupyter_server_extension = load_jupyter_server_extension

        sys.modules['mockextension2'] = mockextension2
        sys.modules['mockextension1'] = mockextension1
Example #27
    def get_list(self, page, sort_field, sort_desc, search, filters, page_size=None):
        bukudb = self.bukudb
        tags = bukudb.get_tag_all()[1]
        tags = [(x, y) for x, y in tags.items()]
        tags = self._apply_filters(tags, filters)
        if sort_field == 'usage_count':
            tags = sorted(tags, key=lambda x: x[1], reverse=sort_desc)
        elif sort_field == 'name':
            tags = sorted(tags, key=lambda x: x[0], reverse=sort_desc)
        tags = list(tags)
        count = len(tags)
        if page_size and tags:
            tags = list(chunks(tags, page_size))[page]
        data = []
        for name, usage_count in tags:
            tag_sns = SimpleNamespace(name=None, usage_count=None)
            tag_sns.name, tag_sns.usage_count = name, usage_count
            data.append(tag_sns)
        return count, data
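The chunks helper used for paging is not shown in this snippet; a hypothetical implementation consistent with the call above would be:

def chunks(lst, n):
    """Yield successive n-sized slices of lst (assumed paging helper)."""
    for i in range(0, len(lst), n):
        yield lst[i:i + n]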
Example #28
def test_mcmcmc_check_convergence(hf):
    csv_path = os.path.join(hf.resource_path, "mcmcmc", "chain")
    chain1 = SimpleNamespace(step_counter=99, outfile=csv_path + "1.csv")
    chain2 = SimpleNamespace(step_counter=99, outfile=csv_path + "2.csv")
    chain3 = SimpleNamespace(step_counter=99, outfile=csv_path + "3.csv")

    mc_obj = SimpleNamespace(_check_convergence=mcmcmc.MCMCMC._check_convergence, chains=[chain1, chain2, chain3],
                             convergence=1.01)

    # Return False when step_counter < 100
    assert mc_obj._check_convergence(mc_obj) is False

    # Return False when convergence is not met
    chain1.step_counter = chain2.step_counter = chain3.step_counter = 100
    assert mc_obj._check_convergence(mc_obj) is False

    # Return True on convergence
    mc_obj.convergence = 1.1
    assert mc_obj._check_convergence(mc_obj) is True
Example #29
def userhost_parse(mask):
    """Parse a USERHOST reply.

    :returns:
        An object with the following attributes set:

        :hostmask:
            :py:class:`~PyIRC.line.Hostmask` of the user. This may be a cloak.

        :operator:
            Whether or not the user is an operator. False does not mean they
            are not an operator, as operators may be hidden on the server.

        :away:
            Whether or not the user is away.

    """
    if not mask:
        raise ValueError("Need a mask to parse")

    ret = SimpleNamespace(hostmask=None, operator=None, away=None)

    nick, sep, userhost = mask.partition('=')
    if not sep:
        return ret

    if nick.endswith('*'):
        nick = nick[:-1]
        ret.operator = True

    if userhost.startswith(('+', '-')):
        away, userhost = userhost[0], userhost[1:]
        ret.away = (away == '+')

    # user@host
    username, sep, host = userhost.partition('@')
    if not sep:
        host = username
        username = None

    ret.hostmask = Hostmask(nick=nick, username=username, host=host)

    return ret
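A short usage sketch (hypothetical mask; assumes Hostmask from PyIRC.line, as the docstring states):

parsed = userhost_parse('nick*=+user@host.example')
assert parsed.operator is True             # trailing '*' on the nick
assert parsed.away is True                 # a '+' prefix sets away=True in this parser
assert parsed.hostmask.host == 'host.example'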
Example #30
def test_rose_config_exists_true(tmp_path):
    (tmp_path / "rose-suite.conf").touch()
    assert rose_config_exists(tmp_path, SimpleNamespace()) is True
Example #31
def test_rose_config_exists_nonexistant_dir(tmp_path):
    assert rose_config_exists(
        tmp_path / "non-existant-folder", SimpleNamespace(
            opt_conf_keys='', defines=[], rose_template_vars=[]
        )
    ) is False
Example #32
def test_rose_config_exists_no_rose_suite_conf(tmp_path):
    assert rose_config_exists(
        tmp_path, SimpleNamespace(
            opt_conf_keys=None, defines=[], rose_template_vars=[]
        )
    ) is False
Example #33
def run_single(subj_dir,
               script,
               script_args,
               path_segmanual,
               path_data,
               path_data_processed,
               path_results,
               path_log,
               path_qc,
               itk_threads,
               continue_on_error=False):
    """
    Job function for mapping with multiprocessing
    :param subj_dir:
    :param script:
    :param script_args:
    :param path_segmanual:
    :param path_data:
    :param path_data_processed:
    :param path_results:
    :param path_log:
    :param path_qc:
    :param itk_threads:
    :param continue_on_error:
    :return:
    """

    # Strip the `.sh` extension from the script for building error logs
    # TODO: we should probably strip all extensions
    script_base = re.sub('\\.sh$', '', os.path.basename(script))
    script_full = os.path.abspath(os.path.expanduser(script))

    subject = os.path.basename(subj_dir)
    log_file = os.path.join(path_log, '{}_{}.log'.format(script_base, subject))
    err_file = os.path.join(path_log,
                            'err.{}_{}.log'.format(script_base, subject))

    print('Started at {}: {}. See log file {}'.format(
        time.strftime('%Hh%Mm%Ss'), subject, log_file),
          flush=True)

    # A full copy of the environment is needed otherwise sct programs won't necessarily be found
    envir = os.environ.copy()
    # Add the script relevant environment variables
    envir.update({
        'PATH_SEGMANUAL': path_segmanual,
        'PATH_DATA': path_data,
        'PATH_DATA_PROCESSED': path_data_processed,
        'PATH_RESULTS': path_results,
        'PATH_LOG': path_log,
        'PATH_QC': path_qc,
        'ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS': str(itk_threads),
        'SCT_PROGRESS_BAR': 'off'
    })

    # Ship the job out, merging stdout/stderr and piping to log file
    try:
        res = subprocess.run([script_full, subj_dir] + script_args.split(' '),
                             env=envir,
                             stdout=open(log_file, 'w'),
                             stderr=subprocess.STDOUT)

        assert res.returncode == 0, 'Processing of subject {} failed'.format(
            subject)
    except Exception as e:
        process_completed = 'res' in locals()
        res = res if process_completed else SimpleNamespace(returncode=-1)
        process_succeeded = res.returncode == 0

        if not process_succeeded and os.path.exists(log_file):
            # If the process didn't complete or succeed, rename the log file
            # to indicate the error
            os.rename(log_file, err_file)

        if process_succeeded or continue_on_error:
            return res
        else:
            raise e

    return res
Example #34
def parse_nav_data(lines, century=2000):
    '''
    Given the lines of a RINEX navigation file, parses the navigation data
    into ephemerides; `century` resolves the two-digit years (default 2000).
    Returns dictionary {prn: [SimpleNamespace]} of ephemeris objects.

    Output
    ------
    Dictionary of format:
        {<prn>: [<namespace>, ...]}
    Each namespace contains the following parameters:
        e - eccentricity
        t_oe - time of ephemeris
        i_0 - inclination at reference time (rad)
        a - semi-major axis (m); usually given as SQRT
        omega_dot - rate of right ascension (rad/s)
        omega_0 - right ascension at week (rad)
        omega - argument of perigee
        M_0 - mean anomaly of reference time (rad)
        week - GPS week number
        delta_n - mean motion difference (rad/s)
        i_dot - rate of inclination angle (rad/s)
        c_us - argument of latitude (amplitude of sine, radians)
        c_rs - orbit radius (amplitude of sine, meters)
        c_is - inclination (amplitude of sine, meters)
        c_uc - argument of latitude (amplitude of cosine, radians)
        c_rc - orbit radius (amplitude of cosine, meters)
        c_ic - inclination (amplitude of cosine, meters)
    '''
    epoch_pattern = r'(\s?\d+)\s(\s?\d+)\s(\s?\d+)\s(\s?\d+)\s(\s?\d+)\s(\s?\d+)\s(\s?\d+\.\d)'
    number_pattern = r'\n?\s*([+-]?\d+\.\d{12}D[+-]?\d{2})'
    pattern = epoch_pattern + 29 * number_pattern
    data = {}
    matches = re.findall(pattern, '\n'.join(lines))
    for m in matches:
        prn, yy, month, day, hour, minute = (int(i) for i in m[:6])
        second, a0, a1, a2, \
            iode1, c_rs, delta_n, m_0, \
            c_uc, e, c_us, sqrt_a, \
            t_oe, c_ic, omega_0, c_is, \
            i_0, c_rc, omega, omega_dot, \
            i_dot, l2_codes, week, l2p_data, \
            accuracy, health, tgd, iodc, \
            transmit_time, fit_interval = (float(s.replace('D', 'E')) for s in m[6:36])
        year = century + yy
        epoch = datetime(year,
                         month,
                         day,
                         hour,
                         minute,
                         int(second),
                         int(1e6 * (second % 1)),
                         tzinfo=timezone.utc)
        eph = SimpleNamespace(
            epoch=epoch,
            a0=a0,
            a1=a1,
            a2=a2,
            iode1=iode1,
            c_rs=c_rs,
            delta_n=delta_n,
            m_0=m_0,
            c_uc=c_uc,
            e=e,
            c_us=c_us,
            sqrt_a=sqrt_a,
            t_oe=t_oe,
            c_ic=c_ic,
            omega_0=omega_0,
            c_is=c_is,
            i_0=i_0,
            c_rc=c_rc,
            omega=omega,
            omega_dot=omega_dot,  # TODO check if orbit solutions correct omega
            i_dot=i_dot,
            l2_codes=l2_codes,
            week=week,
            l2p_data=l2p_data,
            accuracy=accuracy,
            health=health,
            tgd=tgd,
            iodc=iodc,
            transmit_time=transmit_time,
            fit_interval=fit_interval)
        if prn not in data.keys():
            data[prn] = []
        data[prn].append(eph)
    return data
Example #35
""" application config
"""
from types import SimpleNamespace

bootstrap = SimpleNamespace(
    # Is bootstrap running
    running=False,

    # Attached output
    output=None)

maas = SimpleNamespace(
    # Client
    client=None,

    # API key
    api_key=None,

    # API Endpoint
    endpoint=None)

juju = SimpleNamespace(
    # Client
    client=None,

    # Is authenticated?
    authenticated=False)

app = SimpleNamespace(
    # Juju bootstrap details
    bootstrap=bootstrap,
Example #36
from types import SimpleNamespace

from .param import Params


class ClinicalParams(Params):
    pass


#
# Clinical parameters
#
CLINICAL_DEFAULT = Params(
    "Default",
    hospitalization_period=7.0,
    icu_period=7.5,
    severe_delay=5.0,
    hospitalization_overflow_bias=0.25,
    critical_delay=7.0,
    prob_severe=0.18,
    prob_critical=0.05,
    prob_fatality=0.015 / 0.05,
    prob_no_hospitalization_fatality=0.30,
    prob_no_icu_fatality=1.00,
    case_fatality_ratio=0.015,
    infection_fatality_ratio=0.015 * 0.14,
    hospital_fatality_ratio=0.05,
)
clinical = SimpleNamespace(DEFAULT=CLINICAL_DEFAULT)
Example #37
def deserialize_json(json_object):
    return loads(json_object, object_hook=lambda d: SimpleNamespace(**d))
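Because object_hook runs on every decoded JSON object from the inside out, nested objects become nested namespaces. A quick demonstration:

obj = deserialize_json('{"user": {"name": "ada", "id": 7}, "ok": true}')
assert obj.user.name == "ada"
assert obj.user.id == 7
assert obj.ok is True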
Example #38
    def load(self, config_file):
        with open(config_file) as f:
            cf = json.load(f)

        # Network
        model_def = cf['network']
        model_pkg = model_def.get('package', None)
        model_mod = importlib.import_module(model_def['module'], model_pkg)
        model_class = getattr(model_mod, model_def['model'])
        model_kwargs = model_def['kwargs']

        # Dataset
        data_def = cf['dataset']
        if data_def['module']:
            data_pkg = data_def.get('package', None)
            data_mod = importlib.import_module(data_def['module'], data_pkg)
            dataset = getattr(data_mod, data_def['dataset'])
            data_kwargs = data_def['kwargs']
        else:
            dataset = None
            data_kwargs = {}

        # Loss
        loss_def = cf['loss']
        if loss_def['module']:
            loss_pkg = loss_def.get('package', None)
            loss_mod = importlib.import_module(loss_def['module'], loss_pkg)
            loss = getattr(loss_mod, loss_def['loss'])
            loss_kwargs = loss_def['kwargs']
        else:
            loss = None
            loss_kwargs = {}

        # Optimizer
        optim_def = cf.get('optimizer', False)
        if optim_def:
            opt_pkg = optim_def.get('package', None)
            opt_mod = importlib.import_module(optim_def['module'], opt_pkg)
            opt = getattr(opt_mod, optim_def['optim'])
            opt_kwargs = optim_def['kwargs']
        else:
            opt = None
            opt_kwargs = {}

        # Trainer
        trainer_def = cf['trainer']
        if trainer_def['module']:
            t_pkg = trainer_def.get('package', None)
            trainer_mod = importlib.import_module(trainer_def['module'], t_pkg)
            trainer = getattr(trainer_mod, trainer_def['trainer'])
        else:
            trainer = None

        # Hyperparameters
        params = cf['hyperparams']

        # Other
        other = cf.get('other', {})

        self.model = SimpleNamespace(model=model_class, args=model_kwargs)
        self.data = SimpleNamespace(data=dataset, args=data_kwargs)
        self.loss = SimpleNamespace(loss=loss, args=loss_kwargs)
        self.optim = SimpleNamespace(optim=opt, args=opt_kwargs)
        self.trainer = trainer
        self.hp = params
        self.other = other
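The loader implies a particular config schema; a minimal hypothetical config (module and class names are placeholders) that would satisfy it:

minimal_config = {
    "network": {"module": "models.net", "model": "Net", "kwargs": {}},
    "dataset": {"module": "", "dataset": "", "kwargs": {}},   # falsy module => no dataset
    "loss": {"module": "torch.nn", "loss": "CrossEntropyLoss", "kwargs": {}},
    "trainer": {"module": "", "trainer": ""},                 # falsy module => no trainer
    "hyperparams": {"lr": 1e-3},
}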
Example #39
def animateParagraphs(scriptPath):
    sessionFolderName = "videos_version_" + str(
        len(os.listdir("../animated_text")))

    os.system("rm /dev/shm/voice_tracks/*.wav")
    os.system("rm /dev/shm/sequence/*.png")
    os.system("rm /dev/shm/animated_text_temp/to_be_stretched.mov")
    os.system("rm ../voice_tracks/*.wav")

    os.system("mkdir /dev/shm/voice_tracks")
    os.system("mkdir /dev/shm/sequence")
    os.system("mkdir /dev/shm/animated_text_temp")
    os.system("mkdir ../animated_text")
    os.system("mkdir ../animated_text/" + sessionFolderName)
    os.system("mkdir ../voice_tracks")
    # os.system("mkdir '" + customOutputFolderVideoClips + "/renpy'")

    os.system("shnsplit -f " + voiceTrackResourcesFolder + "*.cue " +
              voiceTrackResourcesFolder + "*.wav -d /dev/shm/voice_tracks")
    os.system("cp /dev/shm/voice_tracks/*.wav ../voice_tracks")

    clearSequenceFolder()

    nameIndicatorImageDictonary = getNameIndicators()
    voiceTrackList = getVoiceTracks()

    currentScriptParagraphIndex = 0
    # Coordinates of the current paragraph (when several paragraphs are appended)
    currentScriptParagraphCoordinates = (0, 0)

    lastFrame = None

    scriptParagraphList = pyexcel_ods.get_data(scriptPath)["Paragraphs"][1:]
    hasSelection = checkSelection(scriptParagraphList)

    for scriptParagraphRecordCurrent in scriptParagraphList:
        # Assign the next paragraph (note: may raise IndexError on the final row)
        scriptParagraphRecordNext = scriptParagraphList[
            currentScriptParagraphIndex + 1]

        ####################################################
        #           Parse paragraph arguments              #
        ####################################################

        # Default: empty name => show no nameplate!
        scriptParagraphName = ""

        # Default: empty text => show an empty text line!
        scriptParagraphText = ""

        # Default: no voiced line
        voiceTrack = None

        # Always initialize nameIndicatorImage with None => default: show no nameplate!
        nameIndicatorImage = None

        # Default: no appending
        toBeAppendedNext = False
        toBeAppendedCurrent = False

        # Process the fields
        if len(scriptParagraphRecordCurrent) > 0:
            # Assign the character name to scriptParagraphName
            scriptParagraphName = scriptParagraphRecordCurrent[0]

        if len(scriptParagraphRecordCurrent) > 1:
            # Assign the actual text line to scriptParagraphText
            scriptParagraphText = scriptParagraphRecordCurrent[1]

        if (len(scriptParagraphRecordCurrent) > 2
                and scriptParagraphRecordCurrent[2] == 1
                and len(voiceTrackList) > 0):
            # Assign the voiced-line flag value to voiceTrack
            voiceTrack = voiceTrackList.pop()

        if (len(scriptParagraphRecordCurrent) > 3
                and scriptParagraphRecordCurrent[3] == 1):
            # Assign the append-flag value to toBeAppendedCurrent
            toBeAppendedCurrent = True
            scriptParagraphName = ""

        if len(scriptParagraphRecordNext) > 3:
            # Assign the append-flag value to toBeAppendedNext
            toBeAppendedNext = scriptParagraphRecordNext[3] == 1

        if hasSelection:
            # Process the selection flag => only render when it is set!
            if (len(scriptParagraphRecordCurrent) > 4
                    and scriptParagraphRecordCurrent[4] == 1):
                pass
            else:
                currentScriptParagraphIndex += 1
                continue

        # Show a nameplate only if BOTH a name AND a nameplate are defined
        if (len(scriptParagraphName) > 0
                and scriptParagraphName in nameIndicatorImageDictonary):
            nameIndicatorImage = nameIndicatorImageDictonary[scriptParagraphName]

        # Strip any unintended leading whitespace.
        # Also replace plain quotation marks with "nicer" typographic quotes.
        scriptParagraphText = scriptParagraphText.lstrip().replace(
            "\"", "“", 1).replace("\"", "”", 1)

        ####################################################
        #                Manage appending                  #
        ####################################################

        if toBeAppendedCurrent:
            # This paragraph is appended to the previous one
            scriptParagraph = SimpleNamespace(
                text=scriptParagraphText,
                index=currentScriptParagraphIndex,
                voiceTrack=voiceTrack)

            fadeInOptions = initFadeInOptions(
                currentScriptParagraphCoordinates, lastFrame,
                nameIndicatorImage, True)
            fadeOutOptions = fadeInParagraph(scriptParagraph, fadeInOptions)

            if toBeAppendedNext:
                # The next paragraph will be appended
                lastFrame = fadeOutOptions.lastFrame
                currentScriptParagraphCoordinates = (
                    fadeOutOptions.lastLineCoordinates[0],
                    fadeOutOptions.lastLineCoordinates[1] +
                    fadeInOptions.lineMargin * 1.5 *
                    fadeInOptions.upscaleFactor)

            else:
                # The next paragraph replaces this one
                fadeOutParagraph(fadeOutOptions)

                lastFrame = None
                currentScriptParagraphCoordinates = (0, 0)

        else:
            # This paragraph replaces the previous one
            scriptParagraphText = scriptParagraphText.rstrip()
            currentScriptParagraphCoordinates = (0, 0)
            scriptParagraph = SimpleNamespace(
                text=scriptParagraphText,
                index=currentScriptParagraphIndex,
                voiceTrack=voiceTrack)
            fadeInOptions = initFadeInOptions(
                nameIndicatorImage=nameIndicatorImage)

            fadeOutOptions = fadeInParagraph(scriptParagraph, fadeInOptions)
            fadeOutParagraph(fadeOutOptions)

        ####################################################
        #                  Create video                    #
        ####################################################

        # Create the MOV video and stage it in RAM (/dev/shm) for now...
        os.system(
            "ffmpeg -r " + str(fadeInOptions.framerate) +
            " -f image2 -i /dev/shm/sequence/%*.png -vcodec png /dev/shm/animated_text_temp/to_be_stretched.mov"
        )

        # ...then work out the ffmpeg filter settings...
        if scriptParagraph.voiceTrack is None: stretchFilter = ""
        else:
            stretchFilter = '-filter:v "setpts=' + str(
                round(scriptParagraph.voiceTrack.length, 3) / getVideoLength(
                    "/dev/shm/animated_text_temp/to_be_stretched.mov") *
                1.004) + '*PTS"'

        # ...and, once the length is known, stretch the video to match it and save it to the final output folder
        os.system(
            "ffmpeg -i /dev/shm/animated_text_temp/to_be_stretched.mov " +
            stretchFilter + " -vcodec png ../animated_text/" +
            sessionFolderName + "/" +
            convertToLetters(currentScriptParagraphIndex) + ".mov")

        if len(sys.argv) >= 2 and sys.argv[1] == "-c":
            os.system("yes | cp ../animated_text/" + sessionFolderName +
                      "/*.mov '" + customOutputFolderVideoClips + "'")
            os.system("yes | cp ../voice_tracks/*.wav '" +
                      customOutputFolderVoiceTracks + "'")

            if len(sys.argv) >= 3 and sys.argv[2] == "-r":
                os.system(
                    "ffmpeg -y -i ../animated_text/" + sessionFolderName +
                    "/" + convertToLetters(currentScriptParagraphIndex) +
                    ".mov '" + customOutputFolderVideoClips + "/renpy/" +
                    str(currentScriptParagraphIndex) +
                    ".avi' -filter_complex 'color=black,format=rgb24[c];[c][0]scale2ref[c][i];[c][i]overlay=format=auto:shortest=1,setsar=1'"
                )

        # Now clean up the folders...
        os.system("rm /dev/shm/sequence/*.png")
        os.system("rm /dev/shm/animated_text_temp/to_be_stretched.mov")

        ####################################################
        #               Finish the iteration               #
        ####################################################

        # Prepare the next iteration
        currentScriptParagraphIndex += 1
Example #40
def load_config(path):
    from types import SimpleNamespace
    with open(path) as f:
        config = json.load(f, object_hook=lambda d: SimpleNamespace(**d))
    return config
Example #41
from label_studio.tasks import Tasks

logger = logging.getLogger(__name__)

app = flask.Flask(__name__, static_url_path='')

app.secret_key = 'A0Zrdqwf1AQWj12ajkhgFN]dddd/,?RfDWQQT'
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
app.config['WTF_CSRF_ENABLED'] = False

# input arguments
input_args = None
if os.path.exists('server.json'):
    try:
        with open('server.json') as f:
            input_args = SimpleNamespace(**json.load(f))
    except Exception:
        pass


def project_get_or_create(multi_session_force_recreate=False):
    """
    Return an existing project or create a new one based on the environment. Currently supported methods:
    - "fixed": project is based on "project_name" attribute specified by input args when app starts
    - "session": project is based on "project_name" key restored from flask.session object
    :return:
    """
    if input_args.command == 'start-multi-session':
        # get user from session
        if 'user' not in session:
            session['user'] = str(uuid4())
Example #42
def to_obj(data: Union[dict, str]):
    if isinstance(data, dict):
        data = str(data).replace('\'', '"')
    return json.loads(data, object_hook=lambda d: SimpleNamespace(**d))
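Round-tripping a dict through str() plus quote replacement is fragile: values such as None, True, or strings containing apostrophes produce invalid JSON. When the input is already a dict, json.dumps is the safer bridge; a hedged alternative sketch:

import json
from types import SimpleNamespace

def to_obj_safe(data):
    # json.dumps always emits valid JSON, regardless of quoting or None/True/False
    if isinstance(data, dict):
        data = json.dumps(data)
    return json.loads(data, object_hook=lambda d: SimpleNamespace(**d))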
Example #43
def invite_client_to_project(client, project):
    if apps.is_installed('annotators'):
        return client.get(f'/annotator/invites/{project.token}/')
    else:
        return SimpleNamespace(status_code=200)
Example #44
def transform_values_from_rinex_obs(rinex_data):
    '''
    Transforms output from `parse_obs` to more useful format.
    
    Input:
    -------
    `rinex_data` -- Python dictionary with format:
        {<sat_id>: {'time': [<dt...>], <obs_id>: [<values...>]}}
        
    Output:
    -------
    `data` -- namespace containing:
        `satellites` -- dictionary of format {<sat_id>: <namespace>} with
        <namespace> containing time array and signal namespaces.  Each 
        signal namespace contains arrays of any measurements for that 
        corresponding signal.
    '''
    rinex_obs_datatypes_mapping = {
        'C1': {
            'signal': 'L1',
            'name': 'pr'
        },
        'L1': {
            'signal': 'L1',
            'name': 'carrier'
        },
        'D1': {
            'signal': 'L1',
            'name': 'doppler'
        },
        'S1': {
            'signal': 'L1',
            'name': 'snr'
        },
        'C2': {
            'signal': 'L2',
            'name': 'pr'
        },
        'P2': {
            'signal': 'L2',
            'name': 'pr'
        },
        'L2': {
            'signal': 'L2',
            'name': 'carrier'
        },
        'D2': {
            'signal': 'L2',
            'name': 'doppler'
        },
        'S2': {
            'signal': 'L2',
            'name': 'snr'
        },
    }
    data = {}
    for sat_id, rnx_sat in rinex_data.items():
        if sat_id not in data.keys():
            data[sat_id] = SimpleNamespace(signals={})
        sat = data[sat_id]
        for obs_name, mapping in rinex_obs_datatypes_mapping.items():
            if obs_name in rnx_sat.keys():
                signal = mapping['signal']
                if signal not in sat.signals.keys():
                    sat.signals[signal] = SimpleNamespace()
                setattr(sat.signals[signal], mapping['name'],
                        array(rnx_sat[obs_name]))
        if 'time' in rnx_sat.keys():
            sat.time = array(rnx_sat['time'])
    return data
Example #45
import webbrowser

pio.renderers.default = "browser"
tm = tb.Terminal()
_ = Input, Output, State, dcc, html, daq
_ = go, px, make_subplots

CONFIG = SimpleNamespace(
    displayModeBar=True,  # always visible.
    staticPlot=False,
    scrollZoom=False,
    doubleClick="reset",
    showTips=True,
    toImageButtonOptions={
        'format': 'png',  # one of png, svg, jpeg, webp
        'filename': 'custom_image',
        'height': 1500,  # None means use currently rendered size.
        'width': 1500,
        'scale': 1  # Multiply title/legend/axis/canvas sizes by this factor
    },
    modeBarButtonsToAdd=[
        'drawline', 'drawopenpath', 'drawclosedpath', 'drawcircle', 'drawrect',
        'eraseshape'
    ])


class App:
    @staticmethod
    def run(app, debug=False, random_port=True):
        """"Random ports prevent multile programs from crashing into each other."""
        host = "127.0.0.1"
Example #46
def parse_rinex_header(lines):
    '''
    Given list of lines corresponding to the header of a RINEX file, parses
    the header of the file and returns a namespace containing the header information.
    
    Input
    -----
    `lines` -- lines corresponding to RINEX header
    
    Output
    ------
    namespace containing RINEX header information
    '''
    header = SimpleNamespace()
    lines = iter(lines)
    try:
        while True:
            line = next(lines)
            if line[60:].strip() == 'RINEX VERSION / TYPE':
                header.version = line[:20].strip()
                header.type = line[20:60].strip()
            elif line[60:].strip() == 'PGM / RUN BY / DATE':
                header.program = line[:20].strip()
                header.run_by = line[20:40].strip()
                header.date = line[40:60].strip()
            elif line[60:].strip() == 'MARKER NAME':
                header.marker_name = line[:60].strip()
            elif line[60:].strip() == 'MARKER NUMBER':
                header.marker_number = line[:60].strip()
            elif line[60:].strip() == 'OBSERVER / AGENCY':
                header.observer = line[:20].strip()
                header.agency = line[20:60].strip()
            elif line[60:].strip() == 'REC # / TYPE / VERS':
                header.receiver_number = line[:20].strip()
                header.receiver_type = line[20:40].strip()
                header.receiver_version = line[40:60].strip()
            elif line[60:].strip() == 'ANT # / TYPE':
                header.antenna_number = line[:20].strip()
                header.antenna_type = line[20:60].strip()
            elif line[60:].strip() == 'APPROX POSITION XYZ':
                header.approximate_position_xyz = line[:60].strip()
            elif line[60:].strip() == 'ANTENNA: DELTA H/E/N':
                header.delta_hen = line[:60].strip()
            elif line[60:].strip() == 'WAVELENGTH FACT L1/2':
                header.wavelength_fact_l12 = line[:60].strip()
            elif line[60:].strip() == 'TIME OF FIRST OBS':
                header.time_of_first_obs = line[:60].strip()
            elif line[60:].strip() == '# / TYPES OF OBSERV':
                header.n_obs = int(line[:10])
                header.obs_types = line[10:58].split()
            elif line[60:].strip() == 'COMMENT':
                pass
    except StopIteration:
        pass
    return header
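RINEX headers are fixed-width: columns 0-59 hold the value and columns 60+ the label, which is exactly what the slicing above keys on. A small sketch with a synthetic header line:

line = '{:<60}{}'.format('     2.11           OBSERVATION DATA', 'RINEX VERSION / TYPE')
header = parse_rinex_header([line])
assert header.version == '2.11'
assert header.type == 'OBSERVATION DATA'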
Example #47
def Ycalc(
    tippos: int,  # tunneling spin
    #u: float,  # elastic tunneling
    ke: KronSpinSpace,  # The kronecker space with the electron
    eigvec,
    u=0.0,
    mxt_fmt="csc",
    use_sparse=False,
    with_progress=False,
):

    t = TicToc()

    indexes = ke.indexes
    fmt_Sσ = mxt_fmt
    fmt_ϕσ = mxt_fmt

    j = ke.dim - 1  # electron position

    t.tic("Sσ_s")
    Sσ_s = sparse.coo_matrix(ke.x.shape)
    for i in range(j):
        Sσ_s += (ke.x.prod(i, j) + ke.y.prod(i, j) + ke.z.prod(i, j))
    Sσ_s = Sσ_s.asformat(fmt_Sσ)
    t.toc("Sσ_s")

    t.tic("Sσ_t")
    tip = indexes[tippos]

    Sσ_t = (ke.x.prod(tip, j) + ke.y.prod(tip, j) + ke.z.prod(tip, j))

    if u != 0.0:
        Sσ_t += u * sparse.eye(Sσ_t.shape[0])
        Sσ_s += u * sparse.eye(Sσ_s.shape[0])
    Sσ_t = Sσ_t.asformat(fmt_Sσ)
    t.toc("Sσ_t")

    t.tic("ϕσ")

    ϕσ = sparse.kron(eigvec, np.array([[1, 0], [0, 1]]), format="csc")
    t.toc("ϕσ")

    N = eigvec.shape[1]

    Ys = np.empty((2 * N, 2 * N), dtype=complex)
    Yt = np.empty((2 * N, 2 * N), dtype=complex)

    if use_sparse:

        def _get(k):
            return ϕσ.getcol(k)
    else:
        if sparse.issparse(ϕσ):
            ϕσ = ϕσ.todense()

        def _get(k):
            return ϕσ[:, k]

    lst = list(range(2 * N))
    if with_progress:
        lst = tqdm(lst)

    for i in lst:
        m = _get(i)
        right_s = Sσ_s.dot(m)
        right_t = Sσ_t.dot(m)
        for j in range(i + 1):
            m = _get(j).getH()
            Ys[i, j] = m.dot(right_s)[0, 0]
            Yt[i, j] = m.dot(right_t)[0, 0]
            if i != j:
                Ys[j, i] = Ys[i, j]
                Yt[j, i] = Yt[i, j]

    return SimpleNamespace(s=np.square(np.abs(Ys)), t=np.square(np.abs(Yt)))
Example #48
from common import EpsilonTracker
from data import MAgentEnv
from model import DQNModel, MAgentPreprocessor, obtain_dqn_loss

MAP_SIZE = 64
COUNT_DEERS = 50
COUNT_TIGERS = 15
WALLS_DENSITY = 0.04

PARAMETERS = SimpleNamespace(
    **{
        'run_name': 'tigers',
        'stop_reward': None,
        'replay_size': 1000000,
        'replay_initial': 100,
        'target_net_sync': 1000,
        'epsilon_frames': 5 * 10**5,
        'epsilon_start': 1.0,
        'epsilon_final': 0.02,
        'learning_rate': 1e-4,
        'gamma': 0.99,
        'batch_size': 32
    })

TEST_REWARD_METRIC: str = 'test_reward'
TEST_STEPS_METRTIC: str = 'test_steps'
TEST_DEERS_METRIC: str = 'test_deers'


def test_model(dqn_model: DQNModel, device: torch.device,
               configuration: Config) -> Tuple[float, float, float]:
    gridworld_test: GridWorld = GridWorld(configuration, map_size=MAP_SIZE)
Example #49
                'JINA_POD_NAME',
                'JINA_PROFILING',
                'JINA_SOCKET_HWM',
                'JINA_STACK_CONFIG',
                'JINA_TEST_CONTAINER',
                'JINA_TEST_GPU',
                'JINA_TEST_PRETRAINED',
                'JINA_VCS_VERSION',
                'JINA_VERSION',
                'JINA_WARN_UNNAMED',)

__default_host__ = os.environ.get('JINA_DEFAULT_HOST', '0.0.0.0')
__ready_msg__ = 'ready and listening'
__stop_msg__ = 'terminated'

JINA_GLOBAL = SimpleNamespace()
JINA_GLOBAL.imported = SimpleNamespace()
JINA_GLOBAL.imported.executors = False
JINA_GLOBAL.imported.drivers = False
JINA_GLOBAL.stack = SimpleNamespace()
JINA_GLOBAL.stack.id = random.randint(0, 10000)
JINA_GLOBAL.logserver = SimpleNamespace()


def import_classes(namespace: str, targets=None,
                   show_import_table: bool = False, import_once: bool = False):
    """
    Import all or selected executors into the runtime. This is called when Jina is first imported for registering the YAML
    constructor beforehand. It can be also used to import third-part or external executors.

    :param namespace: the namespace to import
Example #50
    def _display_occurrencies_DELETE(self, book, occurrencies):
        ''' Sample
            {
                "word1": {"counter": 1 },
                "word2": {"counter": 1 },
                "data": {
                            "1": ["Lee Child"],
                            "2": ["Child pippo"],
                        }
            }

        {'word1': [
                (2553, 2561),
                (2678, 3456)
                ]
        }
        '''
        words = occurrencies.keys()
        choice = ''  # menu choice
        _max = len(occurrencies)
        _max = 5
        _min = 0
        _step=4
        inx_from=_min

        # - prepare book info display data
        nsBook = SimpleNamespace(**book)  # for convenience
        dis_line=[]
        dis_line.append('')
        dis_line.append(f'book: {nsBook.title} - [{nsBook.author}]')
        dis_line.append(f'    - id: {nsBook._id}')
        dis_line.append(f'    - tags: {nsBook.tags}')


        result=[]
        for word in words:
            positions=occurrencies[word]
            """ numero di occurrencies per ogni word """
            counter = len(occurrencies[word])
            dis_line.append(f'        - item: {word} - instances: {counter}')

            for pos in positions:
                # incr counter for specific word
                # result[word]['counter'] += 1
                # counter += 1 # counter totale

                # - get text around the found word
                _from=0 if pos-_before<0 else pos-_before
                _to=pos+word_len+_after
                text=item[_from:_to].replace('\n', ' ')
                new_text = ' '.join(text.split()) # remove multiple blanks

                '''
                new_text=text.replace(cur_word, colored_word) # no good because it is case-sensitive

                redata = re.compile(re.escape(cur_word), re.IGNORECASE)
                new_text = redata.sub(colored_word, text)

                '''

                # replace word(s) with colored_word
                # loop over the words so we color each of them
                # in case several occur in the same text
                for i, w in enumerate(words):
                    colored_word = colors[i](text=w, get=True)
                    new_text = re.sub(w, colored_word, new_text, flags=re.IGNORECASE)

                # - wrap text to easy displaying
                tb=textwrap.wrap(new_text, 80, break_long_words=True)

                # - save it into result list
                result['data'][counter] = []
                result['data'][counter].extend(tb)

                if fPRINT:
                    for l in tb:
                        print('    ', l)
                    print()



        while True:
            if choice=='b': break # return to book_list

            # - display book metadata
            for line in dis_line:
                C.pYellowH(text=line, tab=8)

            ''' Display data.
                Rotate through the list, showing
                [step] results at a time
            '''

            # - set range to display menu
            if inx_from>=_max: inx_from=_max-_step
            if inx_from<0:     inx_from=0
            inx_to = inx_from+_step
            if inx_to>_max:    inx_to=_max

            # - display data
            for index in range(inx_from, inx_to):
                item = items[index+1]
                print('{0:5} - {1}'.format(index+1, item[0]))
                for line in item[1:]:
                    print(' '*7, line)
                print()

            # - Get keyboard input
            choice=prompt('[n]ext [p]rev [b]ooks_list [t]ag', validKeys='n|p|b|t')
            if   choice in ['b']: break
            elif choice in ['n']: inx_from+=_step
            elif choice in ['p']: inx_from-=_step
            elif choice in ['t']:
                if self._execute:
                    tags=prompt('Please enter TAGs (BLANK separator)')
                    book['tags'] = tags.split()
                    result = self._ePubs.updateField(rec=book, fld_name='tags')
                    if result.matched_count:
                        C.pCyanH(text='tags {0} have been added'.format(book['tags']), tab=4)
                        self._book_indexing(book, fields=['tags'])
                        print()
                else:
                    C.pCyanH(text='in DRY-RUN mode, tag setting not available', tab=4)
                    prompt()
Beispiel #51
0
import numpy as np

from torch.utils.data import TensorDataset
from collections import defaultdict
from types import SimpleNamespace
from pathlib import Path

from nes.ensemble_selection.utils import (
    make_predictions,
    evaluate_predictions,
    form_ensemble_pred,
    model_seeds,
)


METRICS = SimpleNamespace(loss="loss", accuracy="acc", error="error", ece="ece")


def check_to_avoid_overwrite(thing_to_check):
    """Makes a decorator which makes sure 'thing_to_check' is None before running func."""

    def _decorator(func):
        def careful_func(*args, force_overwrite=False, **kwargs):
            if force_overwrite:
                func(*args, **kwargs)
            else:
                if getattr(args[0], thing_to_check) is not None:
                    print(
                        "Did not run to avoid overwriting. Set force_overwrite=True to overwrite."
                    )
                    return None
                # the attribute is still None, so it is safe to run
                func(*args, **kwargs)

        return careful_func

    return _decorator
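
A minimal usage sketch (not from the original source) of the decorator above, assuming it is completed with the returns shown: it guards a hypothetical cached attribute and refuses to recompute unless force_overwrite=True is passed.

class Evaluator:
    def __init__(self):
        self.preds = None

    @check_to_avoid_overwrite("preds")
    def compute_preds(self):
        self.preds = "expensive result"

ev = Evaluator()
ev.compute_preds()                      # runs: self.preds is None
ev.compute_preds()                      # skipped: would overwrite
ev.compute_preds(force_overwrite=True)  # runs anyway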
Beispiel #52
0
def test_compact_time_remaining_column(task_time, formatted):
    task = SimpleNamespace(finished=False, time_remaining=task_time, total=100)
    column = TimeRemainingColumn(compact=True)

    assert str(column.render(task)) == formatted
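
The test above references task_time and formatted fixtures that are not shown in this excerpt; a hedged sketch of the missing wiring via pytest.mark.parametrize (the expected strings depend on the installed rich version, so this variant only smoke-checks the render):

import pytest
from types import SimpleNamespace
from rich.progress import TimeRemainingColumn

@pytest.mark.parametrize("task_time", [None, 30, 90, 3700])
def test_compact_render_smoke(task_time):
    task = SimpleNamespace(finished=False, time_remaining=task_time, total=100)
    rendered = str(TimeRemainingColumn(compact=True).render(task))
    assert rendered  # non-empty even when time_remaining is None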
Beispiel #53
0
    def _form_mk(self):
        """
        Form the stiffness matrix, and optionally the mass matrix.

        Parameters
        ----------
        nodes : 2d array_like
            3-column matrix defining the nodes: [id, x, y]
        elements : 2d array_like
            5 or 6-column column matrix of element properties::

                [node1, node2, A, E, I [, rho]]
                A   = cross-section area
                E   = Young's modulus
                I   = area moment of inertia
                rho = material density, optional

        Returns
        -------
        A SimpleNamespace with the members:

        K : 2d ndarray
            Stiffness matrix
        M : 2d ndarray
            Mass matrix; will be None if `rho` was not part of
            `elements`
        elen : 1d ndarray
            Length of each element (ordered like elements)
        etheta : 1d ndarray
            Angle from x-axis of each element (rad)
        nodes : 2d ndarray
            Sorted version of the input
        elements : 2d ndarray
            Copy of the input
        """
        nodes, elements = np.atleast_2d(self.nodes, self.elements)
        if nodes.shape[1] != 3:  # nodes must have exactly 3 columns
            raise ValueError(
                "'nodes' is incorrectly sized (must have 3 columns)"
            )
        re, ce = elements.shape
        if ce != 5 and ce != 6:
            raise ValueError(
                "'elements' is incorrectly sized (must have 5 or 6 columns)")
        rn = nodes.shape[0]  # Number of rows in nodes array.
        i = np.argsort(nodes[:, 0])  # Sorts the nodes
        nodes = nodes[i]  # Create the sorted nodes array
        ids = nodes[:, 0].astype(int)  # ids are first column of nodes

        DOF = 3 * rn  # DOF = total degrees of freedom.
        K = np.zeros((DOF, DOF))  # K is a square matrix DOFxDOF

        if ce == 6:
            rho = elements[:, 5]  # get the density of each element
            M = K.copy()  # M is a square matrix of shape DOFxDOF
        else:  # Set densities to 0 if not given.
            rho = 0.0 * elements[:, 0]
            M = None  # No mass matrix

        elen = np.empty(re)
        etheta = np.empty(re)

        N1 = elements[:, 0].astype(int)  # Node 1 IDs
        N2 = elements[:, 1].astype(int)  # Node 2 IDs

        for n, (n1, n2, (a, e, I),
                rho) in enumerate(zip(N1, N2, elements[:, 2:5], rho)):

            p1 = (ids == n1).nonzero()[0]
            p2 = (ids == n2).nonzero()[0]

            if p1.size == 0:  # node1 is undefined
                raise ValueError(
                    ("elements[{}] references undefined node: {}").format(
                        n, n1))
            else:
                p1 = p1[0]  # node1 is defined
            if p2.size == 0:
                raise ValueError(
                    ("elements[{}] references undefined node: {}").format(
                        n, n2))
            else:
                p2 = p2[0]

            xy1 = nodes[p1, 1:]

            xy2 = nodes[p2, 1:]
            l1 = xy2 - xy1  # element length (array)
            L = elen[n] = np.hypot(*l1)  # length
            angle = etheta[n] = np.arctan2(l1[1], l1[0])

            # Element (local) Bernoulli-Euler Stiffness Matrix
            bk = a * e / L
            fk = e * I / (L**3)
            k_t = np.array([
                [bk, 0, 0, -bk, 0, 0],
                [0, 12 * fk, 6 * L * fk, 0, -12 * fk, 6 * L * fk],
                [0, 6 * L * fk, 4 * L**2 * fk, 0, -6 * L * fk, 2 * L**2 * fk],
                [-bk, 0, 0, bk, 0, 0],
                [0, -12 * fk, -6 * L * fk, 0, 12 * fk, -6 * L * fk],
                [0, 6 * L * fk, 2 * L**2 * fk, 0, -6 * L * fk, 4 * L**2 * fk],
            ])

            # Element (local) Bernoulli-Euler Mass Matrix
            if ce == 6:
                m_e = rho * a * L
                m_t = (m_e / 420 * np.array([
                    [140, 0, 0, 70, 0, 0],
                    [0, 156, 22 * L, 0, 54, -13 * L],
                    [0, 22 * L, 4 * L**2, 0, 13 * L, -3 * L**2],
                    [70, 0, 0, 140, 0, 0],
                    [0, 54, 13 * L, 0, 156, -22 * L],
                    [0, -13 * L, -3 * L**2, 0, -22 * L, 4 * L**2],
                ]))

            if angle != 0.0:
                k_t = self._rot(k_t, angle)
                if ce == 6:
                    m_t = self._rot(m_t, angle)

            # Put element mass and stiffness in full mass and stiffness
            p1 *= 3  # 3 DOF
            p2 *= 3
            v = [p1, p1 + 1, p1 + 2, p2, p2 + 1, p2 + 2]  # 6 DOF
            v = np.ix_(v, v)  # 6x6
            K[v] += k_t
            if ce == 6:  # if given a density, calculate mass
                M[v] += m_t

        return SimpleNamespace(K=K,
                               M=M,
                               elen=elen,
                               etheta=etheta,
                               nodes=nodes,
                               elements=elements)
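
self._rot is called above but not included in this excerpt. A plausible sketch under the standard 2D frame transformation (written as a plain function here; as a method it would take self first):

import numpy as np

def _rot(mat, angle):
    """Rotate a 6x6 element matrix from local to global coordinates."""
    c, s = np.cos(angle), np.sin(angle)
    lam = np.array([[c, s, 0.0],
                    [-s, c, 0.0],
                    [0.0, 0.0, 1.0]])
    T = np.zeros((6, 6))
    T[:3, :3] = lam   # rotation block for node 1 DOFs
    T[3:, 3:] = lam   # rotation block for node 2 DOFs
    return T.T @ mat @ T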
Beispiel #54
0
def make_model_api(table_name,
                   json_schema,
                   list_decorator=empty_decorator,
                   get_decorator=empty_decorator,
                   create_decorator=empty_decorator,
                   update_decorator=empty_decorator,
                   delete_decorator=empty_decorator):
    def response_schema(operation):
        if operation == 'list':
            return {
                'type': 'object',
                'properties': {
                    'data': {
                        'type': 'array',
                        'items': json_schema
                    },
                    'count': {
                        'type': 'integer'
                    },
                    'limit': {
                        'type': 'integer'
                    },
                    'offset': {
                        'type': 'integer'
                    },
                    'sort': {
                        'type': 'string'
                    },
                    'filter': {
                        'type': 'object'
                    }
                },
                'additionalProperties': False,
                'required': ['data', 'count', 'limit', 'offset']
            }
        else:
            return json_schema

    @list_decorator
    def list(request):
        limit = int(util.get(request, 'query.limit', 100))
        offset = int(util.get(request, 'query.offset', 0))
        sort = util.get(request, 'query.sort') or '-updated_at'
        if not is_valid_sort(json_schema, sort):
            return invalid_response(
                'Invalid sort parameter, must be in the format column1,column2,column3... For descending sort, use -column1'
            )
        filter = parse_filter(json_schema, request.get('query', {}))
        count = db.count(table_name, filter)
        docs = [
            remove_none(doc)
            for doc in db.find(table_name, limit, offset, sort, filter)
        ]
        body = {
            'count': count,
            'limit': limit,
            'offset': offset,
            'sort': sort,
            'filter': filter,
            'data': docs
        }
        return {'body': body}

    @get_decorator
    def get(request):
        id = request.get('path_params')['id']
        doc = db.find_one(table_name, id)
        if not doc:
            return {'status': 404}
        return {'body': remove_none(doc)}

    @create_decorator
    def create(request):
        data = writable_doc(json_schema, request.get('body'))
        if 'created_at' in json_schema[
                'properties'] and 'updated_at' in json_schema['properties']:
            now = datetime.now()
            data = {**data, 'created_at': now, 'updated_at': now}
        try:
            id = db.create(table_name, data)
            created_doc = db.find_one(table_name, id)
            return {'body': remove_none(created_doc)}
        except (UniqueViolation, ForeignKeyViolation) as db_error:
            return exception_response(db_error)

    @update_decorator
    def update(request):
        id = request.get('path_params')['id']
        data = writable_doc(json_schema, request.get('body'))
        try:
            if 'updated_at' in json_schema['properties']:
                data = {**data, 'updated_at': datetime.now()}
            db.update(table_name, id, data)
        except (UniqueViolation, ForeignKeyViolation) as db_error:
            return exception_response(db_error)
        updated_doc = db.find_one(table_name, id)
        if not updated_doc:
            return {'status': 404}
        return {'body': remove_none(updated_doc)}

    @delete_decorator
    def delete(request):
        id = request.get('path_params')['id']
        doc = db.find_one(table_name, id)
        if not doc:
            return {'status': 404}
        db.delete(table_name, id)
        return {'body': remove_none(doc)}

    api = {
        'response_schema': response_schema,
        'list': list,
        'get': get,
        'create': create,
        'update': update,
        'delete': delete
    }
    return SimpleNamespace(**api)
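
A hedged usage sketch: db, util and the other helpers are module-level dependencies not shown in this excerpt, and the schema below is illustrative only.

user_schema = {
    'type': 'object',
    'properties': {
        'id': {'type': 'integer'},
        'email': {'type': 'string'},
        'created_at': {'type': 'string'},
        'updated_at': {'type': 'string'},
    },
}
users_api = make_model_api('users', user_schema)
# each handler takes a request dict and returns a response dict, e.g.:
# users_api.get({'path_params': {'id': 42}})  -> {'body': {...}} or {'status': 404}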
Beispiel #55
0
    def __init__(self, n_components):
        self.n_components = n_components
        self.transformer = SimpleNamespace()
        self.batch_support = False
        self.n_iter = 2
        self.l = 2 * self.n_components


def run_main():
    try:
        parser = argparse.ArgumentParser(
            description=("""This script is a small wrapper around the
                abi-compliance-checker and abi-dumper tools, applying them
                to compare the ABI and API of the library files from two
                different Git revisions within an Mbed TLS repository.
                The results of the comparison are either formatted as HTML and
                stored at a configurable location, or are given as a brief list
                of problems. Returns 0 on success, 1 on ABI/API non-compliance,
                and 2 if there is an error while running the script.
                Note: must be run from Mbed TLS root."""))
        parser.add_argument(
            "-v",
            "--verbose",
            action="store_true",
            help="set verbosity level",
        )
        parser.add_argument(
            "-r",
            "--report-dir",
            type=str,
            default="reports",
            help="directory where reports are stored, default is reports",
        )
        parser.add_argument(
            "-k",
            "--keep-all-reports",
            action="store_true",
            help="keep all reports, even if there are no compatibility issues",
        )
        parser.add_argument(
            "-o",
            "--old-rev",
            type=str,
            help="revision for old version.",
            required=True,
        )
        parser.add_argument("-or",
                            "--old-repo",
                            type=str,
                            help="repository for old version.")
        parser.add_argument("-oc",
                            "--old-crypto-rev",
                            type=str,
                            help="revision for old crypto submodule.")
        parser.add_argument("-ocr",
                            "--old-crypto-repo",
                            type=str,
                            help="repository for old crypto submodule.")
        parser.add_argument(
            "-n",
            "--new-rev",
            type=str,
            help="revision for new version",
            required=True,
        )
        parser.add_argument("-nr",
                            "--new-repo",
                            type=str,
                            help="repository for new version.")
        parser.add_argument("-nc",
                            "--new-crypto-rev",
                            type=str,
                            help="revision for new crypto version")
        parser.add_argument("-ncr",
                            "--new-crypto-repo",
                            type=str,
                            help="repository for new crypto submodule.")
        parser.add_argument(
            "-s",
            "--skip-file",
            type=str,
            help="path to file containing symbols and types to skip")
        parser.add_argument(
            "-b",
            "--brief",
            action="store_true",
            help=
            "output only the list of issues to stdout, instead of a full report",
        )
        abi_args = parser.parse_args()
        if os.path.isfile(abi_args.report_dir):
            print("Error: {} is not a directory".format(abi_args.report_dir))
            parser.exit()
        old_version = SimpleNamespace(
            version="old",
            repository=abi_args.old_repo,
            revision=abi_args.old_rev,
            crypto_repository=abi_args.old_crypto_repo,
            crypto_revision=abi_args.old_crypto_rev,
            abi_dumps={},
            modules={})
        new_version = SimpleNamespace(
            version="new",
            repository=abi_args.new_repo,
            revision=abi_args.new_rev,
            crypto_repository=abi_args.new_crypto_repo,
            crypto_revision=abi_args.new_crypto_rev,
            abi_dumps={},
            modules={})
        configuration = SimpleNamespace(
            verbose=abi_args.verbose,
            report_dir=abi_args.report_dir,
            keep_all_reports=abi_args.keep_all_reports,
            brief=abi_args.brief,
            skip_file=abi_args.skip_file)
        abi_check = AbiChecker(old_version, new_version, configuration)
        return_code = abi_check.check_for_abi_changes()
        sys.exit(return_code)
    except Exception:  # pylint: disable=broad-except
        # Print the backtrace and exit explicitly so as to exit with
        # status 2, not 1.
        traceback.print_exc()
        sys.exit(2)
Beispiel #57
0
def generate_instances(batch_size, file_path, word_index, separator,
                       unknown_word_key):
    """
    Reads composition dataset files (txt format), with 3 entries on each line, 
    e.g. Apfel Baum Apfelbaum.

    Each entry is converted into word indices that can be looked with a lookup table.
    Converts data into batches. All batches are saved to lists.
    """

    modifier_batches, head_batches, compound_batches = [], [], []
    modifier_vec, head_vec, compound_vec = [], [], []
    text_compounds = []

    batch_index = 0
    total_size = 0

    unk_id = get_word_id(unknown_word_key, word_index, unknown_word_key)
    mh_set = set()
    mh_set.add(unk_id)

    with open(file_path, "r", encoding="utf8") as f:
        for line in f:
            line_parts = line.strip().split(separator)
            assert (len(line_parts) == 3
                    ), "error: wrong number of elements on line"

            modifier = line_parts[0]
            head = line_parts[1]
            compound = line_parts[2]

            text_compounds.append(compound)

            modifier_id = get_word_id(modifier, word_index, unknown_word_key)
            head_id = get_word_id(head, word_index, unknown_word_key)
            compound_id = get_word_id(compound, word_index, unknown_word_key)

            mh_set.add(modifier_id)
            mh_set.add(head_id)

            modifier_vec.append(modifier_id)
            head_vec.append(head_id)
            compound_vec.append(compound_id)

            batch_index += 1
            total_size += 1

            if batch_index == batch_size:
                modifier_batches.append(
                    np.asarray(modifier_vec, dtype=np.int64))
                head_batches.append(np.asarray(head_vec, dtype=np.int64))
                compound_batches.append(
                    np.asarray(compound_vec, dtype=np.int64))

                batch_index = 0
                modifier_vec, head_vec, compound_vec = [], [], []

    # append a final, smaller batch if any data is left over
    if batch_index > 0:
        modifier_batches.append(np.asarray(modifier_vec, dtype=np.int64))
        head_batches.append(np.asarray(head_vec, dtype=np.int64))
        compound_batches.append(np.asarray(compound_vec, dtype=np.int64))

    assert (len(modifier_batches) == len(head_batches) ==
            len(compound_batches)), "error: inconsistent batch size"
    assert (total_size == sum([len(batch) for batch in modifier_batches
                               ])), "error: batches missing data"

    data_batches = SimpleNamespace(modifier_batches=modifier_batches,
                                   head_batches=head_batches,
                                   compound_batches=compound_batches,
                                   text_compounds=text_compounds,
                                   no_batches=len(compound_batches),
                                   total_size=total_size,
                                   mh_set=mh_set,
                                   unk_vector_id=unk_id)

    print(
        "%d unique modifiers and heads in the dataset, including unknown vector(s)"
        % len(mh_set))
    return data_batches
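
A hedged call sketch, assuming get_word_id(word, word_index, unknown_word_key) falls back to the unknown key as its use above suggests; the file path and vocabulary are illustrative.

word_index = {"<unk>": 0, "Apfel": 1, "Baum": 2, "Apfelbaum": 3}
batches = generate_instances(batch_size=2,
                             file_path="compounds.txt",  # hypothetical path
                             word_index=word_index,
                             separator=" ",
                             unknown_word_key="<unk>")
print(batches.no_batches, batches.total_size)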
Beispiel #58
0
def initFadeInOptions(param_margin=(0, 0),
                      param_textBase=None,
                      nameIndicatorImage=None,
                      isMultiParagraph=False):
    global textAreaSize, textColor, fontPath, fontSize, fadeInSpeed, lineMargin, waitingTime, upscaleFactor, customOffsetTop, customOffsetLeft, framerate

    if isMultiParagraph:
        textAreaSizeUpscaled = (textAreaSize[0] * upscaleFactor,
                                multiParagraphHeight * upscaleFactor)

        offsetTop = customOffsetTopMultiParagraph
        offsetLeft = customOffsetLeftMultiParagraph

        maxLineWidth = textAreaSizeUpscaled[0] - offsetLeft * 4 * upscaleFactor

        fontSizeFinal = int(fontSize * 1.15)
        lineMarginFinal = lineMargin * 1.1

    else:
        textAreaSizeUpscaled = (textAreaSize[0] * upscaleFactor,
                                textAreaSize[1] * upscaleFactor)

        offsetTop = customOffsetTop
        offsetLeft = customOffsetLeft

        maxLineWidth = textAreaSizeUpscaled[0] - offsetLeft * 2 * upscaleFactor

        fontSizeFinal = fontSize
        lineMarginFinal = lineMargin

    marginTop = param_margin[1]

    if param_textBase is None:
        textBase = Image.new('RGBA', textAreaSizeUpscaled,
                             (textColor[0], textColor[1], textColor[2], 0))
    else:
        textBase = param_textBase

    return SimpleNamespace(
        textOverlay=Image.new(
            'RGBA', textAreaSizeUpscaled,
            (textColor[0], textColor[1], textColor[2], 0)),  # to be removed
        textBase=textBase,
        nameIndicatorImage=nameIndicatorImage,
        upscaleFactor=upscaleFactor,
        font=ImageFont.truetype(fontPath, fontSizeFinal * upscaleFactor),
        maxLineWidth=maxLineWidth,
        waitingTime=waitingTime,
        speed=fadeInSpeed,
        framerate=framerate,
        lineMargin=lineMarginFinal,
        lineCoordinates=[
            (offsetLeft * upscaleFactor,
             offsetTop * upscaleFactor + marginTop
             + lineMarginFinal * upscaleFactor * i)
            for i in range(13)  # 13 evenly spaced text-line positions
        ])


def api_request():
    # Handle errors gracefully: network failures and non-200 responses yield None
    try:
        response = requests.get("https://api.dccresource.com/api/games")
    except requests.RequestException:
        return None
    if response.status_code != 200:
        return None
    # object_hook turns every JSON object into an attribute-accessible SimpleNamespace
    return json.loads(response.content, object_hook=lambda d: SimpleNamespace(**d))
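
A usage sketch for the function above: the API is assumed to return a JSON array of game objects, and the name field is illustrative, not confirmed by this excerpt.

games = api_request()
if games:
    print(games[0].name)  # attribute access courtesy of the SimpleNamespace hook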
Beispiel #60
-1
    def _classify(self):
        """
        classify fixations
        """
        if self.g_pool.app == "exporter":
            return

        if self.bg_task:
            self.bg_task.cancel()

        gaze_data = [gp.serialized for gp in self.g_pool.gaze_positions]

        cap = SimpleNamespace()
        cap.frame_size = self.g_pool.capture.frame_size
        cap.intrinsics = self.g_pool.capture.intrinsics
        cap.timestamps = self.g_pool.capture.timestamps
        generator_args = (
            cap,
            gaze_data,
            np.deg2rad(self.max_dispersion),
            self.min_duration / 1000,
            self.max_duration / 1000,
            self.g_pool.min_data_confidence,
        )

        self.fixation_data = deque()
        self.fixation_start_ts = deque()
        self.fixation_stop_ts = deque()
        self.bg_task = bh.IPC_Logging_Task_Proxy(
            "Fixation detection", detect_fixations, args=generator_args
        )
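
The cap namespace above duck-types the real capture object: any object exposing frame_size, intrinsics and timestamps would satisfy detect_fixations. A hedged test stand-in (all values illustrative):

fake_cap = SimpleNamespace(frame_size=(1280, 720),
                           intrinsics=None,  # placeholder; real code passes camera intrinsics
                           timestamps=[0.0, 0.033, 0.066])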