Example #1
@contextlib.contextmanager  # yields the mocked request, so callers use it in a `with` block
def MockRequest(
    env,
    *,
    path='/mockrequest/',
    routing=True,
    multilang=True,
    context=frozendict(),
    cookies=frozendict(),
    country_code=None,
    website=None,
    sale_order_id=None,
    website_sale_current_pl=None,
):

    lang_code = context.get('lang', env.context.get('lang', 'en_US'))
    env = env(context=dict(context, lang=lang_code))

    request = Mock(
        # request
        httprequest=Mock(
            host='localhost',
            path=path,
            app=odoo.http.root,
            environ=dict(
                EnvironBuilder(path=path,
                               base_url=HttpCase.base_url()).get_environ(),
                REMOTE_ADDR=HOST,
            ),
            cookies=cookies,
            referrer='',
        ),
        type='http',
        future_response=odoo.http.FutureResponse(),
        params={},
        redirect=env['ir.http']._redirect,
        session=DotDict(
            odoo.http.DEFAULT_SESSION,
            geoip={'country_code': country_code},
            sale_order_id=sale_order_id,
            website_sale_current_pl=website_sale_current_pl,
        ),
        geoip={},
        db=None,
        env=env,
        registry=env.registry,
        cr=env.cr,
        uid=env.uid,
        context=env.context,
        lang=env['res.lang']._lang_get(lang_code),
        website=website,
    )
    if website:
        request.website_routing = website.id

    # The following code mocks match() to return a fake rule with a fake
    # 'routing' attribute (routing=True) or to raise a NotFound
    # exception (routing=False).
    #
    #   router = odoo.http.root.get_db_router()
    #   rule, args = router.bind(...).match(path)
    #   # arg routing is True => rule.endpoint.routing == {...}
    #   # arg routing is False => NotFound exception
    router = MagicMock()
    match = router.return_value.bind.return_value.match
    if routing:
        match.return_value[0].routing = {
            'type': 'http',
            'website': True,
            'multilang': multilang
        }
    else:
        match.side_effect = NotFound

    with contextlib.ExitStack() as s:
        odoo.http._request_stack.push(request)
        s.callback(odoo.http._request_stack.pop)
        s.enter_context(patch('odoo.http.root.get_db_router', router))

        yield request
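Since MockRequest pushes the mock onto odoo.http's request stack and yields it, it is meant to be used as a context manager inside a test. A hypothetical usage sketch (the test case, self.env, and the website record are assumptions for illustration, not part of the original helper):

    # Inside an Odoo website test case (hypothetical names):
    website = self.env['website'].browse(1)
    with MockRequest(self.env, website=website, country_code='BE') as request:
        # Code under test now sees the mock via odoo.http.request.
        self.assertEqual(request.website, website)
        self.assertEqual(request.session.geoip['country_code'], 'BE')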
Example #2
    def test__subscribed_values_updated__subscription_receives_events(self):
        assert (len(self._tags) == 6
                ), "Test needs to be updated to add additional data types"

        values = set()
        polling_interval = timedelta(milliseconds=500)
        date_value = datetime.now(timezone.utc)
        bool_value = False
        double_value = math.pi
        int_value = -13
        string_value = "hello there"
        uint64_value = 2**31 + 3

        with contextlib.ExitStack() as exit_stack:
            selection = exit_stack.enter_context(
                self.tag_manager.create_selection(self._tags))
            subscription = exit_stack.enter_context(
                selection.create_subscription(
                    update_interval=polling_interval))
            writer = exit_stack.enter_context(
                self.tag_manager.create_writer(buffer_size=2))

            subscription.tag_changed += lambda tag, reader: values.add(
                (tag, reader))

            tags = {t.data_type.name: t for t in self._tags}
            writer.write(tags["BOOLEAN"].path, tags["BOOLEAN"].data_type,
                         bool_value)
            writer.write(tags["DATE_TIME"].path, tags["DATE_TIME"].data_type,
                         date_value)
            writer.write(tags["DOUBLE"].path, tags["DOUBLE"].data_type,
                         double_value)
            writer.write(tags["INT32"].path, tags["INT32"].data_type,
                         int_value)
            writer.write(tags["STRING"].path, tags["STRING"].data_type,
                         string_value)
            writer.write(tags["UINT64"].path, tags["UINT64"].data_type,
                         uint64_value)
            writer.send_buffered_writes()

            # Wait for the subscription events to come in.
            for _ in range(10):
                if len(self._tags) == len(values):
                    break
                time.sleep(polling_interval.total_seconds())
            else:
                assert len(self._tags) == len(values)

        # Exactly one of each data type.
        assert len(self._tags) == len(
            values)  # len(set(t.tag.data_type for t in values))

        # Check every (tag, reader) pair the subscription reported, not just the first.
        for (tag, reader) in values:
            assert reader is not None
            if tag.data_type == tbase.DataType.DOUBLE:
                assert double_value == reader.read().value
            elif tag.data_type == tbase.DataType.INT32:
                assert int_value == reader.read().value
            elif tag.data_type == tbase.DataType.STRING:
                assert string_value == reader.read().value
            elif tag.data_type == tbase.DataType.BOOLEAN:
                assert bool_value == reader.read().value
            elif tag.data_type == tbase.DataType.UINT64:
                assert uint64_value == reader.read().value
            elif tag.data_type == tbase.DataType.DATE_TIME:
                assert date_value == reader.read().value
            else:
                assert False, "Unknown data type {}".format(tag.data_type)
Example #3
    def R(self, line, cell=None, local_ns=None):
        """
        Execute code in R, optionally returning results to the Python runtime.

        In line mode, this will evaluate an expression and convert the returned
        value to a Python object.  The return value is determined by rpy2's
        behaviour of returning the result of evaluating the final expression.

        Multiple R expressions can be executed by joining them with
        semicolons::

            In [9]: %R X=c(1,4,5,7); sd(X); mean(X)
            Out[9]: array([ 4.25])

        In cell mode, this will run a block of R code. The resulting value
        is printed if it would be printed when evaluating the same code
        within a standard R REPL.

        Nothing is returned to python by default in cell mode::

            In [10]: %%R
               ....: Y = c(2,4,3,9)
               ....: summary(lm(Y~X))

            Call:
            lm(formula = Y ~ X)

            Residuals:
                1     2     3     4
             0.88 -0.24 -2.28  1.64

            Coefficients:
                        Estimate Std. Error t value Pr(>|t|)
            (Intercept)   0.0800     2.3000   0.035    0.975
            X             1.0400     0.4822   2.157    0.164

            Residual standard error: 2.088 on 2 degrees of freedom
            Multiple R-squared: 0.6993,Adjusted R-squared: 0.549
            F-statistic: 4.651 on 1 and 2 DF,  p-value: 0.1638

        In the notebook, plots are published as the output of the cell::

            %R plot(X, Y)

        will create a scatter plot of X vs Y.

        If cell is not None and line has some R code, it is prepended to
        the R code in cell.

        Objects can be passed back and forth between rpy2 and python via the
        -i -o flags in line::

            In [14]: Z = np.array([1,4,5,10])

            In [15]: %R -i Z mean(Z)
            Out[15]: array([ 5.])


            In [16]: %R -o W W=Z*mean(Z)
            Out[16]: array([  5.,  20.,  25.,  50.])

            In [17]: W
            Out[17]: array([  5.,  20.,  25.,  50.])

        The return value is determined by these rules:

        * If the cell is not None (i.e., has contents), the magic returns None.

        * If the final line results in a NULL value when evaluated
          by rpy2, then None is returned.

        * No attempt is made to convert the final value to a structured array.
          Use %Rget to push a structured array.

        * If the -n flag is present, there is no return value.

        * A trailing ';' will also result in no return value as the last
          value in the line is an empty string.
        """

        args = parse_argstring(self.R, line)

        # any 'code' arguments given on the line are prepended to
        # the cell contents

        if cell is None:
            code = ''
            return_output = True
            line_mode = True
        else:
            code = cell
            return_output = False
            line_mode = False

        code = ' '.join(args.code) + code

        # if there is no local namespace then default to an empty dict
        if local_ns is None:
            local_ns = {}

        if args.converter is None:
            converter = self.converter
        else:
            try:
                converter = local_ns[args.converter]
            except KeyError:
                try:
                    converter = self.shell.user_ns[args.converter]
                except KeyError:
                    raise NameError("name '%s' is not defined" %
                                    args.converter)
            if not isinstance(converter, Converter):
                raise TypeError(
                    "'%s' must be a %s object (but it is a %s)." %
                    (args.converter, Converter, type(converter)))

        if args.input:
            for input in ','.join(args.input).split(','):
                try:
                    val = local_ns[input]
                except KeyError:
                    try:
                        val = self.shell.user_ns[input]
                    except KeyError:
                        raise NameError("name '%s' is not defined" % input)
                with localconverter(converter) as cv:
                    ro.r.assign(input, val)

        tmpd = self.setup_graphics(args)

        text_output = ''
        try:
            if line_mode:
                for line in code.split(';'):
                    text_result, result, visible = self.eval(line)
                    text_output += text_result
                if text_result:
                    # The last line printed something to the console so
                    # we won't return it.
                    return_output = False
            else:
                text_result, result, visible = self.eval(code)
                text_output += text_result
                if visible:
                    with contextlib.ExitStack() as stack:
                        if self.cache_display_data:
                            stack.enter_context(
                                rpy2.rinterface_lib.callbacks.obj_in_module(
                                    rpy2.rinterface_lib.callbacks,
                                    'consolewrite_print',
                                    self.write_console_regular))
                        ro.r.show(result)
                        text_output += self.flush()

        except RInterpreterError as e:
            # TODO: Maybe we should make this red or something?
            print(e.stdout)
            if not e.stdout.endswith(e.err):
                print(e.err)
            if tmpd:
                rmtree(tmpd)
            return
        finally:
            if self.device in ['png', 'svg']:
                ro.r('dev.off()')

        if text_output:
            # display_data.append(('RMagic.R', {'text/plain':text_output}))
            displaypub.publish_display_data(data={'text/plain': text_output},
                                            source='RMagic.R')
        # publish the R images
        if self.device in ['png', 'svg']:
            display_data, md = self.publish_graphics(tmpd, args.isolate_svgs)

            for tag, disp_d in display_data:
                displaypub.publish_display_data(data=disp_d,
                                                source=tag,
                                                metadata=md)

            # kill the temporary directory - currently created only for "svg"
            # and "png" (else it's None)
            rmtree(tmpd)

        if args.output:
            with localconverter(converter) as cv:
                for output in ','.join(args.output).split(','):
                    output_ipy = ro.globalenv.find(output)
                    self.shell.push({output: output_ipy})

        # this will keep a reference to the display_data,
        # which might be useful to other objects that happen
        # to use this method

        if self.cache_display_data:
            self.display_cache = display_data

        # We're in line mode and return_output is still True,
        # so return the converted result
        if return_output and not args.noreturn:
            if result is not ri.NULL:
                with localconverter(converter) as cv:
                    res = cv.rpy2py(result)
                return res
Example #4
@contextlib.contextmanager
def nested(*contexts):
    # Reimplements the removed contextlib.nested() on top of ExitStack.
    with contextlib.ExitStack() as stack:
        yield [stack.enter_context(c) for c in contexts]
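A brief usage sketch of the re-implemented nested() above (the file names are only illustrative):

    # Open several files at once and let ExitStack close them all on exit.
    with nested(open('a.txt'), open('b.txt')) as (fa, fb):
        print(fa.read(), fb.read())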
# contextlib_exitstack_callbacks_error.py
import contextlib


def callback(*args, **kwds):
    print('closing callback({}, {})'.format(args, kwds))


try:
    with contextlib.ExitStack() as stack:
        stack.callback(callback, 'arg1', 'arg2')
        stack.callback(callback, arg3='val3')
        raise RuntimeError('thrown error')
except RuntimeError as err:
    print('ERROR: {}'.format(err))
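For reference, the registered callbacks run in last-in, first-out order when the with block unwinds, so running this script should print roughly:

    closing callback((), {'arg3': 'val3'})
    closing callback(('arg1', 'arg2'), {})
    ERROR: thrown error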
    
Example #6
File: utils.py Project: ryo-n/oj
def exec_command(
        command_str: str,
        *,
        stdin: Optional[IO[Any]] = None,
        input: Optional[bytes] = None,
        timeout: Optional[float] = None,
        gnu_time: Optional[str] = None
) -> Tuple[Dict[str, Any], subprocess.Popen]:
    if input is not None:
        assert stdin is None
        stdin = subprocess.PIPE  # type: ignore
    if gnu_time is not None:
        context: Any = tempfile.NamedTemporaryFile(delete=True)
    else:
        context = contextlib.ExitStack(
        )  # TODO: we should use contextlib.nullcontext() if possible
    with context as fh:
        command = shlex.split(command_str)
        if gnu_time is not None:
            command = [gnu_time, '-f', '%M', '-o', fh.name, '--'] + command
        if os.name == 'nt':
            # HACK: without this encoding and decoding, something randomly fails with multithreading; see https://github.com/kmyk/online-judge-tools/issues/468
            command = command_str.encode().decode()  # type: ignore
        begin = time.perf_counter()

        # We need to kill processes spawned by the "time" command using process groups. Without this, orphans are left behind. see https://github.com/kmyk/online-judge-tools/issues/640
        preexec_fn = None
        if gnu_time is not None and os.name == 'posix':
            preexec_fn = os.setsid

        try:
            proc = subprocess.Popen(command,
                                    stdin=stdin,
                                    stdout=subprocess.PIPE,
                                    stderr=sys.stderr,
                                    preexec_fn=preexec_fn)
        except FileNotFoundError:
            logger.error('No such file or directory: %s', command)
            sys.exit(1)
        except PermissionError:
            logger.error('Permission denied: %s', command)
            sys.exit(1)
        answer: Optional[bytes] = None
        try:
            answer, _ = proc.communicate(input=input, timeout=timeout)
        except subprocess.TimeoutExpired:
            pass
        finally:
            if preexec_fn is not None:
                try:
                    os.killpg(os.getpgid(proc.pid), signal.SIGTERM)
                except ProcessLookupError:
                    pass
            else:
                proc.terminate()

        end = time.perf_counter()
        memory: Optional[float] = None
        if gnu_time is not None:
            with open(fh.name) as fh1:
                reported = fh1.read()
            logger.debug('GNU time says:\n%s', reported)
            if reported.strip() and reported.splitlines()[-1].isdigit():
                memory = int(reported.splitlines()[-1]) / 1000
    info = {
        'answer': answer,  # Optional[bytes]
        'elapsed': end - begin,  # float, in seconds
        'memory': memory,  # Optional[float], in megabytes
    }
    return info, proc
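Example #7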
def main():
    parser = DownloaderArgumentParser()
    parser.add_argument('--name', metavar='PAT[,PAT...]',
        help='download only models whose names match at least one of the specified patterns')
    parser.add_argument('--list', type=Path, metavar='FILE.LST',
        help='download only models whose names match at least one of the patterns in the specified file')
    parser.add_argument('--all',  action='store_true', help='download all available models')
    parser.add_argument('--print_all', action='store_true', help='print all available models')
    parser.add_argument('--precisions', metavar='PREC[,PREC...]',
                        help='download only models with the specified precisions (actual for DLDT networks)')
    parser.add_argument('-o', '--output_dir', type=Path, metavar='DIR',
        default=Path.cwd(), help='path where to save models')
    parser.add_argument('--cache_dir', type=Path, metavar='DIR',
        help='directory to use as a cache for downloaded files')
    parser.add_argument('--num_attempts', type=positive_int_arg, metavar='N', default=1,
        help='attempt each download up to N times')
    parser.add_argument('--progress_format', choices=('text', 'json'), default='text',
        help='which format to use for progress reporting')
    # unlike Model Converter, -jauto is not supported here, because CPU count has no
    # relation to the optimal number of concurrent downloads
    parser.add_argument('-j', '--jobs', type=positive_int_arg, metavar='N', default=1,
        help='how many downloads to perform concurrently')

    args = parser.parse_args()

    def make_reporter(context):
        return common.Reporter(context,
            enable_human_output=args.progress_format == 'text',
            enable_json_output=args.progress_format == 'json')

    reporter = make_reporter(common.DirectOutputContext())

    cache = NullCache() if args.cache_dir is None else DirCache(args.cache_dir)
    models = common.load_models_from_args(parser, args)

    failed_models = set()

    if args.precisions is None:
        requested_precisions = common.KNOWN_PRECISIONS
    else:
        requested_precisions = set(args.precisions.split(','))
        unknown_precisions = requested_precisions - common.KNOWN_PRECISIONS
        if unknown_precisions:
            sys.exit('Unknown precisions specified: {}.'.format(', '.join(sorted(unknown_precisions))))

    with contextlib.ExitStack() as exit_stack:
        session_factory = ThreadSessionFactory(exit_stack)
        if args.jobs == 1:
            results = [download_model(reporter, args, cache, session_factory, requested_precisions, model)
                for model in models]
        else:
            results = common.run_in_parallel(args.jobs,
                lambda context, model: download_model(
                    make_reporter(context), args, cache, session_factory, requested_precisions, model),
                models)

    failed_models = {model.name for model, successful in zip(models, results) if not successful}

    if failed_models:
        reporter.print('FAILED:')
        for failed_model_name in failed_models:
            reporter.print(failed_model_name)
        sys.exit(1)
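ThreadSessionFactory (not shown above) presumably hands each worker thread its own requests.Session and registers it on the shared ExitStack, so every session is closed when the with block ends. A rough sketch of that idiom under those assumptions, not the project's actual implementation:

    import contextlib
    import threading

    import requests

    class SessionFactory:
        # Hypothetical per-thread session cache tied to an ExitStack's lifetime.
        def __init__(self, exit_stack):
            self._exit_stack = exit_stack
            self._lock = threading.Lock()
            self._local = threading.local()

        def __call__(self):
            session = getattr(self._local, 'session', None)
            if session is None:
                # ExitStack.enter_context() is not documented as thread-safe,
                # so guard it with a lock; entering the session ties
                # session.close() to the lifetime of the stack.
                with self._lock:
                    session = self._exit_stack.enter_context(requests.Session())
                self._local.session = session
            return session

    with contextlib.ExitStack() as stack:
        get_session = SessionFactory(stack)
        print(get_session().get('https://example.com').status_code)
    # Every session created inside the block has been closed here.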
Example #8
    def differentiable_loss(self, model, guide, *args, **kwargs):

        # get batched, enumerated, to_funsor-ed traces from the guide and model
        with plate(
                size=self.num_particles
        ) if self.num_particles > 1 else contextlib.ExitStack(), enum(
                first_available_dim=(-self.max_plate_nesting -
                                     1) if self.max_plate_nesting else None):
            guide_tr = trace(guide).get_trace(*args, **kwargs)
            model_tr = trace(replay(model, trace=guide_tr)).get_trace(
                *args, **kwargs)

        # extract from traces all metadata that we will need to compute the elbo
        guide_terms = terms_from_trace(guide_tr)
        model_terms = terms_from_trace(model_tr)

        # guide side enumeration is not supported
        if any(guide_terms["plate_to_step"].values()):
            raise NotImplementedError(
                "TraceMarkovEnum_ELBO does not yet support guide side Markov enumeration"
            )

        # build up a lazy expression for the elbo
        with funsor.terms.lazy:
            # identify and contract out auxiliary variables in the model with partial_sum_product
            contracted_factors, uncontracted_factors = [], []
            for f in model_terms["log_factors"]:
                if model_terms["measure_vars"].intersection(f.inputs):
                    contracted_factors.append(f)
                else:
                    uncontracted_factors.append(f)
            # incorporate the effects of subsampling and handlers.scale through a common scale factor
            markov_dims = frozenset({
                plate
                for plate, step in model_terms["plate_to_step"].items() if step
            })
            contracted_costs = [
                model_terms["scale"] * f
                for f in funsor.sum_product.dynamic_partial_sum_product(
                    funsor.ops.logaddexp,
                    funsor.ops.add,
                    model_terms["log_measures"] + contracted_factors,
                    plate_to_step=model_terms["plate_to_step"],
                    eliminate=model_terms["measure_vars"] | markov_dims,
                )
            ]

            costs = contracted_costs + uncontracted_factors  # model costs: logp
            costs += [-f for f in guide_terms["log_factors"]
                      ]  # guide costs: -logq

            # finally, integrate out guide variables in the elbo and all plates
            plate_vars = guide_terms["plate_vars"] | model_terms["plate_vars"]
            elbo = to_funsor(0, output=funsor.Real)
            for cost in costs:
                # compute the marginal logq in the guide corresponding to this cost term
                log_prob = funsor.sum_product.sum_product(
                    funsor.ops.logaddexp,
                    funsor.ops.add,
                    guide_terms["log_measures"],
                    plates=plate_vars,
                    eliminate=(plate_vars | guide_terms["measure_vars"]) -
                    frozenset(cost.inputs),
                )
                # compute the expected cost term E_q[logp] or E_q[-logq] using the marginal logq for q
                elbo_term = funsor.Integrate(
                    log_prob, cost,
                    guide_terms["measure_vars"] & frozenset(cost.inputs))
                elbo += elbo_term.reduce(funsor.ops.add,
                                         plate_vars & frozenset(cost.inputs))

        # evaluate the elbo, using memoize to share tensor computation where possible
        with funsor.interpretations.memoize():
            return -to_data(apply_optimizer(elbo))
Example #9
    def _TF2SummaryContext(self):
        if FLAGS.disable_tf2_summary:
            return contextlib.ExitStack()
        return self._tf2_summary_writer.as_default()
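The empty ExitStack returned above is used purely as a do-nothing context manager when TF2 summaries are disabled. On Python 3.7+ the same intent can be written with contextlib.nullcontext(); a standalone sketch of the pattern (the flag and writer names are illustrative, not the originals):

    import contextlib

    def summary_context(disable_summaries, writer):
        # Return a real context manager when summaries are enabled,
        # otherwise a no-op stand-in that still works in a `with` block.
        if disable_summaries:
            return contextlib.nullcontext()
        return writer.as_default()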
Example #10
def sh(*cmd,
       timeout=False,
       output_file=None,
       input_file=None,
       input_text=None,
       error=subprocess.STDOUT,
       ignore_dry_run=False,
       pass_fds=[]):
    try:
        cmd = list(cmd)

        if input_file and input_text:
            return 401, "Cannot use both text and file inputs"

        # if this is a dry_run, only print the commands that would be run
        if settings.dry_run and not ignore_dry_run:
            cmd = "{} cmd: {}".format(os.getcwd(), ' '.join(cmd))
            if output_file and not isinstance(output_file, int):
                cmd += " > "
                cmd += output_file

            if error and not isinstance(error, int):
                cmd += " 2> "
                cmd += error

            if input_file and not isinstance(
                    input_file, int) and os.path.isfile(input_file):
                cmd += " < "
                cmd += input_file

            print(cmd)
            return 0, None

        with contextlib.ExitStack() as onexit:
            # add input redirection if needed
            input_file = openfd(input_file, 'r', onexit, True)

            # add output redirection if needed
            output_file = openfd(output_file, 'w', onexit, False)

            # add error redirection if needed
            error = openfd(error, 'w', onexit, False)

            # run the desired command
            # use with statement to make sure proc is cleaned
            # don't use subprocess.run because we want to send SIGABRT on exit
            with subprocess.Popen(
                    cmd,
                    # Popen has no 'input' argument: when text input is given,
                    # open a stdin pipe here and feed the bytes through
                    # communicate() below.
                    **({
                        'stdin': subprocess.PIPE
                    } if input_text else {
                        'stdin': input_file
                    }),
                    stdout=output_file,
                    stderr=error,
                    pass_fds=pass_fds) as proc:

                try:
                    out, errout = proc.communicate(
                        input=bytes(input_text, encoding='utf-8') if input_text else None,
                        timeout=settings.timeout.single if timeout else None)

                    return proc.returncode, out.decode(
                        "latin-1") if out else None, errout.decode(
                            "latin-1") if errout else None
                except subprocess.TimeoutExpired:
                    if settings.timeout2gdb:
                        print("Process {} timeout".format(proc.pid))
                        proc.communicate()
                        return 124, str(None), "Subprocess Timeout 2 gdb"
                    else:
                        proc.send_signal(signal.SIGABRT)
                        proc.communicate()
                        return 124, str(None), "Subprocess Timeout 2 gdb"

    except Exception as ex:
        print("Unexpected error: %s" % ex)
        raise
Example #11
    def differentiable_loss(self, model, guide, *args, **kwargs):

        # get batched, enumerated, to_funsor-ed traces from the guide and model
        with plate(
                size=self.num_particles
        ) if self.num_particles > 1 else contextlib.ExitStack(), enum(
                first_available_dim=(-self.max_plate_nesting -
                                     1) if self.max_plate_nesting else None):
            guide_tr = trace(guide).get_trace(*args, **kwargs)
            model_tr = trace(replay(model, trace=guide_tr)).get_trace(
                *args, **kwargs)

        # extract from traces all metadata that we will need to compute the elbo
        guide_terms = terms_from_trace(guide_tr)
        model_terms = terms_from_trace(model_tr)

        # build up a lazy expression for the elbo
        with funsor.terms.lazy:
            # identify and contract out auxiliary variables in the model with partial_sum_product
            contracted_factors, uncontracted_factors = [], []
            for f in model_terms["log_factors"]:
                if model_terms["measure_vars"].intersection(f.inputs):
                    contracted_factors.append(f)
                else:
                    uncontracted_factors.append(f)
            # incorporate the effects of subsampling and handlers.scale through a common scale factor
            contracted_costs = [
                model_terms["scale"] * f
                for f in funsor.sum_product.partial_sum_product(
                    funsor.ops.logaddexp,
                    funsor.ops.add,
                    model_terms["log_measures"] + contracted_factors,
                    plates=model_terms["plate_vars"],
                    eliminate=model_terms["measure_vars"],
                )
            ]

            # accumulate costs from model (logp) and guide (-logq)
            costs = contracted_costs + uncontracted_factors  # model costs: logp
            costs += [-f for f in guide_terms["log_factors"]
                      ]  # guide costs: -logq

            # compute expected cost
            # Cf. pyro.infer.util.Dice.compute_expectation()
            # https://github.com/pyro-ppl/pyro/blob/0.3.0/pyro/infer/util.py#L212
            # TODO Replace this with funsor.Expectation
            plate_vars = guide_terms["plate_vars"] | model_terms["plate_vars"]
            # compute the marginal logq in the guide corresponding to each cost term
            targets = dict()
            for cost in costs:
                input_vars = frozenset(cost.inputs)
                if input_vars not in targets:
                    targets[input_vars] = funsor.Tensor(
                        funsor.ops.new_zeros(
                            funsor.tensor.get_default_prototype(),
                            tuple(v.size for v in cost.inputs.values()),
                        ),
                        cost.inputs,
                        cost.dtype,
                    )
            with AdjointTape() as tape:
                logzq = funsor.sum_product.sum_product(
                    funsor.ops.logaddexp,
                    funsor.ops.add,
                    guide_terms["log_measures"] + list(targets.values()),
                    plates=plate_vars,
                    eliminate=(plate_vars | guide_terms["measure_vars"]),
                )
            marginals = tape.adjoint(funsor.ops.logaddexp, funsor.ops.add,
                                     logzq, tuple(targets.values()))
            # finally, integrate out guide variables in the elbo and all plates
            elbo = to_funsor(0, output=funsor.Real)
            for cost in costs:
                target = targets[frozenset(cost.inputs)]
                logzq_local = marginals[target].reduce(
                    funsor.ops.logaddexp,
                    frozenset(cost.inputs) - plate_vars)
                log_prob = marginals[target] - logzq_local
                elbo_term = funsor.Integrate(
                    log_prob,
                    cost,
                    guide_terms["measure_vars"] & frozenset(log_prob.inputs),
                )
                elbo += elbo_term.reduce(funsor.ops.add,
                                         plate_vars & frozenset(cost.inputs))

        # evaluate the elbo, using memoize to share tensor computation where possible
        with funsor.interpretations.memoize():
            return -to_data(apply_optimizer(elbo))
Example #12
def main():
    import argparse

    formats = loading.get_formats()

    parser = argparse.ArgumentParser(
        formatter_class=type(
            "_HelpFormatter",
            (argparse.ArgumentDefaultsHelpFormatter, argparse.RawTextHelpFormatter),
            {},
        )
    )
    parser.print_usage = parser.print_help  # hack
    parser.add_argument(
        "--log",
        choices=list(logging._nameToLevel.keys()),
        default="INFO",
        dest="log_level",
        help="-",
    )
    parser.add_argument("-q", "--quiet", action="store_true", help="-")
    parser.add_argument("--debug", action="store_true", help="-")

    subparsers = parser.add_subparsers(dest="subcommand", title="subcommands")
    subparsers.required = True

    # merge
    fn = merge
    sparser = subparsers.add_parser(
        fn.__name__, help=fn.__doc__, formatter_class=parser.formatter_class
    )
    sparser.set_defaults(subcommand=fn)
    sparser.add_argument("files", nargs="*", default=None, help="-")
    sparser.add_argument("--dst", default=None, help="-")
    sparser.add_argument("--strict", action="store_true", help="-")
    sparser.add_argument("--style", default="ref", choices=["ref", "whole"], help="-")
    sparser.add_argument("--wrap", default=None, help="-")
    sparser.add_argument("--wrap-section", default="definitions", help="-")
    # tojsonschema

    fn = tojsonschema
    sparser = subparsers.add_parser(
        fn.__name__, help=fn.__doc__, formatter_class=parser.formatter_class
    )
    sparser.set_defaults(subcommand=fn)
    sparser.add_argument("--src", default=None, help="-")
    sparser.add_argument("--dst", default=None, help="-")
    sparser.add_argument("--name", default="top", help="-")

    # json2swagger
    fn = json2swagger
    sparser = subparsers.add_parser(
        fn.__name__, help=fn.__doc__, formatter_class=parser.formatter_class
    )
    sparser.set_defaults(subcommand=fn)
    sparser.add_argument("files", nargs="*", default=None, help="-")
    sparser.add_argument("--dst", default=None, help="-")
    sparser.add_argument(
        "-o", "--output-format", default=None, choices=formats, help="-"
    )
    sparser.add_argument("--name", default="top", help="-")
    sparser.add_argument("--detector", default="Detector", help="-")
    sparser.add_argument("--emitter", default="Emitter", help="-")
    sparser.add_argument("--annotate", default=None, help="-")
    sparser.add_argument(
        "--emit", default="schema", choices=["schema", "info"], help="-"
    )
    sparser.add_argument("--with-minimap", action="store_true", help="-")
    sparser.add_argument("--without-example", action="store_true", help="-")

    # flatten
    fn = flatten
    sparser = subparsers.add_parser(
        fn.__name__, help=fn.__doc__, formatter_class=parser.formatter_class
    )
    sparser.set_defaults(subcommand=fn)
    sparser.add_argument("src", nargs="?", default=None, help="-")
    sparser.add_argument("--dst", default=None, help="-")
    sparser.add_argument(
        "-i", "--input-format", default=None, choices=formats, help="-"
    )
    sparser.add_argument(
        "-o", "--output-format", default=None, choices=formats, help="-"
    )
    sparser.add_argument("-f", "--format", default=None, choices=formats, help="-")

    args = parser.parse_args()

    with contextlib.ExitStack() as s:
        params = vars(args)
        if params.pop("quiet"):
            args.log_level = logging._levelToName[logging.WARNING]
            s.enter_context(warnings.catch_warnings())
            warnings.simplefilter("ignore")
        logging.basicConfig(level=getattr(logging, params.pop("log_level")))
        with traceback_shortly(params.pop("debug")):
            return params.pop("subcommand")(**params)
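Here the ExitStack lets the code enter warnings.catch_warnings() only when --quiet was passed, while keeping a single with block. The same conditional-entry idiom in isolation (a sketch with a hypothetical quiet flag):

    import contextlib
    import warnings

    def run(quiet):
        with contextlib.ExitStack() as stack:
            if quiet:
                # Suppress warnings only when asked; otherwise the stack is a no-op.
                stack.enter_context(warnings.catch_warnings())
                warnings.simplefilter("ignore")
            warnings.warn("only visible when quiet is False")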
Example #13
    def setUp(self):
        self.fixtures = contextlib.ExitStack()
        self.addCleanup(self.fixtures.close)
Example #14
    def setUp(self):
        self.fixtures = contextlib.ExitStack()
        self.addCleanup(self.fixtures.close)
        self.fixtures.enter_context(tempdir_as_cwd())
        build_files(self.files)
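The ExitStack-plus-addCleanup pattern above in a self-contained form (the temporary-directory fixture below is an illustration, not the original tempdir_as_cwd helper):

    import contextlib
    import os
    import tempfile
    import unittest

    class ExampleTest(unittest.TestCase):
        def setUp(self):
            self.fixtures = contextlib.ExitStack()
            # close() unwinds every registered context when the test finishes.
            self.addCleanup(self.fixtures.close)
            self.workdir = self.fixtures.enter_context(tempfile.TemporaryDirectory())

        def test_workdir_exists(self):
            self.assertTrue(os.path.isdir(self.workdir))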
Example #15
def train(
    rank:           int,
    world_size:     int,
    *,
    resume:         bool,
    location:       str,
    name:           str,
    quiet:          bool,
    save_every:     int,
    validate_every: int,
    post_command:   str,
    explicit_args:  set[str],
    params:         Hyperparams,
):
    # Get filepath within path context
    fpath = lambda path: os.path.join(location, path) if isinstance(path, str) else os.path.join(location, *path)

    # Setup multi-gpu if used
    setup(rank, world_size)

    is_master = rank < 1  # Are we on the main node?
    is_distributed = rank != -1  # Are we performing distributed computing?
    num_workers = torch.distributed.get_world_size() if is_distributed else 1

    # Update locations
    TrainResults.subfolder = name
    Hyperparams.subfolder = name

    # Setup logger
    log.configure(
        os.path.join(location, name, "pretraining-worker=%s.log" % (rank if is_distributed else 0)),
        "DaLUKE pretraining on node %i" % rank,
        log_commit  = True,
        print_level = (Levels.INFO if quiet else Levels.DEBUG) if is_master else None,
        append      = resume,  # Append to existing log file if we are resuming training
    )

    post_time, post_command = parse_post_command(post_command)
    execute_post_command = False
    if post_time:
        log("Quitting in %.2f h and running command '%s'" % ((post_time-time.time())/3600, post_command))

    if resume:
        log("Resuming from %s" % name)
        # Load results and hyperparameters from earlier training
        res = TrainResults.load(location)
        # Close unended profiles
        close_tt(res.tt)
        TT.fuse(res.tt)
        res.tt = TT
        tmp_saved_pu = res.parameter_update
        loaded_params = Hyperparams.load(location)
        # Overwrite ff-size if given explicitly
        if "ff_size" in explicit_args:
            loaded_params.ff_size = params.ff_size
        params = loaded_params
    else:
        tmp_saved_pu = None
    log.section("Starting pretraining with the following hyperparameters", params)
    log("Training using %i workers" % num_workers)

    log("Reading metadata and entity vocabulary")
    with open(fpath(DatasetBuilder.metadata_file)) as f:
        metadata = json.load(f)
    with open(fpath(DatasetBuilder.entity_vocab_file)) as f:
        entity_vocab = json.load(f)
    log("Loaded metadata:", json.dumps(metadata, indent=4))
    log(f"Loaded entity vocabulary of {len(entity_vocab)} entities")
    if params.ent_min_mention:
        log("Removing entities with less than %i mentions" % params.ent_min_mention)
        entity_vocab = { ent: info for ent, info in entity_vocab.items()
            if info["count"] >= params.ent_min_mention or ent in {"[PAD]", "[UNK]", "[MASK]"} }
        log("After filtering, entity vocab now has %i entities" % len(entity_vocab))

    # Device should be cuda:rank or just cuda if single gpu, else cpu
    if is_distributed:
        device = torch.device("cuda", index=rank)
    else:
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    log.debug(
        "Hardware for this worker:",
        "CPU: %s" % cpuinfo.get_cpu_info()["brand_raw"],
        "GPU: %s" % (torch.cuda.get_device_name(device) if torch.cuda.is_available() else "NA"),
        sep="\t\n",
    )

    if params.entity_loss_weight:
        log("Setting up loss function with entity loss weighting")
        # Don't weigh special tokens
        weights = torch.Tensor([0, 0, 0, *(1 / info["count"] for info in entity_vocab.values() if info["count"])]).to(device)
        entity_criterion = nn.CrossEntropyLoss(weight=weights)
    else:
        log("Setting up loss function without entity loss weighting")
        entity_criterion = nn.CrossEntropyLoss()
    word_criterion = nn.CrossEntropyLoss()
    loss_calculator = lambda w, e: params.word_ent_weight * w + (1 - params.word_ent_weight) * e

    # Load dataset and training results
    bert_config = AutoConfig.from_pretrained(metadata["base-model"])
    if metadata["reduced-vocab"]:
        token_map_file = fpath(DatasetBuilder.token_map_file)
        log("Loading token map from '%s'" % token_map_file)
        token_map = np.load(token_map_file)
        tokenizer = AutoTokenizer.from_pretrained(metadata["base-model"])
        *__, unk_id = get_special_ids(tokenizer)
        token_reduction = token_map_to_token_reduction(token_map, unk_id)
    else:
        token_map = None

    log("Building dataset")
    data = DataLoader(
        location,
        metadata,
        entity_vocab,
        device,
        params.word_mask_prob,
        params.word_unmask_prob,
        params.word_randword_prob,
        params.ent_mask_prob,
        vocab_size=metadata["vocab-size"],
        token_map=token_map,
        ent_min_mention=params.ent_min_mention,
    )
    sampler = (DistributedSampler if is_distributed else RandomSampler)(data.train_examples)
    log("Built %i examples" % len(data))

    loader = data.get_dataloader(params.ff_size, sampler)
    val_loader = data.get_dataloader(params.ff_size, SequentialSampler(data.val_examples), validation=True)

    # Number of subbatches in each parameter update (batch)
    grad_accumulation_steps = params.batch_size // (params.ff_size * num_workers)
    # How many full batches can be made from the dataset
    batches_in_data = len(data) // params.batch_size
    log(
        "Parameter updates:               %i" % params.parameter_updates,
        "Subbatches per parameter update: %i" % grad_accumulation_steps,
        "Subbatches generated:            %i" % len(loader),
        "Batches needed to cover dataset: %i" % batches_in_data,
    )

    if not resume:
        # Calculate parameter differences, when at least 20k examples have been seen
        paramdiff_every = ceil(MIN_EXAMPLES_PER_PARAMDIFF / params.batch_size)
        log("Recalculating parameter differences every %i'th parameter update" % paramdiff_every)
        top_k = [1, 3, 10]
        log("Calculating top %s accuracies" % top_k)
        if validate_every:
            val_updates = unique(np.array(
                np.arange(-1, params.parameter_updates, validate_every).tolist() + [params.parameter_updates-1]
            ))[1:]
        else:
            val_updates = np.array([], dtype=int)
        res = TrainResults(
            runtime           = np.zeros(params.parameter_updates),
            lr                = np.zeros(params.parameter_updates),
            parameter_update  = 0,

            losses            = np.zeros(params.parameter_updates),
            scaled_loss       = np.zeros(params.parameter_updates),

            top_k             = top_k,
            w_losses          = np.zeros(params.parameter_updates),
            e_losses          = np.zeros(params.parameter_updates),
            w_accuracies      = np.zeros((params.parameter_updates, len(top_k))),
            e_accuracies      = np.zeros((params.parameter_updates, len(top_k))),

            val_param_updates = val_updates,
            val_losses        = np.zeros(len(val_updates)),
            val_w_losses      = np.zeros(len(val_updates)),
            val_e_losses      = np.zeros(len(val_updates)),
            val_w_accuracies  = np.zeros((len(val_updates), len(top_k))),
            val_e_accuracies  = np.zeros((len(val_updates), len(top_k))),

            paramdiff_every   = paramdiff_every,
            groups_to_slices  = None,  # Set later
            orig_params       = None,
            paramdiff_1       = None,

            luke_exclusive_params = None,  # Set later
            att_mats_from_base    = None,  # Set later

            tt = TT,
        )

    save_pus = set(range(-1, params.parameter_updates, save_every)).union({params.parameter_updates-1})
    log("Saving model at parameter updates: %s" % sorted(save_pus),
        "Validating at parameter updates: %s" % res.val_param_updates.tolist())

    # Build model, possibly by loading previous weights
    log.section("Setting up model")
    bert_config = AutoConfig.from_pretrained(metadata["base-model"])
    if params.ent_hidden_size is None:
        params.ent_hidden_size = bert_config.hidden_size
    else:
        assert params.ent_hidden_size <= bert_config.hidden_size,\
            "Entity hidden size (%i) cannot be larger than hidden size in '%s' (%i)" % (
                params.ent_hidden_size,
                metadata["base-model"],
                bert_config.hidden_size,
            )

    log("Initializing model")
    model_cls = BertAttentionPretrainTaskDaLUKE if params.bert_attention else PretrainTaskDaLUKE
    model = model_cls(
        bert_config,
        ent_vocab_size        = len(entity_vocab),
        ent_embed_size        = params.ent_embed_size,
        ent_hidden_size       = params.ent_hidden_size,
        ent_intermediate_size = params.ent_intermediate_size,
    ).to(device)
    bert_config.vocab_size = metadata["vocab-size"]
    log("Bert config", bert_config.to_json_string())

    if params.lukeinit:
        log("Initializing weights in accordance with LUKE")
        model.apply(lambda module: model.init_weights(module, bert_config.initializer_range))
    # Load parameters from base model
    if not params.no_base_model:
        log("Loading base model parameters")
        with TT.profile("Loading base model parameters"):
            base_model = AutoModelForPreTraining.from_pretrained(metadata["base-model"])
            new_weights = load_base_model_weights(
                model,
                base_model.state_dict(),
                params.bert_attention,
            )
            if metadata["reduced-vocab"]:
                log("Removing unneeded token weights")
                reduced_model = model_cls(
                    bert_config,
                    ent_vocab_size        = len(entity_vocab),
                    ent_embed_size        = params.ent_embed_size,
                    ent_hidden_size       = params.ent_hidden_size,
                    ent_intermediate_size = params.ent_intermediate_size,
                ).to(device)
                copy_with_reduced_state_dict(token_reduction, model, reduced_model)
                model = reduced_model
    else:
        new_weights = set(model.state_dict())
    # Initialize self-attention query matrices to BERT word query matrices
    att_mat_keys = set()
    if not params.bert_attention and not params.no_base_model:
        log("Initializing new attention matrices with%s PCA" % ("" if params.pcainit else "out"))
        att_mat_keys = model.init_special_attention(params.pcainit, device)
    if not resume:
        res.luke_exclusive_params = new_weights
        res.att_mats_from_base = att_mat_keys
        if is_master:
            res.orig_params = all_params(model).cpu().numpy()
    log("Pretraining model initialized with %s parameters" % thousand_seps(len(model)))

    # Unfixes params at this parameter update
    unfix_base_model_params_pu = round(params.bert_fix_prop * params.parameter_updates)
    log("Unfixing base model params after %i parameter updates" % unfix_base_model_params_pu)

    if resume:
        mpath = fpath((TrainResults.subfolder, MODEL_OUT.format(i=res.parameter_update)))
        log("Loading model from '%s'" % mpath)
        model.load_state_dict(torch.load(mpath, map_location=device))
        log(f"Resuming training saved at parameter update {res.parameter_update}")
    else:
        res.groups_to_slices, t = all_params_groups_to_slices(model, bert_config.num_hidden_layers)
        log("Parameter groups and positions", t)
        res.paramdiff_1 = { name: np.zeros(ceil(params.parameter_updates/res.paramdiff_every)) for name in res.groups_to_slices }
    if is_distributed:
        model = DDP(model, device_ids=[rank], find_unused_parameters=True)
    non_ddp_model = model.module if is_distributed else model

    log("Setting up optimizer, scaler, and learning rate scheduler")
    optimizer = get_optimizer(non_ddp_model, params.weight_decay, params.lr)
    scaler = amp.GradScaler() if params.fp16 else None
    scheduler = get_lr_scheduler(
        optimizer,
        int(params.warmup_prop * params.parameter_updates),
        params.parameter_updates,
        unfix_base_model_params_pu,
    )
    if resume:
        optimizer.load_state_dict(torch.load(fpath((TrainResults.subfolder, OPTIMIZER_OUT.format(i=res.parameter_update))), map_location=device))
        scheduler.load_state_dict(torch.load(fpath((TrainResults.subfolder, SCHEDULER_OUT.format(i=res.parameter_update))), map_location=device))
        if params.fp16:
            scaler.load_state_dict(torch.load(fpath((TrainResults.subfolder, SCALER_OUT.format(i=res.parameter_update))), map_location=device))
        res.parameter_update += 1  # We saved the data at pu i, but should now commence pu i+1

    log.debug("Time distribution before starting training", TT)
    log_memory_stats(device)

    log.section(f"Training DaLUKE for {params.parameter_updates} parameter updates")
    model.zero_grad()  # To avoid tracking of model parameter manipulation
    model.train()

    # Start with transfer learned weights locked
    fix_base_model_params(res.luke_exclusive_params, non_ddp_model, True)
    fixed_params = True

    # Save initial parameters
    if is_master and not resume:
        with TT.profile("Saving progress"):
            paths = save_training(location, params, model.module if is_distributed else model,
                res, optimizer, scheduler, scaler, -1)
            log.debug("Saved initial state to", *paths)

    batch_iter = iter(loader)
    for i in range(res.parameter_update, params.parameter_updates):
        TT.profile("Parameter update")
        res.parameter_update = i
        if i >= unfix_base_model_params_pu and fixed_params:
            log("Unfixing base model params")
            fix_base_model_params(res.luke_exclusive_params, model, False)
            fixed_params = False
        if is_distributed and i % batches_in_data == 0:
            sampler.set_epoch(i // batches_in_data)

        # Losses and accuracies for this parameter update
        t_loss, w_loss, e_loss, s_loss = 0, 0, 0, 0
        w_accuracies = np.zeros((grad_accumulation_steps, len(res.top_k)))
        e_accuracies = np.zeros((grad_accumulation_steps, len(res.top_k)))

        # Loop over enough batches to make a parameter update
        for j in range(grad_accumulation_steps):
            TT.profile("Sub-batch")
            try:
                batch = next(batch_iter)
            except StopIteration:
                batch_iter = iter(loader)
                batch = next(batch_iter)

            TT.profile("FP and gradients")
            with amp.autocast() if params.fp16 else contextlib.ExitStack():
                word_preds, ent_preds = model(batch)
                # Compute and backpropagate loss
                word_loss = word_criterion(word_preds, batch.word_mask_labels)
                ent_loss = entity_criterion(ent_preds, batch.ent_mask_labels)
                has_entities = not torch.isnan(ent_loss).item()
                ent_loss = torch.nan_to_num(ent_loss)
            loss = loss_calculator(word_loss, ent_loss)
            loss /= grad_accumulation_steps

            # Only sync parameters on grad updates, aka last pass of this loop
            with model.no_sync() if is_distributed and j < grad_accumulation_steps - 1 else contextlib.ExitStack():
                if params.fp16:
                    scaled_loss = scaler.scale(loss)
                    scaled_loss.backward()
                    s_loss += scaled_loss.item()
                else:
                    loss.backward()

            t_loss += loss.item()
            w_loss += word_loss.item() / grad_accumulation_steps
            e_loss += ent_loss.item() / grad_accumulation_steps if has_entities else 0

            if torch.cuda.is_available():
                torch.cuda.synchronize(rank if is_distributed else None)

            TT.end_profile()

            # Save accuracy for statistics
            if is_master:
                with TT.profile("Training accuracy"):
                    w_accuracies[j] = top_k_accuracy(batch.word_mask_labels, word_preds, res.top_k)
                    e_accuracies[j] = top_k_accuracy(batch.ent_mask_labels, ent_preds, res.top_k)

            TT.end_profile()

        # Update model parameters
        with TT.profile("Parameter step"):
            if params.fp16:
                scaler.step(optimizer)
                scaler.update()
            else:
                optimizer.step()
            scheduler.step()
            model.zero_grad()

        # Calculate how much gradient has changed
        if is_master and i % res.paramdiff_every == 0:
            with torch.no_grad(), TT.profile("Parameter changes"):
                log.debug("Calculating parameter changes")
                orig_pars = torch.from_numpy(res.orig_params).to(device)
                current_pars = all_params(model.module if is_distributed else model)
                absdiff = torch.abs(current_pars-orig_pars)
                for blockname, slice_ in res.groups_to_slices.items():
                    j = i // res.paramdiff_every
                    res.paramdiff_1[blockname][j] = absdiff[slice_].sum().item()
                del orig_pars, current_pars

        res.losses[i]       = t_loss
        res.w_losses[i]     = w_loss
        res.e_losses[i]     = e_loss
        res.scaled_loss[i]  = s_loss
        res.lr[i]           = scheduler.get_last_lr()[0]
        res.w_accuracies[i] = np.mean(w_accuracies, axis=0)
        res.e_accuracies[i] = np.nanmean(e_accuracies, axis=0)
        res.runtime[i]      = TT.end_profile()
        log.debug(
            "Performed parameter update %i / %i in %.2f s" % (i, params.parameter_updates-1, res.runtime[i]),
            f"  Loss (total, word, entity, scaled): {t_loss:9.4f}, {w_loss:9.4f}, {e_loss:9.4f}, {s_loss:.4f}",
            f"  Accuracy (word, entity): {100*res.w_accuracies[i, 0]:7.2f} %, {100*res.e_accuracies[i, 0]:7.2f} %",
        )

        if i in res.val_param_updates and is_master:
            TT.profile("Model validation")
            log("Validating model")
            vi = res.val_param_updates.tolist().index(i)
            res.val_w_losses[vi], res.val_e_losses[vi], res.val_w_accuracies[vi], res.val_e_accuracies[vi] =\
                validate_model(model, val_loader, word_criterion, entity_criterion, res.top_k)
            res.val_losses[vi] = loss_calculator(res.val_w_losses[vi], res.val_e_losses[vi])
            log(
                "Validation loss:",
                "  Total:  %9.4f" % res.val_losses[vi],
                "  Word:   %9.4f" % res.val_w_losses[vi],
                "  Entity: %9.4f" % res.val_e_losses[vi],
                "Validation accuracy:",
                "  Word:   %7.2f %%" % (100 * res.val_w_accuracies[vi, 0]),
                "  Entity: %7.2f %%" % (100 * res.val_e_accuracies[vi, 0]),
            )
            model.train()
            TT.end_profile()
            log.debug("Time distribution so far", TT)

        # Save results and model
        if is_master and i in save_pus:
            with TT.profile("Saving progress"):
                save_progress(location, i, tmp_saved_pu, save_pus, params,
                    model.module if is_distributed else model, res, optimizer, scheduler, scaler)
        if i in save_pus:
            log_memory_stats(device)

        # If timed out, save, quit, and run resume command
        if post_time and time.time() > post_time:
            log_memory_stats(device)
            log.section("Time limit reached. Quitting and running command '%s'" % post_command)
            with TT.profile("Saving progress"):
                save_progress(location, i, tmp_saved_pu, save_pus, params,
                    model.module if is_distributed else model, res, optimizer, scheduler, scaler)
            execute_post_command = True
            break

    log.debug("Time distribution", TT)

    # Clean up multi-gpu if used
    cleanup(rank)

    if is_master and execute_post_command:
        os.system(post_command)
Example #16
def _load_data(args, s):
    # Prepare the simulation parameters
    if args.min_maf is not None or args.max_maf is not None:
        if args.min_maf is None:
            args.min_maf = 0.01
        if args.max_maf is None:
            args.max_maf = 0.05
        s.sample_mafs(args.min_maf, args.max_maf)
    if args.annotation_vector is not None:
        logger.debug('Loading annotation vector')
        a = numpy.loadtxt(args.annotation_vector).astype('int8')
        if a.shape[0] != args.num_variants:
            raise _A('{} variants present in annotations file, but {} specified'.format(a.shape[0], args.num_variants))
        s.load_annotations(a)
    elif args.annotation_matrix is not None:
        logger.debug('Loading annotation matrix')
        with gzip.open(args.annotation_matrix, 'rt') as f:
            a = numpy.loadtxt(f).astype('int8')
        if a.shape[0] != args.num_variants:
            raise _A('{} variants present in annotations file, but {} specified'.format(a.shape[0], args.num_variants))
        if args.annotation_matrix_column < 0:
            raise _A('Annotation column must be non-negative')
        if args.annotation_matrix_column > a.shape[1]:
            raise _A('{} columns present in annotation matrix, but column {} specified'.format(a.shape[1], args.annotation_matrix_column))
        s.load_annotations(a, args.annotation_matrix_column)
    if args.permute_causal:
        logger.debug('Generating effects with permuted causal indicator')
        s.sample_effects(pve=args.pve, annotation_params=args.annotation, permute=True)
    # Load/generate the genotypes and phenotypes
    if args.load_oxstats:
        logger.debug('Loading OXSTATS datasets')
        with contextlib.ExitStack() as stack:
            data = [stack.enter_context(ctra.formats.oxstats_genotypes(*a))
                    for a in ctra.algorithms.kwise(args.load_oxstats, 2)]
            samples = list(itertools.chain.from_iterable(s for _, _, s, _ in data))
            merged = ctra.formats.merge_oxstats([d for _, _, _, d in data])
            probs = ([float(x) for x in row[5:]] for row in merged)
            if args.num_samples > len(samples):
                logger.error('{} individuals present in OXSTATS data, but {} were specified'.format(len(samples), args.num_samples))
                sys.exit(1)
            x = numpy.array(list(itertools.islice(probs, args.num_variants)))
        p, n = x.shape
        if p < args.num_variants:
            logger.error('{} variants present in OXSTATS data, but {} were specified'.format(p, args.num_variants))
            sys.exit(1)
        n = n // 3
        x = (x.reshape(p, -1, 3) * numpy.array([0, 1, 2])).sum(axis=2).T[:n,:]
        s.estimate_mafs(x)
        y = s.compute_liabilities(x)
    elif args.load_hdf5:
        with h5py.File(args.load_hdf5) as f:
            logger.debug('Loading HDF5 dataset')
            x = f['dosage'][:args.num_samples, :args.num_variants]
            logger.debug('Re-computing liabilities')
            s.estimate_mafs(x)
            y = s.compute_liabilities(x)
    else:
        if args.model == 'logistic':
            x, y = s.sample_case_control(n=args.num_samples, K=args.prevalence, P=args.study_prop)
        else:
            x, y = s.sample_gaussian(n=args.num_samples)
    # Hold out samples
    if args.model == 'logistic':
        # Randomly subsample hold out set
        validation = numpy.zeros(args.num_samples, dtype='bool')
        hold_out = s.random.choice(args.num_samples, args.validation, replace=False)
        validation[hold_out] = True
        x_validate = x[validation]
        y_validate = y[validation]
        # Permute the training set so minibatches are balanced in expectation
        perm = s.random.permutation(args.num_samples - args.validation)
        x = x[~validation][perm]
        y = y[~validation][perm]
    else:
        # Assume samples are exchangeable
        x_validate = x[-args.validation:]
        y_validate = y[-args.validation:]
        x = x[:-args.validation]
        y = y[:-args.validation]
    x -= x.mean(axis=0)
    x_validate -= x_validate.mean(axis=0)
    if args.model != 'logistic':
        y -= y.mean()
        y_validate -= y_validate.mean()
    return x, y, x_validate, y_validate
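# A minimal, hypothetical sketch of the dosage conversion used above:
# `(x.reshape(p, -1, 3) * numpy.array([0, 1, 2])).sum(axis=2)` collapses each
# per-genotype probability triple (P0, P1, P2) into an expected allele dosage
# 0*P0 + 1*P1 + 2*P2. The probabilities below are made up.
import numpy

probs = numpy.array([[0.9, 0.1, 0.0,    # first triple: mostly homozygous ref
                      0.2, 0.5, 0.3]])  # second triple: heterozygous-ish
dosages = (probs.reshape(1, -1, 3) * numpy.array([0, 1, 2])).sum(axis=2)
assert numpy.allclose(dosages, [[0.1, 1.1]])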
Example #17
0
def prune_state_dict(state_dict, model_cfg: Optional[DictConfig]):
    """Prune the given state_dict if desired for LayerDrop
    (https://arxiv.org/abs/1909.11556).

    Training with LayerDrop allows models to be robust to pruning at inference
    time. This function prunes state_dict to allow smaller models to be loaded
    from a larger model and re-maps the existing state_dict for this to occur.

    It's called by functions that load models from checkpoints and does not
    need to be called directly.
    """
    arch = None
    if model_cfg is not None:
        arch = (model_cfg._name if isinstance(model_cfg, DictConfig) else
                getattr(model_cfg, "arch", None))

    if not model_cfg or arch is None or arch == "ptt_transformer":
        # model_cfg should not be None, but don't crash if it is.
        return state_dict

    encoder_layers_to_keep = getattr(model_cfg, "encoder_layers_to_keep", None)
    decoder_layers_to_keep = getattr(model_cfg, "decoder_layers_to_keep", None)

    if not encoder_layers_to_keep and not decoder_layers_to_keep:
        return state_dict

    # apply pruning
    logger.info(
        "Pruning model to specified layer configuration - this works best if the model was trained with LayerDrop"
    )

    def create_pruning_pass(layers_to_keep, layer_name):
        keep_layers = sorted(
            int(layer_string) for layer_string in layers_to_keep.split(","))
        mapping_dict = {}
        for i in range(len(keep_layers)):
            mapping_dict[str(keep_layers[i])] = str(i)

        regex = re.compile(
            r"^{layer}.*\.layers\.(\d+)".format(layer=layer_name))
        return {"substitution_regex": regex, "mapping_dict": mapping_dict}

    pruning_passes = []
    if encoder_layers_to_keep:
        pruning_passes.append(
            create_pruning_pass(encoder_layers_to_keep, "encoder"))
    if decoder_layers_to_keep:
        pruning_passes.append(
            create_pruning_pass(decoder_layers_to_keep, "decoder"))

    new_state_dict = {}
    for layer_name in state_dict.keys():
        match = re.search(r"\.layers\.(\d+)\.", layer_name)
        # if layer has no number in it, it is a supporting layer, such as an
        # embedding
        if not match:
            new_state_dict[layer_name] = state_dict[layer_name]
            continue

        # otherwise, layer should be pruned.
        original_layer_number = match.group(1)
        # figure out which mapping dict to replace from
        for pruning_pass in pruning_passes:
            if original_layer_number in pruning_pass[
                    "mapping_dict"] and pruning_pass[
                        "substitution_regex"].search(layer_name):
                new_layer_number = pruning_pass["mapping_dict"][
                    original_layer_number]
                substitution_match = pruning_pass["substitution_regex"].search(
                    layer_name)
                new_state_key = (layer_name[:substitution_match.start(1)] +
                                 new_layer_number +
                                 layer_name[substitution_match.end(1):])
                new_state_dict[new_state_key] = state_dict[layer_name]

    # Since layers are now pruned, *_layers_to_keep are no longer needed.
    # This is more of a "make it work" fix than a proper fix.
    if isinstance(model_cfg, DictConfig):
        context = open_dict(model_cfg)
    else:
        context = contextlib.ExitStack()
    with context:
        if hasattr(model_cfg, "encoder_layers_to_keep"):
            model_cfg.encoder_layers_to_keep = None
        if hasattr(model_cfg, "decoder_layers_to_keep"):
            model_cfg.decoder_layers_to_keep = None

    return new_state_dict
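# A minimal, hypothetical usage sketch for prune_state_dict above; the config
# fields and tensor placeholders are made up, and it assumes the surrounding
# module's imports (DictConfig, re, contextlib, logger) are available. Keeping
# encoder layers "1,3" renumbers them to 0 and 1 and drops layer 0.
from types import SimpleNamespace

cfg = SimpleNamespace(arch="transformer",
                      encoder_layers_to_keep="1,3",
                      decoder_layers_to_keep=None)
full = {
    "encoder.embed_tokens.weight": "emb",  # no layer index: copied as-is
    "encoder.layers.0.fc1.weight": "l0",   # not kept: dropped
    "encoder.layers.1.fc1.weight": "l1",   # kept: renamed to layers.0
    "encoder.layers.3.fc1.weight": "l3",   # kept: renamed to layers.1
}
pruned = prune_state_dict(full, cfg)
assert pruned == {
    "encoder.embed_tokens.weight": "emb",
    "encoder.layers.0.fc1.weight": "l1",
    "encoder.layers.1.fc1.weight": "l3",
}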
Example #18
0
  def decorated(self, **kwargs):
    """A wrapped test method that can treat some arguments in a special way."""
    original_kwargs = kwargs.copy()

    # Skip combinations that are going to be executed in a different testing
    # environment.
    reasons_to_skip = []
    for combination in test_combinations:
      should_execute, reason = combination.should_execute_combination(
          original_kwargs.copy())
      if not should_execute:
        reasons_to_skip.append(' - ' + reason)

    if reasons_to_skip:
      self.skipTest('\n'.join(reasons_to_skip))

    customized_parameters = []
    for combination in test_combinations:
      customized_parameters.extend(combination.parameter_modifiers())
    customized_parameters = set(customized_parameters)

    # The function for running the test under the total set of
    # `context_managers`:
    def execute_test_method():
      requested_parameters = tf_inspect.getfullargspec(test_method).args
      for customized_parameter in customized_parameters:
        for argument, value in customized_parameter.modified_arguments(
            original_kwargs.copy(), requested_parameters).items():
          if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST:
            kwargs.pop(argument, None)
          else:
            kwargs[argument] = value

      omitted_arguments = set(requested_parameters).difference(
          set(list(kwargs.keys()) + ['self']))
      if omitted_arguments:
        raise ValueError('The test requires parameters whose arguments '
                         'were not passed: {} .'.format(omitted_arguments))
      missing_arguments = set(list(kwargs.keys()) + ['self']).difference(
          set(requested_parameters))
      if missing_arguments:
        raise ValueError('The test does not take parameters that were passed '
                         ': {} .'.format(missing_arguments))

      kwargs_to_pass = {}
      for parameter in requested_parameters:
        if parameter == 'self':
          kwargs_to_pass[parameter] = self
        else:
          kwargs_to_pass[parameter] = kwargs[parameter]
      with self.cached_session():
        test_method(**kwargs_to_pass)

    # Install `context_managers` before running the test:
    context_managers = []
    for combination in test_combinations:
      for manager in combination.context_managers(
          original_kwargs.copy()):
        context_managers.append(manager)

    if hasattr(contextlib, 'nested'):  # Python 2
      # TODO(isaprykin): Switch to ExitStack when contextlib2 is available.
      with contextlib.nested(*context_managers):
        execute_test_method()
    else:  # Python 3
      with contextlib.ExitStack() as context_stack:
        for manager in context_managers:
          context_stack.enter_context(manager)
        execute_test_method()
Example #19
0
class SubscriberMirror:
    ''' Main class: prepares the Kafka and citisim brokers and connects them '''
    def run(self, libcitisim_config, producers):
        ''' Main function '''
        citisim_broker = libcitisim.Broker(libcitisim_config)

        for topic_name, producer in producers.items():
            print("Subscribing to '{}' topic".format(topic_name))
            citisim_broker.subscribe(topic_name,
                                     EventMirrorVisitor(producer).callback)

        print("Awaiting data...")
        citisim_broker.wait_for_events()


if __name__ == "__main__":
    args = SubscriberArgumentParser().parse_args(sys.argv[1:])

    kafka_events_handler = pykafka.handlers.ThreadingHandler()
    kafka_cluster = pykafka.cluster.Cluster(hosts=args.kafka_cluster,
                                            handler=kafka_events_handler)

    with contextlib.ExitStack() as exit_stack:
        kafka_producers = {
            topic_name: exit_stack.enter_context(
                kafka_mirror_producer(kafka_cluster, topic_name))
            for topic_name in args.forward_topic
        }

        SubscriberMirror().run(libcitisim_config=args.libcitisim_args[0],
                               producers=kafka_producers)
Example #20
0
    def run(self, num_frames, preview_alpha, image_format, image_folder,
            enable_streaming):
        logger.info('Starting...')
        leds = Leds()

        with contextlib.ExitStack() as stack:
            player = stack.enter_context(Player(gpio=BUZZER_GPIO, bpm=10))
            photographer = stack.enter_context(
                Photographer(image_format, image_folder))
            animator = stack.enter_context(Animator(leds))
            # Forced sensor mode, 1640x1232, full FoV. See:
            # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
            # This is the resolution inference run on.
            # Use half of that for video streaming (820x616).
            camera = stack.enter_context(
                PiCamera(sensor_mode=4, resolution=(820, 616)))
            stack.enter_context(PrivacyLed(leds))

            server = None
            if enable_streaming:
                server = stack.enter_context(StreamingServer(camera))
                server.run()

            def take_photo():
                logger.info('Button pressed.')
                player.play(BEEP_SOUND)
                photographer.shoot(camera)

            if preview_alpha > 0:
                camera.start_preview(alpha=preview_alpha)

            button = Button(BUTTON_GPIO)
            button.when_pressed = take_photo

            joy_score_moving_average = MovingAverage(5)  # changed from 10
            prev_joy_score = 0.0
            with CameraInference(face_detection.model()) as inference:
                logger.info('Model loaded.')
                player.play(MODEL_LOAD_SOUND)
                for i, result in enumerate(inference.run()):
                    faces = face_detection.get_faces(result)
                    photographer.update_faces(faces)

                    joy_score = joy_score_moving_average.next(
                        average_joy_score(faces))
                    animator.update_joy_score(joy_score)
                    if server:
                        data = server_inference_data(result.width,
                                                     result.height, faces,
                                                     joy_score)
                        server.send_inference_data(data)

                    if joy_score > JOY_SCORE_PEAK > prev_joy_score:
                        player.play(JOY_SOUND)
                        ##                        picoSpeakNow(list_happy[np.random.randint(0,N_HAPPY)])
                        ##                        os.system('pico2wave -w test.wav "keep smiling. I feed off of smile energy... do not let the smile die down." && aplay test.wav')

                        ##                        time.sleep(3)
                        espeak_happy = 'espeak -s160 -g6 -ven+f3 ' + '"' + if_happy_list[
                            np.random.randint(0, N_HAPPY)] + '"'
                        os.system(espeak_happy)
                    elif joy_score < 0.35 < prev_joy_score:
                        player.play(SAD_SOUND)
                        espeak_sad = 'espeak -s160 -g6 -ven+f3 ' + '"' + if_sad[
                            0] + '"'
                        os.system(espeak_sad)
                    ##                        picoSpeakNow(list_sad[np.random.randint(0,N_SAD)])
                    ##                        time.sleep(3)
                    ##                        os.system('espeak "Keep smiling. I feed off of smile energy... do not let the smile die down"')
                    ##                        os.system('pico2wave -w test.wav "start smiling. I feed off of smile energy... do not let the smile die down." && aplay test.wav')

                    prev_joy_score = joy_score

                    if self._done.is_set() or i == num_frames:
                        break
Example #21
0
    def test_enable_configures_apt_sources_and_auth_files(
            self, _m_get_cloud_type, m_setup_apt_proxy, entitlement):
        """When entitled, configure apt repo auth token, pinning and url."""
        patched_packages = ["a", "b"]
        expected_conditional_packages = [
            "openssh-server",
            "openssh-server-hmac",
            "strongswan",
            "strongswan-hmac",
        ]

        with contextlib.ExitStack() as stack:
            m_add_apt = stack.enter_context(
                mock.patch("uaclient.apt.add_auth_apt_repo"))
            m_add_pinning = stack.enter_context(
                mock.patch("uaclient.apt.add_ppa_pinning"))
            m_installed_pkgs = stack.enter_context(
                mock.patch(
                    "uaclient.apt.get_installed_packages",
                    return_value=["openssh-server", "strongswan"],
                ))
            m_subp = stack.enter_context(
                mock.patch("uaclient.util.subp", return_value=("", "")))
            m_can_enable = stack.enter_context(
                mock.patch.object(entitlement, "can_enable"))
            stack.enter_context(
                mock.patch("uaclient.util.handle_message_operations"))
            stack.enter_context(
                mock.patch(M_GETPLATFORM, return_value={"series": "xenial"}))
            stack.enter_context(mock.patch(M_REPOPATH + "os.path.exists"))
            # Note that this patch uses a PropertyMock and happens on the
            # entitlement's type because packages is a property
            m_packages = mock.PropertyMock(return_value=patched_packages)
            stack.enter_context(
                mock.patch.object(type(entitlement), "packages", m_packages))
            stack.enter_context(
                mock.patch("uaclient.util.is_container", return_value=False))

            m_can_enable.return_value = (True, None)
            assert (True, None) == entitlement.enable()

        repo_url = "http://{}".format(entitlement.name.upper())
        add_apt_calls = [
            mock.call(
                "/etc/apt/sources.list.d/ubuntu-{}.list".format(
                    entitlement.name),
                repo_url,
                "{}-token".format(entitlement.name),
                ["xenial"],
                entitlement.repo_key_file,
            )
        ]
        apt_pinning_calls = [
            mock.call(
                "/etc/apt/preferences.d/ubuntu-{}".format(entitlement.name),
                repo_url,
                entitlement.origin,
                1001,
            )
        ]

        install_cmd = []
        install_cmd.append(
            mock.call(
                [
                    "apt-get",
                    "install",
                    "--assume-yes",
                    "--allow-downgrades",
                    '-o Dpkg::Options::="--force-confdef"',
                    '-o Dpkg::Options::="--force-confold"',
                ] + patched_packages,
                capture=True,
                retry_sleeps=apt.APT_RETRIES,
                env={"DEBIAN_FRONTEND": "noninteractive"},
            ))

        for pkg in expected_conditional_packages:
            install_cmd.append(
                mock.call(
                    [
                        "apt-get",
                        "install",
                        "--assume-yes",
                        "--allow-downgrades",
                        '-o Dpkg::Options::="--force-confdef"',
                        '-o Dpkg::Options::="--force-confold"',
                        pkg,
                    ],
                    capture=True,
                    retry_sleeps=apt.APT_RETRIES,
                    env={"DEBIAN_FRONTEND": "noninteractive"},
                ))

        subp_calls = [
            mock.call(
                ["apt-mark", "showholds"],
                capture=True,
                retry_sleeps=apt.APT_RETRIES,
                env={},
            ),
            mock.call(
                ["apt-get", "update"],
                capture=True,
                retry_sleeps=apt.APT_RETRIES,
                env={},
            ),
        ]
        subp_calls += install_cmd

        assert [mock.call()] == m_can_enable.call_args_list
        assert 1 == m_setup_apt_proxy.call_count
        assert 1 == m_installed_pkgs.call_count
        assert add_apt_calls == m_add_apt.call_args_list
        assert apt_pinning_calls == m_add_pinning.call_args_list
        assert subp_calls == m_subp.call_args_list
        assert [["", messages.FIPS_SYSTEM_REBOOT_REQUIRED.msg]
                ] == entitlement.cfg.read_cache("notices")
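# The "packages is a property" note in the test above is a general
# unittest.mock rule: properties live on the class, so they must be patched on
# type(obj) with a PropertyMock. A small, self-contained illustration
# (hypothetical class and values):
import unittest.mock as mock


class _Demo:
    @property
    def packages(self):
        return ["real"]


demo = _Demo()
with mock.patch.object(type(demo), "packages",
                       new_callable=mock.PropertyMock) as m_packages:
    m_packages.return_value = ["patched"]
    assert demo.packages == ["patched"]
assert demo.packages == ["real"]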
Example #22
0
 async def setUp(self):
     super().setUp()
     self._stack = contextlib.ExitStack().__enter__()
     self._astack = await contextlib.AsyncExitStack().__aenter__()
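# Entering the stacks by hand as above only works if a matching teardown
# unwinds them. A small, self-contained sketch of the same pattern, with
# hypothetical cleanup callbacks, closed explicitly via aclose()/close():
import asyncio
import contextlib


async def demo():
    stack = contextlib.ExitStack().__enter__()
    astack = await contextlib.AsyncExitStack().__aenter__()
    try:
        stack.callback(print, "sync cleanup ran")
        astack.push_async_callback(asyncio.sleep, 0)  # stand-in async cleanup
    finally:
        await astack.aclose()  # runs the async callbacks
        stack.close()          # runs the sync callbacks


asyncio.run(demo())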
Example #23
0
    def test_jsonarray2netcdf_execute(self):
        dirname = tempfile.gettempdir()
        nc_data = "Hello NetCDF!"
        with contextlib.ExitStack() as stack_exec:
            tmp_ncdf = tempfile.NamedTemporaryFile(dir=dirname,
                                                   mode="w",
                                                   suffix=".nc")
            tmp_json = tempfile.NamedTemporaryFile(dir=dirname,
                                                   mode="w",
                                                   suffix=".json")
            tmp_ncdf = stack_exec.enter_context(tmp_ncdf)  # noqa
            tmp_json = stack_exec.enter_context(tmp_json)  # noqa
            tmp_ncdf.write(nc_data)
            tmp_ncdf.seek(0)
            tmp_json.write(
                json.dumps(
                    ["file://{}".format(os.path.join(dirname,
                                                     tmp_ncdf.name))]))
            tmp_json.seek(0)
            data = {
                "mode": "async",
                "response": "document",
                "inputs": [{
                    "id": "input",
                    "href": os.path.join(dirname, tmp_json.name)
                }],
                "outputs": [{
                    "id": "output",
                    "transmissionMode": EXECUTE_TRANSMISSION_MODE_REFERENCE
                }],
            }

            for mock_exec in mocked_execute_process():
                stack_exec.enter_context(mock_exec)
            path = "/processes/jsonarray2netcdf/jobs"
            resp = mocked_sub_requests(self.app,
                                       "post_json",
                                       path,
                                       data=data,
                                       headers=self.json_headers,
                                       only_local=True)

        assert resp.status_code == 201, "Error: {}".format(resp.json)
        assert resp.content_type in CONTENT_TYPE_APP_JSON
        job_url = resp.json["location"]
        results = self.monitor_job(job_url)
        assert results["outputs"][0]["id"] == "output"
        nc_path = results["outputs"][0]["href"]
        assert isinstance(nc_path, str) and len(nc_path)
        settings = get_settings_from_testapp(self.app)
        wps_out = "{}{}".format(settings.get("weaver.url"),
                                settings.get("weaver.wps_output_path"))
        nc_real_path = nc_path.replace(wps_out,
                                       settings.get("weaver.wps_output_dir"))
        assert nc_path.startswith(wps_out)
        assert os.path.split(nc_real_path)[-1] == os.path.split(nc_path)[-1]
        assert os.path.isfile(nc_real_path)
        with open(nc_real_path, "r") as f:
            assert f.read() == nc_data
Example #24
0
    def __init__(self):
        self._serializer = cattr.Converter()
        self._serializer.register_unstructure_hook(enum.Enum, _unstructure_enum)
        self._serializer.register_unstructure_hook(
            LoadRequest,
            _with_api_key("load", self._serializer.unstructure_attrs_asdict),
        )
        self._serializer.register_unstructure_hook(
            CreateRequest,
            _with_api_key("create", self._serializer.unstructure_attrs_asdict),
        )
        self._serializer.register_unstructure_hook(
            DeleteRequest,
            _with_api_key("del", self._serializer.unstructure_attrs_asdict),
        )
        self._serializer.register_unstructure_hook(
            GetRequest, _with_api_key("get", self._serializer.unstructure_attrs_asdict)
        )
        self._serializer.register_unstructure_hook(
            StaticGetRequest,
            _with_api_key("sget", self._serializer.unstructure_attrs_asdict),
        )
        self._serializer.register_unstructure_hook(
            SetRequest, _with_api_key("set", self._serializer.unstructure_attrs_asdict)
        )
        self._serializer.register_unstructure_hook(
            StaticSetRequest,
            _with_api_key("sset", self._serializer.unstructure_attrs_asdict),
        )
        self._serializer.register_unstructure_hook(
            InvokeRequest,
            _with_api_key("invoke", self._serializer.unstructure_attrs_asdict),
        )
        self._serializer.register_unstructure_hook(
            StaticInvokeRequest,
            _with_api_key("sinvoke", self._serializer.unstructure_attrs_asdict),
        )
        self._serializer.register_unstructure_hook(
            BeginRequest,
            _with_api_key("begin", self._serializer.unstructure_attrs_asdict),
        )
        self._serializer.register_unstructure_hook(
            EndRequest, _with_api_key("end", self._serializer.unstructure_attrs_asdict)
        )
        self._serializer.register_unstructure_hook(
            CallbacksRequest,
            _with_api_key("callbacks", self._serializer.unstructure_attrs_asdict),
        )
        self._serializer.register_unstructure_hook(
            CompleteRequest,
            _with_api_key("complete", self._serializer.unstructure_attrs_asdict),
        )
        self._serializer.register_unstructure_hook(
            StatsRequest,
            _with_api_key("stats", self._serializer.unstructure_attrs_asdict),
        )
        self._serializer.register_unstructure_hook(
            Override, self._serializer.unstructure_attrs_asdict
        )
        self._serializer.register_unstructure_hook(ObjRef, _unstructure_ref)
        self._serializer.register_structure_hook(ObjRef, _with_reference)

        self._ctx_stack = contextlib.ExitStack()
Example #25
0
def main():
    parser = argparse.ArgumentParser(
        description='utility for scheduling Postgresql jobs',
    )
    
    parser.add_argument(
        '--check-config-only',
        action='store_true',
        help='do nothing. read and check config only',
    )
    
    parser.add_argument(
        '--not-use-sd-notify',
        action='store_true',
        help='not use sd_notify',
    )
    
    parser.add_argument(
        '--log-config',
        metavar='LOG_CONFIG_PATH',
        help='path to log config file',
    )
    
    parser.add_argument(
        'config',
        nargs='+',
        metavar='CONFIG_PATH',
        help='path to config file',
    )
    
    args = parser.parse_args()
    
    check_config_only = args.check_config_only
    not_use_sd_notify = args.not_use_sd_notify
    log_config_path = args.log_config
    config_path_list = args.config
    
    with contextlib.ExitStack() as stack:
        log.init(log_config_path)
        
        config_ctx = pg_perfect_ticker.ConfigCtx()
        
        pg_perfect_ticker.blocking_read_config(config_ctx, config_path_list)
        
        if check_config_only:
            return
        
        loop = asyncio.get_event_loop()
        ticker_ctx = pg_perfect_ticker.TickerCtx()
        
        ticker_init_fut = loop.create_task(
            pg_perfect_ticker.ticker_init(loop, ticker_ctx, config_path_list, config_ctx),
        )
        
        loop.run_until_complete(ticker_init_fut)
        stack.callback(pg_perfect_ticker.blocking_ticker_shutdown, ticker_ctx)
        
        def shutdown_handler():
            loop.create_task(
                pg_perfect_ticker.ticker_shutdown_handler(loop, ticker_ctx),
            )
        
        loop.add_signal_handler(signal.SIGINT, shutdown_handler)
        stack.callback(loop.remove_signal_handler, signal.SIGINT)
        loop.add_signal_handler(signal.SIGTERM, shutdown_handler)
        stack.callback(loop.remove_signal_handler, signal.SIGTERM)
        
        if not not_use_sd_notify:
            sd.notify('READY=1', unset_environment=True)
        
        ticker_process_fut = loop.create_task(
            pg_perfect_ticker.ticker_process(loop, ticker_ctx),
        )
        
        loop.run_until_complete(ticker_process_fut)
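# ExitStack.callback unwinds in LIFO order, so in main() above the signal
# handlers are removed before blocking_ticker_shutdown runs. A tiny
# demonstration of that ordering:
import contextlib

order = []
with contextlib.ExitStack() as stack:
    stack.callback(order.append, "registered first, runs last")
    stack.callback(order.append, "registered last, runs first")
assert order == ["registered last, runs first",
                 "registered first, runs last"]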
Example #26
0
def before_scenario(context, scenario):
    """Perform the setup before each scenario is run."""
    context.resource_manager = contextlib.ExitStack()
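# A resource manager opened in before_scenario is only useful with a matching
# teardown; a plausible counterpart (hypothetical, using behave's
# after_scenario hook) would be:
def after_scenario(context, scenario):
    """Release everything registered on the stack during the scenario."""
    context.resource_manager.close()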
Example #27
0
def joy_detector(num_frames, preview_alpha, image_format, image_folder,
                 enable_streaming, streaming_bitrate, mdns_name):
    done = threading.Event()

    def stop():
        logger.info('Stopping...')
        done.set()

    signal.signal(signal.SIGINT, lambda signum, frame: stop())
    signal.signal(signal.SIGTERM, lambda signum, frame: stop())

    logger.info('Starting joy detection...')
    with contextlib.ExitStack() as stack:
        leds = stack.enter_context(Leds())
        board = stack.enter_context(Board())
        player = stack.enter_context(Player(gpio=BUZZER_GPIO, bpm=10))
        photographer = stack.enter_context(
            Photographer(image_format, image_folder))
        animator = stack.enter_context(Animator(leds))
        # Forced sensor mode, 1640x1232, full FoV. See:
        # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
        # This is the resolution inference run on.
        # Use half of that for video streaming (820x616).
        camera = stack.enter_context(
            PiCamera(sensor_mode=4, resolution=(820, 616)))
        stack.enter_context(PrivacyLed(leds))

        server = None
        if enable_streaming:
            server = stack.enter_context(
                StreamingServer(camera,
                                bitrate=streaming_bitrate,
                                mdns_name=mdns_name))

        def model_loaded():
            logger.info('Model loaded.')
            player.play(MODEL_LOAD_SOUND)

        def take_photo():
            logger.info('Button pressed.')
            player.play(BEEP_SOUND)
            photographer.shoot(camera)

        if preview_alpha > 0:
            camera.start_preview(alpha=preview_alpha)

        board.button.when_pressed = take_photo

        joy_moving_average = moving_average(10)
        joy_moving_average.send(None)  # Initialize.
        joy_threshold_detector = threshold_detector(JOY_SCORE_LOW,
                                                    JOY_SCORE_HIGH)
        joy_threshold_detector.send(None)  # Initialize.
        for faces, frame_size in run_inference(num_frames, model_loaded):
            photographer.update_faces((faces, frame_size))
            joy_score = joy_moving_average.send(average_joy_score(faces))
            animator.update_joy_score(joy_score)
            # Send http request
            sendJoy(joy_score, faces)
            event = joy_threshold_detector.send(joy_score)
            if event == 'high':
                logger.info('High joy detected.')
                player.play(JOY_SOUND)
            elif event == 'low':
                logger.info('Low joy detected.')
                player.play(SAD_SOUND)

            if server:
                server.send_overlay(svg_overlay(faces, frame_size, joy_score))

            if done.is_set():
                break
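# moving_average() and threshold_detector() above are generators driven with
# .send(); the AIY implementations are not shown here, but a generator of that
# shape could look roughly like this (hypothetical sketch):
import collections


def moving_average(size):
    window = collections.deque(maxlen=size)
    value = None
    while True:
        sample = yield value  # .send(None) primes it; .send(x) feeds a sample
        window.append(sample)
        value = sum(window) / len(window)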
Example #28
0
async def _render_wfmodule(
    chroot_context: ChrootContext,
    workflow: Workflow,
    wf_module: WfModule,
    module_zipfile: Optional[ModuleZipfile],
    raw_params: Dict[str, Any],
    tab: Tab,
    input_result: RenderResult,
    tab_results: Dict[Tab, Optional[RenderResult]],
    output_path: Path,
) -> RenderResult:
    """
    Prepare and call `wf_module`'s `render()`; return a RenderResult.

    The actual render runs in a background thread so the event loop can process
    other events.
    """
    basedir = output_path.parent

    if wf_module.order > 0 and input_result.status != "ok":
        return RenderResult()  # 'unreachable'

    if module_zipfile is None:
        return RenderResult(errors=[
            RenderError(
                I18nMessage.trans(
                    "py.renderer.execute.wf_module.noModule",
                    default=
                    "Please delete this step: an administrator uninstalled its code.",
                ))
        ])

    # exit_stack: stuff that gets deleted when the render is done
    with contextlib.ExitStack() as exit_stack:
        try:
            # raise UnneededExecution, TabCycleError, TabOutputUnreachableError,
            # PromptingError
            fetch_result, params = await _execute_wfmodule_pre(
                basedir,
                exit_stack,
                workflow,
                wf_module,
                module_zipfile,
                raw_params,
                input_result.table,
                tab_results,
            )
        except TabCycleError:
            return RenderResult(errors=[
                RenderError(
                    I18nMessage.trans(
                        "py.renderer.execute.wf_module.TabCycleError",
                        default=
                        "The chosen tab depends on this one. Please choose another tab.",
                    ))
            ])
        except TabOutputUnreachableError:
            return RenderResult(errors=[
                RenderError(
                    I18nMessage.trans(
                        "py.renderer.execute.wf_module.TabOutputUnreachableError",
                        default=
                        "The chosen tab has no output. Please select another one.",
                    ))
            ])
        except PromptingError as err:
            return RenderResult(errors=err.as_render_errors())

        # Render may take a while. run_in_executor to push that slowdown to a
        # thread and keep our event loop responsive.
        loop = asyncio.get_event_loop()
        return await loop.run_in_executor(
            None,
            _wrap_render_errors,
            partial(
                invoke_render,
                module_zipfile,
                chroot_context=chroot_context,
                basedir=basedir,
                input_table=input_result.table,
                params=params,
                tab=tab,
                fetch_result=fetch_result,
                output_filename=output_path.name,
            ),
        )
Example #29
0
 def setUp(self):
     super().setUp()
     stack = contextlib.ExitStack()
     self.addCleanup(stack.close)
     stack.enter_context(
         test_lib.ConfigOverrider({"Client.use_memory_sandboxing": True}))
Example #30
0
def initial_sync_mirror(
    channel_name: str,
    remote_repository: RemoteRepository,
    arch: str,
    dao: Dao,
    pkgstore: PackageStore,
    auth: authorization.Rules,
    skip_errors: bool = True,
):

    force = True  # needed for updating packages

    try:
        repo_file = remote_repository.open(os.path.join(arch, "repodata.json"))
        repodata = json.load(repo_file.file)
    except RemoteServerError:
        logger.error(f"can not get repodata.json for channel {channel_name}")
        return
    except json.JSONDecodeError:
        logger.error(
            f"repodata.json badly formatted for arch {arch} in channel {channel_name}"
        )
        return

    channel = dao.get_channel(channel_name)

    from quetz.main import handle_package_files

    packages = repodata.get("packages", {})

    version_methods = [
        _check_timestamp(channel, dao),
        _check_checksum(pkgstore, channel_name, arch, "sha256"),
        _check_checksum(pkgstore, channel_name, arch, "md5"),
    ]

    # version_methods are context managers (for example, to update the db
    # after all packages have been checked), so we need to enter the context
    # for each
    any_updated = False
    with contextlib.ExitStack() as version_stack:

        version_checks = [
            version_stack.enter_context(method) for method in version_methods
        ]

        for package_name, metadata in packages.items():
            path = os.path.join(arch, package_name)

            # try to find out whether it's a new package version

            is_uptodate = None
            for _check in version_checks:
                is_uptodate = _check(package_name, metadata)
                if is_uptodate is not None:
                    break

            # if package is up-to-date skip uploading file
            if is_uptodate:
                logger.debug(
                    f"package {package_name} from {arch} up-to-date. Not updating"
                )
                continue
            else:
                logger.debug(f"updating package {package_name} form {arch}")

            try:
                remote_package = remote_repository.open(path)
            except RemoteServerError:
                logger.error(f"remote server error when getting a file {path}")
                continue

            files = [remote_package]
            try:
                handle_package_files(
                    channel_name,
                    files,
                    dao,
                    auth,
                    force,
                )
                any_updated = True
            except Exception as exc:
                logger.error(
                    f"could not process package {package_name} from channel"
                    f"{channel_name} due to error {exc}")
                if not skip_errors:
                    raise exc

    if any_updated:
        indexing.update_indexes(dao, pkgstore, channel_name, subdirs=[arch])
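# The version_methods above are context managers that yield a check callable
# taking (package_name, metadata) and returning True (up to date), False
# (needs update) or None (cannot decide). A hypothetical minimal one:
import contextlib


@contextlib.contextmanager
def _check_never_decides():
    def check(package_name, metadata):
        return None  # defer to the next check in version_checks
    try:
        yield check
    finally:
        pass  # e.g. flush any bookkeeping collected during the checks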