Example #1
    def get_previous_value(self, context=None):
        # temporary method until previous values are integrated for all parameters
        value = self.parameters.previous_value._get(context)
        if value is None:
            value = self.parameters.previous_value._get(Context())

        return value
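
The fallback above reads the parameter under the current execution context and, if nothing has been stored there yet, retries under a fresh default `Context()`. A minimal sketch of that context-keyed lookup-with-default pattern, using a toy `ContextKeyedValue` store rather than PsyNeuLink's `Parameter` machinery (names here are illustrative):

class ContextKeyedValue:
    """Toy stand-in for a per-execution-context parameter store."""
    def __init__(self, default=None):
        self._values = {None: default}   # execution_id -> value; None is the default context

    def get(self, execution_id=None):
        # fall back to the default (None) context when no
        # context-specific value has been written yet
        if execution_id in self._values:
            return self._values[execution_id]
        return self._values[None]

    def set(self, value, execution_id=None):
        self._values[execution_id] = value

store = ContextKeyedValue(default=0.0)
store.set(42.0, execution_id="run-1")
assert store.get("run-1") == 42.0
assert store.get("run-2") == 0.0   # unseen context falls back to the default
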
Example #2
    def __init__(self,
                 composition,
                 execution_ids=[None],
                 *,
                 additional_tags=frozenset()):
        super().__init__(buffers=[
            'state_struct', 'param_struct', 'data_struct', 'conditions'
        ])
        self._composition = composition
        self._execution_contexts = [
            Context(execution_id=eid) for eid in execution_ids
        ]
        self.__bin_exec_func = None
        self.__bin_exec_multi_func = None
        self.__bin_func = None
        self.__bin_run_func = None
        self.__bin_run_multi_func = None
        self.__debug_env = debug_env
        self.__frozen_vals = None
        self.__tags = frozenset(additional_tags)

        # TODO: Consolidate these
        if len(execution_ids) > 1:
            self.__state_struct = None
            self.__param_struct = None
            self.__data_struct = None
            self.__conds = None
            self._ct_len = ctypes.c_int(len(execution_ids))
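
`execution_ids=[None]` is a mutable default argument. It happens to be safe here because the list is never mutated, but the usual defensive idiom is a `None` sentinel that is normalized inside the body. A self-contained sketch of that idiom:

def init_execution_ids(execution_ids=None):
    # None sentinel instead of a mutable [None] default: the list is
    # created fresh on every call, so no state leaks between calls
    # even if a caller mutates the result.
    if execution_ids is None:
        execution_ids = [None]
    return list(execution_ids)

a = init_execution_ids()
b = init_execution_ids()
a.append("eid-1")
assert b == [None]   # b is unaffected by mutating a
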
Example #3
    def _add_system(self, system, role: str):
        super()._add_system(system, role)
        if isinstance(self.modulated_mechanisms, str) and self.modulated_mechanisms is ALL:
            # Call with ContextFlags.COMPONENT so that OutputPorts are replaced rather than added
            self._instantiate_output_ports(context=Context(source=ContextFlags.COMPONENT))
Example #4
    def __init__(self, component, execution_ids=[None], *, tags=frozenset()):
        super().__init__()
        self._bin_func = pnlvm.LLVMBinaryFunction.from_obj(component,
                                                           tags=tags)
        self._execution_contexts = [
            Context(execution_id=eid) for eid in execution_ids
        ]
        self._component = component
        self.__debug_env = debug_env

        par_struct_ty, ctx_struct_ty, vi_ty, vo_ty = self._bin_func.byref_arg_types

        if len(execution_ids) > 1:
            self._bin_multirun = self._bin_func.get_multi_run()
            self._ct_len = ctypes.c_int(len(execution_ids))
            vo_ty = vo_ty * len(execution_ids)
            vi_ty = vi_ty * len(execution_ids)

            self.__param_struct = None
            self.__state_struct = None

        self._vo_ty = vo_ty
        self._ct_vo = vo_ty()
        self._vi_ty = vi_ty
        self._vi_dty = _element_dtype(vi_ty)
        if "stat" in self.__debug_env:
            print("Input struct size:", _pretty_size(ctypes.sizeof(vi_ty)),
                  "for", self._component.name)
            print("Output struct size:", _pretty_size(ctypes.sizeof(vo_ty)),
                  "for", self._component.name)
Example #5
    def __init__(self, composition, device, context=None):

        if not torch_available:
            raise Exception('Pytorch python module (torch) is not installed. Please install it with '
                            '`pip install torch` or `pip3 install torch`')

        super(PytorchModelCreator, self).__init__()

        # Maps Mechanism -> PytorchMechanismWrapper
        self.nodes = []
        self.component_map = {}

        # Maps Projections -> PytorchProjectionWrappers
        self.projections = []
        self.projection_map = {}

        self.params = nn.ParameterList()
        self.device = device
        self._composition = composition

        # Instantiate pytorch mechanisms
        for node in set(composition.nodes) - set(composition.get_nodes_by_role(NodeRole.LEARNING)):
            pytorch_node = PytorchMechanismWrapper(node, self._composition._get_node_index(node), device, context=context)
            self.component_map[node] = pytorch_node
            self.nodes.append(pytorch_node)

        # Instantiate pytorch projections
        for projection in composition.projections:
            if projection.sender.owner in self.component_map and projection.receiver.owner in self.component_map:
                proj_send = self.component_map[projection.sender.owner]
                proj_recv = self.component_map[projection.receiver.owner]

                port_idx = projection.sender.owner.output_ports.index(projection.sender)
                new_proj = PytorchProjectionWrapper(projection,
                                                    list(self._composition._inner_projections).index(projection),
                                                    port_idx, device,
                                                    sender=proj_send, receiver=proj_recv,
                                                    context=context)
                proj_send.add_efferent(new_proj)
                proj_recv.add_afferent(new_proj)
                self.projection_map[projection] = new_proj
                self.projections.append(new_proj)
                self.params.append(new_proj.matrix)

        c = Context()
        try:
            composition.scheduler._init_counts(execution_id=c.execution_id, base_execution_id=context.execution_id)
        except graph_scheduler.SchedulerError:
            # called from LLVM, no base context is provided
            composition.scheduler._init_counts(execution_id=c.execution_id)

        # Setup execution sets
        # 1) Remove all learning-specific nodes
        self.execution_sets = [x - set(composition.get_nodes_by_role(NodeRole.LEARNING)) for x in composition.scheduler.run(context=c)]
        # 2) Convert to pytorchcomponent representation
        self.execution_sets = [{self.component_map[comp] for comp in s if comp in self.component_map} for s in self.execution_sets]
        # 3) Remove empty execution sets
        self.execution_sets = [x for x in self.execution_sets if len(x) > 0]

        composition.scheduler._delete_counts(c.execution_id)
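
The `torch_available` flag checked at the top is presumably set by a guarded import at module load, so the hard dependency only surfaces when a Pytorch model is actually constructed. A sketch of that guarded-import pattern (module layout assumed, not taken from the source):

try:
    import torch
    from torch import nn
    torch_available = True
except ImportError:
    torch_available = False

def require_torch():
    # defer the failure until torch functionality is actually requested
    if not torch_available:
        raise Exception('Pytorch python module (torch) is not installed. Please install it with '
                        '`pip install torch` or `pip3 install torch`')
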
Example #6
    def evaluate(self,
                 feature_values,
                 control_allocation,
                 num_estimates,
                 base_context=Context(execution_id=None),
                 context=None):
        """Return `target <FunctionApproximator.target>` predicted by `function <FunctionApproximator.function>` for
        **input**, using the current set of `prediction_parameters <FunctionApproximator.prediction_parameters>`.
        """
        # FIX: AUGMENT TO USE num_estimates
        return self.function(feature_values,
                             control_allocation,
                             context=context)
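
The FIX note flags that `num_estimates` is currently ignored. One plausible shape for that augmentation is to repeat a (possibly stochastic) estimate and aggregate; this is purely illustrative, and the actual fix may differ:

import random
import statistics

def evaluate_with_estimates(fn, feature_values, control_allocation, num_estimates):
    # run the function several times and average the results, one
    # plausible reading of the "AUGMENT TO USE num_estimates" FIX note
    estimates = [fn(feature_values, control_allocation) for _ in range(num_estimates)]
    return statistics.fmean(estimates)

# usage with a toy stochastic function
print(evaluate_with_estimates(lambda f, c: f + c + random.gauss(0, 0.1), 1.0, 2.0, 10))
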
Example #7
    def __init__(self,
                 default_variable=None,
                 sample: tc.optional(
                     tc.any(OutputPort, Mechanism_Base, dict, is_numeric,
                            str)) = None,
                 target: tc.optional(
                     tc.any(OutputPort, Mechanism_Base, dict, is_numeric,
                            str)) = None,
                 function=LinearCombination(weights=[[-1], [1]]),
                 output_ports: tc.optional(tc.any(str, Iterable)) = None,
                 params=None,
                 name=None,
                 prefs: is_pref_set = None,
                 **kwargs):

        input_ports = kwargs.pop(INPUT_PORTS, {})
        if input_ports:
            input_ports = {INPUT_PORTS: input_ports}

        input_ports = self._merge_legacy_constructor_args(
            sample, target, default_variable, input_ports)

        # Default output_ports is specified in constructor as a tuple rather than a list
        # to avoid "gotcha" associated with mutable default arguments
        # (see: bit.ly/2uID3s3 and http://docs.python-guide.org/en/latest/writing/gotchas/)
        if isinstance(output_ports, (str, tuple)):
            output_ports = list(output_ports)

        # IMPLEMENTATION NOTE: The following prevents the default from being updated by subsequent assignment
        #                      (in this case, to [OUTCOME, {NAME: MSE}]), but fails to expose the default in the IDE
        # output_ports = output_ports or [OUTCOME, MSE]

        super().__init__(
            monitor=input_ports,
            function=function,
            output_ports=output_ports,  # prevent default from getting overwritten by later assign
            params=params,
            name=name,
            prefs=prefs,
            **kwargs)

        # Require Projection to TARGET InputPort (already required for SAMPLE as primary InputPort)
        self.input_ports[1].parameters.require_projection_in_composition._set(
            True, Context())
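
The constructor's comments describe sidestepping the mutable-default gotcha by accepting a tuple and converting it to a list inside the body. A minimal sketch of that idiom, with illustrative port-name constants:

OUTCOME, MSE = 'OUTCOME', 'MSE'   # illustrative keyword constants

def make_output_ports(output_ports=(OUTCOME, MSE)):
    # a tuple default is immutable, so it is safe to share across calls;
    # convert to a list only inside the body, where mutation stays local
    if isinstance(output_ports, (str, tuple)):
        output_ports = list(output_ports)
    return output_ports

ports = make_output_ports()
ports.append('EXTRA')                      # mutates only this call's list
assert make_output_ports() == [OUTCOME, MSE]
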
Example #8
    def __init__(self, component, execution_ids=[None]):
        super().__init__()
        self._bin_func = pnlvm.LLVMBinaryFunction.from_obj(component)
        self._execution_contexts = [
            Context(execution_id=eid) for eid in execution_ids
        ]
        self._component = component

        par_struct_ty, ctx_struct_ty, vi_ty, vo_ty = self._bin_func.byref_arg_types

        if len(execution_ids) > 1:
            self._bin_multirun = self._bin_func.get_multi_run()
            self._ct_len = ctypes.c_int(len(execution_ids))
            vo_ty = vo_ty * len(execution_ids)
            vi_ty = vi_ty * len(execution_ids)

            self.__param_struct = None
            self.__state_struct = None

        self._ct_vo = vo_ty()
        self._vi_ty = vi_ty
Example #9
    def execute(
        self,
        inputs=None,
        autodiff_stimuli=None,
        scheduler=None,
        termination_processing=None,
        call_before_time_step=None,
        call_before_pass=None,
        call_after_time_step=None,
        call_after_pass=None,
        context=None,
        base_context=Context(execution_id=None),
        clamp_input=SOFT_CLAMP,
        runtime_params=None,
        skip_initialization=False,
        bin_execute=False,
    ):

        if isinstance(inputs, list):
            inputs = {
                self.get_mechanisms_by_role(NodeRole.ORIGIN).pop(): inputs
            }

        output = super(PathwayComposition, self).execute(
            inputs,
            scheduler=scheduler,
            termination_processing=termination_processing,
            call_before_time_step=call_before_time_step,
            call_before_pass=call_before_pass,
            call_after_time_step=call_after_time_step,
            call_after_pass=call_after_pass,
            context=context,
            clamp_input=clamp_input,
            runtime_params=runtime_params,
            skip_initialization=skip_initialization,
            bin_execute=bin_execute,
        )
        return output
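
`set.pop()` returns an arbitrary element, so the list-to-dict normalization above is only well defined when there is exactly one ORIGIN node. A defensive sketch of the same normalization (helper name and error wording are hypothetical):

def normalize_inputs(inputs, origin_nodes):
    # map a bare list of inputs onto the composition's single ORIGIN node;
    # refuse to guess when the ORIGIN is ambiguous
    if isinstance(inputs, list):
        if len(origin_nodes) != 1:
            raise ValueError('a bare input list is ambiguous with '
                             f'{len(origin_nodes)} ORIGIN nodes; pass a dict instead')
        inputs = {next(iter(origin_nodes)): inputs}
    return inputs

print(normalize_inputs([1.0, 2.0], {'origin_mech'}))
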
Example #10
    def run(self,
            termination_conds=None,
            context=None,
            base_context=Context(execution_id=None),
            skip_trial_time_increment=False):
        """
        run is a python generator, that when iterated over provides the next `TIME_STEP` of
        executions at each iteration

        :param termination_conds: (dict) - a mapping from `TimeScale`\\s to `Condition`\\s that when met
               terminate the execution of the specified `TimeScale`
        """
        self._validate_run_state()
        if termination_conds is None:
            termination_conds = self.termination_conds
        else:
            termination_conds = self.update_termination_conditions(
                Scheduler._parse_termination_conditions(termination_conds))

        if context is None:
            context = Context(execution_id=self.default_execution_id)

        self._init_counts(context.execution_id, base_context.execution_id)
        self._reset_counts_useable(context.execution_id)
        self._reset_counts_total(TimeScale.TRIAL, context.execution_id)

        while (not termination_conds[TimeScale.TRIAL].is_satisfied(
                scheduler=self, context=context)
               and not termination_conds[TimeScale.RUN].is_satisfied(
                   scheduler=self, context=context)):
            self._reset_counts_total(TimeScale.PASS, context.execution_id)

            execution_list_has_changed = False
            cur_index_consideration_queue = 0

            while (cur_index_consideration_queue < len(
                    self.consideration_queue)
                   and not termination_conds[TimeScale.TRIAL].is_satisfied(
                       scheduler=self, context=context)
                   and not termination_conds[TimeScale.RUN].is_satisfied(
                       scheduler=self, context=context)):
                # all nodes to be added during this time step
                cur_time_step_exec = set()
                # the current "layer/group" of nodes that MIGHT be added during this time step
                cur_consideration_set = self.consideration_queue[
                    cur_index_consideration_queue]
                try:
                    iter(cur_consideration_set)
                except TypeError as e:
                    raise SchedulerError(
                        'cur_consideration_set is not iterable; was this Scheduler instantiated with '
                        'an actual toposort output for param toposort_ordering? err: {0}'.format(e))

                # do-while, on cur_consideration_set_has_changed
                # we check whether each node in the current consideration set is allowed to run,
                # and nodes can cause cascading adds within this set
                while True:
                    cur_consideration_set_has_changed = False
                    for current_node in cur_consideration_set:
                        # only add each node once during a single time step, this also serves
                        # to prevent infinitely cascading adds
                        if current_node not in cur_time_step_exec:
                            if self.conditions.conditions[
                                    current_node].is_satisfied(
                                        scheduler=self, context=context):
                                cur_time_step_exec.add(current_node)
                                execution_list_has_changed = True
                                cur_consideration_set_has_changed = True

                                for ts in TimeScale:
                                    self.counts_total[context.execution_id][
                                        ts][current_node] += 1
                                # current_node's node is added to the execution queue, so we now need to
                                # reset all of the counts useable by current_node's node to 0
                                for n in self.counts_useable[
                                        context.execution_id]:
                                    self.counts_useable[context.execution_id][
                                        n][current_node] = 0
                                # and increment all of the counts of current_node's node useable by other
                                # nodes by 1
                                for n in self.counts_useable[
                                        context.execution_id]:
                                    self.counts_useable[context.execution_id][
                                        current_node][n] += 1
                    # do-while condition
                    if not cur_consideration_set_has_changed:
                        break

                # add a new time step at each step in a pass, if the time step would not be empty
                if len(cur_time_step_exec) >= 1:
                    self.execution_list[context.execution_id].append(
                        cur_time_step_exec)
                    yield self.execution_list[context.execution_id][-1]

                    self.get_clock(context)._increment_time(
                        TimeScale.TIME_STEP)

                cur_index_consideration_queue += 1

            # if an entire pass occurs with nothing running, add an empty time step
            if not execution_list_has_changed:
                self.execution_list[context.execution_id].append(set())
                yield self.execution_list[context.execution_id][-1]

                self.get_clock(context)._increment_time(TimeScale.TIME_STEP)

            self.get_clock(context)._increment_time(TimeScale.PASS)

        if not skip_trial_time_increment:
            self.get_clock(context)._increment_time(TimeScale.TRIAL)

        if termination_conds[TimeScale.RUN].is_satisfied(scheduler=self,
                                                         context=context):
            self.date_last_run_end = datetime.datetime.now()

        return self.execution_list[context.execution_id]
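
The inner `while True` loop emulates a do-while: the body must run at least once, and it repeats only while the pass over the consideration set kept adding nodes (each add can unlock further nodes in the same set). A stripped-down sketch of that cascading fixed-point loop:

def cascade_until_stable(candidates, is_satisfied):
    """Keep sweeping candidates until a full pass adds nothing new."""
    added = set()
    while True:                       # do-while: the body always runs once
        changed = False
        for node in candidates:
            if node not in added and is_satisfied(node, added):
                added.add(node)
                changed = True        # a new add may unlock further nodes
        if not changed:               # do-while exit condition
            break
    return added

# usage: node 'b' only becomes satisfiable after 'a' has been added
print(cascade_until_stable({'a', 'b'},
                           lambda n, done: n == 'a' or 'a' in done))
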
Example #11
    def execute(
        self,
        inputs=None,
        num_trials=None,
        minibatch_size=1,
        do_logging=False,
        scheduler=None,
        termination_processing=None,
        call_before_minibatch=None,
        call_after_minibatch=None,
        call_before_time_step=None,
        call_before_pass=None,
        call_after_time_step=None,
        call_after_pass=None,
        reset_stateful_functions_to=None,
        context=None,
        base_context=Context(execution_id=None),
        clamp_input=SOFT_CLAMP,
        targets=None,
        runtime_params=None,
        execution_mode: pnlvm.ExecutionMode = pnlvm.ExecutionMode.Python,
        skip_initialization=False,
        report_output: ReportOutput = ReportOutput.OFF,
        report_params: ReportParams = ReportParams.OFF,
        report_progress: ReportProgress = ReportProgress.OFF,
        report_simulations: ReportSimulations = ReportSimulations.OFF,
        report_to_devices: ReportDevices = None,
        report=None,
        report_num=None,
    ):
        self._assign_execution_ids(context)
        context.composition = self
        context.source = ContextFlags.COMPOSITION

        if scheduler is None:
            scheduler = self.scheduler

        if self._is_learning(context):
            # TBI: How are we supposed to use base_context and statefulness here?
            # TBI: can we call _build_pytorch_representation in _analyze_graph so that pytorch
            # model may be modified between runs?

            autodiff_inputs = self._infer_input_nodes(inputs)
            autodiff_targets = self._infer_output_nodes(inputs)

            report(
                self,
                LEARN_REPORT,
                # EXECUTE_REPORT,
                report_num=report_num,
                scheduler=scheduler,
                content='trial_start',
                context=context)

            self._build_pytorch_representation(context)
            output = self.autodiff_training(autodiff_inputs, autodiff_targets,
                                            context, scheduler)

            # FIX 5/28/20:
            # context.add_flag(ContextFlags.PROCESSING)
            execution_phase = context.execution_phase
            context.execution_phase = ContextFlags.PROCESSING

            self.output_CIM.execute(output, context=context)
            # FIX 5/28/20:
            context.execution_phase = execution_phase

            report(
                self,
                # [LEARN_REPORT],
                [EXECUTE_REPORT, PROGRESS_REPORT],
                report_num=report_num,
                scheduler=scheduler,
                content='trial_end',
                context=context)

            scheduler.get_clock(context)._increment_time(TimeScale.TRIAL)

            return output

        return super(AutodiffComposition, self).execute(
            inputs=inputs,
            scheduler=scheduler,
            termination_processing=termination_processing,
            call_before_time_step=call_before_time_step,
            call_before_pass=call_before_pass,
            call_after_time_step=call_after_time_step,
            call_after_pass=call_after_pass,
            reset_stateful_functions_to=reset_stateful_functions_to,
            context=context,
            base_context=base_context,
            clamp_input=clamp_input,
            runtime_params=runtime_params,
            execution_mode=execution_mode,
            report=report,
            report_num=report_num)
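
The learning branch saves `context.execution_phase`, forces it to `ContextFlags.PROCESSING` around the output CIM call, then restores it. That save/override/restore dance can be packaged as a context manager; a sketch under the assumption that `execution_phase` is a plain attribute:

from contextlib import contextmanager

@contextmanager
def execution_phase(context, phase):
    # temporarily override context.execution_phase, restoring the
    # previous value even if the body raises
    saved = context.execution_phase
    context.execution_phase = phase
    try:
        yield context
    finally:
        context.execution_phase = saved

# hypothetical usage for the block above:
# with execution_phase(context, ContextFlags.PROCESSING):
#     self.output_CIM.execute(output, context=context)
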
Example #12
    def execute(
        self,
        inputs=None,
        num_trials=None,
        minibatch_size=1,
        do_logging=False,
        scheduler=None,
        termination_processing=None,
        call_before_minibatch=None,
        call_after_minibatch=None,
        call_before_time_step=None,
        call_before_pass=None,
        call_after_time_step=None,
        call_after_pass=None,
        reset_stateful_functions_to=None,
        context=None,
        base_context=Context(execution_id=None),
        clamp_input=SOFT_CLAMP,
        targets=None,
        runtime_params=None,
        bin_execute=False,
        skip_initialization=False,
    ):
        self._assign_execution_ids(context)
        context.composition = self
        context.source = ContextFlags.COMPOSITION

        if scheduler is None:
            scheduler = self.scheduler

        if self._is_learning(context):
            # TBI: How are we supposed to use base_context and statefulness here?
            # TBI: can we call _build_pytorch_representation in _analyze_graph so that pytorch
            # model may be modified between runs?

            autodiff_inputs = self._infer_input_nodes(inputs)
            autodiff_targets = self._infer_output_nodes(inputs)

            self._build_pytorch_representation(context)
            output = self.autodiff_training(autodiff_inputs, autodiff_targets,
                                            context, scheduler)

            # FIX 5/28/20:
            # context.add_flag(ContextFlags.PROCESSING)
            execution_phase = context.execution_phase
            context.execution_phase = ContextFlags.PROCESSING

            self.output_CIM.execute(output, context=context)
            # FIX 5/28/20:
            context.execution_phase = execution_phase

            scheduler.get_clock(context)._increment_time(TimeScale.TRIAL)
            return output

        return super(AutodiffComposition, self).execute(
            inputs=inputs,
            scheduler=scheduler,
            termination_processing=termination_processing,
            call_before_time_step=call_before_time_step,
            call_before_pass=call_before_pass,
            call_after_time_step=call_after_time_step,
            call_after_pass=call_after_pass,
            reset_stateful_functions_to=reset_stateful_functions_to,
            context=context,
            base_context=base_context,
            clamp_input=clamp_input,
            runtime_params=runtime_params,
            bin_execute=bin_execute,
        )