Esempio n. 1
0
    def _hack_(self):
        """
        TODO: please remove me at earliest convenience

        HACK for the inconsistent proxy and backend behavior
        Replace exc and bibi representation by reading from storage
        Proxy might have changed something that backend is not aware of
        """
        from hbp_nrp_backend.storage_client_api.StorageClient import StorageClient
        import urllib

        storage = StorageClient()
        quoted_experiment = urllib.quote_plus(self._sim_config.experiment_id)

        def fetch_from_storage(rel_path):
            # Read the freshest copy of a config file straight from storage
            return storage.get_file(self._sim_config.token,
                                    quoted_experiment,
                                    rel_path,
                                    by_name=True)

        latest_exc = fetch_from_storage(self._sim_config.exc_path.rel_path)
        latest_bibi = fetch_from_storage(self._sim_config.bibi_path.rel_path)

        try:
            self._exc_dom = exc_parser.CreateFromDocument(latest_exc)
            self._bibi_dom = bibi_parser.CreateFromDocument(latest_bibi)
        except Exception as ex:
            raise Exception(
                "Something went horribly wrong while creating latest "
                "config objects with following exception {}".format(str(ex)))
Esempio n. 2
0
    def put(self, sim_id):
        """
        Set brain file of the simulation specified with simulation ID.
        Depending on the type of brain file, it has to be transmitted as text or as base64
        :param sim_id: The simulation ID

        :< json string brain_type: Type of the brain file ('h5' or 'py')
        :< json string data_type: type of the data field ('text' or 'base64')
        :< json string data: Contents of the brain file. Encoding given in field data_type
        :< json dict brain_populations: A dictionary indexed by population names and containing
                                        neuron indices
        :> json string error_message: Error Message if there is a syntax error in the code
        :> json int error_line: Line of code, where error occurred
        :> json int error_column: Column, where error occurred (if available)
        :> json bool handle_population_change: a flag indicating if user wants to change transfer
                                               functions according to population changes.

        :status 500: {0}
        :status 404: {1}
        :status 401: {2}
        :status 400: {3}
        :status 200: Success. The experiment brain file was replaced
        """

        simulation = _get_simulation_or_abort(sim_id)
        storage_client = StorageClient()
        if not UserAuthentication.can_modify(simulation):
            raise NRPServicesWrongUserException()

        body = request.get_json(force=True)

        file_url = body.get('urls', {}).get('fileUrl')
        if file_url:
            # TODO: fix server certificate and remove verify
            with requests.get(file_url, verify=False) as h5:
                filename = os.path.basename(file_url)
                # h5.content is raw bytes (e.g. an HDF5 brain): write in binary
                # mode so the payload is not corrupted by newline translation
                with open(os.path.join(simulation.lifecycle.sim_dir, filename),
                          'wb') as f:
                    f.write(h5.content)
                storage_client.create_or_update(simulation.token,
                                                simulation.experiment_id,
                                                filename, h5.content,
                                                "text/plain")

        # Forward the brain to the CLE; populations travel as a JSON string
        result = simulation.cle.set_simulation_brain(
            brain_type=body['brain_type'],
            data=body['data'],
            data_type=body['data_type'],
            brain_populations=json.dumps(body['brain_populations']))

        if result.error_message:
            # Error in given brain
            return {
                'error_message': result.error_message,
                'error_line': result.error_line,
                'error_column': result.error_column
            }, 400
        # Success
        return {'message': "Success"}, 200
    def __init__(self, sim_config):
        """
        Creates a new simulation assembly to simulate an experiment using the CLE and Gazebo

        :param sim_config: config of the simulation to be managed
        """
        super(CLEGazeboSimulationAssembly, self).__init__(sim_config)

        sim_dir = sim_config.sim_dir
        self.cle_server = None
        # Assets and resources live under the simulation directory
        self.simAssetsDir = os.path.join(sim_dir, 'assets')
        self._simResourcesDir = os.path.join(sim_dir, 'resources')

        storage_client = StorageClient()
        storage_client.set_sim_dir(sim_dir)
        self._storageClient = storage_client
Esempio n. 4
0
    def __init__(self, simulation, initial_state='created'):
        """
        Creates a new backend simulation lifecycle

        :param simulation: The simulation for which the simulation lifecycle is created
        :param initial_state: the lifecycle state to start in (defaults to 'created')
        """
        lifecycle_topic = TOPIC_LIFECYCLE(simulation.sim_id)
        super(BackendSimulationLifecycle, self).__init__(lifecycle_topic, initial_state)

        self.__simulation = simulation
        self._sim_dir = None
        self.__experiment_path = None
        self.__models_path = Settings.nrp_models_directory
        self.__textures_loaded = False
        self.__storageClient = StorageClient()
Esempio n. 5
0
    def _get_robot_abs_path(self, robot_file):
        """
        Gets the absolute path of the given robot file

        :param robot_file: The robot file
        :return: the absolute path to the robot file (for a ``.zip`` robot, the
                 path to the extracted ``model.sdf``); implicitly None for any
                 other extension
        """
        if 'storage://' in robot_file:
            from hbp_nrp_backend.storage_client_api.StorageClient import StorageClient
            client = StorageClient()
            abs_file = os.path.join(client.get_temp_directory(),
                                    os.path.basename(robot_file))
            # Determine once whether the robot comes as a zip archive
            zipped = os.path.splitext(abs_file)[1].lower() == '.zip'
            # Zip archives are binary data: write them in binary mode so the
            # content is not mangled by newline translation
            with open(abs_file, "wb" if zipped else "w") as f:
                f.write(
                    client.get_file(self.token,
                                    client.get_folder_uuid_by_name(
                                        self.token, self.ctx_id, 'robots'),
                                    os.path.basename(robot_file),
                                    byname=True,
                                    zipped=zipped))
        elif self.__is_collab_hack():
            abs_file = os.path.join(self.exc.dir, os.path.basename(robot_file))
        else:
            abs_file = os.path.join(models_path, robot_file)

        name, ext = os.path.splitext(abs_file)
        ext = ext.lower()
        if ext == ".sdf":
            return abs_file
        elif ext == ".zip":
            # The sdf must sit at "<zip root folder>/model.sdf" inside the archive
            name = os.path.split(name)[1] + "/model.sdf"
            with zipfile.ZipFile(abs_file) as robot_zip:
                try:
                    robot_zip.getinfo(name)
                except KeyError:
                    raise Exception(
                        "The robot zip archive must contain an sdf file named {0} "
                        "at the root of the archive, but does not.".format(
                            name))
                self.__tmp_robot_dir = tempfile.mkdtemp(suffix="robot")
                robot_zip.extractall(path=self.__tmp_robot_dir)
            return os.path.join(self.__tmp_robot_dir, name)
        # NOTE(review): any other extension silently returns None — confirm
        # callers handle that case
Esempio n. 6
0
 def _check_and_extract_environment_zip(self, experiment):
     """
     Checks for validity and extracts a zipped environment. First we
     make sure that the zip referenced in the experiment exists in the
     list of user environments, then we unzip it on the fly in the temporary
     simulation directory. After the extraction we also make sure to copy
     the sdf from the experiment folder cause the user may have modified it

     :param experiment: The experiment object.
     :return: the absolute path to the environment sdf in the temp directory
     :raises NRPServicesGeneralException: if the referenced zip is not found
                                          among the user's uploaded models
     """
     from hbp_nrp_backend.storage_client_api.StorageClient import StorageClient
     client = StorageClient()
     environments_list = client.get_custom_models(
         UserAuthentication.get_header_token(request),
         self.simulation.ctx_id, 'environments')
     # we use the paths of the uploaded zips to make sure the selected
     # zip is there
     paths_list = [environment['path'] for environment in environments_list]
     # check if the zip is in the user storage
     zipped_model_path = [
         path for path in paths_list
         if experiment.environmentModel.customModelPath in path
     ]
     if len(zipped_model_path):
         environment_path = os.path.join(
             client.get_temp_directory(),
             os.path.basename(experiment.environmentModel.src))
         storage_env_zip_data = client.get_custom_model(
             UserAuthentication.get_header_token(request),
             self.simulation.ctx_id, zipped_model_path[0])
         env_sdf_name = os.path.basename(experiment.environmentModel.src)
         env_path = os.path.join(
             client.get_temp_directory(),
             experiment.environmentModel.customModelPath)
         # Zip data is binary: write it in binary mode so the archive is not
         # corrupted by newline translation
         with open(env_path, 'wb') as environment_zip:
             environment_zip.write(storage_env_zip_data)
         with zipfile.ZipFile(env_path) as env_zip_to_extract:
             env_zip_to_extract.extractall(path=client.get_temp_directory())
         # copy back the .sdf from the experiment folder, cause we don't want the one
         # in the zip, cause the user might have made manual changes
         client.clone_file(env_sdf_name,
                           UserAuthentication.get_header_token(request),
                           self.simulation.experiment_id)
     # if the zip is not there, prompt the user to check his uploaded
     # models
     else:
         raise NRPServicesGeneralException(
             "Could not find selected zip %s in the list of uploaded models. Please make\
                 sure that it has been uploaded correctly" %
             (os.path.dirname(experiment.environmentModel.src)),
             "Zipped model retrieval failed")
     return environment_path
Esempio n. 7
0
    def prepare_record_for_playback(self):
        """
        Copy the record from user storage to tmp and unzip it.

        :> experimentID: The experiment
        :> storagePath: The storage path

        :tmp path: The local path ready to be played
        :raises NRPServicesClientErrorException: (404) if cloning/unzipping fails
        """

        client = StorageClient()
        file_clone_destination = os.path.join(self.sim_dir,
                                              self.simulation.playback_path)
        dest_path = os.path.dirname(file_clone_destination)

        try:
            if not os.path.exists(dest_path):
                os.makedirs(dest_path)
            # The recording is a zip archive (binary data): open the clone
            # destination in binary mode so the archive is not corrupted by
            # newline translation
            with open(file_clone_destination, "wb") as file_clone:

                file_contents = client.get_file(
                    UserAuthentication.get_header_token(),
                    urllib.quote_plus(self.simulation.experiment_id +
                                      '/recordings'),
                    os.path.basename(
                        self.simulation.playback_path),  # zip name
                    by_name=True,
                    zipped=True)

                file_clone.write(file_contents)

            ZipUtil.extractall(file_clone_destination, dest_path, True)

            # Update sim object's playback path with folder name
            self.simulation.playback_path = os.path.join(
                dest_path, ZipUtil.get_rootname(file_clone_destination))

            os.remove(file_clone_destination)

        except Exception as ex:
            # NOTE(review): delete_simulation_dir() is called with no argument
            # here — confirm SimUtil's signature supports that
            SimUtil.delete_simulation_dir()
            raise NRPServicesClientErrorException(
                'Copying recording to backend tmp failed with {}'.format(
                    str(ex)),
                error_code=404)
Esempio n. 8
0
    def __init__(self, assembly, interval=5, folder_name=None):
        """
        The assembly object contains all the necessary information
        to save the csv data, the token, experiment_id

        :param SimulationAssembly assembly: contains all the
        information tied to the running simulation
        :param optional int interval: the interval between consequent saves
        :param optional string folder_name: user-defined name for csv_data
        """
        # Caller-provided configuration
        self._assembly = assembly
        self._interval = interval
        self._folder_name = folder_name

        # Internal state: creation timestamp, worker thread and storage access
        self._creation_time = get_date_and_time_string()
        self._log_csv_thread = None
        self._storage_client = StorageClient()

        self.stop_flag = threading.Event()
Esempio n. 9
0
    def stop(self, state_change):
        """
        Stops the simulation and releases all resources

        :param state_change: The state change that led to releasing simulation resources
        """
        sim = self.simulation
        sim.kill_datetime = None

        if sim.cle is not None:
            sim.cle.stop_communication("Simulation server released")

        outcomes = ["%s: %s" % (sm.sm_id, str(sm.result))
                    for sm in sim.state_machines]
        logger.info("State machine outcomes: %s", ", ".join(outcomes))

        sim.state_machine_manager.shutdown()

        # Imported locally to avoid a circular dependency at module load time
        from hbp_nrp_backend.storage_client_api.StorageClient import StorageClient
        StorageClient().remove_temp_directory()
Esempio n. 10
0
    def save_record_to_user_storage(self):
        """
        Save the record to user storage

        :return: the name of the uploaded recording zip
        """
        records_folder = 'recordings'

        # Ask the recorder for the path of the current recording state
        record_path = self.simulation.cle.command_simulation_recorder(
            SimulationRecorderRequest.STATE).message

        file_name = 'recording_{timestamp}.{ext}'.format(
            timestamp=time.strftime('%Y-%m-%d_%H-%M-%S'), ext='zip')
        temp_dest = os.path.join(tempfile.gettempdir(), file_name)

        # Zip the recording locally, then push it to the user's storage
        ZipUtil.create_from_path(record_path, temp_dest)

        client = StorageClient()
        client.create_folder(self.simulation.token,
                             self.simulation.experiment_id,
                             records_folder)

        try:
            with open(temp_dest, 'rb') as record_file:
                client.create_or_update(
                    self.simulation.token, self.simulation.experiment_id,
                    os.path.join(records_folder, file_name),
                    record_file.read(),
                    "application/octet-stream")
        finally:
            # The local zip is only a staging copy; always clean it up
            os.remove(temp_dest)

        return file_name
Esempio n. 11
0
    def _parse_env_path(self, environment_path, experiment, using_storage):
        """
        Parses the environment path, depending if we are using a storage model from
        a template experiment(where we have to fetch the model from the storage),
        or we are running a storage experiment where the model is already there.
        Default case is when we are not using a storage model

        :param experiment: The experiment object.
        :param environment_path: Path to the environment configuration.
        :param using_storage: Private or template simulation
        """
        from hbp_nrp_backend.storage_client_api.StorageClient import StorageClient
        client = StorageClient()

        if using_storage:
            # Storage experiment: a custom (zipped) model takes precedence,
            # otherwise fetch a plain storage environment if referenced
            if experiment.environmentModel.customModelPath:
                return self._check_and_extract_environment_zip(experiment)
            if 'storage://' in environment_path:
                return self._copy_storage_environment(experiment)
            return environment_path

        # Template experiment: download the environment from the user's
        # 'environments' folder when the exc references the storage
        if not environment_path and 'storage://' in experiment.environmentModel.src:
            env_name = os.path.basename(experiment.environmentModel.src)
            environment_path = os.path.join(client.get_temp_directory(), env_name)
            folder_uuid = client.get_folder_uuid_by_name(
                UserAuthentication.get_header_token(request),
                self.simulation.ctx_id, 'environments')
            with open(environment_path, "w") as f:
                f.write(
                    client.get_file(
                        UserAuthentication.get_header_token(request),
                        folder_uuid, env_name, byname=True))
            return environment_path

        # Default: resolve relative to the local models directory
        return os.path.join(self.models_path, str(experiment.environmentModel.src))
    def put(self, sim_id):
        """
        Save the simulation CSV recorders' content to the storage.

        :param sim_id: The simulation ID
        :status 500: Error when saving recorder files
        :status 404: {0}
        :status 401: {1}
        :status 200: Success. Files saved into storage
        """

        simulation = _get_simulation_or_abort(sim_id)
        if not UserAuthentication.matches_x_user_name_header(
                request, simulation.owner):
            raise NRPServicesWrongUserException()

        csv_files = simulation.cle.get_simulation_CSV_recorders_files()

        # Done here in order to avoid circular dependencies introduced by the
        # way we __init__ the rest_server module.
        from hbp_nrp_backend.storage_client_api.StorageClient \
            import StorageClient

        client = StorageClient()

        time_string = get_date_and_time_string()
        # str.join is portable (string.join(list, sep) is Python-2-only)
        subfolder_name = '_'.join(['csv_records', time_string])

        # All recorder files go into one timestamped subfolder of the experiment
        folder_uuid = client.create_folder(
            UserAuthentication.get_header_token(request),
            simulation.experiment_id, subfolder_name)['uuid']

        if csv_files:
            for csv_file in csv_files:
                with open(csv_file.temporary_path) as csvfile:
                    client.create_or_update(
                        UserAuthentication.get_header_token(request),
                        folder_uuid, csv_file.name, csvfile.read(),
                        'text/plain')
        return 200
Esempio n. 13
0
    def _copy_storage_environment(self, experiment):
        """
        Copies a storage environment from the storage environment models
        to the running simulation temporary folder

        :param experiment: The experiment object.
        :return: the absolute path to the copied environment file
        """
        from hbp_nrp_backend.storage_client_api.StorageClient import StorageClient
        client = StorageClient()

        env_name = os.path.basename(experiment.environmentModel.src)
        environment_path = os.path.join(client.get_temp_directory(), env_name)

        # Locate the user's 'environments' folder, then fetch the file by name
        folder_uuid = client.get_folder_uuid_by_name(
            UserAuthentication.get_header_token(request),
            self.simulation.ctx_id, 'environments')
        file_content = client.get_file(
            UserAuthentication.get_header_token(request),
            folder_uuid, env_name, byname=True)

        with open(environment_path, "w") as env_file:
            env_file.write(file_content)
        return environment_path
Esempio n. 14
0
    def _load_brain(self, rng_seed):
        """
        Loads the neural simulator, interfaces, and configuration

        :param rng_seed RNG seed to spawn Nest with
        :return: tuple (braincontrol, braincomm, brainfilepath, neurons_config)
        """
        # Create interfaces to brain
        self._notify("Loading neural simulator")
        brainconfig.rng_seed = rng_seed
        braincomm, braincontrol = self._create_brain_adapters()

        self._notify("Loading brain and population configuration")
        # Resolve the brain file location: collab hack, storage, or local models
        brainfilepath = self.bibi.brainModel.file
        if self.__is_collab_hack():
            if self.exc.dir is not None:
                brainfilepath = os.path.join(self.exc.dir, brainfilepath)
        elif 'storage://' in brainfilepath:
            from hbp_nrp_backend.storage_client_api.StorageClient import StorageClient
            client = StorageClient()
            brain_name = os.path.basename(brainfilepath)
            brainfilepath = os.path.join(client.get_temp_directory(), brain_name)
            folder_uuid = client.get_folder_uuid_by_name(
                self.token, self.ctx_id, 'brains')
            with open(brainfilepath, "w") as f:
                f.write(client.get_file(self.token, folder_uuid,
                                        brain_name, byname=True))
        else:
            brainfilepath = os.path.join(models_path, brainfilepath)

        neurons_config = get_all_neurons_as_dict(
            self.bibi.brainModel.populations)

        return braincontrol, braincomm, brainfilepath, neurons_config
Esempio n. 15
0
    def post(self, sim_id):
        """
        Save the current running experiment SDF back to the storage
        :param sim_id: The sim_id
        :param context_id: The context_id of the experiment
        :status 500: Error saving file
        :status 200: Success. File written.
        """
        # pylint: disable=too-many-locals
        simulation = _get_simulation_or_abort(sim_id)

        # Wait (up to 3s) for the Gazebo SDF export service before calling it
        try:
            rospy.wait_for_service('/gazebo/export_world_sdf', 3)
        except rospy.ROSException as exc:
            raise NRPServicesUnavailableROSService(str(exc))

        dump_sdf_world = rospy.ServiceProxy('/gazebo/export_world_sdf',
                                            ExportWorldSDF)

        try:
            sdf_string = dump_sdf_world().sdf_dump
            tree = ET.fromstring(sdf_string)
            # Erase all robots from the SDF
            # NOTE(review): getparent() exists on lxml elements, not on the
            # stdlib xml.etree API — ET is presumably lxml.etree; confirm the
            # module-level import
            robots = simulation.cle.get_simulation_robots()
            for robot in robots:
                for m in tree.findall(".//model[@name='" + robot.robot_id +
                                      "']"):
                    m.getparent().remove(m)
            sdf_string = ET.tostring(tree, encoding='utf8', method='xml')
        except rospy.ServiceException as exc:
            raise NRPServicesClientErrorException(
                "Service did not process request:" + str(exc))

        client = StorageClient()

        # find the sdf world filename from the .exc
        exp_xml_file_path = client.clone_file(
            'experiment_configuration.exc',
            UserAuthentication.get_header_token(), simulation.experiment_id)

        experiment_file = client.parse_and_check_file_is_valid(
            exp_xml_file_path, exp_conf_api_gen.CreateFromDocument,
            exp_conf_api_gen.ExD_)

        world_file_name = experiment_file.environmentModel.src

        # Overwrite the world file in the experiment's storage folder
        client.create_or_update(UserAuthentication.get_header_token(),
                                simulation.experiment_id, world_file_name,
                                sdf_string, "text/plain")

        return 200
class SimulationResetStorage(Resource):
    """
    This resource handles the reset of a simulation, forwarding all the reset requests to the
    respective CLE instance.
    """

    storage_client = StorageClient()

    @swagger.model
    class ResetRequest(object):
        """
        Represents a request for the API implemented by SimulationResetStorage
        """

        # resetType: integer reset mode, see
        #   GazeboRosPackages/src/cle_ros_msgs/srv/ResetSimulation.srv
        # contextId: optional context for collab based simulations
        resource_fields = {
            'resetType': fields.Integer,
            'contextId': fields.String()
        }
        # resetType is the only mandatory request field
        required = ['resetType']

    @swagger.operation(
        notes='Handles the reset of a given simulation.',
        parameters=[
            {
                "name": "sim_id",
                "description": "The ID of the simulation whose state shall be retrieved",
                "required": True,
                "paramType": "path",
                "dataType": int.__name__
            },
            {
                "name": "body",
                "paramType": "body",
                "dataType": ResetRequest.__name__,
                "required": True
            }
        ],
        responseMessages=[
            {
                "code": 500,
                "message": ErrorMessages.SERVER_ERROR_500
            },
            {
                "code": 404,
                "message": ErrorMessages.SIMULATION_NOT_FOUND_404
            },
            {
                "code": 401,
                "message": ErrorMessages.SIMULATION_PERMISSION_401
            },
            {
                "code": 400,
                "message": "Invalid request, the JSON parameters are incorrect"
            },
            {
                "code": 200,
                "message": "Success. The reset type requested was performed on the given "
                           "simulation"
            }
        ]
    )
    @docstring_parameter(ErrorMessages.SERVER_ERROR_500,
                         ErrorMessages.SIMULATION_NOT_FOUND_404,
                         ErrorMessages.SIMULATION_PERMISSION_401)
    def put(self, sim_id, experiment_id):
        """
        Calls the CLE for resetting a given simulation to the last saved state in the storage.

        :param sim_id: The simulation ID.
        :param experiment_id: The experiment ID

        :> json resetType: the reset type the user wants to be performed, details about possible
                          values are given in
                          GazeboRosPackages/src/cle_ros_msgs/srv/ResetSimulation.srv

        :status 500: {0}
        :status 404: {1}
        :status 401: {2}
        :status 400: Invalid request, the JSON parameters are incorrect
        :status 200: The requested reset was performed successfully
        """
        sim = _get_simulation_or_abort(sim_id)

        if not UserAuthentication.can_modify(sim):
            raise NRPServicesWrongUserException()

        req_body = request.get_json(force=True)
        context_id = req_body.get('contextId', None)

        # Validate the request body against ResetRequest's declared fields
        for missing_par in (par for par in self.ResetRequest.required if par not in req_body):
            raise NRPServicesClientErrorException('Missing parameter {}'.format(missing_par))

        for invalid_p in (par for par in req_body if par not in self.ResetRequest.resource_fields):
            raise NRPServicesClientErrorException('Invalid parameter {}'.format(invalid_p))

        reset_type = req_body.get('resetType')

        rsr = srv.ResetSimulationRequest

        # Dispatch on the requested reset mode; unsupported modes return 400
        try:
            if reset_type == rsr.RESET_ROBOT_POSE:
                sim.cle.reset(reset_type)
            elif reset_type == rsr.RESET_WORLD:
                sim.cle.reset(reset_type,
                              world_sdf=self._get_sdf_world_from_storage(experiment_id, context_id))
            elif reset_type == rsr.RESET_FULL:
                brain_path, populations, _ = \
                    self._get_brain_info_from_storage(experiment_id, context_id)

                # A playback simulation has no live state to rebuild from storage
                if sim.playback_path is None:
                    self.reset_from_storage_all(sim, experiment_id, context_id)

                sim.cle.reset(reset_type,
                              world_sdf=self._get_sdf_world_from_storage(experiment_id, context_id),
                              brain_path=brain_path,
                              populations=populations)
            else:
                return {}, 400  # Other reset modes are unsupported

        except ROSCLEClientException as e:
            raise NRPServicesGeneralException(str(e), 'CLE error', 500)

        return {}, 200

    @classmethod
    def reset_from_storage_all(cls, simulation, experiment_id, context_id):
        """
        Reset state machines and transfer functions

        :param simulation: the simulation to reset
        :param experiment_id: the experiment id
        :param context_id: the context_id for collab based simulations
        """
        sim_dir = simulation.lifecycle.sim_dir

        # Start from a clean simulation directory, then re-clone everything
        SimUtil.clear_dir(sim_dir)
        token = UserAuthentication.get_header_token()
        cls.storage_client.clone_all_experiment_files(
            token, experiment_id, destination_dir=sim_dir)

        exc_path = simulation.lifecycle.experiment_path
        with open(exc_path) as exc_file:
            exc = exp_conf_api_gen.CreateFromDocument(exc_file.read())

        # The bibi file is referenced relative to the exc's directory
        bibi_path = os.path.join(os.path.dirname(exc_path), exc.bibiConf.src)
        with open(bibi_path) as bibi_file:
            bibi = bibi_api_gen.CreateFromDocument(bibi_file.read())

        cls.reset_brain(simulation, experiment_id, context_id)
        cls.reset_transfer_functions(simulation, bibi, sim_dir)
        cls.reset_state_machines(simulation, exc, sim_dir)

    @classmethod
    def reset_brain(cls, simulation, experiment_id, context_id):
        """
        Reset brain

        :param simulation: simulation object
        :param experiment_id: the related experiment id
        :param context_id: the context ID for collab based simulations
        :raises ROSCLEClientException: if the CLE reports an error in the brain script
        """
        brain_path, _, neurons_config = cls._get_brain_info_from_storage(experiment_id, context_id)

        # Convert the populations to a JSON dictionary
        for name, s in neurons_config.iteritems():
            # Convert slice to a dictionary for Non-Spinnaker brains
            experiment_population = SimulationResetStorage._get_experiment_population(name, s)
            if (experiment_population.type != ExperimentPopulationInfo.TYPE_POPULATION_SPINNAKER):
                # s is slice-like; a non-positive (or None, under Python 2
                # comparison rules) step is normalized to 1
                neurons_config[name] = {
                    'from': s.start,
                    'to': s.stop,
                    'step': s.step if s.step > 0 else 1}

        # Push the brain script and population layout to the CLE
        with open(brain_path, 'r') as brain_file:
            result_set_brain = simulation.cle.set_simulation_brain(
                brain_type='py',
                data_type='text',
                data=brain_file.read(),
                brain_populations=json.dumps(neurons_config))

            if result_set_brain is not None and result_set_brain.error_message:
                # Error in given brain
                raise ROSCLEClientException('{err}, column: {col}'.format(
                    err=result_set_brain.error_message, col=result_set_brain.error_column))

    @staticmethod
    def reset_state_machines(sim, experiment, sm_base_path):
        """
        Reset state machines

        :param sim: the simulation object
        :param experiment: experiment conf
        :param sm_base_path: base path of the experiment
        """
        sim.delete_all_state_machines()

        # Collect SMACH state machine sources from both the control and the
        # evaluation sections of the experiment configuration
        state_machine_paths = {}
        for section in (experiment.experimentControl,
                        experiment.experimentEvaluation):
            if section is None:
                continue
            for sm in section.stateMachine:
                if isinstance(sm, exp_conf_api_gen.SMACHStateMachine):
                    state_machine_paths[sm.id] = os.path.join(sm_base_path, sm.src)

        sim.state_machine_manager.add_all(state_machine_paths, sim.sim_id, sim.lifecycle.sim_dir)
        sim.state_machine_manager.initialize_all()

    @staticmethod
    def reset_transfer_functions(simulation, bibi_conf, base_path):
        """
        Reset transfer functions

        :param simulation: simulation object
        :param bibi_conf: BIBI conf
        :param base_path: base path of the experiment
        """
        # Remove every currently loaded TF first
        old_tfs, _ = simulation.cle.get_simulation_transfer_functions()
        for old_tf in old_tfs:
            old_tf_name = get_tf_name(old_tf)
            if old_tf_name is not None:  # ignore broken TFs
                simulation.cle.delete_simulation_transfer_function(old_tf_name)

        # Re-add the original TFs from the bibi. The error message is not
        # checked here: the CLE also handles invalid TFs
        for tf in bibi_conf.transferFunction:
            tf_code = '{}\n'.format(correct_indentation(generate_tf(tf, base_path), 0).strip())
            logger.debug(" RESET TF: {tf_name}\n{tf_code}\n"
                         .format(tf_name=get_tf_name(tf_code), tf_code=tf_code))
            simulation.cle.add_simulation_transfer_function(str(tf_code))

    @classmethod
    def _get_brain_info_from_storage(cls, experiment_id, context_id):
        """
        Gathers from the storage the brain script and the populations by getting the BIBI
        configuration file.

        :param experiment_id: the id of the experiment in which to look for the brain information
        :param context_id: the context ID for collab based simulations (unused)
        :return: A tuple with the path to the brain file, the population info list and
                 the raw population dictionary
        """
        del context_id  # Unused

        token = UserAuthentication.get_header_token()

        # The .exc names the BIBI file, which in turn names the brain file
        exc_content = cls.storage_client.get_file(
            token, experiment_id, 'experiment_configuration.exc', by_name=True)
        bibi_name = exp_conf_api_gen.CreateFromDocument(exc_content).bibiConf.src

        bibi_content = cls.storage_client.get_file(
            token, experiment_id, bibi_name, by_name=True)
        bibi_dom = bibi_api_gen.CreateFromDocument(bibi_content)

        brain_name = os.path.basename(bibi_dom.brainModel.file)
        brain_path = cls.storage_client.clone_file(brain_name, token, experiment_id)

        populations = get_all_neurons_as_dict(bibi_dom.brainModel.populations)
        populations_clean = [
            SimulationResetStorage._get_experiment_population(name, value)
            for (name, value) in populations.iteritems()
        ]

        return brain_path, populations_clean, populations

    @staticmethod
    def _get_experiment_population(name, value):
        """
        Gets an ExperimentPopulationInfo object for the given population

        :param name: The population name
        :param value: The value describing the population (slice, list or string)
        :return: the corresponding ExperimentPopulationInfo message
        """
        # type 1: population described by a slice (start/stop/step)
        if isinstance(value, slice):
            return msg.ExperimentPopulationInfo(name=name, type=1, ids=[],
                                                start=value.start, stop=value.stop,
                                                step=value.step)

        # type 2: population described by an explicit list of neuron ids
        if isinstance(value, list):
            return msg.ExperimentPopulationInfo(name=name, type=2, ids=value,
                                                start=0, stop=0, step=0)

        # type 3: population referenced by a string; type 0: anything else
        population_type = 3 if isinstance(value, basestring) else 0
        return msg.ExperimentPopulationInfo(name=name, type=population_type, ids=[],
                                            start=0, stop=0, step=0)

    @classmethod
    def _get_sdf_world_from_storage(cls, experiment_id, context_id):
        """
        Download from the storage an sdf world file as a string.
        The file belongs to the experiment identified by experiment_id

        :param experiment_id: the ID of the experiment in which to look for the world sdf
        :param context_id: the context ID for collab based simulations (unused)
        :return: The content of the world sdf file
        """
        del context_id  # Unused

        token = UserAuthentication.get_header_token()

        # The .exc names the sdf world file to fetch
        exc_content = cls.storage_client.get_file(
            token, experiment_id, 'experiment_configuration.exc', by_name=True)
        world_name = exp_conf_api_gen.CreateFromDocument(exc_content).environmentModel.src

        return cls.storage_client.get_file(token, experiment_id, world_name, by_name=True)
    def put(self, experiment_id):
        """
        Save transfer functions of an experiment to the storage.

        :param path experiment_id: The experiment_id of the experiment where the transfer functions
         will be saved
        :<json body json array of string transfer_functions: the transfer functions as python
        :status 500: BIBI configuration file not found
        :status 500: Error saving file
        :status 404: The experiment with the given experiment ID was not found
        :status 404: The request body is malformed
        :status 200: Success. File written.
        """
        # pylint: disable=too-many-locals
        # Done here in order to avoid circular dependencies introduced by the
        # way we __init__ the rest_server module
        from hbp_nrp_backend.storage_client_api.StorageClient \
            import StorageClient

        body = request.get_json(force=True)
        if 'transfer_functions' not in body:
            raise NRPServicesClientErrorException(
                "Transfer functions code should be sent in "
                "the body under the 'transfer_functions' key")

        token = UserAuthentication.get_header_token(request)
        client = StorageClient()

        # BUGFIX: the keyword argument is 'by_name' (as in every other
        # StorageClient.get_file call in this module), not 'byname'
        experiment_file = client.get_file(
            token,
            experiment_id,
            'experiment_configuration.exc',
            by_name=True)

        bibi_filename = exp_conf_api_gen.CreateFromDocument(
            experiment_file).bibiConf.src

        bibi_file_path = client.clone_file(bibi_filename, token, experiment_id)

        bibi = client.parse_and_check_file_is_valid(
            bibi_file_path, bibi_api_gen.CreateFromDocument,
            bibi_api_gen.BIBIConfiguration)

        # Remove all transfer functions from the BIBI; each one is saved in a
        # separate python file that the BIBI references instead.
        del bibi.transferFunction[:]

        threads = []
        for transfer_function in body['transfer_functions']:
            transfer_function_name = get_tf_name(transfer_function)
            if transfer_function_name is None:  # skip broken TFs
                continue

            transfer_function_node = bibi_api_gen.PythonTransferFunction()
            transfer_function_node.src = transfer_function_name + ".py"
            bibi.transferFunction.append(transfer_function_node)

            # upload each TF file concurrently
            t = Thread(target=client.create_or_update,
                       kwargs={
                           'token': token,
                           'experiment': experiment_id,
                           'filename': transfer_function_name + ".py",
                           'content': transfer_function,
                           'content_type': 'text/plain'
                       })
            t.start()
            threads.append(t)

        # we need to prettify the parsed bibi before writing it back
        pretty_bibi = xml.dom.minidom.parseString(
            bibi.toxml("utf-8")).toprettyxml()
        t = Thread(target=client.create_or_update,
                   kwargs={
                       'token': token,
                       'experiment': experiment_id,
                       'filename': bibi_filename,
                       'content': pretty_bibi,
                       'content_type': 'text/plain'
                   })
        t.start()
        threads.append(t)

        # wait for every upload to finish before reporting success
        for upload in threads:
            upload.join()
        return 200
Esempio n. 18
0
    def initialize(self, state_change):
        """
        Initializes the simulation

        :param state_change: The state change that caused the simulation to be initialized
        :raises NRPServicesGeneralException: if simulation models cannot be accessed or \
            communication with the CLE fails
        """
        # TODO: fix dependencies so these import are not necessary
        # anymore
        from hbp_nrp_backend.storage_client_api.StorageClient import StorageClient
        simulation = self.simulation
        try:
            # A private simulation lives in the storage and its files must
            # be cloned locally first; otherwise paths come from the
            # simulation object itself.
            using_storage = simulation.private is not None
            if using_storage:
                client = StorageClient()
                clone_folder, experiment_paths = client.clone_all_experiment_files(
                    UserAuthentication.get_header_token(request),
                    simulation.experiment_id)
                self.__experiment_path = experiment_paths['experiment_conf']
                self.__simulation_root_folder = clone_folder

                environment_path = experiment_paths['environment_conf']
            else:
                # template experiment: resolve the conf relative to the
                # preconfigured experiment path
                self.__experiment_path = os.path.join(
                    self.__experiment_path, simulation.experiment_conf)
                self.__simulation_root_folder = os.path.dirname(
                    self.__experiment_path)
                environment_path = simulation.environment_conf
            experiment, environment_path = self._parse_exp_and_initialize_paths(
                self.__experiment_path, environment_path, using_storage)

            # schedule the automatic kill time from the experiment timeout
            simulation.kill_datetime = datetime.datetime.now(timezone) \
                + datetime.timedelta(seconds=experiment.timeout)
            logger.info("simulation timeout initialized")

            simulation_factory_client = ROSCLESimulationFactoryClient()
            simulation_factory_client.create_new_simulation(
                environment_path, self.__experiment_path,
                simulation.gzserver_host, simulation.reservation,
                simulation.brain_processes, simulation.sim_id,
                str(simulation.kill_datetime), simulation.playback_path,
                UserAuthentication.get_header_token(request),
                self.simulation.ctx_id)
            # a playback simulation is driven by a playback client instead
            # of a live CLE client
            if not simulation.playback_path:
                simulation.cle = ROSCLEClient(simulation.sim_id)
            else:
                simulation.cle = PlaybackClient(simulation.sim_id)
            logger.info("simulation initialized")

        except IOError as e:
            raise NRPServicesGeneralException(
                "Error while accessing simulation models (" + repr(e.message) +
                ")", "Models error")
        except rospy.ROSException as e:
            raise NRPServicesGeneralException(
                "Error while communicating with the CLE (" + repr(e.message) +
                ")", "CLE error")
        except rospy.ServiceException as e:
            raise NRPServicesGeneralException(
                "Error starting the simulation. (" + repr(e.message) + ")",
                "rospy.ServiceException",
                data=e.message)
Esempio n. 19
0
    def post(self, experiment_id):
        """
        Save the current running experiment SDF back to the storage

        :param experiment_id: The experiment ID
        :status 500: Error saving file
        :status 200: Success. File written.
        """
        # pylint: disable=too-many-locals
        body = request.get_json(force=True)
        context_id = body.get('context_id', None)
        # Done here in order to avoid circular dependencies introduced by the
        # way we __init__ the rest_server module.
        from hbp_nrp_backend.storage_client_api.StorageClient \
            import StorageClient
        try:
            rospy.wait_for_service('/gazebo/export_world_sdf', 3)
        except rospy.ROSException as exc:
            raise NRPServicesUnavailableROSService(str(exc))

        dump_sdf_world = rospy.ServiceProxy('/gazebo/export_world_sdf',
                                            ExportWorldSDF)
        robot_pose = []

        try:
            sdf_string = dump_sdf_world().sdf_dump
            tree = ET.fromstring(sdf_string)
            try:
                robot_pose = tree.findall(
                    ".//state/model[@name='robot']/pose")[0].text.split()
            # pylint: disable=bare-except
            except:
                # best-effort: a world without a robot pose is still saved
                logger.error("Can't retrieve robot position.")
            # Erase all robots from the SDF
            # NOTE(review): Element.getparent() is an lxml-only API; this
            # assumes ET is lxml.etree — stdlib ElementTree would fail here.
            for m in tree.findall(".//model[@name='robot']"):
                m.getparent().remove(m)
            sdf_string = ET.tostring(tree, encoding='utf8', method='xml')
        except rospy.ServiceException as exc:
            raise NRPServicesClientErrorException(
                "Service did not process request:" + str(exc))

        client = StorageClient()
        token = UserAuthentication.get_header_token(request)

        # find the sdf world filename from the .exc
        exp_xml_file_path = client.clone_file(
            'experiment_configuration.exc', token, experiment_id)

        experiment_file = client.parse_and_check_file_is_valid(
            exp_xml_file_path, exp_conf_api_gen.CreateFromDocument,
            exp_conf_api_gen.ExD_)

        world_file_name = experiment_file.environmentModel.src

        if 'storage://' in world_file_name:
            # shared world: save into the storage-wide 'environments' folder
            world_file_name = os.path.basename(world_file_name)
            client.create_or_update(
                token,
                client.get_folder_uuid_by_name(token, context_id,
                                               'environments'),
                world_file_name, sdf_string, "text/plain")
        else:
            client.create_or_update(token, experiment_id,
                                    world_file_name, sdf_string, "text/plain")

        # Save the robot position in the ExDConf file
        # BUGFIX: compare the length with '==', not identity ('is'), which
        # only worked by accident through CPython small-int caching
        if len(robot_pose) == 6:  # We need 6 elements (from Gazebo)
            experiment_file.environmentModel.robotPose.x = robot_pose[0]
            experiment_file.environmentModel.robotPose.y = robot_pose[1]
            experiment_file.environmentModel.robotPose.z = robot_pose[2]
            quaternion = tf.transformations.quaternion_from_euler(
                float(robot_pose[3]), float(robot_pose[4]),
                float(robot_pose[5]))
            experiment_file.environmentModel.robotPose.ux = quaternion[0]
            experiment_file.environmentModel.robotPose.uy = quaternion[1]
            experiment_file.environmentModel.robotPose.uz = quaternion[2]
            experiment_file.environmentModel.robotPose.theta = quaternion[3]

            client.create_or_update(
                token,
                experiment_id, 'experiment_configuration.exc',
                experiment_file.toxml("utf-8"), "text/plain")

        else:
            # BUGFIX: robot_pose is a list; concatenating it to a string
            # raised TypeError and masked the actual error being reported
            logger.error("Malformed robot position tag in SDF: %s", robot_pose)

        return 200
Esempio n. 20
0
class BackendSimulationLifecycle(SimulationLifecycle):
    """
    This class implements the backend simulation lifecycle
    """
    def __init__(self, simulation, initial_state='created'):
        """
        Creates a new backend simulation lifecycle

        :param simulation: The simulation for which the simulation lifecycle is created
        :param initial_state: The initial lifecycle state (defaults to 'created')
        """
        super(BackendSimulationLifecycle,
              self).__init__(TOPIC_LIFECYCLE(simulation.sim_id), initial_state)
        self.__simulation = simulation
        # simulation root directory; populated in initialize() for live runs
        self._sim_dir = None
        self.__models_path = Settings.nrp_models_directory
        self.__experiment_path = None
        self.__textures_loaded = False
        self.__storageClient = StorageClient()

    @property
    def simulation(self):
        """
        Gets the simulation for which the lifecycle is controlled
        :return: The simulation object
        """
        return self.__simulation

    def _load_state_machines(self, exc):  # pragma: no cover
        """
        Parses the experiment configuration, loads state machines

        :param exc: The experiment configuration (exc DOM object)
        :return: the (unmodified) experiment configuration
        """
        # State machines are skipped entirely when replaying a recording
        state_machine_paths = {}
        if exc.experimentControl is not None and not self.simulation.playback_path:
            state_machine_paths.update({
                sm.id: os.path.join(self._sim_dir, sm.src)
                for sm in exc.experimentControl.stateMachine
                if isinstance(sm, exp_conf_api_gen.SMACHStateMachine)
            })

        if exc.experimentEvaluation is not None and not self.simulation.playback_path:
            state_machine_paths.update({
                sm.id: os.path.join(self._sim_dir, sm.src)
                for sm in exc.experimentEvaluation.stateMachine
                if isinstance(sm, exp_conf_api_gen.SMACHStateMachine)
            })

        self.simulation.state_machine_manager.add_all(state_machine_paths,
                                                      self.simulation.sim_id,
                                                      self.sim_dir)
        self.simulation.state_machine_manager.initialize_all()
        logger.info("Requesting simulation resources")

        return exc

    def _prepare_custom_environment(self, exc):
        """
        Download and extracts zipped environment defined in the exc

        :param exc: The exc DOM object
        :raises NRPServicesGeneralException: if the zipped model is not found in the storage
        """

        # pylint: disable=too-many-locals
        env_model = Model(exc.environmentModel.model, ResourceType.ENVIRONMENT)
        data = self.__storageClient.get_model(
            UserAuthentication.get_header_token(), self.simulation.ctx_id,
            env_model)

        # if the zip is not there, prompt the user to check his uploaded models
        if not data:
            raise NRPServicesGeneralException(
                "Could not find selected zip {} in the list of uploaded custom models. Please make "
                "sure that it has been uploaded correctly".format(
                    os.path.dirname(exc.environmentModel.model)),
                "Zipped model retrieval failed")

        # unpack the environment into the simulation's assets directory
        ZipUtil.extractall(zip_abs_path=io.BytesIO(data),
                           extract_to=os.path.join(self._sim_dir, 'assets'),
                           overwrite=True)

    def initialize(self, state_change):
        """
        Initializes the simulation

        :param state_change: The state change that caused the simulation to be initialized
        :raises NRPServicesGeneralException: on non-private experiments, model access \
            errors or CLE communication failures
        """

        simulation = self.simulation
        if not simulation.playback_path:
            self._sim_dir = SimUtil.init_simulation_dir()

        try:
            # this backend only supports storage-backed (private) experiments
            if not simulation.private:
                raise NRPServicesGeneralException(
                    "Only private experiments are supported", "CLE error", 500)

            # recordings are only needed when replaying, so exclude them
            # from the clone for live simulations
            self.__storageClient.clone_all_experiment_files(
                token=UserAuthentication.get_header_token(),
                experiment=simulation.experiment_id,
                destination_dir=self._sim_dir,
                exclude=['recordings/']
                if not simulation.playback_path else [])

            # divine knowledge about the exc name
            self.__experiment_path = os.path.join(
                self._sim_dir, 'experiment_configuration.exc')

            with open(self.__experiment_path) as exd_file:
                exc = exp_conf_api_gen.CreateFromDocument(exd_file.read())

            self._load_state_machines(exc)
            if exc.environmentModel.model:  # i.e., custom zipped environment
                self._prepare_custom_environment(exc)

            # a 'simulation' timeout counts simulated time, anything else
            # is treated as wall-clock ('real') time
            simulation.timeout_type = (
                TimeoutType.SIMULATION if exc.timeout.time
                == TimeoutType.SIMULATION else TimeoutType.REAL)

            timeout = exc.timeout.value()

            # only real-time timeouts map to an absolute kill datetime
            if simulation.timeout_type == TimeoutType.REAL:
                timeout = datetime.datetime.now(timezone) + datetime.timedelta(
                    seconds=timeout)
                simulation.kill_datetime = timeout
            else:
                simulation.kill_datetime = None

            logger.info("simulation timeout initialized")

            simulation_factory_client = ROSCLESimulationFactoryClient()
            simulation_factory_client.create_new_simulation(
                self.__experiment_path, simulation.gzserver_host,
                simulation.reservation, simulation.brain_processes,
                simulation.sim_id, str(timeout),
                simulation.timeout_type, simulation.playback_path,
                UserAuthentication.get_header_token(), self.simulation.ctx_id,
                self.simulation.experiment_id)
            # playback simulations are driven by a playback client instead
            # of a live CLE client
            if not simulation.playback_path:
                simulation.cle = ROSCLEClient(simulation.sim_id)
            else:
                simulation.cle = PlaybackClient(simulation.sim_id)
            logger.info("simulation initialized")

        except IOError as e:
            raise NRPServicesGeneralException(
                "Error while accessing simulation models (" + repr(e.message) +
                ")", "Models error")
        except rospy.ROSException as e:
            raise NRPServicesGeneralException(
                "Error while communicating with the CLE (" + repr(e.message) +
                ")", "CLE error")
        except rospy.ServiceException as e:
            raise NRPServicesGeneralException(
                "Error starting the simulation. (" + repr(e.message) + ")",
                "rospy.ServiceException",
                data=e.message)
        # pylint: disable=broad-except
        except Exception as e:
            # catch-all boundary: surface any unexpected failure to the client
            raise NRPServicesGeneralException(
                "Error starting the simulation. (" + repr(e) + ")",
                "Unknown exception occured",
                data=e.message)

    def start(self, state_change):
        """
        Starts the simulation

        :param state_change: The state change that led to starting the simulation
        """
        logger.info("Starting State Machines...")
        try:
            self.simulation.state_machine_manager.start_all(False)
        except Exception as e:  # pylint: disable=broad-except
            logger.error("Starting state machines failed")
            logger.exception(e)
            # The frontend will be notified of any state machine issues directly
            # over the cle_error topic

    def stop(self, state_change):
        """
        Stops the simulation and releases all resources

        :param state_change: The state change that led to releasing simulation resources
        """

        if self.simulation.cle is not None:

            # persist an in-progress recording before tearing down the CLE
            is_recording = self.simulation.cle.command_simulation_recorder(
                SimulationRecorderRequest.STATE).value

            if is_recording is True:
                self.save_record_to_user_storage()

        self.simulation.kill_datetime = None

        if self.simulation.cle is not None:
            self.simulation.cle.stop_communication(
                "Simulation server released")
        logger.info(
            "State machine outcomes: %s",
            ", ".join("%s: %s" % (sm.sm_id, str(sm.result))
                      for sm in self.simulation.state_machines))

        self.simulation.state_machine_manager.shutdown()

    def pause(self, state_change):
        """
        Pauses the simulation

        :param state_change: The state change that paused the simulation
        """
        # From the backend side, there is nothing to do because state machines
        # keep running
        pass

    def fail(self, state_change):
        """
        Fails the simulation

        :param state_change: The state change which resulted in failing the simulation
        """
        self.simulation.kill_datetime = None
        if self.simulation.cle is not None:
            self.simulation.cle.stop_communication("Simulation has failed")
        self.simulation.state_machine_manager.terminate_all()

    def reset(self, state_change):
        """
        Resets the simulation

        :param state_change: The state change that led to reseting the simulation
        """
        logger.info(
            "State machine outcomes: %s",
            ", ".join("%s: %s" % (sm.sm_id, str(sm.result))
                      for sm in self.simulation.state_machines))
        self.simulation.state_machine_manager.terminate_all()

        logger.info("simulation reset")

    def save_record_to_user_storage(self):
        """
        Save the record to user storage

        :return: the name of the zip file written to the storage
        """

        client_record_folder = 'recordings'
        record_path = self.simulation.cle.command_simulation_recorder(
            SimulationRecorderRequest.STATE).message

        file_name = 'recording_{timestamp}.{ext}'.format(
            timestamp=time.strftime('%Y-%m-%d_%H-%M-%S'), ext='zip')

        # zip the recording into a temp file, upload it, then clean up
        temp_dest = os.path.join(tempfile.gettempdir(), file_name)
        ZipUtil.create_from_path(record_path, temp_dest)
        client = StorageClient()

        client.create_folder(self.simulation.token,
                             self.simulation.experiment_id,
                             client_record_folder)

        try:
            with open(temp_dest, 'rb') as record_file:
                zip_data = record_file.read()
                client.create_or_update(
                    self.simulation.token, self.simulation.experiment_id,
                    os.path.join(client_record_folder, file_name), zip_data,
                    "application/octet-stream")

        finally:
            # always remove the temp zip, even if the upload failed
            os.remove(temp_dest)

        return file_name

    @property
    def experiment_path(self):
        """
        Gets the experiment_path

        :return: The experiment_path
        """
        return self.__experiment_path

    @experiment_path.setter
    def experiment_path(self, value):
        """
        Sets the experiment_path

        :param value: the new experiment path
        """
        self.__experiment_path = value

    @property
    def sim_dir(self):
        """
        Gets the simulation root folder

        :return: The _sim_dir
        """
        return self._sim_dir
Esempio n. 21
0
class CSVLogger(object):
    """
    Class that logs CSV data to the storage,
    received from the CLE every n seconds
    Runs in a separate killable thread
    """
    def __init__(self, assembly, interval=5, folder_name=None):
        """
        The assembly object contains all the necessary information
        to save the csv data, the token, experiment_id

        :param SimulationAssembly assembly: contains all the
        information tied to the running simulation
        :param optional int interval: the interval between consequent saves
        :param optional string folder_name: user-defined name for csv_data
        """
        self._log_csv_thread = None
        # timestamp used to name the per-run storage subfolder
        self._creation_time = get_date_and_time_string()
        self._interval = interval
        self._assembly = assembly
        self._folder_name = folder_name
        self._storage_client = StorageClient()

        # event signaling the background logging thread to stop
        self.stop_flag = threading.Event()

    def initialize(self):
        """
        Initializes a killable thread which runs the log_csv function
        every interval seconds, and starts the thread
        """

        self.stop_flag.clear()

        def _log_csv_job():  # pragma: no cover
            """
            The job to put in the thread: save CSV data until stopped
            """
            while not self.stop_flag.isSet():
                self._log_csv()
                time.sleep(self._interval)

        self._log_csv_thread = threading.Thread(target=_log_csv_job)
        self._log_csv_thread.start()

    def shutdown(self):
        """
        Terminates the killable thread that saves the csv data
        """
        if self._log_csv_thread:
            self.stop_flag.set()
            # flush one final snapshot before the thread exits
            self._log_csv()

        if self._log_csv_thread:
            self._log_csv_thread.join()  # wait till thread returns
            self._log_csv_thread = None

    def reset(self):
        """
        Resets the killable thread that saves the csv data and updates
        the creation time. This is done to create a new folder after reset
        """
        self.shutdown()
        # update the creation time to store the data in a separate folder upon reset
        self._creation_time = get_date_and_time_string()
        self.initialize()

    def _log_csv(self):
        """
        Appends the simulation CSV recorders' content to the storage
        """
        csv_files = [
            CSVRecordedFile(recorded_file[0], recorded_file[1],
                            recorded_file[2])
            for recorded_file in tf_framework.dump_csv_recorder_to_files()
        ]
        if csv_files:
            time_string = self._creation_time if self._creation_time \
                else get_date_and_time_string()
            subfolder_name = self._folder_name if self._folder_name \
                else '_'.join(['csv_records', time_string])
            # no harm calling the function since the create_folder does
            # nothing if the folder exists
            folder_uuid = self._storage_client.create_folder(
                self._assembly.sim_config.token,
                self._assembly.sim_config.experiment_id,
                subfolder_name)['uuid']

            for csv_file in csv_files:
                # if there is no lock file it means that for the currently running sim
                # no csv files have been created, thus we create them in the storage and
                # append the headers. To make sure that we don't do it every time in the
                # context of the current simulation, we check if a lock file exists,
                # if not, we create it
                lock_filename = csv_file.name + '.lock'
                lock_full_path = os.path.join(self._assembly.sim_dir,
                                              subfolder_name, lock_filename)
                dirname = os.path.dirname(lock_full_path)
                lock = (SimUtil.find_file_in_paths(lock_filename, [dirname]) or
                        SimUtil.find_file_in_paths(csv_file.name, [dirname]))
                if not lock:
                    # first save of this file: write headers + values and
                    # create the lock file marker
                    content = ''.join(csv_file.headers) + \
                        ''.join(csv_file.values)
                    if not os.path.exists(dirname):
                        os.makedirs(dirname)
                    with open(os.path.join(dirname, lock_filename), 'a'):
                        pass
                else:
                    content = ''.join(csv_file.values)
                # append when the lock exists, otherwise create the file
                self._storage_client.create_or_update(
                    self._assembly.sim_config.token,
                    folder_uuid,
                    csv_file.name,
                    content,
                    'text/plain',
                    append=lock)
class CLEGazeboSimulationAssembly(GazeboSimulationAssembly):
    """
    This class assembles the simulation using the CLE
    """
    def __init__(self, sim_config):
        """
        Creates a new simulation assembly to simulate an experiment using the CLE and Gazebo

        :param sim_config: config of the simulation to be managed
        """
        super(CLEGazeboSimulationAssembly, self).__init__(sim_config)
        self.cle_server = None

        # derive the asset and resource locations from the simulation directory
        sim_dir = sim_config.sim_dir
        self.simAssetsDir = os.path.join(sim_dir, 'assets')
        self._simResourcesDir = os.path.join(sim_dir, 'resources')

        # storage client rooted at the simulation directory
        storage_client = StorageClient()
        storage_client.set_sim_dir(sim_dir)
        self._storageClient = storage_client

    @property
    def storage_client(self):
        """
        Gets the storage client handler

        :return: the StorageClient instance configured for this simulation
        """
        return self._storageClient

    def _initialize(self, except_hook):
        """
        Internally initialize the simulation: create the CLE server, start
        Gazebo, then load environment, robots, brain, external modules and
        transfer functions, in that order.

        :param except_hook: A method that should be called when there is a critical error
        """
        # pylint: disable=too-many-locals

        # create the CLE server and lifecycle first to report any failures properly
        # initialize the cle server and services
        logger.info("Creating CLE Server")
        self.cle_server = ROSCLEServer(self.sim_config.sim_id,
                                       self.sim_config.timeout,
                                       self.sim_config.timeout_type,
                                       self.gzserver, self.ros_notificator)

        self.cle_server.setup_handlers(self)
        # Put the resources folder into the sys path for import
        try:
            SimUtil.makedirs(self._simResourcesDir)
            with open(os.path.join(self._simResourcesDir, '__init__.py'),
                      'w+'):
                pass  # make sure the __init__.py exists
        except IOError as err:
            # non-fatal: the simulation continues without importable resources
            logger.info(
                "Failed to setup resource directory due to {err}".format(
                    err=err))
        sys.path.insert(0, self._simResourcesDir)

        # start Gazebo simulator and bridge
        self._start_gazebo(extra_models=self.simAssetsDir + ':' + self.sim_dir)

        # load user textures in Gazebo
        self._load_textures()

        # load environment and robot models
        models, lights = self._load_environment(
            self.sim_config.world_model.resource_path.abs_path)

        # find robot
        self.robotManager.set_robot_dict(self.sim_config.robot_models)
        self._load_robot()

        # collect each robot's initial pose; handed to the CLE below as
        # its initial_robot_poses
        robot_poses = {}
        for rid, robot in self.robotManager.get_robot_dict().iteritems():
            robot_poses[rid] = robot.pose

        # load robot adapters
        robotcomm, robotcontrol = self._create_robot_adapters()

        # load the brain
        braincontrol, braincomm, brainfile, brainconf = self._load_brain()

        #load external modules
        externalmodulearray = ExternalModuleManager()

        # initialize the cle server and services
        logger.info("Preparing CLE Server")
        self.cle_server.cle = self.__load_cle(robotcontrol, robotcomm,
                                              braincontrol, braincomm,
                                              brainfile, brainconf,
                                              externalmodulearray, robot_poses,
                                              models, lights)
        self.cle_server.prepare_simulation(except_hook)

        # load transfer functions
        self.__load_tfs()

        # Wait for the backend rendering environment to load (for any sensors/cameras)
        self._notify("Waiting for Gazebo simulated sensors to be ready")
        self.robotManager.scene_handler().wait_for_backend_rendering()
        # Spawns a new thread for the csv logger
        # pylint: disable=protected-access
        self.cle_server._csv_logger.initialize()

    def _prepare_simconfig_robots(self):
        """
        Reads robot list from bibi and poses from exc and populates robot manager.
        Custom robots are prepared via the robot handler; standard robots are
        located in the configured model paths (with a backward-compatibility
        fallback to the simulation directory).

        :raises Exception: if a custom robot cannot be prepared, or an SDF file
            cannot be found for a robot
        :return: -
        """
        # pylint: disable=too-many-branches
        if not self.sim_config.robot_models:
            return

        for robot in self.sim_config.robot_models.values():
            if robot.isCustom:
                # pylint: disable=protected-access
                status, ret = self.cle_server._robotHandler.prepare_custom_robot(
                    robot.model)
                if not status:
                    raise Exception(
                        "Could not prepare custom robot {err}".format(err=ret))
                sdf_abs_path = ret

            else:
                sdf_abs_path = SimUtil.find_file_in_paths(
                    os.path.join(robot.id,
                                 os.path.basename(robot.SDFFileAbsPath)),
                    self.sim_config.model_paths)

                # Perhaps it's a previously cloned experiment? Try with modelTag.value() BUT
                # only look into the simulation_directory, as DELETE robot REST call, if called,
                # would delete this file. Only for the exps without robotid folder in the storage
                # TODO: backward compatibility code. Remove when we decide not to support anymore
                if not sdf_abs_path:
                    sdf_abs_path = SimUtil.find_file_in_paths(
                        robot.SDFFileAbsPath, [self.sim_dir])

            # still couldn't find the SDF, abort!
            if not sdf_abs_path:
                raise Exception("Could not find robot file: {0}".format(
                    robot.SDFFileAbsPath))

            robot.SDFFileAbsPath = sdf_abs_path

            # Find robot specific roslaunch file in the directory where the SDF resides
            # Take the first one (by name) if multiple available
            rosLaunchRelPath = next(
                (f for f in os.listdir(os.path.dirname(robot.SDFFileAbsPath))
                 if f.endswith('.launch')), None)
            robot.rosLaunchAbsPath = (
                None if rosLaunchRelPath is None else os.path.join(
                    os.path.dirname(robot.SDFFileAbsPath), rosLaunchRelPath))

    def _load_environment(self, world_file_abs_path):
        """
        Loads the environment and robot in Gazebo

        :param world_file_abs_path: Path to the world sdf
        :return: whatever the scene handler's world-file parser returns
        """
        self._notify("Loading experiment environment")
        scene_handler = self.robotManager.scene_handler()
        return scene_handler.parse_gazebo_world_file(world_file_abs_path)

    def _load_textures(self):
        """
        Loads custom textures in Gazebo.

        Both the retrieval and the loading are best-effort: failures are
        logged and ignored so that missing textures never abort a simulation.
        """
        self._notify("Loading textures")

        try:
            textures = self.storage_client.get_textures(
                self.sim_config.experiment_id, self.sim_config.token)
        # narrow the original bare except to Exception so that SystemExit
        # and KeyboardInterrupt still propagate; behavior is otherwise the
        # same best-effort skip
        except Exception:  # pylint: disable=broad-except
            logger.info("Non-existent textures or folder!")
            return  # ignore missing textures or texture folder

        try:
            if textures:
                self.robotManager.scene_handler().load_textures(textures)
        except Exception:  # pylint: disable=broad-except
            logger.info("Timeout while trying to load textures.")

    # pylint: disable-msg=too-many-branches
    def _load_robot(self):
        """
        Loads robots defined in the bibi and initializes any external controller
        """
        # Set retina config for the robotManager
        if self.sim_config.retina_config:
            self._notify("Configuring Retina Camera Plugin")
            self.robotManager.retina_config = self.sim_config.retina_config

        self._notify("Loading robots")
        self._prepare_simconfig_robots()
        for robot in self.robotManager.get_robot_dict().values():
            self.robotManager.initialize(robot)

        # load external robot controller
        if self.sim_config.ext_robot_controller is not None:
            robot_controller_filepath = SimUtil.find_file_in_paths(
                self.sim_config.ext_robot_controller,
                self.sim_config.model_paths)
            # find_file_in_paths may return None: guard before os.path.isfile
            # to avoid a TypeError, and fall back to the simulation directory
            if not (robot_controller_filepath and
                    os.path.isfile(robot_controller_filepath)) \
                    and self.sim_dir is not None:
                robot_controller_filepath = os.path.join(
                    self.sim_dir, self.sim_config.ext_robot_controller)
            if robot_controller_filepath and \
                    os.path.isfile(robot_controller_filepath):
                self._notify("Loading external robot controllers")  # +1
                res = subprocess.call([robot_controller_filepath, 'start'])
                if res > 0:
                    logger.error(
                        "The external robot controller could not be loaded")
                    self.shutdown()
                    return

    def _create_robot_adapters(self):  # pragma: no cover
        """
        Creates the adapter components for the robot side

        :return: A tuple of the communication and control adapter for the robot side
        :raises NotImplementedError: always; subclasses must override this method
        """
        raise NotImplementedError(
            "This method must be overridden in an implementation")

    def _load_brain(self):
        """
        Loads the neural simulator, interfaces, and configuration

        :return: A tuple (braincontrol, braincomm, brain_file_path, neurons_config);
            the last two are None when no brain model is configured
        :raises NRPServicesGeneralException: if the brain file cannot be found
        """

        # Create interfaces to brain
        self._notify("Loading neural simulator")
        brainconfig.rng_seed = self.rng_seed
        braincomm, braincontrol = self._create_brain_adapters()

        self._notify("Loading brain and population configuration")
        # experiments without a brain model still get working adapters
        if not self.sim_config.brain_model:
            return braincontrol, braincomm, None, None

        # zipped custom brains are extracted into the simulation directory first
        if self.sim_config.brain_model.is_custom:
            self._extract_brain_zip()

        brain_abs_path = self.sim_config.brain_model.resource_path.abs_path
        brain_rel_path = self.sim_config.brain_model.resource_path.rel_path
        if not os.path.exists(brain_abs_path):
            logger.info(
                "Cannot find specified brain file {file} in {dir}. Searching in default "
                "directories {default}".format(
                    file=brain_rel_path,
                    dir=self.sim_dir,
                    default=str(self.sim_config.model_paths)))
            brain_abs_path = SimUtil.find_file_in_paths(
                brain_rel_path, self.sim_config.model_paths)

            if brain_abs_path:
                # remember the resolved location for later consumers
                self.sim_config.brain_model.resource_path.abs_path = brain_abs_path
            else:
                raise NRPServicesGeneralException(
                    "Could not find brain file: {}".format(brain_rel_path))

        neurons_config = self.sim_config.get_populations_dict()

        return braincontrol, braincomm, brain_abs_path, neurons_config

    def _extract_brain_zip(self):
        """
        Checks for validity, and extracts a zipped brain. First we
        make sure that the zip referenced in the bibi exists in the
        list of user brains, then we unzip it on the fly in the temporary
        simulation directory. After the extraction we also make sure to copy
        the .py from the experiment folder cause the user may have modified it

        :raises NRPServicesGeneralException: if the zipped brain model is not
            among the user's uploaded models
        """
        # pylint: disable=too-many-locals
        brain = Model(self.sim_config.brain_model.model, ResourceType.BRAIN)

        storage_brain_zip_data = self._storageClient.get_model(
            self.sim_config.token, self.sim_config.ctx_id, brain)

        if storage_brain_zip_data:
            # Get the data
            # Write the zip in sim dir
            zip_model_path = self._storageClient.get_model_path(
                self.sim_config.token, self.sim_config.ctx_id, brain)

            brain_abs_zip_path = os.path.join(self.sim_dir, zip_model_path)

            if not os.path.exists(os.path.dirname(brain_abs_zip_path)):
                os.makedirs(os.path.dirname(brain_abs_zip_path))

            # zip archives are binary data: open in 'wb' so the payload is not
            # corrupted by newline translation on platforms where text mode
            # differs from binary mode
            with open(brain_abs_zip_path, 'wb') as brain_zip:
                brain_zip.write(storage_brain_zip_data)
            # Extract and flatten
            # FixME: not sure exactly why flattening is required
            # NOTE(review): extraction reads brain_model.zip_path.abs_path,
            # presumably the same file as brain_abs_zip_path - verify
            ZipUtil.extractall(
                zip_abs_path=self.sim_config.brain_model.zip_path.abs_path,
                extract_to=self.sim_dir,
                overwrite=False,
                flatten=True)

            # copy back the .py from the experiment folder, cause we don't want the one
            # in the zip, cause the user might have made manual changes
            # TODO: verify if this still required and why only one file is copied
            brain_name = os.path.basename(
                self.sim_config.brain_model.resource_path.rel_path)
            self._storageClient.clone_file(brain_name, self.sim_config.token,
                                           self.sim_config.experiment_id)

        # if the zip is not there, prompt the user to check his uploaded models
        else:
            raise NRPServicesGeneralException(
                "Could not find selected brain model {name} in the list of uploaded models. "
                "Please make sure that it has been uploaded correctly".format(
                    name=self.sim_config.brain_model.model),
                "Zipped model retrieval failed")

    def _create_brain_adapters(self):  # pragma: no cover
        """
        Creates the adapter components for the neural simulator

        :return: A tuple of the communication and control adapter for the neural simulator
        :raises NotImplementedError: always; subclasses must override this method
        """
        raise NotImplementedError(
            "This method must be overridden in an implementation")

    # pylint: disable=too-many-arguments
    def __load_cle(self, roscontrol, roscomm, braincontrol, braincomm,
                   brain_file_path, neurons_config, externalmodulearray,
                   robot_poses, models, lights):
        """
        Load the ClosedLoopEngine and initializes all interfaces

        :param roscontrol Robot Control Adapter to use
        :param roscomm Robot Communication Adapter to use
        :param braincontrol Brain Control Adapter to use
        :param braincomm Brain Communication Adapter to use
        :param brain_file_path Accessible path to brain file
        :param neurons_config Neuron configuration specified in the BIBI
        :param externalmodulearray ExternalModuleManager instance for the CLE
        :param robot_poses Initial robot poses, indexed by robot id
        :param models Initial models loaded into the environment
        :param lights Initial lights loaded into the environment
        :return: The initialized DeterministicClosedLoopEngine
        """

        # Needed in order to cleanup global static variables
        self._notify("Connecting brain simulator to robot")
        tfm.start_new_tf_manager()

        # Create transfer functions manager
        tfmanager = tfm.config.active_node

        # set adapters
        tfmanager.robot_adapter = roscomm
        tfmanager.brain_adapter = braincomm

        # integration timestep between simulators, convert from ms to s (default to CLE value)
        timestep = (ClosedLoopEngine.DEFAULT_TIMESTEP
                    if self.sim_config.timestep is None else
                    self.sim_config.timestep)

        roscontrol.set_robots(self.robotManager.get_robot_dict())

        # initialize CLE
        self._notify("Initializing CLE")

        cle = DeterministicClosedLoopEngine(roscontrol, roscomm, braincontrol,
                                            braincomm, tfmanager,
                                            externalmodulearray, timestep)

        # a brain-less experiment initializes the CLE without a network
        if brain_file_path:
            cle.initialize(brain_file_path, **neurons_config)
        else:
            cle.initialize()

        # Set initial pose
        cle.initial_robot_poses = robot_poses
        # Set initial models and lights
        cle.initial_models = models
        cle.initial_lights = lights

        return cle

    def __load_tfs(self):
        """
        Loads and connects all transfer functions
        """
        self._notify("Loading transfer functions")

        for tf in self.sim_config.transfer_functions:
            self._notify("Loading transfer function: {}".format(tf.name))
            # normalize the source so it always ends with a single newline
            tf.code = tf.code.strip() + "\n"
            logger.debug("TF: " + tf.name + "\n" + tf.code + '\n')

            # compile in restricted mode first; a compilation failure marks
            # the TF as flawed instead of aborting the remaining loads
            try:
                compiled_code = compile_restricted(tf.code, '<string>', 'exec')
            # pylint: disable=broad-except
            except Exception as compile_error:
                logger.error(
                    "Error while compiling the transfer function {name} in restricted "
                    "mode with error {err}".format(
                        name=tf.name, err=str(compile_error)))
                tfm.set_flawed_transfer_function(tf.code, tf.name,
                                                 compile_error)
                continue

            # register the compiled TF; a loading error is likewise recorded
            # as flawed so the remaining transfer functions still load
            try:
                tfm.set_transfer_function(tf.code, compiled_code, tf.name,
                                          tf.active)
            except tfm.TFLoadingException as loading_error:
                logger.error(loading_error)
                tfm.set_flawed_transfer_function(tf.code, tf.name,
                                                 loading_error)

    def _handle_gazebo_shutdown(self):
        """
        Handles the case that Gazebo was shut down: marks the simulation
        lifecycle as failed and then as stopped to free its resources.
        """
        super(CLEGazeboSimulationAssembly, self)._handle_gazebo_shutdown()
        if self.cle_server is not None and self.cle_server.lifecycle is not None:
            # Set the simulation to halted
            self.cle_server.lifecycle.failed()
            # If not already stopped, free simulation resources
            self.cle_server.lifecycle.stopped()

    def run(self):
        """
        Runs the simulation by delegating to the CLE server
        """
        self.cle_server.run()

    def _shutdown(self, notifications):
        """
        Shutdown the CLE and any hooks before shutting down Gazebo

        :param notifications: A flag indicating whether notifications should be attempted to send
        """
        try:
            if notifications:
                self.ros_notificator.update_task(
                    "Shutting down Closed Loop Engine",
                    update_progress=True,
                    block_ui=False)

            self.robotManager.shutdown()
            self.cle_server.shutdown()
        # pylint: disable=broad-except
        except Exception, e:
            logger.error("The cle server could not be shut down")
            logger.exception(e)

        finally:
Esempio n. 23
0
    def put(self, experiment_id):
        """
        Save a brain model PyNN of an experiment to the storage.
        :param path experiment_id: The experiment id

        :< json body json string data: PyNN script of the model
        :< json body json string brain_populations: neuron populations
        :< json body json string context_id: context_id of the sim

        :status 500: {0}
        :status 404: {1}
        :status 400: The request body is malformed
        :status 200: Success. File written.
        """
        from hbp_nrp_backend.storage_client_api.StorageClient \
            import StorageClient
        body = request.get_json(force=True)
        if 'data' not in body:
            raise NRPServicesClientErrorException(
                "Neural network python code should be sent in the body under the 'data' key"
            )
        context_id = body.get('context_id', None)

        # no need to rewrite a get_header function since the user
        # authentication already has one
        # Read the request data
        content_type = UserAuthentication.get_header(request, 'Content-type',
                                                     'text/plain')
        data = body['data']
        brain_populations = body.get('additional_populations')

        # the token is needed by every storage call below; fetch it once
        token = UserAuthentication.get_header_token(request)

        # Instantiate the storage client
        client = StorageClient()

        # find the bibi filename from the .exc
        experiment_file = client.get_file(
            token,
            experiment_id,
            'experiment_configuration.exc',
            byname=True)

        bibi_filename = exp_conf_api_gen.CreateFromDocument(
            experiment_file).bibiConf.src

        # find the brain filename from the bibi
        bibi_file = client.get_file(
            token,
            experiment_id,
            bibi_filename,
            byname=True)
        bibi_file_obj = bibi_api_gen.CreateFromDocument(bibi_file)
        brain_filename = bibi_file_obj.brainModel.file

        # brains referenced through a storage:// URI live in the shared
        # 'brains' folder; otherwise the file sits in the experiment folder
        if 'storage://' in brain_filename:
            client.create_or_update(
                token,
                client.get_folder_uuid_by_name(token, context_id, 'brains'),
                os.path.basename(brain_filename), data, content_type)
        else:
            client.create_or_update(
                token, experiment_id,
                os.path.basename(brain_filename), data, content_type)

        # remove all the populations
        del bibi_file_obj.brainModel.populations[:]

        if brain_populations is not None:
            self.parsePopulations(brain_populations, bibi_file_obj)

        # replace the bibi contents in the storage to match the new brain
        # definition
        client.create_or_update(
            token, experiment_id,
            bibi_filename,
            xml.dom.minidom.parseString(
                bibi_file_obj.toxml("utf-8")).toprettyxml(), "text/plain")

        return 200
Esempio n. 24
0
class UserAuthentication(object):
    """
    Helper class to get the user, authenticated at the HBP Unified Portal, from a HTTP request.
    This is done by reading the value of the 'X-User-Name' header. If this header is not present,
    the user name 'default-owner' is used to allow testing outside the portal, etc.
    """

    HTTP_HEADER_USER_NAME = "X-User-Name"
    HEADER_TOKEN = "Authorization"
    DEFAULT_OWNER = "default-owner"
    NO_TOKEN = "no_token"
    # shared storage client used for user lookup and permission checks
    client = StorageClient()

    @staticmethod
    def get_header(header_name, default_value):
        """
        Gets the value of the header_name header from the current HTTP request

        :param header_name: The name of the header to read
        :param default_value: If nothing is found, this will be returned
        :return: The value of the header_name header or if not found default_value
        """
        request_parser = reqparse.RequestParser()
        request_parser.add_argument(header_name, type=str, location='headers')
        header_value = request_parser.parse_args(request)[header_name]
        if header_value is None:
            header_value = default_value

        return header_value

    @staticmethod
    def get_x_user_name_header():
        """
        Gets the value of the 'X-User-Name' header from the current HTTP request

        :return: The value of the 'X-User-Name' header or if not found 'default-owner'
        """
        return UserAuthentication.get_header(
            UserAuthentication.HTTP_HEADER_USER_NAME,
            UserAuthentication.DEFAULT_OWNER)

    @staticmethod
    def get_header_token():
        """
        Gets the value of the 'Authorization' header from the current HTTP request

        :return: The token part of the 'Authorization' header or, if the
                 header is absent, the NO_TOKEN sentinel ("no_token")
        """
        token_field = UserAuthentication.get_header(
            UserAuthentication.HEADER_TOKEN, UserAuthentication.NO_TOKEN)
        if token_field != UserAuthentication.NO_TOKEN:
            # header is expected as "<scheme> <token>"; return the token part
            return token_field.split()[1]
        else:
            return token_field

    @staticmethod
    @lru_cache()
    def get_token_owner(token):
        """
        Gets the owner of an authentication token

        :param token: The authentication token
        :return: The user's id, or None if the lookup fails
        """
        try:
            user = UserAuthentication.client.get_user(token)
            return user['id'] if user else None
        # pylint: disable=broad-except
        except Exception:
            return None

    @staticmethod
    def get_user():
        """
        Gets the owner of the current request, from the header data
        It uses the x-user-name if specified, else by the authentication token

        :return: The user's id
        """
        username = UserAuthentication.get_x_user_name_header()
        if username != UserAuthentication.DEFAULT_OWNER:
            return username

        token = UserAuthentication.get_header_token()
        if token == UserAuthentication.NO_TOKEN:
            return username

        token_owner = UserAuthentication.get_token_owner(token)
        return token_owner if token_owner else username

    @staticmethod
    @lru_cache()
    def __user_can_access_experiment(token, context_id, experiment_id):
        """
        Checks if a user can access a simulation.

        :param token: The authentication token
        :param context_id: Optional context identifier
        :param experiment_id: The simulation's experiment id
        :return: Whether the user can access the simulation
        """

        if token == UserAuthentication.NO_TOKEN:
            return False

        try:
            # NOTE: 'can_acess_experiment' [sic] is the StorageClient API's
            # actual method name; do not "fix" the spelling here
            return UserAuthentication.client.can_acess_experiment(
                token, context_id, experiment_id)
        # pylint: disable=broad-except
        except Exception:
            return False

    @staticmethod
    def can_view(simulation):
        """
        Checks if the current HTTP request is authenticated by
        someone the simulation was shared with

        :param simulation: The simulation
        :return: True if the simulation was shared with the request's user, False otherwise
        """

        token = UserAuthentication.get_header_token()
        return UserAuthentication.__user_can_access_experiment(
            token, simulation.ctx_id, simulation.experiment_id)

    @staticmethod
    def can_modify(simulation):
        """
        Checks if the current HTTP request is authenticated by the simulation owner

        :param simulation: The simulation
        :return: True if the request was triggered by a simulation owner, False otherwise
        """
        user = simulation.owner
        request_user = UserAuthentication.get_user()
        if user == request_user:
            return True
        else:
            # logger.warn is a deprecated alias of logger.warning
            logger.warning(
                "Request from user '{request_user}' but simulation owned by '{user}'"
                .format(request_user=request_user, user=user))
            return False
Esempio n. 25
0
    def put(self, experiment_id):
        """
        Save state machines to the storage

        :param path experiment_id: The experiment_id id of the experiment
        :param body source_code: Source code of the state machine as string.
        :status 500: The experiment xml either could not be found or read
        :status 200: Success. File written.
        """
        # Done here in order to avoid circular dependencies introduced by the
        # way we __init__ the rest_server module.
        body = request.get_json(force=True)
        if 'state_machines' not in body:
            raise NRPServicesClientErrorException(
                "State machine code should be sent in "
                "the body under the 'state_machines' key")

        from hbp_nrp_backend.storage_client_api.StorageClient \
            import StorageClient

        client = StorageClient()
        # the token is used for every storage call below; fetch it once
        token = UserAuthentication.get_header_token(request)

        exp_xml_file_path = client.clone_file(
            'experiment_configuration.exc', token, experiment_id)

        if not exp_xml_file_path:
            return {
                "message": "Failed to clone experiment configuration file"
            }, 500

        experiment = client.parse_and_check_file_is_valid(
            exp_xml_file_path, exp_conf_api_gen.CreateFromDocument,
            exp_conf_api_gen.ExD_)

        if not experiment:
            return {
                "message": "Failed to parse experiment configuration file"
            }, 500

        # Accumulate ALL state machines into a single ExperimentControl.
        # Previously a fresh ExperimentControl replaced experiment's control
        # on every iteration, so only the last state machine survived in the
        # written experiment configuration.
        exp_control = exp_conf_api_gen.ExperimentControl()

        threads = []
        for sm_name in body['state_machines']:
            sm_node = exp_conf_api_gen.SMACHStateMachine()
            sm_node.id = os.path.splitext(sm_name)[0]
            sm_node.src = sm_name if sm_name.endswith(
                ".exd") else sm_name + ".exd"
            exp_control.stateMachine.append(sm_node)
            # write each state machine source file in parallel
            t = Thread(target=client.create_or_update,
                       kwargs={
                           'token': token,
                           'experiment': experiment_id,
                           'filename': sm_node.src,
                           'content': body['state_machines'][sm_name],
                           'content_type':
                           "application/hbp-neurorobotics.sm+python"
                       })
            t.start()
            threads.append(t)

        if body['state_machines']:
            experiment.experimentControl = exp_control

        # write the updated experiment configuration itself
        t = Thread(target=client.create_or_update,
                   kwargs={
                       'token': token,
                       'experiment': experiment_id,
                       'filename': 'experiment_configuration.exc',
                       'content': experiment.toxml("utf-8"),
                       'content_type': "application/hbp-neurorobotics+xml"
                   })
        t.start()
        threads.append(t)
        for thread in threads:
            thread.join()
        return {"message": "Success. Files written to the storage"}, 200