Example #1
def load_wiki(wiki_config, config):
    # Merge in wiki defaults
    wiki_config = deep_merge.merge({}, config["wiki_defaults"], wiki_config,
                                   merge_lists=deep_merge.overwrite)
    for model_name, model_config in wiki_config.get('models', {}).items():
        # Merge in model defaults
        model_config = deep_merge.merge(
            {}, config["model_defaults"], model_config)

        wiki_config['models'][model_name] = model_config
    return wiki_config
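
For reference, a minimal sketch (hypothetical config values, not from the project) of what merge_lists=deep_merge.overwrite does here, assuming, as the name and its use above suggest, that it replaces list values wholesale while nested dicts are still merged key by key:

import deep_merge

defaults = {"samples": ["enwiki"], "options": {"threads": 1}}
overrides = {"samples": ["dewiki"], "options": {"verbose": True}}

# Later arguments win; lists are overwritten rather than combined
merged = deep_merge.merge({}, defaults, overrides,
                          merge_lists=deep_merge.overwrite)
# merged == {"samples": ["dewiki"], "options": {"threads": 1, "verbose": True}}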
Example #2
    def advance(self, evaluator: Evaluator):
        self.iteration += 1
        logger.info("Starting SPSA iteration %d." % self.iteration)

        if self.parameters is None:
            self.parameters = evaluator.problem.initial

        # Calculate objective
        if self.compute_objective:
            annotations = { "type": "objective" }
            objective_identifier = evaluator.submit(self.parameters, annotations = annotations)

        # Update step lengths
        gradient_length = self.gradient_factor / (self.iteration + self.gradient_offset)**self.gradient_exponent
        perturbation_length = self.perturbation_factor / self.iteration**self.perturbation_exponent

        # Sample a random direction of +-0.5 per dimension (a scaled Rademacher
        # distribution; the scaling cancels in the gradient estimate below)
        direction = self.random.randint(0, 2, len(self.parameters)) - 0.5

        annotations = {
            "gradient_length": gradient_length,
            "perturbation_length": perturbation_length,
            "direction": direction,
            "type": "gradient"
        }

        # Schedule samples; each submission gets its own copy of the annotations
        # so the second merge does not mutate the first run's stored annotations
        positive_parameters = np.copy(self.parameters)
        positive_parameters += direction * perturbation_length
        positive_annotations = deep_merge.merge(dict(annotations), { "type": "positive_gradient" })
        positive_identifier = evaluator.submit(positive_parameters, annotations = positive_annotations)

        negative_parameters = np.copy(self.parameters)
        negative_parameters -= direction * perturbation_length
        negative_annotations = deep_merge.merge(dict(annotations), { "type": "negative_gradient" })
        negative_identifier = evaluator.submit(negative_parameters, annotations = negative_annotations)

        # Wait for gradient run results
        evaluator.wait()

        if self.compute_objective:
            evaluator.clean(objective_identifier)

        positive_objective, positive_state = evaluator.get(positive_identifier)
        evaluator.clean(positive_identifier)

        negative_objective, negative_state = evaluator.get(negative_identifier)
        evaluator.clean(negative_identifier)

        # Central difference, divided elementwise by the signed perturbation direction
        g_k = (positive_objective - negative_objective) / (2.0 * perturbation_length)
        g_k /= direction

        # Update state
        self.parameters -= gradient_length * g_k
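
For intuition, here is a minimal self-contained sketch of the simultaneous-perturbation estimator this method implements, on a hypothetical quadratic objective (all names below are illustrative; the example above perturbs by +-0.5 per coordinate and divides by the +-0.5 direction, which is equivalent):

import numpy as np

rng = np.random.default_rng(0)

def objective(theta):
    # Hypothetical toy objective for illustration
    return float(np.sum(theta ** 2))

theta = np.array([1.0, -2.0])
c = 0.1  # perturbation length

delta = rng.integers(0, 2, len(theta)) * 2 - 1  # Rademacher: +-1
y_plus = objective(theta + c * delta)
y_minus = objective(theta - c * delta)

# One-sample SPSA gradient estimate: two evaluations regardless of dimension
g = (y_plus - y_minus) / (2.0 * c) / delta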
Example #3
    def submit(self, x, simulator_parameters=None, annotations=None, transient=False):
        # Avoid the shared-mutable-default-argument pitfall
        if simulator_parameters is None:
            simulator_parameters = {}
        if annotations is None:
            annotations = {}

        if len(x) != self.number_of_parameters:
            raise RuntimeError("Invalid number of parameters: %d (expected %d)" % (
                len(x), self.number_of_parameters
            ))

        identifier = self._create_identifier()
        response = self.problem.parameterize(x)

        if isinstance(response, tuple):
            parameters, cost = response
        else:
            parameters, cost = response, 1

        parameters = deep_merge.merge(parameters, simulator_parameters)

        self.simulations[identifier] = {
            "identifier": identifier,
            "parameters": parameters, "x": x,
            "cost": cost, "annotations": annotations,
            "status": "pending", "transient": transient
        }

        self.pending.append(identifier)
        return identifier
Example #4
def swagger_json(request):
    services = cornice.service.get_services()
    swagger = cornice_swagger.CorniceSwagger(
        services,
        pyramid_registry=request.registry
    )
    swagger.base_path = '/api'
    swagger.summary_docstrings = True
    info = request.registry.settings['api_specs']
    base = swagger.generate(**info, info=info)
    for path in base['paths'].keys():
        for doc in (DESCRIPTIONS, RESPONSES):
            merge(base['paths'][path], doc[path.lstrip('/')])

    base['definitions'] = request.registry.models
    # This endpoint is private
    del base['paths']['/releases.json']['post']
    return base
Example #5
import json
import os
import time

import deep_merge
import yamlconf


def main():
    config_path = os.path.dirname(os.path.abspath(__file__)) + '/config.yaml'
    with open(config_path, 'r') as f:
        config = yamlconf.load(f)
    data = get_ores_data(config['ores_host'])
    data = deep_merge.merge(data,
                            get_wikilabels_data(config['wikilabels_host']))
    data = {'data': data, 'timestamp': time.time()}
    path = os.path.dirname(os.path.abspath(__file__)) + '/static/data.json'
    with open(path, 'w') as f:
        f.write(json.dumps(data))
Example #6
def union_merge_observations(observations, id_column):
    """Merge all observations, returning the output as a list.
    """
    id_map = collections.defaultdict(dict)
    for ob in observations:
        # Get the id value.
        ob_id = ob[id_column]

        # Merge the contents, with later entries taking precedence when keys
        # match.
        id_map[ob_id] = deep_merge.merge(id_map[ob_id], ob)

    return list(id_map.values())
def intersect_merge_observations(observation_sets, id_column):
    """Intersect all observations, returning the output as an iterable.
    """
    observation_maps = [
        {ob[id_column]: ob for ob in observation_set}
        for observation_set in observation_sets]

    for id_ in observation_maps[0]:
        # Key exists in all sets
        if sum(id_ in om for om in observation_maps) == len(observation_maps):
            new_ob = {}
            for observation_map in observation_maps:
                new_ob = deep_merge.merge(new_ob, observation_map[id_])

            yield new_ob
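
An illustrative call with made-up observations (not project data):

obs_a = [{"id": 1, "x": 1}, {"id": 2, "y": 2}]
obs_b = [{"id": 1, "z": 3}]

union_merge_observations(obs_a + obs_b, "id")
# -> [{'id': 1, 'x': 1, 'z': 3}, {'id': 2, 'y': 2}]
list(intersect_merge_observations([obs_a, obs_b], "id"))
# -> [{'id': 1, 'x': 1, 'z': 3}]  (id 2 is missing from obs_b, so it is dropped)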
Example #9
def uniq_roles(mapping, bind, value):
    """
    Makes roles unique
    """
    roles_map = dict()
    for v in value:
        id_ = v.get('id', '')
        if not id_:
            id_ = v.get('name')
        if not id_:
            continue
        if id_ in roles_map:
            roles = roles_map[id_].pop('roles', [])
            roles.extend(v.get('roles', []))
            roles_map[id_] = deep_merge.merge(roles_map[id_], v)
            roles_map[id_]['roles'] = list(set(roles))
        else:
            roles_map[id_] = v
    return list(roles_map.values())
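
An illustrative call (the mapping and bind arguments are unused by the body, so anything can be passed; the role entries below are made up):

value = [
    {"id": "admin", "roles": ["read"]},
    {"id": "admin", "roles": ["read", "write"]},
]
uniq_roles(None, None, value)
# -> [{'id': 'admin', 'roles': [...]}] with roles deduplicated to
#    {'read', 'write'}; list order is not guaranteed, since a set is used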
Example #10
import deep_merge

print(deep_merge.merge({'a': {'b': 10}}, {'a': {'c': 5}}))
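
Nested keys are combined rather than replaced, so this prints {'a': {'b': 10, 'c': 5}}.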
Example #11
    def _load_modules(self,
                      root_dir: str,
                      module_loader_registry: ModuleLoaderRegistry,
                      dir_filter: Callable[[str], bool],
                      module_load_context: Optional[str],
                      keys_referenced_as_modules: Set[str],
                      ignore_unresolved_params: bool = False) -> bool:
        """
        Load modules which have not already been loaded and can be loaded (don't have unresolved parameters).

        :param ignore_unresolved_params:    If true, not-yet-loaded modules will be loaded even if they are
                                            passed parameters that are not fully resolved.
        :return:                            True if there were modules that were not loaded due to unresolved
                                            parameters.
        """
        all_module_definitions = {}
        all_module_evaluations_context = {}
        skipped_a_module = False
        for file in list(self.out_definitions.keys()):
            # Don't process a file in a directory other than the directory we're processing. For example,
            # if we're down dealing with <top_dir>/<module>/something.tf, we don't want to rescan files
            # up in <top_dir>.
            if os.path.dirname(file) != root_dir:
                continue
            # Don't process a file reference which has already been processed
            if file.endswith("]"):
                continue

            file_data = self.out_definitions.get(file)
            if file_data is None:
                continue
            module_calls = file_data.get("module")
            if not module_calls or not isinstance(module_calls, list):
                continue

            for module_index, module_call in enumerate(module_calls):

                if not isinstance(module_call, dict):
                    continue

                # There should only be one module reference per outer dict, but... safety first
                for module_call_name, module_call_data in module_call.items():
                    if not isinstance(module_call_data, dict):
                        continue

                    module_address = (file, module_index, module_call_name)
                    if module_address in self._loaded_modules:
                        continue

                    # Variables being passed to module, "source" and "version" are reserved
                    specified_vars = {
                        k: v[0]
                        for k, v in module_call_data.items()
                        if k != "source" and k != "version"
                    }

                    if not ignore_unresolved_params:
                        has_unresolved_params = False
                        for k, v in specified_vars.items():
                            if not is_acceptable_module_param(v) or \
                                    not is_acceptable_module_param(k):
                                has_unresolved_params = True
                                break
                        if has_unresolved_params:
                            skipped_a_module = True
                            continue
                    self._loaded_modules.add(module_address)

                    source = module_call_data.get("source")
                    if not source or not isinstance(source, list):
                        continue
                    source = source[0]
                    if not isinstance(source, str):
                        logging.debug(
                            f"Skipping loading of {module_call_name} as source is not a string, it is: {source}"
                        )
                        continue

                    # Special handling for local sources to make sure we aren't double-parsing
                    if source.startswith("./") or source.startswith("../"):
                        source = os.path.normpath(
                            os.path.join(
                                os.path.dirname(
                                    _remove_module_dependency_in_path(file)),
                                source))

                    version = module_call_data.get("version", "latest")
                    if version and isinstance(version, list):
                        version = version[0]
                    try:
                        with module_loader_registry.load(
                                root_dir, source, version) as content:
                            if not content.loaded():
                                continue

                            self._internal_dir_load(
                                directory=content.path(),
                                module_loader_registry=module_loader_registry,
                                dir_filter=dir_filter,
                                specified_vars=specified_vars,
                                module_load_context=module_load_context,
                                keys_referenced_as_modules=keys_referenced_as_modules)

                            module_definitions = {
                                path: self.out_definitions[path]
                                for path in list(self.out_definitions.keys())
                                if os.path.dirname(path) == content.path()
                            }

                            if not module_definitions:
                                continue

                            # NOTE: Modules are put into the main TF definitions structure "as normal" with the
                            #       notable exception of the file name. For loaded modules referrer information is
                            #       appended to the file name to create this format:
                            #         <file_name>[<referred_file>#<referrer_index>]
                            #       For example:
                            #         /the/path/module/my_module.tf[/the/path/main.tf#0]
                            #       The referrer and index allow a module to be loaded multiple
                            #       times with differing data.
                            #
                            #       In addition, the referring block will have a "__resolved__" key added with a
                            #       list pointing to the location of the module data that was resolved. For example:
                            #         "__resolved__": ["/the/path/module/my_module.tf[/the/path/main.tf#0]"]

                            resolved_loc_list = module_call_data.get(
                                RESOLVED_MODULE_ENTRY_NAME)
                            if resolved_loc_list is None:
                                resolved_loc_list = []
                                module_call_data[
                                    RESOLVED_MODULE_ENTRY_NAME] = resolved_loc_list

                            # NOTE: Modules can load other modules, so only append referrer information where it
                            #       has not already been added.
                            keys = list(module_definitions.keys())
                            for key in keys:
                                if key.endswith("]") or file.endswith("]"):
                                    continue
                                keys_referenced_as_modules.add(key)
                                new_key = f"{key}[{file}#{module_index}]"
                                module_definitions[
                                    new_key] = module_definitions[key]
                                del module_definitions[key]
                                del self.out_definitions[key]
                                if new_key not in resolved_loc_list:
                                    resolved_loc_list.append(new_key)
                            # Sort for predictable ordering (needed by tests)
                            resolved_loc_list.sort()

                            deep_merge.merge(all_module_definitions,
                                             module_definitions)
                    except Exception as e:
                        logging.warning(
                            "Unable to load module (source=\"%s\" version=\"%s\"): %s",
                            source, version, e)

        if all_module_definitions:
            deep_merge.merge(self.out_definitions, all_module_definitions)
            deep_merge.merge(self.out_evaluations_context,
                             all_module_evaluations_context)
        return skipped_a_module
Example #12
import deep_merge
from django.conf import settings
from django.utils.module_loading import import_string

DEFAULT_UPLOAD_SETTINGS = {
    'FORM': 'data_ingest.forms.UploadForm',
    'INGESTOR': 'data_ingest.ingestors.Ingestor',
    'TEMPLATE': 'data_ingest/upload.html',
    'LIST_TEMPLATE': 'data_ingest/upload_list.html',
    'DETAIL_TEMPLATE': 'data_ingest/upload_detail.html',
    'MODEL': 'data_ingest.models.DefaultUpload',
    'DESTINATION': 'data_ingest/',
    'DESTINATION_FORMAT': 'json',
    'VALIDATION_SCHEMA': None,
}

UPLOAD_SETTINGS = deep_merge.merge(DEFAULT_UPLOAD_SETTINGS,
                                   getattr(settings, 'DATA_INGEST', {}))

upload_form_class = import_string(UPLOAD_SETTINGS['FORM'])
upload_model_class = import_string(UPLOAD_SETTINGS['MODEL'])
ingestor_class = import_string(UPLOAD_SETTINGS['INGESTOR'])
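
A project only needs to define the keys it wants to override; a hypothetical settings module might contain:

# settings.py (hypothetical project settings)
DATA_INGEST = {
    'DESTINATION_FORMAT': 'csv',
}
# UPLOAD_SETTINGS then equals DEFAULT_UPLOAD_SETTINGS with only
# DESTINATION_FORMAT changed. Note that deep_merge.merge merges into its
# first argument, so DEFAULT_UPLOAD_SETTINGS itself ends up holding the
# merged values here.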
Example #13
with open("../node_modules/cloverleaf/data/sites.json", 'r') as json_file:
    sites = json.load(json_file)

with open("../data/logos.json", 'r') as json_file:
    logos = json.load(json_file)

with open("../node_modules/cloverleaf/unit_tests/configs.json",
          'r') as json_file:
    configs = json.load(json_file)

with open("../node_modules/cloverleaf/unit_tests/results.json",
          'r') as json_file:
    results = json.load(json_file)

sites = deep_merge.merge(sites, logos)


def status_code(driver, url):
    # Escape single quotes so the URL can be embedded in the JS string literal
    escaped_url = url.replace("'", "\\'")
    js = '''
        let callback = arguments[0];
        let xhr = new XMLHttpRequest();
        xhr.open('GET', '%s', true);
        xhr.onload = function () {
            if (this.readyState === 4) {
                callback(this.status);
            }
        };
        xhr.onerror = function () {
            callback('error');
        };
        xhr.send();
    ''' % escaped_url
    return driver.execute_async_script(js)
Example #14
    def _load_modules(self, root_dir: str,
                      module_loader_registry: ModuleLoaderRegistry,
                      dir_filter: Callable[[str], bool],
                      module_load_context: Optional[str]):
        all_module_definitions = {}
        all_module_evaluations_context = {}
        for file in list(self.out_definitions.keys()):
            file_data = self.out_definitions.get(file)
            # Guard: entries can be deleted from out_definitions while iterating
            if file_data is None:
                continue
            module_calls = file_data.get("module")
            if not module_calls or not isinstance(module_calls, list):
                continue

            for module_index, module_call in enumerate(module_calls):
                if not isinstance(module_call, dict):
                    continue

                # There should only be one module reference per outer dict, but... safety first
                for module_call_name, module_call_data in module_call.items():
                    if not isinstance(module_call_data, dict):
                        continue

                    source = module_call_data.get("source")
                    if not source or not isinstance(source, list):
                        continue
                    source = source[0]

                    # Special handling for local sources to make sure we aren't double-parsing
                    if source.startswith("./") or source.startswith("../"):
                        source = os.path.normpath(
                            os.path.join(
                                os.path.dirname(
                                    _remove_module_dependency_in_path(file)),
                                source))

                    version = module_call_data.get("version", "latest")
                    if version and isinstance(version, list):
                        version = version[0]
                    try:
                        with module_loader_registry.load(
                                root_dir, source, version) as content:
                            if not content.loaded():
                                continue

                            # Variables being passed to module, "source" and "version" are reserved
                            specified_vars = {
                                k: v[0]
                                for k, v in module_call_data.items()
                                if k != "source" and k != "version"
                            }

                            if not dir_filter(os.path.abspath(content.path())):
                                continue
                            self._internal_dir_load(
                                directory=content.path(),
                                module_loader_registry=module_loader_registry,
                                dir_filter=dir_filter,
                                specified_vars=specified_vars,
                                module_load_context=module_load_context)

                            module_definitions = {
                                path: self.out_definitions[path]
                                for path in list(self.out_definitions.keys())
                                if os.path.dirname(path) == content.path()
                            }

                            if not module_definitions:
                                continue

                            # NOTE: Modules are put into the main TF definitions structure "as normal" with the
                            #       notable exception of the file name. For loaded modules referrer information is
                            #       appended to the file name to create this format:
                            #         <file_name>[<referred_file>#<referrer_index>]
                            #       For example:
                            #         /the/path/module/my_module.tf[/the/path/main.tf#0]
                            #       The referrer and index allow a module to be loaded multiple
                            #       times with differing data.
                            #
                            #       In addition, the referring block will have a "__resolved__" key added with a
                            #       list pointing to the location of the module data that was resolved. For example:
                            #         "__resolved__": ["/the/path/module/my_module.tf[/the/path/main.tf#0]"]

                            resolved_loc_list = module_call_data.get(
                                RESOLVED_MODULE_ENTRY_NAME)
                            if resolved_loc_list is None:
                                resolved_loc_list = []
                                module_call_data[
                                    RESOLVED_MODULE_ENTRY_NAME] = resolved_loc_list

                            # NOTE: Modules can load other modules, so only append referrer information where it
                            #       has not already been added.
                            keys = list(module_definitions.keys())
                            for key in keys:
                                if key.endswith("]"):
                                    continue
                                new_key = f"{key}[{file}#{module_index}]"
                                module_definitions[new_key] = \
                                    module_definitions[key]
                                del module_definitions[key]
                                del self.out_definitions[key]

                                resolved_loc_list.append(new_key)

                            deep_merge.merge(all_module_definitions,
                                             module_definitions)
                    except Exception as e:
                        logging.warning(
                            "Unable to load module (source=\"%s\" version=\"%s\"): %s",
                            source, version, e)

        if all_module_definitions:
            deep_merge.merge(self.out_definitions, all_module_definitions)
            deep_merge.merge(self.out_evaluations_context,
                             all_module_evaluations_context)
Example #15
def load_wiki(wiki, config):
    wiki = deep_merge.merge({},
                            config["wiki_defaults"],
                            wiki,
                            merge_lists=deep_merge.overwrite)
    result = collections.OrderedDict()
    if 'models' not in wiki:
        wiki['models'] = {}
    if isinstance(wiki["models"], list):
        wiki["models"] = {name: {} for name in wiki["models"]}

    for model_name in ['reverted', 'damaging', 'goodfaith']:
        if model_name not in wiki['models']:
            continue
        model = wiki["models"][model_name]

        # Do not apply default configs to RandomForest models,
        # as they do not make sense there
        if not model.get('rf'):
            model = deep_merge.merge({}, config["model_defaults"], model)

        for case in model['tuning_params']:
            value = model['tuning_params'][case]
            if isinstance(value, str):
                model['tuning_params'][case] = '"%s"' % value

        # Normalize for old label.
        if model_name == 'reverted':
            model['label'] = 'reverted_for_damage'
        else:
            model['label'] = model_name

        target_prediction = 'false' if model_name == 'goodfaith' else 'true'
        model['label_weight'] = \
            "{}=$({}_weight)".format(target_prediction, model_name)

        model['algorithm'] = 'rf' if 'rf' in model else 'gradient_boosting'
        model['class_name'] = \
            'RandomForest' if 'rf' in model else 'GradientBoosting'

        result[model_name] = model

    wiki["models"] = result

    # Sort sample types
    result = collections.OrderedDict()
    for sample_type in ['quarry_url', 'labeling_campaign']:
        for sample in wiki.get('samples', {}):
            if sample_type not in wiki['samples'][sample]:
                continue
            result[sample] = wiki['samples'][sample]
    wiki['samples'] = result

    # Normalize options
    if 'default_sample_bigger_sample' not in wiki:
        wiki['default_sample_bigger_sample'] = wiki['default_sample']

    if 'sample_to_build_review' not in wiki:
        wiki['sample_to_build_review'] = wiki['default_sample']

    return wiki
Example #16
    def run(self, identifier, parameters):
        """
            Runs a MATSim simulation.
        """
        if identifier in self.simulations:
            raise RuntimeError(
                "A simulation with identifier %s already exists." % identifier)

        simulation_path = "%s/%s" % (self.working_directory, identifier)

        if os.path.exists(simulation_path):
            shutil.rmtree(simulation_path)

        os.mkdir(simulation_path)

        simulation_parameters = {}
        simulation_parameters = deep_merge.merge(simulation_parameters,
                                                 self.parameters)
        simulation_parameters = deep_merge.merge(simulation_parameters,
                                                 parameters)
        parameters = simulation_parameters

        # Rewrite configuration
        if "iterations" in parameters:
            if "controler.lastIteration" in parameters["config"]:
                logger.warn(
                    "Overwriting 'controler.lastIteration' for simulation %s" %
                    identifier)

            if "controler.writeEventsInterval" in parameters["config"]:
                logger.warn(
                    "Overwriting 'controler.writeEventsInterval' for simulation %s"
                    % identifier)

            if "controler.writePlansInterval" in parameters["config"]:
                logger.warn(
                    "Overwriting 'controler.writePlansInterval' for simulation %s"
                    % identifier)

            parameters["config"]["controler.lastIteration"] = parameters[
                "iterations"]
            parameters["config"]["controler.writeEventsInterval"] = parameters[
                "iterations"]
            parameters["config"]["controler.writePlansInterval"] = parameters[
                "iterations"]

        if "random_seed" in parameters:
            if "global.random_seed" in parameters["config"]:
                logger.warn(
                    "Overwriting 'global.random_seed' for simulation %s" %
                    identifier)

            parameters["config"]["global.random_seed"] = parameters[
                "random_seed"]

        if "restart" in parameters:
            if "plans.inputPlansFile" in parameters["config"]:
                logger.warn(
                    "Overwriting 'plans.inputPlansFile' for simulation %s" %
                    identifier)

            restart_path = "%s/%s" % (self.working_directory,
                                      parameters["restart"])
            parameters["config"][
                "plans.inputPlansFile"] = "%s/output/output_plans.xml.gz" % restart_path

        if "controler.outputDirectory" in parameters["config"]:
            logger.warn(
                "Overwriting 'controler.outputDirectory' for simulation %s" %
                identifier)

        parameters["config"][
            "controler.outputDirectory"] = "%s/output" % simulation_path

        # Construct command line arguments
        if not "class_path" in parameters:
            raise RuntimeError(
                "Parameter 'class_path' must be set for the MATSim simulator.")

        if not "main_class" in parameters:
            raise RuntimeError(
                "Parameter 'main_class' must be set for the MATSim simulator.")

        arguments = [
            parameters["java"],
            "-Xmx%s" % parameters["memory"], "-cp", parameters["class_path"],
            parameters["main_class"]
        ] + parameters["prefix_arguments"] + parameters["arguments"]

        for key, value in parameters["config"].items():
            arguments += ["--config:%s" % key, str(value)]

        arguments += parameters["postfix_arguments"]

        stdout = open("%s/simulation_output.log" % simulation_path, "w+")
        stderr = open("%s/simulation_error.log" % simulation_path, "w+")

        logger.info("Starting simulation %s:" % identifier)
        logger.info(" ".join(arguments))

        self.simulations[identifier] = {
            "process": sp.Popen(arguments, stdout=stdout, stderr=stderr),
            "arguments": arguments,
            "status": "running",
            "progress": -1,
            "iterations": parameters.get("iterations")
        }
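
A sketch of the kind of parameters dict run expects, based on the keys accessed above (values and paths are illustrative, not project defaults):

parameters = {
    "java": "java",
    "memory": "4G",
    "class_path": "matsim.jar",               # hypothetical path
    "main_class": "org.matsim.run.Controler", # hypothetical entry point
    "prefix_arguments": [],
    "arguments": [],
    "postfix_arguments": [],
    "config": {},
    "iterations": 10,
}
simulator.run("sim_0", parameters)  # 'simulator' is an instance of this class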
Example #17
    def advance(self, evaluator: Evaluator):
        self.iteration += 1
        logger.info("Starting FDSA iteration %d." % self.iteration)

        # Calculate objective
        if self.compute_objective:
            annotations = {"type": "objective"}
            objective_identifier = evaluator.submit(self.parameters,
                                                    annotations=annotations)

        # Update lengths
        gradient_length = self.gradient_factor / (
            self.iteration + self.gradient_offset)**self.gradient_exponent
        perturbation_length = self.perturbation_factor / self.iteration**self.perturbation_exponent

        annotations = {
            "gradient_length": gradient_length,
            "perturbation_length": perturbation_length,
            "type": "gradient"
        }

        # I) Calculate gradients
        gradient = np.zeros((len(self.parameters), ))
        gradient_information = []

        # Schedule all necessary runs; each submission gets its own copy of the
        # annotations so that later merges do not mutate already-submitted ones
        for d in range(len(self.parameters)):
            annotations = deep_merge.merge(annotations, {"dimension": d})

            positive_parameters = np.copy(self.parameters)
            positive_parameters[d] += perturbation_length
            positive_annotations = deep_merge.merge(dict(annotations),
                                                    {"type": "positive_gradient"})
            positive_identifier = evaluator.submit(positive_parameters,
                                                   annotations=positive_annotations)

            negative_parameters = np.copy(self.parameters)
            negative_parameters[d] -= perturbation_length
            negative_annotations = deep_merge.merge(dict(annotations),
                                                    {"type": "negative_gradient"})
            negative_identifier = evaluator.submit(negative_parameters,
                                                   annotations=negative_annotations)

            gradient_information.append(
                (positive_parameters, positive_identifier, negative_parameters,
                 negative_identifier))

        # Wait for gradient run results
        evaluator.wait()

        if self.compute_objective:
            evaluator.clean(objective_identifier)

        for d, item in enumerate(gradient_information):
            positive_parameters, positive_identifier, negative_parameters, negative_identifier = item

            positive_objective, positive_state = evaluator.get(
                positive_identifier)
            evaluator.clean(positive_identifier)

            negative_objective, negative_state = evaluator.get(
                negative_identifier)
            evaluator.clean(negative_identifier)

            gradient[d] = (positive_objective -
                           negative_objective) / (2.0 * perturbation_length)

        # II) Update state
        self.parameters -= gradient_length * gradient
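
Each dimension above is a plain central difference; a minimal numeric check on a hypothetical quadratic (illustrative names only):

import numpy as np

def objective(theta):
    # Hypothetical toy objective
    return float(np.sum(theta ** 2))

theta = np.array([1.0, -2.0])
c = 1e-3  # perturbation length

gradient = np.zeros(len(theta))
for d in range(len(theta)):
    e = np.zeros(len(theta))
    e[d] = c
    gradient[d] = (objective(theta + e) - objective(theta - e)) / (2.0 * c)

# gradient is approximately [2.0, -4.0], i.e. the true gradient 2 * theta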