Example #1
class RouteInfo(object):
    route_key = attr.attrib(default=None)        
    attributes_required = attr.attrib(default=False)
    group_attributes_required = attr.attrib(default=False)
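Note that these excerpts show only the class bodies: in the original sources each class carries an attrs class decorator (@attr.s, or an auto_attribs variant for the annotated classes), which is what turns the attrib() declarations into a generated __init__ and repr. A minimal runnable sketch of the pattern, assuming nothing beyond the attrs package:

import attr

@attr.s
class RouteInfo(object):
    route_key = attr.attrib(default=None)
    attributes_required = attr.attrib(default=False)
    group_attributes_required = attr.attrib(default=False)

info = RouteInfo(route_key="GET /users")
print(info)
# RouteInfo(route_key='GET /users', attributes_required=False, group_attributes_required=False)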
Example #2
class StatusDetails(object):
    known = attrib(default=None)
    flaky = attrib(default=None)
    message = attrib(default=None)
    trace = attrib(default=None)
Example #3
class Trainer:
    """The trainer object handles the actual training and testing of the model.

    Args:
        model:              The PyTorch model
        data_bundle:        The training and test data as a DataBundle
        optimizer_bundle:   Contains the optimizer and the learning rate scheduler
        run_id:             A string identifying the specific run.
        batch_size:         The batch_size to train the model on.
        epochs:             The (total) number of epochs to train the model.
        criterion:          The optimization criterion (loss); default is cross-entropy
        metrics:            A list of metric objects
        device:             The compute device or list of compute devices to place the model(s) on
        logs_dir:           The directory to store the results
        conv_method:        The strategy for handling convolutional layers for saturation computation
        device_sat:         The device to compute the saturation on. If None, the same device is used as for
                            the model.
        delta:              The delta threshold for computing saturation
        data_parallel:      Enable or disable multi-GPU training
        downsampling:       If None, downsampling is disabled, else the feature maps will be downsampled
                            to (downsampling x downsampling) resolution
    """

    # private internal variables
    _tracker: CheckLayerSat = attrib(init=False)
    _save_path: str = attrib(init=False)
    _initial_epoch: int = attrib(init=False)
    _trained_epochs: int = attrib(init=False)
    _experiment_done: bool = attrib(init=False)

    # General Training setup
    model: Module
    data_bundle: DataBundle
    optimizer_bundle: OptimizerSchedulerBundle
    run_id: str
    batch_size: int = 32
    epochs: int = 30
    criterion: nn.modules.loss._Loss = nn.modules.CrossEntropyLoss()
    metrics: List[Metric] = attrib(factory=list)

    # Technical Setup
    device: str = 'cpu'
    logs_dir: str = './logs'

    # delve setup
    conv_method: str = 'channelwise'
    device_sat: Optional[str] = None
    delta: float = 0.99
    data_parallel: bool = False
    downsampling: Optional[int] = None

    def _initialize_tracker(self):
        writer = CSVandPlottingWriter(self._save_path.replace('.csv', ''),
                                      primary_metric='test_accuracy')

        self._tracker = CheckLayerSat(
            self._save_path.replace('.csv', ''), [writer],
            self.model,
            ignore_layer_names='convolution',
            stats=['lsat', 'idim'],
            sat_threshold=self.delta,
            verbose=False,
            conv_method=self.conv_method,
            log_interval=1,
            device=self.device_sat,
            reset_covariance=True,
            max_samples=None,
            initial_epoch=self._initial_epoch,
            interpolation_strategy='nearest'
            if self.downsampling is not None else None,
            interpolation_downsampling=self.downsampling)

    def _initialize_saving_structure(self):
        save_dir: str = build_saving_structure(
            logs_dir=self.logs_dir,
            model_name=self.model.name,
            dataset_name=self.data_bundle.dataset_name,
            output_resolution=self.data_bundle.output_resolution,
            run_id=self.run_id)
        self._save_path = os.path.join(
            save_dir,
            f"{self.model.name}-{self.data_bundle.dataset_name}-r{self.data_bundle.output_resolution}-bs{self.batch_size}-e{self.epochs}.csv"
        )

    def _load_model(self):
        self.model.load_state_dict(
            torch.load(self._save_path.replace('.csv',
                                               '.pt'))['model_state_dict'])
        if self.data_parallel:
            # TODO: make this work with DistributedDataParallel
            self.model = nn.DataParallel(self.model)
            # from torch.nn.parallel import DistributedDataParallel
        self.model = self.model.to(self.device)

    def _load_optimizer_and_scheduler(self):
        self.optimizer_bundle.optimizer.load_state_dict(
            torch.load(self._save_path.replace('.csv', '.pt'))['optimizer'])
        if self.optimizer_bundle.scheduler is not None:
            self.optimizer_bundle.scheduler.load_state_dict(
                torch.load(self._save_path.replace('.csv',
                                                   '.pt'))['scheduler'])

    def _load_initial_and_trained_epoch(self):
        self._trained_epochs = torch.load(
            self._save_path.replace('.csv', '.pt'))['epoch']
        self._initial_epoch = self._trained_epochs + 1

    def _check_training_done(self):
        if self._initial_epoch >= self.epochs:
            self._experiment_done = True
            print(
                'Experiment logs with an identical run_id were detected for this exact experiment; '
                'training will be skipped. Consider using another run_id.')

    def _checkpointing(self):
        self._initial_epoch = 0
        self._trained_epochs = 0
        self._experiment_done = False
        self.model = self.model.to(self.device)
        if os.path.exists(self._save_path):
            self._load_initial_and_trained_epoch()
            self._check_training_done()
            self._load_model()
            self._load_optimizer_and_scheduler()
            print('Resuming existing run, starting at epoch',
                  self._initial_epoch + 1, 'from',
                  self._save_path.replace('.csv', '.pt'))

    def _enable_benchmark_mode_if_cuda(self):
        if "cuda" in self.device:
            from torch.backends import cudnn
            cudnn.benchmark = True

    def __attrs_post_init__(self):
        self.device_sat = self.device if self.device_sat is None else self.device_sat
        self._enable_benchmark_mode_if_cuda()
        self._initialize_saving_structure()
        self._checkpointing()
        self._initialize_tracker()

    def _reset_metrics(self):
        for metric in self.metrics:
            metric.reset()

    def _eval_metrics(self, y_true: torch.Tensor, y_pred: torch.Tensor):
        for metric in self.metrics:
            metric.update(y_true, y_pred)

    def _print_status(self, batch: int, old_time: float, dataset: DataLoader):
        metrics = [
            f"{metric.name}:  {round(metric.value, 3)}"
            for metric in self.metrics
        ]
        print(batch, 'of', len(dataset), 'processing time',
              round(time() - old_time, 3), *metrics)

    def _print_epoch_status(self, epoch: int, old_time: float,
                            metric_dict: Dict[str, float]):
        metrics = [f"{k}:  {round(v, 3)}" for (k, v) in metric_dict.items()]
        print(epoch + 1, 'of', self.epochs, 'processing time',
              round(time() - old_time, 3), *metrics)

    def _track_results(self, prefix: str, metric_name: str,
                       metric_value: float) -> Tuple[str, float]:
        self._tracker.add_scalar(f"{prefix}_{metric_name}", metric_value)
        return f"{prefix}_{metric_name}", metric_value

    def _track_metrics(self, prefix: str, loss: float,
                       total: int) -> Dict[str, float]:
        result: Dict[str, float] = dict()
        for metric in self.metrics:
            name, val = self._track_results(prefix, metric.name, metric.value)
            result[name] = val
        name, val = self._track_results(prefix, "loss", loss / total)
        result[name] = val
        return result

    def _save_checkpoint(self, train_metric: Dict[str, float],
                         test_metric: Dict[str, float], epoch: int):
        state_dict = {
            'model_state_dict':
            self.model.state_dict(),
            'optimizer':
            self.optimizer_bundle.optimizer.state_dict(),
            'scheduler':
            None if self.optimizer_bundle.scheduler is None else
            self.optimizer_bundle.scheduler.state_dict(),
            'epoch':
            epoch
        }
        state_dict.update(train_metric)
        state_dict.update(test_metric)
        torch.save(state_dict, self._save_path.replace('.csv', '.pt'))

    def train(self):
        """Train the model.

        The model is trained up to the total number of epochs given in the constructor.
        Epochs completed in previous runs of this model count toward that total.

        Returns:
            The path to the saturation and metric logs.
        """
        if self._experiment_done:
            return self._save_path
        old_time = time()
        for epoch in range(self._initial_epoch, self.epochs):
            print('Start training epoch', epoch + 1)
            train_metric = self.train_epoch()
            test_metric = self.test()
            train_metric.update(test_metric)
            self._print_epoch_status(epoch=epoch,
                                     old_time=old_time,
                                     metric_dict=train_metric)
            old_time = time()

            if self.optimizer_bundle.scheduler is not None:
                self.optimizer_bundle.scheduler.step()
            self._tracker.add_saturations()
            self._save_checkpoint(train_metric=train_metric,
                                  test_metric=test_metric,
                                  epoch=epoch)
        self._tracker.close()
        return self._save_path  # _save_path already ends in '.csv'

    def train_epoch(self) -> Dict[str, float]:
        """Train a single epoch.

        Returns:
            A dictionary containing all metrics computed incrementally during training.
        """
        self.model.train()
        self._reset_metrics()
        running_loss = 0
        total = 0
        old_time = time()
        for batch, data in enumerate(self.data_bundle.train_dataset):
            if batch % 10 == 0 and batch != 0:
                self._print_status(batch, old_time,
                                   self.data_bundle.train_dataset)
                old_time = time()

            inputs, labels = data
            inputs, labels = inputs.to(self.device), labels.to(self.device)

            self.optimizer_bundle.optimizer.zero_grad()
            outputs = self.model(inputs)
            _, predicted = torch.max(outputs.data, 1)
            self._eval_metrics(labels, outputs)

            loss = self.criterion(outputs, labels)
            loss.backward()
            self.optimizer_bundle.optimizer.step()

            running_loss += loss.item()
            total += self.batch_size
        return self._track_metrics('training', running_loss, total)

    def test(self):
        """Evaluate the model on the test set.

        Returns:
            The metric computed on the test set.
        """
        self._reset_metrics()
        self.model.eval()
        total = 0
        test_loss = 0
        with torch.no_grad():
            old_time = time()
            for batch, data in enumerate(self.data_bundle.test_dataset):
                inputs, labels = data
                inputs, labels = inputs.to(self.device), labels.to(self.device)

                outputs = self.model(inputs)
                loss = self.criterion(outputs, labels)
                _, predicted = torch.max(outputs.data, 1)

                total += labels.size(0)
                test_loss += loss.item()

                self._eval_metrics(labels, outputs)

                if batch % 10 == 0 or batch == (
                        len(self.data_bundle.test_dataset) - 1):
                    self._print_status(batch, old_time,
                                       self.data_bundle.test_dataset)
                    old_time = time()

            test_metrics = self._track_metrics('test', test_loss, total)
        return test_metrics
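For context, a hedged sketch of how a Trainer like this is driven; the model and bundle objects below are placeholders for the project's own types, not part of the excerpt:

trainer = Trainer(model=my_model,                       # a torch.nn.Module exposing a .name
                  data_bundle=my_data_bundle,           # DataBundle with train/test loaders
                  optimizer_bundle=my_optimizer_bundle, # optimizer plus optional scheduler
                  run_id="run_001",
                  batch_size=64,
                  epochs=10,
                  device="cuda:0")
log_path = trainer.train()  # resumes from an existing checkpoint automatically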
Example #4
class TestStepResult(ExecutableItem):
    id = attrib(default=None)
Example #5
class Label(object):
    name = attrib(default=None)
    value = attrib(default=None)
Example #6
class GoogleTrackBase(Track):
    """Adds an id argument."""
    id = attrib(default=Factory(lambda: None))
Example #7
class OfflineRenderDriver(object):
    """Obtain and store ancillary rendering parameters, and use them to perform file-to-file rendering."""

    target_layout = attrib()
    speakers_file = attrib()
    output_gain_db = attrib()
    fail_on_overload = attrib()
    enable_block_duration_fix = attrib()
    config = attrib(default=Factory(dict))

    programme_id = attrib(default=None)
    complementary_object_ids = attrib(default=Factory(list))

    conversion_mode = attrib(default=None)

    blocksize = 8192

    @classmethod
    def add_args(cls, parser):
        """Add arguments to an ArgumentParser that can be used by from_args."""
        formats_string = ", ".join(bs2051.layout_names)
        parser.add_argument(
            "-s",
            "--system",
            required=True,
            metavar="target_system",
            help="Target output system, accoring to ITU-R BS.2051. "
            "Available systems are: {}".format(formats_string))

        parser.add_argument("-l",
                            "--layout",
                            type=argparse.FileType("r"),
                            metavar="layout_file",
                            help="Layout config file")
        parser.add_argument("--output-gain-db",
                            type=float,
                            metavar="gain_db",
                            default=0,
                            help="output gain in dB (default: 0)")
        parser.add_argument(
            "--fail-on-overload",
            "-c",
            action="store_true",
            help="fail if an overload condition is detected in the output")
        parser.add_argument(
            "--enable-block-duration-fix",
            action="store_true",
            help="automatically try to fix faulty block format durations")

        parser.add_argument("--programme",
                            metavar="id",
                            help="select an audioProgramme to render by ID")
        parser.add_argument(
            "--comp-object",
            metavar="id",
            action="append",
            default=[],
            help="select an audioObject by ID from a complementary group")

        parser.add_argument(
            '--apply-conversion',
            choices=("to_cartesian", "to_polar"),
            help=
            'Apply conversion to Objects audioBlockFormats before rendering')

    @classmethod
    def from_args(cls, args):
        return cls(
            target_layout=args.system,
            speakers_file=args.layout,
            output_gain_db=args.output_gain_db,
            fail_on_overload=args.fail_on_overload,
            enable_block_duration_fix=args.enable_block_duration_fix,
            programme_id=args.programme,
            complementary_object_ids=args.comp_object,
            conversion_mode=args.apply_conversion,
        )

    def load_output_layout(self):
        """Load the specified layout.

        Returns:
            layout (Layout): loudspeaker layout
            upmix (sparse array or None): optional matrix to apply after rendering
            n_channels (int): number of channels required in output file
        """
        spkr_layout = bs2051.get_layout(self.target_layout)

        if self.speakers_file is not None:
            real_layout = layout.load_real_layout(self.speakers_file)
            spkr_layout, upmix = spkr_layout.with_real_layout(real_layout)
            spkr_layout.check_positions()
            spkr_layout.check_upmix_matrix(upmix)
            upmix = scipy.sparse.csc_matrix(upmix.T)
            n_channels = upmix.shape[1]
        else:
            upmix = None
            n_channels = len(spkr_layout.channels)

        return spkr_layout, upmix, n_channels

    @property
    def output_gain_linear(self):
        return 10.0**(self.output_gain_db / 20.0)

    @classmethod
    def lookup_adm_element(cls, adm, element_id, element_type,
                           element_type_name):
        """Lookup an element in adm by type and ID, with nice error messages."""
        if element_id is None:
            return None

        try:
            element = adm[element_id]
        except KeyError:
            raise KeyError(
                "could not find {element_type_name} with ID {element_id}".
                format(
                    element_type_name=element_type_name,
                    element_id=element_id,
                ))

        if not isinstance(element, element_type):
            raise ValueError(
                "{element_id} is not an {element_type_name}".format(
                    element_type_name=element_type_name,
                    element_id=element_id,
                ))

        return element

    def get_audio_programme(self, adm):
        return self.lookup_adm_element(adm, self.programme_id, AudioProgramme,
                                       "audioProgramme")

    def get_complementary_objects(self, adm):
        return [
            self.lookup_adm_element(adm, obj_id, AudioObject, "audioObject")
            for obj_id in self.complementary_object_ids
        ]

    def apply_conversion(self, selected_items):
        if self.conversion_mode is None:
            return selected_items
        elif self.conversion_mode == "to_cartesian":
            return convert_objects_to_cartesian(selected_items)
        elif self.conversion_mode == "to_polar":
            return convert_objects_to_polar(selected_items)
        else:
            assert False

    def get_rendering_items(self, adm):
        """Get rendering items from the input file adm.

        Parameters:
            adm (ADM): ADM to get the RenderingItems from

        Returns:
            list of RenderingItem: selected rendering items
        """
        audio_programme = self.get_audio_programme(adm)
        comp_objects = self.get_complementary_objects(adm)
        selected_items = select_rendering_items(
            adm,
            audio_programme=audio_programme,
            selected_complementary_objects=comp_objects)

        selected_items = preprocess_rendering_items(selected_items)

        selected_items = self.apply_conversion(selected_items)

        return selected_items

    def render_input_file(self, infile, spkr_layout, upmix=None):
        """Get sample blocks of the input file after rendering.

        Parameters:
            infile (Bw64AdmReader): file to read from
            spkr_layout (Layout): layout to render to
            upmix (sparse array or None): optional upmix to apply

        Yields:
            2D sample blocks
        """
        renderer = Renderer(spkr_layout, **self.config)
        renderer.set_rendering_items(self.get_rendering_items(infile.adm))

        for input_samples in chain(infile.iter_sample_blocks(self.blocksize),
                                   [None]):
            if input_samples is None:
                output_samples = renderer.get_tail(infile.sampleRate,
                                                   infile.channels)
            else:
                output_samples = renderer.render(infile.sampleRate,
                                                 input_samples)

            output_samples *= self.output_gain_linear

            if upmix is not None:
                output_samples *= upmix

            yield output_samples

    def run(self, input_file, output_file):
        """Render input_file to output_file."""
        spkr_layout, upmix, n_channels = self.load_output_layout()

        output_monitor = PeakMonitor(n_channels)

        with openBw64Adm(input_file) as infile:
            infile.adm.validate()
            timing_fixes.check_blockFormat_timings(
                infile.adm, fix=self.enable_block_duration_fix)

            formatInfo = FormatInfoChunk(formatTag=1,
                                         channelCount=n_channels,
                                         sampleRate=infile.sampleRate,
                                         bitsPerSample=infile.bitdepth)
            with openBw64(output_file, "w", formatInfo=formatInfo) as outfile:
                for output_block in self.render_input_file(
                        infile, spkr_layout, upmix):
                    output_monitor.process(output_block)
                    outfile.write(output_block)

        output_monitor.warn_overloaded()
        if self.fail_on_overload and output_monitor.has_overloaded():
            raise Exception("error: output overloaded")
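Putting the pieces together, a sketch of the intended command-line flow ("0+5+0" is one of the BS.2051 layout names; the file names are placeholders):

import argparse

parser = argparse.ArgumentParser()
OfflineRenderDriver.add_args(parser)
args = parser.parse_args(["-s", "0+5+0"])

driver = OfflineRenderDriver.from_args(args)
driver.run("in.wav", "out.wav")  # file-to-file rendering with overload monitoring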
Example #8
class AggregationAuthorizationProperties(ResourceProperties):
    AuthorizedAccountId = attrib(default=None)
    AuthorizedAwsRegion = attrib(default=None)
Example #9
class ConfigurationAggregatorProperties(ResourceProperties):
    AccountAggregationSources = attrib(default=None)
    ConfigurationAggregatorName = attrib(default=None)
    OrganizationAggregationSource = attrib(default=None)
Example #10
class ConfigurationRecorderProperties(ResourceProperties):
    Name = attrib(default=None)
    RecordingGroup = attrib(default=None)
    RoleARN = attrib(default=None)
Example #11
class DeliveryChannelProperties(ResourceProperties):
    ConfigSnapshotDeliveryProperties = attrib(default=None)
    Name = attrib(default=None)
    S3BucketName = attrib(default=None)
    S3KeyPrefix = attrib(default=None)
    SnsTopicARN = attrib(default=None)
Example #12
class PackagesNotFoundError(CondaException):
    """
    Conda 4.5 exception - this reports all missing packages.
    """

    packages = attrib(default=())
Example #13
class UnknownCondaError(CondaException):
    data = attrib(default=Factory(dict))
Example #14
class CondaException(Exception, NonStrictAttrs):
    command = attrib()
    message = attrib(default=None)
Example #15
File: models.py Project: folkengine/dndme
class Monster(Combatant):
    cr = attrib(default=0)
    xp = attrib(default=0)
    size = attrib(default="medium")
    mtype = attrib(default="humanoid")
    alignment = attrib(default="unaligned")

    str = attrib(default=10)
    dex = attrib(default=10)
    con = attrib(default=10)
    int = attrib(default=10)
    wis = attrib(default=10)
    cha = attrib(default=10)

    def __getattr__(self, attr_name):
        if attr_name[3:] == '_mod' and \
                attr_name[:3] in ('str', 'dex', 'con', 'int', 'wis', 'cha'):
            return self.ability_modifier(getattr(self, attr_name[:3]))
        elif attr_name == 'initiative_mod':
            return self.ability_modifier(getattr(self, 'dex'))
        else:
            raise AttributeError(
                f"'Monster' object has no attribute '{attr_name}'")

    def ability_modifier(self, stat):
        return floor((stat - 10) / 2)

    armor = attrib(default="")
    speed = attrib(default=30)
    skills = attrib(default=attr_factory(dict))
    resist = attrib(default=attr_factory(list))
    immune = attrib(default=attr_factory(list))
    vulnerable = attrib(default=attr_factory(list))
    languages = attrib(default=attr_factory(list))
    features = attrib(default=attr_factory(dict))
    actions = attrib(default=attr_factory(dict))
    legendary_actions = attrib(default=attr_factory(dict))
    reactions = attrib(default=attr_factory(dict))
    notes = attrib(default="")
    origin = attrib(default="origin unknown")
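With the attrs decorator from the source file in place, the __getattr__ hook above derives ability modifiers on demand:

goblin = Monster(name="Goblin", str=8, dex=14)
print(goblin.str_mod)         # floor((8 - 10) / 2)  -> -1
print(goblin.dex_mod)         # floor((14 - 10) / 2) ->  2
print(goblin.initiative_mod)  # delegates to dex     ->  2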
Example #16
class PathRequirement(Requirement):
    _name: str = attrib()
    _path: str = attrib()
    _extras: Set[str] = attrib()
    _environment_markers: Optional[EnvironmentMarker] = attrib()
    _logger: Logger = attrib()

    def name(self) -> str:
        return canonicalize_name(self._name)

    def extras(self) -> Set[str]:
        return self._extras

    def logger(self) -> Logger:
        return self._logger

    def add(self, other: Requirement,
            target_platform: TargetPlatform) -> Requirement:
        if not self.applies_to_target(target_platform):
            return other
        elif not other.applies_to_target(target_platform):
            return self
        elif self.name() != other.name():
            raise IncompatibleRequirements(
                "Cannot add requirements with different names `{name1}` and `{name2}`"
                .format(name1=self.name(), name2=other.name()))
        else:
            if isinstance(other, VersionRequirement):
                return self
            elif isinstance(other, UrlRequirement):
                raise IncompatibleRequirements(
                    "Cannot combine requirements with path `{path} and url `{url}`"
                    .format(path=self.path, url=other.url))
            elif isinstance(other, PathRequirement):
                if self.path() != other.path():
                    raise IncompatibleRequirements(
                        "Cannot combine requirements with different paths `{path1}` and `{path2}`"
                        .format(path1=self.path(), path2=other.path()))
                else:
                    return self
            else:
                raise IncompatibleRequirements(
                    "Did not recognize requirement type of {}".format(other))

    def source(self) -> PathSource:
        return PathSource(path=self._path)

    def environment_markers(self) -> Optional[EnvironmentMarker]:
        return self._environment_markers

    def to_line(self) -> str:
        extras = "[" + ",".join(self.extras()) + "]" if self.extras() else ""
        return "file://{path}#egg={name}{extras}".format(path=self._path,
                                                         extras=extras,
                                                         name=self.name())

    def path(self) -> str:
        return self._path

    def change_path(self, mapping: Callable[[str], str]) -> "PathRequirement":
        return evolve(self, path=mapping(self._path))
Example #17
class ObjectRecognizerAsTemplateLearner(TemplateLearner):
    _object_recognizer: ObjectRecognizer = attrib(
        validator=instance_of(ObjectRecognizer))
    _language_mode: LanguageMode = attrib(validator=instance_of(LanguageMode),
                                          kw_only=True)
    _concepts_to_templates: ImmutableSetMultiDict[Concept,
                                                  SurfaceTemplate] = attrib(
                                                      init=False)

    def learn_from(
        self,
        language_perception_semantic_alignment:
        LanguagePerceptionSemanticAlignment,
        offset: int = 0,
    ) -> None:
        # The object recognizer doesn't learn anything.
        # It just recognizes predefined object patterns.
        pass

    @staticmethod
    def _enrich_post_process(
        perception_graph_after_matching: PerceptionGraph,
        immutable_new_nodes: AbstractSet[SemanticNode],
    ) -> Tuple[PerceptionGraph, AbstractSet[SemanticNode]]:
        new_nodes = []
        perception_graph_after_processing = perception_graph_after_matching
        for candidate_object_graph in extract_candidate_objects(
                perception_graph_after_matching,
                sort_by_increasing_size=False):
            fake_pattern_graph = PerceptionGraphPattern.from_graph(
                candidate_object_graph)
            fake_object_semantic_node = ObjectSemanticNode(
                concept=FunctionalObjectConcept("unknown_object"))
            perception_graph_after_processing = replace_match_with_object_graph_node(
                matched_object_node=fake_object_semantic_node,
                current_perception=perception_graph_after_processing,
                pattern_match=PerceptionGraphPatternMatch(
                    matched_pattern=fake_pattern_graph.
                    perception_graph_pattern,
                    graph_matched_against=perception_graph_after_processing,
                    matched_sub_graph=candidate_object_graph,
                    pattern_node_to_matched_graph_node=fake_pattern_graph.
                    perception_graph_node_to_pattern_node,
                ),
            ).perception_graph_after_replacement
            new_nodes.append(fake_object_semantic_node)

        return (
            perception_graph_after_processing,
            immutableset(chain(immutable_new_nodes, new_nodes)),
        )

    def enrich_during_learning(
        self, language_perception_semantic_alignment:
        LanguagePerceptionSemanticAlignment
    ) -> LanguagePerceptionSemanticAlignment:
        return self._object_recognizer.match_objects_with_language(
            language_perception_semantic_alignment,
            post_process=self._enrich_post_process)

    def enrich_during_description(
        self, perception_semantic_alignment: PerceptionSemanticAlignment
    ) -> PerceptionSemanticAlignment:
        (new_perception_semantic_alignment,
         _) = self._object_recognizer.match_objects(
             perception_semantic_alignment,
             post_process=self._enrich_post_process)
        return new_perception_semantic_alignment

    def templates_for_concept(
            self, concept: Concept) -> ImmutableSet[SurfaceTemplate]:
        if self._language_mode == LanguageMode.ENGLISH:
            return self._concepts_to_templates[concept]
        elif self._language_mode == LanguageMode.CHINESE:
            if concept.debug_string == "you":
                return immutableset([
                    SurfaceTemplate.for_object_name(
                        "ni3", language_mode=self._language_mode)
                ])
            if concept.debug_string == "me":
                return immutableset([
                    SurfaceTemplate.for_object_name(
                        "wo3", language_mode=self._language_mode)
                ])
            mappings = (
                GAILA_PHASE_1_CHINESE_LEXICON._ontology_node_to_word  # pylint:disable=protected-access
            )
            for k, v in mappings.items():
                if k.handle == concept.debug_string:
                    return immutableset([
                        SurfaceTemplate.for_object_name(
                            v.base_form, language_mode=self._language_mode)
                    ])
        # FunctionalObjectConcepts mean we have recognized an object but don't have
        # knowledge of what the lexicalization is, so we just return an empty set.
        if isinstance(concept, FunctionalObjectConcept):
            return immutableset()
        raise RuntimeError(f"Invalid concept {concept}")

    def log_hypotheses(self, log_output_path: Path) -> None:
        for concept, hypothesis in self.concepts_to_patterns().items():
            hypothesis.render_to_file(
                graph_name="perception",
                output_file=Path(log_output_path /
                                 f"{str(type(self))}-{concept.debug_string}"),
            )

    def concepts_to_patterns(self) -> Dict[Concept, PerceptionGraphPattern]:
        return {
            k: v
            for k, v in
            self._object_recognizer._concepts_to_static_patterns.items()  # pylint:disable=protected-access
        }

    @_concepts_to_templates.default
    def _init_concepts_to_templates(
            self) -> ImmutableSetMultiDict[Concept, SurfaceTemplate]:
        # Ground is added explicitly to this list because the code
        # which matches the ground matches by recognition and not by shape.
        # See: `ObjectRecognizer.match_objects`
        return immutablesetmultidict((
            concept,
            SurfaceTemplate.for_object_name(name,
                                            language_mode=self._language_mode),
        ) for (concept, name) in (
            list(self._object_recognizer._concepts_to_names.items()  # pylint:disable=protected-access
                 ) + [(GROUND_OBJECT_CONCEPT, "ground")]))
Example #18
class VersionRequirement(Requirement):
    _name: str = attrib()
    _versions: List[Tuple[str, str]] = attrib()
    _extras: Set[str] = attrib()
    _environment_markers: Optional[EnvironmentMarker] = attrib()
    _logger: Logger = attrib()

    def name(self) -> str:
        return canonicalize_name(self._name)

    def extras(self) -> Set[str]:
        return self._extras

    def logger(self) -> Logger:
        return self._logger

    def add(self, other: Requirement,
            target_platform: TargetPlatform) -> Requirement:
        if not self.applies_to_target(target_platform):
            return other
        elif not other.applies_to_target(target_platform):
            return self
        elif self.name() != other.name():
            raise IncompatibleRequirements(
                "Cannot add requirments with different names `{name1}` and `{name2}`"
                .format(name1=self.name(), name2=other.name()))
        else:
            if isinstance(other, PathRequirement):
                return other
            elif isinstance(other, UrlRequirement):
                return other
            elif isinstance(other, VersionRequirement):
                return VersionRequirement(
                    name=self.name(),
                    extras=self._extras.union(other._extras),
                    versions=self.version() + other.version(),
                    environment_markers=None,
                    logger=self.logger(),
                )
            else:
                raise IncompatibleRequirements(
                    "Did not recognize requirement type of {}".format(other))

    def source(self) -> None:
        return None

    def environment_markers(self) -> Optional[EnvironmentMarker]:
        return self._environment_markers

    def version(self) -> List[Tuple[str, str]]:
        return self._versions

    def to_line(self) -> str:
        version = ", ".join([
            "{operator} {specifier}".format(operator=operator,
                                            specifier=specifier)
            for operator, specifier in self._versions
        ])
        extras = ("[{extras}]".format(
            extras=",".join(self.extras())) if self.extras() else "")
        return "{name}{extras} {version}".format(name=self._name,
                                                 version=version,
                                                 extras=extras)
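A sketch of the merge behaviour, assuming both requirements apply to the given target_platform and that `logger` and `target_platform` are placeholders for objects from the surrounding project:

a = VersionRequirement(name="requests", versions=[(">=", "2.0")],
                       extras=set(), environment_markers=None, logger=logger)
b = VersionRequirement(name="requests", versions=[("<", "3.0")],
                       extras={"security"}, environment_markers=None, logger=logger)

merged = a.add(b, target_platform)
print(merged.to_line())  # requests[security] >= 2.0, < 3.0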
Example #19
class CH4(CH5):
    _name = attrib(init=False, default='Chain Heal (Rank 4)')
    _base = attrib(init=False, default=(605, 692))
Example #20
class UrlRequirement(Requirement):
    _name: str = attrib()
    _url: str = attrib()
    _extras: Set[str] = attrib()
    _environment_markers: Optional[EnvironmentMarker] = attrib()
    _logger: Logger = attrib()

    def name(self) -> str:
        return canonicalize_name(self._name)

    def extras(self) -> Set[str]:
        return self._extras

    def logger(self) -> Logger:
        return self._logger

    def add(self, other: Requirement,
            target_platform: TargetPlatform) -> Requirement:
        if not self.applies_to_target(target_platform):
            return other
        elif not other.applies_to_target(target_platform):
            return self
        elif self.name() != other.name():
            raise IncompatibleRequirements(
                "Cannot add requirments with different names `{name1}` and `{name2}`"
                .format(name1=self.name(), name2=other.name()))
        else:
            if isinstance(other, VersionRequirement):
                return self
            elif isinstance(other, PathRequirement):
                raise IncompatibleRequirements(
                    "Cannot combine requirements with with url `{url}` and path `{path}`"
                    .format(url=self.url, path=other.path))
            elif isinstance(other, UrlRequirement):
                if self.url() != other.url():
                    raise IncompatibleRequirements(
                        "Cannot combine requirements with different urls `{url1}` and `{url2}`"
                        .format(url1=self.url(), url2=other.url()))
                else:
                    return self
            else:
                raise IncompatibleRequirements(
                    "Did not recognize requirement type of {}".format(other))

    def source(self) -> PackageSource:
        if self._url.startswith("git+"):
            return self._handle_git_source(self._url[4:])
        elif self._url.startswith("git://"):
            return self._handle_git_source(self._url)
        elif self._url.startswith("hg+"):
            return self._handle_hg_source(self._url[3:])
        elif self.url_scheme() == "file":
            return PathSource(path=self.url_path())
        else:
            return UrlSource(url=self._url, logger=self._logger)

    def environment_markers(self) -> Optional[EnvironmentMarker]:
        return self._environment_markers

    def _handle_hg_source(self, url: str) -> HgSource:
        try:
            url, rev = url.split("@")
        except ValueError:
            return HgSource(url=url, logger=self._logger)
        else:
            return HgSource(url=url, revision=rev, logger=self._logger)

    def _handle_git_source(self, url: str) -> GitSource:
        try:
            url, rev = url.split("@")
        except ValueError:
            return GitSource(url=url, logger=self._logger)
        else:
            return GitSource(url=url, logger=self._logger, revision=rev)

    def to_line(self) -> str:
        extras = "[" + ",".join(self.extras()) + "]" if self.extras() else ""
        return "{url}#egg={name}{extras}".format(url=self._url,
                                                 name=self.name(),
                                                 extras=extras)

    def url(self) -> str:
        return self._url

    def url_scheme(self) -> str:
        url = urlparse(self.url())
        return url.scheme

    def url_path(self) -> str:
        url = urlparse(self.url())
        return url.path
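The scheme dispatch in source() can be seen directly; the logger argument is a placeholder here:

req = UrlRequirement(name="mypkg",
                     url="git+https://example.com/mypkg.git@v1.2",
                     extras=set(), environment_markers=None, logger=logger)
src = req.source()  # GitSource(url='https://example.com/mypkg.git', revision='v1.2')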
Example #21
class ExecutableItem(object):
    name = attrib(default=None)
    status = attrib(default=None)
    statusDetails = attrib(default=None)
    stage = attrib(default=None)
    description = attrib(default=None)
    descriptionHtml = attrib(default=None)
    steps = attrib(default=Factory(list))
    attachments = attrib(default=Factory(list))
    parameters = attrib(default=Factory(list))
    start = attrib(default=None)
    stop = attrib(default=None)
Example #22
class DataDictionary:
    """The parent class of a container that provides information on a tabular data structure."""

    __columns__ = attrib(init=False, repr=False)

    @property
    def columns(self):
        """Show column names."""
        return self.__columns__

    def list_columns(self):
        """List all columns."""
        return [getattr(self, col) for col in self.__columns__]

    # def __repr__(self):
    #     name = self.__class__.__qualname__
    #     cols = "\n\t".join([repr(getattr(self, col)) for col in self.__columns__])
    #     return f"{name}[DataDictionary]\n\t{cols}\n"

    def _cols_valid(self, dataframe: pd.DataFrame):
        """Test match of columns in datadictionary vs dataframe."""
        results, msgs = [], []
        col_dict = {col.name: col for col in self.list_columns()}
        cols = list(dict.fromkeys(list(col_dict) + list(dataframe.columns)))
        for col in cols:
            dd_col = col_dict.get(col, None)
            df_col = dataframe.get(col, None)
            if dd_col is None:
                results.append(False)
                msg = f"The column [{col}] is missing from the DataDictionary but is in the dataframe."
                msgs.append(msg)
            if df_col is None:
                results.append(False)
                msg = f"The column [{col}] is missing from the dataframe but is in the DataDictionary."
                msgs.append(msg)
            if dd_col is not None and df_col is not None:
                results.append(True)
        return results, msgs

    def _types_valid(self, dataframe: pd.DataFrame):
        """Test column types in dataframe."""
        results, msgs = [], []
        for dd_col in self.list_columns():
            df_col = dataframe.get(dd_col.name, None)
            if df_col is not None:
                test_eq = dd_col.dtype.value == df_col.dtype.name
                if test_eq is False:
                    msg = f"The column [{dd_col.name}] in the DataDictionary has type [{dd_col.dtype.value}] "
                    msg += f"which is different from type in the dataframe passed [{df_col.dtype.name}]."
                    msgs.append(msg)
                results.append(test_eq)

        return results, msgs

    def _validators_valid(self, dataframe: pd.DataFrame):
        """Test validators for columns in dataframe."""
        results, msgs = [], []
        for dd_col in self.list_columns():
            df_col = dataframe.get(dd_col.name, None)
            if df_col is not None and len(dd_col.validator) > 0:
                for validator in dd_col.validator:
                    try:
                        validator(inst=self, attr=dd_col, value=df_col)
                        results.append(True)
                    except Exception:
                        results.append(False)
                        msgs.append(
                            f"The column [{dd_col.name}] failed {str(validator)[1:-1]}."
                        )

        return results, msgs

    def validate(
        self, dataframe: pd.DataFrame, types: bool = True, validators: bool = True
    ):
        """Validate passed dataframe types and/or validators against DataDictionary.

        :param pd.DataFrame dataframe: The dataframe to validate.
        :param bool types: Test DataDictionary types against dataframe. Default is True.
        :param bool validators: Test DataDictionary validators against dataframe. Default is True.

        :return: True if test pass.
        :rtype: bool

        :raises DataDictionaryValidateError: If any of the following rules are broken -
            - All columns present in the data dictionary are in the dataframe and vice versa.
            - If types is True, test the data types of the dataframe match the data dictionary.
            - If validators is True, test that any attached validator pass for a column.
        """
        rets, msgs = self._cols_valid(dataframe)

        def _update_ret_msg(fn, dataframe):
            ret, msg = fn(dataframe)
            rets.extend(ret)
            msgs.extend(msg)

        if types is True:
            _update_ret_msg(self._types_valid, dataframe)
        if validators is True:
            _update_ret_msg(self._validators_valid, dataframe)

        if not all(rets):
            msg = "\n".join(msgs)
            raise DataDictionaryValidateError("\n" + msg)
        return True

    def def_parameter(
        self,
        column: str,
        add_dtype_validator: bool = False,
        dtype_mapping: Mapping = PANDAS_DTYPE_CONVERSION,
    ):
        """Define a parameter for a model from a data dictioanry column.

        :param str column: The column name.
        :param bool add_dtype_validator: If True, an instance_of validator is added to the parameter.
        :param Mapping dtype_mapping: A conversion mapping of PandasDtypes to different types.
        """
        col = getattr(self, column, None)
        if col is None:
            msg = f"The column [{column}] does not belong to the data dictionary."
            raise AttributeError(msg)
        dtype = dtype_mapping.get(col.dtype)
        validator = col.validator.copy()
        if add_dtype_validator is True:
            validator.append(instance_of(dtype))
        return def_parameter(
            dtype=dtype, description=col.description, validator=validator,
        )

    def def_sensitivity(self, column):
        raise NotImplementedError("This feature is not implemented yet.")

    def def_meta(self, column):
        raise NotImplementedError("This feature is not implemented yet.")

    def def_column(self, column):
        """Define a column for another data dictionary using an existing data dictioanry column.

        :param str column: The column name.
        """
        col = getattr(self, column, None)
        if col is None:
            msg = f"The column [{column}] does not belong to the data dictionary."
            raise AttributeError(msg)
        return col
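A hedged sketch of the intended call pattern; the column class behind list_columns() (exposing .name, .dtype, and .validator) is not shown in this excerpt, so `dd` below stands in for a concrete DataDictionary subclass:

import pandas as pd

df = pd.DataFrame({"age": [31, 42], "city": ["Oslo", "Bergen"]})
dd.validate(df)                    # raises DataDictionaryValidateError on any mismatch
dd.validate(df, validators=False)  # check column presence and dtypes only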
Example #23
class Parameter(object):
    name = attrib(default=None)
    value = attrib(default=None)
Example #24
File: models.py Project: folkengine/dndme
class Encounter:

    name = attrib(default="")
    location = attrib(default="")
    notes = attrib(default="")
    groups = attrib(default=attr_factory(list))  # a factory, so each Encounter gets its own list
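attrs evaluates a plain default such as default=[] once, at class definition time, so a bare list default would be shared by every Encounter; the list factory used above (as in the other models in this file) gives each instance its own list. For contrast:

e1, e2 = Encounter(), Encounter()
e1.groups.append("goblins")
print(e2.groups)  # [] with a factory default; would be ['goblins'] with default=[]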
Example #25
class Link(object):
    type = attrib(default=None)
    url = attrib(default=None)
    name = attrib(default=None)
Example #26
File: models.py Project: folkengine/dndme
class Combatant:
    name = attrib(default="")
    race = attrib(default="")
    ac = attrib(default=0)

    senses = attrib(default=attr_factory(dict))
    conditions = attrib(default=attr_factory(dict))

    _max_hp = attrib(default=10)
    _cur_hp = attrib(default=10)

    @property
    def max_hp(self):
        return self._max_hp

    @max_hp.setter
    def max_hp(self, value):
        try:
            self._max_hp = int(value)
        except ValueError:
            # we have an expression for max hp, so roll it
            self._max_hp = dice.roll_dice_expr(value)
        # setting max_hp for the first time? we should set cur_hp too
        if self.cur_hp is None:
            self.cur_hp = self._max_hp

    @property
    def cur_hp(self):
        return self._cur_hp

    @cur_hp.setter
    def cur_hp(self, value):
        if value > self.max_hp:
            value = self.max_hp
        if value < 0:
            if abs(value) >= self.max_hp:
                self.conditions = {'dead': inf}
            value = 0
        self._cur_hp = value

    def set_condition(self, condition, duration=inf):
        self.conditions[condition] = duration

    def unset_condition(self, condition):
        try:
            self.conditions.pop(condition)
        except KeyError:
            # We can probably safely ignore failures here,
            # since it shouldn't be the end of the world
            # to remove a condition that isn't in effect.
            pass

    def decrement_condition_durations(self):
        conditions_removed = []

        for condition in list(self.conditions):
            self.conditions[condition] -= 1

            if self.conditions[condition] == 0:
                self.conditions.pop(condition)
                conditions_removed.append(condition)

        return conditions_removed
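The max_hp setter accepts either an integer or a dice expression (rolled via dice.roll_dice_expr), and the cur_hp setter clamps values and handles death; a short illustration, again assuming the attrs decorator from the source file:

c = Combatant(name="Bandit")
c.max_hp = "2d8+2"        # non-integers are rolled as dice expressions
c.cur_hp = c.max_hp + 5   # clamped back down to max_hp
c.cur_hp = -c.max_hp      # at or below -max_hp: cur_hp -> 0, condition 'dead' set
print(c.conditions)       # {'dead': inf}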
Example #27
class Attachment(object):
    name = attrib(default=None)
    source = attrib(default=None)
    type = attrib(default=None)
Example #28
File: models.py Project: folkengine/dndme
class Character(Combatant):
    cclass = attrib(default="Fighter")
    level = attrib(default=1)
    initiative_mod = attrib(default=0)
Example #29
class SubspaceRulesClassifier(BaseEstimator):
    rules: List[Rule] = attrib()
    max_depth: int = attrib()
    random_state: int = attrib(default=42)
    train_default_on_whole_set: bool = attrib(default=True)
    depth_strategy: DepthStrategy = attrib(
        default=DepthStrategy.SUBTRACT_FROM_RULES)
    _clf_by_rule: Dict[Rule, any] = attrib(init=False, factory=dict)
    _default_clf: any = attrib(init=False)

    def fit(self, x, y):
        x, y = check_X_y(x, y)

        already_covered_indicies = set()

        for rule in self.rules:
            if self.depth_strategy == DepthStrategy.SUBTRACT_FROM_RULES:
                depth = self.max_depth - len(rule.statements)
            else:
                depth = self.max_depth

            matching_sample_indicies = pipe(
                x,
                map(to_instance),
                enumerate,
                filter(lambda idx_with_instance: rule.describes(
                    idx_with_instance[1])),
                map(lambda idx_with_instance: idx_with_instance[0]),
                list,
            )

            logger.debug(
                f"Depth={depth}, matching_samples={len(matching_sample_indicies)}"
            )

            if depth >= 1 and len(matching_sample_indicies) >= 1:

                if any(
                        already_covered_indicies.intersection(
                            set(matching_sample_indicies))):
                    raise Exception("Rules are overlapping")

                already_covered_indicies.update(matching_sample_indicies)

                x_train = x[matching_sample_indicies]
                y_train = y[matching_sample_indicies]

                clf = DecisionTreeClassifier(random_state=self.random_state,
                                             max_depth=depth)
                clf.fit(x_train, y_train)
                self._clf_by_rule[rule] = clf

        not_covered_indices = list(
            set(range(len(x))).difference(already_covered_indicies))
        default_clf = DecisionTreeClassifier(random_state=self.random_state,
                                             max_depth=self.max_depth)
        if len(not_covered_indices) == 0 or self.train_default_on_whole_set:
            default_clf.fit(x, y)
        else:
            x_train = x[not_covered_indices]
            y_train = y[not_covered_indices]
            default_clf.fit(x_train, y_train)

        self._default_clf = default_clf

    def predict(self, x):

        instance_idx_by_rule = defaultdict(list)
        not_covered_indicies = set(range(len(x)))
        for rule in self._clf_by_rule.keys():
            for idx, single_x in enumerate(x):
                if rule.describes(to_instance(single_x)):
                    instance_idx_by_rule[rule].append(idx)
                    not_covered_indicies.remove(idx)

        label_by_idx = OrderedDict()
        for rule, indicies in instance_idx_by_rule.items():
            clf = self._clf_by_rule[rule]
            x_to_classify = x[indicies]

            for label, idx in zip(clf.predict(x_to_classify), indicies):
                label_by_idx[idx] = label

        x_indicies_not_covered = list(not_covered_indicies)

        if len(x_indicies_not_covered) > 0:
            x_not_covered_to_classify = x[x_indicies_not_covered]

            for label, idx in zip(
                    self._default_clf.predict(x_not_covered_to_classify),
                    x_indicies_not_covered,
            ):
                label_by_idx[idx] = label

        return list(label_by_idx.values())

    def _classify_instance(self, x: List[any]):
        clf = self._find_corresponding_classifier(to_instance(x))

        return clf.predict([x])[0]

    def _find_corresponding_classifier(self, x: Instance) -> any:
        matching_rules = pipe(self._clf_by_rule.keys(),
                              filter(lambda r: r.describes(x)), list)

        if len(matching_rules) > 1:
            raise Exception("Too many matching rules")

        if len(matching_rules) == 0:
            return self._default_clf

        return self._clf_by_rule[matching_rules[0]]
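A hedged usage sketch; Rule and the surrounding helpers (to_instance, pipe) come from the project and are not shown here, so the rules and arrays below are placeholders:

clf = SubspaceRulesClassifier(rules=[rule_a, rule_b], max_depth=4)
clf.fit(X_train, y_train)     # one DecisionTree per rule subspace, plus a default tree
y_pred = clf.predict(X_test)  # each sample is routed to the tree whose rule matches it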
Example #30
class ExperimentDataAfterSplit3D:
    x_test = attrib()
    x_train = attrib()
    y_train = attrib()
    y_test = attrib()
    shape: tuple = attrib()