Example #1
from typing import Dict, List, Tuple

from pydash import py_


def extract_user_stories(
    related_identifiers: List[Dict],
    user_story_typeid: str = "user-story",
) -> Tuple[List, List]:
    """Extract the user stories from a record.

    Args:
        related_identifiers (List[Dict]): List of Relation Objects.

        user_story_typeid (str): Resource Type ID of the user-story records.

    Returns:
        Tuple[List, List]: Tuple with the following content:
            - First position: related identifiers that are not user stories;
            - Second position: user story identifiers.

    See:
        For more information about the Relation Object, please check the `related-identifiers`
        page in the InvenioRDM documentation:
        - https://inveniordm.docs.cern.ch/reference/metadata/#related-identifiersworks-0-n
    """
    # py_.partition puts items matching the predicate first, the rest second.
    not_user_stories, user_stories = py_.partition(
        related_identifiers,
        lambda x: py_.get(x, "ui.resource_type.id") != user_story_typeid,
    )
    return not_user_stories, user_stories
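A minimal sketch of what this returns, with made-up identifier dicts (only py_.partition and py_.get are real pydash calls; the records are illustrative, not actual InvenioRDM data):

from pydash import py_

related = [
    {"ui": {"resource_type": {"id": "dataset"}}},
    {"ui": {"resource_type": {"id": "user-story"}}},
]

others, stories = py_.partition(
    related, lambda x: py_.get(x, "ui.resource_type.id") != "user-story"
)
# others  == [{"ui": {"resource_type": {"id": "dataset"}}}]
# stories == [{"ui": {"resource_type": {"id": "user-story"}}}]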
Example #2
def report_events_summary(events: List[CalendarEvent]) -> Dict[str, float]:
    # Exclude all-day events so they don't skew the totals.
    events = [ev for ev in events if not is_day_long_event(ev)]
    total_hours = calculate_time_spent(events) / 60  # minutes -> hours

    external_events, internal_events = py_.partition(events, is_event_external)
    external_hours = calculate_time_spent(external_events) / 60
    personal_events, rest_events = py_.partition(internal_events, is_event_personal)
    personal_hours = calculate_time_spent(personal_events) / 60

    return {
        "total": total_hours,
        "external": external_hours,
        "personal": personal_hours,
        "1:1": calculate_time_spent([ev for ev in rest_events if is_event_one_on_one(ev)]) / 60,
        "rest": calculate_time_spent([ev for ev in rest_events if not is_event_one_on_one(ev)]) / 60
    }
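A sketch of the successive-partition pattern above, using made-up event dicts (plain dicts stand in for CalendarEvent, and the lambdas stand in for the is_event_* helpers). Each event lands in exactly one bucket, which is why the per-bucket hours sum to the total:

from pydash import py_

events = [
    {"who": "client", "minutes": 60},
    {"who": "self", "minutes": 30},
    {"who": "team", "minutes": 45},
]

external, internal = py_.partition(events, lambda ev: ev["who"] == "client")
personal, rest = py_.partition(internal, lambda ev: ev["who"] == "self")
# external == [{"who": "client", "minutes": 60}]
# personal == [{"who": "self", "minutes": 30}]
# rest     == [{"who": "team", "minutes": 45}]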
Example #3
    def update_targets(self, items):
        """
        Insert the new task_type documents into the task_types collection.

        Args:
            items ([dict]): task_type dicts to insert into the task_types collection.
        """
        # Split out items missing a task type so they can be reported; .get avoids
        # a KeyError when the key is absent entirely.
        with_task_type, without_task_type = py_.partition(items, lambda i: i.get("task_type"))
        if without_task_type:
            self.logger.error("No task type found for {}".format(without_task_type))
        if with_task_type:
            self.task_types.update(with_task_type)
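Because py_.partition splits on the truthiness of the predicate, items with a missing, None, or empty task_type all end up in the second list. A quick sketch with invented items:

from pydash import py_

items = [{"task_type": "relax"}, {"task_type": None}, {"task_type": ""}]

with_tt, without_tt = py_.partition(items, lambda i: i.get("task_type"))
# with_tt    == [{"task_type": "relax"}]
# without_tt == [{"task_type": None}, {"task_type": ""}]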
Example #4
    def combine_time_entities_from_slots(
            self, slot_tracker: Optional[List[Dict[str, Any]]],
            entities: List[BaseEntity]) -> List[BaseEntity]:
        previously_filled_time_entity = self.pick_previously_filled_time_entity(
            self.get_tracked_slots(slot_tracker))

        if not previously_filled_time_entity:
            return entities

        time_entities, other_entities = py_.partition(
            entities,
            lambda entity: entity.entity_type in CombineDateTimeOverSlots.SUPPORTED_ENTITIES,
        )
        combined_time_entities = [
            self.join(entity, previously_filled_time_entity)
            for entity in time_entities
        ]
        return combined_time_entities + other_entities
Example #5
    def update_targets(self, items):
        target = self.targets[0]
        xas_averaged = target.collection
        valids, invalids = py_.partition(
            mark_lu(py_.flatten(items), target.lu_field, self.dt_fetch),
            'valid')
        # Remove documents flagging now-valid data as invalid.
        xas_averaged.delete_many(
            mark_invalid({"mp_id": {
                "$in": py_.pluck(valids, 'mp_id')
            }}))

        for doc in valids:
            xas_averaged.update_one(py_.pick(doc, 'mp_id', 'element'),
                                    {'$set': doc},
                                    upsert=True)
        for doc in invalids:
            xas_averaged.update_one(mark_invalid(py_.pick(doc, 'mp_id')),
                                    {'$set': doc},
                                    upsert=True)
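Here the predicate is the string 'valid', pydash's property shorthand: documents whose valid field is truthy go into the first list, the rest into the second. A sketch with invented documents:

from pydash import py_

docs = [
    {"mp_id": "mp-1", "valid": True},
    {"mp_id": "mp-2", "valid": False},
]

valids, invalids = py_.partition(docs, "valid")
# valids   == [{"mp_id": "mp-1", "valid": True}]
# invalids == [{"mp_id": "mp-2", "valid": False}]
# py_.pluck(valids, "mp_id") == ["mp-1"]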
Example #6
    def update_targets(self, items):
        xas_averaged = self.targets[0]
        xas_averaged.ensure_index([("valid", 1), ("mp_id", 1)])
        xas_averaged.ensure_index([("mp_id", 1), ("element", 1)])
        xas_averaged.ensure_index([("chemsys", 1), ("element", 1)])
        valids, invalids = py_.partition(
            mark_lu(py_.flatten(items), xas_averaged.lu_field, self.dt_fetch),
            'valid')
        # Remove documents flagging now-valid data as invalid.
        xas_averaged.collection.delete_many(
            mark_invalid({"mp_id": {
                "$in": py_.pluck(valids, 'mp_id')
            }}))
        # Upsert valid and invalid documents in one ordered bulk write.
        bulk = xas_averaged.collection.initialize_ordered_bulk_op()
        for doc in valids:
            (bulk.find(py_.pick(doc, 'mp_id',
                                'element')).upsert().replace_one(doc))
        for doc in invalids:
            (bulk.find(mark_invalid(py_.pick(
                doc, 'mp_id'))).upsert().replace_one(doc))
        bulk.execute()