Example #1
class TestEntity(db.Model):
    """Test entity class."""

    json_property = json_util.JsonProperty(TestJsonType)
    json_property_default_value = json_util.JsonProperty(
        TestJsonType, default=TestJsonType())
    empty_json_property = json_util.JsonProperty(EmptyDictJsonType)
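
json_util.JsonProperty stores an object as JSON in a single datastore property. The wrapped type is expected to expose to_json()/from_json() hooks; a minimal sketch of what TestJsonType might look like (hypothetical, reconstructed from the property declarations above):

class TestJsonType(object):
    """Hypothetical JSON-serializable value type."""

    def __init__(self, size=0):
        self.size = size

    def to_json(self):
        # Return a structure that json.dumps can encode directly.
        return {'size': self.size}

    @classmethod
    def from_json(cls, json_value):
        # Rebuild the instance from the dict produced by to_json().
        return cls(json_value.get('size', 0))
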
Example #2
class TestEntity(db.Model):
    """Test entity class."""

    json_property = json_util.JsonProperty(TestJsonType)
    json_property_default_value = json_util.JsonProperty(
        TestJsonType, default=TestJsonType())
    int_property = db.IntegerProperty()
    datetime_property = db.DateTimeProperty(auto_now=True)

    a = db.IntegerProperty()
    b = db.IntegerProperty()
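
A minimal usage sketch for the entity above (assumes a TestJsonType like the one sketched under Example #1 and an initialized datastore stub, e.g. via testbed):

entity = TestEntity(a=1, b=2, int_property=5)
entity.json_property = TestJsonType()
key = entity.put()
fetched = TestEntity.get(key)  # json_property round-trips through JSON
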
Example #3
class Source(db.Model):
    graph_id = property(lambda x: str(x.key().name()))
    graph_type = db.StringProperty(choices=GRAPH_TYPES)

    # cached/derived from fb data
    name = db.StringProperty(indexed=False)
    feed_history_in_seconds = db.IntegerProperty(indexed=False)

    fb_info = json_util.JsonProperty(dict, indexed=False)
    latitude = db.FloatProperty(indexed=False)
    longitude = db.FloatProperty(indexed=False)

    street_dance_related = db.BooleanProperty()

    # probably to assume for a given event? rough weighting factor?
    # do we want to delete these now?
    freestyle = db.FloatProperty(indexed=False)
    choreo = db.FloatProperty(indexed=False)

    #STR_ID_MIGRATE
    creating_fb_uid = db.IntegerProperty(indexed=False)
    creation_time = db.DateTimeProperty(indexed=False, auto_now_add=True)
    last_scrape_time = db.DateTimeProperty(indexed=False)

    num_all_events = db.IntegerProperty(indexed=False)
    num_potential_events = db.IntegerProperty(indexed=False)
    num_real_events = db.IntegerProperty(indexed=False)
    num_false_negatives = db.IntegerProperty(indexed=False)

    def fraction_potential_are_real(self, bias=1):
        num_real_events = (self.num_real_events or 0) + bias
        num_potential_events = (self.num_potential_events or 0) + bias
        if num_potential_events:
            return 1.0 * num_real_events / num_potential_events
        else:
            return 0

    def fraction_real_are_false_negative(self, bias=1):
        if self.num_real_events:
            #TODO(lambert): figure out why num_false_negatives is None, in particular for source id=107687589275667 even after saving
            num_false_negatives = (self.num_false_negatives or 0) + bias
            num_real_events = (self.num_real_events or 0) + bias
            return 1.0 * num_false_negatives / num_real_events
        else:
            return 0

    def compute_derived_properties(self, fb_source_common, fb_source_data):
        # Only update the derived fields when we have feed data.
        if fb_source_common['empty']:
            self.fb_info = {}
        else:
            # LookupThing* (and all fb_info dependencies).
            # Only used for /search_pages functionality.
            self.fb_info = fb_source_data['info']
            self.graph_type = _type_for_fb_source(fb_source_common)
            if 'name' not in fb_source_common['info']:
                logging.error(
                    'cannot find name for fb event data: %s, cannot update source data...',
                    fb_source_common)
                return
            self.name = fb_source_common['info']['name']
            feed = fb_source_common['feed']['data']
            if feed:
                dt = datetime.datetime.strptime(feed[-1]['created_time'],
                                                '%Y-%m-%dT%H:%M:%S+0000')
                td = datetime.datetime.now() - dt
                total_seconds = td.seconds + td.days * 24 * 3600
                self.feed_history_in_seconds = total_seconds
                #logging.info('feed time delta is %s', self.feed_history_in_seconds)
            else:
                self.feed_history_in_seconds = 0
            location = fb_source_data['info'].get('location')
            if location:
                if location.get('latitude'):
                    self.latitude = float(location.get('latitude'))
                    self.longitude = float(location.get('longitude'))
                else:
                    component_names = [
                        'street', 'city', 'state', 'zip', 'region', 'country'
                    ]
                    components = [
                        location.get(x) for x in component_names
                        if location.get(x)
                    ]
                    address = ', '.join(components)
                    geocode = gmaps_api.lookup_address(address)
                    if geocode:
                        self.latitude, self.longitude = geocode.latlng()
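
The bias argument in the ratio helpers above applies add-one (Laplace-style) smoothing, so sources with very few events do not produce extreme ratios. A quick worked sketch with hypothetical counts:

s = Source(key_name='12345')  # hypothetical graph id
s.num_potential_events = 9
s.num_real_events = 4
# Smoothed: (4 + 1) / (9 + 1) = 0.5, instead of the raw 4/9 ~= 0.44.
print(s.fraction_potential_are_real())  # 0.5
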
Example #4
class ShardState(db.Model):
    """Single shard execution state.

  The shard state is stored in the datastore and is later aggregated by
  controller task. ShardState key_name is equal to shard_id.

  Shard state contains critical state to ensure the correctness of
  shard execution. It is the single source of truth about a shard's
  progress. For example:
  1. A slice is allowed to run only if its payload matches shard state's
     expectation.
  2. A slice is considered running only if it has acquired the shard's lock.
  3. A slice is considered done only if it has successfully committed shard
     state to db.

  Properties about the shard:
    active: whether this shard is still running, as a boolean.
    counters_map: shard's counters map as CountersMap. All counters yielded
      within mapreduce are stored here.
    mapreduce_id: unique id of the mapreduce.
    shard_id: unique id of this shard as string.
    shard_number: ordered number for this shard.
    retries: the number of times this shard has been retried.
    result_status: If not None, the final status of this shard.
    update_time: The last time this shard state was updated.
    shard_description: A string description of the work this shard will do.
    last_work_item: A string description of the last work item processed.
    writer_state: writer state for this shard. The shard's output writer
      instance can save in-memory output references to this field in its
      "finalize" method.

  Properties about slice management:
    slice_id: slice id of current executing slice. A slice's task
      will not run unless its slice_id matches this. Initial
      value is 0. By the end of slice execution, this number is
      incremented by 1.
    slice_start_time: a slice updates this to now at the beginning of
      execution. If the transaction succeeds, the current task holds
      a lease of slice duration + some grace period. During this time, no
      other task with the same slice_id will execute. Upon slice failure,
      the task should try to unset this value to allow retries to carry on
      ASAP.
    slice_request_id: the request id that holds/held the lease. When lease has
      expired, new request needs to verify that said request has indeed
      ended according to logs API. Do this only when lease has expired
      because logs API is expensive. This field should always be set/unset
      with slice_start_time. It is possible Logs API doesn't log a request
      at all or doesn't log the end of a request. So a new request can
      proceed after a long conservative timeout.
    slice_retries: the number of times a slice has been retried due to
      processing data when lock is held. Taskqueue/datastore errors
      related to slice/shard management are not counted. This count is
      only a lower bound and is used to determine when to fail a slice
      completely.
    acquired_once: whether the lock for this slice has been acquired at
      least once. When this is True, duplicates in outputs are possible.
  """

    RESULT_SUCCESS = "success"
    RESULT_FAILED = "failed"
    # Shard can be in aborted state when user issued abort, or controller
    # issued abort because some other shard failed.
    RESULT_ABORTED = "aborted"

    _RESULTS = frozenset([RESULT_SUCCESS, RESULT_FAILED, RESULT_ABORTED])

    # Maximum number of shard states to hold in memory at any time.
    _MAX_STATES_IN_MEMORY = 10

    # Functional properties.
    mapreduce_id = db.StringProperty(required=True)
    active = db.BooleanProperty(default=True, indexed=False)
    counters_map = json_util.JsonProperty(CountersMap,
                                          default=CountersMap(),
                                          indexed=False)
    result_status = db.StringProperty(choices=_RESULTS, indexed=False)
    retries = db.IntegerProperty(default=0, indexed=False)
    writer_state = json_util.JsonProperty(dict, indexed=False)
    slice_id = db.IntegerProperty(default=0, indexed=False)
    slice_start_time = db.DateTimeProperty(indexed=False)
    slice_request_id = db.ByteStringProperty(indexed=False)
    slice_retries = db.IntegerProperty(default=0, indexed=False)
    acquired_once = db.BooleanProperty(default=False, indexed=False)

    # For UI purposes only.
    update_time = db.DateTimeProperty(auto_now=True, indexed=False)
    shard_description = db.TextProperty(default="")
    last_work_item = db.TextProperty(default="")

    def __str__(self):
        kv = {
            "active": self.active,
            "slice_id": self.slice_id,
            "last_work_item": self.last_work_item,
            "update_time": self.update_time
        }
        if self.result_status:
            kv["result_status"] = self.result_status
        if self.retries:
            kv["retries"] = self.retries
        if self.slice_start_time:
            kv["slice_start_time"] = self.slice_start_time
        if self.slice_retries:
            kv["slice_retries"] = self.slice_retries
        if self.slice_request_id:
            kv["slice_request_id"] = self.slice_request_id
        if self.acquired_once:
            kv["acquired_once"] = self.acquired_once
        result = "ShardState is {"
        for k in sorted(kv.keys()):
            result += k + ":" + str(kv[k]) + ","
        result += "}"
        return result

    def reset_for_retry(self):
        """Reset self for shard retry."""
        self.retries += 1
        self.last_work_item = ""
        self.active = True
        self.result_status = None
        self.counters_map = CountersMap()
        self.slice_id = 0
        self.slice_start_time = None
        self.slice_request_id = None
        self.slice_retries = 0
        self.acquired_once = False

    def advance_for_next_slice(self, recovery_slice=False):
        """Advance self for next slice.

    Args:
      recovery_slice: True if this slice is running recovery logic.
        See handlers.MapperWorkerCallbackHandler._attempt_slice_recovery
        for more info.
    """
        self.slice_start_time = None
        self.slice_request_id = None
        self.slice_retries = 0
        self.acquired_once = False
        if recovery_slice:
            self.slice_id += 2
        else:
            self.slice_id += 1

    def set_for_failure(self):
        self.active = False
        self.result_status = self.RESULT_FAILED

    def set_for_abort(self):
        self.active = False
        self.result_status = self.RESULT_ABORTED

    def set_for_success(self):
        self.active = False
        self.result_status = self.RESULT_SUCCESS
        self.slice_start_time = None
        self.slice_request_id = None
        self.slice_retries = 0
        self.acquired_once = False

    def copy_from(self, other_state):
        """Copy data from another shard state entity to self."""
        for prop in self.properties().values():
            setattr(self, prop.name, getattr(other_state, prop.name))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.properties() == other.properties()

    def get_shard_number(self):
        """Gets the shard number from the key name."""
        return int(self.key().name().split("-")[-1])

    shard_number = property(get_shard_number)

    def get_shard_id(self):
        """Returns the shard ID."""
        return self.key().name()

    shard_id = property(get_shard_id)

    @classmethod
    def kind(cls):
        """Returns entity kind."""
        return "_AE_MR_ShardState"

    @classmethod
    def shard_id_from_number(cls, mapreduce_id, shard_number):
        """Get shard id by mapreduce id and shard number.

    Args:
      mapreduce_id: mapreduce id as string.
      shard_number: shard number to compute id for as int.

    Returns:
      shard id as string.
    """
        return "%s-%d" % (mapreduce_id, shard_number)

    @classmethod
    def get_key_by_shard_id(cls, shard_id):
        """Retrieves the Key for this ShardState.

    Args:
      shard_id: The shard ID to fetch.

    Returns:
      The Datastore key to use to retrieve this ShardState.
    """
        return db.Key.from_path(cls.kind(), shard_id)

    @classmethod
    def get_by_shard_id(cls, shard_id):
        """Get shard state from datastore by shard_id.

    Args:
      shard_id: shard id as string.

    Returns:
      ShardState for given shard id or None if it's not found.
    """
        return cls.get_by_key_name(shard_id)

    @classmethod
    def find_by_mapreduce_state(cls, mapreduce_state):
        """Find all shard states for given mapreduce.

    Deprecated. Use find_all_by_mapreduce_state.
    This will be removed after 1.8.9 release.

    Args:
      mapreduce_state: MapreduceState instance

    Returns:
      A list of ShardStates.
    """
        return list(cls.find_all_by_mapreduce_state(mapreduce_state))

    @classmethod
    def find_all_by_mapreduce_state(cls, mapreduce_state):
        """Find all shard states for given mapreduce.

    Never runs within a transaction since it may touch >5 entity groups (one
    for each shard).

    Args:
      mapreduce_state: MapreduceState instance

    Yields:
      shard states sorted by shard id.
    """
        keys = cls.calculate_keys_by_mapreduce_state(mapreduce_state)
        i = 0
        while i < len(keys):

            # We need a separate function so that we can mix the
            # non-transactional decorator with a generator.
            @db.non_transactional
            def no_tx_get(i):
                return db.get(keys[i:i + cls._MAX_STATES_IN_MEMORY])

            states = no_tx_get(i)
            for s in states:
                i += 1
                if s is not None:
                    yield s

    @classmethod
    def calculate_keys_by_mapreduce_state(cls, mapreduce_state):
        """Calculate all shard states keys for given mapreduce.

    Args:
      mapreduce_state: MapreduceState instance

    Returns:
      A list of keys for shard states, sorted by shard id.
      The corresponding shard states may not exist.
    """
        if mapreduce_state is None:
            return []

        keys = []
        for i in range(mapreduce_state.mapreduce_spec.mapper.shard_count):
            shard_id = cls.shard_id_from_number(mapreduce_state.key().name(),
                                                i)
            keys.append(cls.get_key_by_shard_id(shard_id))
        return keys

    @classmethod
    def create_new(cls, mapreduce_id, shard_number):
        """Create new shard state.

    Args:
      mapreduce_id: unique mapreduce id as string.
      shard_number: shard number for which to create shard state.

    Returns:
      new instance of ShardState ready to put into datastore.
    """
        shard_id = cls.shard_id_from_number(mapreduce_id, shard_number)
        state = cls(key_name=shard_id, mapreduce_id=mapreduce_id)
        return state
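
A hedged usage sketch tying the id helpers together (assumes an initialized datastore stub):

state = ShardState.create_new('mr-12345', shard_number=0)  # key name 'mr-12345-0'
state.put()
same = ShardState.get_by_shard_id('mr-12345-0')
assert same.shard_number == 0 and same.mapreduce_id == 'mr-12345'
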
Example #5
class MapreduceState(db.Model):
    """Holds accumulated state of mapreduce execution.

  MapreduceState is stored in datastore with a key name equal to the
  mapreduce ID. Only controller tasks can write to MapreduceState.

  Properties:
    mapreduce_spec: cached deserialized MapreduceSpec instance. read-only
    active: if this MR is still running.
    last_poll_time: last time controller job has polled this mapreduce.
    counters_map: shard's counters map as CountersMap. Mirrors
      counters_map_json.
    chart_url: last computed mapreduce status chart url. This chart displays the
      progress of all the shards the best way it can.
    sparkline_url: last computed mapreduce status chart url in small format.
    result_status: If not None, the final status of the job.
    active_shards: How many shards are still processing. This starts as 0,
      then set by KickOffJob handler to be the actual number of input
      readers after input splitting, and is updated by Controller task
      as shards finish.
    start_time: When the job started.
    writer_state: Json property to be used by writer to store its state.
      This is filled when single output per job. Will be deprecated.
      Use OutputWriter.get_filenames instead.
  """

    RESULT_SUCCESS = "success"
    RESULT_FAILED = "failed"
    RESULT_ABORTED = "aborted"

    _RESULTS = frozenset([RESULT_SUCCESS, RESULT_FAILED, RESULT_ABORTED])

    # Functional properties.
    mapreduce_spec = json_util.JsonProperty(MapreduceSpec, indexed=False)
    active = db.BooleanProperty(default=True, indexed=False)
    last_poll_time = db.DateTimeProperty(required=True)
    counters_map = json_util.JsonProperty(CountersMap,
                                          default=CountersMap(),
                                          indexed=False)
    app_id = db.StringProperty(required=False, indexed=True)
    writer_state = json_util.JsonProperty(dict, indexed=False)
    active_shards = db.IntegerProperty(default=0, indexed=False)
    failed_shards = db.IntegerProperty(default=0, indexed=False)
    aborted_shards = db.IntegerProperty(default=0, indexed=False)
    result_status = db.StringProperty(required=False, choices=_RESULTS)

    # For UI purposes only.
    chart_url = db.TextProperty(default="")
    chart_width = db.IntegerProperty(default=300, indexed=False)
    sparkline_url = db.TextProperty(default="")
    start_time = db.DateTimeProperty(auto_now_add=True)

    @classmethod
    def kind(cls):
        """Returns entity kind."""
        return "_AE_MR_MapreduceState"

    @classmethod
    def get_key_by_job_id(cls, mapreduce_id):
        """Retrieves the Key for a Job.

    Args:
      mapreduce_id: The job to retrieve.

    Returns:
      Datastore Key that can be used to fetch the MapreduceState.
    """
        return db.Key.from_path(cls.kind(), str(mapreduce_id))

    @classmethod
    def get_by_job_id(cls, mapreduce_id):
        """Retrieves the instance of state for a Job.

    Args:
      mapreduce_id: The mapreduce job to retrieve.

    Returns:
      instance of MapreduceState for passed id.
    """
        return db.get(cls.get_key_by_job_id(mapreduce_id))

    def set_processed_counts(self, shards_processed):
        """Updates a chart url to display processed count for each shard.

    Args:
      shards_processed: list of integers with number of processed entities in
        each shard
    """
        chart = google_chart_api.BarChart(shards_processed)
        shard_count = len(shards_processed)

        if shards_processed:
            # Only 16 labels on the whole chart.
            stride_length = max(1, shard_count / 16)
            chart.bottom.labels = []
            for x in xrange(shard_count):
                if (x % stride_length == 0 or x == shard_count - 1):
                    chart.bottom.labels.append(x)
                else:
                    chart.bottom.labels.append("")
            chart.left.labels = ["0", str(max(shards_processed))]
            chart.left.min = 0

        self.chart_width = min(700, max(300, shard_count * 20))
        self.chart_url = chart.display.Url(self.chart_width, 200)

    def get_processed(self):
        """Number of processed entities.

    Returns:
      The total number of processed entities as int.
    """
        return self.counters_map.get(context.COUNTER_MAPPER_CALLS)

    processed = property(get_processed)

    @staticmethod
    def create_new(mapreduce_id=None, gettime=datetime.datetime.now):
        """Create a new MapreduceState.

    Args:
      mapreduce_id: Mapreduce id as string.
      gettime: Used for testing.
    """
        if not mapreduce_id:
            mapreduce_id = MapreduceState.new_mapreduce_id()
        state = MapreduceState(key_name=mapreduce_id, last_poll_time=gettime())
        state.set_processed_counts([])
        return state

    @staticmethod
    def new_mapreduce_id():
        """Generate new mapreduce id."""
        return _get_descending_key()

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.properties() == other.properties()
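
A short hedged sketch of creating and re-fetching a job state (assumes a datastore stub; set_processed_counts also needs the bundled google_chart_api):

state = MapreduceState.create_new('job-1')
state.put()
fetched = MapreduceState.get_by_job_id('job-1')
assert fetched.active  # still marked running; no controller updates yet
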
Example #6
class MapreduceState(db.Model):
    """Holds accumulated state of mapreduce execution.

  MapreduceState is stored in datastore with a key name equal to the
  mapreduce ID. Only controller tasks can write to MapreduceState.

  Properties:
    mapreduce_spec: cached deserialized MapreduceSpec instance. read-only
    active: if this MR is still running.
    last_poll_time: last time controller job has polled this mapreduce.
    counters_map: shard's counters map as CountersMap. Mirrors
      counters_map_json.
    chart_url: last computed mapreduce status chart url. This chart displays the
      progress of all the shards the best way it can.
    sparkline_url: last computed mapreduce status chart url in small format.
    result_status: If not None, the final status of the job.
    active_shards: How many shards are still processing. This starts as 0,
      then set by KickOffJob handler to be the actual number of input
      readers after input splitting, and is updated by Controller task
      as shards finish.
    start_time: When the job started.
    writer_state: Json property to be used by writer to store its state.
      This is filled when single output per job. Will be deprecated.
      Use OutputWriter.get_filenames instead.
  """

    RESULT_SUCCESS = "success"
    RESULT_FAILED = "failed"
    RESULT_ABORTED = "aborted"

    _RESULTS = frozenset([RESULT_SUCCESS, RESULT_FAILED, RESULT_ABORTED])

    # Functional properties.
    # TODO(user): Replace mapreduce_spec with job_config.
    mapreduce_spec = json_util.JsonProperty(MapreduceSpec, indexed=False)
    active = db.BooleanProperty(default=True, indexed=False)
    last_poll_time = db.DateTimeProperty(required=True)
    counters_map = json_util.JsonProperty(CountersMap,
                                          default=CountersMap(),
                                          indexed=False)
    app_id = db.StringProperty(required=False, indexed=True)
    writer_state = json_util.JsonProperty(dict, indexed=False)
    active_shards = db.IntegerProperty(default=0, indexed=False)
    failed_shards = db.IntegerProperty(default=0, indexed=False)
    aborted_shards = db.IntegerProperty(default=0, indexed=False)
    result_status = db.StringProperty(required=False, choices=_RESULTS)

    # For UI purposes only.
    chart_url = db.TextProperty(default="")
    chart_width = db.IntegerProperty(default=300, indexed=False)
    sparkline_url = db.TextProperty(default="")
    start_time = db.DateTimeProperty(auto_now_add=True)

    @classmethod
    def kind(cls):
        """Returns entity kind."""
        return "_AE_MR_MapreduceState"

    @classmethod
    def get_key_by_job_id(cls, mapreduce_id):
        """Retrieves the Key for a Job.

    Args:
      mapreduce_id: The job to retrieve.

    Returns:
      Datastore Key that can be used to fetch the MapreduceState.
    """
        return db.Key.from_path(cls.kind(), str(mapreduce_id))

    @classmethod
    def get_by_job_id(cls, mapreduce_id):
        """Retrieves the instance of state for a Job.

    Args:
      mapreduce_id: The mapreduce job to retrieve.

    Returns:
      instance of MapreduceState for passed id.
    """
        return db.get(cls.get_key_by_job_id(mapreduce_id))

    def set_processed_counts(self, shards_processed, shards_status):
        """Updates a chart url to display processed count for each shard.

    Args:
      shards_processed: list of integers with number of processed entities in
        each shard.
      shards_status: list of status strings for each shard, parallel to
        shards_processed ("unknown", "success", "running", "aborted" or
        "failed").
    """
        chart = google_chart_api.BarChart()

        def filter_status(status_to_filter):
            return [
                count if status == status_to_filter else 0
                for count, status in zip(shards_processed, shards_status)
            ]

        if shards_status:
            # Each index will have only one non-zero count, so stack them to color-
            # code the bars by status
            # These status values are computed in _update_state_from_shard_states,
            # in mapreduce/handlers.py.
            chart.stacked = True
            chart.AddBars(filter_status("unknown"), color="404040")
            chart.AddBars(filter_status("success"), color="00ac42")
            chart.AddBars(filter_status("running"), color="3636a9")
            chart.AddBars(filter_status("aborted"), color="e29e24")
            chart.AddBars(filter_status("failed"), color="f6350f")
        else:
            chart.AddBars(shards_processed)

        shard_count = len(shards_processed)

        if shard_count > 95:
            # Auto-spacing does not work for large numbers of shards.
            pixels_per_shard = 700.0 / shard_count
            bar_thickness = int(pixels_per_shard * .9)

            chart.style = bar_chart.BarChartStyle(
                bar_thickness=bar_thickness,
                bar_gap=0.1,
                use_fractional_gap_spacing=True)

        if shards_processed and shard_count <= 95:
            # Adding labels puts us in danger of exceeding the URL length, only
            # do it when we have a small amount of data to plot.
            # Only 16 labels on the whole chart.
            stride_length = max(1, shard_count / 16)
            chart.bottom.labels = []
            for x in xrange(shard_count):
                if (x % stride_length == 0 or x == shard_count - 1):
                    chart.bottom.labels.append(x)
                else:
                    chart.bottom.labels.append("")
            chart.left.labels = ["0", str(max(shards_processed))]
            chart.left.min = 0

        self.chart_width = min(700, max(300, shard_count * 20))
        self.chart_url = chart.display.Url(self.chart_width, 200)

    def get_processed(self):
        """Number of processed entities.

    Returns:
      The total number of processed entities as int.
    """
        return self.counters_map.get(context.COUNTER_MAPPER_CALLS)

    processed = property(get_processed)

    @staticmethod
    def create_new(mapreduce_id=None, gettime=datetime.datetime.now):
        """Create a new MapreduceState.

    Args:
      mapreduce_id: Mapreduce id as string.
      gettime: Used for testing.
    """
        if not mapreduce_id:
            mapreduce_id = MapreduceState.new_mapreduce_id()
        state = MapreduceState(key_name=mapreduce_id, last_poll_time=gettime())
        state.set_processed_counts([], [])
        return state

    @staticmethod
    def new_mapreduce_id():
        """Generate new mapreduce id."""
        return util._get_descending_key()

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.properties() == other.properties()
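
Compared to Example #5, this variant stacks one bar series per status so the chart color-codes shards. A hedged call sketch:

state = MapreduceState.create_new('job-2')
state.set_processed_counts([120, 80, 0],
                           ['success', 'running', 'failed'])
print(state.chart_url)  # stacked bar chart URL, one colored series per status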